diff --git a/third_party/rust/bincode/.cargo-checksum.json b/third_party/rust/bincode/.cargo-checksum.json index 91127a9b15..d6e97cf32a 100644 --- a/third_party/rust/bincode/.cargo-checksum.json +++ b/third_party/rust/bincode/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"eda034c33e46b4b2ec6e6af6f1ddbcbf6642eaa88b46a1692ecc8acc7df9bf53","LICENSE.md":"90d7e062634054e6866d3c81e6a2b3058a840e6af733e98e80bdfe1a7dec6912","readme.md":"2ff3f54e6efd24b38f981b539be77438c10b9b8f67cad41ca2188396f95bcde8","src/config.rs":"2a6da0f5f95794add175c0a37e2a293f6d4c5932fc43f41492f646a98bf58aa9","src/de/mod.rs":"1da6149812e2d2c4ae8f9fdf7fc5bf85376d2f82da9a9311b0c45eff4033b71d","src/de/read.rs":"f8c5f2466c632f062e6fe55734d27c8fc7566c0894b610f012d8835af1810628","src/error.rs":"00a7a4de5cf1cc4a28dcf44c5c3ff607887baf41d5410b76468088963257b783","src/internal.rs":"6c827579a325e02bc7df8403965616b970b0f9b9763896ed938e719309a681b4","src/lib.rs":"9af9d73dcc1fc19240b8855ed521d38ae963e6685627e5d3eebf9acf40424aa3","src/ser/mod.rs":"b544c335a39a9b702772d586caa44f0e6febae3a0a109625fcee8ac68fc95645"},"package":"bda13183df33055cbb84b847becce220d392df502ebe7a4a78d7021771ed94d0"} \ No newline at end of file +{"files":{"Cargo.toml":"16c6c5374dd14773571dfab3254557ca0e3f7810e1fb27df6e27c2112e16c605","LICENSE.md":"90d7e062634054e6866d3c81e6a2b3058a840e6af733e98e80bdfe1a7dec6912","readme.md":"8d9ee7f575a20798e09471169ff3b5e3dea6ab74c50d2128cfc77f4074e97149","src/config.rs":"e3e6e264cdc736c442b9299d7ad39475457f0c69d2ea8fa0de14f8120c5f3023","src/de/mod.rs":"c431445d27366eaa05553fe1eb5dee320e87b7145b26fe50a56568fc83ccfe95","src/de/read.rs":"e188e291aef8c4ce41552390a28caacb26188c796e25c912d9730ad411a4abeb","src/error.rs":"ce6617bf8523392e6fc8b853b7768899a229b6b78dabc2918c0e2dd3f846aa01","src/internal.rs":"55a69c335cf15038eb76f7ba71b0828b20ee1d16adbc5e10e2087efbb74c55ea","src/lib.rs":"41258f970098e3b0421daf9fbaff34efa716039632f5d1b6409e22fe473c5775","src/ser/mod.rs":"323ca31c66188ba952faf6de111c91fe551a27ebc522c10a3cfe2e5348a74390"},"package":"5753e2a71534719bf3f4e57006c3a4f0d2c672a4b676eec84161f763eca87dbf"} \ No newline at end of file diff --git a/third_party/rust/bincode/Cargo.toml b/third_party/rust/bincode/Cargo.toml index b51a6d44b4..71d83f041b 100644 --- a/third_party/rust/bincode/Cargo.toml +++ b/third_party/rust/bincode/Cargo.toml @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're @@ -12,24 +12,29 @@ [package] name = "bincode" -version = "1.0.0" +version = "1.2.1" authors = ["Ty Overby ", "Francesco Mazzoli ", "David Tolnay ", "Daniel Griffen"] -exclude = ["logo.png", "tests/*", "examples/*", ".gitignore", ".travis.yml", "changelist.org"] +exclude = ["logo.png", "tests/*", "examples/*", ".gitignore", ".travis.yml"] publish = true description = "A binary serialization / deserialization strategy that uses Serde for transforming structs into bytes and vice versa!" 
documentation = "https://docs.rs/bincode" readme = "./readme.md" keywords = ["binary", "encode", "decode", "serialize", "deserialize"] -categories = ["network-programming"] +categories = ["encoding", "network-programming"] license = "MIT" -repository = "https://github.com/TyOverby/bincode" +repository = "https://github.com/servo/bincode" [dependencies.byteorder] -version = "1.2.0" +version = "1.3.0" [dependencies.serde] -version = "^1.0.27" +version = "1.0.63" [dev-dependencies.serde_bytes] -version = "^0.10.3" +version = "0.11" [dev-dependencies.serde_derive] -version = "^1.0.27" +version = "1.0.27" + +[features] +i128 = [] +[badges.travis-ci] +repository = "servo/bincode" diff --git a/third_party/rust/bincode/readme.md b/third_party/rust/bincode/readme.md index b8ed2dbd0b..579332b913 100644 --- a/third_party/rust/bincode/readme.md +++ b/third_party/rust/bincode/readme.md @@ -2,9 +2,9 @@ -[![Build Status](https://travis-ci.org/TyOverby/bincode.svg)](https://travis-ci.org/TyOverby/bincode) +[![Build Status](https://travis-ci.com/servo/bincode.svg)](https://travis-ci.com/servo/bincode) [![](https://meritbadge.herokuapp.com/bincode)](https://crates.io/crates/bincode) -[![](https://img.shields.io/badge/license-MIT-blue.svg)](http://opensource.org/licenses/MIT) +[![](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT) A compact encoder / decoder pair that uses a binary zero-fluff encoding scheme. The size of the encoded object will be the same or smaller than the size that @@ -13,25 +13,22 @@ the object takes up in memory in a running Rust program. In addition to exposing two simple functions (one that encodes to `Vec`, and one that decodes from `&[u8]`), binary-encode exposes a Reader/Writer API that makes it work -perfectly with other stream-based apis such as rust files, network streams, +perfectly with other stream-based APIs such as Rust files, network streams, and the [flate2-rs](https://github.com/alexcrichton/flate2-rs) compression library. -## [Api Documentation](http://docs.rs/bincode/) +## [API Documentation](https://docs.rs/bincode/) ## Bincode in the wild * [google/tarpc](https://github.com/google/tarpc): Bincode is used to serialize and deserialize networked RPC messages. * [servo/webrender](https://github.com/servo/webrender): Bincode records webrender API calls for record/replay-style graphics debugging. -* [servo/ipc-channel](https://github.com/servo/ipc-channel): Ipc-Channel uses Bincode to send structs between processes using a channel-like API. +* [servo/ipc-channel](https://github.com/servo/ipc-channel): IPC-Channel uses Bincode to send structs between processes using a channel-like API. ## Example -```rust -#[macro_use] -extern crate serde_derive; -extern crate bincode; -use bincode::{serialize, deserialize, Infinite}; +```rust +use serde::{Serialize, Deserialize}; #[derive(Serialize, Deserialize, PartialEq, Debug)] struct Entity { @@ -45,18 +42,17 @@ struct World(Vec); fn main() { let world = World(vec![Entity { x: 0.0, y: 4.0 }, Entity { x: 10.0, y: 20.5 }]); - let encoded: Vec = serialize(&world, Infinite).unwrap(); + let encoded: Vec = bincode::serialize(&world).unwrap(); // 8 bytes for the length of the vector, 4 bytes per float. 
assert_eq!(encoded.len(), 8 + 4 * 4); - let decoded: World = deserialize(&encoded[..]).unwrap(); + let decoded: World = bincode::deserialize(&encoded[..]).unwrap(); assert_eq!(world, decoded); } ``` - ## Details The encoding (and thus decoding) proceeds unsurprisingly -- primitive diff --git a/third_party/rust/bincode/src/config.rs b/third_party/rust/bincode/src/config.rs index cebd0be64a..885dbc7f09 100644 --- a/third_party/rust/bincode/src/config.rs +++ b/third_party/rust/bincode/src/config.rs @@ -1,14 +1,14 @@ use super::internal::{Bounded, Infinite, SizeLimit}; -use ::error::Result; use byteorder::{BigEndian, ByteOrder, LittleEndian, NativeEndian}; -use {DeserializerAcceptor, SerializerAcceptor}; +use de::read::BincodeRead; +use error::Result; use serde; -use std::io::{Write, Read}; +use std::io::{Read, Write}; use std::marker::PhantomData; -use de::read::BincodeRead; +use {DeserializerAcceptor, SerializerAcceptor}; -use self::LimitOption::*; use self::EndianOption::*; +use self::LimitOption::*; struct DefaultOptions(Infinite); @@ -95,6 +95,7 @@ enum EndianOption { /// /// When a byte limit is set, bincode will return `Err` on any deserialization that goes over the limit, or any /// serialization that goes over the limit. +#[derive(Clone)] pub struct Config { limit: LimitOption, endian: EndianOption, @@ -110,7 +111,6 @@ pub(crate) struct WithOtherEndian { _endian: PhantomData, } - impl WithOtherLimit { #[inline(always)] pub(crate) fn new(options: O, limit: L) -> WithOtherLimit { @@ -179,7 +179,7 @@ macro_rules! config_map { $call } } - } + }; } impl Config { @@ -217,7 +217,7 @@ impl Config { /// Sets the endianness to big-endian #[inline(always)] pub fn big_endian(&mut self) -> &mut Self { - self.endian= EndianOption::Big; + self.endian = EndianOption::Big; self } @@ -245,7 +245,11 @@ impl Config { /// If the serialization would take more bytes than allowed by the size limit, an error /// is returned and *no bytes* will be written into the `Writer` #[inline(always)] - pub fn serialize_into(&self, w: W, t: &T) -> Result<()> { + pub fn serialize_into( + &self, + w: W, + t: &T, + ) -> Result<()> { config_map!(self, opts => ::internal::serialize_into(w, t, opts)) } @@ -258,38 +262,85 @@ impl Config { /// TODO: document #[doc(hidden)] #[inline(always)] - pub fn deserialize_in_place<'a, R, T: >(&self, reader: R, place: &mut T) -> Result<()> + pub fn deserialize_in_place<'a, R, T>(&self, reader: R, place: &mut T) -> Result<()> where R: BincodeRead<'a>, - T: serde::de::Deserialize<'a> + T: serde::de::Deserialize<'a>, { config_map!(self, opts => ::internal::deserialize_in_place(reader, opts, place)) } + /// Deserializes a slice of bytes with state `seed` using this configuration. + #[inline(always)] + pub fn deserialize_seed<'a, T: serde::de::DeserializeSeed<'a>>( + &self, + seed: T, + bytes: &'a [u8], + ) -> Result { + config_map!(self, opts => ::internal::deserialize_seed(seed, bytes, opts)) + } + /// Deserializes an object directly from a `Read`er using this configuration /// /// If this returns an `Error`, `reader` may be in an invalid state. #[inline(always)] - pub fn deserialize_from(&self, reader: R) -> Result { + pub fn deserialize_from( + &self, + reader: R, + ) -> Result { config_map!(self, opts => ::internal::deserialize_from(reader, opts)) } + /// Deserializes an object directly from a `Read`er with state `seed` using this configuration + /// + /// If this returns an `Error`, `reader` may be in an invalid state. 
+ #[inline(always)] + pub fn deserialize_from_seed<'a, R: Read, T: serde::de::DeserializeSeed<'a>>( + &self, + seed: T, + reader: R, + ) -> Result { + config_map!(self, opts => ::internal::deserialize_from_seed(seed, reader, opts)) + } + /// Deserializes an object from a custom `BincodeRead`er using the default configuration. /// It is highly recommended to use `deserialize_from` unless you need to implement /// `BincodeRead` for performance reasons. /// /// If this returns an `Error`, `reader` may be in an invalid state. #[inline(always)] - pub fn deserialize_from_custom<'a, R: BincodeRead<'a>, T: serde::de::DeserializeOwned>(&self, reader: R) -> Result { + pub fn deserialize_from_custom<'a, R: BincodeRead<'a>, T: serde::de::DeserializeOwned>( + &self, + reader: R, + ) -> Result { config_map!(self, opts => ::internal::deserialize_from_custom(reader, opts)) } + /// Deserializes an object from a custom `BincodeRead`er with state `seed` using the default + /// configuration. It is highly recommended to use `deserialize_from` unless you need to + /// implement `BincodeRead` for performance reasons. + /// + /// If this returns an `Error`, `reader` may be in an invalid state. + #[inline(always)] + pub fn deserialize_from_custom_seed< + 'a, + R: BincodeRead<'a>, + T: serde::de::DeserializeSeed<'a>, + >( + &self, + seed: T, + reader: R, + ) -> Result { + config_map!(self, opts => ::internal::deserialize_from_custom_seed(seed, reader, opts)) + } + /// Executes the acceptor with a serde::Deserializer instance. /// NOT A PART OF THE STABLE PUBLIC API #[doc(hidden)] - pub fn with_deserializer<'a, A, R>(&self, reader: R, acceptor: A) -> A::Output - where A: DeserializerAcceptor<'a>, - R: BincodeRead<'a> + pub fn with_deserializer<'a, A, R>(&self, reader: R, acceptor: A) -> A::Output + where + A: DeserializerAcceptor<'a>, + R: BincodeRead<'a>, { config_map!(self, opts => { let mut deserializer = ::de::Deserializer::new(reader, opts); @@ -301,8 +352,9 @@ impl Config { /// NOT A PART OF THE STABLE PUBLIC API #[doc(hidden)] pub fn with_serializer(&self, writer: W, acceptor: A) -> A::Output - where A: SerializerAcceptor, - W: Write + where + A: SerializerAcceptor, + W: Write, { config_map!(self, opts => { let mut serializer = ::ser::Serializer::new(writer, opts); diff --git a/third_party/rust/bincode/src/de/mod.rs b/third_party/rust/bincode/src/de/mod.rs index 8f2c2cd13d..00f672d36b 100644 --- a/third_party/rust/bincode/src/de/mod.rs +++ b/third_party/rust/bincode/src/de/mod.rs @@ -1,13 +1,13 @@ +use config::Options; use std::io::Read; -use ::config::Options; -use serde; +use self::read::BincodeRead; use byteorder::ReadBytesExt; -use serde::de::IntoDeserializer; +use internal::SizeLimit; +use serde; use serde::de::Error as DeError; -use ::{Error, ErrorKind, Result}; -use ::internal::SizeLimit; -use self::read::BincodeRead; +use serde::de::IntoDeserializer; +use {Error, ErrorKind, Result}; pub mod read; @@ -19,7 +19,7 @@ pub mod read; /// The ByteOrder that is chosen will impact the endianness that /// is used to read integers out of the reader. /// -/// ```rust,ignore +/// ```ignore /// let d = Deserializer::new(&mut some_reader, SizeLimit::new()); /// serde::Deserialize::deserialize(&mut deserializer); /// let bytes_read = d.bytes_read(); @@ -108,6 +108,10 @@ where impl_nums!(f32, deserialize_f32, visit_f32, read_f32); impl_nums!(f64, deserialize_f64, visit_f64, read_f64); + serde_if_integer128! 
{ + impl_nums!(u128, deserialize_u128, visit_u128, read_u128); + impl_nums!(i128, deserialize_i128, visit_i128, read_i128); + } #[inline] fn deserialize_u8(self, visitor: V) -> Result @@ -209,12 +213,16 @@ where V: serde::de::Visitor<'de>, { impl<'de, 'a, R: 'a, O> serde::de::EnumAccess<'de> for &'a mut Deserializer - where R: BincodeRead<'de>, O: Options { + where + R: BincodeRead<'de>, + O: Options, + { type Error = Error; type Variant = Self; fn variant_seed(self, seed: V) -> Result<(V::Value, Self::Variant)> - where V: serde::de::DeserializeSeed<'de>, + where + V: serde::de::DeserializeSeed<'de>, { let idx: u32 = try!(serde::de::Deserialize::deserialize(&mut *self)); let val: Result<_> = seed.deserialize(idx.into_deserializer()); @@ -234,13 +242,9 @@ where len: usize, } - impl< - 'de, - 'a, - 'b: 'a, - R: BincodeRead<'de> + 'b, - O: Options, - > serde::de::SeqAccess<'de> for Access<'a, R, O> { + impl<'de, 'a, 'b: 'a, R: BincodeRead<'de> + 'b, O: Options> serde::de::SeqAccess<'de> + for Access<'a, R, O> + { type Error = Error; fn next_element_seed(&mut self, seed: T) -> Result> @@ -300,13 +304,9 @@ where len: usize, } - impl< - 'de, - 'a, - 'b: 'a, - R: BincodeRead<'de> + 'b, - O: Options, - > serde::de::MapAccess<'de> for Access<'a, R, O> { + impl<'de, 'a, 'b: 'a, R: BincodeRead<'de> + 'b, O: Options> serde::de::MapAccess<'de> + for Access<'a, R, O> + { type Error = Error; fn next_key_seed(&mut self, seed: K) -> Result> @@ -409,7 +409,10 @@ where } impl<'de, 'a, R, O> serde::de::VariantAccess<'de> for &'a mut Deserializer -where R: BincodeRead<'de>, O: Options{ +where + R: BincodeRead<'de>, + O: Options, +{ type Error = Error; fn unit_variant(self) -> Result<()> { @@ -417,44 +420,43 @@ where R: BincodeRead<'de>, O: Options{ } fn newtype_variant_seed(self, seed: T) -> Result - where T: serde::de::DeserializeSeed<'de>, + where + T: serde::de::DeserializeSeed<'de>, { serde::de::DeserializeSeed::deserialize(seed, self) } - fn tuple_variant(self, - len: usize, - visitor: V) -> Result - where V: serde::de::Visitor<'de>, + fn tuple_variant(self, len: usize, visitor: V) -> Result + where + V: serde::de::Visitor<'de>, { serde::de::Deserializer::deserialize_tuple(self, len, visitor) } - fn struct_variant(self, - fields: &'static [&'static str], - visitor: V) -> Result - where V: serde::de::Visitor<'de>, + fn struct_variant(self, fields: &'static [&'static str], visitor: V) -> Result + where + V: serde::de::Visitor<'de>, { serde::de::Deserializer::deserialize_tuple(self, fields.len(), visitor) } } static UTF8_CHAR_WIDTH: [u8; 256] = [ -1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, -1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x1F -1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, -1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x3F -1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, -1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x5F -1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, -1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x7F -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 0x9F -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, -0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 0xBF -0,0,2,2,2,2,2,2,2,2,2,2,2,2,2,2, -2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, // 0xDF -3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, // 0xEF -4,4,4,4,4,0,0,0,0,0,0,0,0,0,0,0, // 0xFF + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, // 0x1F + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, // 0x3F + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, // 0x5F + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, // 0x7F + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, // 0x9F + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, // 0xBF + 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, // 0xDF + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // 0xEF + 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0xFF ]; // This function is a copy of core::str::utf8_char_width diff --git a/third_party/rust/bincode/src/de/read.rs b/third_party/rust/bincode/src/de/read.rs index 3ddcee59b9..ffc5ae2ac0 100644 --- a/third_party/rust/bincode/src/de/read.rs +++ b/third_party/rust/bincode/src/de/read.rs @@ -1,6 +1,6 @@ -use std::io; use error::Result; use serde; +use std::{io, slice}; /// An optional Read trait for advanced Bincode usage. /// @@ -78,9 +78,10 @@ impl io::Read for IoReader { impl<'storage> SliceReader<'storage> { #[inline(always)] fn unexpected_eof() -> Box<::ErrorKind> { - return Box::new(::ErrorKind::Io( - io::Error::new(io::ErrorKind::UnexpectedEof, ""), - )); + return Box::new(::ErrorKind::Io(io::Error::new( + io::ErrorKind::UnexpectedEof, + "", + ))); } } @@ -135,27 +136,43 @@ where R: io::Read, { fn fill_buffer(&mut self, length: usize) -> Result<()> { + // We first reserve the space needed in our buffer. let current_length = self.temp_buffer.len(); if length > current_length { self.temp_buffer.reserve_exact(length - current_length); } + // Then create a slice with the length as our desired length. This is + // safe as long as we only write (no reads) to this buffer, because + // `reserve_exact` above has allocated this space. + let buf = unsafe { + slice::from_raw_parts_mut(self.temp_buffer.as_mut_ptr(), length) + }; + + // This method is assumed to properly handle slices which include + // uninitialized bytes (as ours does). See discussion at the link below. + // https://github.com/servo/bincode/issues/260 + self.reader.read_exact(buf)?; + + // Only after `read_exact` successfully returns do we set the buffer + // length. By doing this after the call to `read_exact`, we can avoid + // exposing uninitialized memory in the case of `read_exact` returning + // an error. 
unsafe { self.temp_buffer.set_len(length); } - self.reader.read_exact(&mut self.temp_buffer)?; Ok(()) } } -impl BincodeRead<'static> for IoReader +impl<'a, R> BincodeRead<'a> for IoReader where R: io::Read, { fn forward_read_str(&mut self, length: usize, visitor: V) -> Result where - V: serde::de::Visitor<'static>, + V: serde::de::Visitor<'a>, { self.fill_buffer(length)?; @@ -175,7 +192,7 @@ where fn forward_read_bytes(&mut self, length: usize, visitor: V) -> Result where - V: serde::de::Visitor<'static>, + V: serde::de::Visitor<'a>, { self.fill_buffer(length)?; let r = visitor.visit_bytes(&self.temp_buffer[..]); diff --git a/third_party/rust/bincode/src/error.rs b/third_party/rust/bincode/src/error.rs index f5cfca3931..1f52424c18 100644 --- a/third_party/rust/bincode/src/error.rs +++ b/third_party/rust/bincode/src/error.rs @@ -1,7 +1,7 @@ +use std::error::Error as StdError; use std::io; -use std::{error, fmt}; use std::str::Utf8Error; -use std::error::Error as StdError; +use std::{error, fmt}; use serde; @@ -47,14 +47,14 @@ impl StdError for ErrorKind { ErrorKind::InvalidBoolEncoding(_) => "invalid u8 while decoding bool", ErrorKind::InvalidCharEncoding => "char is not valid", ErrorKind::InvalidTagEncoding(_) => "tag for enum is not valid", - ErrorKind::SequenceMustHaveLength => - "Bincode can only encode sequences and maps that have a knowable size ahead of time", + ErrorKind::SequenceMustHaveLength => { + "Bincode can only encode sequences and maps that have a knowable size ahead of time" + } ErrorKind::DeserializeAnyNotSupported => { "Bincode doesn't support serde::Deserializer::deserialize_any" } ErrorKind::SizeLimit => "the size limit has been reached", ErrorKind::Custom(ref msg) => msg, - } } @@ -91,16 +91,12 @@ impl fmt::Display for ErrorKind { ErrorKind::InvalidTagEncoding(tag) => { write!(fmt, "{}, found {}", self.description(), tag) } - ErrorKind::SequenceMustHaveLength => { - write!(fmt, "{}", self.description()) - } + ErrorKind::SequenceMustHaveLength => write!(fmt, "{}", self.description()), ErrorKind::SizeLimit => write!(fmt, "{}", self.description()), - ErrorKind::DeserializeAnyNotSupported => { - write!( - fmt, - "Bincode does not support the serde::Deserializer::deserialize_any method" - ) - } + ErrorKind::DeserializeAnyNotSupported => write!( + fmt, + "Bincode does not support the serde::Deserializer::deserialize_any method" + ), ErrorKind::Custom(ref s) => s.fmt(fmt), } } diff --git a/third_party/rust/bincode/src/internal.rs b/third_party/rust/bincode/src/internal.rs index bab4a28501..968950a312 100644 --- a/third_party/rust/bincode/src/internal.rs +++ b/third_party/rust/bincode/src/internal.rs @@ -1,5 +1,6 @@ -use std::io::{Read, Write}; use serde; +use std::io::{Read, Write}; +use std::marker::PhantomData; use config::{Options, OptionsExt}; use de::read::BincodeRead; @@ -77,10 +78,18 @@ where R: Read, T: serde::de::DeserializeOwned, O: Options, +{ + deserialize_from_seed(PhantomData, reader, options) +} + +pub(crate) fn deserialize_from_seed<'a, R, T, O>(seed: T, reader: R, options: O) -> Result +where + R: Read, + T: serde::de::DeserializeSeed<'a>, + O: Options, { let reader = ::de::read::IoReader::new(reader); - let mut deserializer = ::de::Deserializer::<_, O>::new(reader, options); - serde::Deserialize::deserialize(&mut deserializer) + deserialize_from_custom_seed(seed, reader, options) } pub(crate) fn deserialize_from_custom<'a, R, T, O>(reader: R, options: O) -> Result @@ -88,9 +97,22 @@ where R: BincodeRead<'a>, T: serde::de::DeserializeOwned, O: Options, +{ + 
deserialize_from_custom_seed(PhantomData, reader, options) +} + +pub(crate) fn deserialize_from_custom_seed<'a, R, T, O>( + seed: T, + reader: R, + options: O, +) -> Result +where + R: BincodeRead<'a>, + T: serde::de::DeserializeSeed<'a>, + O: Options, { let mut deserializer = ::de::Deserializer::<_, O>::new(reader, options); - serde::Deserialize::deserialize(&mut deserializer) + seed.deserialize(&mut deserializer) } pub(crate) fn deserialize_in_place<'a, R, T, O>(reader: R, options: O, place: &mut T) -> Result<()> @@ -107,14 +129,20 @@ pub(crate) fn deserialize<'a, T, O>(bytes: &'a [u8], options: O) -> Result where T: serde::de::Deserialize<'a>, O: Options, +{ + deserialize_seed(PhantomData, bytes, options) +} + +pub(crate) fn deserialize_seed<'a, T, O>(seed: T, bytes: &'a [u8], options: O) -> Result +where + T: serde::de::DeserializeSeed<'a>, + O: Options, { let reader = ::de::read::SliceReader::new(bytes); let options = ::config::WithOtherLimit::new(options, Infinite); - let mut deserializer = ::de::Deserializer::new(reader, options); - serde::Deserialize::deserialize(&mut deserializer) + deserialize_from_custom_seed(seed, reader, options) } - pub(crate) trait SizeLimit: Clone { /// Tells the SizeLimit that a certain number of bytes has been /// read or written. Returns Err if the limit has been exceeded. @@ -123,7 +151,6 @@ pub(crate) trait SizeLimit: Clone { fn limit(&self) -> Option; } - /// A SizeLimit that restricts serialized or deserialized messages from /// exceeding a certain byte length. #[derive(Copy, Clone)] diff --git a/third_party/rust/bincode/src/lib.rs b/third_party/rust/bincode/src/lib.rs index e72ff25520..594b69d741 100644 --- a/third_party/rust/bincode/src/lib.rs +++ b/third_party/rust/bincode/src/lib.rs @@ -7,35 +7,40 @@ //! //! ### Using Basic Functions //! -//! ```rust -//! extern crate bincode; -//! use bincode::{serialize, deserialize}; +//! ```edition2018 //! fn main() { //! // The object that we will serialize. //! let target: Option = Some("hello world".to_string()); //! -//! let encoded: Vec = serialize(&target).unwrap(); -//! let decoded: Option = deserialize(&encoded[..]).unwrap(); +//! let encoded: Vec = bincode::serialize(&target).unwrap(); +//! let decoded: Option = bincode::deserialize(&encoded[..]).unwrap(); //! assert_eq!(target, decoded); //! } //! ``` +//! +//! ### 128bit numbers +//! +//! Support for `i128` and `u128` is automatically enabled on Rust toolchains +//! greater than or equal to `1.26.0` and disabled for targets which do not support it +#![doc(html_root_url = "https://docs.rs/bincode/1.2.1")] #![crate_name = "bincode"] #![crate_type = "rlib"] #![crate_type = "dylib"] extern crate byteorder; +#[macro_use] extern crate serde; mod config; -mod ser; -mod error; mod de; +mod error; mod internal; +mod ser; -pub use error::{Error, ErrorKind, Result}; pub use config::Config; pub use de::read::{BincodeRead, IoReader, SliceReader}; +pub use error::{Error, ErrorKind, Result}; /// An object that implements this trait can be passed a /// serde::Deserializer without knowing its concrete type. 
@@ -68,6 +73,7 @@ pub trait SerializerAcceptor { /// | Byte limit | Endianness | /// |------------|------------| /// | Unlimited | Little | +#[inline(always)] pub fn config() -> Config { Config::new() } @@ -123,7 +129,7 @@ where pub fn deserialize_in_place<'a, R, T>(reader: R, place: &mut T) -> Result<()> where T: serde::de::Deserialize<'a>, - R: BincodeRead<'a> + R: BincodeRead<'a>, { config().deserialize_in_place(reader, place) } @@ -147,9 +153,10 @@ where /// Executes the acceptor with a serde::Deserializer instance. /// NOT A PART OF THE STABLE PUBLIC API #[doc(hidden)] -pub fn with_deserializer<'a, A, R>(reader: R, acceptor: A) -> A::Output -where A: DeserializerAcceptor<'a>, - R: BincodeRead<'a> +pub fn with_deserializer<'a, A, R>(reader: R, acceptor: A) -> A::Output +where + A: DeserializerAcceptor<'a>, + R: BincodeRead<'a>, { config().with_deserializer(reader, acceptor) } @@ -158,8 +165,9 @@ where A: DeserializerAcceptor<'a>, /// NOT A PART OF THE STABLE PUBLIC API #[doc(hidden)] pub fn with_serializer(writer: W, acceptor: A) -> A::Output -where A: SerializerAcceptor, - W: std::io::Write +where + A: SerializerAcceptor, + W: std::io::Write, { config().with_serializer(writer, acceptor) } diff --git a/third_party/rust/bincode/src/ser/mod.rs b/third_party/rust/bincode/src/ser/mod.rs index f9dd26d80d..737c80d129 100644 --- a/third_party/rust/bincode/src/ser/mod.rs +++ b/third_party/rust/bincode/src/ser/mod.rs @@ -5,9 +5,9 @@ use serde; use byteorder::WriteBytesExt; -use super::{Result, Error, ErrorKind}; -use ::config::Options; use super::internal::SizeLimit; +use super::{Error, ErrorKind, Result}; +use config::Options; /// An Serializer that encodes values directly into a Writer. /// @@ -51,9 +51,9 @@ impl<'a, W: Write, O: Options> serde::Serializer for &'a mut Serializer { } fn serialize_bool(self, v: bool) -> Result<()> { - self.writer.write_u8(if v { 1 } else { 0 }).map_err( - Into::into, - ) + self.writer + .write_u8(if v { 1 } else { 0 }) + .map_err(Into::into) } fn serialize_u8(self, v: u8) -> Result<()> { @@ -88,6 +88,16 @@ impl<'a, W: Write, O: Options> serde::Serializer for &'a mut Serializer { self.writer.write_i64::(v).map_err(Into::into) } + serde_if_integer128! { + fn serialize_u128(self, v: u128) -> Result<()> { + self.writer.write_u128::(v).map_err(Into::into) + } + + fn serialize_i128(self, v: i128) -> Result<()> { + self.writer.write_i128::(v).map_err(Into::into) + } + } + fn serialize_f32(self, v: f32) -> Result<()> { self.writer.write_f32::(v).map_err(Into::into) } @@ -102,9 +112,9 @@ impl<'a, W: Write, O: Options> serde::Serializer for &'a mut Serializer { } fn serialize_char(self, c: char) -> Result<()> { - self.writer.write_all(encode_utf8(c).as_slice()).map_err( - Into::into, - ) + self.writer + .write_all(encode_utf8(c).as_slice()) + .map_err(Into::into) } fn serialize_bytes(self, v: &[u8]) -> Result<()> { @@ -283,6 +293,16 @@ impl<'a, O: Options> serde::Serializer for &'a mut SizeChecker { self.add_value(v) } + serde_if_integer128! 
{ + fn serialize_u128(self, v: u128) -> Result<()> { + self.add_value(v) + } + + fn serialize_i128(self, v: i128) -> Result<()> { + self.add_value(v) + } + } + fn serialize_f32(self, v: f32) -> Result<()> { self.add_value(v) } @@ -644,7 +664,7 @@ impl<'a, O: Options> serde::ser::SerializeTupleVariant for SizeCompound<'a, O> { } } -impl<'a, O: Options+ 'a> serde::ser::SerializeMap for SizeCompound<'a, O> { +impl<'a, O: Options + 'a> serde::ser::SerializeMap for SizeCompound<'a, O> { type Ok = (); type Error = Error; diff --git a/third_party/rust/bindgen/.cargo-checksum.json b/third_party/rust/bindgen/.cargo-checksum.json index 95d8968827..6a3a609523 100644 --- a/third_party/rust/bindgen/.cargo-checksum.json +++ b/third_party/rust/bindgen/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.lock":"f1b56f3cb914b4ed3214d3ce87d599398b399841718fc938c1b5a309356a44ea","Cargo.toml":"a4656cdd7bd0794e6f10ba78ed3c9a82cd86bfcbec59be7731ee90984de64bde","LICENSE":"c23953d9deb0a3312dbeaf6c128a657f3591acee45067612fa68405eaa4525db","README.md":"5a1f556c6a57c0a6ccc65e19c27718e0f4b32381a8efcc80f6601b33c58c5d59","build.rs":"e1f148e01150af6a66b6af2e5d955c8b9fa092cb4697bae2bcec8a00119055ae","csmith-fuzzing/README.md":"7107b70fedb0c0a0cadb3c439a49c1bd0119a6d38dc63b1aecc74d1942256ef2","src/callbacks.rs":"82e0be9ca02e9a652af934ed546f1cedfc6db0716643123d9a5aab33b360c7d0","src/clang.rs":"66e86bfbbe872cc247cf3bc88a2155e25f587414834023515d184dc13f8f7287","src/codegen/bitfield_unit.rs":"a8fb1a2d97a99685106fcaac87d2013f79d2690d6a46ff05ad1e3629b6075664","src/codegen/bitfield_unit_tests.rs":"dd252134118450800b516e375c872e17b4c1aee63a7b8adbe5b2cd53434bbc7e","src/codegen/error.rs":"ebc9e0f50c6adc9558b76ce774346c02316500a1ebe3cbf56ed00e5e9fe3e456","src/codegen/helpers.rs":"304c9eb56ea6b2c054e1f9fefd5812b0df3a156eee5876f3051fd0b48c7aeac3","src/codegen/impl_debug.rs":"428df604b4be105e3275275e8be81e8306b940abddc1b89675f98a01bf5de8c1","src/codegen/impl_partialeq.rs":"83707f7b13501dc413c904a17163cb54af11e56138f36dfef40ce46c823200fd","src/codegen/mod.rs":"42732503dd25ed4b7924b71862f9100cf281e22f99016540da61a602c78a3650","src/codegen/struct_layout.rs":"482bab6384e65c78346de4f8d8e4d1c3b7df38250788b58bdd1f7b1c7bf70bac","src/extra_assertions.rs":"494534bd4f18b80d89b180c8a93733e6617edcf7deac413e9a73fd6e7bc9ced7","src/features.rs":"2d82f0700c22ea44e010a89c3ae857c3feaf2c85cab3fe4d0277a41a8c2841c4","src/ir/analysis/derive.rs":"2a2322f178760859cdb4b2d45d947ff213c7c684840b4ade46b7ceb34fa6705b","src/ir/analysis/has_destructor.rs":"10380d06ed03d058f10f6f6835d9b8fbebac455a1ea218780430a0ffd8d63472","src/ir/analysis/has_float.rs":"1838ba81eb05a9c3e311687e2247d561cc5093377b15ef8008257025ea56da04","src/ir/analysis/has_type_param_in_array.rs":"dddc5511a705e3a653b5e754e359637031b4862e1a1fc1e17f711fb2fbfc1cef","src/ir/analysis/has_vtable.rs":"8da9deec23c4552ecd5b883eaa036e4f2174a5949194c333a62ef463d28dcb6a","src/ir/analysis/mod.rs":"54993cb77df1870bb12cbc6b3a243c2da942cdc967a7d21dacb430601b49b2a1","src/ir/analysis/sizedness.rs":"d0673e19add38a07680ae3a9a5e998a0b2c3917e68efb6639ffe7ea193ada1b1","src/ir/analysis/template_params.rs":"9b662b5ec99cd8409d771a16ee42df500962b0c26f0da85e430ede19cc2b17c9","src/ir/annotations.rs":"268f90fc1d40fadee329c26b183b2aaa9de98f9246fea580404ee0e626315546","src/ir/comment.rs":"31d64a49ae3d9c3c348fa2539e03306ca3a23fae429cab452e42b31ecf632145","src/ir/comp.rs":"73d5d32d70b8e62d33ad4ed6bcbb9b23273c59b5b45570b85a2357c6e1116028","src/ir/context.rs":"c30be52b22fdb489afb34426bcb2e048ae2594846b15324693dd1b71e7dc3369","src/ir/derive.r
s":"e5581852eec87918901a129284b4965aefc8a19394187a8095779a084f28fabe","src/ir/dot.rs":"5da8336bf5fd8efabd784a06e0d764eb91566c19ced8ce017a24ae237f0cbe18","src/ir/enum_ty.rs":"c303f3b271d2703c2487e4afaf4b8c9b5bbedb9e1c6a8044de667c21ad8f67fb","src/ir/function.rs":"7a25a55d7f2ded1724894bd1f7ee4766a4bf5f193967bf3a2628ec604b918018","src/ir/int.rs":"68a86182743ec338d58e42203364dc7c8970cb7ec3550433ca92f0c9489b4442","src/ir/item.rs":"203fe53efb0203e0ddc3fb9fcff7b2068f80f252d249a39c137e0cc070663a49","src/ir/item_kind.rs":"7666a1ff1b8260978b790a08b4139ab56b5c65714a5652bbcec7faa7443adc36","src/ir/layout.rs":"936f96fafab34e35b622a5f9e56b0fbd2c97d2e9222470e3687f882f40db1349","src/ir/mod.rs":"713cd537434567003197a123cbae679602c715e976d22f7b23dafd0826ea4c70","src/ir/module.rs":"a26bb0ac90d4cabb0a45d9c1a42b5515c74d4c233dc084e2f85161eac12bff15","src/ir/objc.rs":"ced8242068d5daa2940469693f7277c79368019f8e30ce1e4f55d834bf24c411","src/ir/template.rs":"6c2823c9bab82ab1d70f4d643e8f4d6420be5eafcb78324fb69649e407561cec","src/ir/traversal.rs":"5ac088277f4dfe2918d81b9294aaee41fd83db8e46def66a05f89de078bf4c49","src/ir/ty.rs":"5af2b62d278c679b7c4e597263fce01113e90242e7d263b948d93bc4274dfe9a","src/ir/var.rs":"9226241b188877b6a7bea6523e14318a8523a6dba57c4f15809c377f87540061","src/lib.rs":"b968f8d0858e3145137a2e33c0913acf19d21f884f914bc513bc18eea1c37bf1","src/log_stubs.rs":"6dfdd908b7c6453da416cf232893768f9480e551ca4add0858ef88bf71ee6ceb","src/main.rs":"6b42a74dfd5c3bde75b7fb984a82f3b3d652abd45aa54b31a40fbda6b02ae674","src/options.rs":"f08facc9d58cb79c7ab93c9d614f13d4d3eca2b5801012da56490a790a8d8c4c","src/parse.rs":"be7d13cc84fae79ec7b3aa9e77063fa475a48d74a854423e2c72d75006a25202","src/regex_set.rs":"5cb72fc3714c0d79e9e942d003349c0775fafd7cd0c9603c65f5261883bbf9cf","src/time.rs":"8efe317e7c6b5ba8e0865ce7b49ca775ee8a02590f4241ef62f647fa3c22b68e"},"package":"ebd71393f1ec0509b553aa012b9b58e81dadbdff7130bd3b8cba576e69b32f75"} \ No newline at end of file 
+{"files":{"Cargo.lock":"21010e02f3dd565f9184d565a22b36c7bcd2d905fa8eda4e454bf5127a48271f","Cargo.toml":"6236f5ccbfbeed5ff84a3c353bbf7048b5670bea0e3538e4994d1f88f1bdf4c3","LICENSE":"c23953d9deb0a3312dbeaf6c128a657f3591acee45067612fa68405eaa4525db","README.md":"5a1f556c6a57c0a6ccc65e19c27718e0f4b32381a8efcc80f6601b33c58c5d59","build.rs":"e1f148e01150af6a66b6af2e5d955c8b9fa092cb4697bae2bcec8a00119055ae","csmith-fuzzing/README.md":"7107b70fedb0c0a0cadb3c439a49c1bd0119a6d38dc63b1aecc74d1942256ef2","src/callbacks.rs":"1e7af281981d26e70e079c7b6569e3c786c298be9cb04872764d73e8d721f053","src/clang.rs":"3b3015a068394e14ab09f41091b0aa3b12eb75e4482052797ce3240cdc3a0aa3","src/codegen/bitfield_unit.rs":"a8fb1a2d97a99685106fcaac87d2013f79d2690d6a46ff05ad1e3629b6075664","src/codegen/bitfield_unit_tests.rs":"dd252134118450800b516e375c872e17b4c1aee63a7b8adbe5b2cd53434bbc7e","src/codegen/error.rs":"5e308b8c54b68511fc8ea2ad15ddac510172c4ff460a80a265336440b0c9653d","src/codegen/helpers.rs":"00af02fd70f7e0a3293bbcb6d76a63908dd163e6ac435e44254c399575afe5ae","src/codegen/impl_debug.rs":"428df604b4be105e3275275e8be81e8306b940abddc1b89675f98a01bf5de8c1","src/codegen/impl_partialeq.rs":"83707f7b13501dc413c904a17163cb54af11e56138f36dfef40ce46c823200fd","src/codegen/mod.rs":"8f0d433412823fa85841d25ff626f50850c6903b145d3c79688eceb0b2c3071c","src/codegen/struct_layout.rs":"0bd30896a81ffb4aa0453602f7a7f38c246fdb8624841bfe55984d6829b46cc4","src/extra_assertions.rs":"494534bd4f18b80d89b180c8a93733e6617edcf7deac413e9a73fd6e7bc9ced7","src/features.rs":"fafb85510b1dfc9a41ed71f7d765fca49b236deb4ee567e00204e751362aaf23","src/ir/analysis/derive.rs":"6e25c277a1acf0565b962a28a272c57661de7e8cb272516f0ee41f04883858ac","src/ir/analysis/has_destructor.rs":"10380d06ed03d058f10f6f6835d9b8fbebac455a1ea218780430a0ffd8d63472","src/ir/analysis/has_float.rs":"1838ba81eb05a9c3e311687e2247d561cc5093377b15ef8008257025ea56da04","src/ir/analysis/has_type_param_in_array.rs":"dddc5511a705e3a653b5e754e359637031b4862e1a1fc1e17f711fb2fbfc1cef","src/ir/analysis/has_vtable.rs":"8da9deec23c4552ecd5b883eaa036e4f2174a5949194c333a62ef463d28dcb6a","src/ir/analysis/mod.rs":"54993cb77df1870bb12cbc6b3a243c2da942cdc967a7d21dacb430601b49b2a1","src/ir/analysis/sizedness.rs":"d0673e19add38a07680ae3a9a5e998a0b2c3917e68efb6639ffe7ea193ada1b1","src/ir/analysis/template_params.rs":"9b662b5ec99cd8409d771a16ee42df500962b0c26f0da85e430ede19cc2b17c9","src/ir/annotations.rs":"268f90fc1d40fadee329c26b183b2aaa9de98f9246fea580404ee0e626315546","src/ir/comment.rs":"31d64a49ae3d9c3c348fa2539e03306ca3a23fae429cab452e42b31ecf632145","src/ir/comp.rs":"4a65eeb1b59fe44b49f453cdaa2616a8eb17fb67de85856bd0522fd4fc0858be","src/ir/context.rs":"59abb2cf8e8169c4247a50298c6a96407f10d7dd736959bdbdf152ef4f502685","src/ir/derive.rs":"e5581852eec87918901a129284b4965aefc8a19394187a8095779a084f28fabe","src/ir/dot.rs":"e25ff72ac174a798894c9673d81bdfb86fa9f4228b34a14ce0dc741a186a52bd","src/ir/enum_ty.rs":"f92220c9603c0746412b605f79f83774bfaa6c2272895945eeb504e6b98d54ef","src/ir/function.rs":"ce4fae30af77eeba74486462a13104e49888c2ba1bcefa8a6654fcf3056ffa06","src/ir/int.rs":"68a86182743ec338d58e42203364dc7c8970cb7ec3550433ca92f0c9489b4442","src/ir/item.rs":"d0804c62421e2c7afefd8844c0231f6f3ee995af6d376446740af425c8cbfb4e","src/ir/item_kind.rs":"7666a1ff1b8260978b790a08b4139ab56b5c65714a5652bbcec7faa7443adc36","src/ir/layout.rs":"936f96fafab34e35b622a5f9e56b0fbd2c97d2e9222470e3687f882f40db1349","src/ir/mod.rs":"713cd537434567003197a123cbae679602c715e976d22f7b23dafd0826ea4c70","src/ir/module.rs":"a26bb0a
c90d4cabb0a45d9c1a42b5515c74d4c233dc084e2f85161eac12bff15","src/ir/objc.rs":"a637a0e759fd8d1eaed1afc32f494cb35456b8b1af402c6d59c72b5b29018ebf","src/ir/template.rs":"6c2823c9bab82ab1d70f4d643e8f4d6420be5eafcb78324fb69649e407561cec","src/ir/traversal.rs":"5ac088277f4dfe2918d81b9294aaee41fd83db8e46def66a05f89de078bf4c49","src/ir/ty.rs":"5af2b62d278c679b7c4e597263fce01113e90242e7d263b948d93bc4274dfe9a","src/ir/var.rs":"c5f4bd722a1f7f9e4b1d738204fde3fb47c95778369b83b29e8662211e1717d0","src/lib.rs":"6dfec11ff44c30a67942bc9383a48b9073b08f920af9e06eaecc0fc7c891ba7d","src/log_stubs.rs":"6dfdd908b7c6453da416cf232893768f9480e551ca4add0858ef88bf71ee6ceb","src/main.rs":"d38160b4060fe4fcb286a30f6bd6824764555f6a2c594b5c564d65f4a2ba00de","src/options.rs":"5edc1dae279a95642fbdeb20ffdb447e1fdaf463c90a9b434b984d7e3c718214","src/parse.rs":"be7d13cc84fae79ec7b3aa9e77063fa475a48d74a854423e2c72d75006a25202","src/regex_set.rs":"5cb72fc3714c0d79e9e942d003349c0775fafd7cd0c9603c65f5261883bbf9cf","src/time.rs":"8efe317e7c6b5ba8e0865ce7b49ca775ee8a02590f4241ef62f647fa3c22b68e"},"package":"6bb26d6a69a335b8cb0e7c7e9775cd5666611dc50a37177c3f2cedcfc040e8c8"} \ No newline at end of file diff --git a/third_party/rust/bindgen/Cargo.lock b/third_party/rust/bindgen/Cargo.lock index 1ddfdcce47..2abe0ecd73 100644 --- a/third_party/rust/bindgen/Cargo.lock +++ b/third_party/rust/bindgen/Cargo.lock @@ -2,10 +2,10 @@ # It is not intended for manual editing. [[package]] name = "aho-corasick" -version = "0.6.8" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "memchr 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -13,36 +13,36 @@ name = "ansi_term" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "atty" -version = "0.2.11" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", - "termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "bindgen" -version = "0.51.1" +version = "0.53.2" dependencies = [ - "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", - "cexpr 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "clang-sys 0.28.0 (registry+https://github.com/rust-lang/crates.io-index)", - "clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cexpr 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "clang-sys 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", "diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - 
"lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazycell 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "peeking_take_while 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "shlex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "which 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -50,53 +50,53 @@ dependencies = [ [[package]] name = "bitflags" -version = "1.0.4" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "byteorder" -version = "1.2.7" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "cc" -version = "1.0.25" +version = "1.0.45" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "cexpr" -version = "0.3.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "nom 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "nom 5.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "cfg-if" -version = "0.1.5" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "clang-sys" -version = "0.28.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", - "libloading 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "glob 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "libloading 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "clap" -version = "2.32.0" +version = "2.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", - "strsim 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "textwrap 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "textwrap 0.11.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "unicode-width 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -107,24 +107,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "env_logger" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", - "termcolor 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", + "humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "termcolor 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "glob" -version = "0.2.11" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "humantime" -version = "1.1.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -132,50 +132,48 @@ dependencies = [ [[package]] name = "lazy_static" -version = "1.1.0" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "lazycell" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "version_check 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] name = "libc" -version = "0.2.43" +version = "0.2.66" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "libloading" -version = "0.5.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "log" -version = "0.4.5" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "memchr" -version = "2.1.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", - "version_check 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] name = "nom" -version = "4.0.0" +version = "5.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "memchr 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "version_check 0.9.1 
(registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -185,7 +183,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "proc-macro2" -version = "1.0.0" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -198,51 +196,34 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "quote" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "redox_syscall" -version = "0.1.40" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "redox_termios" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "regex" -version = "1.0.5" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "aho-corasick 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-syntax 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)", "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "utf8-ranges 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "regex-syntax" -version = "0.6.2" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "ucd-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] name = "rustc-hash" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -252,33 +233,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "strsim" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "termcolor" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "wincolor 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "termion" -version = "1.5.1" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "wincolor 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "textwrap" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "unicode-width 0.1.5 
(registry+https://github.com/rust-lang/crates.io-index)", + "unicode-width 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -286,17 +257,12 @@ name = "thread_local" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "ucd-util" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "unicode-width" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -304,11 +270,6 @@ name = "unicode-xid" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "utf8-ranges" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "vec_map" version = "0.8.1" @@ -316,7 +277,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "version_check" -version = "0.1.4" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -324,12 +285,12 @@ name = "which" version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "winapi" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -343,10 +304,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "winapi-util" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -356,58 +317,54 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "wincolor" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [metadata] -"checksum aho-corasick 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)" = "68f56c7353e5a9547cbd76ed90f7bb5ffc3ba09d4ea9bd1d8c06c8b1142eeb5a" +"checksum aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "58fb5e95d83b38284460a5fda7d6470aa0b8844d283a0b614b8535e880800d2d" "checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" -"checksum atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "9a7d5b8723950951411ee34d271d99dddcc2035a16ab25310ea2c8cfd4369652" -"checksum bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "228047a76f468627ca71776ecdebd732a3423081fcf5125585bcd7c49886ce12" -"checksum byteorder 1.2.7 
(registry+https://github.com/rust-lang/crates.io-index)" = "94f88df23a25417badc922ab0f5716cc1330e87f71ddd9203b3a3ccd9cedf75d" -"checksum cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)" = "f159dfd43363c4d08055a07703eb7a3406b0dac4d0584d96965a3262db3c9d16" -"checksum cexpr 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8fc0086be9ca82f7fc89fc873435531cb898b86e850005850de1f820e2db6e9b" -"checksum cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "0c4e7bb64a8ebb0d856483e1e682ea3422f883c5f5615a90d51a2c82fe87fdd3" -"checksum clang-sys 0.28.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4227269cec09f5f83ff160be12a1e9b0262dd1aa305302d5ba296c2ebd291055" -"checksum clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b957d88f4b6a63b9d70d5f454ac8011819c6efa7727858f458ab71c756ce2d3e" +"checksum atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)" = "1803c647a3ec87095e7ae7acfca019e98de5ec9a7d01343f611cf3152ed71a90" +"checksum bitflags 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8a606a02debe2813760609f57a64a2ffd27d9fdf5b2f133eaca0b248dd92cdd2" +"checksum byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c3dd8985a7111efc5c80b44e23ecdd8c007de8ade3b96595387e812b957cf5" +"checksum cc 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)" = "4fc9a35e1f4290eb9e5fc54ba6cf40671ed2a2514c3eeb2b2a908dda2ea5a1be" +"checksum cexpr 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f4aedb84272dbe89af497cf81375129abda4fc0a9e7c5d317498c15cc30c0d27" +"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +"checksum clang-sys 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)" = "03542fa2ed0accba4a5c84ec031f09a20b725e7ba1b1c9b79f1feb4aa17d0c07" +"checksum clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9" "checksum diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "3c2b69f912779fbb121ceb775d74d51e915af17aaebc38d28a592843a2dd0a3a" -"checksum env_logger 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "afb070faf94c85d17d50ca44f6ad076bce18ae92f0037d350947240a36e9d42e" -"checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb" -"checksum humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0484fda3e7007f2a4a0d9c3a703ca38c71c54c55602ce4660c419fd32e188c9e" -"checksum lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca488b89a5657b0a2ecd45b95609b3e848cf1755da332a0da46e2b2b1cb371a7" -"checksum libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)" = "76e3a3ef172f1a0b9a9ff0dd1491ae5e6c948b94479a3021819ba7d860c8645d" -"checksum libloading 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3ad660d7cb8c5822cd83d10897b0f1f1526792737a179e73896152f85b88c2" -"checksum log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d4fcce5fa49cc693c312001daf1d13411c4a5283796bac1084299ea3e567113f" -"checksum memchr 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4b3629fe9fdbff6daa6c33b90f7c08355c1aca05a3d01fa8063b822fcf185f3b" -"checksum nom 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"898696750eb5c3ce5eb5afbfbe46e7f7c4e1936e19d3e97be4b7937da7b6d114" +"checksum env_logger 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "39ecdb7dd54465526f0a56d666e3b2dd5f3a218665a030b6e4ad9e70fa95d8fa" +"checksum glob 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" +"checksum humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" +"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +"checksum lazycell 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b294d6fa9ee409a054354afc4352b0b9ef7ca222c69b8812cbea9e7d2bf3783f" +"checksum libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)" = "d515b1f41455adea1313a4a2ac8a8a477634fbae63cc6100e3aebb207ce61558" +"checksum libloading 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f2b111a074963af1d37a139918ac6d49ad1d0d5e47f72fd55388619691a7d753" +"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" +"checksum memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "88579771288728879b57485cc7d6b07d648c9f0141eb955f8ab7f9d45394468e" +"checksum nom 5.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b471253da97532da4b61552249c521e01e736071f71c1a4f7ebbfbf0a06aad6" "checksum peeking_take_while 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" -"checksum proc-macro2 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "19f287c234c9b2d0308d692dee5c449c1a171167a6f8150f7cf2a49d8fd96967" +"checksum proc-macro2 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "afdc77cc74ec70ed262262942ebb7dac3d479e9e5cfa2da1841c0806f6cdabcc" "checksum quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9274b940887ce9addde99c4eee6b5c44cc494b182b97e73dc8ffdcb3397fd3f0" -"checksum quote 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7ab938ebe6f1c82426b5fb82eaf10c3e3028c53deaa3fbe38f5904b37cf4d767" -"checksum redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "c214e91d3ecf43e9a4e41e578973adeb14b474f2bee858742d127af75a0112b1" -"checksum redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7e891cfe48e9100a70a3b6eb652fef28920c117d366339687bd5576160db0f76" -"checksum regex 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "2069749032ea3ec200ca51e4a31df41759190a88edca0d2d86ee8bedf7073341" -"checksum regex-syntax 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "747ba3b235651f6e2f67dfa8bcdcd073ddb7c243cb21c442fc12395dfcac212d" +"checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" +"checksum regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dc220bd33bdce8f093101afe22a037b8eb0e5af33592e6a9caafff0d4cb81cbd" +"checksum regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)" = "11a7e20d1cce64ef2fed88b66d347f88bd9babb82845b2b858f3edbf59a4f716" "checksum rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = 
"7540fc8b0c49f096ee9c961cda096467dce8084bec6bdca2fc83895fd9b28cb8" "checksum shlex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" -"checksum strsim 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb4f380125926a99e52bc279241539c018323fab05ad6368b56f93d9369ff550" -"checksum termcolor 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4096add70612622289f2fdcdbd5086dc81c1e2675e6ae58d6c4f62a16c6d7f2f" -"checksum termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "689a3bdfaab439fd92bc87df5c4c78417d3cbe537487274e9b0b2dce76e92096" -"checksum textwrap 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "307686869c93e71f94da64286f9a9524c0f308a9e1c87a583de8e9c9039ad3f6" +"checksum strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" +"checksum termcolor 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "96d6098003bde162e4277c70665bd87c326f5a0c3f3fbfb285787fa482d54e6e" +"checksum textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" "checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b" -"checksum ucd-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fd2be2d6639d0f8fe6cdda291ad456e23629558d466e2789d2c3e9892bda285d" -"checksum unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "882386231c45df4700b275c7ff55b6f3698780a650026380e72dabe76fa46526" +"checksum unicode-width 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7007dbd421b92cc6e28410fe7362e2e0a2503394908f417b68ec8d1c364c4e20" "checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" -"checksum utf8-ranges 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fd70f467df6810094968e2fce0ee1bd0e87157aceb026a8c083bcf5e25b9efe4" "checksum vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a" -"checksum version_check 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "7716c242968ee87e5542f8021178248f267f295a5c4803beae8b8b7fd9bc6051" +"checksum version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce" "checksum which 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "240a31163872f7e8e49f35b42b58485e35355b07eb009d9f3686733541339a69" -"checksum winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "773ef9dcc5f24b7d850d0ff101e542ff24c3b090a9768e03ff889fdef41f00fd" +"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" "checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -"checksum winapi-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "afc5508759c5bf4285e61feb862b6083c8480aec864fa17a81fdec6f69b461ab" +"checksum winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"7168bab6e1daee33b4557efd0e95d5ca70a03706d39fa5f3fe7a236f584b03c9" "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -"checksum wincolor 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "561ed901ae465d6185fa7864d63fbd5720d0ef718366c9a4dc83cf6170d7e9ba" +"checksum wincolor 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "96f5016b18804d24db43cebf3c77269e7569b8954a8464501c216cc5e070eaa9" diff --git a/third_party/rust/bindgen/Cargo.toml b/third_party/rust/bindgen/Cargo.toml index e60f4660ae..a248a5c75f 100644 --- a/third_party/rust/bindgen/Cargo.toml +++ b/third_party/rust/bindgen/Cargo.toml @@ -12,7 +12,7 @@ [package] name = "bindgen" -version = "0.51.1" +version = "0.53.2" authors = ["Jyun-Yan You ", "Emilio Cobos Álvarez ", "Nick Fitzgerald ", "The Servo project developers"] build = "build.rs" include = ["LICENSE", "README.md", "Cargo.toml", "build.rs", "src/*.rs", "src/**/*.rs"] @@ -37,26 +37,29 @@ required-features = ["clap"] version = "1.0.3" [dependencies.cexpr] -version = "0.3.3" +version = "0.4" [dependencies.cfg-if] version = "0.1.0" [dependencies.clang-sys] -version = "0.28.0" -features = ["runtime", "clang_6_0"] +version = "0.29" +features = ["clang_6_0"] [dependencies.clap] version = "2" optional = true [dependencies.env_logger] -version = "0.6" +version = "0.7" optional = true [dependencies.lazy_static] version = "1" +[dependencies.lazycell] +version = "1" + [dependencies.log] version = "0.4" optional = true @@ -74,6 +77,8 @@ default-features = false [dependencies.regex] version = "1.0" +features = ["std", "unicode"] +default-features = false [dependencies.rustc-hash] version = "1.0.1" @@ -95,15 +100,17 @@ version = "0.1" version = "0.1" [features] -default = ["logging", "clap", "which-rustfmt"] +default = ["logging", "clap", "runtime", "which-rustfmt"] logging = ["env_logger", "log"] -static = [] +runtime = ["clang-sys/runtime"] +static = ["clang-sys/static"] testing_only_docs = [] testing_only_extra_assertions = [] testing_only_libclang_3_8 = [] testing_only_libclang_3_9 = [] testing_only_libclang_4 = [] testing_only_libclang_5 = [] +testing_only_libclang_9 = [] which-rustfmt = ["which"] [badges.travis-ci] repository = "rust-lang/rust-bindgen" diff --git a/third_party/rust/bindgen/src/callbacks.rs b/third_party/rust/bindgen/src/callbacks.rs index 91920738d6..21478e4fe0 100644 --- a/third_party/rust/bindgen/src/callbacks.rs +++ b/third_party/rust/bindgen/src/callbacks.rs @@ -64,4 +64,7 @@ pub trait ParseCallbacks: fmt::Debug + UnwindSafe { fn item_name(&self, _original_item_name: &str) -> Option { None } + + /// This will be called on every file inclusion, with the full path of the included file. + fn include_file(&self, _filename: &str) {} } diff --git a/third_party/rust/bindgen/src/clang.rs b/third_party/rust/bindgen/src/clang.rs index 9af6b46d3e..4bcc4b4ac4 100644 --- a/third_party/rust/bindgen/src/clang.rs +++ b/third_party/rust/bindgen/src/clang.rs @@ -527,25 +527,32 @@ impl Cursor { } } - /// Does this cursor have the given simple attribute? + /// Whether this cursor has the `warn_unused_result` attribute. + pub fn has_warn_unused_result_attr(&self) -> bool { + // FIXME(emilio): clang-sys doesn't expose this (from clang 9). + const CXCursor_WarnUnusedResultAttr: CXCursorKind = 440; + self.has_attr("warn_unused_result", Some(CXCursor_WarnUnusedResultAttr)) + } + + /// Does this cursor have the given attribute? 
/// - /// Note that this will only work for attributes that don't have an existing libclang - /// CursorKind, e.g. pure, const, etc. - pub fn has_simple_attr(&self, attr: &str) -> bool { + /// `name` is checked against unexposed attributes. + fn has_attr(&self, name: &str, clang_kind: Option) -> bool { let mut found_attr = false; self.visit(|cur| { - if cur.kind() == CXCursor_UnexposedAttr { - found_attr = cur.tokens().iter().any(|t| { - t.kind == CXToken_Identifier && - t.spelling() == attr.as_bytes() - }); - - if found_attr { - return CXChildVisit_Break; - } + let kind = cur.kind(); + found_attr = clang_kind.map_or(false, |k| k == kind) || + (kind == CXCursor_UnexposedAttr && + cur.tokens().iter().any(|t| { + t.kind == CXToken_Identifier && + t.spelling() == name.as_bytes() + })); + + if found_attr { + CXChildVisit_Break + } else { + CXChildVisit_Continue } - - CXChildVisit_Continue }); found_attr @@ -717,6 +724,20 @@ impl Cursor { }) .collect() } + + /// Obtain the real path name of a cursor of InclusionDirective kind. + /// + /// Returns None if the cursor does not include a file, otherwise the file's full name + pub fn get_included_file_name(&self) -> Option { + let file = unsafe { clang_sys::clang_getIncludedFile(self.x) }; + if file.is_null() { + None + } else { + Some(unsafe { + cxstring_into_string(clang_sys::clang_getFileName(file)) + }) + } + } } /// A struct that owns the tokenizer result from a given cursor. diff --git a/third_party/rust/bindgen/src/codegen/error.rs b/third_party/rust/bindgen/src/codegen/error.rs index 8bf00e547d..c1bcf4e1cb 100644 --- a/third_party/rust/bindgen/src/codegen/error.rs +++ b/third_party/rust/bindgen/src/codegen/error.rs @@ -15,17 +15,7 @@ pub enum Error { impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", error::Error::description(self)) - } -} - -impl error::Error for Error { - fn cause(&self) -> Option<&dyn error::Error> { - None - } - - fn description(&self) -> &'static str { - match *self { + f.write_str(match *self { Error::NoLayoutForOpaqueBlob => { "Tried to generate an opaque blob, but had no layout" } @@ -33,9 +23,11 @@ impl error::Error for Error { "Instantiation of opaque template type or partial template \ specialization" } - } + }) } } +impl error::Error for Error {} + /// A `Result` of `T` or an error of `bindgen::codegen::error::Error`. pub type Result = ::std::result::Result; diff --git a/third_party/rust/bindgen/src/codegen/helpers.rs b/third_party/rust/bindgen/src/codegen/helpers.rs index b1cf2050e6..ec60742c17 100644 --- a/third_party/rust/bindgen/src/codegen/helpers.rs +++ b/third_party/rust/bindgen/src/codegen/helpers.rs @@ -143,6 +143,27 @@ pub mod ast_ty { use proc_macro2::{self, TokenStream}; use std::str::FromStr; + pub fn c_void(ctx: &BindgenContext) -> TokenStream { + // ctypes_prefix takes precedence + match ctx.options().ctypes_prefix { + Some(ref prefix) => { + let prefix = TokenStream::from_str(prefix.as_str()).unwrap(); + quote! { + #prefix::c_void + } + } + None => { + if ctx.options().use_core && + ctx.options().rust_features.core_ffi_c_void + { + quote! { ::core::ffi::c_void } + } else { + quote! 
{ ::std::os::raw::c_void } + } + } + } + } + pub fn raw_type(ctx: &BindgenContext, name: &str) -> TokenStream { let ident = ctx.rust_ident_raw(name); match ctx.options().ctypes_prefix { diff --git a/third_party/rust/bindgen/src/codegen/mod.rs b/third_party/rust/bindgen/src/codegen/mod.rs index c7f1b2bdb3..9a3b10d70b 100644 --- a/third_party/rust/bindgen/src/codegen/mod.rs +++ b/third_party/rust/bindgen/src/codegen/mod.rs @@ -703,7 +703,8 @@ impl CodeGenerator for Type { let mut outer_params = item.used_template_params(ctx); - let inner_rust_type = if item.is_opaque(ctx, &()) { + let is_opaque = item.is_opaque(ctx, &()); + let inner_rust_type = if is_opaque { outer_params = vec![]; self.to_opaque(ctx, item) } else { @@ -748,6 +749,16 @@ impl CodeGenerator for Type { quote! {} }; + let alias_style = if ctx.options().type_alias.matches(&name) { + AliasVariation::TypeAlias + } else if ctx.options().new_type_alias.matches(&name) { + AliasVariation::NewType + } else if ctx.options().new_type_alias_deref.matches(&name) { + AliasVariation::NewTypeDeref + } else { + ctx.options().default_alias_style + }; + // We prefer using `pub use` over `pub type` because of: // https://github.com/rust-lang/rust/issues/26264 if inner_rust_type.to_string().chars().all(|c| match c { @@ -756,6 +767,8 @@ impl CodeGenerator for Type { 'A'..='Z' | 'a'..='z' | '0'..='9' | ':' | '_' | ' ' => true, _ => false, }) && outer_params.is_empty() && + !is_opaque && + alias_style == AliasVariation::TypeAlias && inner_item.expect_type().canonical_type(ctx).is_enum() { tokens.append_all(quote! { @@ -770,8 +783,21 @@ impl CodeGenerator for Type { return; } - tokens.append_all(quote! { - pub type #rust_name + tokens.append_all(match alias_style { + AliasVariation::TypeAlias => quote! { + pub type #rust_name + }, + AliasVariation::NewType | AliasVariation::NewTypeDeref => { + assert!( + ctx.options().rust_features().repr_transparent, + "repr_transparent feature is required to use {:?}", + alias_style + ); + quote! { + #[repr(transparent)] + pub struct #rust_name + } + } }); let params: Vec<_> = outer_params @@ -804,10 +830,36 @@ impl CodeGenerator for Type { }); } - tokens.append_all(quote! { - = #inner_rust_type ; + tokens.append_all(match alias_style { + AliasVariation::TypeAlias => quote! { + = #inner_rust_type ; + }, + AliasVariation::NewType | AliasVariation::NewTypeDeref => { + quote! { + (pub #inner_rust_type) ; + } + } }); + if alias_style == AliasVariation::NewTypeDeref { + let prefix = ctx.trait_prefix(); + tokens.append_all(quote! { + impl ::#prefix::ops::Deref for #rust_name { + type Target = #inner_rust_type; + #[inline] + fn deref(&self) -> &Self::Target { + &self.0 + } + } + impl ::#prefix::ops::DerefMut for #rust_name { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } + } + }); + } + result.push(tokens); } TypeKind::Enum(ref ei) => ei.codegen(ctx, result, item), @@ -861,7 +913,7 @@ impl<'a> CodeGenerator for Vtable<'a> { // For now, generate an empty struct, later we should generate function // pointers and whatnot. let name = ctx.rust_ident(&self.canonical_name(ctx)); - let void = helpers::ast_ty::raw_type(ctx, "c_void"); + let void = helpers::ast_ty::c_void(ctx); result.push(quote! { #[repr(C)] pub struct #name ( #void ); @@ -1200,12 +1252,8 @@ impl BitfieldUnit { impl Bitfield { /// Extend an under construction bitfield unit constructor with this - /// bitfield. This involves two things: - /// - /// 1. Adding a parameter with this bitfield's name and its type. - /// - /// 2. 
Setting the relevant bits on the `__bindgen_bitfield_unit` variable - /// that's being constructed. + /// bitfield. This sets the relevant bits on the `__bindgen_bitfield_unit` + /// variable that's being constructed. fn extend_ctor_impl( &self, ctx: &BindgenContext, @@ -1216,7 +1264,11 @@ impl Bitfield { let bitfield_ty_layout = bitfield_ty .layout(ctx) .expect("Bitfield without layout? Gah!"); - let bitfield_int_ty = helpers::blob(ctx, bitfield_ty_layout); + let bitfield_int_ty = helpers::integer_type(ctx, bitfield_ty_layout) + .expect( + "Should already have verified that the bitfield is \ + representable as an int", + ); let offset = self.offset_into_unit(); let width = self.width() as u8; @@ -1258,23 +1310,26 @@ impl<'a> FieldCodegen<'a> for BitfieldUnit { F: Extend, M: Extend, { + use ir::ty::RUST_DERIVE_IN_ARRAY_LIMIT; + result.saw_bitfield_unit(); + let layout = self.layout(); + let unit_field_ty = helpers::bitfield_unit(ctx, layout); let field_ty = { - let ty = helpers::bitfield_unit(ctx, self.layout()); if parent.is_union() && !parent.can_be_rust_union(ctx) { result.saw_bindgen_union(); if ctx.options().enable_cxx_namespaces { quote! { - root::__BindgenUnionField<#ty> + root::__BindgenUnionField<#unit_field_ty> } } else { quote! { - __BindgenUnionField<#ty> + __BindgenUnionField<#unit_field_ty> } } } else { - ty + unit_field_ty.clone() } }; @@ -1286,12 +1341,13 @@ impl<'a> FieldCodegen<'a> for BitfieldUnit { }; fields.extend(Some(field)); - let unit_field_ty = helpers::bitfield_unit(ctx, self.layout()); - let ctor_name = self.ctor_name(); let mut ctor_params = vec![]; let mut ctor_impl = quote! {}; - let mut generate_ctor = true; + + // We cannot generate any constructor if the underlying storage can't + // implement AsRef<[u8]> / AsMut<[u8]> / etc. + let mut generate_ctor = layout.size <= RUST_DERIVE_IN_ARRAY_LIMIT; for bf in self.bitfields() { // Codegen not allowed for anonymous bitfields @@ -1299,6 +1355,10 @@ impl<'a> FieldCodegen<'a> for BitfieldUnit { continue; } + if layout.size > RUST_DERIVE_IN_ARRAY_LIMIT { + continue; + } + let mut bitfield_representable_as_int = true; bf.codegen( @@ -1343,7 +1403,7 @@ impl<'a> FieldCodegen<'a> for BitfieldUnit { })); } - struct_layout.saw_bitfield_unit(self.layout()); + struct_layout.saw_bitfield_unit(layout); } } @@ -2154,13 +2214,26 @@ impl MethodCodegen for Method { // variable called `__bindgen_tmp` we're going to create. if self.is_constructor() { let prefix = ctx.trait_prefix(); - let tmp_variable_decl = quote! { - let mut __bindgen_tmp = ::#prefix::mem::uninitialized() + let tmp_variable_decl = if ctx + .options() + .rust_features() + .maybe_uninit + { + exprs[0] = quote! { + __bindgen_tmp.as_mut_ptr() + }; + quote! { + let mut __bindgen_tmp = ::#prefix::mem::MaybeUninit::uninit() + } + } else { + exprs[0] = quote! { + &mut __bindgen_tmp + }; + quote! { + let mut __bindgen_tmp = ::#prefix::mem::uninitialized() + } }; stmts.push(tmp_variable_decl); - exprs[0] = quote! { - &mut __bindgen_tmp - }; } else if !self.is_static() { assert!(!exprs.is_empty()); exprs[0] = quote! { @@ -2175,9 +2248,15 @@ impl MethodCodegen for Method { stmts.push(call); if self.is_constructor() { - stmts.push(quote! { - __bindgen_tmp - }); + stmts.push(if ctx.options().rust_features().maybe_uninit { + quote! { + __bindgen_tmp.assume_init() + } + } else { + quote! { + __bindgen_tmp + } + }) } let block = quote! 
{ @@ -2208,11 +2287,14 @@ impl MethodCodegen for Method { pub enum EnumVariation { /// The code for this enum will use a Rust enum Rust { - /// Indicates whether the generated struct should be #[non_exhaustive] + /// Indicates whether the generated struct should be `#[non_exhaustive]` non_exhaustive: bool, }, - /// The code for this enum will use a bitfield - Bitfield, + /// The code for this enum will use a newtype + NewType { + /// Indicates whether the newtype will have bitwise operators + is_bitfield: bool, + }, /// The code for this enum will use consts Consts, /// The code for this enum will use a module containing consts @@ -2249,15 +2331,24 @@ impl std::str::FromStr for EnumVariation { /// Create a `EnumVariation` from a string. fn from_str(s: &str) -> Result { match s { - "rust" => Ok(EnumVariation::Rust{ non_exhaustive: false }), - "rust_non_exhaustive" => Ok(EnumVariation::Rust{ non_exhaustive: true }), - "bitfield" => Ok(EnumVariation::Bitfield), + "rust" => Ok(EnumVariation::Rust { + non_exhaustive: false, + }), + "rust_non_exhaustive" => Ok(EnumVariation::Rust { + non_exhaustive: true, + }), + "bitfield" => Ok(EnumVariation::NewType { is_bitfield: true }), "consts" => Ok(EnumVariation::Consts), "moduleconsts" => Ok(EnumVariation::ModuleConsts), - _ => Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, - concat!("Got an invalid EnumVariation. Accepted values ", - "are 'rust', 'rust_non_exhaustive', 'bitfield', 'consts', and ", - "'moduleconsts'."))), + "newtype" => Ok(EnumVariation::NewType { is_bitfield: false }), + _ => Err(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + concat!( + "Got an invalid EnumVariation. Accepted values ", + "are 'rust', 'rust_non_exhaustive', 'bitfield', 'consts',", + "'moduleconsts', and 'newtype'." + ), + )), } } } @@ -2271,10 +2362,11 @@ enum EnumBuilder<'a> { tokens: proc_macro2::TokenStream, emitted_any_variants: bool, }, - Bitfield { + NewType { codegen_depth: usize, canonical_name: &'a str, tokens: proc_macro2::TokenStream, + is_bitfield: bool, }, Consts { variants: Vec, @@ -2292,7 +2384,7 @@ impl<'a> EnumBuilder<'a> { fn codegen_depth(&self) -> usize { match *self { EnumBuilder::Rust { codegen_depth, .. } | - EnumBuilder::Bitfield { codegen_depth, .. } | + EnumBuilder::NewType { codegen_depth, .. } | EnumBuilder::ModuleConsts { codegen_depth, .. } | EnumBuilder::Consts { codegen_depth, .. } => codegen_depth, } @@ -2310,13 +2402,14 @@ impl<'a> EnumBuilder<'a> { let ident = Ident::new(name, Span::call_site()); match enum_variation { - EnumVariation::Bitfield => EnumBuilder::Bitfield { + EnumVariation::NewType { is_bitfield } => EnumBuilder::NewType { codegen_depth: enum_codegen_depth, canonical_name: name, tokens: quote! { #( #attrs )* pub struct #ident (pub #repr); }, + is_bitfield, }, EnumVariation::Rust { .. } => { @@ -2404,7 +2497,7 @@ impl<'a> EnumBuilder<'a> { } } - EnumBuilder::Bitfield { canonical_name, .. } => { + EnumBuilder::NewType { canonical_name, .. } => { if ctx.options().rust_features().associated_const && is_ty_named { let enum_ident = ctx.rust_ident(canonical_name); @@ -2495,11 +2588,16 @@ impl<'a> EnumBuilder<'a> { } } } - EnumBuilder::Bitfield { + EnumBuilder::NewType { canonical_name, tokens, + is_bitfield, .. 
} => { + if !is_bitfield { + return tokens; + } + let rust_ty_name = ctx.rust_ident_raw(canonical_name); let prefix = ctx.trait_prefix(); @@ -2633,7 +2731,7 @@ impl CodeGenerator for Enum { panic!("The rust target you're using doesn't seem to support non_exhaustive enums"); } } - EnumVariation::Bitfield => { + EnumVariation::NewType { .. } => { if ctx.options().rust_features.repr_transparent { attrs.push(attributes::repr("transparent")); } else { @@ -2849,6 +2947,54 @@ impl CodeGenerator for Enum { } } +/// Enum for how aliases should be translated. +#[derive(Copy, Clone, PartialEq, Debug)] +pub enum AliasVariation { + /// Convert to regular Rust alias + TypeAlias, + /// Create a new type by wrapping the old type in a struct and using #[repr(transparent)] + NewType, + /// Same as NewStruct but also impl Deref to be able to use the methods of the wrapped type + NewTypeDeref, +} + +impl AliasVariation { + /// Convert an `AliasVariation` to its str representation. + pub fn as_str(&self) -> &str { + match self { + AliasVariation::TypeAlias => "type_alias", + AliasVariation::NewType => "new_type", + AliasVariation::NewTypeDeref => "new_type_deref", + } + } +} + +impl Default for AliasVariation { + fn default() -> AliasVariation { + AliasVariation::TypeAlias + } +} + +impl std::str::FromStr for AliasVariation { + type Err = std::io::Error; + + /// Create an `AliasVariation` from a string. + fn from_str(s: &str) -> Result { + match s { + "type_alias" => Ok(AliasVariation::TypeAlias), + "new_type" => Ok(AliasVariation::NewType), + "new_type_deref" => Ok(AliasVariation::NewTypeDeref), + _ => Err(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + concat!( + "Got an invalid AliasVariation. Accepted values ", + "are 'type_alias', 'new_type', and 'new_type_deref'" + ), + )), + } + } +} + /// Fallible conversion to an opaque blob. /// /// Implementors of this trait should provide the `try_get_layout` method to @@ -3076,10 +3222,10 @@ impl TryToRustTy for Type { use self::helpers::ast_ty::*; match *self.kind() { - TypeKind::Void => Ok(raw_type(ctx, "c_void")), + TypeKind::Void => Ok(c_void(ctx)), // TODO: we should do something smart with nullptr, or maybe *const // c_void is enough? - TypeKind::NullPtr => Ok(raw_type(ctx, "c_void").to_ptr(true)), + TypeKind::NullPtr => Ok(c_void(ctx).to_ptr(true)), TypeKind::Int(ik) => { match ik { IntKind::Bool => Ok(quote! { bool }), @@ -3186,7 +3332,7 @@ impl TryToRustTy for Type { TypeKind::Alias(..) | TypeKind::BlockPointer(..) => { if self.is_block_pointer() && !ctx.options().generate_block { - let void = raw_type(ctx, "c_void"); + let void = c_void(ctx); return Ok(void.to_ptr(/* is_const = */ false)); } let template_params = item @@ -3473,11 +3619,22 @@ impl CodeGenerator for Function { attributes.push(attributes::link_name(link_name)); } + // Unfortunately this can't piggyback on the `attributes` list because + // the #[link(wasm_import_module)] needs to happen before the `extern + // "C"` block. It doesn't get picked up properly otherwise + let wasm_link_attribute = + ctx.options().wasm_import_module_name.as_ref().map(|name| { + quote! { #[link(wasm_import_module = #name)] } + }); + let ident = ctx.rust_ident(canonical_name); - let tokens = quote!( extern #abi { - #(#attributes)* - pub fn #ident ( #( #args ),* ) #ret; - }); + let tokens = quote! 
{ + #wasm_link_attribute + extern #abi { + #(#attributes)* + pub fn #ident ( #( #args ),* ) #ret; + } + }; result.push(tokens); } } @@ -3579,18 +3736,44 @@ impl CodeGenerator for ObjCInterface { let trait_name = ctx.rust_ident(self.rust_name()); - let trait_block = quote! { - pub trait #trait_name { - #( #trait_items )* + let trait_block = if self.is_template() { + let template_names: Vec = self + .template_names + .iter() + .map(|g| ctx.rust_ident(g)) + .collect(); + quote! { + pub trait #trait_name <#(#template_names),*>{ + #( #trait_items )* + } + } + } else { + quote! { + pub trait #trait_name { + #( #trait_items )* + } } }; let ty_for_impl = quote! { id }; - let impl_block = quote! { - impl #trait_name for #ty_for_impl { - #( #impl_items )* + let impl_block = if self.is_template() { + let template_names: Vec = self + .template_names + .iter() + .map(|g| ctx.rust_ident(g)) + .collect(); + quote! { + impl <#(#template_names :'static),*> #trait_name <#(#template_names),*> for #ty_for_impl { + #( #impl_items )* + } + } + } else { + quote! { + impl #trait_name for #ty_for_impl { + #( #impl_items )* + } } }; @@ -3851,13 +4034,13 @@ mod utils { } #[inline] - pub unsafe fn as_ptr(&self) -> *const T { - ::#prefix::mem::transmute(self) + pub fn as_ptr(&self) -> *const T { + self as *const _ as *const T } #[inline] - pub unsafe fn as_mut_ptr(&mut self) -> *mut T { - ::#prefix::mem::transmute(self) + pub fn as_mut_ptr(&mut self) -> *mut T { + self as *mut _ as *mut T } #[inline] @@ -3881,20 +4064,10 @@ mod utils { } }; - let incomplete_array_clone_impl = quote! { - impl ::#prefix::clone::Clone for __IncompleteArrayField { - #[inline] - fn clone(&self) -> Self { - Self::new() - } - } - }; - let items = vec![ incomplete_array_decl, incomplete_array_impl, incomplete_array_debug_impl, - incomplete_array_clone_impl, ]; let old_items = mem::replace(result, items); @@ -3953,9 +4126,15 @@ mod utils { "int64_t" => primitive_ty(ctx, "i64"), "uint64_t" => primitive_ty(ctx, "u64"), - "uintptr_t" | "size_t" => primitive_ty(ctx, "usize"), + "size_t" if ctx.options().size_t_is_usize => { + primitive_ty(ctx, "usize") + } + "uintptr_t" => primitive_ty(ctx, "usize"), - "intptr_t" | "ptrdiff_t" | "ssize_t" => primitive_ty(ctx, "isize"), + "ssize_t" if ctx.options().size_t_is_usize => { + primitive_ty(ctx, "isize") + } + "intptr_t" | "ptrdiff_t" => primitive_ty(ctx, "isize"), _ => return None, }) } diff --git a/third_party/rust/bindgen/src/codegen/struct_layout.rs b/third_party/rust/bindgen/src/codegen/struct_layout.rs index 3c03ff1152..6e3f57b26b 100644 --- a/third_party/rust/bindgen/src/codegen/struct_layout.rs +++ b/third_party/rust/bindgen/src/codegen/struct_layout.rs @@ -9,6 +9,8 @@ use ir::ty::{Type, TypeKind}; use proc_macro2::{self, Ident, Span}; use std::cmp; +const MAX_GUARANTEED_ALIGN: usize = 8; + /// Trace the layout of struct. #[derive(Debug)] pub struct StructLayoutTracker<'a> { @@ -168,10 +170,10 @@ impl<'a> StructLayoutTracker<'a> { // much we can do about it. if let Some(layout) = self.ctx.resolve_type(inner).layout(self.ctx) { - if layout.align > self.ctx.target_pointer_size() { + if layout.align > MAX_GUARANTEED_ALIGN { field_layout.size = align_to(layout.size, layout.align) * len; - field_layout.align = self.ctx.target_pointer_size(); + field_layout.align = MAX_GUARANTEED_ALIGN; } } } @@ -191,7 +193,7 @@ impl<'a> StructLayoutTracker<'a> { // Otherwise the padding is useless. 
let need_padding = padding_bytes >= field_layout.align || - field_layout.align > self.ctx.target_pointer_size(); + field_layout.align > MAX_GUARANTEED_ALIGN; self.latest_offset += padding_bytes; @@ -213,10 +215,7 @@ impl<'a> StructLayoutTracker<'a> { if need_padding && padding_bytes != 0 { Some(Layout::new( padding_bytes, - cmp::min( - field_layout.align, - self.ctx.target_pointer_size(), - ), + cmp::min(field_layout.align, MAX_GUARANTEED_ALIGN), )) } else { None @@ -258,6 +257,11 @@ impl<'a> StructLayoutTracker<'a> { } let padding_bytes = layout.size - self.latest_offset; + if padding_bytes == 0 { + return None; + } + + let repr_align = self.ctx.options().rust_features().repr_align; // We always pad to get to the correct size if the struct is one of // those we can't align properly. @@ -265,17 +269,15 @@ impl<'a> StructLayoutTracker<'a> { // Note that if the last field we saw was a bitfield, we may need to pad // regardless, because bitfields don't respect alignment as strictly as // other fields. - if padding_bytes > 0 && - (padding_bytes >= layout.align || - (self.last_field_was_bitfield && - padding_bytes >= - self.latest_field_layout.unwrap().align) || - layout.align > self.ctx.target_pointer_size()) + if padding_bytes >= layout.align || + (self.last_field_was_bitfield && + padding_bytes >= self.latest_field_layout.unwrap().align) || + (!repr_align && layout.align > MAX_GUARANTEED_ALIGN) { let layout = if self.is_packed { Layout::new(padding_bytes, 1) } else if self.last_field_was_bitfield || - layout.align > self.ctx.target_pointer_size() + layout.align > MAX_GUARANTEED_ALIGN { // We've already given up on alignment here. Layout::for_size(self.ctx, padding_bytes) @@ -306,9 +308,9 @@ impl<'a> StructLayoutTracker<'a> { return false; } - // We can only generate up-to a word of alignment unless we support + // We can only generate up-to a 8-bytes of alignment unless we support // repr(align). - repr_align || layout.align <= self.ctx.target_pointer_size() + repr_align || layout.align <= MAX_GUARANTEED_ALIGN } fn padding_bytes(&self, layout: Layout) -> usize { diff --git a/third_party/rust/bindgen/src/features.rs b/third_party/rust/bindgen/src/features.rs index e700ca75b3..4ec9dee74d 100644 --- a/third_party/rust/bindgen/src/features.rs +++ b/third_party/rust/bindgen/src/features.rs @@ -1,7 +1,6 @@ //! Contains code for selecting features #![deny(missing_docs)] -#![deny(warnings)] #![deny(unused_extern_crates)] use std::io; @@ -88,25 +87,44 @@ macro_rules! 
rust_target_base { $x_macro!( /// Rust stable 1.0 => Stable_1_0 => 1.0; + /// Rust stable 1.1 + => Stable_1_1 => 1.1; /// Rust stable 1.19 + /// * Untagged unions ([RFC 1444](https://github.com/rust-lang/rfcs/blob/master/text/1444-union.md)) => Stable_1_19 => 1.19; /// Rust stable 1.20 + /// * Associated constants ([PR](https://github.com/rust-lang/rust/pull/42809)) => Stable_1_20 => 1.20; /// Rust stable 1.21 + /// * Builtin impls for `Clone` ([PR](https://github.com/rust-lang/rust/pull/43690)) => Stable_1_21 => 1.21; /// Rust stable 1.25 + /// * `repr(align)` ([PR](https://github.com/rust-lang/rust/pull/47006)) => Stable_1_25 => 1.25; /// Rust stable 1.26 + /// * [i128 / u128 support](https://doc.rust-lang.org/std/primitive.i128.html) => Stable_1_26 => 1.26; /// Rust stable 1.27 + /// * `must_use` attribute on functions ([PR](https://github.com/rust-lang/rust/pull/48925)) => Stable_1_27 => 1.27; /// Rust stable 1.28 + /// * `repr(transparent)` ([PR](https://github.com/rust-lang/rust/pull/51562)) => Stable_1_28 => 1.28; /// Rust stable 1.30 + /// * `const fn` support for limited cases ([PR](https://github.com/rust-lang/rust/pull/54835/) + /// * [c_void available in core](https://doc.rust-lang.org/core/ffi/enum.c_void.html) => Stable_1_30 => 1.30; /// Rust stable 1.33 + /// * repr(packed(N)) ([PR](https://github.com/rust-lang/rust/pull/57049)) => Stable_1_33 => 1.33; + /// Rust stable 1.36 + /// * `MaybeUninit` instead of `mem::uninitialized()` ([PR](https://github.com/rust-lang/rust/pull/60445)) + => Stable_1_36 => 1.36; + /// Rust stable 1.40 + /// * `non_exhaustive` enums/structs ([Tracking issue](https://github.com/rust-lang/rust/issues/44109)) + => Stable_1_40 => 1.40; /// Nightly rust + /// * `thiscall` calling convention ([Tracking issue](https://github.com/rust-lang/rust/issues/42202)) => Nightly => nightly; ); } @@ -116,7 +134,7 @@ rust_target_base!(rust_target_def); rust_target_base!(rust_target_values_def); /// Latest stable release of Rust -pub const LATEST_STABLE_RUST: RustTarget = RustTarget::Stable_1_33; +pub const LATEST_STABLE_RUST: RustTarget = RustTarget::Stable_1_40; /// Create RustFeatures struct definition, new(), and a getter for each field macro_rules! rust_feature_def { @@ -127,7 +145,8 @@ macro_rules! rust_feature_def { ) => { /// Features supported by a rust target #[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)] - pub struct RustFeatures { + #[allow(missing_docs)] // Documentation should go into the relevant variants. + pub(crate) struct RustFeatures { $( $( $( #[$attr] @@ -165,49 +184,46 @@ macro_rules! rust_feature_def { } } +// NOTE(emilio): When adding or removing features here, make sure to update the +// documentation for the relevant variant in the rust_target_base macro +// definition. 
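For context on the feature table above, a minimal sketch of how a consumer selects one of these targets (the header path is a placeholder; the builder calls are bindgen's public API): pinning an older target keeps later feature levels such as `maybe_uninit` (1.36) and `non_exhaustive` (1.40) disabled, so codegen falls back to the older patterns touched elsewhere in this patch.

```rust
fn main() {
    // Minimal sketch, assuming a build.rs-style context; "wrapper.h" is a placeholder.
    let _bindings = bindgen::builder()
        .header("wrapper.h")
        // Target Rust 1.33: MaybeUninit- and non_exhaustive-based output stays disabled.
        .rust_target(bindgen::RustTarget::Stable_1_33)
        .generate()
        .expect("unable to generate bindings");
}
```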
rust_feature_def!( Stable_1_19 { - /// Untagged unions ([RFC 1444](https://github.com/rust-lang/rfcs/blob/master/text/1444-union.md)) => untagged_union; } Stable_1_20 { - /// associated constants ([PR](https://github.com/rust-lang/rust/pull/42809)) => associated_const; } Stable_1_21 { - /// builtin impls for `Clone` ([PR](https://github.com/rust-lang/rust/pull/43690)) => builtin_clone_impls; } Stable_1_25 { - /// repr(align) ([PR](https://github.com/rust-lang/rust/pull/47006)) => repr_align; } Stable_1_26 { - /// [i128 / u128 support](https://doc.rust-lang.org/std/primitive.i128.html) => i128_and_u128; } Stable_1_27 { - /// `must_use` attribute on functions ([PR](https://github.com/rust-lang/rust/pull/48925)) => must_use_function; } Stable_1_28 { - /// repr(transparent) ([PR](https://github.com/rust-lang/rust/pull/51562)) => repr_transparent; } Stable_1_30 { - /// `const fn` support for limited cases - /// ([PR](https://github.com/rust-lang/rust/pull/54835/) => min_const_fn; + => core_ffi_c_void; } Stable_1_33 { - /// repr(packed(N)) ([PR](https://github.com/rust-lang/rust/pull/57049)) => repr_packed_n; } + Stable_1_36 { + => maybe_uninit; + } + Stable_1_40 { + => non_exhaustive; + } Nightly { - /// `thiscall` calling convention ([Tracking issue](https://github.com/rust-lang/rust/issues/42202)) => thiscall_abi; - /// `non_exhaustive` enums/structs ([Tracking issue](https://github.com/rust-lang/rust/issues/44109)) - => non_exhaustive; } ); @@ -227,7 +243,8 @@ mod test { fn target_features() { let f_1_0 = RustFeatures::from(RustTarget::Stable_1_0); assert!( - !f_1_0.untagged_union && + !f_1_0.core_ffi_c_void && + !f_1_0.untagged_union && !f_1_0.associated_const && !f_1_0.builtin_clone_impls && !f_1_0.repr_align && @@ -235,7 +252,8 @@ mod test { ); let f_1_21 = RustFeatures::from(RustTarget::Stable_1_21); assert!( - f_1_21.untagged_union && + !f_1_21.core_ffi_c_void && + f_1_21.untagged_union && f_1_21.associated_const && f_1_21.builtin_clone_impls && !f_1_21.repr_align && @@ -243,9 +261,11 @@ mod test { ); let f_nightly = RustFeatures::from(RustTarget::Nightly); assert!( - f_nightly.untagged_union && + f_nightly.core_ffi_c_void && + f_nightly.untagged_union && f_nightly.associated_const && f_nightly.builtin_clone_impls && + f_nightly.maybe_uninit && f_nightly.repr_align && f_nightly.thiscall_abi ); diff --git a/third_party/rust/bindgen/src/ir/analysis/derive.rs b/third_party/rust/bindgen/src/ir/analysis/derive.rs index e07f6bc2c0..f9cc404c03 100644 --- a/third_party/rust/bindgen/src/ir/analysis/derive.rs +++ b/third_party/rust/bindgen/src/ir/analysis/derive.rs @@ -240,25 +240,24 @@ impl<'ctx> CannotDerive<'ctx> { self.derive_trait ); return CanDerive::No; - } else { - if self.derive_trait.can_derive_large_array() { - trace!(" array can derive {}", self.derive_trait); - return CanDerive::Yes; - } else { - if len <= RUST_DERIVE_IN_ARRAY_LIMIT { - trace!( - " array is small enough to derive {}", - self.derive_trait - ); - return CanDerive::Yes; - } else { - trace!( - " array is too large to derive {}, but it may be implemented", self.derive_trait - ); - return CanDerive::Manually; - } - } } + + if self.derive_trait.can_derive_large_array() { + trace!(" array can derive {}", self.derive_trait); + return CanDerive::Yes; + } + + if len > RUST_DERIVE_IN_ARRAY_LIMIT { + trace!( + " array is too large to derive {}, but it may be implemented", self.derive_trait + ); + return CanDerive::Manually; + } + trace!( + " array is small enough to derive {}", + self.derive_trait + ); + return CanDerive::Yes; } 
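The `CanDerive::Manually` branch above exists because, on the stable compilers this crate targets, the standard library only implements traits such as `Debug` and `Clone` for arrays of at most 32 elements (`RUST_DERIVE_IN_ARRAY_LIMIT`). A hedged sketch of the manual fallback, with made-up type and field names:

```rust
// Hypothetical struct with a field past the 32-element derive limit.
struct Big {
    data: [u8; 64],
}

// Deriving Debug would not compile on the pre-const-generics toolchains
// targeted here, so the impl is written by hand: the "may be implemented"
// (CanDerive::Manually) case traced above.
impl std::fmt::Debug for Big {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Big").field("data", &&self.data[..]).finish()
    }
}
```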
TypeKind::Vector(t, len) => { let inner_type = @@ -362,6 +361,20 @@ impl<'ctx> CannotDerive<'ctx> { return CanDerive::No; } + // Bitfield units are always represented as arrays of u8, but + // they're not traced as arrays, so we need to check here + // instead. + if !self.derive_trait.can_derive_large_array() && + info.has_too_large_bitfield_unit() && + !item.is_opaque(self.ctx, &()) + { + trace!( + " cannot derive {} for comp with too large bitfield unit", + self.derive_trait + ); + return CanDerive::No; + } + let pred = self.derive_trait.consider_edge_comp(); return self.constrain_join(item, pred); } diff --git a/third_party/rust/bindgen/src/ir/comp.rs b/third_party/rust/bindgen/src/ir/comp.rs index a82fd756af..56479da2c4 100644 --- a/third_party/rust/bindgen/src/ir/comp.rs +++ b/third_party/rust/bindgen/src/ir/comp.rs @@ -6,9 +6,9 @@ use super::context::{BindgenContext, FunctionId, ItemId, TypeId, VarId}; use super::dot::DotAttributes; use super::item::{IsOpaque, Item}; use super::layout::Layout; -// use super::ty::RUST_DERIVE_IN_ARRAY_LIMIT; use super::template::TemplateParameters; use super::traversal::{EdgeKind, Trace, Tracer}; +use super::ty::RUST_DERIVE_IN_ARRAY_LIMIT; use clang; use codegen::struct_layout::{align_to, bytes_from_bits_pow2}; use ir::derive::CanDeriveCopy; @@ -356,7 +356,7 @@ impl Bitfield { if self.width() as u64 == mem::size_of::() as u64 * 8 { u64::MAX } else { - ((1u64 << self.width()) - 1u64) + (1u64 << self.width()) - 1u64 }; unoffseted_mask << self.offset_into_unit() @@ -496,7 +496,8 @@ impl FieldMethods for RawField { fn raw_fields_to_fields_and_bitfield_units( ctx: &BindgenContext, raw_fields: I, -) -> Result, ()> + packed: bool, +) -> Result<(Vec, bool), ()> where I: IntoIterator, { @@ -533,6 +534,7 @@ where &mut bitfield_unit_count, &mut fields, bitfields, + packed, )?; } @@ -541,7 +543,7 @@ where "The above loop should consume all items in `raw_fields`" ); - Ok(fields) + Ok((fields, bitfield_unit_count != 0)) } /// Given a set of contiguous raw bitfields, group and allocate them into @@ -551,6 +553,7 @@ fn bitfields_to_allocation_units( bitfield_unit_count: &mut usize, fields: &mut E, raw_bitfields: I, + packed: bool, ) -> Result<(), ()> where E: Extend, @@ -575,17 +578,22 @@ where unit_size_in_bits: usize, unit_align_in_bits: usize, bitfields: Vec, + packed: bool, ) where E: Extend, { *bitfield_unit_count += 1; - let align = bytes_from_bits_pow2(unit_align_in_bits); + let align = if packed { + 1 + } else { + bytes_from_bits_pow2(unit_align_in_bits) + }; let size = align_to(unit_size_in_bits, align * 8) / 8; let layout = Layout::new(size, align); fields.extend(Some(Field::Bitfields(BitfieldUnit { nth: *bitfield_unit_count, - layout: layout, - bitfields: bitfields, + layout, + bitfields, }))); } @@ -607,34 +615,39 @@ where let bitfield_align = bitfield_layout.align; let mut offset = unit_size_in_bits; - if is_ms_struct { - if unit_size_in_bits != 0 && - (bitfield_width == 0 || - bitfield_width > unfilled_bits_in_unit) - { - // We've reached the end of this allocation unit, so flush it - // and its bitfields. - unit_size_in_bits = align_to(unit_size_in_bits, unit_align * 8); - flush_allocation_unit( - fields, - bitfield_unit_count, - unit_size_in_bits, - unit_align, - mem::replace(&mut bitfields_in_unit, vec![]), - ); + if !packed { + if is_ms_struct { + if unit_size_in_bits != 0 && + (bitfield_width == 0 || + bitfield_width > unfilled_bits_in_unit) + { + // We've reached the end of this allocation unit, so flush it + // and its bitfields. 
+ unit_size_in_bits = + align_to(unit_size_in_bits, unit_align * 8); + flush_allocation_unit( + fields, + bitfield_unit_count, + unit_size_in_bits, + unit_align, + mem::replace(&mut bitfields_in_unit, vec![]), + packed, + ); - // Now we're working on a fresh bitfield allocation unit, so reset - // the current unit size and alignment. - offset = 0; - unit_align = 0; - } - } else { - if offset != 0 && - (bitfield_width == 0 || - (offset & (bitfield_align * 8 - 1)) + bitfield_width > - bitfield_size * 8) - { - offset = align_to(offset, bitfield_align * 8); + // Now we're working on a fresh bitfield allocation unit, so reset + // the current unit size and alignment. + offset = 0; + unit_align = 0; + } + } else { + if offset != 0 && + (bitfield_width == 0 || + (offset & (bitfield_align * 8 - 1)) + + bitfield_width > + bitfield_size * 8) + { + offset = align_to(offset, bitfield_align * 8); + } } } @@ -677,6 +690,7 @@ where unit_size_in_bits, unit_align, bitfields_in_unit, + packed, ); } @@ -693,7 +707,10 @@ where #[derive(Debug)] enum CompFields { BeforeComputingBitfieldUnits(Vec), - AfterComputingBitfieldUnits(Vec), + AfterComputingBitfieldUnits { + fields: Vec, + has_bitfield_units: bool, + }, ErrorComputingBitfieldUnits, } @@ -717,7 +734,7 @@ impl CompFields { } } - fn compute_bitfield_units(&mut self, ctx: &BindgenContext) { + fn compute_bitfield_units(&mut self, ctx: &BindgenContext, packed: bool) { let raws = match *self { CompFields::BeforeComputingBitfieldUnits(ref mut raws) => { mem::replace(raws, vec![]) @@ -727,13 +744,16 @@ impl CompFields { } }; - let result = raw_fields_to_fields_and_bitfield_units(ctx, raws); + let result = raw_fields_to_fields_and_bitfield_units(ctx, raws, packed); match result { - Ok(fields_and_units) => { + Ok((fields, has_bitfield_units)) => { mem::replace( self, - CompFields::AfterComputingBitfieldUnits(fields_and_units), + CompFields::AfterComputingBitfieldUnits { + fields, + has_bitfield_units, + }, ); } Err(()) => { @@ -744,11 +764,11 @@ impl CompFields { fn deanonymize_fields(&mut self, ctx: &BindgenContext, methods: &[Method]) { let fields = match *self { - CompFields::AfterComputingBitfieldUnits(ref mut fields) => fields, - CompFields::ErrorComputingBitfieldUnits => { - // Nothing to do here. - return; - } + CompFields::AfterComputingBitfieldUnits { + ref mut fields, .. + } => fields, + // Nothing to do here. + CompFields::ErrorComputingBitfieldUnits => return, CompFields::BeforeComputingBitfieldUnits(_) => { panic!("Not yet computed bitfield units."); } @@ -845,7 +865,7 @@ impl Trace for CompFields { tracer.visit_kind(f.ty().into(), EdgeKind::Field); } } - CompFields::AfterComputingBitfieldUnits(ref fields) => { + CompFields::AfterComputingBitfieldUnits { ref fields, .. } => { for f in fields { f.trace(context, tracer, &()); } @@ -1047,7 +1067,7 @@ impl CompInfo { /// Construct a new compound type. pub fn new(kind: CompKind) -> Self { CompInfo { - kind: kind, + kind, fields: CompFields::default(), template_params: vec![], methods: vec![], @@ -1110,13 +1130,43 @@ impl CompInfo { pub fn fields(&self) -> &[Field] { match self.fields { CompFields::ErrorComputingBitfieldUnits => &[], - CompFields::AfterComputingBitfieldUnits(ref fields) => fields, + CompFields::AfterComputingBitfieldUnits { ref fields, .. 
} => { + fields + } + CompFields::BeforeComputingBitfieldUnits(_) => { + panic!("Should always have computed bitfield units first"); + } + } + } + + fn has_bitfields(&self) -> bool { + match self.fields { + CompFields::ErrorComputingBitfieldUnits => false, + CompFields::AfterComputingBitfieldUnits { + has_bitfield_units, + .. + } => has_bitfield_units, CompFields::BeforeComputingBitfieldUnits(_) => { panic!("Should always have computed bitfield units first"); } } } + /// Returns whether we have a too large bitfield unit, in which case we may + /// not be able to derive some of the things we should be able to normally + /// derive. + pub fn has_too_large_bitfield_unit(&self) -> bool { + if !self.has_bitfields() { + return false; + } + self.fields().iter().any(|field| match *field { + Field::DataMember(..) => false, + Field::Bitfields(ref unit) => { + unit.layout.size > RUST_DERIVE_IN_ARRAY_LIMIT + } + }) + } + /// Does this type have any template parameters that aren't types /// (e.g. int)? pub fn has_non_type_template_params(&self) -> bool { @@ -1126,7 +1176,7 @@ impl CompInfo { /// Do we see a virtual function during parsing? /// Get the has_own_virtual_method boolean. pub fn has_own_virtual_method(&self) -> bool { - return self.has_own_virtual_method; + self.has_own_virtual_method } /// Did we see a destructor when parsing this type? @@ -1566,7 +1616,9 @@ impl CompInfo { /// Compute this compound structure's bitfield allocation units. pub fn compute_bitfield_units(&mut self, ctx: &BindgenContext) { - self.fields.compute_bitfield_units(ctx); + // TODO(emilio): If we could detect #pragma packed here we'd fix layout + // tests in divide-by-zero-in-struct-layout.rs + self.fields.compute_bitfield_units(ctx, self.packed_attr) } /// Assign for each anonymous field a generated name. diff --git a/third_party/rust/bindgen/src/ir/context.rs b/third_party/rust/bindgen/src/ir/context.rs index 0fefe399a4..a3601793ae 100644 --- a/third_party/rust/bindgen/src/ir/context.rs +++ b/third_party/rust/bindgen/src/ir/context.rs @@ -553,6 +553,8 @@ impl BindgenContext { clang_sys::CXTranslationUnit_DetailedPreprocessingRecord; let translation_unit = { + let _t = + Timer::new("translation_unit").with_output(options.time_phases); let clang_args = if explicit_target { Cow::Borrowed(&options.clang_args) } else { @@ -573,6 +575,7 @@ impl BindgenContext { - Unrecognized flags - Invalid flag arguments - File I/O errors +- Host vs. target architecture mismatch If you encounter an error missing from this list, please file an issue or a PR!") }; @@ -931,6 +934,8 @@ If you encounter an error missing from this list, please file an issue or a PR!" /// Collect all of our unresolved type references and resolve them. fn resolve_typerefs(&mut self) { + let _t = self.timer("resolve_typerefs"); + let typerefs = self.collect_typerefs(); for (id, ty, loc, parent_id) in typerefs { @@ -987,6 +992,8 @@ If you encounter an error missing from this list, please file an issue or a PR!" /// Compute the bitfield allocation units for all `TypeKind::Comp` items we /// parsed. fn compute_bitfield_units(&mut self) { + let _t = self.timer("compute_bitfield_units"); + assert!(self.collected_typerefs()); let need_bitfield_allocation = @@ -2160,10 +2167,27 @@ If you encounter an error missing from this list, please file an issue or a PR!" } break; } - _ => { + spelling if !found_namespace_keyword => { + // This is _likely_, but not certainly, a macro that's been placed just before + // the namespace keyword. 
Unfortunately, clang tokens don't let us easily see + // through the ifdef tokens, so we don't know what this token should really be. + // Instead of panicking though, we warn the user that we assumed the token was + // blank, and then move on. + // + // See also https://github.com/rust-lang/rust-bindgen/issues/1676. + warn!( + "Ignored unknown namespace prefix '{}' at {:?} in {:?}", + String::from_utf8_lossy(spelling), + token, + cursor + ); + } + spelling => { panic!( - "Unknown token while processing namespace: {:?}", - token + "Unknown token '{}' while processing namespace at {:?} in {:?}", + String::from_utf8_lossy(spelling), + token, + cursor ); } } @@ -2321,7 +2345,7 @@ If you encounter an error missing from this list, please file an issue or a PR!" } let mut prefix_path = - parent.path_for_whitelisting(self); + parent.path_for_whitelisting(self).clone(); enum_.variants().iter().any(|variant| { prefix_path.push(variant.name().into()); let name = prefix_path[1..].join("::"); diff --git a/third_party/rust/bindgen/src/ir/dot.rs b/third_party/rust/bindgen/src/ir/dot.rs index d56902321d..6bf75bfa77 100644 --- a/third_party/rust/bindgen/src/ir/dot.rs +++ b/third_party/rust/bindgen/src/ir/dot.rs @@ -38,11 +38,7 @@ where &mut dot_file, r#"{} [fontname="courier", color={}, label=< "#, id.as_usize(), - if is_whitelisted { - "black" - } else { - "gray" - } + if is_whitelisted { "black" } else { "gray" } )?; item.dot_attributes(ctx, &mut dot_file)?; writeln!(&mut dot_file, r#"
>];"#)?; diff --git a/third_party/rust/bindgen/src/ir/enum_ty.rs b/third_party/rust/bindgen/src/ir/enum_ty.rs index 442b5e2e37..f2013844b2 100644 --- a/third_party/rust/bindgen/src/ir/enum_ty.rs +++ b/third_party/rust/bindgen/src/ir/enum_ty.rs @@ -183,7 +183,10 @@ impl Enum { &ctx.options().bitfield_enums, item, ) { - EnumVariation::Bitfield + EnumVariation::NewType { is_bitfield: true } + } else if self.is_matching_enum(ctx, &ctx.options().newtype_enums, item) + { + EnumVariation::NewType { is_bitfield: false } } else if self.is_matching_enum( ctx, &ctx.options().rustified_enums, diff --git a/third_party/rust/bindgen/src/ir/function.rs b/third_party/rust/bindgen/src/ir/function.rs index 9ccf4e14c2..0715ec546c 100644 --- a/third_party/rust/bindgen/src/ir/function.rs +++ b/third_party/rust/bindgen/src/ir/function.rs @@ -424,7 +424,7 @@ impl FunctionSig { }; let must_use = ctx.options().enable_function_attribute_detection && - cursor.has_simple_attr("warn_unused_result"); + cursor.has_warn_unused_result_attr(); let is_method = kind == CXCursor_CXXMethod; let is_constructor = kind == CXCursor_Constructor; let is_destructor = kind == CXCursor_Destructor; diff --git a/third_party/rust/bindgen/src/ir/item.rs b/third_party/rust/bindgen/src/ir/item.rs index 7b4f3e8348..eb5b4d25dc 100644 --- a/third_party/rust/bindgen/src/ir/item.rs +++ b/third_party/rust/bindgen/src/ir/item.rs @@ -20,9 +20,10 @@ use super::traversal::{EdgeKind, Trace, Tracer}; use super::ty::{Type, TypeKind}; use clang; use clang_sys; +use lazycell::LazyCell; use parse::{ClangItemParser, ClangSubItemParser, ParseError, ParseResult}; use regex; -use std::cell::{Cell, RefCell}; +use std::cell::Cell; use std::collections::BTreeSet; use std::fmt::Write; use std::io; @@ -387,7 +388,7 @@ pub struct Item { /// /// Note that only structs, unions, and enums get a local type id. In any /// case this is an implementation detail. - local_id: Cell>, + local_id: LazyCell, /// The next local id to use for a child or template instantiation. next_child_local_id: Cell, @@ -396,7 +397,11 @@ pub struct Item { /// /// This is a fairly used operation during codegen so this makes bindgen /// considerably faster in those cases. - canonical_name_cache: RefCell>, + canonical_name: LazyCell, + + /// The path to use for whitelisting and other name-based checks, as + /// returned by `path_for_whitelisting`, lazily constructed. + path_for_whitelisting: LazyCell>, /// A doc comment over the item, if any. comment: Option, @@ -431,9 +436,10 @@ impl Item { debug_assert!(id != parent_id || kind.is_module()); Item { id: id, - local_id: Cell::new(None), + local_id: LazyCell::new(), next_child_local_id: Cell::new(1), - canonical_name_cache: RefCell::new(None), + canonical_name: LazyCell::new(), + path_for_whitelisting: LazyCell::new(), parent_id: parent_id, comment: comment, annotations: annotations.unwrap_or_default(), @@ -520,11 +526,10 @@ impl Item { /// below this item's lexical scope, meaning that this can be useful for /// generating relatively stable identifiers within a scope. 
pub fn local_id(&self, ctx: &BindgenContext) -> usize { - if self.local_id.get().is_none() { + *self.local_id.borrow_with(|| { let parent = ctx.resolve_item(self.parent_id); - self.local_id.set(Some(parent.next_child_local_id())); - } - self.local_id.get().unwrap() + parent.next_child_local_id() + }) } /// Get an identifier that differentiates a child of this item of other @@ -793,6 +798,15 @@ impl Item { } } + fn is_anon(&self) -> bool { + match self.kind() { + ItemKind::Module(module) => module.name().is_none(), + ItemKind::Type(ty) => ty.name().is_none(), + ItemKind::Function(_) => false, + ItemKind::Var(_) => false, + } + } + /// Get the canonical name without taking into account the replaces /// annotation. /// @@ -804,6 +818,10 @@ impl Item { /// /// This name should be derived from the immutable state contained in the /// type and the parent chain, since it should be consistent. + /// + /// If `BindgenOptions::disable_nested_struct_naming` is true then returned + /// name is the inner most non-anonymous name plus all the anonymous base names + /// that follows. pub fn real_canonical_name( &self, ctx: &BindgenContext, @@ -827,8 +845,8 @@ impl Item { return base_name; } - // Concatenate this item's ancestors' names together. - let mut names: Vec<_> = target + // Ancestors' id iter + let mut ids_iter = target .parent_id() .ancestors(ctx) .filter(|id| *id != ctx.root_module()) @@ -847,7 +865,30 @@ impl Item { } true - }) + }); + + let ids: Vec<_> = if ctx.options().disable_nested_struct_naming { + let mut ids = Vec::new(); + + // If target is anonymous we need find its first named ancestor. + if target.is_anon() { + while let Some(id) = ids_iter.next() { + ids.push(id); + + if !ctx.resolve_item(id).is_anon() { + break; + } + } + } + + ids + } else { + ids_iter.collect() + }; + + // Concatenate this item's ancestors' names together. + let mut names: Vec<_> = ids + .into_iter() .map(|id| { let item = ctx.resolve_item(id); let target = ctx.resolve_item(item.name_target(ctx)); @@ -972,8 +1013,9 @@ impl Item { /// Returns the path we should use for whitelisting / blacklisting, which /// doesn't include user-mangling. 
- pub fn path_for_whitelisting(&self, ctx: &BindgenContext) -> Vec { - self.compute_path(ctx, UserMangled::No) + pub fn path_for_whitelisting(&self, ctx: &BindgenContext) -> &Vec { + self.path_for_whitelisting + .borrow_with(|| self.compute_path(ctx, UserMangled::No)) } fn compute_path( @@ -1357,7 +1399,6 @@ impl ClangItemParser for Item { CXCursor_UsingDeclaration | CXCursor_UsingDirective | CXCursor_StaticAssert | - CXCursor_InclusionDirective | CXCursor_FunctionTemplate => { debug!( "Unhandled cursor kind {:?}: {:?}", @@ -1365,6 +1406,22 @@ impl ClangItemParser for Item { cursor ); } + CXCursor_InclusionDirective => { + let file = cursor.get_included_file_name(); + match file { + None => { + warn!( + "Inclusion of a nameless file in {:?}", + cursor + ); + } + Some(filename) => { + if let Some(cb) = ctx.parse_callbacks() { + cb.include_file(&filename) + } + } + } + } _ => { // ignore toplevel operator overloads let spelling = cursor.spelling(); @@ -1812,17 +1869,18 @@ impl ItemCanonicalName for Item { ctx.in_codegen_phase(), "You're not supposed to call this yet" ); - if self.canonical_name_cache.borrow().is_none() { - let in_namespace = ctx.options().enable_cxx_namespaces || - ctx.options().disable_name_namespacing; + self.canonical_name + .borrow_with(|| { + let in_namespace = ctx.options().enable_cxx_namespaces || + ctx.options().disable_name_namespacing; - *self.canonical_name_cache.borrow_mut() = if in_namespace { - Some(self.name(ctx).within_namespaces().get()) - } else { - Some(self.name(ctx).get()) - }; - } - return self.canonical_name_cache.borrow().as_ref().unwrap().clone(); + if in_namespace { + self.name(ctx).within_namespaces().get() + } else { + self.name(ctx).get() + } + }) + .clone() } } diff --git a/third_party/rust/bindgen/src/ir/objc.rs b/third_party/rust/bindgen/src/ir/objc.rs index 0b75e55ab4..cfbf3dd0ea 100644 --- a/third_party/rust/bindgen/src/ir/objc.rs +++ b/third_party/rust/bindgen/src/ir/objc.rs @@ -12,6 +12,7 @@ use clang_sys::CXCursor_ObjCClassRef; use clang_sys::CXCursor_ObjCInstanceMethodDecl; use clang_sys::CXCursor_ObjCProtocolDecl; use clang_sys::CXCursor_ObjCProtocolRef; +use clang_sys::CXCursor_TemplateTypeParameter; use proc_macro2::{Ident, Span, TokenStream}; /// Objective C interface as used in TypeKind @@ -27,6 +28,9 @@ pub struct ObjCInterface { is_protocol: bool, + /// The list of template names almost always, ObjectType or KeyType + pub template_names: Vec, + conforms_to: Vec, /// List of the methods defined in this interfae @@ -58,6 +62,7 @@ impl ObjCInterface { name: name.to_owned(), category: None, is_protocol: false, + template_names: Vec::new(), conforms_to: Vec::new(), methods: Vec::new(), class_methods: Vec::new(), @@ -85,6 +90,11 @@ impl ObjCInterface { } } + /// Is this a template interface? 
+ pub fn is_template(&self) -> bool { + !self.template_names.is_empty() + } + /// List of the methods defined in this interface pub fn methods(&self) -> &Vec { &self.methods @@ -154,6 +164,10 @@ impl ObjCInterface { let method = ObjCMethod::new(&name, signature, is_class_method); interface.add_method(method); } + CXCursor_TemplateTypeParameter => { + let name = c.spelling(); + interface.template_names.push(name); + } _ => {} } CXChildVisit_Continue @@ -183,8 +197,8 @@ impl ObjCMethod { ObjCMethod { name: name.to_owned(), rust_name: rust_name.to_owned(), - signature: signature, - is_class_method: is_class_method, + signature, + is_class_method, } } @@ -212,11 +226,16 @@ impl ObjCMethod { /// Formats the method call pub fn format_method_call(&self, args: &[TokenStream]) -> TokenStream { - let split_name: Vec<_> = self + let split_name: Vec> = self .name .split(':') - .filter(|p| !p.is_empty()) - .map(|name| Ident::new(name, Span::call_site())) + .map(|name| { + if name.is_empty() { + None + } else { + Some(Ident::new(name, Span::call_site())) + } + }) .collect(); // No arguments @@ -228,11 +247,11 @@ impl ObjCMethod { } // Check right amount of arguments - if args.len() != split_name.len() { + if args.len() != split_name.len() - 1 { panic!( "Incorrect method name or arguments for objc method, {:?} vs {:?}", args, - split_name + split_name, ); } @@ -245,10 +264,15 @@ impl ObjCMethod { args_without_types.push(Ident::new(name, Span::call_site())) } - let args = split_name - .into_iter() - .zip(args_without_types) - .map(|(arg, arg_val)| quote! { #arg : #arg_val }); + let args = split_name.into_iter().zip(args_without_types).map( + |(arg, arg_val)| { + if let Some(arg) = arg { + quote! { #arg: #arg_val } + } else { + quote! { #arg_val: #arg_val } + } + }, + ); quote! { #( #args )* diff --git a/third_party/rust/bindgen/src/ir/var.rs b/third_party/rust/bindgen/src/ir/var.rs index 921dcf98c7..e55308ce35 100644 --- a/third_party/rust/bindgen/src/ir/var.rs +++ b/third_party/rust/bindgen/src/ir/var.rs @@ -232,8 +232,12 @@ impl ClangSubItemParser for Var { let ty = cursor.cur_type(); - // XXX this is redundant, remove! - let is_const = ty.is_const(); + // TODO(emilio): do we have to special-case constant arrays in + // some other places? + let is_const = ty.is_const() || + (ty.kind() == CXType_ConstantArray && + ty.elem_type() + .map_or(false, |element| element.is_const())); let ty = match Item::from_ty(&ty, cursor, None, ctx) { Ok(ty) => ty, diff --git a/third_party/rust/bindgen/src/lib.rs b/third_party/rust/bindgen/src/lib.rs index 57d46be723..f5c58cd5af 100644 --- a/third_party/rust/bindgen/src/lib.rs +++ b/third_party/rust/bindgen/src/lib.rs @@ -8,7 +8,6 @@ //! See the [Users Guide](https://rust-lang.github.io/rust-bindgen/) for //! additional documentation. #![deny(missing_docs)] -#![deny(warnings)] #![deny(unused_extern_crates)] // To avoid rather annoying warnings when matching with CXCursor_xxx as a // constant. 
@@ -23,6 +22,7 @@ extern crate cexpr; #[allow(unused_extern_crates)] extern crate cfg_if; extern crate clang_sys; +extern crate lazycell; extern crate rustc_hash; #[macro_use] extern crate lazy_static; @@ -82,7 +82,7 @@ doc_mod!(ir, ir_docs); doc_mod!(parse, parse_docs); doc_mod!(regex_set, regex_set_docs); -pub use codegen::EnumVariation; +pub use codegen::{AliasVariation, EnumVariation}; use features::RustFeatures; pub use features::{RustTarget, LATEST_STABLE_RUST, RUST_TARGET_STRINGS}; use ir::context::{BindgenContext, ItemId}; @@ -95,7 +95,6 @@ use std::fs::{File, OpenOptions}; use std::io::{self, Write}; use std::path::{Path, PathBuf}; use std::process::{Command, Stdio}; -use std::sync::Arc; use std::{env, iter}; // Some convenient typedefs for a fast hash map and hash set. @@ -174,8 +173,8 @@ impl Default for CodegenConfig { /// /// // Configure and generate bindings. /// let bindings = builder().header("path/to/input/header") -/// .whitelisted_type("SomeCoolClass") -/// .whitelisted_function("do_some_cool_thing") +/// .whitelist_type("SomeCoolClass") +/// .whitelist_function("do_some_cool_thing") /// .generate()?; /// /// // Write the generated bindings to an output file. @@ -189,13 +188,15 @@ impl Default for CodegenConfig { /// /// 1. [`constified_enum_module()`](#method.constified_enum_module) /// 2. [`bitfield_enum()`](#method.bitfield_enum) -/// 3. [`rustified_enum()`](#method.rustified_enum) +/// 3. [`newtype_enum()`](#method.newtype_enum) +/// 4. [`rustified_enum()`](#method.rustified_enum) /// /// For each C enum, bindgen tries to match the pattern in the following order: /// /// 1. Constified enum module /// 2. Bitfield enum -/// 3. Rustified enum +/// 3. Newtype enum +/// 4. Rustified enum /// /// If none of the above patterns match, then bindgen will generate a set of Rust constants. 
#[derive(Debug, Default)] @@ -234,7 +235,12 @@ impl Builder { codegen::EnumVariation::Rust { non_exhaustive: true, } => "rust_non_exhaustive", - codegen::EnumVariation::Bitfield => "bitfield", + codegen::EnumVariation::NewType { is_bitfield: true } => { + "bitfield" + } + codegen::EnumVariation::NewType { is_bitfield: false } => { + "newtype" + } codegen::EnumVariation::Consts => "consts", codegen::EnumVariation::ModuleConsts => "moduleconsts", } @@ -252,6 +258,16 @@ impl Builder { }) .count(); + self.options + .newtype_enums + .get_items() + .iter() + .map(|item| { + output_vector.push("--newtype-enum".into()); + output_vector.push(item.to_owned()); + }) + .count(); + self.options .rustified_enums .get_items() @@ -292,6 +308,42 @@ impl Builder { }) .count(); + if self.options.default_alias_style != Default::default() { + output_vector.push("--default-alias-style=".into()); + output_vector + .push(self.options.default_alias_style.as_str().into()); + } + + self.options + .type_alias + .get_items() + .iter() + .map(|item| { + output_vector.push("--type-alias".into()); + output_vector.push(item.to_owned()); + }) + .count(); + + self.options + .new_type_alias + .get_items() + .iter() + .map(|item| { + output_vector.push("--new-type-alias".into()); + output_vector.push(item.to_owned()); + }) + .count(); + + self.options + .new_type_alias_deref + .get_items() + .iter() + .map(|item| { + output_vector.push("--new-type-alias-deref".into()); + output_vector.push(item.to_owned()); + }) + .count(); + self.options .blacklisted_types .get_items() @@ -421,6 +473,9 @@ impl Builder { if self.options.disable_name_namespacing { output_vector.push("--disable-name-namespacing".into()); } + if self.options.disable_nested_struct_naming { + output_vector.push("--disable-nested-struct-naming".into()); + } if !self.options.codegen_config.functions() { output_vector.push("--ignore-functions".into()); @@ -467,6 +522,13 @@ impl Builder { output_vector.push("--use-array-pointers-in-arguments".into()); } + if let Some(ref wasm_import_module_name) = + self.options.wasm_import_module_name + { + output_vector.push("--wasm-import-module-name".into()); + output_vector.push(wasm_import_module_name.clone()); + } + self.options .opaque_types .get_items() @@ -542,6 +604,10 @@ impl Builder { output_vector.push("--no-record-matches".into()); } + if self.options.size_t_is_usize { + output_vector.push("--size_t-is-usize".into()); + } + if !self.options.rustfmt_bindings { output_vector.push("--no-rustfmt-bindings".into()); } @@ -732,6 +798,10 @@ impl Builder { /// Hide the given type from the generated bindings. Regular expressions are /// supported. + /// + /// To blacklist types prefixed with "mylib" use `"mylib_.*"`. + /// For more complicated expressions check + /// [regex](https://docs.rs/regex/*/regex/) docs pub fn blacklist_type>(mut self, arg: T) -> Builder { self.options.blacklisted_types.insert(arg); self @@ -739,6 +809,10 @@ impl Builder { /// Hide the given function from the generated bindings. Regular expressions /// are supported. + /// + /// To blacklist functions prefixed with "mylib" use `"mylib_.*"`. + /// For more complicated expressions check + /// [regex](https://docs.rs/regex/*/regex/) docs pub fn blacklist_function>(mut self, arg: T) -> Builder { self.options.blacklisted_functions.insert(arg); self @@ -747,6 +821,10 @@ impl Builder { /// Hide the given item from the generated bindings, regardless of /// whether it's a type, function, module, etc. Regular /// expressions are supported. 
+ /// + /// To blacklist items prefixed with "mylib" use `"mylib_.*"`. + /// For more complicated expressions check + /// [regex](https://docs.rs/regex/*/regex/) docs pub fn blacklist_item>(mut self, arg: T) -> Builder { self.options.blacklisted_items.insert(arg); self @@ -754,6 +832,10 @@ impl Builder { /// Treat the given type as opaque in the generated bindings. Regular /// expressions are supported. + /// + /// To change types prefixed with "mylib" into opaque, use `"mylib_.*"`. + /// For more complicated expressions check + /// [regex](https://docs.rs/regex/*/regex/) docs pub fn opaque_type>(mut self, arg: T) -> Builder { self.options.opaque_types.insert(arg); self @@ -770,6 +852,10 @@ impl Builder { /// Whitelist the given type so that it (and all types that it transitively /// refers to) appears in the generated bindings. Regular expressions are /// supported. + /// + /// To whitelist types prefixed with "mylib" use `"mylib_.*"`. + /// For more complicated expressions check + /// [regex](https://docs.rs/regex/*/regex/) docs pub fn whitelist_type>(mut self, arg: T) -> Builder { self.options.whitelisted_types.insert(arg); self @@ -778,6 +864,10 @@ impl Builder { /// Whitelist the given function so that it (and all types that it /// transitively refers to) appears in the generated bindings. Regular /// expressions are supported. + /// + /// To whitelist functions prefixed with "mylib" use `"mylib_.*"`. + /// For more complicated expressions check + /// [regex](https://docs.rs/regex/*/regex/) docs pub fn whitelist_function>(mut self, arg: T) -> Builder { self.options.whitelisted_functions.insert(arg); self @@ -794,6 +884,10 @@ impl Builder { /// Whitelist the given variable so that it (and all types that it /// transitively refers to) appears in the generated bindings. Regular /// expressions are supported. + /// + /// To whitelist variables prefixed with "mylib" use `"mylib_.*"`. + /// For more complicated expressions check + /// [regex](https://docs.rs/regex/*/regex/) docs pub fn whitelist_var>(mut self, arg: T) -> Builder { self.options.whitelisted_vars.insert(arg); self @@ -821,11 +915,24 @@ impl Builder { /// /// This makes bindgen generate a type that isn't a rust `enum`. Regular /// expressions are supported. + /// + /// This is similar to the newtype enum style, but with the bitwise + /// operators implemented. pub fn bitfield_enum>(mut self, arg: T) -> Builder { self.options.bitfield_enums.insert(arg); self } + /// Mark the given enum (or set of enums, if using a pattern) as a newtype. + /// Regular expressions are supported. + /// + /// This makes bindgen generate a type that isn't a Rust `enum`. Regular + /// expressions are supported. + pub fn newtype_enum>(mut self, arg: T) -> Builder { + self.options.newtype_enums.insert(arg); + self + } + /// Mark the given enum (or set of enums, if using a pattern) as a Rust /// enum. /// @@ -841,7 +948,7 @@ impl Builder { } /// Mark the given enum (or set of enums, if using a pattern) as a Rust - /// enum with the #[non_exhaustive] attribute. + /// enum with the `#[non_exhaustive]` attribute. /// /// This makes bindgen generate enums instead of constants. Regular /// expressions are supported. 
@@ -870,6 +977,45 @@ impl Builder { self } + /// Set the default style of code to generate for typedefs + pub fn default_alias_style( + mut self, + arg: codegen::AliasVariation, + ) -> Builder { + self.options.default_alias_style = arg; + self + } + + /// Mark the given typedef alias (or set of aliases, if using a pattern) to + /// use regular Rust type aliasing. + /// + /// This is the default behavior and should be used if `default_alias_style` + /// was set to NewType or NewTypeDeref and you want to override it for a + /// set of typedefs. + pub fn type_alias>(mut self, arg: T) -> Builder { + self.options.type_alias.insert(arg); + self + } + + /// Mark the given typedef alias (or set of aliases, if using a pattern) to + /// be generated as a new type by having the aliased type be wrapped in a + /// #[repr(transparent)] struct. + /// + /// Used to enforce stricter type checking. + pub fn new_type_alias>(mut self, arg: T) -> Builder { + self.options.new_type_alias.insert(arg); + self + } + + /// Mark the given typedef alias (or set of aliases, if using a pattern) to + /// be generated as a new type by having the aliased type be wrapped in a + /// #[repr(transparent)] struct and also have an automatically generated + /// impl's of `Deref` and `DerefMut` to their aliased type. + pub fn new_type_alias_deref>(mut self, arg: T) -> Builder { + self.options.new_type_alias_deref.insert(arg); + self + } + /// Add a string to prepend to the generated bindings. The string is passed /// through without any modification. pub fn raw_line>(mut self, arg: T) -> Self { @@ -1078,6 +1224,29 @@ impl Builder { self } + /// Disable nested struct naming. + /// + /// The following structs have different names for C and C++. In case of C + /// they are visible as `foo` and `bar`. In case of C++ they are visible as + /// `foo` and `foo::bar`. + /// + /// ```c + /// struct foo { + /// struct bar { + /// } b; + /// }; + /// ``` + /// + /// Bindgen wants to avoid duplicate names by default so it follows C++ naming + /// and it generates `foo`/`foo_bar` instead of just `foo`/`bar`. + /// + /// This method disables this behavior and it is indented to be used only + /// for headers that were written for C. + pub fn disable_nested_struct_naming(mut self) -> Builder { + self.options.disable_nested_struct_naming = true; + self + } + /// Treat inline namespaces conservatively. /// /// This is tricky, because in C++ is technically legal to override an item @@ -1179,12 +1348,18 @@ impl Builder { self } - /// Prepend the enum name to constant or bitfield variants. + /// Prepend the enum name to constant or newtype variants. pub fn prepend_enum_name(mut self, doit: bool) -> Self { self.options.prepend_enum_name = doit; self } + /// Set whether `size_t` should be translated to `usize` automatically. + pub fn size_t_is_usize(mut self, is: bool) -> Self { + self.options.size_t_is_usize = is; + self + } + /// Set whether rustfmt should format the generated bindings. pub fn rustfmt_bindings(mut self, doit: bool) -> Self { self.options.rustfmt_bindings = doit; @@ -1361,6 +1536,15 @@ impl Builder { self.options.array_pointers_in_arguments = doit; self } + + /// Set the wasm import module name + pub fn wasm_import_module_name>( + mut self, + import_name: T, + ) -> Self { + self.options.wasm_import_module_name = Some(import_name.into()); + self + } } /// Configuration options for generated bindings. 
@@ -1402,12 +1586,17 @@ struct BindgenOptions { /// The default style of code to generate for enums default_enum_style: codegen::EnumVariation, - /// The enum patterns to mark an enum as bitfield. + /// The enum patterns to mark an enum as a bitfield + /// (newtype with bitwise operations). bitfield_enums: RegexSet, + /// The enum patterns to mark an enum as a newtype. + newtype_enums: RegexSet, + /// The enum patterns to mark an enum as a Rust enum. rustified_enums: RegexSet, + /// The enum patterns to mark an enum as a non-exhaustive Rust enum. rustified_non_exhaustive_enums: RegexSet, /// The enum patterns to mark an enum as a module of constants. @@ -1416,6 +1605,19 @@ struct BindgenOptions { /// The enum patterns to mark an enum as a set of constants. constified_enums: RegexSet, + /// The default style of code to generate for typedefs. + default_alias_style: codegen::AliasVariation, + + /// Typedef patterns that will use regular type aliasing. + type_alias: RegexSet, + + /// Typedef patterns that will be aliased by creating a new struct. + new_type_alias: RegexSet, + + /// Typedef patterns that will be wrapped in a new struct and have + /// Deref and Deref to their aliased type. + new_type_alias_deref: RegexSet, + /// Whether we should generate builtins or not. builtins: bool, @@ -1439,6 +1641,9 @@ struct BindgenOptions { /// True if we should avoid mangling names with namespaces. disable_name_namespacing: bool, + /// True if we should avoid generating nested struct names. + disable_nested_struct_naming: bool, + /// True if we should generate layout tests for generated structures. layout_tests: bool, @@ -1566,7 +1771,7 @@ struct BindgenOptions { /// Whether to detect include paths using clang_sys. detect_include_paths: bool, - /// Whether to prepend the enum name to bitfield or constant variants. + /// Whether to prepend the enum name to constant or newtype variants. prepend_enum_name: bool, /// Version of the Rust compiler to target @@ -1581,6 +1786,9 @@ struct BindgenOptions { /// items via the `error!` log. record_matches: bool, + /// Whether `size_t` should be translated to `usize` automatically. + size_t_is_usize: bool, + /// Whether rustfmt should format the generated bindings. rustfmt_bindings: bool, @@ -1599,6 +1807,9 @@ struct BindgenOptions { /// Decide if C arrays should be regular pointers in rust or array pointers array_pointers_in_arguments: bool, + + /// Wasm import module name. 
+ wasm_import_module_name: Option, } /// TODO(emilio): This is sort of a lie (see the error message that results from @@ -1619,7 +1830,12 @@ impl BindgenOptions { &mut self.bitfield_enums, &mut self.constified_enums, &mut self.constified_enum_modules, + &mut self.newtype_enums, &mut self.rustified_enums, + &mut self.rustified_non_exhaustive_enums, + &mut self.type_alias, + &mut self.new_type_alias, + &mut self.new_type_alias_deref, &mut self.no_partialeq_types, &mut self.no_copy_types, &mut self.no_hash_types, @@ -1661,10 +1877,15 @@ impl Default for BindgenOptions { whitelisted_vars: Default::default(), default_enum_style: Default::default(), bitfield_enums: Default::default(), + newtype_enums: Default::default(), rustified_enums: Default::default(), rustified_non_exhaustive_enums: Default::default(), constified_enums: Default::default(), constified_enum_modules: Default::default(), + default_alias_style: Default::default(), + type_alias: Default::default(), + new_type_alias: Default::default(), + new_type_alias_deref: Default::default(), builtins: false, emit_ast: false, emit_ir: false, @@ -1683,6 +1904,7 @@ impl Default for BindgenOptions { enable_cxx_namespaces: false, enable_function_attribute_detection: false, disable_name_namespacing: false, + disable_nested_struct_naming: false, use_core: false, ctypes_prefix: None, namespaced_constants: true, @@ -1708,15 +1930,18 @@ impl Default for BindgenOptions { time_phases: false, record_matches: true, rustfmt_bindings: true, + size_t_is_usize: false, rustfmt_configuration_file: None, no_partialeq_types: Default::default(), no_copy_types: Default::default(), no_hash_types: Default::default(), array_pointers_in_arguments: false, + wasm_import_module_name: None, } } } +#[cfg(feature = "runtime")] fn ensure_libclang_is_loaded() { if clang_sys::is_loaded() { return; @@ -1727,7 +1952,7 @@ fn ensure_libclang_is_loaded() { // across different threads. lazy_static! { - static ref LIBCLANG: Arc = { + static ref LIBCLANG: std::sync::Arc = { clang_sys::load().expect("Unable to find libclang"); clang_sys::get_library().expect( "We just loaded libclang and it had better still be \ @@ -1739,6 +1964,9 @@ fn ensure_libclang_is_loaded() { clang_sys::set_library(Some(LIBCLANG.clone())); } +#[cfg(not(feature = "runtime"))] +fn ensure_libclang_is_loaded() {} + /// Generated Rust bindings. #[derive(Debug)] pub struct Bindings { @@ -1753,10 +1981,13 @@ impl Bindings { ) -> Result { ensure_libclang_is_loaded(); + #[cfg(feature = "runtime")] debug!( "Generating bindings, libclang at {}", clang_sys::get_library().unwrap().path().display() ); + #[cfg(not(feature = "runtime"))] + debug!("Generating bindings, libclang linked"); options.build(); @@ -1958,10 +2189,9 @@ impl Bindings { } } #[cfg(not(feature = "which-rustfmt"))] - Err(io::Error::new( - io::ErrorKind::Other, - "which wasn't enabled, and no rustfmt binary specified", - )) + // No rustfmt binary was specified, so assume that the binary is called + // "rustfmt" and that it is in the user's PATH. + Ok(Cow::Owned("rustfmt".into())) } /// Checks if rustfmt_bindings is set and runs rustfmt on the string @@ -2113,10 +2343,7 @@ pub struct ClangVersion { /// Get the major and the minor semver numbers of Clang's version pub fn clang_version() -> ClangVersion { - if !clang_sys::is_loaded() { - // TODO(emilio): Return meaningful error (breaking). 
- clang_sys::load().expect("Unable to find libclang"); - } + ensure_libclang_is_loaded(); let raw_v: String = clang::extract_clang_version(); let split_v: Option> = raw_v @@ -2147,6 +2374,27 @@ pub fn clang_version() -> ClangVersion { } } +/// A ParseCallbacks implementation that will act on file includes by echoing a rerun-if-changed +/// line +/// +/// When running in side a `build.rs` script, this can be used to make cargo invalidate the +/// generated bindings whenever any of the files included from the header change: +/// ``` +/// use bindgen::builder; +/// let bindings = builder() +/// .header("path/to/input/header") +/// .parse_callbacks(Box::new(bindgen::CargoCallbacks)) +/// .generate(); +/// ``` +#[derive(Debug)] +pub struct CargoCallbacks; + +impl callbacks::ParseCallbacks for CargoCallbacks { + fn include_file(&self, filename: &str) { + println!("cargo:rerun-if-changed={}", filename); + } +} + /// Test command_line_flag function. #[test] fn commandline_flag_unit_test_function() { diff --git a/third_party/rust/bindgen/src/main.rs b/third_party/rust/bindgen/src/main.rs index 5e22b1e6bc..9cf030694d 100644 --- a/third_party/rust/bindgen/src/main.rs +++ b/third_party/rust/bindgen/src/main.rs @@ -17,34 +17,41 @@ mod log_stubs; mod options; use options::builder_from_flags; -pub fn main() { - #[cfg(feature = "logging")] - env_logger::init(); - - let bind_args: Vec<_> = env::args().collect(); - +fn clang_version_check() { let version = clang_version(); - let expected_version = if cfg!(feature = "testing_only_libclang_4") { - (4, 0) + let expected_version = if cfg!(feature = "testing_only_libclang_9") { + Some((9, 0)) + } else if cfg!(feature = "testing_only_libclang_5") { + Some((5, 0)) + } else if cfg!(feature = "testing_only_libclang_4") { + Some((4, 0)) + } else if cfg!(feature = "testing_only_libclang_3_9") { + Some((3, 9)) } else if cfg!(feature = "testing_only_libclang_3_8") { - (3, 8) + Some((3, 8)) } else { - // Default to 3.9. - (3, 9) + None }; - info!("Clang Version: {}", version.full); + info!( + "Clang Version: {}, parsed: {:?}", + version.full, version.parsed + ); - match version.parsed { - None => warn!("Couldn't parse libclang version"), - Some(version) if version != expected_version => { - warn!("Using clang {:?}, expected {:?}", version, expected_version); - } - _ => {} + if expected_version.is_some() { + assert_eq!(version.parsed, version.parsed); } +} + +pub fn main() { + #[cfg(feature = "logging")] + env_logger::init(); + + let bind_args: Vec<_> = env::args().collect(); match builder_from_flags(bind_args.into_iter()) { Ok((builder, output, verbose)) => { + clang_version_check(); let builder_result = panic::catch_unwind(|| { builder.generate().expect("Unable to generate bindings") }); diff --git a/third_party/rust/bindgen/src/options.rs b/third_party/rust/bindgen/src/options.rs index 6ead924141..b630bb4bed 100644 --- a/third_party/rust/bindgen/src/options.rs +++ b/third_party/rust/bindgen/src/options.rs @@ -1,4 +1,7 @@ -use bindgen::{builder, Builder, CodegenConfig, EnumVariation, RustTarget, RUST_TARGET_STRINGS}; +use bindgen::{ + builder, AliasVariation, Builder, CodegenConfig, EnumVariation, RustTarget, + RUST_TARGET_STRINGS, +}; use clap::{App, Arg}; use std::fs::File; use std::io::{self, stderr, Error, ErrorKind, Write}; @@ -6,7 +9,9 @@ use std::path::PathBuf; use std::str::FromStr; /// Construct a new [`Builder`](./struct.Builder.html) from command line flags. 
-pub fn builder_from_flags(args: I) -> Result<(Builder, Box, bool), io::Error> +pub fn builder_from_flags( + args: I, +) -> Result<(Builder, Box, bool), io::Error> where I: Iterator, { @@ -33,6 +38,7 @@ where "consts", "moduleconsts", "bitfield", + "newtype", "rust", "rust_non_exhaustive", ]) @@ -47,6 +53,13 @@ where .takes_value(true) .multiple(true) .number_of_values(1), + Arg::with_name("newtype-enum") + .long("newtype-enum") + .help("Mark any enum whose name matches as a newtype.") + .value_name("regex") + .takes_value(true) + .multiple(true) + .number_of_values(1), Arg::with_name("rustified-enum") .long("rustified-enum") .help("Mark any enum whose name matches as a Rust enum.") @@ -74,6 +87,47 @@ where .takes_value(true) .multiple(true) .number_of_values(1), + Arg::with_name("default-alias-style") + .long("default-alias-style") + .help("The default style of code used to generate typedefs.") + .value_name("variant") + .default_value("type_alias") + .possible_values(&[ + "type_alias", + "new_type", + "new_type_deref", + ]) + .multiple(false), + Arg::with_name("normal-alias") + .long("normal-alias") + .help( + "Mark any typedef alias whose name matches to use \ + normal type aliasing.", + ) + .value_name("regex") + .takes_value(true) + .multiple(true) + .number_of_values(1), + Arg::with_name("new-type-alias") + .long("new-type-alias") + .help( + "Mark any typedef alias whose name matches to have \ + a new type generated for it.", + ) + .value_name("regex") + .takes_value(true) + .multiple(true) + .number_of_values(1), + Arg::with_name("new-type-alias-deref") + .long("new-type-alias-deref") + .help( + "Mark any typedef alias whose name matches to have \ + a new type with Deref and DerefMut to the inner type.", + ) + .value_name("regex") + .takes_value(true) + .multiple(true) + .number_of_values(1), Arg::with_name("blacklist-type") .long("blacklist-type") .help("Mark as hidden.") @@ -206,6 +260,13 @@ where generate names like \"Baz\" instead of \"foo_bar_Baz\" \ for an input name \"foo::bar::Baz\".", ), + Arg::with_name("disable-nested-struct-naming") + .long("disable-nested-struct-naming") + .help( + "Disable nested struct naming, causing bindgen to generate \ + names like \"bar\" instead of \"foo_bar\" for a nested \ + definition \"struct foo { struct bar { } b; };\"." + ), Arg::with_name("ignore-functions") .long("ignore-functions") .help( @@ -228,7 +289,7 @@ where .help("Do not automatically convert floats to f32/f64."), Arg::with_name("no-prepend-enum-name") .long("no-prepend-enum-name") - .help("Do not prepend the enum name to bitfield or constant variants."), + .help("Do not prepend the enum name to constant or newtype variants."), Arg::with_name("no-include-path-detection") .long("no-include-path-detection") .help("Do not try to detect default include paths"), @@ -322,6 +383,9 @@ where "Do not record matching items in the regex sets. 
\ This disables reporting of unused items.", ), + Arg::with_name("size_t-is-usize") + .long("size_t-is-usize") + .help("Translate size_t to usize."), Arg::with_name("no-rustfmt-bindings") .long("no-rustfmt-bindings") .help("Do not format the generated bindings with rustfmt."), @@ -373,6 +437,11 @@ where Arg::with_name("use-array-pointers-in-arguments") .long("use-array-pointers-in-arguments") .help("Use `*const [T; size]` instead of `*const T` for C arrays"), + Arg::with_name("wasm-import-module-name") + .long("wasm-import-module-name") + .value_name("name") + .takes_value(true) + .help("The name to be used in a #[link(wasm_import_module = ...)] statement") ]) // .args() .get_matches_from(args); @@ -407,14 +476,20 @@ where } } + if let Some(newtypes) = matches.values_of("newtype-enum") { + for regex in newtypes { + builder = builder.newtype_enum(regex); + } + } + if let Some(rustifieds) = matches.values_of("rustified-enum") { for regex in rustifieds { builder = builder.rustified_enum(regex); } } - if let Some(bitfields) = matches.values_of("constified-enum") { - for regex in bitfields { + if let Some(const_enums) = matches.values_of("constified-enum") { + for regex in const_enums { builder = builder.constified_enum(regex); } } @@ -424,6 +499,30 @@ where builder = builder.constified_enum_module(regex); } } + + if let Some(variant) = matches.value_of("default-alias-style") { + builder = + builder.default_alias_style(AliasVariation::from_str(variant)?); + } + + if let Some(type_alias) = matches.values_of("normal-alias") { + for regex in type_alias { + builder = builder.type_alias(regex); + } + } + + if let Some(new_type) = matches.values_of("new-type-alias") { + for regex in new_type { + builder = builder.new_type_alias(regex); + } + } + + if let Some(new_type_deref) = matches.values_of("new-type-alias-deref") { + for regex in new_type_deref { + builder = builder.new_type_alias_deref(regex); + } + } + if let Some(hidden_types) = matches.values_of("blacklist-type") { for ty in hidden_types { builder = builder.blacklist_type(ty); @@ -510,6 +609,11 @@ where builder = builder.array_pointers_in_arguments(true); } + if let Some(wasm_import_name) = matches.value_of("wasm-import-module-name") + { + builder = builder.wasm_import_module_name(wasm_import_name); + } + if let Some(prefix) = matches.value_of("ctypes-prefix") { builder = builder.ctypes_prefix(prefix); } @@ -559,6 +663,10 @@ where builder = builder.disable_name_namespacing(); } + if matches.is_present("disable-nested-struct-naming") { + builder = builder.disable_nested_struct_naming(); + } + if matches.is_present("ignore-functions") { builder = builder.ignore_functions(); } @@ -658,6 +766,10 @@ where builder = builder.record_matches(false); } + if matches.is_present("size_t-is-usize") { + builder = builder.size_t_is_usize(true); + } + let no_rustfmt_bindings = matches.is_present("no-rustfmt-bindings"); if no_rustfmt_bindings { builder = builder.rustfmt_bindings(false); diff --git a/third_party/rust/binjs_meta/.cargo-checksum.json b/third_party/rust/binjs_meta/.cargo-checksum.json index 421ce1dff2..bcf84681a5 100644 --- a/third_party/rust/binjs_meta/.cargo-checksum.json +++ b/third_party/rust/binjs_meta/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{"Cargo.toml":"23870cd2ada8f913245771d63e99aea00df6917ca461644b5d51324f370b00ef","README.md":"17e5ed3a3bd9b898e73c3056711daabe1238fe9682d24d255f8263fae4eb783d","examples/generate_spidermonkey.rs":"913a34e84f45bd8bbe305629ca5abb2818370333a769074a757ce1bb4bb54778","src/export.rs":"920c045da0597fd330737c707a4d2ec2af6174b809b204b693f57ab45edbdc68","src/import.rs":"5d4c5ac03404ec04af21963f951626209c944ed5c55a51b09b05da596e5174d0","src/lib.rs":"546b6b13669d659d35a130dd29cfe3eac547a41d210adb194db1b214194295d7","src/spec.rs":"9adb6ff0168e3ec29735a5a9198756076385b6f6063883e47190846f46fa7247","src/util.rs":"47fcb109cd3d20eed52f7d1d99a12da86f40204fbf29f990ff60bb35bfe86fa1"},"package":"6c9a0da2208ceb785c1626fa8b7d250d2e5546ae230294b4a998e4f818c1768e"} \ No newline at end of file +{"files":{"Cargo.lock":"c4f860261c1fd22b88a416ddc2546fde7ac6b8d0ee584979b2cc930709840edd","Cargo.toml":"9fd70e5e3152c11487347f14f4f2703486e3de3a1c9f0249f6196ca3b0308511","README.md":"17e5ed3a3bd9b898e73c3056711daabe1238fe9682d24d255f8263fae4eb783d","examples/generate_spidermonkey.rs":"913a34e84f45bd8bbe305629ca5abb2818370333a769074a757ce1bb4bb54778","src/export.rs":"12d94158d1285ac880e3c105f086ad1c22c3c55432fdc3123f8f2ab4ccc71152","src/import.rs":"2f587c0737310e3b50280e0b62069de2bfa585d0812e2f0eb469cdada8502291","src/lib.rs":"546b6b13669d659d35a130dd29cfe3eac547a41d210adb194db1b214194295d7","src/spec.rs":"9adb6ff0168e3ec29735a5a9198756076385b6f6063883e47190846f46fa7247","src/util.rs":"9b19ace8cfc0e47e6090ba9223123eba0214d05c1969414272b6785b25de36d2"},"package":"d535cc5246fd9035268770420afd76c05f87e68b83ebed0ac94e8258e88fc353"} \ No newline at end of file diff --git a/third_party/rust/binjs_meta/Cargo.lock b/third_party/rust/binjs_meta/Cargo.lock new file mode 100644 index 0000000000..5bd2700968 --- /dev/null +++ b/third_party/rust/binjs_meta/Cargo.lock @@ -0,0 +1,315 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+[[package]] +name = "Inflector" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "aho-corasick" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ansi_term" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "atty" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", + "termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "binjs_meta" +version = "0.5.4" +dependencies = [ + "Inflector 0.11.4 (registry+https://github.com/rust-lang/crates.io-index)", + "clap 2.30.0 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "itertools 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "weedle 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "yaml-rust 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "bitflags" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "cfg-if" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "clap" +version = "2.30.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "ansi_term 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)", + "atty 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "strsim 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "textwrap 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "vec_map 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "either" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "env_logger" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "atty 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "humantime" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "itertools" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "either 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "lazy_static" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "libc" +version = 
"0.2.36" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "linked-hash-map" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "log" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "memchr" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "nom" +version = "5.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "redox_syscall" +version = "0.1.37" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "redox_termios" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "redox_syscall 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "regex" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "aho-corasick 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)", + "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "regex-syntax" +version = "0.6.16" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "strsim" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "termcolor" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "termion" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "textwrap" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "thread_local" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unicode-width" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "utf8-ranges" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "vec_map" +version = "0.8.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "version_check" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "weedle" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "nom 5.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "winapi" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winapi-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "yaml-rust" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "linked-hash-map 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[metadata] +"checksum Inflector 0.11.4 (registry+https://github.com/rust-lang/crates.io-index)" = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" +"checksum aho-corasick 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "d6531d44de723825aa81398a6415283229725a00fa30713812ab9323faa82fc4" +"checksum ansi_term 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6b3568b48b7cefa6b8ce125f9bb4989e52fbcc29ebea88df04cc7c5f12f70455" +"checksum atty 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "8352656fd42c30a0c3c89d26dea01e3b77c0ab2af18230835c15e2e13cd51859" +"checksum bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b3c30d3802dfb7281680d6285f2ccdaa8c2d8fee41f93805dba5c4cf50dc23cf" +"checksum cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d4c819a1287eb618df47cc647173c5c4c66ba19d888a6e50d605672aed3140de" +"checksum clap 2.30.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1c07b9257a00f3fc93b7f3c417fc15607ec7a56823bc2c37ec744e266387de5b" +"checksum either 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "740178ddf48b1a9e878e6d6509a1442a2d42fd2928aae8e7a6f8a36fb01981b3" +"checksum env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "aafcde04e90a5226a6443b7aabdb016ba2f8307c847d524724bd9b346dd1a2d3" +"checksum humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" +"checksum itertools 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f56a2d0bc861f9165be4eb3442afd3c236d8a98afd426f65d92324ae1091a484" +"checksum lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c8f31047daa365f19be14b47c29df4f7c3b581832407daabe6ae77397619237d" +"checksum libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)" = "1e5d97d6708edaa407429faa671b942dc0f2727222fb6b6539bf1db936e4b121" +"checksum linked-hash-map 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = 
"70fb39025bc7cdd76305867c4eccf2f2dcf6e9a57f5b21a93e1c2d86cd03ec9e" +"checksum log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "89f010e843f2b1a31dbd316b3b8d443758bc634bed37aabade59c686d644e0a2" +"checksum memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "796fba70e76612589ed2ce7f45282f5af869e0fdd7cc6199fa1aa1f1d591ba9d" +"checksum nom 5.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b471253da97532da4b61552249c521e01e736071f71c1a4f7ebbfbf0a06aad6" +"checksum quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +"checksum redox_syscall 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)" = "0d92eecebad22b767915e4d529f89f28ee96dbbf5a4810d2b844373f136417fd" +"checksum redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7e891cfe48e9100a70a3b6eb652fef28920c117d366339687bd5576160db0f76" +"checksum regex 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3d8c9f33201f46669484bacc312b00e7541bed6aaf296dffe2bb4e0ac6b8ce2a" +"checksum regex-syntax 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)" = "1132f845907680735a84409c3bebc64d1364a5683ffbce899550cd09d5eaefc1" +"checksum strsim 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb4f380125926a99e52bc279241539c018323fab05ad6368b56f93d9369ff550" +"checksum termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f" +"checksum termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "689a3bdfaab439fd92bc87df5c4c78417d3cbe537487274e9b0b2dce76e92096" +"checksum textwrap 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c0b59b6b4b44d867f1370ef1bd91bfb262bf07bf0ae65c202ea2fbc16153b693" +"checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b" +"checksum unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "bf3a113775714a22dcb774d8ea3655c53a32debae63a063acc00a91cc586245f" +"checksum utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "662fab6525a98beff2921d7f61a39e7d59e0b425ebc7d0d9e66d316e55124122" +"checksum vec_map 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "887b5b631c2ad01628bbbaa7dd4c869f80d3186688f8d0b6f58774fbe324988c" +"checksum version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce" +"checksum weedle 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8a7d4f9feb723a800d8f7b74edc9fa44ff35cb0b2ec64886714362f423427f37" +"checksum winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "04e3bd221fcbe8a271359c04f21a76db7d0c6028862d1bb5512d85e1e2eb5bb3" +"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +"checksum winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4ccfbf554c6ad11084fb7517daca16cfdcaccbdadba4fc336f032a8b12c2ad80" +"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +"checksum yaml-rust 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"57ab38ee1a4a266ed033496cf9af1828d8d6e6c1cfa5f643a2809effcae4d628" diff --git a/third_party/rust/binjs_meta/Cargo.toml b/third_party/rust/binjs_meta/Cargo.toml index 5ac9593460..2d0f3464af 100644 --- a/third_party/rust/binjs_meta/Cargo.toml +++ b/third_party/rust/binjs_meta/Cargo.toml @@ -12,7 +12,7 @@ [package] name = "binjs_meta" -version = "0.5.2" +version = "0.5.4" authors = ["David Teller "] description = "Part of binjs-ref. Tools for manipulating grammars. You probably do not want to use this crate directly unless you're writing an encoder, decoder or parser generator for binjs." homepage = "https://binast.github.io/ecmascript-binary-ast/" @@ -22,7 +22,8 @@ categories = ["compression", "parsing", "web-programming"] license = "MIT" repository = "https://github.com/binast/binjs-ref" [dependencies.Inflector] -version = "^0.11" +version = "^0.11.4" +default-features = false [dependencies.itertools] version = "^0.8" @@ -31,7 +32,7 @@ version = "^0.8" version = "^0.4" [dependencies.weedle] -version = "^0.8" +version = "0.11" [dev-dependencies.clap] version = "^2.0" diff --git a/third_party/rust/binjs_meta/src/export.rs b/third_party/rust/binjs_meta/src/export.rs index 4900fab4fe..b5fdfa79bc 100644 --- a/third_party/rust/binjs_meta/src/export.rs +++ b/third_party/rust/binjs_meta/src/export.rs @@ -348,7 +348,7 @@ impl TypeDeanonymizer { let mut names = vec![]; let mut subsums = vec![]; for sub_type in sum.types() { - let (mut sub_sum, name) = self.import_typespec(spec, sub_type, None); + let (sub_sum, name) = self.import_typespec(spec, sub_type, None); let mut sub_sum = sub_sum.unwrap_or_else( || panic!("While treating {:?}, attempting to create a sum containing {}, which isn't an interface or a sum of interfaces", type_spec, name) ); @@ -370,7 +370,7 @@ impl TypeDeanonymizer { }; for subsum_name in subsums { // So, `my_name` is a superset of `subsum_name`. - let mut supersum_entry = self + let supersum_entry = self .supersums_of .entry(subsum_name.clone()) .or_insert_with(|| HashSet::new()); diff --git a/third_party/rust/binjs_meta/src/import.rs b/third_party/rust/binjs_meta/src/import.rs index 30ca6d7410..329b139177 100644 --- a/third_party/rust/binjs_meta/src/import.rs +++ b/third_party/rust/binjs_meta/src/import.rs @@ -75,7 +75,7 @@ impl Importer { /// ``` pub fn import<'a>( sources: impl IntoIterator, - ) -> Result>> { + ) -> Result> { let mut importer = Importer { path: Vec::with_capacity(256), builder: SpecBuilder::new(), @@ -258,7 +258,7 @@ impl Importer { .builder .get_typedef_mut(&node_name) .unwrap_or_else(|| panic!("Could not find typedef {}", extended)); - let mut typespec = typedef.spec_mut(); + let typespec = typedef.spec_mut(); let typesum = if let TypeSpec::TypeSum(ref mut typesum) = *typespec { typesum } else { @@ -330,7 +330,9 @@ impl Importer { .list .iter() .map(|t| match t { - UnionMemberType::Single(t) => self.convert_single_type(t), + UnionMemberType::Single(AttributedNonAnyType { type_: t, .. 
}) => { + self.convert_single_type(t) + } UnionMemberType::Union(t) => self.convert_union_type(t), }) .map(|t| t.spec) diff --git a/third_party/rust/binjs_meta/src/util.rs b/third_party/rust/binjs_meta/src/util.rs index 8c2eb031c3..d5fb75de36 100644 --- a/third_party/rust/binjs_meta/src/util.rs +++ b/third_party/rust/binjs_meta/src/util.rs @@ -294,7 +294,7 @@ where let text = &line[indent_len..]; let mut gobbled = 0; while text.len() > gobbled { - let mut rest = &text[gobbled..]; + let rest = &text[gobbled..]; eprintln!("Line still contains {} ({})", rest, gobbled); if rest.len() + prefix.len() > columns { // Try and find the largest prefix of `text` that fits within `columns`. diff --git a/third_party/rust/bitreader/.cargo-checksum.json b/third_party/rust/bitreader/.cargo-checksum.json index bf29080e20..292b515694 100644 --- a/third_party/rust/bitreader/.cargo-checksum.json +++ b/third_party/rust/bitreader/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"4ea2fe4a12740a572f459cc5c51ca721b1a7b256a0976be561c9b0a9fce0dcc7","LICENSE-APACHE":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","LICENSE-MIT":"8583712ee2b062ff3d4d6d3e16f19ff0f92bc3a0a4beeec11a81ef00146fbd4f","README.md":"a8bfdd9509bb3bb30b30bbe308a717e9827cf97d7a97e5fb5cd69bdd3c88a490","src/lib.rs":"a7ed9d2607f47b7d5d11ccaccf23486a21d072435231d09f4548ad0c4ad62f5b","src/tests.rs":"c4e99780432b3ad05f625961699da72239a975f838cb0ab1cf2501424baed38c"},"package":"80b13e2ab064ff3aa0bdbf1eff533f9822dc37899821f5f98c67f263eab51707"} \ No newline at end of file +{"files":{"Cargo.toml":"27cfa3fe44cc78bce30786d727dedef7967e399c0cdec1282e8dafc9c38a6c10","LICENSE-APACHE":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","LICENSE-MIT":"8583712ee2b062ff3d4d6d3e16f19ff0f92bc3a0a4beeec11a81ef00146fbd4f","README.md":"28986de2e8d457e76ae3303d80094697e6ef4ad8da06a4a3178bb1b52bff63d5","src/lib.rs":"6947e329a2f20dca35ea14b6f483c23b4812ae55b6851a276355c31fb7dd65e9","src/tests.rs":"b3ed3ae22daa348dc786219b6e2c4f2b1d3ba35d7d4401ea05f773773fdf8807"},"package":"5fa7f0adf37cd5472c978a1ff4be89c1880a923d10df4cfef6a10855a666e09b"} \ No newline at end of file diff --git a/third_party/rust/bitreader/Cargo.toml b/third_party/rust/bitreader/Cargo.toml index 35fbf127a1..c37e7c33ca 100644 --- a/third_party/rust/bitreader/Cargo.toml +++ b/third_party/rust/bitreader/Cargo.toml @@ -1,19 +1,28 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + [package] name = "bitreader" -version = "0.3.0" +version = "0.3.2" authors = ["Ilkka Rauta "] - -description = """ -BitReader helps reading individual bits from a slice of bytes. - -You can read "unusual" numbers of bits from the byte slice, for example 13 bits -at once. The reader internally keeps track of position within the buffer. -""" - -documentation = "https://docs.rs/bitreader" +description = "BitReader helps reading individual bits from a slice of bytes.\n\nYou can read \"unusual\" numbers of bits from the byte slice, for example 13 bits\nat once. 
The reader internally keeps track of position within the buffer.\n" homepage = "https://github.com/irauta/bitreader" -repository = "https://github.com/irauta/bitreader" - +documentation = "https://docs.rs/bitreader" keywords = ["bit", "bits", "bitstream"] - license = "MIT OR Apache-2.0" +repository = "https://github.com/irauta/bitreader" +[dependencies.cfg-if] +version = "0.1.9" + +[features] +default = ["std"] +std = [] diff --git a/third_party/rust/bitreader/README.md b/third_party/rust/bitreader/README.md index ef1debbb91..640071bf23 100644 --- a/third_party/rust/bitreader/README.md +++ b/third_party/rust/bitreader/README.md @@ -20,7 +20,7 @@ Here is how you read first a single bit, then three bits and finally four bits f You can naturally read bits from longer buffer of data than just a single byte. -As you read bits, the internal cursor of BitReader moves on along the stream of bits. Little endian format is assumed when reading the multi-byte values. BitReader supports reading maximum of 64 bits at a time (with read_u64). +As you read bits, the internal cursor of BitReader moves on along the stream of bits. Big endian format is assumed when reading the multi-byte values. BitReader supports reading maximum of 64 bits at a time (with read_u64). ## License diff --git a/third_party/rust/bitreader/src/lib.rs b/third_party/rust/bitreader/src/lib.rs index 3d04dc4fad..47305c15e5 100644 --- a/third_party/rust/bitreader/src/lib.rs +++ b/third_party/rust/bitreader/src/lib.rs @@ -30,7 +30,7 @@ //! ``` //! You can naturally read bits from longer buffer of data than just a single byte. //! -//! As you read bits, the internal cursor of BitReader moves on along the stream of bits. Little +//! As you read bits, the internal cursor of BitReader moves on along the stream of bits. Big //! endian format is assumed when reading the multi-byte values. BitReader supports reading maximum //! of 64 bits at a time (with read_u64). Reading signed values directly is not supported at the //! moment. @@ -46,10 +46,19 @@ //! Note that the code will likely not work correctly if the slice is longer than 2^61 bytes, but //! exceeding that should be pretty unlikely. Let's get back to this when people read exabytes of //! information one bit at a time. - -use std::fmt; -use std::error::Error; -use std::result; +#![no_std] +cfg_if::cfg_if!{ + if #[cfg(feature = "std")] { + extern crate std; + use std::prelude::v1::*; + use std::fmt; + use std::error::Error; + use std::result; + } else { + use core::result; + use core::fmt; + } +} #[cfg(test)] mod tests; @@ -105,60 +114,79 @@ impl<'a> BitReader<'a> { /// Read at most 8 bits into a u8. pub fn read_u8(&mut self, bit_count: u8) -> Result { - let value = try!(self.read_value(bit_count, 8)); + let value = self.read_value(bit_count, 8)?; Ok((value & 0xff) as u8) } + /// Fills the entire `output_bytes` slice. If there aren't enough bits remaining + /// after the internal cursor's current position, the cursor won't be moved forward + /// and the contents of `output_bytes` won't be modified. + pub fn read_u8_slice(&mut self, output_bytes: &mut [u8]) -> Result<()> { + let requested = output_bytes.len() as u64 * 8; + if requested > self.remaining() { + Err(BitReaderError::NotEnoughData { + position: self.position, + length: (self.bytes.len() * 8) as u64, + requested: requested, + }) + } else { + for byte in output_bytes.iter_mut() { + *byte = self.read_u8(8)?; + } + Ok(()) + } + } + /// Read at most 16 bits into a u16. 
pub fn read_u16(&mut self, bit_count: u8) -> Result { - let value = try!(self.read_value(bit_count, 16)); + let value = self.read_value(bit_count, 16)?; Ok((value & 0xffff) as u16) } /// Read at most 32 bits into a u32. pub fn read_u32(&mut self, bit_count: u8) -> Result { - let value = try!(self.read_value(bit_count, 32)); + let value = self.read_value(bit_count, 32)?; Ok((value & 0xffffffff) as u32) } /// Read at most 64 bits into a u64. pub fn read_u64(&mut self, bit_count: u8) -> Result { - let value = try!(self.read_value(bit_count, 64)); + let value = self.read_value(bit_count, 64)?; Ok(value) } /// Read at most 8 bits into a i8. /// Assumes the bits are stored in two's complement format. pub fn read_i8(&mut self, bit_count: u8) -> Result { - let value = try!(self.read_signed_value(bit_count, 8)); + let value = self.read_signed_value(bit_count, 8)?; Ok((value & 0xff) as i8) } /// Read at most 16 bits into a i16. /// Assumes the bits are stored in two's complement format. pub fn read_i16(&mut self, bit_count: u8) -> Result { - let value = try!(self.read_signed_value(bit_count, 16)); + let value = self.read_signed_value(bit_count, 16)?; Ok((value & 0xffff) as i16) } /// Read at most 32 bits into a i32. /// Assumes the bits are stored in two's complement format. pub fn read_i32(&mut self, bit_count: u8) -> Result { - let value = try!(self.read_signed_value(bit_count, 32)); + let value = self.read_signed_value(bit_count, 32)?; Ok((value & 0xffffffff) as i32) } /// Read at most 64 bits into a i64. /// Assumes the bits are stored in two's complement format. pub fn read_i64(&mut self, bit_count: u8) -> Result { - let value = try!(self.read_signed_value(bit_count, 64)); + let value = self.read_signed_value(bit_count, 64)?; Ok(value) } /// Read a single bit as a boolean value. /// Interprets 1 as true and 0 as false. pub fn read_bool(&mut self) -> Result { - match try!(self.read_value(1, 1)) { + match self.read_value(1, 1)? { 0 => Ok(false), _ => Ok(true), } @@ -183,6 +211,12 @@ impl<'a> BitReader<'a> { self.position - self.relative_offset } + /// Returns the number of bits not yet read from the underlying slice. + pub fn remaining(&self) -> u64 { + let total_bits = self.bytes.len() as u64 * 8; + total_bits - self.position + } + /// Helper to make sure the "bit cursor" is exactly at the beginning of a byte, or at specific /// multi-byte alignment position. /// @@ -198,7 +232,7 @@ impl<'a> BitReader<'a> { } fn read_signed_value(&mut self, bit_count: u8, maximum_count: u8) -> Result { - let unsigned = try!(self.read_value(bit_count, maximum_count)); + let unsigned = self.read_value(bit_count, maximum_count)?; // Fill the bits above the requested bits with all ones or all zeros, // depending on the sign bit. let sign_bit = unsigned >> (bit_count - 1) & 1; @@ -263,6 +297,7 @@ pub enum BitReaderError { } } +#[cfg(feature = "std")] impl Error for BitReaderError { fn description(&self) -> &str { match *self { @@ -339,7 +374,7 @@ impl_read_into!(i64, read_i64); // We can't cast to bool, so this requires a separate method. impl ReadInto for bool { fn read(reader: &mut BitReader, bits: u8) -> Result { - match try!(reader.read_u8(bits)) { + match reader.read_u8(bits)? 
{ 0 => Ok(false), _ => Ok(true), } diff --git a/third_party/rust/bitreader/src/tests.rs b/third_party/rust/bitreader/src/tests.rs index f62be1f0c7..3ccafc0416 100644 --- a/third_party/rust/bitreader/src/tests.rs +++ b/third_party/rust/bitreader/src/tests.rs @@ -21,6 +21,9 @@ fn read_buffer() { assert_eq!(reader.read_u8(1).unwrap(), 0b0); assert_eq!(reader.read_u8(2).unwrap(), 0b11); + assert_eq!(reader.position(), 4); + assert_eq!(reader.remaining(), 60); + assert_eq!(reader.read_u8(4).unwrap(), 0b0101); assert!(reader.is_aligned(1)); @@ -29,6 +32,9 @@ fn read_buffer() { assert_eq!(reader.read_u16(10).unwrap(), 0b01_0101_0101); assert_eq!(reader.read_u8(3).unwrap(), 0b100); + assert_eq!(reader.position(), 24); + assert_eq!(reader.remaining(), 40); + assert!(reader.is_aligned(1)); assert_eq!(reader.read_u32(32).unwrap(), 0b1001_1001_1001_1001_1001_1001_1001_1001); @@ -150,3 +156,52 @@ fn boolean_values() { assert_eq!(reader.read_bool().unwrap(), v & 0x01 == 1); } } + +#[test] +fn read_slice() { + let bytes = &[ + 0b1111_0000, 0b0000_1111, 0b1111_0000, + 0b0000_1000, 0b0000_0100, 0b0000_0011, + 0b1111_1100, 0b0000_0011, 0b1101_1000, + ]; + let mut reader = BitReader::new(bytes); + assert_eq!(reader.read_u8(4).unwrap(), 0b1111); + // Just some pattern that's definitely not in the bytes array + let mut output = [0b1010_1101; 3]; + reader.read_u8_slice(&mut output).unwrap(); + assert_eq!(&output, &[0u8, 255u8, 0u8]); + + assert_eq!(reader.read_u8(1).unwrap(), 1); + + reader.read_u8_slice(&mut output[1..2]).unwrap(); + assert_eq!(&output, &[0u8, 0u8, 0u8]); + + assert_eq!(reader.read_u8(1).unwrap(), 1); + + output = [0b1010_1101; 3]; + reader.read_u8_slice(&mut output).unwrap(); + assert_eq!(&output, &[0u8, 255u8, 0u8]); + + reader.read_u8_slice(&mut output[0..1]).unwrap(); + assert_eq!(output[0], 0b1111_0110); + + assert_eq!(reader.read_u8(2).unwrap(), 0); +} + +#[test] +fn read_slice_too_much() { + let bytes = &[ + 0b1111_1111, 0b1111_1111, 0b1111_1111, 0b1111_1111, + ]; + let mut reader = BitReader::new(bytes); + assert_eq!(reader.read_u8(1).unwrap(), 1); + + let mut output = [0u8; 4]; + let should_be_error = reader.read_u8_slice(&mut output); + assert_eq!(should_be_error.unwrap_err(), BitReaderError::NotEnoughData { + position: 1, + length: (bytes.len() * 8) as u64, + requested: (&output.len() * 8) as u64 + }); + assert_eq!(&output, &[0u8; 4]); +} diff --git a/third_party/rust/cexpr/.cargo-checksum.json b/third_party/rust/cexpr/.cargo-checksum.json index 109d240dcb..f50ce9e867 100644 --- a/third_party/rust/cexpr/.cargo-checksum.json +++ b/third_party/rust/cexpr/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{"Cargo.toml":"32b00f47d6888b44ac5fb30e9693437dd95c98f000b5abb9a85880edc746dcb4","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"d9771b8c6cf4426d3846de54c1febe20907f1eeadf7adfb5ade89a83bd9ea77f","src/expr.rs":"b2c80d3125ff2fd66d0f889dfa2d6476b8e68cc9513e508ba862470182a3f7cc","src/lib.rs":"689f19cb8a8e88c2489e0d8f59ed75b03cb19f0e789e5a0d3447f695b2ef2259","src/literal.rs":"22aec22c7bdd374afd06c16c19f09b2763c5ffc37ecb0bbd60a5d9102f57ebc3","src/token.rs":"52d42deb2a2575bb8631e2e821593d8288fed16e21bab3ceeacb6a7b06c40087","tests/clang.rs":"0f820e2003e34c2ab69cd759314cebf755fd1b1929974976d3776968f687be7e","tests/input/chars.h":"69c8141870872b795b5174bad125b748732c2b01d0e98ffcfc37b19f3f791f69","tests/input/fail.h":"b0b6cffd2dd17410b5eb02ee79ab75754820480b960db8a9866cc9983bd36b65","tests/input/floats.h":"28ec664e793c494e1a31f3bc5b790014e9921fc741bf475a86319b9a9eee5915","tests/input/int_signed.h":"934199eded85dd7820ca08c0beb1381ee6d9339970d2720a69c23025571707ce","tests/input/int_unsigned.h":"d6b82716669aecbec4cfff2d1bf8c5af926f80ba01fe89de3b439264f3080ccb","tests/input/strings.h":"75c60527068172b97983d2b8361938e856ea394002d5bef05de1adc6a0f5fc01","tests/input/test_llvm_bug_9069.h":"8d9ae1d1eadc8f6d5c14296f984547fe894d0f2ce5cd6d7aa8caad40a56bc5e1"},"package":"8fc0086be9ca82f7fc89fc873435531cb898b86e850005850de1f820e2db6e9b"} \ No newline at end of file +{"files":{"Cargo.toml":"fa6eef7c30868053f4ef38aa6863b980639c3df49230691f28e8bdde91ab5071","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"d9771b8c6cf4426d3846de54c1febe20907f1eeadf7adfb5ade89a83bd9ea77f","bors.toml":"1c81ede536a37edd30fe4e622ff0531b25372403ac9475a5d6c50f14156565a2","rustfmt.toml":"d8e7f616455a670ba75e3e94bf6f88f4c168c481664d12501820c7dfff5c3cc2","src/expr.rs":"76e9dd53c1f964bfd83866c19849b4d940214032369403cafaa4a595dee66d95","src/lib.rs":"afaf05ed6d5bf93072b853c1999b87b0390311672c52500a0dd467c0caa8701d","src/literal.rs":"637cd13968a6b9cc34dcfa55da67ac144b4f0b1fea7395a746230a41fe1349db","src/token.rs":"cd1ba6315b0137de9a0711670dd1840ac76c41f3b88dcd1a93ad77e1800c703f","tests/clang.rs":"5bb9807f35f760065d15cb9dfb7d8b79c2f734aef7ba5fe3737154155ed8ee73","tests/input/chars.h":"69c8141870872b795b5174bad125b748732c2b01d0e98ffcfc37b19f3f791f69","tests/input/fail.h":"b0b6cffd2dd17410b5eb02ee79ab75754820480b960db8a9866cc9983bd36b65","tests/input/floats.h":"28ec664e793c494e1a31f3bc5b790014e9921fc741bf475a86319b9a9eee5915","tests/input/int_signed.h":"934199eded85dd7820ca08c0beb1381ee6d9339970d2720a69c23025571707ce","tests/input/int_unsigned.h":"7b8023ba468ec76b184912692bc40e8fbcdd92ad86ec5a7c0dbcb02f2b8d961d","tests/input/strings.h":"2dd11bc066f34e8cb1916a28353e9e9a3a21cd406651b2f94fc47e89c95d9cba","tests/input/test_llvm_bug_9069.h":"8d9ae1d1eadc8f6d5c14296f984547fe894d0f2ce5cd6d7aa8caad40a56bc5e1"},"package":"f4aedb84272dbe89af497cf81375129abda4fc0a9e7c5d317498c15cc30c0d27"} \ No newline at end of file diff --git a/third_party/rust/cexpr/Cargo.toml b/third_party/rust/cexpr/Cargo.toml index d787119c10..9ae350dd11 100644 --- a/third_party/rust/cexpr/Cargo.toml +++ b/third_party/rust/cexpr/Cargo.toml @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. 
crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're @@ -11,8 +11,9 @@ # will likely look very different (and much more reasonable) [package] +edition = "2018" name = "cexpr" -version = "0.3.3" +version = "0.4.0" authors = ["Jethro Beekman "] description = "A C expression parser and evaluator" documentation = "https://docs.rs/cexpr/" @@ -20,7 +21,10 @@ keywords = ["C", "expression", "parser"] license = "Apache-2.0/MIT" repository = "https://github.com/jethrogb/rust-cexpr" [dependencies.nom] -version = "^4" -features = ["verbose-errors"] +version = "5" +features = ["std"] +default-features = false [dev-dependencies.clang-sys] -version = ">= 0.13.0, < 0.27.0" +version = ">= 0.13.0, < 0.29.0" +[badges.travis-ci] +repository = "jethrogb/rust-cexpr" diff --git a/third_party/rust/cexpr/bors.toml b/third_party/rust/cexpr/bors.toml new file mode 100644 index 0000000000..ca08e818bf --- /dev/null +++ b/third_party/rust/cexpr/bors.toml @@ -0,0 +1,3 @@ +status = [ + "continuous-integration/travis-ci/push", +] diff --git a/third_party/rust/cexpr/rustfmt.toml b/third_party/rust/cexpr/rustfmt.toml new file mode 100644 index 0000000000..32a9786fa1 --- /dev/null +++ b/third_party/rust/cexpr/rustfmt.toml @@ -0,0 +1 @@ +edition = "2018" diff --git a/third_party/rust/cexpr/src/expr.rs b/third_party/rust/cexpr/src/expr.rs index ccfac83bc5..b1fbfb2605 100644 --- a/third_party/rust/cexpr/src/expr.rs +++ b/third_party/rust/cexpr/src/expr.rs @@ -20,36 +20,47 @@ //! Use the `IdentifierParser` to substitute identifiers found in expressions. use std::collections::HashMap; -use std::ops::{AddAssign,BitAndAssign,BitOrAssign,BitXorAssign,DivAssign,MulAssign,RemAssign,ShlAssign,ShrAssign,SubAssign}; use std::num::Wrapping; - -use literal::{self,CChar}; -use token::{Token,Kind as TokenKind}; -use nom_crate::*; +use std::ops::{ + AddAssign, BitAndAssign, BitOrAssign, BitXorAssign, DivAssign, MulAssign, RemAssign, ShlAssign, + ShrAssign, SubAssign, +}; + +use crate::literal::{self, CChar}; +use crate::token::{Kind as TokenKind, Token}; +use crate::ToCexprResult; +use nom::branch::alt; +use nom::combinator::{complete, map, map_opt}; +use nom::multi::{fold_many0, many0, separated_list}; +use nom::sequence::{delimited, pair, preceded}; +use nom::*; /// Expression parser/evaluator that supports identifiers. #[derive(Debug)] pub struct IdentifierParser<'ident> { - identifiers: &'ident HashMap,EvalResult>, + identifiers: &'ident HashMap, EvalResult>, } -#[derive(Copy,Clone)] +#[derive(Copy, Clone)] struct PRef<'a>(&'a IdentifierParser<'a>); -pub type CResult<'a,R> = IResult<&'a [Token],R,::Error>; +/// A shorthand for the type of cexpr expression evaluation results. +pub type CResult<'a, R> = IResult<&'a [Token], R, crate::Error<&'a [Token]>>; /// The result of parsing a literal or evaluating an expression. -#[derive(Debug,Clone,PartialEq)] +#[derive(Debug, Clone, PartialEq)] +#[allow(missing_docs)] pub enum EvalResult { - Int(Wrapping), - Float(f64), - Char(CChar), - Str(Vec), - Invalid, + Int(Wrapping), + Float(f64), + Char(CChar), + Str(Vec), + Invalid, } macro_rules! result_opt ( (fn $n:ident: $e:ident -> $t:ty) => ( #[allow(dead_code)] + #[allow(clippy::wrong_self_convention)] fn $n(self) -> Option<$t> { if let EvalResult::$e(v) = self { Some(v) @@ -61,23 +72,24 @@ macro_rules! 
result_opt ( ); impl EvalResult { - result_opt!(fn as_int: Int -> Wrapping); - result_opt!(fn as_float: Float -> f64); - result_opt!(fn as_char: Char -> CChar); - result_opt!(fn as_str: Str -> Vec); - - fn as_numeric(self) -> Option { - match self { - EvalResult::Int(_) | EvalResult::Float(_) => Some(self), - _ => None, - } - } + result_opt!(fn as_int: Int -> Wrapping); + result_opt!(fn as_float: Float -> f64); + result_opt!(fn as_char: Char -> CChar); + result_opt!(fn as_str: Str -> Vec); + + #[allow(clippy::wrong_self_convention)] + fn as_numeric(self) -> Option { + match self { + EvalResult::Int(_) | EvalResult::Float(_) => Some(self), + _ => None, + } + } } impl From> for EvalResult { - fn from(s: Vec) -> EvalResult { - EvalResult::Str(s) - } + fn from(s: Vec) -> EvalResult { + EvalResult::Str(s) + } } // =========================================== @@ -85,89 +97,76 @@ impl From> for EvalResult { // =========================================== macro_rules! exact_token ( - ($i:expr, $k:ident, $c:expr) => ({ - if $i.is_empty() { - let res: CResult<&[u8]> = Err(::nom_crate::Err::Incomplete(Needed::Size($c.len()))); + ($k:ident, $c:expr) => ({ + move |input: &[Token]| { + if input.is_empty() { + let res: CResult<'_, &[u8]> = Err(crate::nom::Err::Incomplete(Needed::Size($c.len()))); res } else { - if $i[0].kind==TokenKind::$k && &$i[0].raw[..]==$c { - Ok((&$i[1..], &$i[0].raw[..])) + if input[0].kind==TokenKind::$k && &input[0].raw[..]==$c { + Ok((&input[1..], &input[0].raw[..])) } else { - Err(::nom_crate::Err::Error(error_position!($i, ErrorKind::Custom(::Error::ExactToken(TokenKind::$k,$c))))) + Err(crate::nom::Err::Error((input, crate::ErrorKind::ExactToken(TokenKind::$k,$c)).into())) } } + } }); ); macro_rules! typed_token ( - ($i:expr, $k:ident) => ({ - if $i.is_empty() { - let res: CResult<&[u8]> = Err(::nom_crate::Err::Incomplete(Needed::Size(1))); + ($k:ident) => ({ + move |input: &[Token]| { + if input.is_empty() { + let res: CResult<'_, &[u8]> = Err(nom::Err::Incomplete(Needed::Size(1))); res } else { - if $i[0].kind==TokenKind::$k { - Ok((&$i[1..], &$i[0].raw[..])) + if input[0].kind==TokenKind::$k { + Ok((&input[1..], &input[0].raw[..])) } else { - Err(Err::Error(error_position!($i, ErrorKind::Custom(::Error::TypedToken(TokenKind::$k))))) + Err(crate::nom::Err::Error((input, crate::ErrorKind::TypedToken(TokenKind::$k)).into())) } } + } }); ); -#[allow(unused_macros)] -macro_rules! any_token ( - ($i:expr,) => ({ - if $i.is_empty() { - let res: CResult<&Token> = Err(::nom_crate::Err::Incomplete(Needed::Size(1))); - res - } else { - Ok((&$i[1..], &$i[0])) - } - }); -); - -macro_rules! p ( - ($i:expr, $c:expr) => (exact_token!($i,Punctuation,$c.as_bytes())) -); +#[allow(dead_code)] +fn any_token(input: &[Token]) -> CResult<'_, &Token> { + if input.is_empty() { + Err(crate::nom::Err::Incomplete(Needed::Size(1))) + } else { + Ok((&input[1..], &input[0])) + } +} -macro_rules! 
one_of_punctuation ( - ($i:expr, $c:expr) => ({ - if $i.is_empty() { - let min = $c.iter().map(|opt|opt.len()).min().expect("at least one option"); - let res: CResult<&[u8]> = Err(::nom_crate::Err::Incomplete(Needed::Size(min))); - res - } else { - if $i[0].kind==TokenKind::Punctuation && $c.iter().any(|opt|opt.as_bytes()==&$i[0].raw[..]) { - Ok((&$i[1..], &$i[0].raw[..])) - } else { - const VALID_VALUES: &'static [&'static str] = &$c; - Err(Err::Error(error_position!($i, ErrorKind::Custom(::Error::ExactTokens(TokenKind::Punctuation,VALID_VALUES))))) - } - } - }); -); +fn p(c: &'static str) -> impl Fn(&[Token]) -> CResult<'_, &[u8]> { + exact_token!(Punctuation, c.as_bytes()) +} -/// equivalent to nom's complete! macro, but adds the custom error type -#[macro_export] -macro_rules! comp ( - ($i:expr, $submac:ident!( $($args:tt)* )) => ( - { - use ::nom_crate::lib::std::result::Result::*; - use ::nom_crate::{Err,ErrorKind}; - - let i_ = $i.clone(); - match $submac!(i_, $($args)*) { - Err(Err::Incomplete(_)) => { - Err(Err::Error(error_position!($i, ErrorKind::Complete::<::Error>))) - }, - rest => rest - } - } - ); - ($i:expr, $f:expr) => ( - comp!($i, call!($f)); - ); -); +fn one_of_punctuation(c: &'static [&'static str]) -> impl Fn(&[Token]) -> CResult<'_, &[u8]> { + move |input| { + if input.is_empty() { + let min = c + .iter() + .map(|opt| opt.len()) + .min() + .expect("at least one option"); + Err(crate::nom::Err::Incomplete(Needed::Size(min))) + } else if input[0].kind == TokenKind::Punctuation + && c.iter().any(|opt| opt.as_bytes() == &input[0].raw[..]) + { + Ok((&input[1..], &input[0].raw[..])) + } else { + Err(crate::nom::Err::Error( + ( + input, + crate::ErrorKind::ExactTokens(TokenKind::Punctuation, c), + ) + .into(), + )) + } + } +} // ================================================== // ============= Numeric expressions ================ @@ -175,240 +174,242 @@ macro_rules! 
comp ( impl<'a> AddAssign<&'a EvalResult> for EvalResult { fn add_assign(&mut self, rhs: &'a EvalResult) { - use self::EvalResult::*; - *self=match (&*self,rhs) { - (&Int(a), &Int(b)) => Int(a+b), - (&Float(a),&Int(b)) => Float(a+(b.0 as f64)), - (&Int(a), &Float(b)) => Float(a.0 as f64+b), - (&Float(a),&Float(b)) => Float(a+b), - _ => Invalid - }; - } + use self::EvalResult::*; + *self = match (&*self, rhs) { + (&Int(a), &Int(b)) => Int(a + b), + (&Float(a), &Int(b)) => Float(a + (b.0 as f64)), + (&Int(a), &Float(b)) => Float(a.0 as f64 + b), + (&Float(a), &Float(b)) => Float(a + b), + _ => Invalid, + }; + } } impl<'a> BitAndAssign<&'a EvalResult> for EvalResult { fn bitand_assign(&mut self, rhs: &'a EvalResult) { - use self::EvalResult::*; - *self=match (&*self,rhs) { - (&Int(a),&Int(b)) => Int(a&b), - _ => Invalid - }; } + use self::EvalResult::*; + *self = match (&*self, rhs) { + (&Int(a), &Int(b)) => Int(a & b), + _ => Invalid, + }; + } } impl<'a> BitOrAssign<&'a EvalResult> for EvalResult { fn bitor_assign(&mut self, rhs: &'a EvalResult) { - use self::EvalResult::*; - *self=match (&*self,rhs) { - (&Int(a),&Int(b)) => Int(a|b), - _ => Invalid - }; - } + use self::EvalResult::*; + *self = match (&*self, rhs) { + (&Int(a), &Int(b)) => Int(a | b), + _ => Invalid, + }; + } } impl<'a> BitXorAssign<&'a EvalResult> for EvalResult { fn bitxor_assign(&mut self, rhs: &'a EvalResult) { - use self::EvalResult::*; - *self=match (&*self,rhs) { - (&Int(a),&Int(b)) => Int(a^b), - _ => Invalid - }; - } + use self::EvalResult::*; + *self = match (&*self, rhs) { + (&Int(a), &Int(b)) => Int(a ^ b), + _ => Invalid, + }; + } } impl<'a> DivAssign<&'a EvalResult> for EvalResult { fn div_assign(&mut self, rhs: &'a EvalResult) { - use self::EvalResult::*; - *self=match (&*self,rhs) { - (&Int(a), &Int(b)) => Int(a/b), - (&Float(a),&Int(b)) => Float(a/(b.0 as f64)), - (&Int(a), &Float(b)) => Float(a.0 as f64/b), - (&Float(a),&Float(b)) => Float(a/b), - _ => Invalid - }; - } + use self::EvalResult::*; + *self = match (&*self, rhs) { + (&Int(a), &Int(b)) => Int(a / b), + (&Float(a), &Int(b)) => Float(a / (b.0 as f64)), + (&Int(a), &Float(b)) => Float(a.0 as f64 / b), + (&Float(a), &Float(b)) => Float(a / b), + _ => Invalid, + }; + } } impl<'a> MulAssign<&'a EvalResult> for EvalResult { fn mul_assign(&mut self, rhs: &'a EvalResult) { - use self::EvalResult::*; - *self=match (&*self,rhs) { - (&Int(a), &Int(b)) => Int(a*b), - (&Float(a),&Int(b)) => Float(a*(b.0 as f64)), - (&Int(a), &Float(b)) => Float(a.0 as f64*b), - (&Float(a),&Float(b)) => Float(a*b), - _ => Invalid - }; - } + use self::EvalResult::*; + *self = match (&*self, rhs) { + (&Int(a), &Int(b)) => Int(a * b), + (&Float(a), &Int(b)) => Float(a * (b.0 as f64)), + (&Int(a), &Float(b)) => Float(a.0 as f64 * b), + (&Float(a), &Float(b)) => Float(a * b), + _ => Invalid, + }; + } } impl<'a> RemAssign<&'a EvalResult> for EvalResult { fn rem_assign(&mut self, rhs: &'a EvalResult) { - use self::EvalResult::*; - *self=match (&*self,rhs) { - (&Int(a), &Int(b)) => Int(a%b), - (&Float(a),&Int(b)) => Float(a%(b.0 as f64)), - (&Int(a), &Float(b)) => Float(a.0 as f64%b), - (&Float(a),&Float(b)) => Float(a%b), - _ => Invalid - }; - } + use self::EvalResult::*; + *self = match (&*self, rhs) { + (&Int(a), &Int(b)) => Int(a % b), + (&Float(a), &Int(b)) => Float(a % (b.0 as f64)), + (&Int(a), &Float(b)) => Float(a.0 as f64 % b), + (&Float(a), &Float(b)) => Float(a % b), + _ => Invalid, + }; + } } impl<'a> ShlAssign<&'a EvalResult> for EvalResult { fn shl_assign(&mut self, 
rhs: &'a EvalResult) { - use self::EvalResult::*; - *self=match (&*self,rhs) { - (&Int(a),&Int(b)) => Int(a<<(b.0 as usize)), - _ => Invalid - }; - } + use self::EvalResult::*; + *self = match (&*self, rhs) { + (&Int(a), &Int(b)) => Int(a << (b.0 as usize)), + _ => Invalid, + }; + } } impl<'a> ShrAssign<&'a EvalResult> for EvalResult { fn shr_assign(&mut self, rhs: &'a EvalResult) { - use self::EvalResult::*; - *self=match (&*self,rhs) { - (&Int(a),&Int(b)) => Int(a>>(b.0 as usize)), - _ => Invalid - }; - } + use self::EvalResult::*; + *self = match (&*self, rhs) { + (&Int(a), &Int(b)) => Int(a >> (b.0 as usize)), + _ => Invalid, + }; + } } impl<'a> SubAssign<&'a EvalResult> for EvalResult { fn sub_assign(&mut self, rhs: &'a EvalResult) { - use self::EvalResult::*; - *self=match (&*self,rhs) { - (&Int(a), &Int(b)) => Int(a-b), - (&Float(a),&Int(b)) => Float(a-(b.0 as f64)), - (&Int(a), &Float(b)) => Float(a.0 as f64-b), - (&Float(a),&Float(b)) => Float(a-b), - _ => Invalid - }; - } + use self::EvalResult::*; + *self = match (&*self, rhs) { + (&Int(a), &Int(b)) => Int(a - b), + (&Float(a), &Int(b)) => Float(a - (b.0 as f64)), + (&Int(a), &Float(b)) => Float(a.0 as f64 - b), + (&Float(a), &Float(b)) => Float(a - b), + _ => Invalid, + }; + } } -fn unary_op(input: (&[u8],EvalResult)) -> Option { - use self::EvalResult::*; - assert_eq!(input.0.len(),1); - match (input.0[0],input.1) { - (b'+',i) => Some(i), - (b'-',Int(i)) => Some(Int(Wrapping(i.0.wrapping_neg()))), // impl Neg for Wrapping not until rust 1.10... - (b'-',Float(i)) => Some(Float(-i)), - (b'-',_) => unreachable!("non-numeric unary op"), - (b'~',Int(i)) => Some(Int(!i)), - (b'~',Float(_)) => None, - (b'~',_) => unreachable!("non-numeric unary op"), - _ => unreachable!("invalid unary op"), - } +fn unary_op(input: (&[u8], EvalResult)) -> Option { + use self::EvalResult::*; + assert_eq!(input.0.len(), 1); + match (input.0[0], input.1) { + (b'+', i) => Some(i), + (b'-', Int(i)) => Some(Int(Wrapping(i.0.wrapping_neg()))), // impl Neg for Wrapping not until rust 1.10... + (b'-', Float(i)) => Some(Float(-i)), + (b'-', _) => unreachable!("non-numeric unary op"), + (b'~', Int(i)) => Some(Int(!i)), + (b'~', Float(_)) => None, + (b'~', _) => unreachable!("non-numeric unary op"), + _ => unreachable!("invalid unary op"), + } } -macro_rules! 
numeric ( - ($i:expr, $submac:ident!( $($args:tt)* )) => (map_opt!($i,$submac!($($args)*),EvalResult::as_numeric)); - ($i:expr, $f:expr ) => (map_opt!($i,call!($f),EvalResult::as_numeric)); -); +fn numeric, F>( + f: F, +) -> impl Fn(I) -> nom::IResult +where + F: Fn(I) -> nom::IResult, +{ + nom::combinator::map_opt(f, EvalResult::as_numeric) +} impl<'a> PRef<'a> { - method!(unary,&[Token],EvalResult,::Error>, mut self, - alt!( - delimited!(p!("("),call_m!(self.numeric_expr),p!(")")) | - numeric!(call_m!(self.literal)) | - numeric!(call_m!(self.identifier)) | - map_opt!(pair!(one_of_punctuation!(["+", "-", "~"]),call_m!(self.unary)),unary_op) - ) - ); - - method!(mul_div_rem,&[Token],EvalResult,::Error>, mut self, - do_parse!( - acc: call_m!(self.unary) >> - res: fold_many0!( - pair!(comp!(one_of_punctuation!(["*", "/", "%"])), call_m!(self.unary)), - acc, - |mut acc, (op, val): (&[u8], EvalResult)| { - match op[0] as char { - '*' => acc *= &val, - '/' => acc /= &val, - '%' => acc %= &val, - _ => unreachable!() - }; - acc - } - ) >> (res) - ) - ); - - method!(add_sub,&[Token],EvalResult,::Error>, mut self, - do_parse!( - acc: call_m!(self.mul_div_rem) >> - res: fold_many0!( - pair!(comp!(one_of_punctuation!(["+", "-"])), call_m!(self.mul_div_rem)), - acc, - |mut acc, (op, val): (&[u8], EvalResult)| { - match op[0] as char { - '+' => acc += &val, - '-' => acc -= &val, - _ => unreachable!() - }; - acc - } - ) >> (res) - ) - ); - - method!(shl_shr,&[Token],EvalResult,::Error>, mut self, - numeric!(do_parse!( - acc: call_m!(self.add_sub) >> - res: fold_many0!( - pair!(comp!(one_of_punctuation!(["<<", ">>"])), call_m!(self.add_sub)), - acc, - |mut acc, (op, val): (&[u8], EvalResult)| { - match op { - b"<<" => acc <<= &val, - b">>" => acc >>= &val, - _ => unreachable!() - }; - acc - } - ) >> (res) - )) - ); - - method!(and,&[Token],EvalResult,::Error>, mut self, - numeric!(do_parse!( - acc: call_m!(self.shl_shr) >> - res: fold_many0!( - preceded!(comp!(p!("&")), call_m!(self.shl_shr)), - acc, - |mut acc, val: EvalResult| { - acc &= &val; - acc - } - ) >> (res) - )) - ); - - method!(xor,&[Token],EvalResult,::Error>, mut self, - numeric!(do_parse!( - acc: call_m!(self.and) >> - res: fold_many0!( - preceded!(comp!(p!("^")), call_m!(self.and)), - acc, - |mut acc, val: EvalResult| { - acc ^= &val; - acc - } - ) >> (res) - )) - ); - - method!(or,&[Token],EvalResult,::Error>, mut self, - numeric!(do_parse!( - acc: call_m!(self.xor) >> - res: fold_many0!( - preceded!(comp!(p!("|")), call_m!(self.xor)), - acc, - |mut acc, val: EvalResult| { - acc |= &val; - acc - } - ) >> (res) - )) - ); - - #[inline(always)] - fn numeric_expr(self, input: &[Token]) -> (Self,CResult) { - self.or(input) - } + fn unary(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { + alt(( + delimited(p("("), |i| self.numeric_expr(i), p(")")), + numeric(|i| self.literal(i)), + numeric(|i| self.identifier(i)), + map_opt( + pair(one_of_punctuation(&["+", "-", "~"][..]), |i| self.unary(i)), + unary_op, + ), + ))(input) + } + + fn mul_div_rem(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { + let (input, acc) = self.unary(input)?; + fold_many0( + pair(complete(one_of_punctuation(&["*", "/", "%"][..])), |i| { + self.unary(i) + }), + acc, + |mut acc, (op, val): (&[u8], EvalResult)| { + match op[0] as char { + '*' => acc *= &val, + '/' => acc /= &val, + '%' => acc %= &val, + _ => unreachable!(), + }; + acc + }, + )(input) + } + + fn add_sub(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { + let (input, acc) = 
self.mul_div_rem(input)?; + fold_many0( + pair(complete(one_of_punctuation(&["+", "-"][..])), |i| { + self.mul_div_rem(i) + }), + acc, + |mut acc, (op, val): (&[u8], EvalResult)| { + match op[0] as char { + '+' => acc += &val, + '-' => acc -= &val, + _ => unreachable!(), + }; + acc + }, + )(input) + } + + fn shl_shr(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { + let (input, acc) = self.add_sub(input)?; + numeric(fold_many0( + pair(complete(one_of_punctuation(&["<<", ">>"][..])), |i| { + self.add_sub(i) + }), + acc, + |mut acc, (op, val): (&[u8], EvalResult)| { + match op { + b"<<" => acc <<= &val, + b">>" => acc >>= &val, + _ => unreachable!(), + }; + acc + }, + ))(input) + } + + fn and(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { + let (input, acc) = self.shl_shr(input)?; + numeric(fold_many0( + preceded(complete(p("&")), |i| self.shl_shr(i)), + acc, + |mut acc, val: EvalResult| { + acc &= &val; + acc + }, + ))(input) + } + + fn xor(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { + let (input, acc) = self.and(input)?; + numeric(fold_many0( + preceded(complete(p("^")), |i| self.and(i)), + acc, + |mut acc, val: EvalResult| { + acc ^= &val; + acc + }, + ))(input) + } + + fn or(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { + let (input, acc) = self.xor(input)?; + numeric(fold_many0( + preceded(complete(p("|")), |i| self.xor(i)), + acc, + |mut acc, val: EvalResult| { + acc |= &val; + acc + }, + ))(input) + } + + #[inline(always)] + fn numeric_expr(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { + self.or(input) + } } // ======================================================= @@ -416,126 +417,147 @@ impl<'a> PRef<'a> { // ======================================================= impl<'a> PRef<'a> { - fn identifier(self, input: &[Token]) -> (Self,CResult) { - (self,match input.split_first() { - None => - Err(Err::Incomplete(Needed::Size(1))), - Some((&Token{kind:TokenKind::Identifier,ref raw},rest)) => { - if let Some(r) = self.identifiers.get(&raw[..]) { - Ok((rest, r.clone())) - } else { - Err(Err::Error(error_position!(input, ErrorKind::Custom(::Error::UnknownIdentifier)))) - } - }, - Some(_) => - Err(Err::Error(error_position!(input, ErrorKind::Custom(::Error::TypedToken(TokenKind::Identifier))))), - }) - } - - fn literal(self, input: &[Token]) -> (Self,CResult) { - (self,match input.split_first() { - None => - Err(Err::Incomplete(Needed::Size(1))), - Some((&Token{kind:TokenKind::Literal,ref raw},rest)) => - match literal::parse(raw) { - Ok((_,result)) => Ok((rest, result)), - _ => { - Err(Err::Error(error_position!(input, ErrorKind::Custom(::Error::InvalidLiteral)))) - }, - }, - Some(_) => - Err(Err::Error(error_position!(input, ErrorKind::Custom(::Error::TypedToken(TokenKind::Literal))))), - }) - } - - method!(string,&[Token],Vec,::Error>, mut self, - alt!( - map_opt!(call_m!(self.literal),EvalResult::as_str) | - map_opt!(call_m!(self.identifier),EvalResult::as_str) - ) - ); - - // "string1" "string2" etc... 
- method!(concat_str,&[Token],EvalResult,::Error>, mut self, - map!( - pair!(call_m!(self.string),many0!(comp!(call_m!(self.string)))), - |(first,v)| Vec::into_iter(v).fold(first,|mut s,elem|{Vec::extend_from_slice(&mut s,Vec::::as_slice(&elem));s}).into() - ) - ); - - method!(expr,&[Token],EvalResult,::Error>, mut self, - alt!( - call_m!(self.numeric_expr) | - delimited!(p!("("),call_m!(self.expr),p!(")")) | - call_m!(self.concat_str) | - call_m!(self.literal) | - call_m!(self.identifier) - ) - ); - - method!(macro_definition,&[Token],(&[u8],EvalResult),::Error>, mut self, - pair!(typed_token!(Identifier),call_m!(self.expr)) - ); + fn identifier(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { + match input.split_first() { + None => Err(Err::Incomplete(Needed::Size(1))), + Some(( + &Token { + kind: TokenKind::Identifier, + ref raw, + }, + rest, + )) => { + if let Some(r) = self.identifiers.get(&raw[..]) { + Ok((rest, r.clone())) + } else { + Err(Err::Error( + (input, crate::ErrorKind::UnknownIdentifier).into(), + )) + } + } + Some(_) => Err(Err::Error( + (input, crate::ErrorKind::TypedToken(TokenKind::Identifier)).into(), + )), + } + } + + fn literal(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { + match input.split_first() { + None => Err(Err::Incomplete(Needed::Size(1))), + Some(( + &Token { + kind: TokenKind::Literal, + ref raw, + }, + rest, + )) => match literal::parse(raw) { + Ok((_, result)) => Ok((rest, result)), + _ => Err(Err::Error((input, crate::ErrorKind::InvalidLiteral).into())), + }, + Some(_) => Err(Err::Error( + (input, crate::ErrorKind::TypedToken(TokenKind::Literal)).into(), + )), + } + } + + fn string(self, input: &'_ [Token]) -> CResult<'_, Vec> { + alt(( + map_opt(|i| self.literal(i), EvalResult::as_str), + map_opt(|i| self.identifier(i), EvalResult::as_str), + ))(input) + .to_cexpr_result() + } + + // "string1" "string2" etc... + fn concat_str(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { + map( + pair(|i| self.string(i), many0(complete(|i| self.string(i)))), + |(first, v)| { + Vec::into_iter(v) + .fold(first, |mut s, elem| { + Vec::extend_from_slice(&mut s, Vec::::as_slice(&elem)); + s + }) + .into() + }, + )(input) + .to_cexpr_result() + } + + fn expr(self, input: &'_ [Token]) -> CResult<'_, EvalResult> { + alt(( + |i| self.numeric_expr(i), + delimited(p("("), |i| self.expr(i), p(")")), + |i| self.concat_str(i), + |i| self.literal(i), + |i| self.identifier(i), + ))(input) + .to_cexpr_result() + } + + fn macro_definition(self, input: &'_ [Token]) -> CResult<'_, (&'_ [u8], EvalResult)> { + pair(typed_token!(Identifier), |i| self.expr(i))(input) + } } impl<'a> ::std::ops::Deref for PRef<'a> { - type Target=IdentifierParser<'a>; - fn deref(&self) -> &IdentifierParser<'a> { - self.0 - } + type Target = IdentifierParser<'a>; + fn deref(&self) -> &IdentifierParser<'a> { + self.0 + } } impl<'ident> IdentifierParser<'ident> { - fn as_ref(&self) -> PRef { - PRef(self) - } - - /// Create a new `IdentifierParser` with a set of known identifiers. When - /// a known identifier is encountered during parsing, it is substituted - /// for the value specified. - pub fn new(identifiers: &HashMap,EvalResult>) -> IdentifierParser { - IdentifierParser{identifiers:identifiers} - } - - /// Parse and evalute an expression of a list of tokens. - /// - /// Returns an error if the input is not a valid expression or if the token - /// stream contains comments, keywords or unknown identifiers. 
- pub fn expr<'a>(&self,input: &'a [Token]) -> CResult<'a,EvalResult> { - self.as_ref().expr(input).1 - } - - /// Parse and evaluate a macro definition from of a list of tokens. - /// - /// Returns the identifier for the macro and its replacement evaluated as an - /// expression. The input should not include `#define`. - /// - /// Returns an error if the replacement is not a valid expression, if called - /// on most function-like macros, or if the token stream contains comments, - /// keywords or unknown identifiers. - /// - /// N.B. This is intended to fail on function-like macros, but if it the - /// macro takes a single argument, the argument name is defined as an - /// identifier, and the macro otherwise parses as an expression, it will - /// return a result even on function-like macros. - /// - /// ```c - /// // will evaluate into IDENTIFIER - /// #define DELETE(IDENTIFIER) - /// // will evaluate into IDENTIFIER-3 - /// #define NEGATIVE_THREE(IDENTIFIER) -3 - /// ``` - pub fn macro_definition<'a>(&self,input: &'a [Token]) -> CResult<'a,(&'a [u8],EvalResult)> { - ::assert_full_parse(self.as_ref().macro_definition(input).1) - } + fn as_ref(&self) -> PRef<'_> { + PRef(self) + } + + /// Create a new `IdentifierParser` with a set of known identifiers. When + /// a known identifier is encountered during parsing, it is substituted + /// for the value specified. + pub fn new(identifiers: &HashMap, EvalResult>) -> IdentifierParser<'_> { + IdentifierParser { identifiers } + } + + /// Parse and evalute an expression of a list of tokens. + /// + /// Returns an error if the input is not a valid expression or if the token + /// stream contains comments, keywords or unknown identifiers. + pub fn expr<'a>(&self, input: &'a [Token]) -> CResult<'a, EvalResult> { + self.as_ref().expr(input) + } + + /// Parse and evaluate a macro definition from of a list of tokens. + /// + /// Returns the identifier for the macro and its replacement evaluated as an + /// expression. The input should not include `#define`. + /// + /// Returns an error if the replacement is not a valid expression, if called + /// on most function-like macros, or if the token stream contains comments, + /// keywords or unknown identifiers. + /// + /// N.B. This is intended to fail on function-like macros, but if it the + /// macro takes a single argument, the argument name is defined as an + /// identifier, and the macro otherwise parses as an expression, it will + /// return a result even on function-like macros. + /// + /// ```c + /// // will evaluate into IDENTIFIER + /// #define DELETE(IDENTIFIER) + /// // will evaluate into IDENTIFIER-3 + /// #define NEGATIVE_THREE(IDENTIFIER) -3 + /// ``` + pub fn macro_definition<'a>(&self, input: &'a [Token]) -> CResult<'a, (&'a [u8], EvalResult)> { + crate::assert_full_parse(self.as_ref().macro_definition(input)) + } } /// Parse and evalute an expression of a list of tokens. /// /// Returns an error if the input is not a valid expression or if the token /// stream contains comments, keywords or identifiers. -pub fn expr<'a>(input: &'a [Token]) -> CResult<'a,EvalResult> { - IdentifierParser::new(&HashMap::new()).expr(input) +pub fn expr(input: &[Token]) -> CResult<'_, EvalResult> { + IdentifierParser::new(&HashMap::new()).expr(input) } /// Parse and evaluate a macro definition from of a list of tokens. 
@@ -546,11 +568,10 @@ pub fn expr<'a>(input: &'a [Token]) -> CResult<'a,EvalResult> { /// Returns an error if the replacement is not a valid expression, if called /// on a function-like macro, or if the token stream contains comments, /// keywords or identifiers. -pub fn macro_definition<'a>(input: &'a [Token]) -> CResult<'a,(&'a [u8],EvalResult)> { - IdentifierParser::new(&HashMap::new()).macro_definition(input) +pub fn macro_definition(input: &[Token]) -> CResult<'_, (&'_ [u8], EvalResult)> { + IdentifierParser::new(&HashMap::new()).macro_definition(input) } -named_attr!( /// Parse a functional macro declaration from a list of tokens. /// /// Returns the identifier for the macro and the argument list (in order). The @@ -590,13 +611,13 @@ named_attr!( /// let (_, evaluated) = assert_full_parse(IdentifierParser::new(&idents).expr(expr)).unwrap(); /// assert_eq!(evaluated, EvalResult::Str(b"testsuffix".to_vec())); /// ``` -,pub fn_macro_declaration<&[Token],(&[u8],Vec<&[u8]>),::Error>, - pair!( - typed_token!(Identifier), - delimited!( - p!("("), - separated_list!(p!(","), typed_token!(Identifier)), - p!(")") - ) - ) -); +pub fn fn_macro_declaration(input: &[Token]) -> CResult<'_, (&[u8], Vec<&[u8]>)> { + pair( + typed_token!(Identifier), + delimited( + p("("), + separated_list(p(","), typed_token!(Identifier)), + p(")"), + ), + )(input) +} diff --git a/third_party/rust/cexpr/src/lib.rs b/third_party/rust/cexpr/src/lib.rs index 2799cb0ccb..84e1e83d0a 100644 --- a/third_party/rust/cexpr/src/lib.rs +++ b/third_party/rust/cexpr/src/lib.rs @@ -5,60 +5,136 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. - -#[macro_use] -extern crate nom as nom_crate; +//! A C expression parser and evaluator. +//! +//! This crate provides methods for parsing and evaluating simple C expressions. In general, the +//! crate can handle most arithmetic expressions that would appear in macros or the definition of +//! constants, as well as string and character constants. +//! +//! The main entry point for is [`token::parse`], which parses a byte string and returns its +//! evaluated value. +#![warn(rust_2018_idioms)] +#![warn(missing_docs)] +#![allow(deprecated)] pub mod nom { //! nom's result types, re-exported. - pub use nom_crate::{IResult,Needed,Err,ErrorKind}; + pub use nom::{error::ErrorKind, Err, IResult, Needed}; } -pub mod literal; pub mod expr; +pub mod literal; pub mod token; -use nom::*; - -#[derive(Debug)] /// Parsing errors specific to C parsing -pub enum Error { +#[derive(Debug)] +pub enum ErrorKind { /// Expected the specified token - ExactToken(token::Kind,&'static [u8]), + ExactToken(token::Kind, &'static [u8]), /// Expected one of the specified tokens - ExactTokens(token::Kind,&'static [&'static str]), + ExactTokens(token::Kind, &'static [&'static str]), /// Expected a token of the specified kind - TypedToken(token::Kind), + TypedToken(token::Kind), /// An unknown identifier was encountered - UnknownIdentifier, + UnknownIdentifier, /// An invalid literal was encountered. /// /// When encountered, this generally means a bug exists in the data that /// was passed in or the parsing logic. - InvalidLiteral, + InvalidLiteral, /// A full parse was requested, but data was left over after parsing finished. Partial, + /// An error occurred in an underlying nom parser. 
+ Parser(nom::ErrorKind), +} + +impl From for ErrorKind { + fn from(k: nom::ErrorKind) -> Self { + ErrorKind::Parser(k) + } +} + +impl From for ErrorKind { + fn from(_: u32) -> Self { + ErrorKind::InvalidLiteral + } } -impl From for Error { - fn from(_: u32) -> Self { - Error::InvalidLiteral - } +/// Parsing errors specific to C parsing. +/// +/// This is a superset of `(I, nom::ErrorKind)` that includes the additional errors specified by +/// [`ErrorKind`]. +#[derive(Debug)] +pub struct Error { + /// The remainder of the input stream at the time of the error. + pub input: I, + /// The error that occurred. + pub error: ErrorKind, +} + +impl From<(I, nom::ErrorKind)> for Error { + fn from(e: (I, nom::ErrorKind)) -> Self { + Self::from((e.0, ErrorKind::from(e.1))) + } +} + +impl From<(I, ErrorKind)> for Error { + fn from(e: (I, ErrorKind)) -> Self { + Self { + input: e.0, + error: e.1, + } + } } -macro_rules! identity ( - ($i:expr,$e:expr) => ($e); -); +impl ::nom::error::ParseError for Error { + fn from_error_kind(input: I, kind: nom::ErrorKind) -> Self { + Self { + input, + error: kind.into(), + } + } + + fn append(_: I, _: nom::ErrorKind, other: Self) -> Self { + other + } +} + +// in lieu of https://github.com/Geal/nom/issues/1010 +trait ToCexprResult { + fn to_cexpr_result(self) -> nom::IResult>; +} +impl ToCexprResult for nom::IResult +where + Error: From, +{ + fn to_cexpr_result(self) -> nom::IResult> { + match self { + Ok(v) => Ok(v), + Err(nom::Err::Incomplete(n)) => Err(nom::Err::Incomplete(n)), + Err(nom::Err::Error(e)) => Err(nom::Err::Error(e.into())), + Err(nom::Err::Failure(e)) => Err(nom::Err::Failure(e.into())), + } + } +} /// If the input result indicates a succesful parse, but there is data left, /// return an `Error::Partial` instead. -pub fn assert_full_parse(result: IResult<&[I],O,E>) -> IResult<&[I],O,::Error> - where Error: From { - match fix_error!((),::Error,identity!(result)) { - Ok((rem,output)) => if rem.len()==0 { - Ok((rem, output)) - } else { - Err(Err::Error(error_position!(rem, ErrorKind::Custom(::Error::Partial)))) - }, - r => r, - } +pub fn assert_full_parse<'i, I: 'i, O, E>( + result: nom::IResult<&'i [I], O, E>, +) -> nom::IResult<&'i [I], O, Error<&'i [I]>> +where + Error<&'i [I]>: From, +{ + match result.to_cexpr_result() { + Ok((rem, output)) => { + if rem.is_empty() { + Ok((rem, output)) + } else { + Err(nom::Err::Error((rem, ErrorKind::Partial).into())) + } + } + Err(nom::Err::Incomplete(n)) => Err(nom::Err::Incomplete(n)), + Err(nom::Err::Failure(e)) => Err(nom::Err::Failure(e)), + Err(nom::Err::Error(e)) => Err(nom::Err::Error(e)), + } } diff --git a/third_party/rust/cexpr/src/literal.rs b/third_party/rust/cexpr/src/literal.rs index d75241eff6..39f07beedf 100644 --- a/third_party/rust/cexpr/src/literal.rs +++ b/third_party/rust/cexpr/src/literal.rs @@ -6,7 +6,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. //! Parsing C literals from byte slices. -//! +//! //! This will parse a representation of a C literal into a Rust type. //! //! # characters @@ -37,235 +37,325 @@ //! supported. 
use std::char; -use std::str::{self,FromStr}; +use std::str::{self, FromStr}; -use nom_crate::*; +use nom::branch::alt; +use nom::bytes::complete::is_not; +use nom::bytes::complete::tag; +use nom::character::complete::{char, one_of}; +use nom::combinator::{complete, map, map_opt, opt, recognize}; +use nom::multi::{fold_many0, many0, many1, many_m_n}; +use nom::sequence::{delimited, pair, preceded, terminated, tuple}; +use nom::*; -use expr::EvalResult; +use crate::expr::EvalResult; +use crate::ToCexprResult; -#[derive(Debug,Copy,Clone,PartialEq,Eq)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] /// Representation of a C character pub enum CChar { - /// A character that can be represented as a `char` - Char(char), - /// Any other character (8-bit characters, unicode surrogates, etc.) - Raw(u64), + /// A character that can be represented as a `char` + Char(char), + /// Any other character (8-bit characters, unicode surrogates, etc.) + Raw(u64), } impl From for CChar { - fn from(i: u8) -> CChar { - match i { - 0 ... 0x7f => CChar::Char(i as u8 as char), - _ => CChar::Raw(i as u64), - } - } + fn from(i: u8) -> CChar { + match i { + 0..=0x7f => CChar::Char(i as u8 as char), + _ => CChar::Raw(i as u64), + } + } } // A non-allocating version of this would be nice... impl Into> for CChar { - fn into(self) -> Vec { - match self { - CChar::Char(c) => { - let mut s=String::with_capacity(4); - s.extend(&[c]); - s.into_bytes() - } - CChar::Raw(i) => { - let mut v=Vec::with_capacity(1); - v.push(i as u8); - v - } - } - } + fn into(self) -> Vec { + match self { + CChar::Char(c) => { + let mut s = String::with_capacity(4); + s.extend(&[c]); + s.into_bytes() + } + CChar::Raw(i) => { + let mut v = Vec::with_capacity(1); + v.push(i as u8); + v + } + } + } } /// ensures the child parser consumes the whole input -#[macro_export] -macro_rules! full ( - ($i: expr, $submac:ident!( $($args:tt)* )) => ( - { - use ::nom_crate::lib::std::result::Result::*; - let res = $submac!($i, $($args)*); - match res { - Ok((i, o)) => if i.len() == 0 { - Ok((i, o)) - } else { - Err(::nom_crate::Err::Error(error_position!(i, ::nom_crate::ErrorKind::Custom(42)))) - }, - r => r, - } - } - ); - ($i:expr, $f:ident) => ( - full!($i, call!($f)); - ); -); - -// ==================================================== -// ======== macros that shouldn't be necessary ======== -// ==================================================== - -macro_rules! force_type ( - ($input:expr,IResult<$i:ty,$o:ty,$e:ty>) => (Err::<($i,$o),Err<$i,$e>>(::nom_crate::Err::Error(error_position!($input, ErrorKind::Fix)))) -); - +pub fn full, F>( + f: F, +) -> impl Fn(I) -> nom::IResult +where + I: nom::InputLength, + F: Fn(I) -> nom::IResult, +{ + move |input| { + let res = f(input); + match res { + Ok((i, o)) => { + if i.input_len() == 0 { + Ok((i, o)) + } else { + Err(nom::Err::Error((i, nom::error::ErrorKind::Complete.into()))) + } + } + r => r, + } + } +} // ================================= // ======== matching digits ======== // ================================= -macro_rules! byte ( - ($i:expr, $($p: pat)|* ) => ({ - match $i.split_first() { - $(Some((&c @ $p,rest)))|* => Ok::<(&[_],u8),::nom_crate::Err<&[_],u32>>((rest,c)), - Some(_) => Err(::nom_crate::Err::Error(error_position!($i, ErrorKind::OneOf))), - None => Err(::nom_crate::Err::Incomplete(Needed::Size(1))), - } - }) -); +macro_rules! 
byte { + ($($p: pat)|* ) => {{ + fn parser(i: &[u8]) -> crate::nom::IResult<&[u8], u8> { + match i.split_first() { + $(Some((&c @ $p,rest)))|* => Ok((rest,c)), + Some(_) => Err(nom::Err::Error((i, nom::error::ErrorKind::OneOf))), + None => Err(nom::Err::Incomplete(Needed::Size(1))), + } + } + + parser + }} +} + +fn binary(i: &[u8]) -> nom::IResult<&[u8], u8> { + byte!(b'0'..=b'1')(i) +} + +fn octal(i: &[u8]) -> nom::IResult<&[u8], u8> { + byte!(b'0'..=b'7')(i) +} -named!(binary,byte!(b'0' ... b'1')); -named!(octal,byte!(b'0' ... b'7')); -named!(decimal,byte!(b'0' ... b'9')); -named!(hexadecimal,byte!(b'0' ... b'9' | b'a' ... b'f' | b'A' ... b'F')); +fn decimal(i: &[u8]) -> nom::IResult<&[u8], u8> { + byte!(b'0'..=b'9')(i) +} +fn hexadecimal(i: &[u8]) -> nom::IResult<&[u8], u8> { + byte!(b'0' ..= b'9' | b'a' ..= b'f' | b'A' ..= b'F')(i) +} // ======================================== // ======== characters and strings ======== // ======================================== fn escape2char(c: char) -> CChar { - CChar::Char(match c { - 'a' => '\x07', - 'b' => '\x08', - 'f' => '\x0c', - 'n' => '\n', - 'r' => '\r', - 't' => '\t', - 'v' => '\x0b', - _ => unreachable!("invalid escape {}",c) - }) + CChar::Char(match c { + 'a' => '\x07', + 'b' => '\x08', + 'f' => '\x0c', + 'n' => '\n', + 'r' => '\r', + 't' => '\t', + 'v' => '\x0b', + _ => unreachable!("invalid escape {}", c), + }) } fn c_raw_escape(n: Vec, radix: u32) -> Option { - str::from_utf8(&n).ok() - .and_then(|i|u64::from_str_radix(i,radix).ok()) - .map(|i|match i { - 0 ... 0x7f => CChar::Char(i as u8 as char), - _ => CChar::Raw(i), - }) + str::from_utf8(&n) + .ok() + .and_then(|i| u64::from_str_radix(i, radix).ok()) + .map(|i| match i { + 0..=0x7f => CChar::Char(i as u8 as char), + _ => CChar::Raw(i), + }) } fn c_unicode_escape(n: Vec) -> Option { - str::from_utf8(&n).ok() - .and_then(|i|u32::from_str_radix(i,16).ok()) - .and_then(char::from_u32) - .map(CChar::Char) + str::from_utf8(&n) + .ok() + .and_then(|i| u32::from_str_radix(i, 16).ok()) + .and_then(char::from_u32) + .map(CChar::Char) +} + +fn escaped_char(i: &[u8]) -> nom::IResult<&[u8], CChar> { + preceded( + char('\\'), + alt(( + map(one_of(r#"'"?\"#), CChar::Char), + map(one_of("abfnrtv"), escape2char), + map_opt(many_m_n(1, 3, octal), |v| c_raw_escape(v, 8)), + map_opt(preceded(char('x'), many1(hexadecimal)), |v| { + c_raw_escape(v, 16) + }), + map_opt( + preceded(char('u'), many_m_n(4, 4, hexadecimal)), + c_unicode_escape, + ), + map_opt( + preceded(char('U'), many_m_n(8, 8, hexadecimal)), + c_unicode_escape, + ), + )), + )(i) } -named!(escaped_char, - preceded!(complete!(char!('\\')),alt_complete!( - map!(one_of!(r#"'"?\"#),CChar::Char) | - map!(one_of!("abfnrtv"),escape2char) | - map_opt!(many_m_n!(1,3,octal),|v|c_raw_escape(v,8)) | - map_opt!(preceded!(char!('x'),many1!(hexadecimal)),|v|c_raw_escape(v,16)) | - map_opt!(preceded!(char!('u'),many_m_n!(4,4,hexadecimal)),c_unicode_escape) | - map_opt!(preceded!(char!('U'),many_m_n!(8,8,hexadecimal)),c_unicode_escape) - )) -); - -named!(c_width_prefix, - alt!( - tag!("u8") | - tag!("u") | - tag!("U") | - tag!("L") - ) -); - -named!(c_char, - delimited!( - terminated!(opt!(c_width_prefix),char!('\'')), - alt!( escaped_char | map!(byte!(0 ... 91 /* \=92 */ | 93 ... 
255),CChar::from) ), - char!('\'') - ) -); - -named!(c_string >, - delimited!( - alt!( preceded!(c_width_prefix,char!('"')) | char!('"') ), - fold_many0!( - alt!(map!(escaped_char, |c:CChar| c.into()) | map!(complete!(is_not!("\"")), |c: &[u8]| c.into())), - Vec::new(), - |mut v: Vec, res:Vec| { v.extend_from_slice(&res); v } - ), - char!('"') - ) -); +fn c_width_prefix(i: &[u8]) -> nom::IResult<&[u8], &[u8]> { + alt((tag("u8"), tag("u"), tag("U"), tag("L")))(i) +} + +fn c_char(i: &[u8]) -> nom::IResult<&[u8], CChar> { + delimited( + terminated(opt(c_width_prefix), char('\'')), + alt(( + escaped_char, + map(byte!(0 ..= 91 /* \=92 */ | 93 ..= 255), CChar::from), + )), + char('\''), + )(i) +} + +fn c_string(i: &[u8]) -> nom::IResult<&[u8], Vec> { + delimited( + alt((preceded(c_width_prefix, char('"')), char('"'))), + fold_many0( + alt(( + map(escaped_char, |c: CChar| c.into()), + map(is_not([b'\\', b'"']), |c: &[u8]| c.into()), + )), + Vec::new(), + |mut v: Vec, res: Vec| { + v.extend_from_slice(&res); + v + }, + ), + char('"'), + )(i) +} // ================================ // ======== parse integers ======== // ================================ fn c_int_radix(n: Vec, radix: u32) -> Option { - str::from_utf8(&n).ok() - .and_then(|i|u64::from_str_radix(i,radix).ok()) + str::from_utf8(&n) + .ok() + .and_then(|i| u64::from_str_radix(i, radix).ok()) } fn take_ul(input: &[u8]) -> IResult<&[u8], &[u8]> { - use ::nom_crate::InputTakeAtPosition; - - let r = input.split_at_position(|c| c != b'u' && c != b'U' && c != b'l' && c != b'L'); - match r { - Err(Err::Incomplete(_)) => Ok((&input[input.len()..], input)), - res => res, - } + let r = input.split_at_position(|c| c != b'u' && c != b'U' && c != b'l' && c != b'L'); + match r { + Err(Err::Incomplete(_)) => Ok((&input[input.len()..], input)), + res => res, + } } -named!(c_int, - map!(terminated!(alt_complete!( - map_opt!(preceded!(tag!("0x"),many1!(complete!(hexadecimal))),|v|c_int_radix(v,16)) | - map_opt!(preceded!(tag!("0b"),many1!(complete!(binary))),|v|c_int_radix(v,2)) | - map_opt!(preceded!(char!('0'),many1!(complete!(octal))),|v|c_int_radix(v,8)) | - map_opt!(many1!(complete!(decimal)),|v|c_int_radix(v,10)) | - force_type!(IResult<_,_,u32>) - ),opt!(take_ul)),|i|i as i64) -); +fn c_int(i: &[u8]) -> nom::IResult<&[u8], i64> { + map( + terminated( + alt(( + map_opt(preceded(tag("0x"), many1(complete(hexadecimal))), |v| { + c_int_radix(v, 16) + }), + map_opt(preceded(tag("0X"), many1(complete(hexadecimal))), |v| { + c_int_radix(v, 16) + }), + map_opt(preceded(tag("0b"), many1(complete(binary))), |v| { + c_int_radix(v, 2) + }), + map_opt(preceded(tag("0B"), many1(complete(binary))), |v| { + c_int_radix(v, 2) + }), + map_opt(preceded(char('0'), many1(complete(octal))), |v| { + c_int_radix(v, 8) + }), + map_opt(many1(complete(decimal)), |v| c_int_radix(v, 10)), + |input| Err(crate::nom::Err::Error((input, crate::nom::ErrorKind::Fix))), + )), + opt(take_ul), + ), + |i| i as i64, + )(i) +} // ============================== // ======== parse floats ======== // ============================== -named!(float_width,complete!(byte!(b'f' | b'l' | b'F' | b'L'))); -named!(float_exp<(Option,Vec)>,preceded!(byte!(b'e'|b'E'),pair!(opt!(byte!(b'-'|b'+')),many1!(complete!(decimal))))); +fn float_width(i: &[u8]) -> nom::IResult<&[u8], u8> { + nom::combinator::complete(byte!(b'f' | b'l' | b'F' | b'L'))(i) +} + +fn float_exp(i: &[u8]) -> nom::IResult<&[u8], (Option, Vec)> { + preceded( + byte!(b'e' | b'E'), + pair(opt(byte!(b'-' | b'+')), many1(complete(decimal))), + 
)(i) +} -named!(c_float, - map_opt!(alt!( - terminated!(recognize!(tuple!(many1!(complete!(decimal)),byte!(b'.'),many0!(complete!(decimal)))),opt!(float_width)) | - terminated!(recognize!(tuple!(many0!(complete!(decimal)),byte!(b'.'),many1!(complete!(decimal)))),opt!(float_width)) | - terminated!(recognize!(tuple!(many0!(complete!(decimal)),opt!(byte!(b'.')),many1!(complete!(decimal)),float_exp)),opt!(float_width)) | - terminated!(recognize!(tuple!(many1!(complete!(decimal)),opt!(byte!(b'.')),many0!(complete!(decimal)),float_exp)),opt!(float_width)) | - terminated!(recognize!(many1!(complete!(decimal))),float_width) - ),|v|str::from_utf8(v).ok().and_then(|i|f64::from_str(i).ok())) -); +fn c_float(i: &[u8]) -> nom::IResult<&[u8], f64> { + map_opt( + alt(( + terminated( + recognize(tuple(( + many1(complete(decimal)), + byte!(b'.'), + many0(complete(decimal)), + ))), + opt(float_width), + ), + terminated( + recognize(tuple(( + many0(complete(decimal)), + byte!(b'.'), + many1(complete(decimal)), + ))), + opt(float_width), + ), + terminated( + recognize(tuple(( + many0(complete(decimal)), + opt(byte!(b'.')), + many1(complete(decimal)), + float_exp, + ))), + opt(float_width), + ), + terminated( + recognize(tuple(( + many1(complete(decimal)), + opt(byte!(b'.')), + many0(complete(decimal)), + float_exp, + ))), + opt(float_width), + ), + terminated(recognize(many1(complete(decimal))), float_width), + )), + |v| str::from_utf8(v).ok().and_then(|i| f64::from_str(i).ok()), + )(i) +} // ================================ // ======== main interface ======== // ================================ -named!(one_literal<&[u8],EvalResult,::Error>, - fix_error!(::Error,alt_complete!( - map!(full!(c_char),EvalResult::Char) | - map!(full!(c_int),|i|EvalResult::Int(::std::num::Wrapping(i))) | - map!(full!(c_float),EvalResult::Float) | - map!(full!(c_string),EvalResult::Str) - )) -); +fn one_literal(input: &[u8]) -> nom::IResult<&[u8], EvalResult, crate::Error<&[u8]>> { + alt(( + map(full(c_char), EvalResult::Char), + map(full(c_int), |i| EvalResult::Int(::std::num::Wrapping(i))), + map(full(c_float), EvalResult::Float), + map(full(c_string), EvalResult::Str), + ))(input) + .to_cexpr_result() +} /// Parse a C literal. /// /// The input must contain exactly the representation of a single literal /// token, and in particular no whitespace or sign prefixes. -pub fn parse(input: &[u8]) -> IResult<&[u8],EvalResult,::Error> { - ::assert_full_parse(one_literal(input)) +pub fn parse(input: &[u8]) -> IResult<&[u8], EvalResult, crate::Error<&[u8]>> { + crate::assert_full_parse(one_literal(input)) } diff --git a/third_party/rust/cexpr/src/token.rs b/third_party/rust/cexpr/src/token.rs index f2bbca23c2..dbc5949cd4 100644 --- a/third_party/rust/cexpr/src/token.rs +++ b/third_party/rust/cexpr/src/token.rs @@ -9,32 +9,36 @@ //! //! This is designed to map onto a libclang CXToken. -#[derive(Debug,Copy,Clone,PartialEq,Eq)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +#[allow(missing_docs)] pub enum Kind { - Punctuation, - Keyword, - Identifier, - Literal, - Comment, + Punctuation, + Keyword, + Identifier, + Literal, + Comment, } -#[derive(Debug,Clone,PartialEq,Eq)] +/// A single token in a C expression. +#[derive(Debug, Clone, PartialEq, Eq)] pub struct Token { + /// The type of this token. pub kind: Kind, + /// The bytes that make up the token. 
pub raw: Box<[u8]>, } impl<'a> From<(Kind, &'a [u8])> for Token { - fn from((kind, value): (Kind, &'a [u8])) -> Token { - Token { - kind: kind, - raw: value.to_owned().into_boxed_slice() - } - } + fn from((kind, value): (Kind, &'a [u8])) -> Token { + Token { + kind, + raw: value.to_owned().into_boxed_slice(), + } + } } /// Remove all comment tokens from a vector of tokens pub fn remove_comments(v: &mut Vec) -> &mut Vec { - v.retain(|t|t.kind!=Kind::Comment); - v + v.retain(|t| t.kind != Kind::Comment); + v } diff --git a/third_party/rust/cexpr/tests/clang.rs b/third_party/rust/cexpr/tests/clang.rs index 322bf5ebb6..b2484f0778 100644 --- a/third_party/rust/cexpr/tests/clang.rs +++ b/third_party/rust/cexpr/tests/clang.rs @@ -8,234 +8,327 @@ extern crate cexpr; extern crate clang_sys; -use std::{ptr,mem,ffi,slice,char}; -use std::str::{self,FromStr}; use std::collections::HashMap; +use std::io::Write; +use std::str::{self, FromStr}; +use std::{char, ffi, mem, ptr, slice}; -use clang_sys::*; -use cexpr::token::Token; -use cexpr::expr::{IdentifierParser,EvalResult,fn_macro_declaration}; -use cexpr::literal::CChar; use cexpr::assert_full_parse; +use cexpr::expr::{fn_macro_declaration, EvalResult, IdentifierParser}; +use cexpr::literal::CChar; +use cexpr::token::Token; +use clang_sys::*; // main testing routine -fn test_definition(ident: Vec, tokens: &[Token], idents: &mut HashMap,EvalResult>) -> bool { - fn bytes_to_int(value: &[u8]) -> Option { - str::from_utf8(value).ok() - .map(|s|s.replace("n","-")) - .map(|s|s.replace("_","")) - .and_then(|v|i64::from_str(&v).ok()) - .map(::std::num::Wrapping) - .map(Int) - } - - use cexpr::expr::EvalResult::*; - - let display_name=String::from_utf8_lossy(&ident).into_owned(); - - let functional; - let test={ - // Split name such as Str_test_string into (Str,test_string) - let pos=ident.iter().position(|c|*c==b'_').expect(&format!("Invalid definition in testcase: {}",display_name)); - let mut expected=&ident[..pos]; - let mut value=&ident[(pos+1)..]; - - functional=expected==b"Fn"; - - if functional { - let ident=value; - let pos=ident.iter().position(|c|*c==b'_').expect(&format!("Invalid definition in testcase: {}",display_name)); - expected=&ident[..pos]; - value=&ident[(pos+1)..]; - } - - if expected==b"Str" { - Some(Str(value.to_owned())) - } else if expected==b"Int" { - bytes_to_int(value) - } else if expected==b"Float" { - str::from_utf8(value).ok().map(|s|s.replace("n","-").replace("p",".")).and_then(|v|f64::from_str(&v).ok()).map(Float) - } else if expected==b"CharRaw" { - str::from_utf8(value).ok().and_then(|v|u64::from_str(v).ok()).map(CChar::Raw).map(Char) - } else if expected==b"CharChar" { - str::from_utf8(value).ok().and_then(|v|u32::from_str(v).ok()).and_then(char::from_u32).map(CChar::Char).map(Char) - } else { - Some(Invalid) - }.expect(&format!("Invalid definition in testcase: {}",display_name)) - }; - - let result = if functional { - let mut fnidents; - let expr_tokens; - match fn_macro_declaration(&tokens) { - Ok((rest,(_,args))) => { - fnidents=idents.clone(); - expr_tokens=rest; - for arg in args { - let val = match test { - Int(_) => bytes_to_int(&arg), - Str(_) => Some(Str(arg.to_owned())), - _ => unimplemented!() - }.expect(&format!("Invalid argument in functional macro testcase: {}",display_name)); - fnidents.insert(arg.to_owned(), val); - } - }, - e => { - println!("Failed test for {}, unable to parse functional macro declaration: {:?}",display_name,e); - return false; - } - } - 
assert_full_parse(IdentifierParser::new(&fnidents).expr(&expr_tokens)) - } else { - IdentifierParser::new(idents).macro_definition(&tokens).map(|(i, (_,val))|(i, val)) - }; - - match result { - Ok((_,val)) => { - if val==test { - if let Some(_)=idents.insert(ident,val) { - panic!("Duplicate definition for testcase: {}",display_name); - } - true - } else { - println!("Failed test for {}, expected {:?}, got {:?}",display_name,test,val); - false - } - }, - e => { - if test==Invalid { - true - } else { - println!("Failed test for {}, expected {:?}, got {:?}",display_name,test,e); - false - } - } - } +fn test_definition( + ident: Vec, + tokens: &[Token], + idents: &mut HashMap, EvalResult>, +) -> bool { + fn bytes_to_int(value: &[u8]) -> Option { + str::from_utf8(value) + .ok() + .map(|s| s.replace("n", "-")) + .map(|s| s.replace("_", "")) + .and_then(|v| i64::from_str(&v).ok()) + .map(::std::num::Wrapping) + .map(Int) + } + + use cexpr::expr::EvalResult::*; + + let display_name = String::from_utf8_lossy(&ident).into_owned(); + + let functional; + let test = { + // Split name such as Str_test_string into (Str,test_string) + let pos = ident + .iter() + .position(|c| *c == b'_') + .expect(&format!("Invalid definition in testcase: {}", display_name)); + let mut expected = &ident[..pos]; + let mut value = &ident[(pos + 1)..]; + + functional = expected == b"Fn"; + + if functional { + let ident = value; + let pos = ident + .iter() + .position(|c| *c == b'_') + .expect(&format!("Invalid definition in testcase: {}", display_name)); + expected = &ident[..pos]; + value = &ident[(pos + 1)..]; + } + + if expected == b"Str" { + let mut splits = value.split(|c| *c == b'U'); + let mut s = Vec::with_capacity(value.len()); + s.extend_from_slice(splits.next().unwrap()); + for split in splits { + let (chr, rest) = split.split_at(6); + let chr = u32::from_str_radix(str::from_utf8(chr).unwrap(), 16).unwrap(); + write!(s, "{}", char::from_u32(chr).unwrap()).unwrap(); + s.extend_from_slice(rest); + } + Some(Str(s)) + } else if expected == b"Int" { + bytes_to_int(value) + } else if expected == b"Float" { + str::from_utf8(value) + .ok() + .map(|s| s.replace("n", "-").replace("p", ".")) + .and_then(|v| f64::from_str(&v).ok()) + .map(Float) + } else if expected == b"CharRaw" { + str::from_utf8(value) + .ok() + .and_then(|v| u64::from_str(v).ok()) + .map(CChar::Raw) + .map(Char) + } else if expected == b"CharChar" { + str::from_utf8(value) + .ok() + .and_then(|v| u32::from_str(v).ok()) + .and_then(char::from_u32) + .map(CChar::Char) + .map(Char) + } else { + Some(Invalid) + } + .expect(&format!("Invalid definition in testcase: {}", display_name)) + }; + + let result = if functional { + let mut fnidents; + let expr_tokens; + match fn_macro_declaration(&tokens) { + Ok((rest, (_, args))) => { + fnidents = idents.clone(); + expr_tokens = rest; + for arg in args { + let val = match test { + Int(_) => bytes_to_int(&arg), + Str(_) => Some(Str(arg.to_owned())), + _ => unimplemented!(), + } + .expect(&format!( + "Invalid argument in functional macro testcase: {}", + display_name + )); + fnidents.insert(arg.to_owned(), val); + } + } + e => { + println!( + "Failed test for {}, unable to parse functional macro declaration: {:?}", + display_name, e + ); + return false; + } + } + assert_full_parse(IdentifierParser::new(&fnidents).expr(&expr_tokens)) + } else { + IdentifierParser::new(idents) + .macro_definition(&tokens) + .map(|(i, (_, val))| (i, val)) + }; + + match result { + Ok((_, val)) => { + if val == test { + if let Some(_) = 
idents.insert(ident, val) { + panic!("Duplicate definition for testcase: {}", display_name); + } + true + } else { + println!( + "Failed test for {}, expected {:?}, got {:?}", + display_name, test, val + ); + false + } + } + e => { + if test == Invalid { + true + } else { + println!( + "Failed test for {}, expected {:?}, got {:?}", + display_name, test, e + ); + false + } + } + } } // support code for the clang lexer unsafe fn clang_str_to_vec(s: CXString) -> Vec { - let vec=ffi::CStr::from_ptr(clang_getCString(s)).to_bytes().to_owned(); - clang_disposeString(s); - vec + let vec = ffi::CStr::from_ptr(clang_getCString(s)) + .to_bytes() + .to_owned(); + clang_disposeString(s); + vec } #[allow(non_upper_case_globals)] unsafe fn token_clang_to_cexpr(tu: CXTranslationUnit, orig: &CXToken) -> Token { - Token { - kind:match clang_getTokenKind(*orig) { - CXToken_Comment => cexpr::token::Kind::Comment, - CXToken_Identifier => cexpr::token::Kind::Identifier, - CXToken_Keyword => cexpr::token::Kind::Keyword, - CXToken_Literal => cexpr::token::Kind::Literal, - CXToken_Punctuation => cexpr::token::Kind::Punctuation, - _ => panic!("invalid token kind: {:?}", *orig), - }, - raw:clang_str_to_vec(clang_getTokenSpelling(tu,*orig)).into_boxed_slice() - } + Token { + kind: match clang_getTokenKind(*orig) { + CXToken_Comment => cexpr::token::Kind::Comment, + CXToken_Identifier => cexpr::token::Kind::Identifier, + CXToken_Keyword => cexpr::token::Kind::Keyword, + CXToken_Literal => cexpr::token::Kind::Literal, + CXToken_Punctuation => cexpr::token::Kind::Punctuation, + _ => panic!("invalid token kind: {:?}", *orig), + }, + raw: clang_str_to_vec(clang_getTokenSpelling(tu, *orig)).into_boxed_slice(), + } } -extern "C" fn visit_children_thunk(cur: CXCursor, parent: CXCursor, closure: CXClientData) -> CXChildVisitResult - where F: FnMut(CXCursor,CXCursor) -> CXChildVisitResult +extern "C" fn visit_children_thunk( + cur: CXCursor, + parent: CXCursor, + closure: CXClientData, +) -> CXChildVisitResult +where + F: FnMut(CXCursor, CXCursor) -> CXChildVisitResult, { - unsafe{(&mut *(closure as *mut F))(cur,parent)} + unsafe { (&mut *(closure as *mut F))(cur, parent) } } unsafe fn visit_children(cursor: CXCursor, mut f: F) - where F: FnMut(CXCursor,CXCursor) -> CXChildVisitResult +where + F: FnMut(CXCursor, CXCursor) -> CXChildVisitResult, { - clang_visitChildren(cursor, visit_children_thunk:: as _, &mut f as *mut F as CXClientData); + clang_visitChildren( + cursor, + visit_children_thunk:: as _, + &mut f as *mut F as CXClientData, + ); } unsafe fn location_in_scope(r: CXSourceRange) -> bool { - let start=clang_getRangeStart(r); - let mut file=ptr::null_mut(); - clang_getSpellingLocation(start,&mut file,ptr::null_mut(),ptr::null_mut(),ptr::null_mut()); - clang_Location_isFromMainFile(start)!=0 - && clang_Location_isInSystemHeader(start)==0 - && file!=ptr::null_mut() + let start = clang_getRangeStart(r); + let mut file = ptr::null_mut(); + clang_getSpellingLocation( + start, + &mut file, + ptr::null_mut(), + ptr::null_mut(), + ptr::null_mut(), + ); + clang_Location_isFromMainFile(start) != 0 + && clang_Location_isInSystemHeader(start) == 0 + && file != ptr::null_mut() } /// tokenize_range_adjust can be used to work around LLVM bug 9069 /// https://bugs.llvm.org//show_bug.cgi?id=9069 -fn file_visit_macros, Vec)>(file: &str, tokenize_range_adjust: bool, mut visitor: F) { - unsafe { - let tu={ - let index=clang_createIndex(true as _, false as _); - let cfile=ffi::CString::new(file).unwrap(); - let mut tu=mem::uninitialized(); 
- assert!(clang_parseTranslationUnit2( - index, - cfile.as_ptr(), - [b"-std=c11\0".as_ptr() as *const ::std::os::raw::c_char].as_ptr(),1, - ptr::null_mut(),0, - CXTranslationUnit_DetailedPreprocessingRecord, - &mut tu - )==CXError_Success,"Failure reading test case {}",file); - tu - }; - visit_children(clang_getTranslationUnitCursor(tu),|cur,_parent| { - if cur.kind==CXCursor_MacroDefinition { - let mut range=clang_getCursorExtent(cur); - if !location_in_scope(range) { return CXChildVisit_Continue } - range.end_int_data-=if tokenize_range_adjust { 1 } else { 0 }; - let mut token_ptr=ptr::null_mut(); - let mut num=0; - clang_tokenize(tu,range,&mut token_ptr,&mut num); - if token_ptr!=ptr::null_mut() { - let tokens=slice::from_raw_parts(token_ptr,num as usize); - let tokens: Vec<_>=tokens.iter().filter_map(|t| - if clang_getTokenKind(*t)!=CXToken_Comment { - Some(token_clang_to_cexpr(tu,t)) - } else { - None - } - ).collect(); - clang_disposeTokens(tu,token_ptr,num); - visitor(clang_str_to_vec(clang_getCursorSpelling(cur)),tokens) - } - } - CXChildVisit_Continue - }); - clang_disposeTranslationUnit(tu); - }; +fn file_visit_macros, Vec)>( + file: &str, + tokenize_range_adjust: bool, + mut visitor: F, +) { + unsafe { + let tu = { + let index = clang_createIndex(true as _, false as _); + let cfile = ffi::CString::new(file).unwrap(); + let mut tu = mem::MaybeUninit::uninit(); + assert!( + clang_parseTranslationUnit2( + index, + cfile.as_ptr(), + [b"-std=c11\0".as_ptr() as *const ::std::os::raw::c_char].as_ptr(), + 1, + ptr::null_mut(), + 0, + CXTranslationUnit_DetailedPreprocessingRecord, + &mut *tu.as_mut_ptr() + ) == CXError_Success, + "Failure reading test case {}", + file + ); + tu.assume_init() + }; + visit_children(clang_getTranslationUnitCursor(tu), |cur, _parent| { + if cur.kind == CXCursor_MacroDefinition { + let mut range = clang_getCursorExtent(cur); + if !location_in_scope(range) { + return CXChildVisit_Continue; + } + range.end_int_data -= if tokenize_range_adjust { 1 } else { 0 }; + let mut token_ptr = ptr::null_mut(); + let mut num = 0; + clang_tokenize(tu, range, &mut token_ptr, &mut num); + if token_ptr != ptr::null_mut() { + let tokens = slice::from_raw_parts(token_ptr, num as usize); + let tokens: Vec<_> = tokens + .iter() + .filter_map(|t| { + if clang_getTokenKind(*t) != CXToken_Comment { + Some(token_clang_to_cexpr(tu, t)) + } else { + None + } + }) + .collect(); + clang_disposeTokens(tu, token_ptr, num); + visitor(clang_str_to_vec(clang_getCursorSpelling(cur)), tokens) + } + } + CXChildVisit_Continue + }); + clang_disposeTranslationUnit(tu); + }; } fn test_file(file: &str) -> bool { - let mut idents=HashMap::new(); - let mut all_succeeded=true; - file_visit_macros(file, fix_bug_9069(), |ident, tokens| all_succeeded&=test_definition(ident, &tokens, &mut idents)); - all_succeeded + let mut idents = HashMap::new(); + let mut all_succeeded = true; + file_visit_macros(file, fix_bug_9069(), |ident, tokens| { + all_succeeded &= test_definition(ident, &tokens, &mut idents) + }); + all_succeeded } fn fix_bug_9069() -> bool { - fn check_bug_9069() -> bool { - let mut token_sets = vec![]; - file_visit_macros("tests/input/test_llvm_bug_9069.h", false, |ident, tokens| { - assert_eq!(&ident, b"A"); - token_sets.push(tokens); - }); - assert_eq!(token_sets.len(), 2); - token_sets[0] != token_sets[1] - } + fn check_bug_9069() -> bool { + let mut token_sets = vec![]; + file_visit_macros( + "tests/input/test_llvm_bug_9069.h", + false, + |ident, tokens| { + assert_eq!(&ident, b"A"); + 
token_sets.push(tokens); + }, + ); + assert_eq!(token_sets.len(), 2); + token_sets[0] != token_sets[1] + } - use std::sync::{Once, ONCE_INIT}; - use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering}; + use std::sync::atomic::{AtomicBool, Ordering}; + use std::sync::Once; - static CHECK_FIX: Once = ONCE_INIT; - static FIX: AtomicBool = ATOMIC_BOOL_INIT; + static CHECK_FIX: Once = Once::new(); + static FIX: AtomicBool = AtomicBool::new(false); - CHECK_FIX.call_once(|| FIX.store(check_bug_9069(), Ordering::SeqCst)); + CHECK_FIX.call_once(|| FIX.store(check_bug_9069(), Ordering::SeqCst)); - FIX.load(Ordering::SeqCst) + FIX.load(Ordering::SeqCst) } macro_rules! test_file { - ($f:ident) => { - #[test] fn $f() { - assert!(test_file(concat!("tests/input/",stringify!($f),".h")),"test_file") - } - } + ($f:ident) => { + #[test] + fn $f() { + assert!( + test_file(concat!("tests/input/", stringify!($f), ".h")), + "test_file" + ) + } + }; } test_file!(floats); diff --git a/third_party/rust/cexpr/tests/input/int_unsigned.h b/third_party/rust/cexpr/tests/input/int_unsigned.h index e42a975701..6663dda3d6 100644 --- a/third_party/rust/cexpr/tests/input/int_unsigned.h +++ b/third_party/rust/cexpr/tests/input/int_unsigned.h @@ -3,6 +3,8 @@ #define Int_1 0b1 #define Int_2 0x2 #define Int_3 3L +#define Int_4 0X4 +#define Int_5 0B101 #define Int_63 077 #define Int_123 123 #define Int_124 124u diff --git a/third_party/rust/cexpr/tests/input/strings.h b/third_party/rust/cexpr/tests/input/strings.h index e30bb640e2..d01d409cbf 100644 --- a/third_party/rust/cexpr/tests/input/strings.h +++ b/third_party/rust/cexpr/tests/input/strings.h @@ -5,6 +5,11 @@ #define Str_concat u"con" L"cat" #define Str_concat_parens ("concat" U"_parens") #define Str_concat_identifier (Str_concat L"_identifier") +#define Str_hex_escape_all "\x68\x65\x78\x5f\x65\x73\x63\x61\x70\x65\x5f\x61\x6c\x6c" +#define Str_hex_escape_hex "h\x65x_\x65s\x63\x61p\x65_h\x65x" +#define Str_quote_U000022_escape "quote_\"_escape" +#define Str_Fly_away_in_my_space_U01F680_You_no_need_put_U01F4B5_in_my_pocket \ + u8"Fly_away_in_my_space_🚀_You_no_need_put_💵_in_my_pocket" #define Fn_Str_no_args() "no_args" #define Fn_Str_no_args_concat() "no_args_" Str_concat #define Fn_Str_prepend_arg(arg) "prepend_" arg diff --git a/third_party/rust/cfg-if/.cargo-checksum.json b/third_party/rust/cfg-if/.cargo-checksum.json index b744a21d9f..c0c9f9d043 100644 --- a/third_party/rust/cfg-if/.cargo-checksum.json +++ b/third_party/rust/cfg-if/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"090d983ec20ad09e59f6b7679b48b9b54e9c0841cf2922b81cba485edcd40876","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"1cd0ebc3b30a9c9eddb0fda5515b5a52ec2b85a087328f0ee9f4d68cbb28afc2","src/lib.rs":"f02d6e295109365cf54884e5282a3e7d1e1f62857c700f23cd013e94a56bd803","tests/xcrate.rs":"30dcb70fbb9c96fda2b7825592558279f534776f72e2a8a0a3e26df4dedb3caa"},"package":"082bb9b28e00d3c9d39cc03e64ce4cea0f1bb9b3fde493f0cbc008472d22bdf4"} \ No newline at end of file 
+{"files":{"Cargo.toml":"2cb2370b62c56a7d51b51f9e405b2f377b3ad6f7f8d33bc69e20eb819ad66012","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"2406e83ee174e30aa67f8ab266836fa78545012b196395aff37c152321e2c713","src/lib.rs":"8dfd667d32d8b06e529643c975dfa14c29ce9a894a80e381a1bd867252e65e56","tests/xcrate.rs":"c0734dae6e63beafcd60bf53546115a2320735b51035c9e2387fdf9301580934"},"package":"4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"} \ No newline at end of file diff --git a/third_party/rust/cfg-if/Cargo.toml b/third_party/rust/cfg-if/Cargo.toml index 84c4fc7835..5da1d1b4bf 100644 --- a/third_party/rust/cfg-if/Cargo.toml +++ b/third_party/rust/cfg-if/Cargo.toml @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're @@ -11,8 +11,9 @@ # will likely look very different (and much more reasonable) [package] +edition = "2018" name = "cfg-if" -version = "0.1.6" +version = "0.1.10" authors = ["Alex Crichton "] description = "A macro to ergonomically define an item depending on a large number of #[cfg]\nparameters. Structured like an if-else chain, the first matching branch is the\nitem that gets emitted.\n" homepage = "https://github.com/alexcrichton/cfg-if" @@ -20,5 +21,16 @@ documentation = "https://docs.rs/cfg-if" readme = "README.md" license = "MIT/Apache-2.0" repository = "https://github.com/alexcrichton/cfg-if" +[dependencies.compiler_builtins] +version = "0.1.2" +optional = true + +[dependencies.core] +version = "1.0.0" +optional = true +package = "rustc-std-workspace-core" + +[features] +rustc-dep-of-std = ["core", "compiler_builtins"] [badges.travis-ci] repository = "alexcrichton/cfg-if" diff --git a/third_party/rust/cfg-if/README.md b/third_party/rust/cfg-if/README.md index 344a946c04..50b5e3b2dc 100644 --- a/third_party/rust/cfg-if/README.md +++ b/third_party/rust/cfg-if/README.md @@ -1,7 +1,5 @@ # cfg-if -[![Build Status](https://travis-ci.org/alexcrichton/cfg-if.svg?branch=master)](https://travis-ci.org/alexcrichton/cfg-if) - [Documentation](https://docs.rs/cfg-if) A macro to ergonomically define an item depending on a large number of #[cfg] @@ -16,10 +14,7 @@ cfg-if = "0.1" ## Example ```rust -#[macro_use] -extern crate cfg_if; - -cfg_if! { +cfg_if::cfg_if! { if #[cfg(unix)] { fn foo() { /* unix specific functionality */ } } else if #[cfg(target_pointer_width = "32")] { @@ -48,5 +43,5 @@ at your option. ### Contribution Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in Serde by you, as defined in the Apache-2.0 license, shall be +for inclusion in `cfg-if` by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. diff --git a/third_party/rust/cfg-if/src/lib.rs b/third_party/rust/cfg-if/src/lib.rs index ff144f69f8..6c5058dadf 100644 --- a/third_party/rust/cfg-if/src/lib.rs +++ b/third_party/rust/cfg-if/src/lib.rs @@ -10,10 +10,7 @@ //! # Example //! //! ``` -//! #[macro_use] -//! extern crate cfg_if; -//! -//! cfg_if! { +//! cfg_if::cfg_if! { //! if #[cfg(unix)] { //! 
fn foo() { /* unix specific functionality */ } //! } else if #[cfg(target_pointer_width = "32")] { @@ -27,39 +24,40 @@ //! ``` #![no_std] - #![doc(html_root_url = "https://docs.rs/cfg-if")] #![deny(missing_docs)] #![cfg_attr(test, deny(warnings))] -#[macro_export(local_inner_macros)] +/// The main macro provided by this crate. See crate documentation for more +/// information. +#[macro_export] macro_rules! cfg_if { // match if/else chains with a final `else` ($( - if #[cfg($($meta:meta),*)] { $($it:item)* } + if #[cfg($($meta:meta),*)] { $($tokens:tt)* } ) else * else { - $($it2:item)* + $($tokens2:tt)* }) => { - cfg_if! { + $crate::cfg_if! { @__items () ; - $( ( ($($meta),*) ($($it)*) ), )* - ( () ($($it2)*) ), + $( ( ($($meta),*) ($($tokens)*) ), )* + ( () ($($tokens2)*) ), } }; // match if/else chains lacking a final `else` ( - if #[cfg($($i_met:meta),*)] { $($i_it:item)* } + if #[cfg($($i_met:meta),*)] { $($i_tokens:tt)* } $( - else if #[cfg($($e_met:meta),*)] { $($e_it:item)* } + else if #[cfg($($e_met:meta),*)] { $($e_tokens:tt)* } )* ) => { - cfg_if! { + $crate::cfg_if! { @__items () ; - ( ($($i_met),*) ($($i_it)*) ), - $( ( ($($e_met),*) ($($e_it)*) ), )* + ( ($($i_met),*) ($($i_tokens)*) ), + $( ( ($($e_met),*) ($($e_tokens)*) ), )* ( () () ), } }; @@ -69,21 +67,22 @@ macro_rules! cfg_if { // Collects all the negated cfgs in a list at the beginning and after the // semicolon is all the remaining items (@__items ($($not:meta,)*) ; ) => {}; - (@__items ($($not:meta,)*) ; ( ($($m:meta),*) ($($it:item)*) ), $($rest:tt)*) => { - // Emit all items within one block, applying an approprate #[cfg]. The + (@__items ($($not:meta,)*) ; ( ($($m:meta),*) ($($tokens:tt)*) ), $($rest:tt)*) => { + // Emit all items within one block, applying an appropriate #[cfg]. The // #[cfg] will require all `$m` matchers specified and must also negate // all previous matchers. - cfg_if! { @__apply cfg(all($($m,)* not(any($($not),*)))), $($it)* } + #[cfg(all($($m,)* not(any($($not),*))))] $crate::cfg_if! { @__identity $($tokens)* } // Recurse to emit all other items in `$rest`, and when we do so add all // our `$m` matchers to the list of `$not` matchers as future emissions // will have to negate everything we just matched as well. - cfg_if! { @__items ($($not,)* $($m,)*) ; $($rest)* } + $crate::cfg_if! { @__items ($($not,)* $($m,)*) ; $($rest)* } }; - // Internal macro to Apply a cfg attribute to a list of items - (@__apply $m:meta, $($it:item)*) => { - $(#[$m] $it)* + // Internal macro to make __apply work out right for different match types, + // because of how macros matching/expand stuff. + (@__identity $($tokens:tt)*) => { + $($tokens)* }; } @@ -139,4 +138,39 @@ mod tests { assert!(works4().is_some()); assert!(works5()); } + + #[test] + #[allow(clippy::assertions_on_constants)] + fn test_usage_within_a_function() { + cfg_if! {if #[cfg(debug_assertions)] { + // we want to put more than one thing here to make sure that they + // all get configured properly. + assert!(cfg!(debug_assertions)); + assert_eq!(4, 2+2); + } else { + assert!(works1().is_some()); + assert_eq!(10, 5+5); + }} + } + + trait Trait { + fn blah(&self); + } + + #[allow(dead_code)] + struct Struct; + + impl Trait for Struct { + cfg_if! 
{ + if #[cfg(feature = "blah")] { + fn blah(&self) { + unimplemented!(); + } + } else { + fn blah(&self) { + unimplemented!(); + } + } + } + } } diff --git a/third_party/rust/cfg-if/tests/xcrate.rs b/third_party/rust/cfg-if/tests/xcrate.rs index f42b877670..e7b4a362ad 100644 --- a/third_party/rust/cfg-if/tests/xcrate.rs +++ b/third_party/rust/cfg-if/tests/xcrate.rs @@ -1,7 +1,4 @@ -#[macro_use] -extern crate cfg_if; - -cfg_if! { +cfg_if::cfg_if! { if #[cfg(foo)] { fn works() -> bool { false } } else if #[cfg(test)] { diff --git a/third_party/rust/chrono/.cargo-checksum.json b/third_party/rust/chrono/.cargo-checksum.json index 19bebb3c4d..25230e1b01 100644 --- a/third_party/rust/chrono/.cargo-checksum.json +++ b/third_party/rust/chrono/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"AUTHORS.txt":"80aa54d9642f63fc62f20f60e0550f3e596de6ea69883769313c7f07a4be8f4c","CHANGELOG.md":"b9d2b2edfb98954c22e3a34c044bbd2f542cae703d06e5cf15245a1e26b32f76","Cargo.toml":"95d58b3f9a862b6bfd497e8aa87cc14ba6a43e7f6d1818094073e611db14ce43","LICENSE.txt":"46610329ff0b38effb9cb05979ff1ef761e465fed96b2eaca39e439d00129fd7","Makefile":"d76b0b1a44e90b31f2a6f97f662d65df585b1dc88253c30c01ea38d9a097a83e","README.md":"8df7579a4ce5ed034b85b91f6f3106573443138dcc568fd76063016ad2d5cc38","appveyor.yml":"b10751e92a0299968ac5cfd65e918d99e680b6ac679362655b92a393cd22c212","ci/fix-readme.sh":"750d262640a6fdc846623a569f37954bfe7604f9bcbc8f7475db38192e1da0fb","ci/travis.sh":"48eb316d163a9c5b37e4b1d4773e2f9934359a3a1dbddc3b6ae6a58ef15856b1","src/date.rs":"c8d61716eaecf8d0e1a887a0ac9bc06d2e5bf0b47eccc61e5683bdeb0f886ff8","src/datetime.rs":"34e71d822cfa70bb6d1041e3d865fcf629ffb2e29021713bd6aee8a3a6d1410f","src/div.rs":"02e6ce9c4fcafcc7931574108dd7ec0cd28b137edb52eaea654a09ab05fbaf90","src/format/mod.rs":"e36b2bee352df7f0addec7365edfd73607ebaa903d3ddb9249f5fe3c11d9da7a","src/format/parse.rs":"8d5b39483c48771932fd75a9d9892967bd7ef6f0c88d55be6a2d35d35ba21f52","src/format/parsed.rs":"a65cbc0ba13190028ca7c4de4a830b8a64acaf375285cae3a1da1bfd6e5d32f8","src/format/scan.rs":"9f8e4ce8001caf9ec76b3eddf7aa9cc5a68606165e3bb53977350c0a03536b79","src/format/strftime.rs":"532f88654cc1531e6ebdea89039bcf2c364e97478c83824f97f1c38277f3c827","src/lib.rs":"1dae4eb3a73db8dc8fd4f5d3e431fc773104a35c9efaa7a301d73f7b898fc464","src/naive/date.rs":"2fbd7069fb576416f2111298cdd59c729e70736abe53d6e69313a4e45f8a6e3d","src/naive/datetime.rs":"5ae4ed07dc199f7f4be27ef18130de385b56dceb01cefafe5e0f0eb9ed39ce7b","src/naive/internals.rs":"db79eda586b7daad5a2645d21bda80ae92f9bee9870d93d2209a7d228e4286c7","src/naive/isoweek.rs":"75101e996e0eccc6f9b2147095d82050e6dac94a741db60f654f4267bbe96fed","src/naive/time.rs":"cfa4936b341246eb0692e0a071d93707f3545825c74aee67749442ecd2aba655","src/offset/fixed.rs":"e0e41c7081e908a8ada1c1bb67fd003f8a36510c542c5088756214e276407cb9","src/offset/local.rs":"c63a88b8ab4af289cef15d04189f9656c8dfcff77fe8417bbd1182b75184f4e6","src/offset/mod.rs":"2aeeb0fa4c657e810f78ff239d7c52f07c33a2b7bdfc8b3765f4339dcafa0088","src/offset/utc.rs":"630f9513f88353892c9f554eed35d5ec204da9b3d65e7b3c44998836ba3d2d9b","src/oldtime.rs":"42f09a5679c8326ba8f0fe068b35ed1066801903c44b2abfd93f00ef5ec62dbc","src/round.rs":"f7ef334fe4d3014b8a6421202b4a50d316d74199ac154ff553548e8c2c58aa80"},"package":"45912881121cb26fad7c38c17ba7daa18764771836b34fab7d3fbd93ed633878"} \ No newline at end of file 
+{"files":{"AUTHORS.txt":"80aa54d9642f63fc62f20f60e0550f3e596de6ea69883769313c7f07a4be8f4c","CHANGELOG.md":"47c77da2a9058f98fac8c91eb29938b3887c94943c24d3a0ae6daa75368d8f88","Cargo.toml":"32e663be86a0444953c611928165a364dfb44c6dffd77840506e1ae8687d40c2","LICENSE.txt":"46610329ff0b38effb9cb05979ff1ef761e465fed96b2eaca39e439d00129fd7","README.md":"d23488fcb5eaee5bcc5bbf912aa9cb253dc1d6108ba64dd3eee582f5ea7e9c37","src/date.rs":"74d7a5de252dc9ae6cda2a228ba8db325a81cca30fad577693c3759cff3dac04","src/datetime.rs":"0dcdd8f4fa97a246fbf6007aed7bf35506c3d10ed7f9a564ed7cad6e55587944","src/div.rs":"02e6ce9c4fcafcc7931574108dd7ec0cd28b137edb52eaea654a09ab05fbaf90","src/format/mod.rs":"1999f9ee2c4000b34c10170b0027e7ced0131e40714fe9070b5dd7f169bd5060","src/format/parse.rs":"2581eff06fa8c48d8689e71fc247a05c1e1ad97bcaf5d7ef5591ba4b9977e676","src/format/parsed.rs":"282dce506a6194b1b3aabbad2e97aae9c37b22280753bd85ecbcf23c3bf6be9d","src/format/scan.rs":"6964c4f9bb179bea908a79f916fb5b53492c0852c3ac29fd270f883678d95fa0","src/format/strftime.rs":"2acc27cfd4092dee2e0b79652db24eaf5671d15ecb2e1bab44c697cfe3d9bb7c","src/lib.rs":"59e45ddeed3df7dbecdc80053f7d91d06a81c157f5bf2f5b72ba18d13477511b","src/naive/date.rs":"379837c06e101d223f707dab15967e89214eb0c04dc04534b0de961d90a089da","src/naive/datetime.rs":"483d7e4af7b6b0bd4c8d6980c86ab01260792882821b40d76d610f501051e7ba","src/naive/internals.rs":"d8b1e53bb9f1a8abde2e759b2d172332aa2d7f3697d373c8dfcadde33d02f443","src/naive/isoweek.rs":"0fa12fc77cb44f0747c014c65bfc2820526f508d7e3d1ad4d041af8826f5cbda","src/naive/time.rs":"72c89226d09845aa73d481aca2de484ee3225069f61196e8509df584eabb527d","src/offset/fixed.rs":"4f248ff75733112e96e5e6815cbbca6ae0072928a328899a5110eefb32c8126a","src/offset/local.rs":"9c3135aa30658cc6b731b4437738ebb1c1ac36de5a697ded59300abe5088e895","src/offset/mod.rs":"775575113cec481b97ab33ec4719a571f1c5c83b78e8269429e5a4fbb45cc610","src/offset/utc.rs":"069a58cf994e83b35c806d74ed80120e4254d7d026cca1205fb2850e7805116b","src/oldtime.rs":"a81af067568d0dbdb2ca29adad99d829e8c0b20b7b88b2c3053cf70aecd14416","src/round.rs":"f357a87008cb5d601eae71bc6b94d7d43d0bc39986288a7d324a3d6deefab5e6","tests/wasm.rs":"c25fd76cb495e04f58617e842e10e1b86f18eab358a71608825fe5c62c9ecf11"},"package":"31850b4a4d6bae316f7a09e691c944c28299298837edc0a03f755618c23cbc01"} \ No newline at end of file diff --git a/third_party/rust/chrono/CHANGELOG.md b/third_party/rust/chrono/CHANGELOG.md index d4f05ba733..43f9b85e72 100644 --- a/third_party/rust/chrono/CHANGELOG.md +++ b/third_party/rust/chrono/CHANGELOG.md @@ -6,7 +6,76 @@ This documents all notable changes to [Chrono](https://github.com/chronotope/chr Chrono obeys the principle of [Semantic Versioning](http://semver.org/). There were/are numerous minor versions before 1.0 due to the language changes. -Versions with only mechnical changes will be omitted from the following list. +Versions with only mechanical changes will be omitted from the following list. + +## 0.4.10 + +### Improvements + +* `DateTime::parse_from_str` is more than 2x faster in some cases. (@michalsrb + #358) +* Significant improvements to no-std and alloc support (This should also make + many format/serialization operations induce zero unnecessary allocations) + (@CryZe #341) + +### Features + +* Functions that were accepting `Iterator` of `Item`s (for example + `format_with_items`) now accept `Iterator` of `Borrow`, so one can + use values or references. 
(@michalsrb #358) +* Add built-in support for structs with nested `Option` etc fields + (@manifest #302) + +### Internal/doc improvements + +* Use markdown footnotes on the `strftime` docs page (@qudlibetor #359) +* Migrate from `try!` -> `?` (question mark) because it is now emitting + deprecation warnings and has been stable since rustc 1.13.0 +* Deny dead code + +## 0.4.9 + +### Fixes + +* Make Datetime arithmetic adjust their offsets after discovering their new + timestamps (@quodlibetor #337) +* Put wasm-bindgen related code and dependencies behind a `wasmbind` feature + gate. (@quodlibetor #335) + +## 0.4.8 + +### Fixes + +* Add '0' to single-digit days in rfc2822 date format (@wyhaya #323) +* Correctly pad DelayedFormat (@SamokhinIlya #320) + +### Features + +* Support `wasm-unknown-unknown` via wasm-bindgen (in addition to + emscripten/`wasm-unknown-emscripten`). (finished by @evq in #331, initial + work by @jjpe #287) + +## 0.4.7 + +### Fixes + +* Disable libc default features so that CI continues to work on rust 1.13 +* Fix panic on negative inputs to timestamp_millis (@cmars #292) +* Make `LocalResult` `Copy/Eq/Hash` + +### Features + +* Add `std::convert::From` conversions between the different timezone formats + (@mqudsi #271) +* Add `timestamp_nanos` methods (@jean-airoldie #308) +* Documentation improvements + +## 0.4.6 + +### Maintenance + +* Doc improvements -- improve README CI verification, external links +* winapi upgrade to 0.3 ## 0.4.5 diff --git a/third_party/rust/chrono/Cargo.toml b/third_party/rust/chrono/Cargo.toml index 44448a75a3..5534ad2089 100644 --- a/third_party/rust/chrono/Cargo.toml +++ b/third_party/rust/chrono/Cargo.toml @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository.
If you're @@ -12,8 +12,9 @@ [package] name = "chrono" -version = "0.4.6" +version = "0.4.10" authors = ["Kang Seonghoon ", "Brandon W Maister "] +exclude = ["/ci/*", "/.travis.yml", "/appveyor.yml", "/Makefile"] description = "Date and time library for Rust" homepage = "https://github.com/chronotope/chrono" documentation = "https://docs.rs/chrono/" @@ -43,8 +44,9 @@ version = "0.3.20" optional = true [dependencies.serde] -version = "1" +version = "1.0.99" optional = true +default-features = false [dependencies.time] version = "0.1.39" @@ -52,19 +54,37 @@ optional = true [dev-dependencies.bincode] version = "0.8.0" +[dev-dependencies.doc-comment] +version = "0.3" + [dev-dependencies.num-iter] version = "0.1.35" default-features = false [dev-dependencies.serde_derive] version = "1" +default-features = false [dev-dependencies.serde_json] version = "1" +default-features = false [features] -clock = ["time"] -default = ["clock"] +alloc = [] +bench = ["std"] +clock = ["time", "std"] +default = ["clock", "std"] +std = [] +wasmbind = ["wasm-bindgen", "js-sys"] +[target."cfg(all(target_arch = \"wasm32\", not(target_os = \"emscripten\")))".dependencies.js-sys] +version = "0.3" +optional = true + +[target."cfg(all(target_arch = \"wasm32\", not(target_os = \"emscripten\")))".dependencies.wasm-bindgen] +version = "0.2" +optional = true +[target."cfg(all(target_arch = \"wasm32\", not(target_os = \"emscripten\")))".dev-dependencies.wasm-bindgen-test] +version = "0.2" [badges.appveyor] repository = "chronotope/chrono" diff --git a/third_party/rust/chrono/Makefile b/third_party/rust/chrono/Makefile deleted file mode 100644 index bb938ccea5..0000000000 --- a/third_party/rust/chrono/Makefile +++ /dev/null @@ -1,30 +0,0 @@ -# this Makefile is mostly for the packaging convenience. -# casual users should use `cargo` to retrieve the appropriate version of Chrono. - -.PHONY: all -all: - @echo 'Try `cargo build` instead.' - -.PHONY: authors -authors: - echo 'Chrono is mainly written by Kang Seonghoon ,' > AUTHORS.txt - echo 'and also the following people (in ascending order):' >> AUTHORS.txt - echo >> AUTHORS.txt - git log --format='%aN <%aE>' | grep -v 'Kang Seonghoon' | sort -u >> AUTHORS.txt - -.PHONY: readme README.md -readme: README.md - -README.md: src/lib.rs - ( ./ci/fix-readme.sh $< ) > $@ - -.PHONY: test -test: - TZ=UTC0 cargo test --features 'serde rustc-serialize bincode' --lib - TZ=ACST-9:30 cargo test --features 'serde rustc-serialize bincode' --lib - TZ=EST4 cargo test --features 'serde rustc-serialize bincode' - -.PHONY: doc -doc: authors readme - cargo doc --features 'serde rustc-serialize bincode' - diff --git a/third_party/rust/chrono/README.md b/third_party/rust/chrono/README.md index 8a37c7b775..6401210f1d 100644 --- a/third_party/rust/chrono/README.md +++ b/third_party/rust/chrono/README.md @@ -82,7 +82,7 @@ nanoseconds and does not represent "nominal" components such as days or months. Chrono does not yet natively support -the standard [`Duration`](https://docs.rs/time/0.1.40/time/struct.Duration.html) type, +the standard [`Duration`](https://doc.rust-lang.org/std/time/struct.Duration.html) type, but it will be supported in the future. Meanwhile you can convert between two types with [`Duration::from_std`](https://docs.rs/time/0.1.40/time/struct.Duration.html#method.from_std) @@ -93,7 +93,7 @@ methods. 
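The README paragraph above only names the conversion methods. A minimal sketch of the round trip (an editorial illustration, not part of the vendored file), assuming the `from_std`/`to_std` methods on the `time::Duration` type that chrono 0.4 re-exports as `chrono::Duration`:

```rust
use chrono::Duration;
use std::time::Duration as StdDuration;

fn main() {
    // std::time::Duration -> chrono::Duration; errors only if the value is out of range.
    let d = Duration::from_std(StdDuration::from_secs(86_400)).unwrap();
    assert_eq!(d.num_seconds(), 86_400);

    // chrono::Duration -> std::time::Duration; errors only if the value is negative.
    assert_eq!(d.to_std().unwrap(), StdDuration::from_secs(86_400));
}
```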
### Date and Time Chrono provides a -[**`DateTime`**](https://docs.rs/chrono/0.4.6/chrono/struct.DateTime.html) +[**`DateTime`**](https://docs.rs/chrono/0.4/chrono/struct.DateTime.html) type to represent a date and a time in a timezone. For more abstract moment-in-time tracking such as internal timekeeping @@ -104,15 +104,15 @@ which tracks your system clock, or is an opaque but monotonically-increasing representation of a moment in time. `DateTime` is timezone-aware and must be constructed from -the [**`TimeZone`**](https://docs.rs/chrono/0.4.6/chrono/offset/trait.TimeZone.html) object, +the [**`TimeZone`**](https://docs.rs/chrono/0.4/chrono/offset/trait.TimeZone.html) object, which defines how the local date is converted to and back from the UTC date. There are three well-known `TimeZone` implementations: -* [**`Utc`**](https://docs.rs/chrono/0.4.6/chrono/offset/struct.Utc.html) specifies the UTC time zone. It is most efficient. +* [**`Utc`**](https://docs.rs/chrono/0.4/chrono/offset/struct.Utc.html) specifies the UTC time zone. It is most efficient. -* [**`Local`**](https://docs.rs/chrono/0.4.6/chrono/offset/struct.Local.html) specifies the system local time zone. +* [**`Local`**](https://docs.rs/chrono/0.4/chrono/offset/struct.Local.html) specifies the system local time zone. -* [**`FixedOffset`**](https://docs.rs/chrono/0.4.6/chrono/offset/struct.FixedOffset.html) specifies +* [**`FixedOffset`**](https://docs.rs/chrono/0.4/chrono/offset/struct.FixedOffset.html) specifies an arbitrary, fixed time zone such as UTC+09:00 or UTC-10:30. This often results from the parsed textual date and time. Since it stores the most information and does not depend on the system environment, @@ -120,12 +120,12 @@ There are three well-known `TimeZone` implementations: `DateTime`s with different `TimeZone` types are distinct and do not mix, but can be converted to each other using -the [`DateTime::with_timezone`](https://docs.rs/chrono/0.4.6/chrono/struct.DateTime.html#method.with_timezone) method. +the [`DateTime::with_timezone`](https://docs.rs/chrono/0.4/chrono/struct.DateTime.html#method.with_timezone) method. You can get the current date and time in the UTC time zone -([`Utc::now()`](https://docs.rs/chrono/0.4.6/chrono/offset/struct.Utc.html#method.now)) +([`Utc::now()`](https://docs.rs/chrono/0.4/chrono/offset/struct.Utc.html#method.now)) or in the local time zone -([`Local::now()`](https://docs.rs/chrono/0.4.6/chrono/offset/struct.Local.html#method.now)). +([`Local::now()`](https://docs.rs/chrono/0.4/chrono/offset/struct.Local.html#method.now)). ```rust use chrono::prelude::*; @@ -166,24 +166,26 @@ assert_eq!(dt, fixed_dt); ``` Various properties are available to the date and time, and can be altered individually. -Most of them are defined in the traits [`Datelike`](https://docs.rs/chrono/0.4.6/chrono/trait.Datelike.html) and -[`Timelike`](https://docs.rs/chrono/0.4.6/chrono/trait.Timelike.html) which you should `use` before. +Most of them are defined in the traits [`Datelike`](https://docs.rs/chrono/0.4/chrono/trait.Datelike.html) and +[`Timelike`](https://docs.rs/chrono/0.4/chrono/trait.Timelike.html) which you should `use` before. Addition and subtraction is also supported. 
The following illustrates most supported operations to the date and time: ```rust +extern crate time; + use chrono::prelude::*; use time::Duration; // assume this returned `2014-11-28T21:45:59.324310806+09:00`: -let dt = Local::now(); +let dt = FixedOffset::east(9*3600).ymd(2014, 11, 28).and_hms_nano(21, 45, 59, 324310806); // property accessors assert_eq!((dt.year(), dt.month(), dt.day()), (2014, 11, 28)); assert_eq!((dt.month0(), dt.day0()), (10, 27)); // for unfortunate souls assert_eq!((dt.hour(), dt.minute(), dt.second()), (21, 45, 59)); assert_eq!(dt.weekday(), Weekday::Fri); -assert_eq!(dt.weekday().number_from_monday(), 5); // Mon=1, ..., Sat=7 +assert_eq!(dt.weekday().number_from_monday(), 5); // Mon=1, ..., Sun=7 assert_eq!(dt.ordinal(), 332); // the day of year assert_eq!(dt.num_days_from_ce(), 735565); // the number of days from and including Jan 1, 1 @@ -210,15 +212,15 @@ assert_eq!(Utc.ymd(1970, 1, 1).and_hms(0, 0, 0) - Duration::seconds(1_000_000_00 ### Formatting and Parsing -Formatting is done via the [`format`](https://docs.rs/chrono/0.4.6/chrono/struct.DateTime.html#method.format) method, +Formatting is done via the [`format`](https://docs.rs/chrono/0.4/chrono/struct.DateTime.html#method.format) method, which format is equivalent to the familiar `strftime` format. -See [`format::strftime`](https://docs.rs/chrono/0.4.6/chrono/format/strftime/index.html#specifiers) +See [`format::strftime`](https://docs.rs/chrono/0.4/chrono/format/strftime/index.html#specifiers) documentation for full syntax and list of specifiers. The default `to_string` method and `{:?}` specifier also give a reasonable representation. -Chrono also provides [`to_rfc2822`](https://docs.rs/chrono/0.4.6/chrono/struct.DateTime.html#method.to_rfc2822) and -[`to_rfc3339`](https://docs.rs/chrono/0.4.6/chrono/struct.DateTime.html#method.to_rfc3339) methods +Chrono also provides [`to_rfc2822`](https://docs.rs/chrono/0.4/chrono/struct.DateTime.html#method.to_rfc2822) and +[`to_rfc3339`](https://docs.rs/chrono/0.4/chrono/struct.DateTime.html#method.to_rfc3339) methods for well-known formats. ```rust @@ -248,23 +250,23 @@ Parsing can be done with three methods: ([`std::fmt::Debug`](https://doc.rust-lang.org/std/fmt/trait.Debug.html)) format specifier prints, and requires the offset to be present. -2. [`DateTime::parse_from_str`](https://docs.rs/chrono/0.4.6/chrono/struct.DateTime.html#method.parse_from_str) parses +2. [`DateTime::parse_from_str`](https://docs.rs/chrono/0.4/chrono/struct.DateTime.html#method.parse_from_str) parses a date and time with offsets and returns `DateTime`. This should be used when the offset is a part of input and the caller cannot guess that. It *cannot* be used when the offset can be missing. - [`DateTime::parse_from_rfc2822`](https://docs.rs/chrono/0.4.6/chrono/struct.DateTime.html#method.parse_from_rfc2822) + [`DateTime::parse_from_rfc2822`](https://docs.rs/chrono/0.4/chrono/struct.DateTime.html#method.parse_from_rfc2822) and - [`DateTime::parse_from_rfc3339`](https://docs.rs/chrono/0.4.6/chrono/struct.DateTime.html#method.parse_from_rfc3339) + [`DateTime::parse_from_rfc3339`](https://docs.rs/chrono/0.4/chrono/struct.DateTime.html#method.parse_from_rfc3339) are similar but for well-known formats. -3. [`Offset::datetime_from_str`](https://docs.rs/chrono/0.4.6/chrono/offset/trait.TimeZone.html#method.datetime_from_str) is +3. 
[`Offset::datetime_from_str`](https://docs.rs/chrono/0.4/chrono/offset/trait.TimeZone.html#method.datetime_from_str) is similar but returns `DateTime` of given offset. When the explicit offset is missing from the input, it simply uses given offset. It issues an error when the input contains an explicit offset different from the current offset. More detailed control over the parsing process is available via -[`format`](https://docs.rs/chrono/0.4.6/chrono/format/index.html) module. +[`format`](https://docs.rs/chrono/0.4/chrono/format/index.html) module. ```rust use chrono::prelude::*; @@ -296,23 +298,23 @@ assert!(Utc.datetime_from_str("Fri Nov 28 12:00:09", "%a %b %e %T").is_err()); assert!(Utc.datetime_from_str("Sat Nov 28 12:00:09 2014", "%a %b %e %T %Y").is_err()); ``` -Again : See [`format::strftime`](https://docs.rs/chrono/0.4.6/chrono/format/strftime/index.html#specifiers) +Again : See [`format::strftime`](https://docs.rs/chrono/0.4/chrono/format/strftime/index.html#specifiers) documentation for full syntax and list of specifiers. ### Conversion from and to EPOCH timestamps -Use [`Utc.timestamp(seconds, nanoseconds)`](https://docs.rs/chrono/0.4.6/chrono/offset/trait.TimeZone.html#method.timestamp) -to construct a [`DateTime`](https://docs.rs/chrono/0.4.6/chrono/struct.DateTime.html) from a UNIX timestamp +Use [`Utc.timestamp(seconds, nanoseconds)`](https://docs.rs/chrono/0.4/chrono/offset/trait.TimeZone.html#method.timestamp) +to construct a [`DateTime`](https://docs.rs/chrono/0.4/chrono/struct.DateTime.html) from a UNIX timestamp (seconds, nanoseconds that passed since January 1st 1970). -Use [`DateTime.timestamp`](https://docs.rs/chrono/0.4.6/chrono/struct.DateTime.html#method.timestamp) to get the timestamp (in seconds) -from a [`DateTime`](https://docs.rs/chrono/0.4.6/chrono/struct.DateTime.html). Additionally, you can use -[`DateTime.timestamp_subsec_nanos`](https://docs.rs/chrono/0.4.6/chrono/struct.DateTime.html#method.timestamp_subsec_nanos) +Use [`DateTime.timestamp`](https://docs.rs/chrono/0.4/chrono/struct.DateTime.html#method.timestamp) to get the timestamp (in seconds) +from a [`DateTime`](https://docs.rs/chrono/0.4/chrono/struct.DateTime.html). Additionally, you can use +[`DateTime.timestamp_subsec_nanos`](https://docs.rs/chrono/0.4/chrono/struct.DateTime.html#method.timestamp_subsec_nanos) to get the number of additional number of nanoseconds. ```rust // We need the trait in scope to use Utc::timestamp(). -use chrono::TimeZone; +use chrono::{DateTime, TimeZone, Utc}; // Construct a datetime from epoch: let dt = Utc.timestamp(1_500_000_000, 0); @@ -325,7 +327,7 @@ assert_eq!(dt.timestamp(), 1_500_000_000); ### Individual date -Chrono also provides an individual date type ([**`Date`**](https://docs.rs/chrono/0.4.6/chrono/struct.Date.html)). +Chrono also provides an individual date type ([**`Date`**](https://docs.rs/chrono/0.4/chrono/struct.Date.html)). It also has time zones attached, and have to be constructed via time zones. Most operations available to `DateTime` are also available to `Date` whenever appropriate. @@ -344,26 +346,26 @@ assert_eq!(Utc.ymd(2014, 11, 28).and_hms_milli(7, 8, 9, 10).format("%H%M%S").to_ There is no timezone-aware `Time` due to the lack of usefulness and also the complexity. -`DateTime` has [`date`](https://docs.rs/chrono/0.4.6/chrono/struct.DateTime.html#method.date) method +`DateTime` has [`date`](https://docs.rs/chrono/0.4/chrono/struct.DateTime.html#method.date) method which returns a `Date` which represents its date component. 
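As an editorial illustration of the date/time split described above (not part of the vendored README), assuming only chrono 0.4's prelude:

```rust
use chrono::prelude::*;

fn main() {
    let dt = Utc.ymd(2014, 11, 28).and_hms(21, 45, 59);
    // Timezone-aware date component.
    assert_eq!(dt.date(), Utc.ymd(2014, 11, 28));
    // Naive local time component, covered in the next paragraph.
    assert_eq!(dt.time(), NaiveTime::from_hms(21, 45, 59));
}
```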
-There is also a [`time`](https://docs.rs/chrono/0.4.6/chrono/struct.DateTime.html#method.time) method, +There is also a [`time`](https://docs.rs/chrono/0.4/chrono/struct.DateTime.html#method.time) method, which simply returns a naive local time described below. ### Naive date and time Chrono provides naive counterparts to `Date`, (non-existent) `Time` and `DateTime` -as [**`NaiveDate`**](https://docs.rs/chrono/0.4.6/chrono/naive/struct.NaiveDate.html), -[**`NaiveTime`**](https://docs.rs/chrono/0.4.6/chrono/naive/struct.NaiveTime.html) and -[**`NaiveDateTime`**](https://docs.rs/chrono/0.4.6/chrono/naive/struct.NaiveDateTime.html) respectively. +as [**`NaiveDate`**](https://docs.rs/chrono/0.4/chrono/naive/struct.NaiveDate.html), +[**`NaiveTime`**](https://docs.rs/chrono/0.4/chrono/naive/struct.NaiveTime.html) and +[**`NaiveDateTime`**](https://docs.rs/chrono/0.4/chrono/naive/struct.NaiveDateTime.html) respectively. They have almost equivalent interfaces as their timezone-aware twins, but are not associated to time zones obviously and can be quite low-level. They are mostly useful for building blocks for higher-level types. Timezone-aware `DateTime` and `Date` types have two methods returning naive versions: -[`naive_local`](https://docs.rs/chrono/0.4.6/chrono/struct.DateTime.html#method.naive_local) returns +[`naive_local`](https://docs.rs/chrono/0.4/chrono/struct.DateTime.html#method.naive_local) returns a view to the naive local time, -and [`naive_utc`](https://docs.rs/chrono/0.4.6/chrono/struct.DateTime.html#method.naive_utc) returns +and [`naive_utc`](https://docs.rs/chrono/0.4/chrono/struct.DateTime.html#method.naive_utc) returns a view to the naive UTC time. ## Limitations @@ -375,7 +377,7 @@ Date types are limited in about +/- 262,000 years from the common epoch. Time types are limited in the nanosecond accuracy. [Leap seconds are supported in the representation but -Chrono doesn't try to make use of them](https://docs.rs/chrono/0.4.6/chrono/naive/struct.NaiveTime.html#leap-second-handling). +Chrono doesn't try to make use of them](https://docs.rs/chrono/0.4/chrono/naive/struct.NaiveTime.html#leap-second-handling). (The main reason is that leap seconds are not really predictable.) Almost *every* operation over the possible leap seconds will ignore them. 
Consider using `NaiveDateTime` with the implicit TAI (International Atomic Time) scale diff --git a/third_party/rust/chrono/appveyor.yml b/third_party/rust/chrono/appveyor.yml deleted file mode 100644 index 42d85d9d82..0000000000 --- a/third_party/rust/chrono/appveyor.yml +++ /dev/null @@ -1,21 +0,0 @@ -environment: - matrix: - - TARGET: 1.13.0-x86_64-pc-windows-gnu - - TARGET: nightly-x86_64-pc-windows-msvc - - TARGET: nightly-i686-pc-windows-msvc - - TARGET: nightly-x86_64-pc-windows-gnu - - TARGET: nightly-i686-pc-windows-gnu -matrix: - allow_failures: - - channel: nightly -install: - - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-${env:TARGET}.exe" -FileName "rust-install.exe" - - ps: .\rust-install.exe /VERYSILENT /NORESTART /DIR="C:\rust" | Out-Null - - ps: $env:PATH="$env:PATH;C:\rust\bin" - - rustc -vV - - cargo -vV - -build: false - -test_script: - - sh -c 'PATH=`rustc --print sysroot`/bin:$PATH ./ci/travis.sh' diff --git a/third_party/rust/chrono/ci/fix-readme.sh b/third_party/rust/chrono/ci/fix-readme.sh deleted file mode 100644 index 87d838f640..0000000000 --- a/third_party/rust/chrono/ci/fix-readme.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -VERSION="$( cargo read-manifest | python -c 'import json, sys; print(json.load(sys.stdin)["version"])')" -LIB="$1" - -# Make the Chrono in the header a link to the docs -awk '/^\/\/! # Chrono: / { print "[Chrono][docsrs]:", substr($0, index($0, $4))}' "$LIB" -awk '/^\/\/! # Chrono: / { print "[Chrono][docsrs]:", substr($0, index($0, $4))}' "$LIB" | sed 's/./=/g' -# Add all the badges -echo ' -[![Chrono on Travis CI][travis-image]][travis] -[![Chrono on Appveyor][appveyor-image]][appveyor] -[![Chrono on crates.io][cratesio-image]][cratesio] -[![Chrono on docs.rs][docsrs-image]][docsrs] -[![Join the chat at https://gitter.im/chrono-rs/chrono][gitter-image]][gitter] - -[travis-image]: https://travis-ci.org/chronotope/chrono.svg?branch=master -[travis]: https://travis-ci.org/chronotope/chrono -[appveyor-image]: https://ci.appveyor.com/api/projects/status/2ia91ofww4w31m2w/branch/master?svg=true -[appveyor]: https://ci.appveyor.com/project/chronotope/chrono -[cratesio-image]: https://img.shields.io/crates/v/chrono.svg -[cratesio]: https://crates.io/crates/chrono -[docsrs-image]: https://docs.rs/chrono/badge.svg -[docsrs]: https://docs.rs/chrono -[gitter-image]: https://badges.gitter.im/chrono-rs/chrono.svg -[gitter]: https://gitter.im/chrono-rs/chrono' - -# print the section between the header and the usage -awk '/^\/\/! # Chrono:/,/^\/\/! ## /' "$LIB" | cut -b 5- | grep -v '^#' | \ - sed 's/](\.\//](https:\/\/docs.rs\/chrono\/'$VERSION'\/chrono\//g' -echo -# Replace relative doc links with links to this exact version of docs on -# docs.rs -awk '/^\/\/! 
## /,!/^\/\/!/' "$LIB" | cut -b 5- | grep -v '^# ' | \ - sed 's/](\.\//](https:\/\/docs.rs\/chrono\/'$VERSION'\/chrono\//g' \ diff --git a/third_party/rust/chrono/ci/travis.sh b/third_party/rust/chrono/ci/travis.sh deleted file mode 100644 index 4a974b1398..0000000000 --- a/third_party/rust/chrono/ci/travis.sh +++ /dev/null @@ -1,100 +0,0 @@ -#!/bin/bash - -# This is the script that's executed by travis, you can run it yourself to run -# the exact same suite - -set -e - -DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" - -channel() { - if [ -n "${TRAVIS}" ]; then - if [ "${TRAVIS_RUST_VERSION}" = "${CHANNEL}" ]; then - pwd - (set -x; cargo "$@") - fi - elif [ -n "${APPVEYOR}" ]; then - if [ "${APPVEYOR_RUST_CHANNEL}" = "${CHANNEL}" ]; then - pwd - (set -x; cargo "$@") - fi - else - pwd - (set -x; cargo "+${CHANNEL}" "$@") - fi -} - -build_and_test() { - # interleave building and testing in hope that it saves time - # also vary the local time zone to (hopefully) catch tz-dependent bugs - # also avoid doc-testing multiple times---it takes a lot and rarely helps - cargo clean - channel build -v - TZ=ACST-9:30 channel test -v --lib - channel build -v --features rustc-serialize - TZ=EST4 channel test -v --features rustc-serialize --lib - channel build -v --features serde - TZ=UTC0 channel test -v --features serde --lib - channel build -v --features serde,rustc-serialize - TZ=Asia/Katmandu channel test -v --features serde,rustc-serialize - - # without default "clock" feature - channel build -v --no-default-features - TZ=ACST-9:30 channel test -v --no-default-features --lib - channel build -v --no-default-features --features rustc-serialize - TZ=EST4 channel test -v --no-default-features --features rustc-serialize --lib - channel build -v --no-default-features --features serde - TZ=UTC0 channel test -v --no-default-features --features serde --lib - channel build -v --no-default-features --features serde,rustc-serialize - TZ=Asia/Katmandu channel test -v --no-default-features --features serde,rustc-serialize --lib - - if [[ "$CHANNEL" == stable ]]; then - if [[ -n "$TRAVIS" ]] ; then - check_readme - fi - fi -} - -build_only() { - # Rust 1.13 doesn't support custom derive, so, to avoid doctests which - # validate that, we just build there. - cargo clean - channel build -v - channel build -v --features rustc-serialize - channel build -v --features 'serde bincode' - channel build -v --no-default-features -} - -run_clippy() { - # cached installation will not work on a later nightly - if [ -n "${TRAVIS}" ] && ! cargo install clippy --debug --force; then - echo "COULD NOT COMPILE CLIPPY, IGNORING CLIPPY TESTS" - exit - fi - - cargo clippy --features 'serde bincode rustc-serialize' -- -Dclippy -} - -check_readme() { - make readme - (set -x; git diff --exit-code -- README.md) ; echo $? -} - -rustc --version -cargo --version - -CHANNEL=nightly -if [ "x${CLIPPY}" = xy ] ; then - run_clippy -else - build_and_test -fi - -CHANNEL=beta -build_and_test - -CHANNEL=stable -build_and_test - -CHANNEL=1.13.0 -build_only diff --git a/third_party/rust/chrono/src/date.rs b/third_party/rust/chrono/src/date.rs index f8b59cea77..7026c41345 100644 --- a/third_party/rust/chrono/src/date.rs +++ b/third_party/rust/chrono/src/date.rs @@ -3,16 +3,18 @@ //! ISO 8601 calendar date with time zone. 
-use std::{fmt, hash}; -use std::cmp::Ordering; -use std::ops::{Add, Sub}; +use core::borrow::Borrow; +use core::{fmt, hash}; +use core::cmp::Ordering; +use core::ops::{Add, Sub}; use oldtime::Duration as OldDuration; use {Weekday, Datelike}; use offset::{TimeZone, Utc}; use naive::{self, NaiveDate, NaiveTime, IsoWeek}; use DateTime; -use format::{Item, DelayedFormat, StrftimeItems}; +#[cfg(any(feature = "alloc", feature = "std", test))] +use format::{DelayedFormat, Item, StrftimeItems}; /// ISO 8601 calendar date with time zone. /// @@ -255,15 +257,17 @@ fn map_local(d: &Date, mut f: F) -> Option> impl Date where Tz::Offset: fmt::Display { /// Formats the date with the specified formatting items. + #[cfg(any(feature = "alloc", feature = "std", test))] #[inline] - pub fn format_with_items<'a, I>(&self, items: I) -> DelayedFormat - where I: Iterator> + Clone { + pub fn format_with_items<'a, I, B>(&self, items: I) -> DelayedFormat + where I: Iterator + Clone, B: Borrow> { DelayedFormat::new_with_offset(Some(self.naive_local()), None, &self.offset, items) } /// Formats the date with the specified format string. /// See the [`format::strftime` module](./format/strftime/index.html) /// on the supported escape sequences. + #[cfg(any(feature = "alloc", feature = "std", test))] #[inline] pub fn format<'a>(&self, fmt: &'a str) -> DelayedFormat> { self.format_with_items(StrftimeItems::new(fmt)) diff --git a/third_party/rust/chrono/src/datetime.rs b/third_party/rust/chrono/src/datetime.rs index 2e553062db..fcfdcd4206 100644 --- a/third_party/rust/chrono/src/datetime.rs +++ b/third_party/rust/chrono/src/datetime.rs @@ -3,12 +3,18 @@ //! ISO 8601 date and time with time zone. -use std::{str, fmt, hash}; -use std::cmp::Ordering; -use std::ops::{Add, Sub}; +use core::{str, fmt, hash}; +use core::cmp::Ordering; +use core::ops::{Add, Sub}; +#[cfg(any(feature = "std", test))] use std::time::{SystemTime, UNIX_EPOCH}; use oldtime::Duration as OldDuration; +#[cfg(all(not(feature = "std"), feature = "alloc"))] +use alloc::string::{String, ToString}; +#[cfg(feature = "std")] +use std::string::ToString; + use {Weekday, Timelike, Datelike}; #[cfg(feature="clock")] use offset::Local; @@ -16,7 +22,10 @@ use offset::{TimeZone, Offset, Utc, FixedOffset}; use naive::{NaiveTime, NaiveDateTime, IsoWeek}; use Date; use format::{Item, Numeric, Pad, Fixed}; -use format::{parse, Parsed, ParseError, ParseResult, DelayedFormat, StrftimeItems}; +use format::{parse, Parsed, ParseError, ParseResult, StrftimeItems}; +#[cfg(any(feature = "alloc", feature = "std", test))] +use format::DelayedFormat; +use core::borrow::Borrow; /// Specific formatting options for seconds. This may be extended in the /// future, so exhaustive matching in external code is not recommended. @@ -202,7 +211,8 @@ impl DateTime { #[inline] pub fn checked_add_signed(self, rhs: OldDuration) -> Option> { let datetime = try_opt!(self.datetime.checked_add_signed(rhs)); - Some(DateTime { datetime: datetime, offset: self.offset }) + let tz = self.timezone(); + Some(tz.from_utc_datetime(&datetime)) } /// Subtracts given `Duration` from the current date and time. @@ -211,7 +221,8 @@ impl DateTime { #[inline] pub fn checked_sub_signed(self, rhs: OldDuration) -> Option> { let datetime = try_opt!(self.datetime.checked_sub_signed(rhs)); - Some(DateTime { datetime: datetime, offset: self.offset }) + let tz = self.timezone(); + Some(tz.from_utc_datetime(&datetime)) } /// Subtracts another `DateTime` from the current date and time. 
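The two hunks above make `checked_add_signed` and `checked_sub_signed` re-derive the offset from the time zone instead of copying the old one (the "adjust their offsets" fix from the 0.4.9 changelog). A minimal sketch of the call site, assuming only chrono 0.4's public API; with a fixed offset the zone is trivially preserved, while a DST-aware zone may now report a different offset after the arithmetic:

```rust
use chrono::prelude::*;
use chrono::Duration;

fn main() {
    let tz = FixedOffset::east(9 * 3600);
    let dt = tz.ymd(2014, 11, 28).and_hms(23, 59, 59);

    // checked_add_signed returns None on overflow instead of panicking.
    let later = dt.checked_add_signed(Duration::seconds(2)).unwrap();
    assert_eq!(later, tz.ymd(2014, 11, 29).and_hms(0, 0, 1));
    // The result keeps the same fixed offset (+09:00).
    assert_eq!(later.offset().local_minus_utc(), 9 * 3600);
}
```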
@@ -235,6 +246,75 @@ impl DateTime { } } +/// Convert a `DateTime` instance into a `DateTime` instance. +impl From> for DateTime { + /// Convert this `DateTime` instance into a `DateTime` instance. + /// + /// Conversion is done via [`DateTime::with_timezone`]. Note that the converted value returned by + /// this will be created with a fixed timezone offset of 0. + fn from(src: DateTime) -> Self { + src.with_timezone(&FixedOffset::east(0)) + } +} + +/// Convert a `DateTime` instance into a `DateTime` instance. +#[cfg(feature="clock")] +impl From> for DateTime { + /// Convert this `DateTime` instance into a `DateTime` instance. + /// + /// Conversion is performed via [`DateTime::with_timezone`], accounting for the difference in timezones. + fn from(src: DateTime) -> Self { + src.with_timezone(&Local) + } +} + +/// Convert a `DateTime` instance into a `DateTime` instance. +impl From> for DateTime { + /// Convert this `DateTime` instance into a `DateTime` instance. + /// + /// Conversion is performed via [`DateTime::with_timezone`], accounting for the timezone + /// difference. + fn from(src: DateTime) -> Self { + src.with_timezone(&Utc) + } +} + +/// Convert a `DateTime` instance into a `DateTime` instance. +#[cfg(feature="clock")] +impl From> for DateTime { + /// Convert this `DateTime` instance into a `DateTime` instance. + /// + /// Conversion is performed via [`DateTime::with_timezone`]. Returns the equivalent value in local + /// time. + fn from(src: DateTime) -> Self { + src.with_timezone(&Local) + } +} + +/// Convert a `DateTime` instance into a `DateTime` instance. +#[cfg(feature="clock")] +impl From> for DateTime { + /// Convert this `DateTime` instance into a `DateTime` instance. + /// + /// Conversion is performed via [`DateTime::with_timezone`], accounting for the difference in + /// timezones. + fn from(src: DateTime) -> Self { + src.with_timezone(&Utc) + } +} + +/// Convert a `DateTime` instance into a `DateTime` instance. +#[cfg(feature="clock")] +impl From> for DateTime { + /// Convert this `DateTime` instance into a `DateTime` instance. + /// + /// Conversion is performed via [`DateTime::with_timezone`]. Note that the converted value returned + /// by this will be created with a fixed timezone offset of 0. + fn from(src: DateTime) -> Self { + src.with_timezone(&FixedOffset::east(0)) + } +} + /// Maps the local datetime to other datetime with given conversion function. fn map_local(dt: &DateTime, mut f: F) -> Option> where F: FnMut(NaiveDateTime) -> Option { @@ -247,7 +327,7 @@ impl DateTime { pub fn parse_from_rfc2822(s: &str) -> ParseResult> { const ITEMS: &'static [Item<'static>] = &[Item::Fixed(Fixed::RFC2822)]; let mut parsed = Parsed::new(); - try!(parse(&mut parsed, s, ITEMS.iter().cloned())); + parse(&mut parsed, s, ITEMS.iter())?; parsed.to_datetime() } @@ -259,7 +339,7 @@ impl DateTime { pub fn parse_from_rfc3339(s: &str) -> ParseResult> { const ITEMS: &'static [Item<'static>] = &[Item::Fixed(Fixed::RFC3339)]; let mut parsed = Parsed::new(); - try!(parse(&mut parsed, s, ITEMS.iter().cloned())); + parse(&mut parsed, s, ITEMS.iter())?; parsed.to_datetime() } @@ -285,22 +365,24 @@ impl DateTime { /// ``` pub fn parse_from_str(s: &str, fmt: &str) -> ParseResult> { let mut parsed = Parsed::new(); - try!(parse(&mut parsed, s, StrftimeItems::new(fmt))); + parse(&mut parsed, s, StrftimeItems::new(fmt))?; parsed.to_datetime() } } impl DateTime where Tz::Offset: fmt::Display { /// Returns an RFC 2822 date and time string such as `Tue, 1 Jul 2003 10:52:37 +0200`. 
+ #[cfg(any(feature = "alloc", feature = "std", test))] pub fn to_rfc2822(&self) -> String { const ITEMS: &'static [Item<'static>] = &[Item::Fixed(Fixed::RFC2822)]; - self.format_with_items(ITEMS.iter().cloned()).to_string() + self.format_with_items(ITEMS.iter()).to_string() } /// Returns an RFC 3339 and ISO 8601 date and time string such as `1996-12-19T16:39:57-08:00`. + #[cfg(any(feature = "alloc", feature = "std", test))] pub fn to_rfc3339(&self) -> String { const ITEMS: &'static [Item<'static>] = &[Item::Fixed(Fixed::RFC3339)]; - self.format_with_items(ITEMS.iter().cloned()).to_string() + self.format_with_items(ITEMS.iter()).to_string() } /// Return an RFC 3339 and ISO 8601 date and time string with subseconds @@ -327,6 +409,7 @@ impl DateTime where Tz::Offset: fmt::Display { /// assert_eq!(dt.to_rfc3339_opts(SecondsFormat::Secs, true), /// "2018-01-26T10:30:09+08:00"); /// ``` + #[cfg(any(feature = "alloc", feature = "std", test))] pub fn to_rfc3339_opts(&self, secform: SecondsFormat, use_z: bool) -> String { use format::Numeric::*; use format::Pad::Zero; @@ -368,19 +451,20 @@ impl DateTime where Tz::Offset: fmt::Display { match ssitem { None => self.format_with_items( - PREFIX.iter().chain([tzitem].iter()).cloned() + PREFIX.iter().chain([tzitem].iter()) ).to_string(), Some(s) => self.format_with_items( - PREFIX.iter().chain([s, tzitem].iter()).cloned() + PREFIX.iter().chain([s, tzitem].iter()) ).to_string(), } } /// Formats the combined date and time with the specified formatting items. + #[cfg(any(feature = "alloc", feature = "std", test))] #[inline] - pub fn format_with_items<'a, I>(&self, items: I) -> DelayedFormat - where I: Iterator> + Clone { + pub fn format_with_items<'a, I, B>(&self, items: I) -> DelayedFormat + where I: Iterator + Clone, B: Borrow> { let local = self.naive_local(); DelayedFormat::new_with_offset(Some(local.date()), Some(local.time()), &self.offset, items) } @@ -388,6 +472,7 @@ impl DateTime where Tz::Offset: fmt::Display { /// Formats the combined date and time with the specified format string. /// See the [`format::strftime` module](./format/strftime/index.html) /// on the supported escape sequences. + #[cfg(any(feature = "alloc", feature = "std", test))] #[inline] pub fn format<'a>(&self, fmt: &'a str) -> DelayedFormat> { self.format_with_items(StrftimeItems::new(fmt)) @@ -537,24 +622,24 @@ impl str::FromStr for DateTime { fn from_str(s: &str) -> ParseResult> { const ITEMS: &'static [Item<'static>] = &[ - Item::Space(""), Item::Numeric(Numeric::Year, Pad::Zero), + Item::Numeric(Numeric::Year, Pad::Zero), Item::Space(""), Item::Literal("-"), - Item::Space(""), Item::Numeric(Numeric::Month, Pad::Zero), + Item::Numeric(Numeric::Month, Pad::Zero), Item::Space(""), Item::Literal("-"), - Item::Space(""), Item::Numeric(Numeric::Day, Pad::Zero), + Item::Numeric(Numeric::Day, Pad::Zero), Item::Space(""), Item::Literal("T"), // XXX shouldn't this be case-insensitive? 
- Item::Space(""), Item::Numeric(Numeric::Hour, Pad::Zero), + Item::Numeric(Numeric::Hour, Pad::Zero), Item::Space(""), Item::Literal(":"), - Item::Space(""), Item::Numeric(Numeric::Minute, Pad::Zero), + Item::Numeric(Numeric::Minute, Pad::Zero), Item::Space(""), Item::Literal(":"), - Item::Space(""), Item::Numeric(Numeric::Second, Pad::Zero), + Item::Numeric(Numeric::Second, Pad::Zero), Item::Fixed(Fixed::Nanosecond), Item::Space(""), Item::Fixed(Fixed::TimezoneOffsetZ), Item::Space(""), ]; let mut parsed = Parsed::new(); - try!(parse(&mut parsed, s, ITEMS.iter().cloned())); + parse(&mut parsed, s, ITEMS.iter())?; parsed.to_datetime() } } @@ -576,6 +661,7 @@ impl str::FromStr for DateTime { } } +#[cfg(any(feature = "std", test))] impl From for DateTime { fn from(t: SystemTime) -> DateTime { let (sec, nsec) = match t.duration_since(UNIX_EPOCH) { @@ -601,6 +687,7 @@ impl From for DateTime { } } +#[cfg(any(feature = "std", test))] impl From> for SystemTime { fn from(dt: DateTime) -> SystemTime { use std::time::Duration; @@ -616,11 +703,19 @@ impl From> for SystemTime { } } +#[test] +fn test_auto_conversion() { + let utc_dt = Utc.ymd(2018, 9, 5).and_hms(23, 58, 0); + let cdt_dt = FixedOffset::west(5 * 60 * 60).ymd(2018, 9, 5).and_hms(18, 58, 0); + let utc_dt2: DateTime = cdt_dt.into(); + assert_eq!(utc_dt, utc_dt2); +} + #[cfg(all(test, any(feature = "rustc-serialize", feature = "serde")))] fn test_encodable_json(to_string_utc: FUtc, to_string_fixed: FFixed) where FUtc: Fn(&DateTime) -> Result, FFixed: Fn(&DateTime) -> Result, - E: ::std::fmt::Debug + E: ::core::fmt::Debug { assert_eq!(to_string_utc(&Utc.ymd(2014, 7, 24).and_hms(12, 34, 6)).ok(), Some(r#""2014-07-24T12:34:06Z""#.into())); @@ -638,7 +733,7 @@ fn test_decodable_json(utc_from_str: FUtc, where FUtc: Fn(&str) -> Result, E>, FFixed: Fn(&str) -> Result, E>, FLocal: Fn(&str) -> Result, E>, - E: ::std::fmt::Debug + E: ::core::fmt::Debug { // should check against the offset as well (the normal DateTime comparison will ignore them) fn norm(dt: &Option>) -> Option<(&DateTime, &Tz::Offset)> { @@ -675,7 +770,7 @@ fn test_decodable_json_timestamps(utc_from_str: FUtc, where FUtc: Fn(&str) -> Result, E>, FFixed: Fn(&str) -> Result, E>, FLocal: Fn(&str) -> Result, E>, - E: ::std::fmt::Debug + E: ::core::fmt::Debug { fn norm(dt: &Option>) -> Option<(&DateTime, &Tz::Offset)> { dt.as_ref().map(|dt| (dt, dt.offset())) @@ -699,8 +794,8 @@ fn test_decodable_json_timestamps(utc_from_str: FUtc, #[cfg(feature = "rustc-serialize")] pub mod rustc_serialize { - use std::fmt; - use std::ops::Deref; + use core::fmt; + use core::ops::Deref; use super::DateTime; #[cfg(feature="clock")] use offset::Local; @@ -713,7 +808,7 @@ pub mod rustc_serialize { } } - // try!-like function to convert a LocalResult into a serde-ish Result + // lik? 
function to convert a LocalResult into a serde-ish Result fn from(me: LocalResult, d: &mut D) -> Result where D: Decoder, T: fmt::Display, @@ -828,25 +923,38 @@ pub mod rustc_serialize { /// documented at re-export site #[cfg(feature = "serde")] pub mod serde { - use std::fmt; + use core::fmt; use super::DateTime; #[cfg(feature="clock")] use offset::Local; use offset::{LocalResult, TimeZone, Utc, FixedOffset}; use serdelib::{ser, de}; + use {SerdeError, ne_timestamp}; + + #[doc(hidden)] + #[derive(Debug)] + pub struct SecondsTimestampVisitor; + + #[doc(hidden)] + #[derive(Debug)] + pub struct NanoSecondsTimestampVisitor; - // try!-like function to convert a LocalResult into a serde-ish Result + #[doc(hidden)] + #[derive(Debug)] + pub struct MilliSecondsTimestampVisitor; + + // lik? function to convert a LocalResult into a serde-ish Result fn serde_from(me: LocalResult, ts: &V) -> Result - where E: de::Error, - V: fmt::Display, - T: fmt::Display, + where + E: de::Error, + V: fmt::Display, + T: fmt::Display, { match me { LocalResult::None => Err(E::custom( - format!("value is not a legal timestamp: {}", ts))), + ne_timestamp(ts))), LocalResult::Ambiguous(min, max) => Err(E::custom( - format!("value is an ambiguous timestamp: {}, could be either of {}, {}", - ts, min, max))), + SerdeError::Ambiguous { timestamp: ts, min: min, max: max })), LocalResult::Single(val) => Ok(val) } } @@ -888,13 +996,13 @@ pub mod serde { /// # fn main() { example().unwrap(); } /// ``` pub mod ts_nanoseconds { - use std::fmt; + use core::fmt; use serdelib::{ser, de}; use {DateTime, Utc}; use offset::TimeZone; - use super::serde_from; + use super::{serde_from, NanoSecondsTimestampVisitor}; /// Serialize a UTC datetime into an integer number of nanoseconds since the epoch /// @@ -965,17 +1073,15 @@ pub mod serde { pub fn deserialize<'de, D>(d: D) -> Result, D::Error> where D: de::Deserializer<'de> { - Ok(try!(d.deserialize_i64(NanoSecondsTimestampVisitor))) + Ok(d.deserialize_i64(NanoSecondsTimestampVisitor)?) } - struct NanoSecondsTimestampVisitor; - impl<'de> de::Visitor<'de> for NanoSecondsTimestampVisitor { type Value = DateTime; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - write!(formatter, "a unix timestamp in seconds") + write!(formatter, "a unix timestamp in nanoseconds") } /// Deserialize a timestamp in nanoseconds since the epoch @@ -998,6 +1104,152 @@ pub mod serde { } } + /// Ser/de to/from optional timestamps in nanoseconds + /// + /// Intended for use with `serde`'s `with` attribute. + /// + /// # Example: + /// + /// ```rust + /// # // We mark this ignored so that we can test on 1.13 (which does not + /// # // support custom derive), and run tests with --ignored on beta and + /// # // nightly to actually trigger these. 
+ /// # + /// # #[macro_use] extern crate serde_derive; + /// # #[macro_use] extern crate serde_json; + /// # extern crate chrono; + /// # use chrono::{TimeZone, DateTime, Utc}; + /// use chrono::serde::ts_nanoseconds_option; + /// #[derive(Deserialize, Serialize)] + /// struct S { + /// #[serde(with = "ts_nanoseconds_option")] + /// time: Option> + /// } + /// + /// # fn example() -> Result { + /// let time = Some(Utc.ymd(2018, 5, 17).and_hms_nano(02, 04, 59, 918355733)); + /// let my_s = S { + /// time: time.clone(), + /// }; + /// + /// let as_string = serde_json::to_string(&my_s)?; + /// assert_eq!(as_string, r#"{"time":1526522699918355733}"#); + /// let my_s: S = serde_json::from_str(&as_string)?; + /// assert_eq!(my_s.time, time); + /// # Ok(my_s) + /// # } + /// # fn main() { example().unwrap(); } + /// ``` + pub mod ts_nanoseconds_option { + use core::fmt; + use serdelib::{ser, de}; + + use {DateTime, Utc}; + + use super::{ts_nanoseconds, NanoSecondsTimestampVisitor}; + + /// Serialize a UTC datetime into an integer number of nanoseconds since the epoch or none + /// + /// Intended for use with `serde`s `serialize_with` attribute. + /// + /// # Example: + /// + /// ```rust + /// # // We mark this ignored so that we can test on 1.13 (which does not + /// # // support custom derive), and run tests with --ignored on beta and + /// # // nightly to actually trigger these. + /// # + /// # #[macro_use] extern crate serde_derive; + /// # #[macro_use] extern crate serde_json; + /// # extern crate chrono; + /// # use chrono::{TimeZone, DateTime, Utc}; + /// use chrono::serde::ts_nanoseconds_option::serialize as to_nano_tsopt; + /// #[derive(Serialize)] + /// struct S { + /// #[serde(serialize_with = "to_nano_tsopt")] + /// time: Option> + /// } + /// + /// # fn example() -> Result { + /// let my_s = S { + /// time: Some(Utc.ymd(2018, 5, 17).and_hms_nano(02, 04, 59, 918355733)), + /// }; + /// let as_string = serde_json::to_string(&my_s)?; + /// assert_eq!(as_string, r#"{"time":1526522699918355733}"#); + /// # Ok(as_string) + /// # } + /// # fn main() { example().unwrap(); } + /// ``` + pub fn serialize(opt: &Option>, serializer: S) -> Result + where S: ser::Serializer + { + match *opt { + Some(ref dt) => ts_nanoseconds::serialize(dt, serializer), + None => serializer.serialize_none(), + } + } + + /// Deserialize a `DateTime` from a nanosecond timestamp or none + /// + /// Intended for use with `serde`s `deserialize_with` attribute. + /// + /// # Example: + /// + /// ```rust + /// # // We mark this ignored so that we can test on 1.13 (which does not + /// # // support custom derive), and run tests with --ignored on beta and + /// # // nightly to actually trigger these. + /// # + /// # #[macro_use] extern crate serde_derive; + /// # #[macro_use] extern crate serde_json; + /// # extern crate chrono; + /// # use chrono::{DateTime, Utc}; + /// use chrono::serde::ts_nanoseconds_option::deserialize as from_nano_tsopt; + /// #[derive(Deserialize)] + /// struct S { + /// #[serde(deserialize_with = "from_nano_tsopt")] + /// time: Option> + /// } + /// + /// # fn example() -> Result { + /// let my_s: S = serde_json::from_str(r#"{ "time": 1526522699918355733 }"#)?; + /// # Ok(my_s) + /// # } + /// # fn main() { example().unwrap(); } + /// ``` + pub fn deserialize<'de, D>(d: D) -> Result>, D::Error> + where D: de::Deserializer<'de> + { + Ok(d.deserialize_option(OptionNanoSecondsTimestampVisitor)?) 
+ } + + struct OptionNanoSecondsTimestampVisitor; + + impl<'de> de::Visitor<'de> for OptionNanoSecondsTimestampVisitor { + type Value = Option>; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result + { + formatter.write_str("a unix timestamp in nanoseconds or none") + } + + /// Deserialize a timestamp in seconds since the epoch + fn visit_some(self, d: D) -> Result>, D::Error> + where + D: de::Deserializer<'de>, + { + d.deserialize_i64(NanoSecondsTimestampVisitor).map(|val| Some(val)) + } + + /// Deserialize a timestamp in seconds since the epoch + fn visit_none(self) -> Result>, E> + where E: de::Error + { + Ok(None) + } + } + } + /// Ser/de to/from timestamps in milliseconds /// /// Intended for use with `serde`s `with` attribute. @@ -1035,13 +1287,13 @@ pub mod serde { /// # fn main() { example().unwrap(); } /// ``` pub mod ts_milliseconds { - use std::fmt; + use core::fmt; use serdelib::{ser, de}; use {DateTime, Utc}; use offset::TimeZone; - use super::serde_from; + use super::{serde_from, MilliSecondsTimestampVisitor}; /// Serialize a UTC datetime into an integer number of milliseconds since the epoch /// @@ -1112,11 +1364,9 @@ pub mod serde { pub fn deserialize<'de, D>(d: D) -> Result, D::Error> where D: de::Deserializer<'de> { - Ok(try!(d.deserialize_i64(MilliSecondsTimestampVisitor).map(|dt| dt.with_timezone(&Utc)))) + Ok(d.deserialize_i64(MilliSecondsTimestampVisitor).map(|dt| dt.with_timezone(&Utc))?) } - struct MilliSecondsTimestampVisitor; - impl<'de> de::Visitor<'de> for MilliSecondsTimestampVisitor { type Value = DateTime; @@ -1145,6 +1395,152 @@ pub mod serde { } } + /// Ser/de to/from optional timestamps in milliseconds + /// + /// Intended for use with `serde`s `with` attribute. + /// + /// # Example + /// + /// ```rust + /// # // We mark this ignored so that we can test on 1.13 (which does not + /// # // support custom derive), and run tests with --ignored on beta and + /// # // nightly to actually trigger these. + /// # + /// # #[macro_use] extern crate serde_derive; + /// # #[macro_use] extern crate serde_json; + /// # extern crate chrono; + /// # use chrono::{TimeZone, DateTime, Utc}; + /// use chrono::serde::ts_milliseconds_option; + /// #[derive(Deserialize, Serialize)] + /// struct S { + /// #[serde(with = "ts_milliseconds_option")] + /// time: Option> + /// } + /// + /// # fn example() -> Result { + /// let time = Some(Utc.ymd(2018, 5, 17).and_hms_milli(02, 04, 59, 918)); + /// let my_s = S { + /// time: time.clone(), + /// }; + /// + /// let as_string = serde_json::to_string(&my_s)?; + /// assert_eq!(as_string, r#"{"time":1526522699918}"#); + /// let my_s: S = serde_json::from_str(&as_string)?; + /// assert_eq!(my_s.time, time); + /// # Ok(my_s) + /// # } + /// # fn main() { example().unwrap(); } + /// ``` + pub mod ts_milliseconds_option { + use core::fmt; + use serdelib::{ser, de}; + + use {DateTime, Utc}; + + use super::{ts_milliseconds, MilliSecondsTimestampVisitor}; + + /// Serialize a UTC datetime into an integer number of milliseconds since the epoch or none + /// + /// Intended for use with `serde`s `serialize_with` attribute. + /// + /// # Example: + /// + /// ```rust + /// # // We mark this ignored so that we can test on 1.13 (which does not + /// # // support custom derive), and run tests with --ignored on beta and + /// # // nightly to actually trigger these. 
+ /// # + /// # #[macro_use] extern crate serde_derive; + /// # #[macro_use] extern crate serde_json; + /// # extern crate chrono; + /// # use chrono::{TimeZone, DateTime, Utc}; + /// use chrono::serde::ts_milliseconds_option::serialize as to_milli_tsopt; + /// #[derive(Serialize)] + /// struct S { + /// #[serde(serialize_with = "to_milli_tsopt")] + /// time: Option> + /// } + /// + /// # fn example() -> Result { + /// let my_s = S { + /// time: Some(Utc.ymd(2018, 5, 17).and_hms_milli(02, 04, 59, 918)), + /// }; + /// let as_string = serde_json::to_string(&my_s)?; + /// assert_eq!(as_string, r#"{"time":1526522699918}"#); + /// # Ok(as_string) + /// # } + /// # fn main() { example().unwrap(); } + /// ``` + pub fn serialize(opt: &Option>, serializer: S) -> Result + where S: ser::Serializer + { + match *opt { + Some(ref dt) => ts_milliseconds::serialize(dt, serializer), + None => serializer.serialize_none(), + } + } + + /// Deserialize a `DateTime` from a millisecond timestamp or none + /// + /// Intended for use with `serde`s `deserialize_with` attribute. + /// + /// # Example: + /// + /// ```rust + /// # // We mark this ignored so that we can test on 1.13 (which does not + /// # // support custom derive), and run tests with --ignored on beta and + /// # // nightly to actually trigger these. + /// # + /// # #[macro_use] extern crate serde_derive; + /// # #[macro_use] extern crate serde_json; + /// # extern crate chrono; + /// # use chrono::{DateTime, Utc}; + /// use chrono::serde::ts_milliseconds_option::deserialize as from_milli_tsopt; + /// #[derive(Deserialize)] + /// struct S { + /// #[serde(deserialize_with = "from_milli_tsopt")] + /// time: Option> + /// } + /// + /// # fn example() -> Result { + /// let my_s: S = serde_json::from_str(r#"{ "time": 1526522699918 }"#)?; + /// # Ok(my_s) + /// # } + /// # fn main() { example().unwrap(); } + /// ``` + pub fn deserialize<'de, D>(d: D) -> Result>, D::Error> + where D: de::Deserializer<'de> + { + Ok(d.deserialize_option(OptionMilliSecondsTimestampVisitor).map(|opt| opt.map(|dt| dt.with_timezone(&Utc)))?) + } + + struct OptionMilliSecondsTimestampVisitor; + + impl<'de> de::Visitor<'de> for OptionMilliSecondsTimestampVisitor { + type Value = Option>; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result + { + formatter.write_str("a unix timestamp in milliseconds or none") + } + + /// Deserialize a timestamp in seconds since the epoch + fn visit_some(self, d: D) -> Result>, D::Error> + where + D: de::Deserializer<'de>, + { + d.deserialize_i64(MilliSecondsTimestampVisitor).map(|val| Some(val)) + } + + /// Deserialize a timestamp in seconds since the epoch + fn visit_none(self) -> Result>, E> + where E: de::Error + { + Ok(None) + } + } + } + /// Ser/de to/from timestamps in seconds /// /// Intended for use with `serde`'s `with` attribute. @@ -1182,13 +1578,13 @@ pub mod serde { /// # fn main() { example().unwrap(); } /// ``` pub mod ts_seconds { - use std::fmt; + use core::fmt; use serdelib::{ser, de}; use {DateTime, Utc}; use offset::TimeZone; - use super::serde_from; + use super::{serde_from, SecondsTimestampVisitor}; /// Serialize a UTC datetime into an integer number of seconds since the epoch /// @@ -1259,11 +1655,9 @@ pub mod serde { pub fn deserialize<'de, D>(d: D) -> Result, D::Error> where D: de::Deserializer<'de> { - Ok(try!(d.deserialize_i64(SecondsTimestampVisitor))) + Ok(d.deserialize_i64(SecondsTimestampVisitor)?) 
} - struct SecondsTimestampVisitor; - impl<'de> de::Visitor<'de> for SecondsTimestampVisitor { type Value = DateTime; @@ -1288,6 +1682,152 @@ pub mod serde { } } + /// Ser/de to/from optional timestamps in seconds + /// + /// Intended for use with `serde`'s `with` attribute. + /// + /// # Example: + /// + /// ```rust + /// # // We mark this ignored so that we can test on 1.13 (which does not + /// # // support custom derive), and run tests with --ignored on beta and + /// # // nightly to actually trigger these. + /// # + /// # #[macro_use] extern crate serde_derive; + /// # #[macro_use] extern crate serde_json; + /// # extern crate chrono; + /// # use chrono::{TimeZone, DateTime, Utc}; + /// use chrono::serde::ts_seconds_option; + /// #[derive(Deserialize, Serialize)] + /// struct S { + /// #[serde(with = "ts_seconds_option")] + /// time: Option> + /// } + /// + /// # fn example() -> Result { + /// let time = Some(Utc.ymd(2015, 5, 15).and_hms(10, 0, 0)); + /// let my_s = S { + /// time: time.clone(), + /// }; + /// + /// let as_string = serde_json::to_string(&my_s)?; + /// assert_eq!(as_string, r#"{"time":1431684000}"#); + /// let my_s: S = serde_json::from_str(&as_string)?; + /// assert_eq!(my_s.time, time); + /// # Ok(my_s) + /// # } + /// # fn main() { example().unwrap(); } + /// ``` + pub mod ts_seconds_option { + use core::fmt; + use serdelib::{ser, de}; + + use {DateTime, Utc}; + + use super::{ts_seconds, SecondsTimestampVisitor}; + + /// Serialize a UTC datetime into an integer number of seconds since the epoch or none + /// + /// Intended for use with `serde`s `serialize_with` attribute. + /// + /// # Example: + /// + /// ```rust + /// # // We mark this ignored so that we can test on 1.13 (which does not + /// # // support custom derive), and run tests with --ignored on beta and + /// # // nightly to actually trigger these. + /// # + /// # #[macro_use] extern crate serde_derive; + /// # #[macro_use] extern crate serde_json; + /// # extern crate chrono; + /// # use chrono::{TimeZone, DateTime, Utc}; + /// use chrono::serde::ts_seconds_option::serialize as to_tsopt; + /// #[derive(Serialize)] + /// struct S { + /// #[serde(serialize_with = "to_tsopt")] + /// time: Option> + /// } + /// + /// # fn example() -> Result { + /// let my_s = S { + /// time: Some(Utc.ymd(2015, 5, 15).and_hms(10, 0, 0)), + /// }; + /// let as_string = serde_json::to_string(&my_s)?; + /// assert_eq!(as_string, r#"{"time":1431684000}"#); + /// # Ok(as_string) + /// # } + /// # fn main() { example().unwrap(); } + /// ``` + pub fn serialize(opt: &Option>, serializer: S) -> Result + where S: ser::Serializer + { + match *opt { + Some(ref dt) => ts_seconds::serialize(dt, serializer), + None => serializer.serialize_none(), + } + } + + /// Deserialize a `DateTime` from a seconds timestamp or none + /// + /// Intended for use with `serde`s `deserialize_with` attribute. + /// + /// # Example: + /// + /// ```rust + /// # // We mark this ignored so that we can test on 1.13 (which does not + /// # // support custom derive), and run tests with --ignored on beta and + /// # // nightly to actually trigger these. 
+ /// # + /// # #[macro_use] extern crate serde_derive; + /// # #[macro_use] extern crate serde_json; + /// # extern crate chrono; + /// # use chrono::{DateTime, Utc}; + /// use chrono::serde::ts_seconds_option::deserialize as from_tsopt; + /// #[derive(Deserialize)] + /// struct S { + /// #[serde(deserialize_with = "from_tsopt")] + /// time: Option> + /// } + /// + /// # fn example() -> Result { + /// let my_s: S = serde_json::from_str(r#"{ "time": 1431684000 }"#)?; + /// # Ok(my_s) + /// # } + /// # fn main() { example().unwrap(); } + /// ``` + pub fn deserialize<'de, D>(d: D) -> Result>, D::Error> + where D: de::Deserializer<'de> + { + Ok(d.deserialize_option(OptionSecondsTimestampVisitor)?) + } + + struct OptionSecondsTimestampVisitor; + + impl<'de> de::Visitor<'de> for OptionSecondsTimestampVisitor { + type Value = Option>; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result + { + formatter.write_str("a unix timestamp in seconds or none") + } + + /// Deserialize a timestamp in seconds since the epoch + fn visit_some(self, d: D) -> Result>, D::Error> + where + D: de::Deserializer<'de>, + { + d.deserialize_i64(SecondsTimestampVisitor).map(|val| Some(val)) + } + + /// Deserialize a timestamp in seconds since the epoch + fn visit_none(self) -> Result>, E> + where E: de::Error + { + Ok(None) + } + } + } + impl ser::Serialize for DateTime { /// Serialize into a rfc3339 time string /// @@ -1324,7 +1864,7 @@ pub mod serde { fn visit_str(self, value: &str) -> Result, E> where E: de::Error { - value.parse().map_err(|err| E::custom(format!("{}", err))) + value.parse().map_err(|err: ::format::ParseError| E::custom(err)) } } @@ -1644,4 +2184,83 @@ mod tests { assert_eq!(SystemTime::from(epoch.with_timezone(&FixedOffset::east(32400))), UNIX_EPOCH); assert_eq!(SystemTime::from(epoch.with_timezone(&FixedOffset::west(28800))), UNIX_EPOCH); } + + #[test] + fn test_datetime_format_alignment() { + let datetime = Utc.ymd(2007, 01, 02); + + // Item::Literal + let percent = datetime.format("%%"); + assert_eq!(" %", format!("{:>3}", percent)); + assert_eq!("% ", format!("{:<3}", percent)); + assert_eq!(" % ", format!("{:^3}", percent)); + + // Item::Numeric + let year = datetime.format("%Y"); + assert_eq!(" 2007", format!("{:>6}", year)); + assert_eq!("2007 ", format!("{:<6}", year)); + assert_eq!(" 2007 ", format!("{:^6}", year)); + + // Item::Fixed + let tz = datetime.format("%Z"); + assert_eq!(" UTC", format!("{:>5}", tz)); + assert_eq!("UTC ", format!("{:<5}", tz)); + assert_eq!(" UTC ", format!("{:^5}", tz)); + + // [Item::Numeric, Item::Space, Item::Literal, Item::Space, Item::Numeric] + let ymd = datetime.format("%Y %B %d"); + let ymd_formatted = "2007 January 02"; + assert_eq!(format!(" {}", ymd_formatted), format!("{:>17}", ymd)); + assert_eq!(format!("{} ", ymd_formatted), format!("{:<17}", ymd)); + assert_eq!(format!(" {} ", ymd_formatted), format!("{:^17}", ymd)); + } + + #[cfg(feature = "bench")] + #[bench] + fn bench_datetime_parse_from_rfc2822(bh: &mut test::Bencher) { + bh.iter(|| { + let str = test::black_box("Wed, 18 Feb 2015 23:16:09 +0000"); + DateTime::parse_from_rfc2822(str).unwrap() + }); + } + + #[cfg(feature = "bench")] + #[bench] + fn bench_datetime_parse_from_rfc3339(bh: &mut test::Bencher) { + bh.iter(|| { + let str = test::black_box("2015-02-18T23:59:60.234567+05:00"); + DateTime::parse_from_rfc3339(str).unwrap() + }); + } + + #[cfg(feature = "bench")] + #[bench] + fn bench_datetime_from_str(bh: &mut test::Bencher) { + use std::str::FromStr; + + bh.iter(|| { + 
let str = test::black_box("2019-03-30T18:46:57.193Z"); + DateTime::::from_str(str).unwrap() + }); + } + + #[cfg(feature = "bench")] + #[bench] + fn bench_datetime_to_rfc2822(bh: &mut test::Bencher) { + let pst = FixedOffset::east(8 * 60 * 60); + let dt = pst.ymd(2018, 1, 11).and_hms_nano(10, 5, 13, 084_660_000); + bh.iter(|| { + test::black_box(dt).to_rfc2822() + }); + } + + #[cfg(feature = "bench")] + #[bench] + fn bench_datetime_to_rfc3339(bh: &mut test::Bencher) { + let pst = FixedOffset::east(8 * 60 * 60); + let dt = pst.ymd(2018, 1, 11).and_hms_nano(10, 5, 13, 084_660_000); + bh.iter(|| { + test::black_box(dt).to_rfc3339() + }); + } } diff --git a/third_party/rust/chrono/src/format/mod.rs b/third_party/rust/chrono/src/format/mod.rs index bd8337ba00..83405d3bc7 100644 --- a/third_party/rust/chrono/src/format/mod.rs +++ b/third_party/rust/chrono/src/format/mod.rs @@ -15,20 +15,33 @@ //! currently Chrono supports [one built-in syntax closely resembling //! C's `strftime` format](./strftime/index.html). -use std::fmt; -use std::str::FromStr; -use std::error::Error; +#![allow(ellipsis_inclusive_range_patterns)] -use {Datelike, Timelike, Weekday, ParseWeekdayError}; +use core::borrow::Borrow; +use core::fmt; +use core::str::FromStr; +#[cfg(any(feature = "std", test))] +use std::error::Error; +#[cfg(feature = "alloc")] +use alloc::boxed::Box; +#[cfg(feature = "alloc")] +use alloc::string::{String, ToString}; + +#[cfg(any(feature = "alloc", feature = "std", test))] +use {Datelike, Timelike}; +use {Weekday, ParseWeekdayError}; +#[cfg(any(feature = "alloc", feature = "std", test))] use div::{div_floor, mod_floor}; +#[cfg(any(feature = "alloc", feature = "std", test))] use offset::{Offset, FixedOffset}; +#[cfg(any(feature = "alloc", feature = "std", test))] use naive::{NaiveDate, NaiveTime}; pub use self::strftime::StrftimeItems; pub use self::parsed::Parsed; pub use self::parse::parse; -/// An unhabitated type used for `InternalNumeric` and `InternalFixed` below. +/// An uninhabited type used for `InternalNumeric` and `InternalFixed` below. #[derive(Clone, PartialEq, Eq)] enum Void {} @@ -53,7 +66,7 @@ pub enum Pad { /// /// The **parsing width** is the maximal width to be scanned. /// The parser only tries to consume from one to given number of digits (greedily). -/// It also trims the preceding whitespaces if any. +/// It also trims the preceding whitespace if any. /// It cannot parse the negative number, so some date and time cannot be formatted then /// parsed with the same formatting items. #[derive(Clone, PartialEq, Eq, Debug)] @@ -183,13 +196,13 @@ pub enum Fixed { TimezoneName, /// Offset from the local time to UTC (`+09:00` or `-04:00` or `+00:00`). /// - /// In the parser, the colon can be omitted and/or surrounded with any amount of whitespaces. + /// In the parser, the colon can be omitted and/or surrounded with any amount of whitespace. /// The offset is limited from `-24:00` to `+24:00`, /// which is same to [`FixedOffset`](../offset/struct.FixedOffset.html)'s range. TimezoneOffsetColon, /// Offset from the local time to UTC (`+09:00` or `-04:00` or `Z`). /// - /// In the parser, the colon can be omitted and/or surrounded with any amount of whitespaces, + /// In the parser, the colon can be omitted and/or surrounded with any amount of whitespace, /// and `Z` can be either in upper case or in lower case. /// The offset is limited from `-24:00` to `+24:00`, /// which is same to [`FixedOffset`](../offset/struct.FixedOffset.html)'s range. 
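The `test_datetime_format_alignment` case added above pins down a user-visible effect of the `format` rewrite in the following hunks: output is now rendered into an intermediate `String` and emitted through `Formatter::pad`, so width, fill, and alignment flags apply to formatted dates and times. A small illustrative sketch of that behaviour, not part of the patch:

```rust
use chrono::{TimeZone, Utc};

fn main() {
    let date = Utc.ymd(2007, 1, 2);

    // `DelayedFormat` honours the standard formatting flags because the
    // rendered text is passed through `Formatter::pad` at the end.
    assert_eq!(format!("{:>6}", date.format("%Y")), "  2007");
    assert_eq!(format!("{:<6}", date.format("%Y")), "2007  ");
    assert_eq!(format!("{:*^8}", date.format("%Y")), "**2007**");
}
```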
@@ -243,10 +256,12 @@ pub enum Item<'a> { /// A literally printed and parsed text. Literal(&'a str), /// Same to `Literal` but with the string owned by the item. + #[cfg(any(feature = "alloc", feature = "std", test))] OwnedLiteral(Box), /// Whitespace. Prints literally but reads zero or more whitespace. Space(&'a str), /// Same to `Space` but with the string owned by the item. + #[cfg(any(feature = "alloc", feature = "std", test))] OwnedSpace(Box), /// Numeric item. Can be optionally padded to the maximal length (if any) when formatting; /// the parser simply ignores any padded whitespace and zeroes. @@ -303,13 +318,7 @@ enum ParseErrorKind { /// Same to `Result`. pub type ParseResult = Result; -impl fmt::Display for ParseError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.description().fmt(f) - } -} - -impl Error for ParseError { +impl ParseError { fn description(&self) -> &str { match self.0 { ParseErrorKind::OutOfRange => "input is out of range", @@ -323,6 +332,19 @@ impl Error for ParseError { } } +impl fmt::Display for ParseError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.description().fmt(f) + } +} + +#[cfg(any(feature = "std", test))] +impl Error for ParseError { + fn description(&self) -> &str { + self.description() + } +} + // to be used in this module and submodules const OUT_OF_RANGE: ParseError = ParseError(ParseErrorKind::OutOfRange); const IMPOSSIBLE: ParseError = ParseError(ParseErrorKind::Impossible); @@ -334,9 +356,16 @@ const BAD_FORMAT: ParseError = ParseError(ParseErrorKind::BadFormat); /// Tries to format given arguments with given formatting items. /// Internally used by `DelayedFormat`. -pub fn format<'a, I>(w: &mut fmt::Formatter, date: Option<&NaiveDate>, time: Option<&NaiveTime>, - off: Option<&(String, FixedOffset)>, items: I) -> fmt::Result - where I: Iterator> { +#[cfg(any(feature = "alloc", feature = "std", test))] +pub fn format<'a, I, B>( + w: &mut fmt::Formatter, + date: Option<&NaiveDate>, + time: Option<&NaiveTime>, + off: Option<&(String, FixedOffset)>, + items: I, +) -> fmt::Result + where I: Iterator + Clone, B: Borrow> +{ // full and abbreviated month and weekday names static SHORT_MONTHS: [&'static str; 12] = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]; @@ -348,12 +377,16 @@ pub fn format<'a, I>(w: &mut fmt::Formatter, date: Option<&NaiveDate>, time: Opt static LONG_WEEKDAYS: [&'static str; 7] = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]; + use core::fmt::Write; + let mut result = String::new(); + for item in items { - match item { - Item::Literal(s) | Item::Space(s) => try!(write!(w, "{}", s)), - Item::OwnedLiteral(ref s) | Item::OwnedSpace(ref s) => try!(write!(w, "{}", s)), + match item.borrow() { + &Item::Literal(s) | &Item::Space(s) => result.push_str(s), + #[cfg(any(feature = "alloc", feature = "std", test))] + &Item::OwnedLiteral(ref s) | &Item::OwnedSpace(ref s) => result.push_str(s), - Item::Numeric(spec, pad) => { + &Item::Numeric(ref spec, ref pad) => { use self::Numeric::*; let week_from_sun = |d: &NaiveDate| @@ -362,31 +395,31 @@ pub fn format<'a, I>(w: &mut fmt::Formatter, date: Option<&NaiveDate>, time: Opt (d.ordinal() as i32 - d.weekday().num_days_from_monday() as i32 + 7) / 7; let (width, v) = match spec { - Year => (4, date.map(|d| i64::from(d.year()))), - YearDiv100 => (2, date.map(|d| div_floor(i64::from(d.year()), 100))), - YearMod100 => (2, date.map(|d| mod_floor(i64::from(d.year()), 100))), - IsoYear => (4, 
date.map(|d| i64::from(d.iso_week().year()))), - IsoYearDiv100 => (2, date.map(|d| div_floor( + &Year => (4, date.map(|d| i64::from(d.year()))), + &YearDiv100 => (2, date.map(|d| div_floor(i64::from(d.year()), 100))), + &YearMod100 => (2, date.map(|d| mod_floor(i64::from(d.year()), 100))), + &IsoYear => (4, date.map(|d| i64::from(d.iso_week().year()))), + &IsoYearDiv100 => (2, date.map(|d| div_floor( i64::from(d.iso_week().year()), 100))), - IsoYearMod100 => (2, date.map(|d| mod_floor( + &IsoYearMod100 => (2, date.map(|d| mod_floor( i64::from(d.iso_week().year()), 100))), - Month => (2, date.map(|d| i64::from(d.month()))), - Day => (2, date.map(|d| i64::from(d.day()))), - WeekFromSun => (2, date.map(|d| i64::from(week_from_sun(d)))), - WeekFromMon => (2, date.map(|d| i64::from(week_from_mon(d)))), - IsoWeek => (2, date.map(|d| i64::from(d.iso_week().week()))), - NumDaysFromSun => (1, date.map(|d| i64::from(d.weekday() + &Month => (2, date.map(|d| i64::from(d.month()))), + &Day => (2, date.map(|d| i64::from(d.day()))), + &WeekFromSun => (2, date.map(|d| i64::from(week_from_sun(d)))), + &WeekFromMon => (2, date.map(|d| i64::from(week_from_mon(d)))), + &IsoWeek => (2, date.map(|d| i64::from(d.iso_week().week()))), + &NumDaysFromSun => (1, date.map(|d| i64::from(d.weekday() .num_days_from_sunday()))), - WeekdayFromMon => (1, date.map(|d| i64::from(d.weekday() + &WeekdayFromMon => (1, date.map(|d| i64::from(d.weekday() .number_from_monday()))), - Ordinal => (3, date.map(|d| i64::from(d.ordinal()))), - Hour => (2, time.map(|t| i64::from(t.hour()))), - Hour12 => (2, time.map(|t| i64::from(t.hour12().1))), - Minute => (2, time.map(|t| i64::from(t.minute()))), - Second => (2, time.map(|t| i64::from(t.second() + + &Ordinal => (3, date.map(|d| i64::from(d.ordinal()))), + &Hour => (2, time.map(|t| i64::from(t.hour()))), + &Hour12 => (2, time.map(|t| i64::from(t.hour12().1))), + &Minute => (2, time.map(|t| i64::from(t.minute()))), + &Second => (2, time.map(|t| i64::from(t.second() + t.nanosecond() / 1_000_000_000))), - Nanosecond => (9, time.map(|t| i64::from(t.nanosecond() % 1_000_000_000))), - Timestamp => (1, match (date, time, off) { + &Nanosecond => (9, time.map(|t| i64::from(t.nanosecond() % 1_000_000_000))), + &Timestamp => (1, match (date, time, off) { (Some(d), Some(t), None) => Some(d.and_time(*t).timestamp()), (Some(d), Some(t), Some(&(_, off))) => @@ -395,152 +428,184 @@ pub fn format<'a, I>(w: &mut fmt::Formatter, date: Option<&NaiveDate>, time: Opt }), // for the future expansion - Internal(ref int) => match int._dummy {}, + &Internal(ref int) => match int._dummy {}, }; + if let Some(v) = v { - if (spec == Year || spec == IsoYear) && !(0 <= v && v < 10_000) { + if (spec == &Year || spec == &IsoYear) && !(0 <= v && v < 10_000) { // non-four-digit years require an explicit sign as per ISO 8601 match pad { - Pad::None => try!(write!(w, "{:+}", v)), - Pad::Zero => try!(write!(w, "{:+01$}", v, width + 1)), - Pad::Space => try!(write!(w, "{:+1$}", v, width + 1)), + &Pad::None => write!(result, "{:+}", v), + &Pad::Zero => write!(result, "{:+01$}", v, width + 1), + &Pad::Space => write!(result, "{:+1$}", v, width + 1), } } else { match pad { - Pad::None => try!(write!(w, "{}", v)), - Pad::Zero => try!(write!(w, "{:01$}", v, width)), - Pad::Space => try!(write!(w, "{:1$}", v, width)), + &Pad::None => write!(result, "{}", v), + &Pad::Zero => write!(result, "{:01$}", v, width), + &Pad::Space => write!(result, "{:1$}", v, width), } - } + }? 
} else { - return Err(fmt::Error); // insufficient arguments for given format + return Err(fmt::Error) // insufficient arguments for given format } }, - Item::Fixed(spec) => { + &Item::Fixed(ref spec) => { use self::Fixed::*; /// Prints an offset from UTC in the format of `+HHMM` or `+HH:MM`. /// `Z` instead of `+00[:]00` is allowed when `allow_zulu` is true. - fn write_local_minus_utc(w: &mut fmt::Formatter, off: FixedOffset, - allow_zulu: bool, use_colon: bool) -> fmt::Result { + fn write_local_minus_utc( + result: &mut String, + off: FixedOffset, + allow_zulu: bool, + use_colon: bool, + ) -> fmt::Result { let off = off.local_minus_utc(); if !allow_zulu || off != 0 { let (sign, off) = if off < 0 {('-', -off)} else {('+', off)}; if use_colon { - write!(w, "{}{:02}:{:02}", sign, off / 3600, off / 60 % 60) + write!(result, "{}{:02}:{:02}", sign, off / 3600, off / 60 % 60) } else { - write!(w, "{}{:02}{:02}", sign, off / 3600, off / 60 % 60) + write!(result, "{}{:02}{:02}", sign, off / 3600, off / 60 % 60) } } else { - write!(w, "Z") + result.push_str("Z"); + Ok(()) } } let ret = match spec { - ShortMonthName => - date.map(|d| write!(w, "{}", SHORT_MONTHS[d.month0() as usize])), - LongMonthName => - date.map(|d| write!(w, "{}", LONG_MONTHS[d.month0() as usize])), - ShortWeekdayName => - date.map(|d| write!(w, "{}", - SHORT_WEEKDAYS[d.weekday().num_days_from_monday() as usize])), - LongWeekdayName => - date.map(|d| write!(w, "{}", - LONG_WEEKDAYS[d.weekday().num_days_from_monday() as usize])), - LowerAmPm => - time.map(|t| write!(w, "{}", if t.hour12().0 {"pm"} else {"am"})), - UpperAmPm => - time.map(|t| write!(w, "{}", if t.hour12().0 {"PM"} else {"AM"})), - Nanosecond => + &ShortMonthName => + date.map(|d| { + result.push_str(SHORT_MONTHS[d.month0() as usize]); + Ok(()) + }), + &LongMonthName => + date.map(|d| { + result.push_str(LONG_MONTHS[d.month0() as usize]); + Ok(()) + }), + &ShortWeekdayName => + date.map(|d| { + result.push_str( + SHORT_WEEKDAYS[d.weekday().num_days_from_monday() as usize] + ); + Ok(()) + }), + &LongWeekdayName => + date.map(|d| { + result.push_str( + LONG_WEEKDAYS[d.weekday().num_days_from_monday() as usize] + ); + Ok(()) + }), + &LowerAmPm => + time.map(|t| { + result.push_str(if t.hour12().0 {"pm"} else {"am"}); + Ok(()) + }), + &UpperAmPm => + time.map(|t| { + result.push_str(if t.hour12().0 {"PM"} else {"AM"}); + Ok(()) + }), + &Nanosecond => time.map(|t| { let nano = t.nanosecond() % 1_000_000_000; if nano == 0 { Ok(()) } else if nano % 1_000_000 == 0 { - write!(w, ".{:03}", nano / 1_000_000) + write!(result, ".{:03}", nano / 1_000_000) } else if nano % 1_000 == 0 { - write!(w, ".{:06}", nano / 1_000) + write!(result, ".{:06}", nano / 1_000) } else { - write!(w, ".{:09}", nano) + write!(result, ".{:09}", nano) } }), - Nanosecond3 => + &Nanosecond3 => time.map(|t| { let nano = t.nanosecond() % 1_000_000_000; - write!(w, ".{:03}", nano / 1_000_000) + write!(result, ".{:03}", nano / 1_000_000) }), - Nanosecond6 => + &Nanosecond6 => time.map(|t| { let nano = t.nanosecond() % 1_000_000_000; - write!(w, ".{:06}", nano / 1_000) + write!(result, ".{:06}", nano / 1_000) }), - Nanosecond9 => + &Nanosecond9 => time.map(|t| { let nano = t.nanosecond() % 1_000_000_000; - write!(w, ".{:09}", nano) + write!(result, ".{:09}", nano) }), - Internal(InternalFixed { val: InternalInternal::Nanosecond3NoDot }) => + &Internal(InternalFixed { val: InternalInternal::Nanosecond3NoDot }) => time.map(|t| { let nano = t.nanosecond() % 1_000_000_000; - write!(w, "{:03}", nano / 1_000_000) 
+ write!(result, "{:03}", nano / 1_000_000) }), - Internal(InternalFixed { val: InternalInternal::Nanosecond6NoDot }) => + &Internal(InternalFixed { val: InternalInternal::Nanosecond6NoDot }) => time.map(|t| { let nano = t.nanosecond() % 1_000_000_000; - write!(w, "{:06}", nano / 1_000) + write!(result, "{:06}", nano / 1_000) }), - Internal(InternalFixed { val: InternalInternal::Nanosecond9NoDot }) => + &Internal(InternalFixed { val: InternalInternal::Nanosecond9NoDot }) => time.map(|t| { let nano = t.nanosecond() % 1_000_000_000; - write!(w, "{:09}", nano) + write!(result, "{:09}", nano) + }), + &TimezoneName => + off.map(|&(ref name, _)| { + result.push_str(name); + Ok(()) }), - TimezoneName => - off.map(|&(ref name, _)| write!(w, "{}", *name)), - TimezoneOffsetColon => - off.map(|&(_, off)| write_local_minus_utc(w, off, false, true)), - TimezoneOffsetColonZ => - off.map(|&(_, off)| write_local_minus_utc(w, off, true, true)), - TimezoneOffset => - off.map(|&(_, off)| write_local_minus_utc(w, off, false, false)), - TimezoneOffsetZ => - off.map(|&(_, off)| write_local_minus_utc(w, off, true, false)), - Internal(InternalFixed { val: InternalInternal::TimezoneOffsetPermissive }) => + &TimezoneOffsetColon => + off.map(|&(_, off)| write_local_minus_utc(&mut result, off, false, true)), + &TimezoneOffsetColonZ => + off.map(|&(_, off)| write_local_minus_utc(&mut result, off, true, true)), + &TimezoneOffset => + off.map(|&(_, off)| write_local_minus_utc(&mut result, off, false, false)), + &TimezoneOffsetZ => + off.map(|&(_, off)| write_local_minus_utc(&mut result, off, true, false)), + &Internal(InternalFixed { val: InternalInternal::TimezoneOffsetPermissive }) => panic!("Do not try to write %#z it is undefined"), - RFC2822 => // same to `%a, %e %b %Y %H:%M:%S %z` + &RFC2822 => // same to `%a, %e %b %Y %H:%M:%S %z` if let (Some(d), Some(t), Some(&(_, off))) = (date, time, off) { let sec = t.second() + t.nanosecond() / 1_000_000_000; - try!(write!(w, "{}, {:2} {} {:04} {:02}:{:02}:{:02} ", - SHORT_WEEKDAYS[d.weekday().num_days_from_monday() as usize], - d.day(), SHORT_MONTHS[d.month0() as usize], d.year(), - t.hour(), t.minute(), sec)); - Some(write_local_minus_utc(w, off, false, false)) + write!( + result, + "{}, {:02} {} {:04} {:02}:{:02}:{:02} ", + SHORT_WEEKDAYS[d.weekday().num_days_from_monday() as usize], + d.day(), SHORT_MONTHS[d.month0() as usize], d.year(), + t.hour(), t.minute(), sec + )?; + Some(write_local_minus_utc(&mut result, off, false, false)) } else { None }, - RFC3339 => // same to `%Y-%m-%dT%H:%M:%S%.f%:z` + &RFC3339 => // same to `%Y-%m-%dT%H:%M:%S%.f%:z` if let (Some(d), Some(t), Some(&(_, off))) = (date, time, off) { // reuse `Debug` impls which already print ISO 8601 format. // this is faster in this way. - try!(write!(w, "{:?}T{:?}", d, t)); - Some(write_local_minus_utc(w, off, false, true)) + write!(result, "{:?}T{:?}", d, t)?; + Some(write_local_minus_utc(&mut result, off, false, true)) } else { None }, }; match ret { - Some(ret) => try!(ret), + Some(ret) => ret?, None => return Err(fmt::Error), // insufficient arguments for given format } }, - Item::Error => return Err(fmt::Error), + &Item::Error => return Err(fmt::Error), } } - Ok(()) + w.pad(&result) } mod parsed; @@ -553,6 +618,7 @@ pub mod strftime; /// A *temporary* object which can be used as an argument to `format!` or others. /// This is normally constructed via `format` methods of each date and time type. 
+#[cfg(any(feature = "alloc", feature = "std", test))] #[derive(Debug)] pub struct DelayedFormat { /// The date view, if any. @@ -565,7 +631,8 @@ pub struct DelayedFormat { items: I, } -impl<'a, I: Iterator> + Clone> DelayedFormat { +#[cfg(any(feature = "alloc", feature = "std", test))] +impl<'a, I: Iterator + Clone, B: Borrow>> DelayedFormat { /// Makes a new `DelayedFormat` value out of local date and time. pub fn new(date: Option, time: Option, items: I) -> DelayedFormat { DelayedFormat { date: date, time: time, off: None, items: items } @@ -580,7 +647,8 @@ impl<'a, I: Iterator> + Clone> DelayedFormat { } } -impl<'a, I: Iterator> + Clone> fmt::Display for DelayedFormat { +#[cfg(any(feature = "alloc", feature = "std", test))] +impl<'a, I: Iterator + Clone, B: Borrow>> fmt::Display for DelayedFormat { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { format(f, self.date.as_ref(), self.time.as_ref(), self.off.as_ref(), self.items.clone()) } diff --git a/third_party/rust/chrono/src/format/parse.rs b/third_party/rust/chrono/src/format/parse.rs index 88c32d104b..ec346828f0 100644 --- a/third_party/rust/chrono/src/format/parse.rs +++ b/third_party/rust/chrono/src/format/parse.rs @@ -4,7 +4,10 @@ //! Date and time parsing routines. -use std::usize; +#![allow(deprecated)] + +use core::borrow::Borrow; +use core::usize; use Weekday; @@ -30,7 +33,7 @@ fn set_weekday_with_number_from_monday(p: &mut Parsed, v: i64) -> ParseResult<() fn parse_rfc2822<'a>(parsed: &mut Parsed, mut s: &'a str) -> ParseResult<(&'a str, ())> { macro_rules! try_consume { - ($e:expr) => ({ let (s_, v) = try!($e); s = s_; v }) + ($e:expr) => ({ let (s_, v) = $e?; s = s_; v }) } // an adapted RFC 2822 syntax from Section 3.3 and 4.3: @@ -87,14 +90,14 @@ fn parse_rfc2822<'a>(parsed: &mut Parsed, mut s: &'a str) -> ParseResult<(&'a st if let Ok((s_, weekday)) = scan::short_weekday(s) { if !s_.starts_with(',') { return Err(INVALID); } s = &s_[1..]; - try!(parsed.set_weekday(weekday)); + parsed.set_weekday(weekday)?; } s = s.trim_left(); - try!(parsed.set_day(try_consume!(scan::number(s, 1, 2)))); - s = try!(scan::space(s)); // mandatory - try!(parsed.set_month(1 + i64::from(try_consume!(scan::short_month0(s))))); - s = try!(scan::space(s)); // mandatory + parsed.set_day(try_consume!(scan::number(s, 1, 2)))?; + s = scan::space(s)?; // mandatory + parsed.set_month(1 + i64::from(try_consume!(scan::short_month0(s))))?; + s = scan::space(s)?; // mandatory // distinguish two- and three-digit years from four-digit years let prevlen = s.len(); @@ -106,20 +109,20 @@ fn parse_rfc2822<'a>(parsed: &mut Parsed, mut s: &'a str) -> ParseResult<(&'a st (3, _) => { year += 1900; } // 112 -> 2012, 009 -> 1909 (_, _) => {} // 1987 -> 1987, 0654 -> 0654 } - try!(parsed.set_year(year)); + parsed.set_year(year)?; - s = try!(scan::space(s)); // mandatory - try!(parsed.set_hour(try_consume!(scan::number(s, 2, 2)))); - s = try!(scan::char(s.trim_left(), b':')).trim_left(); // *S ":" *S - try!(parsed.set_minute(try_consume!(scan::number(s, 2, 2)))); + s = scan::space(s)?; // mandatory + parsed.set_hour(try_consume!(scan::number(s, 2, 2)))?; + s = scan::char(s.trim_left(), b':')?.trim_left(); // *S ":" *S + parsed.set_minute(try_consume!(scan::number(s, 2, 2)))?; if let Ok(s_) = scan::char(s.trim_left(), b':') { // [ ":" *S 2DIGIT ] - try!(parsed.set_second(try_consume!(scan::number(s_, 2, 2)))); + parsed.set_second(try_consume!(scan::number(s_, 2, 2)))?; } - s = try!(scan::space(s)); // mandatory + s = scan::space(s)?; // mandatory if let 
Some(offset) = try_consume!(scan::timezone_offset_2822(s)) { // only set the offset when it is definitely known (i.e. not `-0000`) - try!(parsed.set_offset(i64::from(offset))); + parsed.set_offset(i64::from(offset))?; } Ok((s, ())) @@ -127,7 +130,7 @@ fn parse_rfc2822<'a>(parsed: &mut Parsed, mut s: &'a str) -> ParseResult<(&'a st fn parse_rfc3339<'a>(parsed: &mut Parsed, mut s: &'a str) -> ParseResult<(&'a str, ())> { macro_rules! try_consume { - ($e:expr) => ({ let (s_, v) = try!($e); s = s_; v }) + ($e:expr) => ({ let (s_, v) = $e?; s = s_; v }) } // an adapted RFC 3339 syntax from Section 5.6: @@ -157,11 +160,11 @@ fn parse_rfc3339<'a>(parsed: &mut Parsed, mut s: &'a str) -> ParseResult<(&'a st // note that this restriction is unique to RFC 3339 and not ISO 8601. // since this is not a typical Chrono behavior, we check it earlier. - try!(parsed.set_year(try_consume!(scan::number(s, 4, 4)))); - s = try!(scan::char(s, b'-')); - try!(parsed.set_month(try_consume!(scan::number(s, 2, 2)))); - s = try!(scan::char(s, b'-')); - try!(parsed.set_day(try_consume!(scan::number(s, 2, 2)))); + parsed.set_year(try_consume!(scan::number(s, 4, 4)))?; + s = scan::char(s, b'-')?; + parsed.set_month(try_consume!(scan::number(s, 2, 2)))?; + s = scan::char(s, b'-')?; + parsed.set_day(try_consume!(scan::number(s, 2, 2)))?; s = match s.as_bytes().first() { Some(&b't') | Some(&b'T') => &s[1..], @@ -169,19 +172,19 @@ fn parse_rfc3339<'a>(parsed: &mut Parsed, mut s: &'a str) -> ParseResult<(&'a st None => return Err(TOO_SHORT), }; - try!(parsed.set_hour(try_consume!(scan::number(s, 2, 2)))); - s = try!(scan::char(s, b':')); - try!(parsed.set_minute(try_consume!(scan::number(s, 2, 2)))); - s = try!(scan::char(s, b':')); - try!(parsed.set_second(try_consume!(scan::number(s, 2, 2)))); + parsed.set_hour(try_consume!(scan::number(s, 2, 2)))?; + s = scan::char(s, b':')?; + parsed.set_minute(try_consume!(scan::number(s, 2, 2)))?; + s = scan::char(s, b':')?; + parsed.set_second(try_consume!(scan::number(s, 2, 2)))?; if s.starts_with('.') { let nanosecond = try_consume!(scan::nanosecond(&s[1..])); - try!(parsed.set_nanosecond(nanosecond)); + parsed.set_nanosecond(nanosecond)?; } let offset = try_consume!(scan::timezone_offset_zulu(s, |s| scan::char(s, b':'))); if offset <= -86_400 || offset >= 86_400 { return Err(OUT_OF_RANGE); } - try!(parsed.set_offset(i64::from(offset))); + parsed.set_offset(i64::from(offset))?; Ok((s, ())) } @@ -202,65 +205,71 @@ fn parse_rfc3339<'a>(parsed: &mut Parsed, mut s: &'a str) -> ParseResult<(&'a st /// so one can prepend any number of whitespace then any number of zeroes before numbers. /// /// - (Still) obeying the intrinsic parsing width. This allows, for example, parsing `HHMMSS`. -pub fn parse<'a, I>(parsed: &mut Parsed, mut s: &str, items: I) -> ParseResult<()> - where I: Iterator> { +pub fn parse<'a, I, B>(parsed: &mut Parsed, mut s: &str, items: I) -> ParseResult<()> + where I: Iterator, B: Borrow> { macro_rules! 
try_consume { - ($e:expr) => ({ let (s_, v) = try!($e); s = s_; v }) + ($e:expr) => ({ let (s_, v) = $e?; s = s_; v }) } for item in items { - match item { - Item::Literal(prefix) => { + match item.borrow() { + &Item::Literal(prefix) => { if s.len() < prefix.len() { return Err(TOO_SHORT); } if !s.starts_with(prefix) { return Err(INVALID); } s = &s[prefix.len()..]; } - Item::OwnedLiteral(ref prefix) => { + #[cfg(any(feature = "alloc", feature = "std", test))] + &Item::OwnedLiteral(ref prefix) => { if s.len() < prefix.len() { return Err(TOO_SHORT); } if !s.starts_with(&prefix[..]) { return Err(INVALID); } s = &s[prefix.len()..]; } - Item::Space(_) | Item::OwnedSpace(_) => { + &Item::Space(_) => { + s = s.trim_left(); + } + + #[cfg(any(feature = "alloc", feature = "std", test))] + &Item::OwnedSpace(_) => { s = s.trim_left(); } - Item::Numeric(spec, _pad) => { + &Item::Numeric(ref spec, ref _pad) => { use super::Numeric::*; type Setter = fn(&mut Parsed, i64) -> ParseResult<()>; let (width, signed, set): (usize, bool, Setter) = match spec { - Year => (4, true, Parsed::set_year), - YearDiv100 => (2, false, Parsed::set_year_div_100), - YearMod100 => (2, false, Parsed::set_year_mod_100), - IsoYear => (4, true, Parsed::set_isoyear), - IsoYearDiv100 => (2, false, Parsed::set_isoyear_div_100), - IsoYearMod100 => (2, false, Parsed::set_isoyear_mod_100), - Month => (2, false, Parsed::set_month), - Day => (2, false, Parsed::set_day), - WeekFromSun => (2, false, Parsed::set_week_from_sun), - WeekFromMon => (2, false, Parsed::set_week_from_mon), - IsoWeek => (2, false, Parsed::set_isoweek), - NumDaysFromSun => (1, false, set_weekday_with_num_days_from_sunday), - WeekdayFromMon => (1, false, set_weekday_with_number_from_monday), - Ordinal => (3, false, Parsed::set_ordinal), - Hour => (2, false, Parsed::set_hour), - Hour12 => (2, false, Parsed::set_hour12), - Minute => (2, false, Parsed::set_minute), - Second => (2, false, Parsed::set_second), - Nanosecond => (9, false, Parsed::set_nanosecond), - Timestamp => (usize::MAX, false, Parsed::set_timestamp), + &Year => (4, true, Parsed::set_year), + &YearDiv100 => (2, false, Parsed::set_year_div_100), + &YearMod100 => (2, false, Parsed::set_year_mod_100), + &IsoYear => (4, true, Parsed::set_isoyear), + &IsoYearDiv100 => (2, false, Parsed::set_isoyear_div_100), + &IsoYearMod100 => (2, false, Parsed::set_isoyear_mod_100), + &Month => (2, false, Parsed::set_month), + &Day => (2, false, Parsed::set_day), + &WeekFromSun => (2, false, Parsed::set_week_from_sun), + &WeekFromMon => (2, false, Parsed::set_week_from_mon), + &IsoWeek => (2, false, Parsed::set_isoweek), + &NumDaysFromSun => (1, false, set_weekday_with_num_days_from_sunday), + &WeekdayFromMon => (1, false, set_weekday_with_number_from_monday), + &Ordinal => (3, false, Parsed::set_ordinal), + &Hour => (2, false, Parsed::set_hour), + &Hour12 => (2, false, Parsed::set_hour12), + &Minute => (2, false, Parsed::set_minute), + &Second => (2, false, Parsed::set_second), + &Nanosecond => (9, false, Parsed::set_nanosecond), + &Timestamp => (usize::MAX, false, Parsed::set_timestamp), // for the future expansion - Internal(ref int) => match int._dummy {}, + &Internal(ref int) => match int._dummy {}, }; s = s.trim_left(); let v = if signed { if s.starts_with('-') { let v = try_consume!(scan::number(&s[1..], 1, usize::MAX)); - try!(0i64.checked_sub(v).ok_or(OUT_OF_RANGE)) + 0i64.checked_sub(v).ok_or(OUT_OF_RANGE)? 
} else if s.starts_with('+') { try_consume!(scan::number(&s[1..], 1, usize::MAX)) } else { @@ -270,94 +279,94 @@ pub fn parse<'a, I>(parsed: &mut Parsed, mut s: &str, items: I) -> ParseResult<( } else { try_consume!(scan::number(s, 1, width)) }; - try!(set(parsed, v)); + set(parsed, v)?; } - Item::Fixed(spec) => { + &Item::Fixed(ref spec) => { use super::Fixed::*; match spec { - ShortMonthName => { + &ShortMonthName => { let month0 = try_consume!(scan::short_month0(s)); - try!(parsed.set_month(i64::from(month0) + 1)); + parsed.set_month(i64::from(month0) + 1)?; } - LongMonthName => { + &LongMonthName => { let month0 = try_consume!(scan::short_or_long_month0(s)); - try!(parsed.set_month(i64::from(month0) + 1)); + parsed.set_month(i64::from(month0) + 1)?; } - ShortWeekdayName => { + &ShortWeekdayName => { let weekday = try_consume!(scan::short_weekday(s)); - try!(parsed.set_weekday(weekday)); + parsed.set_weekday(weekday)?; } - LongWeekdayName => { + &LongWeekdayName => { let weekday = try_consume!(scan::short_or_long_weekday(s)); - try!(parsed.set_weekday(weekday)); + parsed.set_weekday(weekday)?; } - LowerAmPm | UpperAmPm => { + &LowerAmPm | &UpperAmPm => { if s.len() < 2 { return Err(TOO_SHORT); } let ampm = match (s.as_bytes()[0] | 32, s.as_bytes()[1] | 32) { (b'a',b'm') => false, (b'p',b'm') => true, _ => return Err(INVALID) }; - try!(parsed.set_ampm(ampm)); + parsed.set_ampm(ampm)?; s = &s[2..]; } - Nanosecond | Nanosecond3 | Nanosecond6 | Nanosecond9 => { + &Nanosecond | &Nanosecond3 | &Nanosecond6 | &Nanosecond9 => { if s.starts_with('.') { let nano = try_consume!(scan::nanosecond(&s[1..])); - try!(parsed.set_nanosecond(nano)); + parsed.set_nanosecond(nano)?; } } - Internal(InternalFixed { val: InternalInternal::Nanosecond3NoDot }) => { + &Internal(InternalFixed { val: InternalInternal::Nanosecond3NoDot }) => { if s.len() < 3 { return Err(TOO_SHORT); } let nano = try_consume!(scan::nanosecond_fixed(s, 3)); - try!(parsed.set_nanosecond(nano)); + parsed.set_nanosecond(nano)?; } - Internal(InternalFixed { val: InternalInternal::Nanosecond6NoDot }) => { + &Internal(InternalFixed { val: InternalInternal::Nanosecond6NoDot }) => { if s.len() < 6 { return Err(TOO_SHORT); } let nano = try_consume!(scan::nanosecond_fixed(s, 6)); - try!(parsed.set_nanosecond(nano)); + parsed.set_nanosecond(nano)?; } - Internal(InternalFixed { val: InternalInternal::Nanosecond9NoDot }) => { + &Internal(InternalFixed { val: InternalInternal::Nanosecond9NoDot }) => { if s.len() < 9 { return Err(TOO_SHORT); } let nano = try_consume!(scan::nanosecond_fixed(s, 9)); - try!(parsed.set_nanosecond(nano)); + parsed.set_nanosecond(nano)?; } - TimezoneName => return Err(BAD_FORMAT), + &TimezoneName => return Err(BAD_FORMAT), - TimezoneOffsetColon | TimezoneOffset => { + &TimezoneOffsetColon | &TimezoneOffset => { let offset = try_consume!(scan::timezone_offset(s.trim_left(), scan::colon_or_space)); - try!(parsed.set_offset(i64::from(offset))); + parsed.set_offset(i64::from(offset))?; } - TimezoneOffsetColonZ | TimezoneOffsetZ => { + &TimezoneOffsetColonZ | &TimezoneOffsetZ => { let offset = try_consume!(scan::timezone_offset_zulu(s.trim_left(), scan::colon_or_space)); - try!(parsed.set_offset(i64::from(offset))); + parsed.set_offset(i64::from(offset))?; } - Internal(InternalFixed { val: InternalInternal::TimezoneOffsetPermissive }) => { + &Internal(InternalFixed { val: InternalInternal::TimezoneOffsetPermissive }) => { let offset = try_consume!(scan::timezone_offset_permissive( s.trim_left(), scan::colon_or_space)); - 
try!(parsed.set_offset(i64::from(offset))); + parsed.set_offset(i64::from(offset))?; } - RFC2822 => try_consume!(parse_rfc2822(parsed, s)), - RFC3339 => try_consume!(parse_rfc3339(parsed, s)), + &RFC2822 => try_consume!(parse_rfc2822(parsed, s)), + &RFC3339 => try_consume!(parse_rfc3339(parsed, s)), } } - Item::Error => { + &Item::Error => { return Err(BAD_FORMAT); } } @@ -380,7 +389,7 @@ fn test_parse() { // workaround for Rust issue #22255 fn parse_all(s: &str, items: &[Item]) -> ParseResult { let mut parsed = Parsed::new(); - try!(parse(&mut parsed, s, items.iter().cloned())); + parse(&mut parsed, s, items.iter())?; Ok(parsed) } @@ -671,6 +680,8 @@ fn test_rfc2822() { // Test data - (input, Ok(expected result after parse and format) or Err(error code)) let testdates = [ ("Tue, 20 Jan 2015 17:35:20 -0800", Ok("Tue, 20 Jan 2015 17:35:20 -0800")), // normal case + ("Fri, 2 Jan 2015 17:35:20 -0800", Ok("Fri, 02 Jan 2015 17:35:20 -0800")), // folding whitespace + ("Fri, 02 Jan 2015 17:35:20 -0800", Ok("Fri, 02 Jan 2015 17:35:20 -0800")), // leading zero ("20 Jan 2015 17:35:20 -0800", Ok("Tue, 20 Jan 2015 17:35:20 -0800")), // no day of week ("20 JAN 2015 17:35:20 -0800", Ok("Tue, 20 Jan 2015 17:35:20 -0800")), // upper case month ("Tue, 20 Jan 2015 17:35 -0800", Ok("Tue, 20 Jan 2015 17:35:00 -0800")), // no second @@ -689,12 +700,12 @@ fn test_rfc2822() { fn rfc2822_to_datetime(date: &str) -> ParseResult> { let mut parsed = Parsed::new(); - try!(parse(&mut parsed, date, [Item::Fixed(Fixed::RFC2822)].iter().cloned())); + parse(&mut parsed, date, [Item::Fixed(Fixed::RFC2822)].iter())?; parsed.to_datetime() } fn fmt_rfc2822_datetime(dt: DateTime) -> String { - dt.format_with_items([Item::Fixed(Fixed::RFC2822)].iter().cloned()).to_string() + dt.format_with_items([Item::Fixed(Fixed::RFC2822)].iter()).to_string() } // Test against test data above @@ -770,12 +781,12 @@ fn test_rfc3339() { fn rfc3339_to_datetime(date: &str) -> ParseResult> { let mut parsed = Parsed::new(); - try!(parse(&mut parsed, date, [Item::Fixed(Fixed::RFC3339)].iter().cloned())); + parse(&mut parsed, date, [Item::Fixed(Fixed::RFC3339)].iter())?; parsed.to_datetime() } fn fmt_rfc3339_datetime(dt: DateTime) -> String { - dt.format_with_items([Item::Fixed(Fixed::RFC3339)].iter().cloned()).to_string() + dt.format_with_items([Item::Fixed(Fixed::RFC3339)].iter()).to_string() } // Test against test data above diff --git a/third_party/rust/chrono/src/format/parsed.rs b/third_party/rust/chrono/src/format/parsed.rs index 7b9708d07a..c1b02ffce5 100644 --- a/third_party/rust/chrono/src/format/parsed.rs +++ b/third_party/rust/chrono/src/format/parsed.rs @@ -112,6 +112,7 @@ pub struct Parsed { /// Checks if `old` is either empty or has the same value to `new` (i.e. "consistent"), /// and if it is empty, set `old` to `new` as well. +#[inline] fn set_if_consistent(old: &mut Option, new: T) -> ParseResult<()> { if let Some(ref old) = *old { if *old == new {Ok(())} else {Err(IMPOSSIBLE)} @@ -141,82 +142,97 @@ impl Parsed { } /// Tries to set the [`year`](#structfield.year) field from given value. + #[inline] pub fn set_year(&mut self, value: i64) -> ParseResult<()> { - set_if_consistent(&mut self.year, try!(value.to_i32().ok_or(OUT_OF_RANGE))) + set_if_consistent(&mut self.year, value.to_i32().ok_or(OUT_OF_RANGE)?) } /// Tries to set the [`year_div_100`](#structfield.year_div_100) field from given value. 
+ #[inline] pub fn set_year_div_100(&mut self, value: i64) -> ParseResult<()> { if value < 0 { return Err(OUT_OF_RANGE); } - set_if_consistent(&mut self.year_div_100, try!(value.to_i32().ok_or(OUT_OF_RANGE))) + set_if_consistent(&mut self.year_div_100, value.to_i32().ok_or(OUT_OF_RANGE)?) } /// Tries to set the [`year_mod_100`](#structfield.year_mod_100) field from given value. + #[inline] pub fn set_year_mod_100(&mut self, value: i64) -> ParseResult<()> { if value < 0 { return Err(OUT_OF_RANGE); } - set_if_consistent(&mut self.year_mod_100, try!(value.to_i32().ok_or(OUT_OF_RANGE))) + set_if_consistent(&mut self.year_mod_100, value.to_i32().ok_or(OUT_OF_RANGE)?) } /// Tries to set the [`isoyear`](#structfield.isoyear) field from given value. + #[inline] pub fn set_isoyear(&mut self, value: i64) -> ParseResult<()> { - set_if_consistent(&mut self.isoyear, try!(value.to_i32().ok_or(OUT_OF_RANGE))) + set_if_consistent(&mut self.isoyear, value.to_i32().ok_or(OUT_OF_RANGE)?) } /// Tries to set the [`isoyear_div_100`](#structfield.isoyear_div_100) field from given value. + #[inline] pub fn set_isoyear_div_100(&mut self, value: i64) -> ParseResult<()> { if value < 0 { return Err(OUT_OF_RANGE); } - set_if_consistent(&mut self.isoyear_div_100, try!(value.to_i32().ok_or(OUT_OF_RANGE))) + set_if_consistent(&mut self.isoyear_div_100, value.to_i32().ok_or(OUT_OF_RANGE)?) } /// Tries to set the [`isoyear_mod_100`](#structfield.isoyear_mod_100) field from given value. + #[inline] pub fn set_isoyear_mod_100(&mut self, value: i64) -> ParseResult<()> { if value < 0 { return Err(OUT_OF_RANGE); } - set_if_consistent(&mut self.isoyear_mod_100, try!(value.to_i32().ok_or(OUT_OF_RANGE))) + set_if_consistent(&mut self.isoyear_mod_100, value.to_i32().ok_or(OUT_OF_RANGE)?) } /// Tries to set the [`month`](#structfield.month) field from given value. + #[inline] pub fn set_month(&mut self, value: i64) -> ParseResult<()> { - set_if_consistent(&mut self.month, try!(value.to_u32().ok_or(OUT_OF_RANGE))) + set_if_consistent(&mut self.month, value.to_u32().ok_or(OUT_OF_RANGE)?) } /// Tries to set the [`week_from_sun`](#structfield.week_from_sun) field from given value. + #[inline] pub fn set_week_from_sun(&mut self, value: i64) -> ParseResult<()> { - set_if_consistent(&mut self.week_from_sun, try!(value.to_u32().ok_or(OUT_OF_RANGE))) + set_if_consistent(&mut self.week_from_sun, value.to_u32().ok_or(OUT_OF_RANGE)?) } /// Tries to set the [`week_from_mon`](#structfield.week_from_mon) field from given value. + #[inline] pub fn set_week_from_mon(&mut self, value: i64) -> ParseResult<()> { - set_if_consistent(&mut self.week_from_mon, try!(value.to_u32().ok_or(OUT_OF_RANGE))) + set_if_consistent(&mut self.week_from_mon, value.to_u32().ok_or(OUT_OF_RANGE)?) } /// Tries to set the [`isoweek`](#structfield.isoweek) field from given value. + #[inline] pub fn set_isoweek(&mut self, value: i64) -> ParseResult<()> { - set_if_consistent(&mut self.isoweek, try!(value.to_u32().ok_or(OUT_OF_RANGE))) + set_if_consistent(&mut self.isoweek, value.to_u32().ok_or(OUT_OF_RANGE)?) } /// Tries to set the [`weekday`](#structfield.weekday) field from given value. + #[inline] pub fn set_weekday(&mut self, value: Weekday) -> ParseResult<()> { set_if_consistent(&mut self.weekday, value) } /// Tries to set the [`ordinal`](#structfield.ordinal) field from given value. 
+ #[inline] pub fn set_ordinal(&mut self, value: i64) -> ParseResult<()> { - set_if_consistent(&mut self.ordinal, try!(value.to_u32().ok_or(OUT_OF_RANGE))) + set_if_consistent(&mut self.ordinal, value.to_u32().ok_or(OUT_OF_RANGE)?) } /// Tries to set the [`day`](#structfield.day) field from given value. + #[inline] pub fn set_day(&mut self, value: i64) -> ParseResult<()> { - set_if_consistent(&mut self.day, try!(value.to_u32().ok_or(OUT_OF_RANGE))) + set_if_consistent(&mut self.day, value.to_u32().ok_or(OUT_OF_RANGE)?) } /// Tries to set the [`hour_div_12`](#structfield.hour_div_12) field from given value. /// (`false` for AM, `true` for PM) + #[inline] pub fn set_ampm(&mut self, value: bool) -> ParseResult<()> { set_if_consistent(&mut self.hour_div_12, if value {1} else {0}) } /// Tries to set the [`hour_mod_12`](#structfield.hour_mod_12) field from /// given hour number in 12-hour clocks. + #[inline] pub fn set_hour12(&mut self, value: i64) -> ParseResult<()> { if value < 1 || value > 12 { return Err(OUT_OF_RANGE); } set_if_consistent(&mut self.hour_mod_12, value as u32 % 12) @@ -224,36 +240,42 @@ impl Parsed { /// Tries to set both [`hour_div_12`](#structfield.hour_div_12) and /// [`hour_mod_12`](#structfield.hour_mod_12) fields from given value. + #[inline] pub fn set_hour(&mut self, value: i64) -> ParseResult<()> { - let v = try!(value.to_u32().ok_or(OUT_OF_RANGE)); - try!(set_if_consistent(&mut self.hour_div_12, v / 12)); - try!(set_if_consistent(&mut self.hour_mod_12, v % 12)); + let v = value.to_u32().ok_or(OUT_OF_RANGE)?; + set_if_consistent(&mut self.hour_div_12, v / 12)?; + set_if_consistent(&mut self.hour_mod_12, v % 12)?; Ok(()) } /// Tries to set the [`minute`](#structfield.minute) field from given value. + #[inline] pub fn set_minute(&mut self, value: i64) -> ParseResult<()> { - set_if_consistent(&mut self.minute, try!(value.to_u32().ok_or(OUT_OF_RANGE))) + set_if_consistent(&mut self.minute, value.to_u32().ok_or(OUT_OF_RANGE)?) } /// Tries to set the [`second`](#structfield.second) field from given value. + #[inline] pub fn set_second(&mut self, value: i64) -> ParseResult<()> { - set_if_consistent(&mut self.second, try!(value.to_u32().ok_or(OUT_OF_RANGE))) + set_if_consistent(&mut self.second, value.to_u32().ok_or(OUT_OF_RANGE)?) } /// Tries to set the [`nanosecond`](#structfield.nanosecond) field from given value. + #[inline] pub fn set_nanosecond(&mut self, value: i64) -> ParseResult<()> { - set_if_consistent(&mut self.nanosecond, try!(value.to_u32().ok_or(OUT_OF_RANGE))) + set_if_consistent(&mut self.nanosecond, value.to_u32().ok_or(OUT_OF_RANGE)?) } /// Tries to set the [`timestamp`](#structfield.timestamp) field from given value. + #[inline] pub fn set_timestamp(&mut self, value: i64) -> ParseResult<()> { set_if_consistent(&mut self.timestamp, value) } /// Tries to set the [`offset`](#structfield.offset) field from given value. + #[inline] pub fn set_offset(&mut self, value: i64) -> ParseResult<()> { - set_if_consistent(&mut self.offset, try!(value.to_i32().ok_or(OUT_OF_RANGE))) + set_if_consistent(&mut self.offset, value.to_i32().ok_or(OUT_OF_RANGE)?) } /// Returns a parsed naive date out of given fields. @@ -294,7 +316,7 @@ impl Parsed { (None, Some(q), Some(r @ 0...99)) => { if q < 0 { return Err(OUT_OF_RANGE); } let y = q.checked_mul(100).and_then(|v| v.checked_add(r)); - Ok(Some(try!(y.ok_or(OUT_OF_RANGE)))) + Ok(Some(y.ok_or(OUT_OF_RANGE)?)) }, // we only have modulo. try to interpret a modulo as a conventional two-digit year. 
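The hunk above converts the `Parsed::set_*` helpers from `try!` to `?` and keeps two ideas worth seeing in isolation: the consistency-checking setter described by the `set_if_consistent` doc comment, and the overflow-checked `div_100 * 100 + mod_100` year reconstruction. The following is a minimal standalone sketch of those two patterns; the names (`ParseError`, `resolve_year`) are illustrative stand-ins, not chrono's actual types or API.

```rust
// Illustrative stand-ins for chrono's Parsed machinery, not the real API.
#[derive(Debug, PartialEq)]
enum ParseError {
    Impossible,
    OutOfRange,
}

/// Set `old` to `new` if it is empty, or verify it already holds the same value.
fn set_if_consistent<T: PartialEq>(old: &mut Option<T>, new: T) -> Result<(), ParseError> {
    match old {
        Some(v) if *v == new => Ok(()),          // same value: "consistent", nothing to do
        Some(_) => Err(ParseError::Impossible),  // conflicting value already recorded
        None => {
            *old = Some(new);
            Ok(())
        }
    }
}

/// Combine a century (`year / 100`) and a two-digit remainder (`year % 100`)
/// with checked arithmetic, mirroring the `checked_mul`/`checked_add` chain above.
fn resolve_year(div_100: i32, mod_100: i32) -> Result<i32, ParseError> {
    div_100
        .checked_mul(100)
        .and_then(|v| v.checked_add(mod_100))
        .ok_or(ParseError::OutOfRange)
}

fn main() -> Result<(), ParseError> {
    let mut month: Option<u32> = None;
    set_if_consistent(&mut month, 1)?; // first assignment succeeds
    set_if_consistent(&mut month, 1)?; // repeating the same value is fine
    assert_eq!(set_if_consistent(&mut month, 2), Err(ParseError::Impossible));

    assert_eq!(resolve_year(20, 15)?, 2015);
    assert!(resolve_year(i32::max_value(), 0).is_err()); // overflow is reported, not wrapped
    Ok(())
}
```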
@@ -308,9 +330,9 @@ impl Parsed { } let given_year = - try!(resolve_year(self.year, self.year_div_100, self.year_mod_100)); + resolve_year(self.year, self.year_div_100, self.year_mod_100)?; let given_isoyear = - try!(resolve_year(self.isoyear, self.isoyear_div_100, self.isoyear_mod_100)); + resolve_year(self.isoyear, self.isoyear_div_100, self.isoyear_mod_100)?; // verify the normal year-month-day date. let verify_ymd = |date: NaiveDate| { @@ -366,20 +388,20 @@ impl Parsed { let (verified, parsed_date) = match (given_year, given_isoyear, self) { (Some(year), _, &Parsed { month: Some(month), day: Some(day), .. }) => { // year, month, day - let date = try!(NaiveDate::from_ymd_opt(year, month, day).ok_or(OUT_OF_RANGE)); + let date = NaiveDate::from_ymd_opt(year, month, day).ok_or(OUT_OF_RANGE)?; (verify_isoweekdate(date) && verify_ordinal(date), date) }, (Some(year), _, &Parsed { ordinal: Some(ordinal), .. }) => { // year, day of the year - let date = try!(NaiveDate::from_yo_opt(year, ordinal).ok_or(OUT_OF_RANGE)); + let date = NaiveDate::from_yo_opt(year, ordinal).ok_or(OUT_OF_RANGE)?; (verify_ymd(date) && verify_isoweekdate(date) && verify_ordinal(date), date) }, (Some(year), _, &Parsed { week_from_sun: Some(week_from_sun), weekday: Some(weekday), .. }) => { // year, week (starting at 1st Sunday), day of the week - let newyear = try!(NaiveDate::from_yo_opt(year, 1).ok_or(OUT_OF_RANGE)); + let newyear = NaiveDate::from_yo_opt(year, 1).ok_or(OUT_OF_RANGE)?; let firstweek = match newyear.weekday() { Weekday::Sun => 0, Weekday::Mon => 6, @@ -394,8 +416,8 @@ impl Parsed { if week_from_sun > 53 { return Err(OUT_OF_RANGE); } // can it overflow? let ndays = firstweek + (week_from_sun as i32 - 1) * 7 + weekday.num_days_from_sunday() as i32; - let date = try!(newyear.checked_add_signed(OldDuration::days(i64::from(ndays))) - .ok_or(OUT_OF_RANGE)); + let date = newyear.checked_add_signed(OldDuration::days(i64::from(ndays))) + .ok_or(OUT_OF_RANGE)?; if date.year() != year { return Err(OUT_OF_RANGE); } // early exit for correct error (verify_ymd(date) && verify_isoweekdate(date) && verify_ordinal(date), date) @@ -404,7 +426,7 @@ impl Parsed { (Some(year), _, &Parsed { week_from_mon: Some(week_from_mon), weekday: Some(weekday), .. }) => { // year, week (starting at 1st Monday), day of the week - let newyear = try!(NaiveDate::from_yo_opt(year, 1).ok_or(OUT_OF_RANGE)); + let newyear = NaiveDate::from_yo_opt(year, 1).ok_or(OUT_OF_RANGE)?; let firstweek = match newyear.weekday() { Weekday::Sun => 1, Weekday::Mon => 0, @@ -419,8 +441,8 @@ impl Parsed { if week_from_mon > 53 { return Err(OUT_OF_RANGE); } // can it overflow? let ndays = firstweek + (week_from_mon as i32 - 1) * 7 + weekday.num_days_from_monday() as i32; - let date = try!(newyear.checked_add_signed(OldDuration::days(i64::from(ndays))) - .ok_or(OUT_OF_RANGE)); + let date = newyear.checked_add_signed(OldDuration::days(i64::from(ndays))) + .ok_or(OUT_OF_RANGE)?; if date.year() != year { return Err(OUT_OF_RANGE); } // early exit for correct error (verify_ymd(date) && verify_isoweekdate(date) && verify_ordinal(date), date) @@ -429,7 +451,7 @@ impl Parsed { (_, Some(isoyear), &Parsed { isoweek: Some(isoweek), weekday: Some(weekday), .. 
}) => { // ISO year, week, day of the week let date = NaiveDate::from_isoywd_opt(isoyear, isoweek, weekday); - let date = try!(date.ok_or(OUT_OF_RANGE)); + let date = date.ok_or(OUT_OF_RANGE)?; (verify_ymd(date) && verify_ordinal(date), date) }, @@ -525,9 +547,9 @@ impl Parsed { } // reconstruct date and time fields from timestamp - let ts = try!(timestamp.checked_add(i64::from(offset)).ok_or(OUT_OF_RANGE)); + let ts = timestamp.checked_add(i64::from(offset)).ok_or(OUT_OF_RANGE)?; let datetime = NaiveDateTime::from_timestamp_opt(ts, 0); - let mut datetime = try!(datetime.ok_or(OUT_OF_RANGE)); + let mut datetime = datetime.ok_or(OUT_OF_RANGE)?; // fill year, ordinal, hour, minute and second fields from timestamp. // if existing fields are consistent, this will allow the full date/time reconstruction. @@ -544,21 +566,21 @@ impl Parsed { } // ...and we have the correct candidates for other fields. } else { - try!(parsed.set_second(i64::from(datetime.second()))); + parsed.set_second(i64::from(datetime.second()))?; } - try!(parsed.set_year (i64::from(datetime.year()))); - try!(parsed.set_ordinal(i64::from(datetime.ordinal()))); // more efficient than ymd - try!(parsed.set_hour (i64::from(datetime.hour()))); - try!(parsed.set_minute (i64::from(datetime.minute()))); + parsed.set_year (i64::from(datetime.year()))?; + parsed.set_ordinal(i64::from(datetime.ordinal()))?; // more efficient than ymd + parsed.set_hour (i64::from(datetime.hour()))?; + parsed.set_minute (i64::from(datetime.minute()))?; // validate other fields (e.g. week) and return - let date = try!(parsed.to_naive_date()); - let time = try!(parsed.to_naive_time()); + let date = parsed.to_naive_date()?; + let time = parsed.to_naive_time()?; Ok(date.and_time(time)) } else { // reproduce the previous error(s) - try!(date); - try!(time); + date?; + time?; unreachable!() } } @@ -575,9 +597,9 @@ impl Parsed { /// plus a time zone offset. /// Either way those fields have to be consistent to each other. pub fn to_datetime(&self) -> ParseResult> { - let offset = try!(self.offset.ok_or(NOT_ENOUGH)); - let datetime = try!(self.to_naive_datetime_with_offset(offset)); - let offset = try!(FixedOffset::east_opt(offset).ok_or(OUT_OF_RANGE)); + let offset = self.offset.ok_or(NOT_ENOUGH)?; + let datetime = self.to_naive_datetime_with_offset(offset)?; + let offset = FixedOffset::east_opt(offset).ok_or(OUT_OF_RANGE)?; match offset.from_local_datetime(&datetime) { LocalResult::None => Err(IMPOSSIBLE), LocalResult::Single(t) => Ok(t), @@ -602,7 +624,7 @@ impl Parsed { // an empty `nanosecond` is always equal to zero, so missing nanosecond is fine. let nanosecond = self.nanosecond.unwrap_or(0); let dt = NaiveDateTime::from_timestamp_opt(timestamp, nanosecond); - let dt = try!(dt.ok_or(OUT_OF_RANGE)); + let dt = dt.ok_or(OUT_OF_RANGE)?; guessed_offset = tz.offset_from_utc_datetime(&dt).fix().local_minus_utc(); } @@ -617,7 +639,7 @@ impl Parsed { // `guessed_offset` should be correct when `self.timestamp` is given. // it will be 0 otherwise, but this is fine as the algorithm ignores offset for that case. 
- let datetime = try!(self.to_naive_datetime_with_offset(guessed_offset)); + let datetime = self.to_naive_datetime_with_offset(guessed_offset)?; match tz.from_local_datetime(&datetime) { LocalResult::None => Err(IMPOSSIBLE), LocalResult::Single(t) => if check_offset(&t) {Ok(t)} else {Err(IMPOSSIBLE)}, diff --git a/third_party/rust/chrono/src/format/scan.rs b/third_party/rust/chrono/src/format/scan.rs index f2f97d91ce..7235d67498 100644 --- a/third_party/rust/chrono/src/format/scan.rs +++ b/third_party/rust/chrono/src/format/scan.rs @@ -5,6 +5,8 @@ * Various scanning routines for the parser. */ +#![allow(deprecated)] + use Weekday; use super::{ParseResult, TOO_SHORT, INVALID, OUT_OF_RANGE}; @@ -28,23 +30,35 @@ fn equals(s: &str, pattern: &str) -> bool { /// The absence of digits at all is an unconditional error. /// More than `max` digits are consumed up to the first `max` digits. /// Any number that does not fit in `i64` is an error. +#[inline] pub fn number(s: &str, min: usize, max: usize) -> ParseResult<(&str, i64)> { assert!(min <= max); - // limit `s` to given number of digits - let mut window = s.as_bytes(); - if window.len() > max { window = &window[..max]; } + // We are only interested in ascii numbers, so we can work with the `str` as bytes. We stop on + // the first non-numeric byte, which may be another ascii character or beginning of multi-byte + // UTF-8 character. + let bytes = s.as_bytes(); + if bytes.len() < min { + return Err(TOO_SHORT); + } + + let mut n = 0i64; + for (i, c) in bytes.iter().take(max).cloned().enumerate() { // cloned() = copied() + if c < b'0' || b'9' < c { + if i < min { + return Err(INVALID); + } else { + return Ok((&s[i..], n)); + } + } - // scan digits - let upto = window.iter().position(|&c| c < b'0' || b'9' < c) - .unwrap_or_else(|| window.len()); - if upto < min { - return Err(if window.is_empty() {TOO_SHORT} else {INVALID}); + n = match n.checked_mul(10).and_then(|n| n.checked_add((c - b'0') as i64)) { + Some(n) => n, + None => return Err(OUT_OF_RANGE), + }; } - // we can overflow here, which is the only possible cause of error from `parse`. - let v: i64 = try!(s[..upto].parse().map_err(|_| OUT_OF_RANGE)); - Ok((&s[upto..], v)) + Ok((&s[::core::cmp::min(max, bytes.len())..], n)) } /// Tries to consume at least one digits as a fractional second. @@ -52,13 +66,13 @@ pub fn number(s: &str, min: usize, max: usize) -> ParseResult<(&str, i64)> { pub fn nanosecond(s: &str) -> ParseResult<(&str, i64)> { // record the number of digits consumed for later scaling. let origlen = s.len(); - let (s, v) = try!(number(s, 1, 9)); + let (s, v) = number(s, 1, 9)?; let consumed = origlen - s.len(); // scale the number accordingly. static SCALE: [i64; 10] = [0, 100_000_000, 10_000_000, 1_000_000, 100_000, 10_000, 1_000, 100, 10, 1]; - let v = try!(v.checked_mul(SCALE[consumed]).ok_or(OUT_OF_RANGE)); + let v = v.checked_mul(SCALE[consumed]).ok_or(OUT_OF_RANGE)?; // if there are more than 9 digits, skip next digits. let s = s.trim_left_matches(|c: char| '0' <= c && c <= '9'); @@ -70,12 +84,12 @@ pub fn nanosecond(s: &str) -> ParseResult<(&str, i64)> { /// Returns the number of whole nanoseconds (0--999,999,999). pub fn nanosecond_fixed(s: &str, digits: usize) -> ParseResult<(&str, i64)> { // record the number of digits consumed for later scaling. - let (s, v) = try!(number(s, digits, digits)); + let (s, v) = number(s, digits, digits)?; // scale the number accordingly. 
static SCALE: [i64; 10] = [0, 100_000_000, 10_000_000, 1_000_000, 100_000, 10_000, 1_000, 100, 10, 1]; - let v = try!(v.checked_mul(SCALE[digits]).ok_or(OUT_OF_RANGE)); + let v = v.checked_mul(SCALE[digits]).ok_or(OUT_OF_RANGE)?; Ok((s, v)) } @@ -126,7 +140,7 @@ pub fn short_or_long_month0(s: &str) -> ParseResult<(&str, u8)> { static LONG_MONTH_SUFFIXES: [&'static str; 12] = ["uary", "ruary", "ch", "il", "", "e", "y", "ust", "tember", "ober", "ember", "ember"]; - let (mut s, month0) = try!(short_month0(s)); + let (mut s, month0) = short_month0(s)?; // tries to consume the suffix if possible let suffix = LONG_MONTH_SUFFIXES[month0 as usize]; @@ -144,7 +158,7 @@ pub fn short_or_long_weekday(s: &str) -> ParseResult<(&str, Weekday)> { static LONG_WEEKDAY_SUFFIXES: [&'static str; 7] = ["day", "sday", "nesday", "rsday", "day", "urday", "day"]; - let (mut s, weekday) = try!(short_weekday(s)); + let (mut s, weekday) = short_weekday(s)?; // tries to consume the suffix if possible let suffix = LONG_WEEKDAY_SUFFIXES[weekday.num_days_from_monday() as usize]; @@ -211,14 +225,14 @@ fn timezone_offset_internal(mut s: &str, mut consume_colon: F, allow_missing_ s = &s[1..]; // hours (00--99) - let hours = match try!(digits(s)) { + let hours = match digits(s)? { (h1 @ b'0'...b'9', h2 @ b'0'...b'9') => i32::from((h1 - b'0') * 10 + (h2 - b'0')), _ => return Err(INVALID), }; s = &s[2..]; // colons (and possibly other separators) - s = try!(consume_colon(s)); + s = consume_colon(s)?; // minutes (00--59) // if the next two items are digits then we have to add minutes @@ -293,7 +307,7 @@ pub fn timezone_offset_2822(s: &str) -> ParseResult<(&str, Option)> { Ok((s, None)) // recommended by RFC 2822: consume but treat it as -0000 } } else { - let (s_, offset) = try!(timezone_offset(s, |s| Ok(s))); + let (s_, offset) = timezone_offset(s, |s| Ok(s))?; if offset == 0 && s.starts_with('-') { // -0000 is not same to +0000 Ok((s_, None)) } else { diff --git a/third_party/rust/chrono/src/format/strftime.rs b/third_party/rust/chrono/src/format/strftime.rs index d7cf68c76b..e9cf18d4d7 100644 --- a/third_party/rust/chrono/src/format/strftime.rs +++ b/third_party/rust/chrono/src/format/strftime.rs @@ -11,9 +11,9 @@ The following specifiers are available both to formatting and parsing. | Spec. | Example | Description | |-------|----------|----------------------------------------------------------------------------| | | | **DATE SPECIFIERS:** | -| `%Y` | `2001` | The full proleptic Gregorian year, zero-padded to 4 digits. [1] | -| `%C` | `20` | The proleptic Gregorian year divided by 100, zero-padded to 2 digits. [2] | -| `%y` | `01` | The proleptic Gregorian year modulo 100, zero-padded to 2 digits. [2] | +| `%Y` | `2001` | The full proleptic Gregorian year, zero-padded to 4 digits. [^1] | +| `%C` | `20` | The proleptic Gregorian year divided by 100, zero-padded to 2 digits. [^2] | +| `%y` | `01` | The proleptic Gregorian year modulo 100, zero-padded to 2 digits. [^2] | | | | | | `%m` | `07` | Month number (01--12), zero-padded to 2 digits. | | `%b` | `Jul` | Abbreviated month name. Always 3 letters. | @@ -28,12 +28,12 @@ The following specifiers are available both to formatting and parsing. | `%w` | `0` | Sunday = 0, Monday = 1, ..., Saturday = 6. | | `%u` | `7` | Monday = 1, Tuesday = 2, ..., Sunday = 7. (ISO 8601) | | | | | -| `%U` | `28` | Week number starting with Sunday (00--53), zero-padded to 2 digits. [3] | +| `%U` | `28` | Week number starting with Sunday (00--53), zero-padded to 2 digits. 
[^3] | | `%W` | `27` | Same to `%U`, but week 1 starts with the first Monday in that year instead.| | | | | -| `%G` | `2001` | Same to `%Y` but uses the year number in ISO 8601 week date. [4] | -| `%g` | `01` | Same to `%y` but uses the year number in ISO 8601 week date. [4] | -| `%V` | `27` | Same to `%U` but uses the week number in ISO 8601 week date (01--53). [4] | +| `%G` | `2001` | Same to `%Y` but uses the year number in ISO 8601 week date. [^4] | +| `%g` | `01` | Same to `%y` but uses the year number in ISO 8601 week date. [^4] | +| `%V` | `27` | Same to `%U` but uses the week number in ISO 8601 week date (01--53). [^4] | | | | | | `%j` | `189` | Day of the year (001--366), zero-padded to 3 digits. | | | | | @@ -52,15 +52,15 @@ The following specifiers are available both to formatting and parsing. | `%p` | `AM` | `AM` or `PM` in 12-hour clocks. | | | | | | `%M` | `34` | Minute number (00--59), zero-padded to 2 digits. | -| `%S` | `60` | Second number (00--60), zero-padded to 2 digits. [5] | -| `%f` | `026490000` | The fractional seconds (in nanoseconds) since last whole second. [8] | -| `%.f` | `.026490`| Similar to `.%f` but left-aligned. These all consume the leading dot. [8] | -| `%.3f`| `.026` | Similar to `.%f` but left-aligned but fixed to a length of 3. [8] | -| `%.6f`| `.026490` | Similar to `.%f` but left-aligned but fixed to a length of 6. [8] | -| `%.9f`| `.026490000` | Similar to `.%f` but left-aligned but fixed to a length of 9. [8] | -| `%3f` | `026` | Similar to `%.3f` but without the leading dot. [8] | -| `%6f` | `026490` | Similar to `%.6f` but without the leading dot. [8] | -| `%9f` | `026490000` | Similar to `%.9f` but without the leading dot. [8] | +| `%S` | `60` | Second number (00--60), zero-padded to 2 digits. [^5] | +| `%f` | `026490000` | The fractional seconds (in nanoseconds) since last whole second. [^8] | +| `%.f` | `.026490`| Similar to `.%f` but left-aligned. These all consume the leading dot. [^8] | +| `%.3f`| `.026` | Similar to `.%f` but left-aligned but fixed to a length of 3. [^8] | +| `%.6f`| `.026490` | Similar to `.%f` but left-aligned but fixed to a length of 6. [^8] | +| `%.9f`| `.026490000` | Similar to `.%f` but left-aligned but fixed to a length of 9. [^8] | +| `%3f` | `026` | Similar to `%.3f` but without the leading dot. [^8] | +| `%6f` | `026490` | Similar to `%.6f` but without the leading dot. [^8] | +| `%9f` | `026490000` | Similar to `%.9f` but without the leading dot. [^8] | | | | | | `%R` | `00:34` | Hour-minute format. Same to `%H:%M`. | | `%T` | `00:34:60` | Hour-minute-second format. Same to `%H:%M:%S`. | @@ -75,9 +75,9 @@ The following specifiers are available both to formatting and parsing. | | | | | | | **DATE & TIME SPECIFIERS:** | |`%c`|`Sun Jul 8 00:34:60 2001`|`ctime` date & time format. Same to `%a %b %e %T %Y` sans `\n`.| -| `%+` | `2001-07-08T00:34:60.026490+09:30` | ISO 8601 / RFC 3339 date & time format. [6] | +| `%+` | `2001-07-08T00:34:60.026490+09:30` | ISO 8601 / RFC 3339 date & time format. [^6] | | | | | -| `%s` | `994518299` | UNIX timestamp, the number of seconds since 1970-01-01 00:00 UTC. [7] | +| `%s` | `994518299` | UNIX timestamp, the number of seconds since 1970-01-01 00:00 UTC. [^7]| | | | | | | | **SPECIAL SPECIFIERS:** | | `%t` | | Literal tab (`\t`). | @@ -95,59 +95,62 @@ Modifier | Description Notes: -1. `%Y`: +[^1]: `%Y`: Negative years are allowed in formatting but not in parsing. -2. 
`%C`, `%y`: +[^2]: `%C`, `%y`: This is floor division, so 100 BCE (year number -99) will print `-1` and `99` respectively. -3. `%U`: +[^3]: `%U`: Week 1 starts with the first Sunday in that year. It is possible to have week 0 for days before the first Sunday. -4. `%G`, `%g`, `%V`: +[^4]: `%G`, `%g`, `%V`: Week 1 is the first week with at least 4 days in that year. Week 0 does not exist, so this should be used with `%G` or `%g`. -5. `%S`: +[^5]: `%S`: It accounts for leap seconds, so `60` is possible. -6. `%+`: - Same to `%Y-%m-%dT%H:%M:%S%.f%:z`, - i.e. 0, 3, 6 or 9 fractional digits for seconds and colons in the time zone offset. +[^6]: `%+`: Same as `%Y-%m-%dT%H:%M:%S%.f%:z`, i.e. 0, 3, 6 or 9 fractional + digits for seconds and colons in the time zone offset. +
+
+ The typical `strftime` implementations have different (and locale-dependent) + formats for this specifier. While Chrono's format for `%+` is far more + stable, it is best to avoid this specifier if you want to control the exact + output. - The typical `strftime` implementations have - different (and locale-dependent) formats for this specifier. - While Chrono's format for `%+` is far more stable, - it is best to avoid this specifier if you want to control the exact output. - -7. `%s`: +[^7]: `%s`: This is not padded and can be negative. For the purpose of Chrono, it only accounts for non-leap seconds so it slightly differs from ISO C `strftime` behavior. -8. `%f`, `%.f`, `%.3f`, `%.6f`, `%.9f`, `%3f`, `%6f`, `%9f`: - +[^8]: `%f`, `%.f`, `%.3f`, `%.6f`, `%.9f`, `%3f`, `%6f`, `%9f`: +
The default `%f` is right-aligned and always zero-padded to 9 digits for the compatibility with glibc and others, so it always counts the number of nanoseconds since the last whole second. E.g. 7ms after the last second will print `007000000`, and parsing `7000000` will yield the same. - +
+
The variant `%.f` is left-aligned and print 0, 3, 6 or 9 fractional digits according to the precision. E.g. 70ms after the last second under `%.f` will print `.070` (note: not `.07`), and parsing `.07`, `.070000` etc. will yield the same. Note that they can print or read nothing if the fractional part is zero or the next character is not `.`. - +
+
The variant `%.3f`, `%.6f` and `%.9f` are left-aligned and print 3, 6 or 9 fractional digits according to the number preceding `f`. E.g. 70ms after the last second under `%.3f` will print `.070` (note: not `.07`), and parsing `.07`, `.070000` etc. will yield the same. Note that they can read nothing if the fractional part is zero or the next character is not `.` however will print with the specified length. - +
+
The variant `%3f`, `%6f` and `%9f` are left-aligned and print 3, 6 or 9 fractional digits according to the number preceding `f`, but without the leading dot. E.g. 70ms after the last second under `%3f` will print `070` (note: not `07`), diff --git a/third_party/rust/chrono/src/lib.rs b/third_party/rust/chrono/src/lib.rs index 1f149ed5dc..ef76a7f8a0 100644 --- a/third_party/rust/chrono/src/lib.rs +++ b/third_party/rust/chrono/src/lib.rs @@ -66,7 +66,7 @@ //! months. //! //! Chrono does not yet natively support -//! the standard [`Duration`](https://docs.rs/time/0.1.40/time/struct.Duration.html) type, +//! the standard [`Duration`](https://doc.rust-lang.org/std/time/struct.Duration.html) type, //! but it will be supported in the future. //! Meanwhile you can convert between two types with //! [`Duration::from_std`](https://docs.rs/time/0.1.40/time/struct.Duration.html#method.from_std) @@ -158,22 +158,22 @@ //! The following illustrates most supported operations to the date and time: //! //! ```rust -//! # extern crate chrono; extern crate time; fn main() { +//! # extern crate chrono; +//! extern crate time; +//! +//! # fn main() { //! use chrono::prelude::*; //! use time::Duration; //! -//! # /* we intentionally fake the datetime... //! // assume this returned `2014-11-28T21:45:59.324310806+09:00`: -//! let dt = Local::now(); -//! # */ // up to here. we now define a fixed datetime for the illustrative purpose. -//! # let dt = FixedOffset::east(9*3600).ymd(2014, 11, 28).and_hms_nano(21, 45, 59, 324310806); +//! let dt = FixedOffset::east(9*3600).ymd(2014, 11, 28).and_hms_nano(21, 45, 59, 324310806); //! //! // property accessors //! assert_eq!((dt.year(), dt.month(), dt.day()), (2014, 11, 28)); //! assert_eq!((dt.month0(), dt.day0()), (10, 27)); // for unfortunate souls //! assert_eq!((dt.hour(), dt.minute(), dt.second()), (21, 45, 59)); //! assert_eq!(dt.weekday(), Weekday::Fri); -//! assert_eq!(dt.weekday().number_from_monday(), 5); // Mon=1, ..., Sat=7 +//! assert_eq!(dt.weekday().number_from_monday(), 5); // Mon=1, ..., Sun=7 //! assert_eq!(dt.ordinal(), 332); // the day of year //! assert_eq!(dt.num_days_from_ce(), 735565); // the number of days from and including Jan 1, 1 //! @@ -302,10 +302,8 @@ //! to get the number of additional number of nanoseconds. //! //! ```rust -//! # use chrono::DateTime; -//! # use chrono::Utc; //! // We need the trait in scope to use Utc::timestamp(). -//! use chrono::TimeZone; +//! use chrono::{DateTime, TimeZone, Utc}; //! //! // Construct a datetime from epoch: //! let dt = Utc.timestamp(1_500_000_000, 0); @@ -385,9 +383,12 @@ #![doc(html_root_url = "https://docs.rs/chrono/latest/")] -#![cfg_attr(bench, feature(test))] // lib stability features as per RFC #507 +#![cfg_attr(feature = "bench", feature(test))] // lib stability features as per RFC #507 #![deny(missing_docs)] #![deny(missing_debug_implementations)] +#![deny(dead_code)] + +#![cfg_attr(not(any(feature = "std", test)), no_std)] // The explicit 'static lifetimes are still needed for rustc 1.13-16 // backward compatibility, and this appeases clippy. 
If minimum rustc @@ -405,6 +406,13 @@ trivially_copy_pass_by_ref, ))] +#[cfg(feature = "alloc")] +extern crate alloc; +#[cfg(any(feature = "std", test))] +extern crate std as core; +#[cfg(all(feature = "std", not(feature="alloc")))] +extern crate std as alloc; + #[cfg(feature="clock")] extern crate time as oldtime; extern crate num_integer; @@ -413,6 +421,18 @@ extern crate num_traits; extern crate rustc_serialize; #[cfg(feature = "serde")] extern crate serde as serdelib; +#[cfg(test)] +#[macro_use] +extern crate doc_comment; +#[cfg(all(target_arch = "wasm32", feature="wasmbind"))] +extern crate wasm_bindgen; +#[cfg(all(target_arch = "wasm32", feature="wasmbind"))] +extern crate js_sys; +#[cfg(feature = "bench")] +extern crate test; + +#[cfg(test)] +doctest!("../README.md"); // this reexport is to aid the transition and should not be in the prelude! pub use oldtime::Duration; @@ -451,7 +471,7 @@ mod div; mod oldtime; pub mod offset; pub mod naive { - //! Date and time types which do not concern about the timezones. + //! Date and time types unconcerned with timezones. //! //! They are primarily building blocks for other types //! (e.g. [`TimeZone`](../offset/trait.TimeZone.html)), @@ -503,6 +523,41 @@ pub mod serde { pub use super::datetime::serde::*; } +// Until rust 1.18 there is no "pub(crate)" so to share this we need it in the root + +#[cfg(feature = "serde")] +enum SerdeError { + NonExistent { timestamp: V }, + Ambiguous { timestamp: V, min: D, max: D }, +} + +/// Construct a [`SerdeError::NonExistent`] +#[cfg(feature = "serde")] +fn ne_timestamp(ts: T) -> SerdeError { + SerdeError::NonExistent:: { timestamp: ts } +} + +#[cfg(feature = "serde")] +impl fmt::Debug for SerdeError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "ChronoSerdeError({})", self) + } +} + +// impl core::error::Error for SerdeError {} +#[cfg(feature = "serde")] +impl fmt::Display for SerdeError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + &SerdeError::NonExistent { ref timestamp } => write!( + f, "value is not a legal timestamp: {}", timestamp), + &SerdeError::Ambiguous { ref timestamp, ref min, ref max } => write!( + f, "value is an ambiguous timestamp: {}, could be either of {}, {}", + timestamp, min, max), + } + } +} + /// The day of week. /// /// The order of the days of week depends on the context. @@ -637,6 +692,20 @@ impl Weekday { } } +impl fmt::Display for Weekday { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(match *self { + Weekday::Mon => "Mon", + Weekday::Tue => "Tue", + Weekday::Wed => "Wed", + Weekday::Thu => "Thu", + Weekday::Fri => "Fri", + Weekday::Sat => "Sat", + Weekday::Sun => "Sun", + }) + } +} + /// Any weekday can be represented as an integer from 0 to 6, which equals to /// [`Weekday::num_days_from_monday`](#method.num_days_from_monday) in this implementation. /// Do not heavily depend on this though; use explicit methods whenever possible. @@ -670,7 +739,7 @@ impl num_traits::FromPrimitive for Weekday { } } -use std::fmt; +use core::fmt; /// An error resulting from reading `Weekday` value with `FromStr`. 
#[derive(Clone, PartialEq)] @@ -689,14 +758,14 @@ impl fmt::Debug for ParseWeekdayError { #[cfg(feature = "serde")] mod weekday_serde { use super::Weekday; - use std::fmt; + use core::fmt; use serdelib::{ser, de}; impl ser::Serialize for Weekday { fn serialize(&self, serializer: S) -> Result where S: ser::Serializer { - serializer.serialize_str(&format!("{:?}", self)) + serializer.collect_str(&self) } } @@ -880,16 +949,18 @@ pub trait Datelike: Sized { /// Returns `None` when the resulting value would be invalid. fn with_ordinal0(&self, ordinal0: u32) -> Option; - /// Returns the number of days since January 1, Year 1 (aka Day 1) in the - /// proleptic Gregorian calendar. + /// Counts the days in the proleptic Gregorian calendar, with January 1, Year 1 (CE) as day 1. /// - /// # Example: + /// # Examples /// - /// ~~~ + /// ``` /// use chrono::{NaiveDate, Datelike}; - /// assert_eq!(NaiveDate::from_ymd(1970, 1, 1).num_days_from_ce(), 719163); + /// + /// assert_eq!(NaiveDate::from_ymd(1970, 1, 1).num_days_from_ce(), 719_163); + /// assert_eq!(NaiveDate::from_ymd(2, 1, 1).num_days_from_ce(), 366); + /// assert_eq!(NaiveDate::from_ymd(1, 1, 1).num_days_from_ce(), 1); /// assert_eq!(NaiveDate::from_ymd(0, 1, 1).num_days_from_ce(), -365); - /// ~~~ + /// ``` fn num_days_from_ce(&self) -> i32 { // we know this wouldn't overflow since year is limited to 1/2^13 of i32's full range. let mut year = self.year() - 1; diff --git a/third_party/rust/chrono/src/naive/date.rs b/third_party/rust/chrono/src/naive/date.rs index aad98bc605..8470c5ac1a 100644 --- a/third_party/rust/chrono/src/naive/date.rs +++ b/third_party/rust/chrono/src/naive/date.rs @@ -3,8 +3,10 @@ //! ISO 8601 calendar date without timezone. -use std::{str, fmt}; -use std::ops::{Add, Sub, AddAssign, SubAssign}; +#[cfg(any(feature = "alloc", feature = "std", test))] +use core::borrow::Borrow; +use core::{str, fmt}; +use core::ops::{Add, Sub, AddAssign, SubAssign}; use num_traits::ToPrimitive; use oldtime::Duration as OldDuration; @@ -12,7 +14,9 @@ use {Weekday, Datelike}; use div::div_mod_floor; use naive::{NaiveTime, NaiveDateTime, IsoWeek}; use format::{Item, Numeric, Pad}; -use format::{parse, Parsed, ParseError, ParseResult, DelayedFormat, StrftimeItems}; +use format::{parse, Parsed, ParseError, ParseResult, StrftimeItems}; +#[cfg(any(feature = "alloc", feature = "std", test))] +use format::DelayedFormat; use super::isoweek; use super::internals::{self, DateImpl, Of, Mdf, YearFlags}; @@ -330,10 +334,10 @@ impl NaiveDate { } } - /// Makes a new `NaiveDate` from the number of days since January 1, 1 (Day 1) - /// in the proleptic Gregorian calendar. + /// Makes a new `NaiveDate` from a day's number in the proleptic Gregorian calendar, with + /// January 1, 1 being day 1. /// - /// Panics on the out-of-range date. + /// Panics if the date is out of range. /// /// # Example /// @@ -378,10 +382,10 @@ impl NaiveDate { NaiveDate::from_num_days_from_ce_opt(days).expect("out-of-range date") } - /// Makes a new `NaiveDate` from the number of days since January 1, 1 (Day 1) - /// in the proleptic Gregorian calendar. + /// Makes a new `NaiveDate` from a day's number in the proleptic Gregorian calendar, with + /// January 1, 1 being day 1. /// - /// Returns `None` on the out-of-range date. + /// Returns `None` if the date is out of range. 
/// /// # Example /// @@ -451,7 +455,7 @@ impl NaiveDate { /// ~~~~ pub fn parse_from_str(s: &str, fmt: &str) -> ParseResult { let mut parsed = Parsed::new(); - try!(parse(&mut parsed, s, StrftimeItems::new(fmt))); + parse(&mut parsed, s, StrftimeItems::new(fmt))?; parsed.to_naive_date() } @@ -916,9 +920,10 @@ impl NaiveDate { /// # let d = NaiveDate::from_ymd(2015, 9, 5); /// assert_eq!(format!("{}", d.format_with_items(fmt)), "2015-09-05"); /// ~~~~ + #[cfg(any(feature = "alloc", feature = "std", test))] #[inline] - pub fn format_with_items<'a, I>(&self, items: I) -> DelayedFormat - where I: Iterator> + Clone { + pub fn format_with_items<'a, I, B>(&self, items: I) -> DelayedFormat + where I: Iterator + Clone, B: Borrow> { DelayedFormat::new(Some(*self), None, items) } @@ -954,6 +959,7 @@ impl NaiveDate { /// assert_eq!(format!("{}", d.format("%Y-%m-%d")), "2015-09-05"); /// assert_eq!(format!("{}", d.format("%A, %-d %B, %C%y")), "Saturday, 5 September, 2015"); /// ~~~~ + #[cfg(any(feature = "alloc", feature = "std", test))] #[inline] pub fn format<'a>(&self, fmt: &'a str) -> DelayedFormat> { self.format_with_items(StrftimeItems::new(fmt)) @@ -1387,7 +1393,7 @@ impl SubAssign for NaiveDate { /// Subtracts another `NaiveDate` from the current date. /// Returns a `Duration` of integral numbers. -/// +/// /// This does not overflow or underflow at all, /// as all possible output fits in the range of `Duration`. /// @@ -1503,16 +1509,16 @@ impl str::FromStr for NaiveDate { fn from_str(s: &str) -> ParseResult { const ITEMS: &'static [Item<'static>] = &[ - Item::Space(""), Item::Numeric(Numeric::Year, Pad::Zero), + Item::Numeric(Numeric::Year, Pad::Zero), Item::Space(""), Item::Literal("-"), - Item::Space(""), Item::Numeric(Numeric::Month, Pad::Zero), + Item::Numeric(Numeric::Month, Pad::Zero), Item::Space(""), Item::Literal("-"), - Item::Space(""), Item::Numeric(Numeric::Day, Pad::Zero), + Item::Numeric(Numeric::Day, Pad::Zero), Item::Space(""), ]; let mut parsed = Parsed::new(); - try!(parse(&mut parsed, s, ITEMS.iter().cloned())); + parse(&mut parsed, s, ITEMS.iter())?; parsed.to_naive_date() } } @@ -1600,7 +1606,7 @@ mod rustc_serialize { #[cfg(feature = "serde")] mod serde { - use std::fmt; + use core::fmt; use super::NaiveDate; use serdelib::{ser, de}; @@ -1629,15 +1635,23 @@ mod serde { impl<'de> de::Visitor<'de> for NaiveDateVisitor { type Value = NaiveDate; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { write!(formatter, "a formatted date string") } + #[cfg(any(feature = "std", test))] + fn visit_str(self, value: &str) -> Result + where E: de::Error + { + value.parse().map_err(E::custom) + } + + #[cfg(not(any(feature = "std", test)))] fn visit_str(self, value: &str) -> Result where E: de::Error { - value.parse().map_err(|err| E::custom(format!("{}", err))) + value.parse().map_err(E::custom) } } diff --git a/third_party/rust/chrono/src/naive/datetime.rs b/third_party/rust/chrono/src/naive/datetime.rs index d228a54f55..b9f54777cb 100644 --- a/third_party/rust/chrono/src/naive/datetime.rs +++ b/third_party/rust/chrono/src/naive/datetime.rs @@ -3,8 +3,10 @@ //! ISO 8601 date and time without timezone. 
-use std::{str, fmt, hash}; -use std::ops::{Add, Sub, AddAssign, SubAssign}; +#[cfg(any(feature = "alloc", feature = "std", test))] +use core::borrow::Borrow; +use core::{str, fmt, hash}; +use core::ops::{Add, Sub, AddAssign, SubAssign}; use num_traits::ToPrimitive; use oldtime::Duration as OldDuration; @@ -12,7 +14,9 @@ use {Weekday, Timelike, Datelike}; use div::div_mod_floor; use naive::{NaiveTime, NaiveDate, IsoWeek}; use format::{Item, Numeric, Pad, Fixed}; -use format::{parse, Parsed, ParseError, ParseResult, DelayedFormat, StrftimeItems}; +use format::{parse, Parsed, ParseError, ParseResult, StrftimeItems}; +#[cfg(any(feature = "alloc", feature = "std", test))] +use format::DelayedFormat; /// The tight upper bound guarantees that a duration with `|Duration| >= 2^MAX_SECS_BITS` /// will always overflow the addition with any date and time type. @@ -206,7 +210,7 @@ impl NaiveDateTime { /// ~~~~ pub fn parse_from_str(s: &str, fmt: &str) -> ParseResult { let mut parsed = Parsed::new(); - try!(parse(&mut parsed, s, StrftimeItems::new(fmt))); + parse(&mut parsed, s, StrftimeItems::new(fmt))?; parsed.to_naive_datetime_with_offset(0) // no offset adjustment } @@ -305,21 +309,33 @@ impl NaiveDateTime { /// Note that this does *not* account for the timezone! /// The true "UNIX timestamp" would count seconds since the midnight *UTC* on the epoch. /// + /// # Panics + /// /// Note also that this does reduce the number of years that can be - /// represented from ~584 Billion to ~584. (If this is a problem, - /// please file an issue to let me know what domain needs nanosecond - /// precision over millenia, I'm curious.) + /// represented from ~584 Billion to ~584 years. The dates that can be + /// represented as nanoseconds are between 1677-09-21T00:12:44.0 and + /// 2262-04-11T23:47:16.854775804. + /// + /// (If this is a problem, please file an issue to let me know what domain + /// needs nanosecond precision over millenia, I'm curious.) 
/// /// # Example /// /// ~~~~ - /// use chrono::NaiveDate; + /// use chrono::{NaiveDate, NaiveDateTime}; /// /// let dt = NaiveDate::from_ymd(1970, 1, 1).and_hms_nano(0, 0, 1, 444); /// assert_eq!(dt.timestamp_nanos(), 1_000_000_444); /// /// let dt = NaiveDate::from_ymd(2001, 9, 9).and_hms_nano(1, 46, 40, 555); - /// assert_eq!(dt.timestamp_nanos(), 1_000_000_000_000_000_555); + /// + /// const A_BILLION: i64 = 1_000_000_000; + /// let nanos = dt.timestamp_nanos(); + /// assert_eq!(nanos, 1_000_000_000_000_000_555); + /// assert_eq!( + /// dt, + /// NaiveDateTime::from_timestamp(nanos / A_BILLION, (nanos % A_BILLION) as u32) + /// ); /// ~~~~ #[inline] pub fn timestamp_nanos(&self) -> i64 { @@ -633,9 +649,10 @@ impl NaiveDateTime { /// # let dt = NaiveDate::from_ymd(2015, 9, 5).and_hms(23, 56, 4); /// assert_eq!(format!("{}", dt.format_with_items(fmt)), "2015-09-05 23:56:04"); /// ~~~~ + #[cfg(any(feature = "alloc", feature = "std", test))] #[inline] - pub fn format_with_items<'a, I>(&self, items: I) -> DelayedFormat - where I: Iterator> + Clone { + pub fn format_with_items<'a, I, B>(&self, items: I) -> DelayedFormat + where I: Iterator + Clone, B: Borrow> { DelayedFormat::new(Some(self.date), Some(self.time), items) } @@ -671,6 +688,7 @@ impl NaiveDateTime { /// assert_eq!(format!("{}", dt.format("%Y-%m-%d %H:%M:%S")), "2015-09-05 23:56:04"); /// assert_eq!(format!("{}", dt.format("around %l %p on %b %-d")), "around 11 PM on Sep 5"); /// ~~~~ + #[cfg(any(feature = "alloc", feature = "std", test))] #[inline] pub fn format<'a>(&self, fmt: &'a str) -> DelayedFormat> { self.format_with_items(StrftimeItems::new(fmt)) @@ -1456,22 +1474,22 @@ impl str::FromStr for NaiveDateTime { fn from_str(s: &str) -> ParseResult { const ITEMS: &'static [Item<'static>] = &[ - Item::Space(""), Item::Numeric(Numeric::Year, Pad::Zero), + Item::Numeric(Numeric::Year, Pad::Zero), Item::Space(""), Item::Literal("-"), - Item::Space(""), Item::Numeric(Numeric::Month, Pad::Zero), + Item::Numeric(Numeric::Month, Pad::Zero), Item::Space(""), Item::Literal("-"), - Item::Space(""), Item::Numeric(Numeric::Day, Pad::Zero), + Item::Numeric(Numeric::Day, Pad::Zero), Item::Space(""), Item::Literal("T"), // XXX shouldn't this be case-insensitive? 
- Item::Space(""), Item::Numeric(Numeric::Hour, Pad::Zero), + Item::Numeric(Numeric::Hour, Pad::Zero), Item::Space(""), Item::Literal(":"), - Item::Space(""), Item::Numeric(Numeric::Minute, Pad::Zero), + Item::Numeric(Numeric::Minute, Pad::Zero), Item::Space(""), Item::Literal(":"), - Item::Space(""), Item::Numeric(Numeric::Second, Pad::Zero), + Item::Numeric(Numeric::Second, Pad::Zero), Item::Fixed(Fixed::Nanosecond), Item::Space(""), ]; let mut parsed = Parsed::new(); - try!(parse(&mut parsed, s, ITEMS.iter().cloned())); + parse(&mut parsed, s, ITEMS.iter())?; parsed.to_naive_datetime_with_offset(0) } } @@ -1651,7 +1669,7 @@ pub mod rustc_serialize { /// Tools to help serializing/deserializing `NaiveDateTime`s #[cfg(feature = "serde")] pub mod serde { - use std::fmt; + use core::fmt; use super::{NaiveDateTime}; use serdelib::{ser, de}; @@ -1690,7 +1708,7 @@ pub mod serde { fn visit_str(self, value: &str) -> Result where E: de::Error { - value.parse().map_err(|err| E::custom(format!("{}", err))) + value.parse().map_err(E::custom) } } @@ -1738,10 +1756,10 @@ pub mod serde { /// # fn main() { example().unwrap(); } /// ``` pub mod ts_nanoseconds { - use std::fmt; + use core::fmt; use serdelib::{ser, de}; - use NaiveDateTime; + use {NaiveDateTime, ne_timestamp}; /// Serialize a UTC datetime into an integer number of nanoseconds since the epoch /// @@ -1816,7 +1834,7 @@ pub mod serde { pub fn deserialize<'de, D>(d: D) -> Result where D: de::Deserializer<'de> { - Ok(try!(d.deserialize_i64(NaiveDateTimeFromNanoSecondsVisitor))) + Ok(d.deserialize_i64(NaiveDateTimeFromNanoSecondsVisitor)?) } struct NaiveDateTimeFromNanoSecondsVisitor; @@ -1834,7 +1852,7 @@ pub mod serde { { NaiveDateTime::from_timestamp_opt(value / 1_000_000_000, (value % 1_000_000_000) as u32) - .ok_or_else(|| E::custom(format!("value is not a legal timestamp: {}", value))) + .ok_or_else(|| E::custom(ne_timestamp(value))) } fn visit_u64(self, value: u64) -> Result @@ -1842,7 +1860,7 @@ pub mod serde { { NaiveDateTime::from_timestamp_opt(value as i64 / 1_000_000_000, (value as i64 % 1_000_000_000) as u32) - .ok_or_else(|| E::custom(format!("value is not a legal timestamp: {}", value))) + .ok_or_else(|| E::custom(ne_timestamp(value))) } } } @@ -1883,10 +1901,10 @@ pub mod serde { /// # fn main() { example().unwrap(); } /// ``` pub mod ts_milliseconds { - use std::fmt; + use core::fmt; use serdelib::{ser, de}; - use NaiveDateTime; + use {NaiveDateTime, ne_timestamp}; /// Serialize a UTC datetime into an integer number of milliseconds since the epoch /// @@ -1961,7 +1979,7 @@ pub mod serde { pub fn deserialize<'de, D>(d: D) -> Result where D: de::Deserializer<'de> { - Ok(try!(d.deserialize_i64(NaiveDateTimeFromMilliSecondsVisitor))) + Ok(d.deserialize_i64(NaiveDateTimeFromMilliSecondsVisitor)?) 
} struct NaiveDateTimeFromMilliSecondsVisitor; @@ -1979,7 +1997,7 @@ pub mod serde { { NaiveDateTime::from_timestamp_opt(value / 1000, ((value % 1000) * 1_000_000) as u32) - .ok_or_else(|| E::custom(format!("value is not a legal timestamp: {}", value))) + .ok_or_else(|| E::custom(ne_timestamp(value))) } fn visit_u64(self, value: u64) -> Result @@ -1987,7 +2005,7 @@ pub mod serde { { NaiveDateTime::from_timestamp_opt((value / 1000) as i64, ((value % 1000) * 1_000_000) as u32) - .ok_or_else(|| E::custom(format!("value is not a legal timestamp: {}", value))) + .ok_or_else(|| E::custom(ne_timestamp(value))) } } } @@ -2028,10 +2046,10 @@ pub mod serde { /// # fn main() { example().unwrap(); } /// ``` pub mod ts_seconds { - use std::fmt; + use core::fmt; use serdelib::{ser, de}; - use NaiveDateTime; + use {NaiveDateTime, ne_timestamp}; /// Serialize a UTC datetime into an integer number of seconds since the epoch /// @@ -2106,7 +2124,7 @@ pub mod serde { pub fn deserialize<'de, D>(d: D) -> Result where D: de::Deserializer<'de> { - Ok(try!(d.deserialize_i64(NaiveDateTimeFromSecondsVisitor))) + Ok(d.deserialize_i64(NaiveDateTimeFromSecondsVisitor)?) } struct NaiveDateTimeFromSecondsVisitor; @@ -2123,14 +2141,14 @@ pub mod serde { where E: de::Error { NaiveDateTime::from_timestamp_opt(value, 0) - .ok_or_else(|| E::custom(format!("value is not a legal timestamp: {}", value))) + .ok_or_else(|| E::custom(ne_timestamp(value))) } fn visit_u64(self, value: u64) -> Result where E: de::Error { NaiveDateTime::from_timestamp_opt(value as i64, 0) - .ok_or_else(|| E::custom(format!("value is not a legal timestamp: {}", value))) + .ok_or_else(|| E::custom(ne_timestamp(value))) } } } @@ -2348,4 +2366,24 @@ mod tests { let time = base + Duration::microseconds(t); assert_eq!(t, time.signed_duration_since(base).num_microseconds().unwrap()); } + + #[test] + fn test_nanosecond_range() { + const A_BILLION: i64 = 1_000_000_000; + let maximum = "2262-04-11T23:47:16.854775804"; + let parsed: NaiveDateTime = maximum.parse().unwrap(); + let nanos = parsed.timestamp_nanos(); + assert_eq!( + parsed, + NaiveDateTime::from_timestamp(nanos / A_BILLION, (nanos % A_BILLION) as u32) + ); + + let minimum = "1677-09-21T00:12:44.000000000"; + let parsed: NaiveDateTime = minimum.parse().unwrap(); + let nanos = parsed.timestamp_nanos(); + assert_eq!( + parsed, + NaiveDateTime::from_timestamp(nanos / A_BILLION, (nanos % A_BILLION) as u32) + ); + } } diff --git a/third_party/rust/chrono/src/naive/internals.rs b/third_party/rust/chrono/src/naive/internals.rs index dd9d535b1b..d0431634bf 100644 --- a/third_party/rust/chrono/src/naive/internals.rs +++ b/third_party/rust/chrono/src/naive/internals.rs @@ -15,7 +15,7 @@ #![allow(dead_code)] // some internal methods have been left for consistency -use std::{i32, fmt}; +use core::{i32, fmt}; use num_traits::FromPrimitive; use Weekday; use div::{div_rem, mod_floor}; @@ -470,7 +470,6 @@ impl fmt::Debug for Mdf { #[cfg(test)] mod tests { #[cfg(test)] extern crate num_iter; - #[cfg(bench)] extern crate test; use Weekday; use super::{Of, Mdf}; @@ -517,7 +516,7 @@ mod tests { assert_eq!(GF.nisoweeks(), 52); } - #[cfg(bench)] + #[cfg(feature = "bench")] #[bench] fn bench_year_flags_from_year(bh: &mut test::Bencher) { bh.iter(|| { diff --git a/third_party/rust/chrono/src/naive/isoweek.rs b/third_party/rust/chrono/src/naive/isoweek.rs index 667cf2f8e9..0aeedb0139 100644 --- a/third_party/rust/chrono/src/naive/isoweek.rs +++ b/third_party/rust/chrono/src/naive/isoweek.rs @@ -3,7 +3,7 @@ //! ISO 8601 week. 
-use std::fmt; +use core::fmt; use super::internals::{DateImpl, Of, YearFlags}; diff --git a/third_party/rust/chrono/src/naive/time.rs b/third_party/rust/chrono/src/naive/time.rs index 440c8a7ac0..7b59a5decd 100644 --- a/third_party/rust/chrono/src/naive/time.rs +++ b/third_party/rust/chrono/src/naive/time.rs @@ -3,14 +3,18 @@ //! ISO 8601 time without timezone. -use std::{str, fmt, hash}; -use std::ops::{Add, Sub, AddAssign, SubAssign}; +#[cfg(any(feature = "alloc", feature = "std", test))] +use core::borrow::Borrow; +use core::{str, fmt, hash}; +use core::ops::{Add, Sub, AddAssign, SubAssign}; use oldtime::Duration as OldDuration; use Timelike; use div::div_mod_floor; use format::{Item, Numeric, Pad, Fixed}; -use format::{parse, Parsed, ParseError, ParseResult, DelayedFormat, StrftimeItems}; +use format::{parse, Parsed, ParseError, ParseResult, StrftimeItems}; +#[cfg(any(feature = "alloc", feature = "std", test))] +use format::DelayedFormat; /// ISO 8601 time without timezone. /// Allows for the nanosecond precision and optional leap second representation. @@ -492,7 +496,7 @@ impl NaiveTime { /// ~~~~ pub fn parse_from_str(s: &str, fmt: &str) -> ParseResult { let mut parsed = Parsed::new(); - try!(parse(&mut parsed, s, StrftimeItems::new(fmt))); + parse(&mut parsed, s, StrftimeItems::new(fmt))?; parsed.to_naive_time() } @@ -681,7 +685,7 @@ impl NaiveTime { // `rhs.frac`|========================================>| // | | | `self - rhs` | | - use std::cmp::Ordering; + use core::cmp::Ordering; let secs = i64::from(self.secs) - i64::from(rhs.secs); let frac = i64::from(self.frac) - i64::from(rhs.frac); @@ -723,9 +727,10 @@ impl NaiveTime { /// # let t = NaiveTime::from_hms(23, 56, 4); /// assert_eq!(format!("{}", t.format_with_items(fmt)), "23:56:04"); /// ~~~~ + #[cfg(any(feature = "alloc", feature = "std", test))] #[inline] - pub fn format_with_items<'a, I>(&self, items: I) -> DelayedFormat - where I: Iterator> + Clone { + pub fn format_with_items<'a, I, B>(&self, items: I) -> DelayedFormat + where I: Iterator + Clone, B: Borrow> { DelayedFormat::new(None, Some(*self), items) } @@ -763,6 +768,7 @@ impl NaiveTime { /// assert_eq!(format!("{}", t.format("%H:%M:%S%.6f")), "23:56:04.012345"); /// assert_eq!(format!("{}", t.format("%-I:%M %p")), "11:56 PM"); /// ~~~~ + #[cfg(any(feature = "alloc", feature = "std", test))] #[inline] pub fn format<'a>(&self, fmt: &'a str) -> DelayedFormat> { self.format_with_items(StrftimeItems::new(fmt)) @@ -1230,7 +1236,7 @@ impl fmt::Debug for NaiveTime { (sec, self.frac) }; - try!(write!(f, "{:02}:{:02}:{:02}", hour, min, sec)); + write!(f, "{:02}:{:02}:{:02}", hour, min, sec)?; if nano == 0 { Ok(()) } else if nano % 1_000_000 == 0 { @@ -1299,16 +1305,16 @@ impl str::FromStr for NaiveTime { fn from_str(s: &str) -> ParseResult { const ITEMS: &'static [Item<'static>] = &[ - Item::Space(""), Item::Numeric(Numeric::Hour, Pad::Zero), + Item::Numeric(Numeric::Hour, Pad::Zero), Item::Space(""), Item::Literal(":"), - Item::Space(""), Item::Numeric(Numeric::Minute, Pad::Zero), + Item::Numeric(Numeric::Minute, Pad::Zero), Item::Space(""), Item::Literal(":"), - Item::Space(""), Item::Numeric(Numeric::Second, Pad::Zero), + Item::Numeric(Numeric::Second, Pad::Zero), Item::Fixed(Fixed::Nanosecond), Item::Space(""), ]; let mut parsed = Parsed::new(); - try!(parse(&mut parsed, s, ITEMS.iter().cloned())); + parse(&mut parsed, s, ITEMS.iter())?; parsed.to_naive_time() } } @@ -1411,7 +1417,7 @@ mod rustc_serialize { #[cfg(feature = "serde")] mod serde { - use std::fmt; + use 
core::fmt; use super::NaiveTime; use serdelib::{ser, de}; @@ -1431,7 +1437,7 @@ mod serde { impl<'de> de::Visitor<'de> for NaiveTimeVisitor { type Value = NaiveTime; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { write!(formatter, "a formatted time string") } @@ -1439,7 +1445,7 @@ mod serde { fn visit_str(self, value: &str) -> Result where E: de::Error { - value.parse().map_err(|err| E::custom(format!("{}", err))) + value.parse().map_err(E::custom) } } diff --git a/third_party/rust/chrono/src/offset/fixed.rs b/third_party/rust/chrono/src/offset/fixed.rs index ef861ceeca..01512c0124 100644 --- a/third_party/rust/chrono/src/offset/fixed.rs +++ b/third_party/rust/chrono/src/offset/fixed.rs @@ -3,8 +3,8 @@ //! The time zone which has a fixed offset from UTC. -use std::ops::{Add, Sub}; -use std::fmt; +use core::ops::{Add, Sub}; +use core::fmt; use oldtime::Duration as OldDuration; use Timelike; @@ -86,11 +86,13 @@ impl FixedOffset { } /// Returns the number of seconds to add to convert from UTC to the local time. + #[inline] pub fn local_minus_utc(&self) -> i32 { self.local_minus_utc } /// Returns the number of seconds to add to convert from the local time to UTC. + #[inline] pub fn utc_minus_local(&self) -> i32 { -self.local_minus_utc } diff --git a/third_party/rust/chrono/src/offset/local.rs b/third_party/rust/chrono/src/offset/local.rs index 6aa4ab7558..70c01e6b15 100644 --- a/third_party/rust/chrono/src/offset/local.rs +++ b/third_party/rust/chrono/src/offset/local.rs @@ -87,9 +87,21 @@ impl Local { } /// Returns a `DateTime` which corresponds to the current date. + #[cfg(not(all(target_arch = "wasm32", feature = "wasmbind")))] pub fn now() -> DateTime { tm_to_datetime(oldtime::now()) } + + /// Returns a `DateTime` which corresponds to the current date. + #[cfg(all(target_arch = "wasm32", feature = "wasmbind"))] + pub fn now() -> DateTime { + use super::Utc; + let now: DateTime = super::Utc::now(); + + // Workaround missing timezone logic in `time` crate + let offset = FixedOffset::west((js_sys::Date::new_0().get_timezone_offset() as i32) * 60); + DateTime::from_utc(now.naive_utc(), offset) + } } impl TimeZone for Local { @@ -179,4 +191,3 @@ mod tests { "unexpected timestr {:?}", timestr); } } - diff --git a/third_party/rust/chrono/src/offset/mod.rs b/third_party/rust/chrono/src/offset/mod.rs index ca86574718..0fe3ebd979 100644 --- a/third_party/rust/chrono/src/offset/mod.rs +++ b/third_party/rust/chrono/src/offset/mod.rs @@ -18,15 +18,15 @@ //! and provides implementations for 1 and 3. //! An `TimeZone` instance can be reconstructed from the corresponding `Offset` instance. -use std::fmt; +use core::fmt; +use format::{parse, ParseResult, Parsed, StrftimeItems}; +use naive::{NaiveDate, NaiveDateTime, NaiveTime}; use Weekday; -use naive::{NaiveDate, NaiveTime, NaiveDateTime}; use {Date, DateTime}; -use format::{parse, Parsed, ParseResult, StrftimeItems}; /// The conversion result from the local time to the timezone-aware datetime types. -#[derive(Clone, PartialEq, Debug)] +#[derive(Clone, PartialEq, Debug, Copy, Eq, Hash)] pub enum LocalResult { /// Given local time representation is invalid. /// This can occur when, for example, the positive timezone transition. @@ -41,17 +41,26 @@ pub enum LocalResult { impl LocalResult { /// Returns `Some` only when the conversion result is unique, or `None` otherwise. 
pub fn single(self) -> Option { - match self { LocalResult::Single(t) => Some(t), _ => None } + match self { + LocalResult::Single(t) => Some(t), + _ => None, + } } /// Returns `Some` for the earliest possible conversion result, or `None` if none. pub fn earliest(self) -> Option { - match self { LocalResult::Single(t) | LocalResult::Ambiguous(t,_) => Some(t), _ => None } + match self { + LocalResult::Single(t) | LocalResult::Ambiguous(t, _) => Some(t), + _ => None, + } } /// Returns `Some` for the latest possible conversion result, or `None` if none. pub fn latest(self) -> Option { - match self { LocalResult::Single(t) | LocalResult::Ambiguous(_,t) => Some(t), _ => None } + match self { + LocalResult::Single(t) | LocalResult::Ambiguous(_, t) => Some(t), + _ => None, + } } /// Maps a `LocalResult` into `LocalResult` with given function. @@ -72,8 +81,9 @@ impl LocalResult> { #[inline] pub fn and_time(self, time: NaiveTime) -> LocalResult> { match self { - LocalResult::Single(d) => d.and_time(time) - .map_or(LocalResult::None, LocalResult::Single), + LocalResult::Single(d) => d + .and_time(time) + .map_or(LocalResult::None, LocalResult::Single), _ => LocalResult::None, } } @@ -85,8 +95,9 @@ impl LocalResult> { #[inline] pub fn and_hms_opt(self, hour: u32, min: u32, sec: u32) -> LocalResult> { match self { - LocalResult::Single(d) => d.and_hms_opt(hour, min, sec) - .map_or(LocalResult::None, LocalResult::Single), + LocalResult::Single(d) => d + .and_hms_opt(hour, min, sec) + .map_or(LocalResult::None, LocalResult::Single), _ => LocalResult::None, } } @@ -97,11 +108,17 @@ impl LocalResult> { /// /// Propagates any error. Ambiguous result would be discarded. #[inline] - pub fn and_hms_milli_opt(self, hour: u32, min: u32, sec: u32, - milli: u32) -> LocalResult> { + pub fn and_hms_milli_opt( + self, + hour: u32, + min: u32, + sec: u32, + milli: u32, + ) -> LocalResult> { match self { - LocalResult::Single(d) => d.and_hms_milli_opt(hour, min, sec, milli) - .map_or(LocalResult::None, LocalResult::Single), + LocalResult::Single(d) => d + .and_hms_milli_opt(hour, min, sec, milli) + .map_or(LocalResult::None, LocalResult::Single), _ => LocalResult::None, } } @@ -112,11 +129,17 @@ impl LocalResult> { /// /// Propagates any error. Ambiguous result would be discarded. #[inline] - pub fn and_hms_micro_opt(self, hour: u32, min: u32, sec: u32, - micro: u32) -> LocalResult> { + pub fn and_hms_micro_opt( + self, + hour: u32, + min: u32, + sec: u32, + micro: u32, + ) -> LocalResult> { match self { - LocalResult::Single(d) => d.and_hms_micro_opt(hour, min, sec, micro) - .map_or(LocalResult::None, LocalResult::Single), + LocalResult::Single(d) => d + .and_hms_micro_opt(hour, min, sec, micro) + .map_or(LocalResult::None, LocalResult::Single), _ => LocalResult::None, } } @@ -127,15 +150,20 @@ impl LocalResult> { /// /// Propagates any error. Ambiguous result would be discarded. 
#[inline] - pub fn and_hms_nano_opt(self, hour: u32, min: u32, sec: u32, - nano: u32) -> LocalResult> { + pub fn and_hms_nano_opt( + self, + hour: u32, + min: u32, + sec: u32, + nano: u32, + ) -> LocalResult> { match self { - LocalResult::Single(d) => d.and_hms_nano_opt(hour, min, sec, nano) - .map_or(LocalResult::None, LocalResult::Single), + LocalResult::Single(d) => d + .and_hms_nano_opt(hour, min, sec, nano) + .map_or(LocalResult::None, LocalResult::Single), _ => LocalResult::None, } } - } impl LocalResult { @@ -144,7 +172,7 @@ impl LocalResult { match self { LocalResult::None => panic!("No such local time"), LocalResult::Single(t) => t, - LocalResult::Ambiguous(t1,t2) => { + LocalResult::Ambiguous(t1, t2) => { panic!("Ambiguous local time, ranging from {:?} to {:?}", t1, t2) } } @@ -345,10 +373,36 @@ pub trait TimeZone: Sized + Clone { /// }; /// ~~~~ fn timestamp_millis_opt(&self, millis: i64) -> LocalResult> { - let (secs, millis) = (millis / 1000, millis % 1000); + let (mut secs, mut millis) = (millis / 1000, millis % 1000); + if millis < 0 { + secs -= 1; + millis += 1000; + } self.timestamp_opt(secs, millis as u32 * 1_000_000) } + /// Makes a new `DateTime` from the number of non-leap nanoseconds + /// since January 1, 1970 0:00:00 UTC (aka "UNIX timestamp"). + /// + /// Unlike [`timestamp_millis`](#method.timestamp_millis), this never + /// panics. + /// + /// # Example + /// + /// ~~~~ + /// use chrono::{Utc, TimeZone}; + /// + /// assert_eq!(Utc.timestamp_nanos(1431648000000000).timestamp(), 1431648); + /// ~~~~ + fn timestamp_nanos(&self, nanos: i64) -> DateTime { + let (mut secs, mut nanos) = (nanos / 1_000_000_000, nanos % 1_000_000_000); + if nanos < 0 { + secs -= 1; + nanos += 1_000_000_000; + } + self.timestamp_opt(secs, nanos as u32).unwrap() + } + /// Parses a string with the specified format string and /// returns a `DateTime` with the current offset. /// See the [`format::strftime` module](../format/strftime/index.html) @@ -361,7 +415,7 @@ pub trait TimeZone: Sized + Clone { /// with parsed `FixedOffset`. fn datetime_from_str(&self, s: &str, fmt: &str) -> ParseResult> { let mut parsed = Parsed::new(); - try!(parse(&mut parsed, s, StrftimeItems::new(fmt))); + parse(&mut parsed, s, StrftimeItems::new(fmt))?; parsed.to_datetime_with_timezone(self) } @@ -384,9 +438,8 @@ pub trait TimeZone: Sized + Clone { /// Converts the local `NaiveDateTime` to the timezone-aware `DateTime` if possible. fn from_local_datetime(&self, local: &NaiveDateTime) -> LocalResult> { - self.offset_from_local_datetime(local).map(|offset| { - DateTime::from_utc(*local - offset.fix(), offset) - }) + self.offset_from_local_datetime(local) + .map(|offset| DateTime::from_utc(*local - offset.fix(), offset)) } /// Creates the offset for given UTC `NaiveDate`. This cannot fail. 
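The sign handling introduced above in `timestamp_millis_opt` (and in the new `timestamp_nanos`) normalizes a negative timestamp so that the seconds component rounds toward negative infinity and the subsecond component stays non-negative, which is what the `test_negative_millis` and `test_negative_nanos` cases further down expect. A minimal, self-contained sketch of that normalization, using a hypothetical `split_millis` helper rather than chrono's own types:

```rust
/// Hypothetical helper mirroring the normalization in `timestamp_millis_opt`:
/// split a signed millisecond count into (whole seconds, subsecond millis)
/// with the subsecond part always in 0..1000.
fn split_millis(millis: i64) -> (i64, u32) {
    let (mut secs, mut millis) = (millis / 1000, millis % 1000);
    if millis < 0 {
        secs -= 1;
        millis += 1000;
    }
    (secs, millis as u32)
}

fn main() {
    // -7001 ms is 8 whole seconds before the epoch plus 999 ms,
    // i.e. 1969-12-31 23:59:52.999 UTC in the tests below.
    assert_eq!(split_millis(-7001), (-8, 999));
    assert_eq!(split_millis(-1), (-1, 999));
    assert_eq!(split_millis(1500), (1, 500));
}
```

The same pattern, with `1_000_000_000` in place of `1000`, keeps the nanosecond component of `timestamp_nanos` in range, which is why its doc comment can promise that it never panics.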
@@ -408,12 +461,72 @@ pub trait TimeZone: Sized + Clone { } } -mod utc; mod fixed; -#[cfg(feature="clock")] +#[cfg(feature = "clock")] mod local; +mod utc; -pub use self::utc::Utc; pub use self::fixed::FixedOffset; -#[cfg(feature="clock")] +#[cfg(feature = "clock")] pub use self::local::Local; +pub use self::utc::Utc; + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_negative_millis() { + let dt = Utc.timestamp_millis(-1000); + assert_eq!(dt.to_string(), "1969-12-31 23:59:59 UTC"); + let dt = Utc.timestamp_millis(-7000); + assert_eq!(dt.to_string(), "1969-12-31 23:59:53 UTC"); + let dt = Utc.timestamp_millis(-7001); + assert_eq!(dt.to_string(), "1969-12-31 23:59:52.999 UTC"); + let dt = Utc.timestamp_millis(-7003); + assert_eq!(dt.to_string(), "1969-12-31 23:59:52.997 UTC"); + let dt = Utc.timestamp_millis(-999); + assert_eq!(dt.to_string(), "1969-12-31 23:59:59.001 UTC"); + let dt = Utc.timestamp_millis(-1); + assert_eq!(dt.to_string(), "1969-12-31 23:59:59.999 UTC"); + let dt = Utc.timestamp_millis(-60000); + assert_eq!(dt.to_string(), "1969-12-31 23:59:00 UTC"); + let dt = Utc.timestamp_millis(-3600000); + assert_eq!(dt.to_string(), "1969-12-31 23:00:00 UTC"); + + for (millis, expected) in &[ + (-7000, "1969-12-31 23:59:53 UTC"), + (-7001, "1969-12-31 23:59:52.999 UTC"), + (-7003, "1969-12-31 23:59:52.997 UTC"), + ] { + match Utc.timestamp_millis_opt(*millis) { + LocalResult::Single(dt) => { + assert_eq!(dt.to_string(), *expected); + }, + e => panic!("Got {:?} instead of an okay answer", e), + } + } + + } + + #[test] + fn test_negative_nanos() { + let dt = Utc.timestamp_nanos(-1_000_000_000); + assert_eq!(dt.to_string(), "1969-12-31 23:59:59 UTC"); + let dt = Utc.timestamp_nanos(-999_999_999); + assert_eq!(dt.to_string(), "1969-12-31 23:59:59.000000001 UTC"); + let dt = Utc.timestamp_nanos(-1); + assert_eq!(dt.to_string(), "1969-12-31 23:59:59.999999999 UTC"); + let dt = Utc.timestamp_nanos(-60_000_000_000); + assert_eq!(dt.to_string(), "1969-12-31 23:59:00 UTC"); + let dt = Utc.timestamp_nanos(-3_600_000_000_000); + assert_eq!(dt.to_string(), "1969-12-31 23:00:00 UTC"); + } + + #[test] + fn test_nanos_never_panics() { + Utc.timestamp_nanos(i64::max_value()); + Utc.timestamp_nanos(i64::default()); + Utc.timestamp_nanos(i64::min_value()); + } +} diff --git a/third_party/rust/chrono/src/offset/utc.rs b/third_party/rust/chrono/src/offset/utc.rs index d4e8d10b5a..da8de11ffa 100644 --- a/third_party/rust/chrono/src/offset/utc.rs +++ b/third_party/rust/chrono/src/offset/utc.rs @@ -3,8 +3,8 @@ //! The UTC (Coordinated Universal Time) time zone. -use std::fmt; -#[cfg(feature="clock")] +use core::fmt; +#[cfg(all(feature="clock", not(all(target_arch = "wasm32", feature = "wasmbind"))))] use oldtime; use naive::{NaiveDate, NaiveDateTime}; @@ -38,11 +38,23 @@ impl Utc { pub fn today() -> Date { Utc::now().date() } /// Returns a `DateTime` which corresponds to the current date. + #[cfg(not(all(target_arch = "wasm32", feature = "wasmbind")))] pub fn now() -> DateTime { let spec = oldtime::get_time(); let naive = NaiveDateTime::from_timestamp(spec.sec, spec.nsec as u32); DateTime::from_utc(naive, Utc) } + + /// Returns a `DateTime` which corresponds to the current date. 
+ #[cfg(all(target_arch = "wasm32", feature = "wasmbind"))] + pub fn now() -> DateTime { + let now = js_sys::Date::new_0(); + let millisecs_since_unix_epoch: u64 = now.get_time() as u64; + let secs = millisecs_since_unix_epoch / 1000; + let nanos = 1_000_000 * (millisecs_since_unix_epoch - 1000 * secs); + let naive = NaiveDateTime::from_timestamp(secs as i64, nanos as u32); + DateTime::from_utc(naive, Utc) + } } impl TimeZone for Utc { @@ -72,4 +84,3 @@ impl fmt::Debug for Utc { impl fmt::Display for Utc { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "UTC") } } - diff --git a/third_party/rust/chrono/src/oldtime.rs b/third_party/rust/chrono/src/oldtime.rs index 2bdc2f64ef..bed813677a 100644 --- a/third_party/rust/chrono/src/oldtime.rs +++ b/third_party/rust/chrono/src/oldtime.rs @@ -10,10 +10,11 @@ //! Temporal quantification -use std::{fmt, i64}; +use core::{fmt, i64}; +#[cfg(any(feature = "std", test))] use std::error::Error; -use std::ops::{Add, Sub, Mul, Div, Neg}; -use std::time::Duration as StdDuration; +use core::ops::{Add, Sub, Mul, Div, Neg}; +use core::time::Duration as StdDuration; /// The number of nanoseconds in a microsecond. const NANOS_PER_MICRO: i32 = 1000; @@ -363,20 +364,20 @@ impl fmt::Display for Duration { let hasdate = days != 0; let hastime = (secs != 0 || abs.nanos != 0) || !hasdate; - try!(write!(f, "{}P", sign)); + write!(f, "{}P", sign)?; if hasdate { - try!(write!(f, "{}D", days)); + write!(f, "{}D", days)?; } if hastime { if abs.nanos == 0 { - try!(write!(f, "T{}S", secs)); + write!(f, "T{}S", secs)?; } else if abs.nanos % NANOS_PER_MILLI == 0 { - try!(write!(f, "T{}.{:03}S", secs, abs.nanos / NANOS_PER_MILLI)); + write!(f, "T{}.{:03}S", secs, abs.nanos / NANOS_PER_MILLI)?; } else if abs.nanos % NANOS_PER_MICRO == 0 { - try!(write!(f, "T{}.{:06}S", secs, abs.nanos / NANOS_PER_MICRO)); + write!(f, "T{}.{:06}S", secs, abs.nanos / NANOS_PER_MICRO)?; } else { - try!(write!(f, "T{}.{:09}S", secs, abs.nanos)); + write!(f, "T{}.{:09}S", secs, abs.nanos)?; } } Ok(()) @@ -392,15 +393,22 @@ impl fmt::Display for Duration { #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct OutOfRangeError(()); +impl OutOfRangeError { + fn description(&self) -> &str { + "Source duration value is out of range for the target type" + } +} + impl fmt::Display for OutOfRangeError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } +#[cfg(any(feature = "std", test))] impl Error for OutOfRangeError { fn description(&self) -> &str { - "Source duration value is out of range for the target type" + self.description() } } diff --git a/third_party/rust/chrono/src/round.rs b/third_party/rust/chrono/src/round.rs index bf62762165..ac5b984be3 100644 --- a/third_party/rust/chrono/src/round.rs +++ b/third_party/rust/chrono/src/round.rs @@ -2,7 +2,7 @@ // See README.md and LICENSE.txt for details. 
use Timelike; -use std::ops::{Add, Sub}; +use core::ops::{Add, Sub}; use oldtime::Duration; /// Extension trait for subsecond rounding or truncation to a maximum number diff --git a/third_party/rust/chrono/tests/wasm.rs b/third_party/rust/chrono/tests/wasm.rs new file mode 100644 index 0000000000..48ac8db26a --- /dev/null +++ b/third_party/rust/chrono/tests/wasm.rs @@ -0,0 +1,28 @@ +#[cfg(all(target_arch = "wasm32", feature = "wasmbind"))] +mod test { + extern crate chrono; + extern crate wasm_bindgen_test; + + use self::chrono::prelude::*; + use self::wasm_bindgen_test::*; + + #[wasm_bindgen_test] + fn now() { + let utc: DateTime = Utc::now(); + let local: DateTime = Local::now(); + + // Ensure time fetched is correct + let actual = Utc.datetime_from_str(env!("NOW"), "%s").unwrap(); + assert!(utc - actual < chrono::Duration::minutes(5)); + + // Ensure offset retrieved when getting local time is correct + let expected_offset = match env!("TZ") { + "ACST-9:30" => FixedOffset::east(19 * 30 * 60), + "Asia/Katmandu" => FixedOffset::east(23 * 15 * 60), // No DST thankfully + "EST4" => FixedOffset::east(-4 * 60 * 60), + "UTC0" => FixedOffset::east(0), + _ => panic!("unexpected TZ"), + }; + assert_eq!(&expected_offset, local.offset()); + } +} diff --git a/third_party/rust/clang-sys/.cargo-checksum.json b/third_party/rust/clang-sys/.cargo-checksum.json index a65be68ad6..7460168fb9 100644 --- a/third_party/rust/clang-sys/.cargo-checksum.json +++ b/third_party/rust/clang-sys/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"CHANGELOG.md":"c652c9b61c28c8412d7d2894fd40d16d99e480f1a107953e0d8e187ff682b1b5","Cargo.toml":"e404a59df6e05e1bd0f504b7cf493325d305d3a4c0367a1a2f4fa09cdf81431c","LICENSE.txt":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","README.md":"e0dbda475155169c7f68afad8f6158f38210ae80c561dc64335984714488d142","appveyor.yml":"af8991ca335fa4ad3bc027935230cbcea7f9bcb93f3ef01939f4c60df20d4414","build.rs":"7ca54d670b7d7dffd43d7c1ef521a6075b82cddd45947d30de260a880e0347c1","build/common.rs":"4575cb8c5ebdeb7a9451d7ab218c8556cd1fd8de46f1cdbc32e0488cf6e8f014","build/dynamic.rs":"0e158c311fcbaf3cde0da8b56811bcba146d638ee803248fc2110908624e5eb3","build/static.rs":"52ea2a1a742152fa6f7179b990da0d07fdd9c290f4204f8b31a4f128a9a38dc9","ci/before_install.sh":"49978df9e1e49892000bc039d3ceb0c24822948bf4a2cf7251dcf63953ecd5f7","ci/install.bat":"d7cd7c27132d80d2ecb9833b16af47ec8936beba4212cf999e85c3943d821018","ci/script.sh":"1bb1cd29bd9635cc126cdcbd6c02f3500620a231a86726bf2165a4b74baaf433","ci/test_script.bat":"73462f51aaa9a1c14ce9f55c41dc3672df64faa9789725384ae4f28d8ba3c90b","clippy.toml":"acef14b9acffa18d1069ae08a4e8fe824a614f91b0bc71a6b1c68e4d885397e6","src/lib.rs":"5892a027af827a5fd235a7e9c61fea1a80b4eff8970845db67a0b6698329b70f","src/link.rs":"6fc7904495307bca8174384e31c131e4c50d95afbbff9607b101f89ec94de7c6","src/support.rs":"87bb916e9a2f7d558f6246b793a3cffad0195385a40313614e4f34ebcf250008","tests/header.h":"b1cf564b21d76db78529d1934e1481a5f0452fdedc6e32954608293c310498b6","tests/lib.rs":"c1bd385dbc2af85270d63deea0fcb2ab0012fa0090da0b0882cade50a9baaa8b"},"package":"81de550971c976f176130da4b2978d3b524eaa0fd9ac31f3ceb5ae1231fb4853"} \ No newline at end of file 
+{"files":{"CHANGELOG.md":"fd58c20190e7e58c35cfd6d19810bb90105d98024a98c7beac3bf6f08878f498","Cargo.toml":"021b9974c21a0d3caa0e18fb4c670a73b1ee2c79284757fc0bc510c837a7f91c","LICENSE.txt":"3ddf9be5c28fe27dad143a5dc76eea25222ad1dd68934a047064e56ed2fa40c5","README.md":"b08a3079aed4d36d46822bf8db01cd0e07420a2f46289e9bc0062b130cd471ce","appveyor.yml":"1a5d6953fb6e373dc760d50659628f04d48f68bd3f3f8e434800b31e74ef1497","build.rs":"2693b9519440c2341fe51e8eef4ca63de31a93cadc30366e25788e8c3ed0a29a","build/common.rs":"c3e37974f698aa38f7a3012c03edadd14a3fcd3a27a008f55b6a2348539f471c","build/dynamic.rs":"32cff0b28aefdad4516ba8d295f3b3b1aaee99f41d1fda1561333b85f54ffbb0","build/static.rs":"acefad54c29b6e361bfb29696098779ea477c51ff09cd4c3d0195a9ded7d8f60","ci/before_install.sh":"cb6de6f230066004f568d0b5e9d940b3793ff8ee7eb5d35a4f9ec777ee369725","ci/install.bat":"a011e1b3b15db83d134675ceca9e769b1593e283fcea7f4835142af962f407b0","ci/script.sh":"52db533df970f1b44c0b2663f3bfac4476f2150e94fc392b2bab4145325f418b","ci/test_script.bat":"901609adc59dab2730e16dd374d0351d6406e7559fe4d86ddd9a857ad9c84d2a","clippy.toml":"fcf54943ba571514b244cc098ce08671b4117167733e8107e799d533a12a2195","src/lib.rs":"c80d69431089b85e7392be55687ec9c87bf20962afdfda16c6b9810cdc33688a","src/link.rs":"7af2af3940a649b069ea25ef119acbc03649285b3ff7de6485913bcdf512d64e","src/support.rs":"fc58df47c3b75e0f1a3e5d5303ee2e638cec9d6abbf1507c2cff7073062f0569","tests/header.h":"1b15a686d1c06561960045a26c25a34d840f26c8246f2f5e630f993b69c7492c","tests/lib.rs":"8a71eafaa28b66fc0f7fe2235ce0d91cdc50cb148a84aa2baa8a696b0f9d80ee"},"package":"f92986241798376849e1a007827041fed9bb36195822c2049d18e174420e0534"} \ No newline at end of file diff --git a/third_party/rust/clang-sys/CHANGELOG.md b/third_party/rust/clang-sys/CHANGELOG.md index e5785f4113..42bb529c66 100644 --- a/third_party/rust/clang-sys/CHANGELOG.md +++ b/third_party/rust/clang-sys/CHANGELOG.md @@ -1,343 +1,366 @@ -## [0.28.1] - 2019-07-28 - -### Changed -- Bumped `glob` version to `0.3.0` -- Improved error message when an invocation of an executable is not successful -- Allowed `LIBCLANG_PATH` to refer to a specific `libclang` instance (e.g., - `/usr/local/lib/libclang.so.10`) - -### Fixed -- Fixed - [`libclang-cpp`](https://github.com/llvm-mirror/clang/commit/90d6722bdcbc2af52306f7e948c556ad6185ac48) - being linked instead of `libclang` - -## [0.28.0] - 2019-02-17 - -### Changed -- Changed `llvm-config` to be first search candidate on macOS - -### Added -- Added support for `clang` 8.0.x - -### Removed -- Removed `assert-minimum` feature -- Removed version detection for libraries without versions embedded in the filename - -## [0.27.0] - 2019-01-10 - -### Changed -- Added version detection for libraries without versions embedded in the filename - -### Added -- Added `assert-minimum` feature (see `README.md` for details) - -## [0.26.4] - 2018-12-29 - -### Changed -- Added shared library path to `SharedLibrary` struct - -## [0.26.3] - 2018-11-14 - -### Changed -- Disable default features of `libc` dependency - -## [0.26.2] - 2018-11-03 - -### Fixed -- Fixed dynamic linking on macOS - -## [0.26.1] - 2018-10-10 - -### Fixed -- Fixed support for finding libraries in `bin` directories on Windows - -## [0.26.0] - 2018-10-07 - -### Changed -- Added support for finding libraries with version suffixes on Linux when using runtime linking (e.g., `libclang.so.1`) - -## [0.25.0] - 2018-10-06 - -### Changed -- Added support for versioned libraries on BSDs - -## [0.24.0] - 2018-09-15 - -### Changed -- Reworked 
finding of libraries (see `README.md` for details) - -### Added -- Added support for `clang` 7.0.x - -## [0.23.0] - 2018-06-16 - -### Changed -- Changed `Clang::find` to skip dynamic libraries for an incorrect architecture on Windows - -## [0.22.0] - 2018-03-11 - -### Added -- Added support for `clang` 6.0.x -- Bumped `libc` version to `0.2.39` -- Bumped `libloading` version to `0.5.0` - -## [0.21.2] - 2018-02-17 - -### Changed -- Added original errors to error messages -- Added support for searching for libraries in `LD_LIBRARY_PATH` directories - -## [0.21.1] - 2017-11-24 - -### Changed -- Improved finding of versioned libraries (e.g., `libclang-3.9.so`) - -### Fixed -* Fixed compilation failures on the beta and nightly channels caused by a [compiler bug](https://github.com/KyleMayes/clang-sys/pull/69) - -## [0.21.0] - 2017-10-11 - -### Changed -* Replaced `bitflags` usage with constants which avoids crashes on 32-bit Linux platforms - -## [0.20.1] - 2017-09-16 - -### Fixed -- Fixed static linking - -## [0.20.0] - 2017-09-14 - -### Added -- Added support for `clang` 5.0.x -- Added `clang` as a link target of this package -- Added dummy implementations of `is_loaded` for builds with the `static` Cargo feature enabled - -## [0.19.0] - 2017-07-02 - -### Changed -- Bumped `bitflags` version to `0.9.1` -- Added `args` parameter to `Clang::new` function which passes arguments to the Clang executable - -## [0.18.0] - 2017-05-16 - -### Changed -- Improved finding of versioned libraries (e.g., `libclang.so.3.9`) - -## [0.17.0] - 2017-05-08 - -### Changed -- Changed storage type of include search paths from `Vec` to `Option>` - -## [0.16.0] - 2017-05-02 - -### Changed -- Bumped `libloading` version to `0.4.0` - -## [0.15.2] - 2017-04-28 - -### Fixed -- Fixed finding of `libclang.so.1` on Linux - -## [0.15.1] - 2017-03-29 - -### Fixed -- Fixed static linking when libraries are in [different directories](https://github.com/KyleMayes/clang-sys/issues/50) - -## [0.15.0] - 2017-03-13 - -### Added -- Added support for `clang` 4.0.x - -### Changed -- Changed functions in the `Functions` struct to be `unsafe` (`runtime` feature only) -- Changed `Clang::find` method to ignore directories and non-executable files -- Changed `Clang::find` to skip dynamic libraries for an incorrect architecture on FreeBSD and Linux -- Bumped `bitflags` version to `0.7.0` - -## [0.14.0] - 2017-01-30 - -### Changed -- Changed all enum types from tuple structs to raw integers to avoid - [segmentation faults](https://github.com/rust-lang/rust/issues/39394) on some platforms - -## [0.13.0] - 2017-01-29 - -### Changed -- Changed all opaque pointers types from tuple structs to raw pointers to avoid - [segmentation faults](https://github.com/rust-lang/rust/issues/39394) on some platforms - -## [0.12.0] - 2016-12-13 - -### Changed -- Altered the runtime linking API to allow for testing the presence of functions - -## [0.11.1] - 2016-12-07 - -### Added -- Added support for linking to Clang on Windows from unofficial LLVM sources such as MSYS and MinGW - -## [0.11.0] - 2016-10-07 - -### Changed -- Changed all enums from Rust enums to typed constants to avoid - [undefined behavior](https://github.com/KyleMayes/clang-sys/issues/42) - -## [0.10.1] - 2016-08-21 - -### Changed -- Changed static linking on FreeBSD and macOS to link against `libc++` instead of `libstd++` - -## [0.10.0] - 2016-08-01 - -### Changed -- Added `runtime` Cargo feature that links to `libclang` shared library at runtime -- Added `from_raw` method to 
`CXTypeLayoutError` enum -- Added implementations of `Deref` for opaque FFI structs -- Changed `Default` implementations for structs to zero out the struct - -## [0.9.0] - 2016-07-21 - -### Added -- Added documentation bindings - -## [0.8.1] - 2016-07-20 - -### Changed -- Added `CLANG_PATH` environment variable for providing a path to `clang` executable -- Added usage of `llvm-config` to search for `clang` -- Added usage of `xcodebuild` to search for `clang` on macOS - -## [0.8.0] - 2016-07-18 - -### Added -- Added support for `clang` 3.9.x - -### Changed -- Bumped `libc` version to `0.2.14` - -### Fixed -- Fixed `LIBCLANG_PATH` usage on Windows to search both the `bin` and `lib` directories -- Fixed search path parsing on macOS -- Fixed search path parsing on Windows -- Fixed default search path ordering on macOS - -## [0.7.2] - 2016-06-17 - -### Fixed -- Fixed finding of `clang` executables when system has executables matching `clang-*` - (e.g., `clang-format`) - -## [0.7.1] - 2016-06-10 - -### Changed -- Bumped `libc` version to `0.2.12` - -### Fixed -- Fixed finding of `clang` executables suffixed by their version (e.g., `clang-3.5`) - -## [0.7.0] - 2016-05-31 - -### Changed -- Changed `Clang` struct `version` field type to `Option` - -## [0.6.0] - 2016-05-26 - -### Added -- Added `support` module - -### Fixed -- Fixed `libclang` linking on FreeBSD -- Fixed `libclang` linking on Windows with the MSVC toolchain -- Improved `libclang` static linking - -## [0.5.4] - 20160-5-19 - -### Changed -- Added implementations of `Default` for FFI structs - -## [0.5.3] - 2016-05-17 - -### Changed -- Bumped `bitflags` version to `0.7.0` - -## [0.5.2] - 2016-05-12 - -### Fixed -- Fixed `libclang` static linking - -## [0.5.1] - 2016-05-10 - -### Fixed -- Fixed `libclang` linking on macOS -- Fixed `libclang` linking on Windows - -## [0.5.0] - 2016-05-10 - -### Removed -- Removed `rustc_version` dependency -- Removed support for `LIBCLANG_STATIC` environment variable - -### Changed -- Bumped `bitflags` version to `0.6.0` -- Bumped `libc` version to `0.2.11` -- Improved `libclang` search path -- Improved `libclang` static linking - -## [0.4.2] - 2016-04-20 - -### Changed -- Bumped `libc` version to `0.2.10` - -## [0.4.1] - 2016-04-02 - -### Changed -- Bumped `libc` version to `0.2.9` -- Bumped `rustc_version` version to `0.1.7` - -## [0.4.0] - 2016-03-28 - -### Removed -- Removed support for `clang` 3.4.x - -## [0.3.1] - 2016-03-21 - -### Added -- Added support for finding `libclang` - -## [0.3.0] - 2016-03-16 - -### Removed -- Removed build system types and functions - -### Added -- Added support for `clang` 3.4.x - -### Changed -- Bumped `bitflags` version to `0.5.0` -- Bumped `libc` version to `0.2.8` - -## [0.2.1] - 2016-02-13 - -### Changed -- Simplified internal usage of conditional compilation -- Bumped `bitflags` version to `0.4.0` -- Bumped `libc` version to `0.2.7` -- Bumped `rustc_version` version to `0.1.6` - -## [0.2.0] - 2016-02-12 - -### Added -- Added support for `clang` 3.8.x - -## [0.1.2] - 2015-12-29 - -### Added -- Added derivations of `Debug` for FFI structs - -## [0.1.1] - 2015-12-26 - -### Added -- Added derivations of `PartialOrd` and `Ord` for FFI enums - -## [0.1.0] - 2015-12-22 -- Initial release +## [0.29.2] - 2020-03-09 + +### Added +- Revert unnecessary increase of minimum version of `libc` and `libloading` + +## [0.29.1] - 2020-03-06 + +### Added +- Added support for finding instances of `libclang` matching `libclang-*.so.*` + +## [0.29.0] - 2020-02-17 + +### Changed +- 
Wrapped function pointer fields in `Option` in the `CXCursorAndRangeVisitor` +and `IndexerCallbacks` structs (to permit nullability and to avoid undefined +behavior caused by `Default` implementations for these structs which returns a +zeroed value) + +### Added +- Added support for `clang` 9.0.x +- Added missing `CXCallingConv_AArch64VectorCall` variant to `CXCallingConv` enum +- Added missing `clang_CompileCommand_getNumMappedSources` function + +## [0.28.1] - 2019-07-28 + +### Changed +- Bumped `glob` version to `0.3.0` +- Improved error message when an invocation of an executable is not successful +- Allowed `LIBCLANG_PATH` to refer to a specific `libclang` instance (e.g., + `/usr/local/lib/libclang.so.10`) + +### Fixed +- Fixed + [`libclang-cpp`](https://github.com/llvm-mirror/clang/commit/90d6722bdcbc2af52306f7e948c556ad6185ac48) + being linked instead of `libclang` + +## [0.28.0] - 2019-02-17 + +### Changed +- Changed `llvm-config` to be first search candidate on macOS + +### Added +- Added support for `clang` 8.0.x + +### Removed +- Removed `assert-minimum` feature +- Removed version detection for libraries without versions embedded in the filename + +## [0.27.0] - 2019-01-10 + +### Changed +- Added version detection for libraries without versions embedded in the filename + +### Added +- Added `assert-minimum` feature (see `README.md` for details) + +## [0.26.4] - 2018-12-29 + +### Changed +- Added shared library path to `SharedLibrary` struct + +## [0.26.3] - 2018-11-14 + +### Changed +- Disable default features of `libc` dependency + +## [0.26.2] - 2018-11-03 + +### Fixed +- Fixed dynamic linking on macOS + +## [0.26.1] - 2018-10-10 + +### Fixed +- Fixed support for finding libraries in `bin` directories on Windows + +## [0.26.0] - 2018-10-07 + +### Changed +- Added support for finding libraries with version suffixes on Linux when using runtime linking (e.g., `libclang.so.1`) + +## [0.25.0] - 2018-10-06 + +### Changed +- Added support for versioned libraries on BSDs + +## [0.24.0] - 2018-09-15 + +### Changed +- Reworked finding of libraries (see `README.md` for details) + +### Added +- Added support for `clang` 7.0.x + +## [0.23.0] - 2018-06-16 + +### Changed +- Changed `Clang::find` to skip dynamic libraries for an incorrect architecture on Windows + +## [0.22.0] - 2018-03-11 + +### Added +- Added support for `clang` 6.0.x +- Bumped `libc` version to `0.2.39` +- Bumped `libloading` version to `0.5.0` + +## [0.21.2] - 2018-02-17 + +### Changed +- Added original errors to error messages +- Added support for searching for libraries in `LD_LIBRARY_PATH` directories + +## [0.21.1] - 2017-11-24 + +### Changed +- Improved finding of versioned libraries (e.g., `libclang-3.9.so`) + +### Fixed +* Fixed compilation failures on the beta and nightly channels caused by a [compiler bug](https://github.com/KyleMayes/clang-sys/pull/69) + +## [0.21.0] - 2017-10-11 + +### Changed +* Replaced `bitflags` usage with constants which avoids crashes on 32-bit Linux platforms + +## [0.20.1] - 2017-09-16 + +### Fixed +- Fixed static linking + +## [0.20.0] - 2017-09-14 + +### Added +- Added support for `clang` 5.0.x +- Added `clang` as a link target of this package +- Added dummy implementations of `is_loaded` for builds with the `static` Cargo feature enabled + +## [0.19.0] - 2017-07-02 + +### Changed +- Bumped `bitflags` version to `0.9.1` +- Added `args` parameter to `Clang::new` function which passes arguments to the Clang executable + +## [0.18.0] - 2017-05-16 + +### Changed +- Improved finding of 
versioned libraries (e.g., `libclang.so.3.9`) + +## [0.17.0] - 2017-05-08 + +### Changed +- Changed storage type of include search paths from `Vec` to `Option>` + +## [0.16.0] - 2017-05-02 + +### Changed +- Bumped `libloading` version to `0.4.0` + +## [0.15.2] - 2017-04-28 + +### Fixed +- Fixed finding of `libclang.so.1` on Linux + +## [0.15.1] - 2017-03-29 + +### Fixed +- Fixed static linking when libraries are in [different directories](https://github.com/KyleMayes/clang-sys/issues/50) + +## [0.15.0] - 2017-03-13 + +### Added +- Added support for `clang` 4.0.x + +### Changed +- Changed functions in the `Functions` struct to be `unsafe` (`runtime` feature only) +- Changed `Clang::find` method to ignore directories and non-executable files +- Changed `Clang::find` to skip dynamic libraries for an incorrect architecture on FreeBSD and Linux +- Bumped `bitflags` version to `0.7.0` + +## [0.14.0] - 2017-01-30 + +### Changed +- Changed all enum types from tuple structs to raw integers to avoid + [segmentation faults](https://github.com/rust-lang/rust/issues/39394) on some platforms + +## [0.13.0] - 2017-01-29 + +### Changed +- Changed all opaque pointers types from tuple structs to raw pointers to avoid + [segmentation faults](https://github.com/rust-lang/rust/issues/39394) on some platforms + +## [0.12.0] - 2016-12-13 + +### Changed +- Altered the runtime linking API to allow for testing the presence of functions + +## [0.11.1] - 2016-12-07 + +### Added +- Added support for linking to Clang on Windows from unofficial LLVM sources such as MSYS and MinGW + +## [0.11.0] - 2016-10-07 + +### Changed +- Changed all enums from Rust enums to typed constants to avoid + [undefined behavior](https://github.com/KyleMayes/clang-sys/issues/42) + +## [0.10.1] - 2016-08-21 + +### Changed +- Changed static linking on FreeBSD and macOS to link against `libc++` instead of `libstd++` + +## [0.10.0] - 2016-08-01 + +### Changed +- Added `runtime` Cargo feature that links to `libclang` shared library at runtime +- Added `from_raw` method to `CXTypeLayoutError` enum +- Added implementations of `Deref` for opaque FFI structs +- Changed `Default` implementations for structs to zero out the struct + +## [0.9.0] - 2016-07-21 + +### Added +- Added documentation bindings + +## [0.8.1] - 2016-07-20 + +### Changed +- Added `CLANG_PATH` environment variable for providing a path to `clang` executable +- Added usage of `llvm-config` to search for `clang` +- Added usage of `xcodebuild` to search for `clang` on macOS + +## [0.8.0] - 2016-07-18 + +### Added +- Added support for `clang` 3.9.x + +### Changed +- Bumped `libc` version to `0.2.14` + +### Fixed +- Fixed `LIBCLANG_PATH` usage on Windows to search both the `bin` and `lib` directories +- Fixed search path parsing on macOS +- Fixed search path parsing on Windows +- Fixed default search path ordering on macOS + +## [0.7.2] - 2016-06-17 + +### Fixed +- Fixed finding of `clang` executables when system has executables matching `clang-*` + (e.g., `clang-format`) + +## [0.7.1] - 2016-06-10 + +### Changed +- Bumped `libc` version to `0.2.12` + +### Fixed +- Fixed finding of `clang` executables suffixed by their version (e.g., `clang-3.5`) + +## [0.7.0] - 2016-05-31 + +### Changed +- Changed `Clang` struct `version` field type to `Option` + +## [0.6.0] - 2016-05-26 + +### Added +- Added `support` module + +### Fixed +- Fixed `libclang` linking on FreeBSD +- Fixed `libclang` linking on Windows with the MSVC toolchain +- Improved `libclang` static linking + +## [0.5.4] - 20160-5-19 
+ +### Changed +- Added implementations of `Default` for FFI structs + +## [0.5.3] - 2016-05-17 + +### Changed +- Bumped `bitflags` version to `0.7.0` + +## [0.5.2] - 2016-05-12 + +### Fixed +- Fixed `libclang` static linking + +## [0.5.1] - 2016-05-10 + +### Fixed +- Fixed `libclang` linking on macOS +- Fixed `libclang` linking on Windows + +## [0.5.0] - 2016-05-10 + +### Removed +- Removed `rustc_version` dependency +- Removed support for `LIBCLANG_STATIC` environment variable + +### Changed +- Bumped `bitflags` version to `0.6.0` +- Bumped `libc` version to `0.2.11` +- Improved `libclang` search path +- Improved `libclang` static linking + +## [0.4.2] - 2016-04-20 + +### Changed +- Bumped `libc` version to `0.2.10` + +## [0.4.1] - 2016-04-02 + +### Changed +- Bumped `libc` version to `0.2.9` +- Bumped `rustc_version` version to `0.1.7` + +## [0.4.0] - 2016-03-28 + +### Removed +- Removed support for `clang` 3.4.x + +## [0.3.1] - 2016-03-21 + +### Added +- Added support for finding `libclang` + +## [0.3.0] - 2016-03-16 + +### Removed +- Removed build system types and functions + +### Added +- Added support for `clang` 3.4.x + +### Changed +- Bumped `bitflags` version to `0.5.0` +- Bumped `libc` version to `0.2.8` + +## [0.2.1] - 2016-02-13 + +### Changed +- Simplified internal usage of conditional compilation +- Bumped `bitflags` version to `0.4.0` +- Bumped `libc` version to `0.2.7` +- Bumped `rustc_version` version to `0.1.6` + +## [0.2.0] - 2016-02-12 + +### Added +- Added support for `clang` 3.8.x + +## [0.1.2] - 2015-12-29 + +### Added +- Added derivations of `Debug` for FFI structs + +## [0.1.1] - 2015-12-26 + +### Added +- Added derivations of `PartialOrd` and `Ord` for FFI enums + +## [0.1.0] - 2015-12-22 +- Initial release diff --git a/third_party/rust/clang-sys/Cargo.toml b/third_party/rust/clang-sys/Cargo.toml index 9b514ee951..d3d84ffbc6 100644 --- a/third_party/rust/clang-sys/Cargo.toml +++ b/third_party/rust/clang-sys/Cargo.toml @@ -12,7 +12,7 @@ [package] name = "clang-sys" -version = "0.28.1" +version = "0.29.2" authors = ["Kyle Mayes "] build = "build.rs" links = "clang" @@ -29,7 +29,7 @@ version = "0.2.39" default-features = false [dependencies.libloading] -version = "0.5.0" +version = "0.5" optional = true [build-dependencies.glob] version = "0.3" @@ -45,6 +45,7 @@ clang_5_0 = ["gte_clang_3_6", "gte_clang_3_7", "gte_clang_3_8", "gte_clang_3_9", clang_6_0 = ["gte_clang_3_6", "gte_clang_3_7", "gte_clang_3_8", "gte_clang_3_9", "gte_clang_4_0", "gte_clang_5_0", "gte_clang_6_0"] clang_7_0 = ["gte_clang_3_6", "gte_clang_3_7", "gte_clang_3_8", "gte_clang_3_9", "gte_clang_4_0", "gte_clang_5_0", "gte_clang_6_0", "gte_clang_7_0"] clang_8_0 = ["gte_clang_3_6", "gte_clang_3_7", "gte_clang_3_8", "gte_clang_3_9", "gte_clang_4_0", "gte_clang_5_0", "gte_clang_6_0", "gte_clang_7_0", "gte_clang_8_0"] +clang_9_0 = ["gte_clang_3_6", "gte_clang_3_7", "gte_clang_3_8", "gte_clang_3_9", "gte_clang_4_0", "gte_clang_5_0", "gte_clang_6_0", "gte_clang_7_0", "gte_clang_8_0", "gte_clang_9_0"] gte_clang_3_6 = [] gte_clang_3_7 = [] gte_clang_3_8 = [] @@ -54,5 +55,6 @@ gte_clang_5_0 = [] gte_clang_6_0 = [] gte_clang_7_0 = [] gte_clang_8_0 = [] +gte_clang_9_0 = [] runtime = ["libloading"] static = [] diff --git a/third_party/rust/clang-sys/LICENSE.txt b/third_party/rust/clang-sys/LICENSE.txt index d645695673..75b52484ea 100644 --- a/third_party/rust/clang-sys/LICENSE.txt +++ b/third_party/rust/clang-sys/LICENSE.txt @@ -1,202 +1,202 @@ - - Apache License - Version 2.0, January 2004 - 
http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/third_party/rust/clang-sys/README.md b/third_party/rust/clang-sys/README.md index 9ac8e77da7..fd1a2fbe28 100644 --- a/third_party/rust/clang-sys/README.md +++ b/third_party/rust/clang-sys/README.md @@ -1,124 +1,128 @@ -# clang-sys - -[![crates.io](https://img.shields.io/crates/v/clang-sys.svg)](https://crates.io/crates/clang-sys) -[![Travis CI](https://travis-ci.org/KyleMayes/clang-sys.svg?branch=master)](https://travis-ci.org/KyleMayes/clang-sys) -[![AppVeyor](https://ci.appveyor.com/api/projects/status/7tv5mjyg55rof356/branch/master?svg=true)](https://ci.appveyor.com/project/KyleMayes/clang-sys-vtvy5/branch/master) - -Rust bindings for `libclang`. - -If you are interested in a Rust wrapper for these bindings, see -[clang-rs](https://github.com/KyleMayes/clang-rs). - -Supported on the stable, beta, and nightly Rust channels. - -Released under the Apache License 2.0. - -## Supported Versions - -To target a version of `libclang`, enable one of the following Cargo features: - -* `clang_3_5` - requires `libclang` 3.5 or later - ([Documentation](https://kylemayes.github.io/clang-sys/3_5/clang_sys)) -* `clang_3_6` - requires `libclang` 3.6 or later - ([Documentation](https://kylemayes.github.io/clang-sys/3_6/clang_sys)) -* `clang_3_7` - requires `libclang` 3.7 or later - ([Documentation](https://kylemayes.github.io/clang-sys/3_7/clang_sys)) -* `clang_3_8` - requires `libclang` 3.8 or later - ([Documentation](https://kylemayes.github.io/clang-sys/3_8/clang_sys)) -* `clang_3_9` - requires `libclang` 3.9 or later - ([Documentation](https://kylemayes.github.io/clang-sys/3_9/clang_sys)) -* `clang_4_0` - requires `libclang` 4.0 or later - ([Documentation](https://kylemayes.github.io/clang-sys/4_0/clang_sys)) -* `clang_5_0` - requires `libclang` 5.0 or later - ([Documentation](https://kylemayes.github.io/clang-sys/5_0/clang_sys)) -* `clang_6_0` - requires `libclang` 6.0 or later - ([Documentation](https://kylemayes.github.io/clang-sys/6_0/clang_sys)) -* `clang_7_0` - requires `libclang` 7.0 or later - ([Documentation](https://kylemayes.github.io/clang-sys/7_0/clang_sys)) -* `clang_8_0` - requires `libclang` 8.0 or later - ([Documentation](https://kylemayes.github.io/clang-sys/8_0/clang_sys)) - -If you do not enable one of these features, the API provided by `libclang` 3.5 will be available by -default. - -## Dependencies - -By default, this crate will attempt to link to `libclang` dynamically. In this case, this crate -depends on the `libclang` shared library (`libclang.so` on Linux, `libclang.dylib` on macOS, -`libclang.dll` on Windows). If you want to link to `libclang` statically instead, enable the -`static` Cargo feature. In this case, this crate depends on the LLVM and Clang static libraries. 
If -you don't want to link to `libclang` at compiletime but instead want to load it at runtime, enable -the `runtime` Cargo feature. - -These libraries can be either be installed as a part of Clang or downloaded -[here](http://llvm.org/releases/download.html). - -**Note:** The downloads for LLVM and Clang 3.8 and later do not include the `libclang.a` static -library. This means you cannot link to any of these versions of `libclang` statically unless you -build it from source. - -### Versioned Dependencies - -This crate supports finding versioned instances of `libclang.so` (e.g.,`libclang-3.9.so`). -In the case where there are multiple instances to choose from, this crate will prefer instances with -higher versions. For example, the following instances of `libclang.so` are listed in descending -order of preference: - -1. `libclang-4.0.so` -2. `libclang-4.so` -3. `libclang-3.9.so` -4. `libclang-3.so` -5. `libclang.so` - -**Note:** On BSD distributions, versioned instances of `libclang.so` matching the pattern -`libclang.so.*` (e.g., `libclang.so.7.0`) are also included. - -**Note:** On Linux distributions when the `runtime` features is enabled, versioned instances of -`libclang.so` matching the pattern `libclang.so.*` (e.g., `libclang.so.1`) are also included. - -## Environment Variables - -The following environment variables, if set, are used by this crate to find the required libraries -and executables: - -* `LLVM_CONFIG_PATH` **(compiletime)** - provides a path to an `llvm-config` executable -* `LIBCLANG_PATH` **(compiletime)** - provides a path to a directory containing a `libclang` shared - library or a path to a specific `libclang` shared library -* `LIBCLANG_STATIC_PATH` **(compiletime)** - provides a path to a directory containing LLVM and - Clang static libraries -* `CLANG_PATH` **(runtime)** - provides a path to a `clang` executable - -## Linking - -### Dynamic - -`libclang` shared libraries will be searched for in the following directories: - -* the directory provided by the `LIBCLANG_PATH` environment variable -* the `bin` and `lib` directories in the directory provided by `llvm-config --libdir` -* the directories provided by `LD_LIBRARY_PATH` environment variable -* a list of likely directories for the target platform (e.g., `/usr/local/lib` on Linux) -* **macOS only:** the toolchain directory in the directory provided by `xcode-select --print-path` - -On Linux, running an executable that has been dynamically linked to `libclang` may require you to -add a path to `libclang.so` to the `LD_LIBRARY_PATH` environment variable. The same is true on OS -X, except the `DYLD_LIBRARY_PATH` environment variable is used instead. - -On Windows, running an executable that has been dynamically linked to `libclang` requires that -`libclang.dll` can be found by the executable at runtime. See -[here](https://msdn.microsoft.com/en-us/library/7d83bc18.aspx) for more information. - -### Static - -The availability of `llvm-config` is not optional for static linking. Ensure that an instance of -this executable can be found on your system's path or set the `LLVM_CONFIG_PATH` environment -variable. The required LLVM and Clang static libraries will be searched for in the same way as -shared libraries are searched for, except the `LIBCLANG_STATIC_PATH` environment variable is used in -place of the `LIBCLANG_PATH` environment variable. - -### Runtime - -The `clang_sys::load` function is used to load a `libclang` shared library for use in the thread in -which it is called. 
The `clang_sys::unload` function will unload the `libclang` shared library. -`clang_sys::load` searches for a `libclang` shared library in the same way one is searched for when -linking to `libclang` dynamically at compiletime. +# clang-sys + +[![crates.io](https://img.shields.io/crates/v/clang-sys.svg)](https://crates.io/crates/clang-sys) +[![Travis CI](https://travis-ci.org/KyleMayes/clang-sys.svg?branch=master)](https://travis-ci.org/KyleMayes/clang-sys) +[![AppVeyor](https://ci.appveyor.com/api/projects/status/7tv5mjyg55rof356/branch/master?svg=true)](https://ci.appveyor.com/project/KyleMayes/clang-sys-vtvy5/branch/master) + +Rust bindings for `libclang`. + +If you are interested in a Rust wrapper for these bindings, see +[clang-rs](https://github.com/KyleMayes/clang-rs). + +Supported on the stable, beta, and nightly Rust channels.
+Minimum supported Rust version: **1.36.0**
+
+Released under the Apache License 2.0.
+
+## Supported Versions
+
+To target a version of `libclang`, enable one of the following Cargo features:
+
+* `clang_3_5` - requires `libclang` 3.5 or later
+  ([Documentation](https://kylemayes.github.io/clang-sys/3_5/clang_sys))
+* `clang_3_6` - requires `libclang` 3.6 or later
+  ([Documentation](https://kylemayes.github.io/clang-sys/3_6/clang_sys))
+* `clang_3_7` - requires `libclang` 3.7 or later
+  ([Documentation](https://kylemayes.github.io/clang-sys/3_7/clang_sys))
+* `clang_3_8` - requires `libclang` 3.8 or later
+  ([Documentation](https://kylemayes.github.io/clang-sys/3_8/clang_sys))
+* `clang_3_9` - requires `libclang` 3.9 or later
+  ([Documentation](https://kylemayes.github.io/clang-sys/3_9/clang_sys))
+* `clang_4_0` - requires `libclang` 4.0 or later
+  ([Documentation](https://kylemayes.github.io/clang-sys/4_0/clang_sys))
+* `clang_5_0` - requires `libclang` 5.0 or later
+  ([Documentation](https://kylemayes.github.io/clang-sys/5_0/clang_sys))
+* `clang_6_0` - requires `libclang` 6.0 or later
+  ([Documentation](https://kylemayes.github.io/clang-sys/6_0/clang_sys))
+* `clang_7_0` - requires `libclang` 7.0 or later
+  ([Documentation](https://kylemayes.github.io/clang-sys/7_0/clang_sys))
+* `clang_8_0` - requires `libclang` 8.0 or later
+  ([Documentation](https://kylemayes.github.io/clang-sys/8_0/clang_sys))
+* `clang_9_0` - requires `libclang` 9.0 or later
+  ([Documentation](https://kylemayes.github.io/clang-sys/9_0/clang_sys))
+
+If you do not enable one of these features, the API provided by `libclang` 3.5 will be available by
+default.
+
+## Dependencies
+
+By default, this crate will attempt to link to `libclang` dynamically. In this case, this crate
+depends on the `libclang` shared library (`libclang.so` on Linux, `libclang.dylib` on macOS,
+`libclang.dll` on Windows). If you want to link to `libclang` statically instead, enable the
+`static` Cargo feature. In this case, this crate depends on the LLVM and Clang static libraries. If
+you don't want to link to `libclang` at compiletime but instead want to load it at runtime, enable
+the `runtime` Cargo feature.
+
+These libraries can either be installed as a part of Clang or downloaded
+[here](http://llvm.org/releases/download.html).
+
+**Note:** The downloads for LLVM and Clang 3.8 and later do not include the `libclang.a` static
+library. This means you cannot link to any of these versions of `libclang` statically unless you
+build it from source.
+
+### Versioned Dependencies
+
+This crate supports finding versioned instances of `libclang.so` (e.g., `libclang-3.9.so`).
+In the case where there are multiple instances to choose from, this crate will prefer instances with
+higher versions. For example, the following instances of `libclang.so` are listed in descending
+order of preference:
+
+1. `libclang-4.0.so`
+2. `libclang-4.so`
+3. `libclang-3.9.so`
+4. `libclang-3.so`
+5. `libclang.so`
+
+**Note:** On BSD distributions, versioned instances of `libclang.so` matching the pattern
+`libclang.so.*` (e.g., `libclang.so.7.0`) are also included.
+
+**Note:** On Linux distributions when the `runtime` feature is enabled, versioned instances of
+`libclang.so` matching the pattern `libclang.so.*` (e.g., `libclang.so.1`) are also included.
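+
+For example, a crate that requires `libclang` 6.0 or later and wants to load it at
+runtime might declare something like the following in its `Cargo.toml` (the version
+requirement here is only a placeholder; use whichever `clang-sys` release your project
+actually depends on):
+
+```toml
+[dependencies.clang-sys]
+# Placeholder version requirement; pin to a real clang-sys release.
+version = "*"
+features = ["clang_6_0", "runtime"]
+```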
+ +## Environment Variables + +The following environment variables, if set, are used by this crate to find the required libraries +and executables: + +* `LLVM_CONFIG_PATH` **(compiletime)** - provides a full path to an `llvm-config` executable + (including the executable itself [i.e., `/usr/local/bin/llvm-config-8.0`]) +* `LIBCLANG_PATH` **(compiletime)** - provides a path to a directory containing a `libclang` shared + library or a full path to a specific `libclang` shared library +* `LIBCLANG_STATIC_PATH` **(compiletime)** - provides a path to a directory containing LLVM and + Clang static libraries +* `CLANG_PATH` **(runtime)** - provides a path to a `clang` executable + +## Linking + +### Dynamic + +`libclang` shared libraries will be searched for in the following directories: + +* the directory provided by the `LIBCLANG_PATH` environment variable +* the `bin` and `lib` directories in the directory provided by `llvm-config --libdir` +* the directories provided by `LD_LIBRARY_PATH` environment variable +* a list of likely directories for the target platform (e.g., `/usr/local/lib` on Linux) +* **macOS only:** the toolchain directory in the directory provided by `xcode-select --print-path` + +On Linux, running an executable that has been dynamically linked to `libclang` may require you to +add a path to `libclang.so` to the `LD_LIBRARY_PATH` environment variable. The same is true on OS +X, except the `DYLD_LIBRARY_PATH` environment variable is used instead. + +On Windows, running an executable that has been dynamically linked to `libclang` requires that +`libclang.dll` can be found by the executable at runtime. See +[here](https://msdn.microsoft.com/en-us/library/7d83bc18.aspx) for more information. + +### Static + +The availability of `llvm-config` is not optional for static linking. Ensure that an instance of +this executable can be found on your system's path or set the `LLVM_CONFIG_PATH` environment +variable. The required LLVM and Clang static libraries will be searched for in the same way as +shared libraries are searched for, except the `LIBCLANG_STATIC_PATH` environment variable is used in +place of the `LIBCLANG_PATH` environment variable. + +### Runtime + +The `clang_sys::load` function is used to load a `libclang` shared library for use in the thread in +which it is called. The `clang_sys::unload` function will unload the `libclang` shared library. +`clang_sys::load` searches for a `libclang` shared library in the same way one is searched for when +linking to `libclang` dynamically at compiletime. diff --git a/third_party/rust/clang-sys/appveyor.yml b/third_party/rust/clang-sys/appveyor.yml index 060cf7321a..614577feba 100644 --- a/third_party/rust/clang-sys/appveyor.yml +++ b/third_party/rust/clang-sys/appveyor.yml @@ -1,12 +1,12 @@ -environment: - matrix: - - LLVM_VERSION: 7.0.0 - CLANG_VERSION: clang_7_0 - -install: - - .\ci\install.bat - -build: false - -test_script: - - .\ci\test_script.bat +environment: + matrix: + - LLVM_VERSION: 7.0.0 + CLANG_VERSION: clang_7_0 + +install: + - .\ci\install.bat + +build: false + +test_script: + - .\ci\test_script.bat diff --git a/third_party/rust/clang-sys/build.rs b/third_party/rust/clang-sys/build.rs index cf5baa39f3..70d2c9dbb4 100644 --- a/third_party/rust/clang-sys/build.rs +++ b/third_party/rust/clang-sys/build.rs @@ -1,85 +1,85 @@ -// Copyright 2016 Kyle Mayes -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Finds `libclang` static or dynamic libraries and links to them. -//! -//! # Environment Variables -//! -//! This build script can make use of several environment variables to help it -//! find the required static or dynamic libraries. -//! -//! * `LLVM_CONFIG_PATH` - provides a path to an `llvm-config` executable -//! * `LIBCLANG_PATH` - provides a path to a directory containing a `libclang` -//! shared library or a path to a specific `libclang` shared library -//! * `LIBCLANG_STATIC_PATH` - provides a path to a directory containing LLVM -//! and Clang static libraries - -#![allow(unused_attributes)] - -extern crate glob; - -use std::path::Path; - -#[path = "build/common.rs"] -pub mod common; -#[path = "build/dynamic.rs"] -pub mod dynamic; -#[path = "build/static.rs"] -pub mod static_; - -/// Copy the file from the supplied source to the supplied destination. -#[cfg(feature = "runtime")] -fn copy(source: &str, destination: &Path) { - use std::fs::File; - use std::io::{Read, Write}; - - let mut string = String::new(); - File::open(source) - .unwrap() - .read_to_string(&mut string) - .unwrap(); - File::create(destination) - .unwrap() - .write_all(string.as_bytes()) - .unwrap(); -} - -/// Generates the finding and linking code so that it may be used at runtime. -#[cfg(feature = "runtime")] -fn main() { - use std::env; - - if cfg!(feature = "static") { - panic!("`runtime` and `static` features can't be combined"); - } - - let out = env::var("OUT_DIR").unwrap(); - copy("build/common.rs", &Path::new(&out).join("common.rs")); - copy("build/dynamic.rs", &Path::new(&out).join("dynamic.rs")); -} - -/// Finds and links to the required libraries. -#[cfg(not(feature = "runtime"))] -fn main() { - if cfg!(feature = "static") { - static_::link(); - } else { - dynamic::link(); - } - - if let Some(output) = common::run_llvm_config(&["--includedir"]) { - let directory = Path::new(output.trim_end()); - println!("cargo:include={}", directory.display()); - } -} +// Copyright 2016 Kyle Mayes +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Finds `libclang` static or dynamic libraries and links to them. +//! +//! # Environment Variables +//! +//! This build script can make use of several environment variables to help it +//! find the required static or dynamic libraries. +//! +//! * `LLVM_CONFIG_PATH` - provides a path to an `llvm-config` executable +//! * `LIBCLANG_PATH` - provides a path to a directory containing a `libclang` +//! shared library or a path to a specific `libclang` shared library +//! 
* `LIBCLANG_STATIC_PATH` - provides a path to a directory containing LLVM +//! and Clang static libraries + +#![allow(unused_attributes)] + +extern crate glob; + +use std::path::Path; + +#[path = "build/common.rs"] +pub mod common; +#[path = "build/dynamic.rs"] +pub mod dynamic; +#[path = "build/static.rs"] +pub mod static_; + +/// Copy the file from the supplied source to the supplied destination. +#[cfg(feature = "runtime")] +fn copy(source: &str, destination: &Path) { + use std::fs::File; + use std::io::{Read, Write}; + + let mut string = String::new(); + File::open(source) + .unwrap() + .read_to_string(&mut string) + .unwrap(); + File::create(destination) + .unwrap() + .write_all(string.as_bytes()) + .unwrap(); +} + +/// Generates the finding and linking code so that it may be used at runtime. +#[cfg(feature = "runtime")] +fn main() { + use std::env; + + if cfg!(feature = "static") { + panic!("`runtime` and `static` features can't be combined"); + } + + let out = env::var("OUT_DIR").unwrap(); + copy("build/common.rs", &Path::new(&out).join("common.rs")); + copy("build/dynamic.rs", &Path::new(&out).join("dynamic.rs")); +} + +/// Finds and links to the required libraries. +#[cfg(not(feature = "runtime"))] +fn main() { + if cfg!(feature = "static") { + static_::link(); + } else { + dynamic::link(); + } + + if let Some(output) = common::run_llvm_config(&["--includedir"]) { + let directory = Path::new(output.trim_end()); + println!("cargo:include={}", directory.display()); + } +} diff --git a/third_party/rust/clang-sys/build/common.rs b/third_party/rust/clang-sys/build/common.rs index 3ce42a6268..b19d02da47 100644 --- a/third_party/rust/clang-sys/build/common.rs +++ b/third_party/rust/clang-sys/build/common.rs @@ -1,217 +1,221 @@ -// Copyright 2018 Kyle Mayes -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -extern crate glob; - -use std::env; -use std::path::{Path, PathBuf}; -use std::process::Command; - -use glob::MatchOptions; - -/// `libclang` directory patterns for FreeBSD and Linux. -const DIRECTORIES_LINUX: &[&str] = &[ - "/usr/lib*", - "/usr/lib*/*", - "/usr/lib*/*/*", - "/usr/local/lib*", - "/usr/local/lib*/*", - "/usr/local/lib*/*/*", - "/usr/local/llvm*/lib*", -]; - -/// `libclang` directory patterns for macOS. -const DIRECTORIES_MACOS: &[&str] = &[ - "/usr/local/opt/llvm*/lib", - "/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib", - "/Library/Developer/CommandLineTools/usr/lib", - "/usr/local/opt/llvm*/lib/llvm*/lib", -]; - -/// `libclang` directory patterns for Windows. -const DIRECTORIES_WINDOWS: &[&str] = &[ - "C:\\LLVM\\lib", - "C:\\Program Files*\\LLVM\\lib", - "C:\\MSYS*\\MinGW*\\lib", -]; - -/// Executes the supplied console command, returning the `stdout` output if the -/// command was successfully executed. -fn run_command(command: &str, arguments: &[&str]) -> Option { - macro_rules! 
warn { - ($error:expr) => { - println!( - "cargo:warning=couldn't execute `{} {}` ({})", - command, - arguments.join(" "), - $error, - ); - }; - } - - let output = match Command::new(command).args(arguments).output() { - Ok(output) => output, - Err(error) => { - warn!(format!("error: {}", error)); - return None; - } - }; - - if !output.status.success() { - warn!(format!("exit code: {}", output.status)); - return None; - } - - Some(String::from_utf8_lossy(&output.stdout).into_owned()) -} - -/// Executes `llvm-config`, returning the `stdout` output if the command was -/// successfully executed. -pub fn run_llvm_config(arguments: &[&str]) -> Option { - let path = env::var("LLVM_CONFIG_PATH").unwrap_or_else(|_| "llvm-config".into()); - - let output = run_command(&path, arguments); - if output.is_none() { - println!("cargo:warning=set the LLVM_CONFIG_PATH environment variable to a valid `llvm-config` executable"); - } - - output -} - -/// Returns the paths to and the filenames of the files matching the supplied -/// filename patterns in the supplied directory. -fn search_directory(directory: &Path, filenames: &[String]) -> Vec<(PathBuf, String)> { - // Join the directory to the filename patterns to obtain the path patterns. - let paths = filenames - .iter() - .filter_map(|f| directory.join(f).to_str().map(ToOwned::to_owned)); - - // Prevent wildcards from matching path separators. - let mut options = MatchOptions::new(); - options.require_literal_separator = true; - - paths - .flat_map(|p| { - if let Ok(paths) = glob::glob_with(&p, options) { - paths.filter_map(Result::ok).collect() - } else { - vec![] - } - }) - .filter_map(|p| { - let filename = p.file_name().and_then(|f| f.to_str())?; - - // The `libclang_shared` library has been renamed to `libclang-cpp` - // in Clang 10. This can cause instances of this library (e.g., - // `libclang-cpp.so.10`) to be matched by patterns looking for - // instances of `libclang`. - if filename.contains("-cpp.") { - return None; - } - - Some((directory.to_owned(), filename.into())) - }) - .collect::>() -} - -/// Returns the paths to and the filenames of the files matching the supplied -/// filename patterns in the supplied directory, checking any relevant sibling -/// directories. -fn search_directories(directory: &Path, filenames: &[String]) -> Vec<(PathBuf, String)> { - let mut results = search_directory(directory, filenames); - - // On Windows, `libclang.dll` is usually found in the LLVM `bin` directory - // while `libclang.lib` is usually found in the LLVM `lib` directory. To - // keep things consistent with other platforms, only LLVM `lib` directories - // are included in the backup search directory globs so we need to search - // the LLVM `bin` directory here. - if cfg!(target_os = "windows") && directory.ends_with("lib") { - let sibling = directory.parent().unwrap().join("bin"); - results.extend(search_directory(&sibling, filenames).into_iter()); - } - - results -} - -/// Returns the paths to and the filenames of the `libclang` static or dynamic -/// libraries matching the supplied filename patterns. -pub fn search_libclang_directories(files: &[String], variable: &str) -> Vec<(PathBuf, String)> { - // Use the path provided by the relevant environment variable. - if let Ok(path) = env::var(variable).map(|d| Path::new(&d).to_path_buf()) { - // Check if the path is referring to a matching file already. 
- if let Some(parent) = path.parent() { - let filename = path.file_name().unwrap().to_str().unwrap(); - let libraries = search_directories(parent, files); - if libraries.iter().any(|(_, f)| f == filename) { - return vec![(parent.into(), filename.into())]; - } - } - - return search_directories(&path, files); - } - - let mut found = vec![]; - - // Search the `bin` and `lib` directories in directory provided by - // `llvm-config --prefix`. - if let Some(output) = run_llvm_config(&["--prefix"]) { - let directory = Path::new(output.lines().next().unwrap()).to_path_buf(); - found.extend(search_directories(&directory.join("bin"), files)); - found.extend(search_directories(&directory.join("lib"), files)); - found.extend(search_directories(&directory.join("lib64"), files)); - } - - // Search the toolchain directory in the directory provided by - // `xcode-select --print-path`. - if cfg!(target_os = "macos") { - if let Some(output) = run_command("xcode-select", &["--print-path"]) { - let directory = Path::new(output.lines().next().unwrap()).to_path_buf(); - let directory = directory.join("Toolchains/XcodeDefault.xctoolchain/usr/lib"); - found.extend(search_directories(&directory, files)); - } - } - - // Search the directories provided by the `LD_LIBRARY_PATH` environment - // variable. - if let Ok(path) = env::var("LD_LIBRARY_PATH") { - for directory in path.split(':').map(Path::new) { - found.extend(search_directories(&directory, files)); - } - } - - // Determine the `libclang` directory patterns. - let directories = if cfg!(any(target_os = "freebsd", target_os = "linux")) { - DIRECTORIES_LINUX - } else if cfg!(target_os = "macos") { - DIRECTORIES_MACOS - } else if cfg!(target_os = "windows") { - DIRECTORIES_WINDOWS - } else { - &[] - }; - - // Search the directories provided by the `libclang` directory patterns. - let mut options = MatchOptions::new(); - options.case_sensitive = false; - options.require_literal_separator = true; - for directory in directories.iter().rev() { - if let Ok(directories) = glob::glob_with(directory, options) { - for directory in directories.filter_map(Result::ok).filter(|p| p.is_dir()) { - found.extend(search_directories(&directory, files)); - } - } - } - - found -} +// Copyright 2018 Kyle Mayes +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +extern crate glob; + +use std::env; +use std::path::{Path, PathBuf}; +use std::process::Command; + +use glob::MatchOptions; + +/// `libclang` directory patterns for FreeBSD and Linux. +const DIRECTORIES_LINUX: &[&str] = &[ + "/usr/lib*", + "/usr/lib*/*", + "/usr/lib*/*/*", + "/usr/local/lib*", + "/usr/local/lib*/*", + "/usr/local/lib*/*/*", + "/usr/local/llvm*/lib*", +]; + +/// `libclang` directory patterns for macOS. +const DIRECTORIES_MACOS: &[&str] = &[ + "/usr/local/opt/llvm*/lib", + "/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib", + "/Library/Developer/CommandLineTools/usr/lib", + "/usr/local/opt/llvm*/lib/llvm*/lib", +]; + +/// `libclang` directory patterns for Windows. 
+const DIRECTORIES_WINDOWS: &[&str] = &[ + "C:\\LLVM\\lib", + "C:\\Program Files*\\LLVM\\lib", + "C:\\MSYS*\\MinGW*\\lib", +]; + +/// Executes the supplied console command, returning the `stdout` output if the +/// command was successfully executed. +fn run_command(command: &str, arguments: &[&str]) -> Option { + macro_rules! warn { + ($error:expr) => { + println!( + "cargo:warning=couldn't execute `{} {}` ({})", + command, + arguments.join(" "), + $error, + ); + }; + } + + let output = match Command::new(command).args(arguments).output() { + Ok(output) => output, + Err(error) => { + warn!(format!("error: {}", error)); + return None; + } + }; + + if !output.status.success() { + warn!(format!("exit code: {}", output.status)); + return None; + } + + Some(String::from_utf8_lossy(&output.stdout).into_owned()) +} + +/// Executes `llvm-config`, returning the `stdout` output if the command was +/// successfully executed. +pub fn run_llvm_config(arguments: &[&str]) -> Option { + let path = env::var("LLVM_CONFIG_PATH").unwrap_or_else(|_| "llvm-config".into()); + + let output = run_command(&path, arguments); + if output.is_none() { + println!( + "cargo:warning=set the LLVM_CONFIG_PATH environment variable to \ + the full path to a valid `llvm-config` executable (including the \ + executable itself)" + ); + } + + output +} + +/// Returns the paths to and the filenames of the files matching the supplied +/// filename patterns in the supplied directory. +fn search_directory(directory: &Path, filenames: &[String]) -> Vec<(PathBuf, String)> { + // Join the directory to the filename patterns to obtain the path patterns. + let paths = filenames + .iter() + .filter_map(|f| directory.join(f).to_str().map(ToOwned::to_owned)); + + // Prevent wildcards from matching path separators. + let mut options = MatchOptions::new(); + options.require_literal_separator = true; + + paths + .flat_map(|p| { + if let Ok(paths) = glob::glob_with(&p, options) { + paths.filter_map(Result::ok).collect() + } else { + vec![] + } + }) + .filter_map(|p| { + let filename = p.file_name().and_then(|f| f.to_str())?; + + // The `libclang_shared` library has been renamed to `libclang-cpp` + // in Clang 10. This can cause instances of this library (e.g., + // `libclang-cpp.so.10`) to be matched by patterns looking for + // instances of `libclang`. + if filename.contains("-cpp.") { + return None; + } + + Some((directory.to_owned(), filename.into())) + }) + .collect::>() +} + +/// Returns the paths to and the filenames of the files matching the supplied +/// filename patterns in the supplied directory, checking any relevant sibling +/// directories. +fn search_directories(directory: &Path, filenames: &[String]) -> Vec<(PathBuf, String)> { + let mut results = search_directory(directory, filenames); + + // On Windows, `libclang.dll` is usually found in the LLVM `bin` directory + // while `libclang.lib` is usually found in the LLVM `lib` directory. To + // keep things consistent with other platforms, only LLVM `lib` directories + // are included in the backup search directory globs so we need to search + // the LLVM `bin` directory here. + if cfg!(target_os = "windows") && directory.ends_with("lib") { + let sibling = directory.parent().unwrap().join("bin"); + results.extend(search_directory(&sibling, filenames).into_iter()); + } + + results +} + +/// Returns the paths to and the filenames of the `libclang` static or dynamic +/// libraries matching the supplied filename patterns. 
+pub fn search_libclang_directories(files: &[String], variable: &str) -> Vec<(PathBuf, String)> { + // Use the path provided by the relevant environment variable. + if let Ok(path) = env::var(variable).map(|d| Path::new(&d).to_path_buf()) { + // Check if the path is referring to a matching file already. + if let Some(parent) = path.parent() { + let filename = path.file_name().unwrap().to_str().unwrap(); + let libraries = search_directories(parent, files); + if libraries.iter().any(|(_, f)| f == filename) { + return vec![(parent.into(), filename.into())]; + } + } + + return search_directories(&path, files); + } + + let mut found = vec![]; + + // Search the `bin` and `lib` directories in directory provided by + // `llvm-config --prefix`. + if let Some(output) = run_llvm_config(&["--prefix"]) { + let directory = Path::new(output.lines().next().unwrap()).to_path_buf(); + found.extend(search_directories(&directory.join("bin"), files)); + found.extend(search_directories(&directory.join("lib"), files)); + found.extend(search_directories(&directory.join("lib64"), files)); + } + + // Search the toolchain directory in the directory provided by + // `xcode-select --print-path`. + if cfg!(target_os = "macos") { + if let Some(output) = run_command("xcode-select", &["--print-path"]) { + let directory = Path::new(output.lines().next().unwrap()).to_path_buf(); + let directory = directory.join("Toolchains/XcodeDefault.xctoolchain/usr/lib"); + found.extend(search_directories(&directory, files)); + } + } + + // Search the directories provided by the `LD_LIBRARY_PATH` environment + // variable. + if let Ok(path) = env::var("LD_LIBRARY_PATH") { + for directory in path.split(':').map(Path::new) { + found.extend(search_directories(&directory, files)); + } + } + + // Determine the `libclang` directory patterns. + let directories = if cfg!(any(target_os = "freebsd", target_os = "linux")) { + DIRECTORIES_LINUX + } else if cfg!(target_os = "macos") { + DIRECTORIES_MACOS + } else if cfg!(target_os = "windows") { + DIRECTORIES_WINDOWS + } else { + &[] + }; + + // Search the directories provided by the `libclang` directory patterns. + let mut options = MatchOptions::new(); + options.case_sensitive = false; + options.require_literal_separator = true; + for directory in directories.iter().rev() { + if let Ok(directories) = glob::glob_with(directory, options) { + for directory in directories.filter_map(Result::ok).filter(|p| p.is_dir()) { + found.extend(search_directories(&directory, files)); + } + } + } + + found +} diff --git a/third_party/rust/clang-sys/build/dynamic.rs b/third_party/rust/clang-sys/build/dynamic.rs index d1a162a1c4..94d3a16ca4 100644 --- a/third_party/rust/clang-sys/build/dynamic.rs +++ b/third_party/rust/clang-sys/build/dynamic.rs @@ -1,243 +1,244 @@ -// Copyright 2018 Kyle Mayes -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::env; -use std::fs::File; -use std::io::{self, Error, ErrorKind, Read, Seek, SeekFrom}; -use std::path::{Path, PathBuf}; - -use super::common; - -/// Returns the ELF class from the ELF header in the supplied file. -fn parse_elf_header(path: &Path) -> io::Result { - let mut file = File::open(path)?; - let mut buffer = [0; 5]; - file.read_exact(&mut buffer)?; - if buffer[..4] == [127, 69, 76, 70] { - Ok(buffer[4]) - } else { - Err(Error::new(ErrorKind::InvalidData, "invalid ELF header")) - } -} - -/// Returns the magic number from the PE header in the supplied file. -fn parse_pe_header(path: &Path) -> io::Result { - let mut file = File::open(path)?; - - // Determine the header offset. - let mut buffer = [0; 4]; - let start = SeekFrom::Start(0x3C); - file.seek(start)?; - file.read_exact(&mut buffer)?; - let offset = i32::from_le_bytes(buffer); - - // Determine the validity of the header. - file.seek(SeekFrom::Start(offset as u64))?; - file.read_exact(&mut buffer)?; - if buffer != [80, 69, 0, 0] { - return Err(Error::new(ErrorKind::InvalidData, "invalid PE header")); - } - - // Find the magic number. - let mut buffer = [0; 2]; - file.seek(SeekFrom::Current(20))?; - file.read_exact(&mut buffer)?; - Ok(u16::from_le_bytes(buffer)) -} - -/// Validates the header for the supplied `libclang` shared library. -fn validate_header(path: &Path) -> Result<(), String> { - if cfg!(any(target_os = "freebsd", target_os = "linux")) { - let class = parse_elf_header(path).map_err(|e| e.to_string())?; - - if cfg!(target_pointer_width = "32") && class != 1 { - return Err("invalid ELF class (64-bit)".into()); - } - - if cfg!(target_pointer_width = "64") && class != 2 { - return Err("invalid ELF class (32-bit)".into()); - } - - Ok(()) - } else if cfg!(target_os = "windows") { - let magic = parse_pe_header(path).map_err(|e| e.to_string())?; - - if cfg!(target_pointer_width = "32") && magic != 267 { - return Err("invalid DLL (64-bit)".into()); - } - - if cfg!(target_pointer_width = "64") && magic != 523 { - return Err("invalid DLL (32-bit)".into()); - } - - Ok(()) - } else { - Ok(()) - } -} - -/// Returns the components of the version in the supplied `libclang` shared -// library filename. -fn parse_version(filename: &str) -> Vec { - let version = if filename.starts_with("libclang.so.") { - &filename[12..] - } else if filename.starts_with("libclang-") { - &filename[9..filename.len() - 3] - } else { - return vec![]; - }; - - version.split('.').map(|s| s.parse().unwrap_or(0)).collect() -} - -/// Returns the paths to, the filenames, and the versions of the `libclang` -// shared libraries. -fn search_libclang_directories(runtime: bool) -> Result)>, String> { - let mut files = vec![format!( - "{}clang{}", - env::consts::DLL_PREFIX, - env::consts::DLL_SUFFIX - )]; - - if cfg!(target_os = "linux") { - // Some Linux distributions don't create a `libclang.so` symlink, so we - // need to look for versioned files (e.g., `libclang-3.9.so`). - files.push("libclang-*.so".into()); - - // Some Linux distributions don't create a `libclang.so` symlink and - // don't have versioned files as described above, so we need to look for - // suffix versioned files (e.g., `libclang.so.1`). However, `ld` cannot - // link to these files, so this will only be included when linking at - // runtime. 
- if runtime { - files.push("libclang.so.*".into()); - } - } - - if cfg!(any( - target_os = "openbsd", - target_os = "freebsd", - target_os = "netbsd" - )) { - // Some BSD distributions don't create a `libclang.so` symlink either, - // but use a different naming scheme for versioned files (e.g., - // `libclang.so.7.0`). - files.push("libclang.so.*".into()); - } - - if cfg!(target_os = "windows") { - // The official LLVM build uses `libclang.dll` on Windows instead of - // `clang.dll`. However, unofficial builds such as MinGW use `clang.dll`. - files.push("libclang.dll".into()); - } - - // Validate the `libclang` shared libraries and collect the versions. - let mut valid = vec![]; - let mut invalid = vec![]; - for (directory, filename) in common::search_libclang_directories(&files, "LIBCLANG_PATH") { - let path = directory.join(&filename); - match validate_header(&path) { - Ok(()) => { - let version = parse_version(&filename); - valid.push((directory, filename, version)) - } - Err(message) => invalid.push(format!("({}: {})", path.display(), message)), - } - } - - if !valid.is_empty() { - return Ok(valid); - } - - let message = format!( - "couldn't find any valid shared libraries matching: [{}], set the \ - `LIBCLANG_PATH` environment variable to a path where one of these files \ - can be found (invalid: [{}])", - files - .iter() - .map(|f| format!("'{}'", f)) - .collect::>() - .join(", "), - invalid.join(", "), - ); - - Err(message) -} - -/// Returns the directory and filename of the "best" available `libclang` shared -/// library. -pub fn find(runtime: bool) -> Result<(PathBuf, String), String> { - search_libclang_directories(runtime)? - .iter() - .max_by_key(|f| &f.2) - .cloned() - .map(|(path, filename, _)| (path, filename)) - .ok_or_else(|| "unreachable".into()) -} - -/// Find and link to `libclang` dynamically. -#[cfg(not(feature = "runtime"))] -pub fn link() { - use std::fs; - - let (directory, filename) = find(false).unwrap(); - println!("cargo:rustc-link-search={}", directory.display()); - - if cfg!(all(target_os = "windows", target_env = "msvc")) { - // Find the `libclang` stub static library required for the MSVC - // toolchain. - let lib = if !directory.ends_with("bin") { - directory.to_owned() - } else { - directory.parent().unwrap().join("lib") - }; - - if lib.join("libclang.lib").exists() { - println!("cargo:rustc-link-search={}", lib.display()); - } else if lib.join("libclang.dll.a").exists() { - // MSYS and MinGW use `libclang.dll.a` instead of `libclang.lib`. - // It is linkable with the MSVC linker, but Rust doesn't recognize - // the `.a` suffix, so we need to copy it with a different name. - // - // FIXME: Maybe we can just hardlink or symlink it? - let out = env::var("OUT_DIR").unwrap(); - fs::copy( - lib.join("libclang.dll.a"), - Path::new(&out).join("libclang.lib"), - ) - .unwrap(); - println!("cargo:rustc-link-search=native={}", out); - } else { - panic!( - "using '{}', so 'libclang.lib' or 'libclang.dll.a' must be \ - available in {}", - filename, - lib.display(), - ); - } - - println!("cargo:rustc-link-lib=dylib=libclang"); - } else { - let name = filename.trim_start_matches("lib"); - - // Strip extensions and trailing version numbers (e.g., the `.so.7.0` in - // `libclang.so.7.0`). 
- let name = match name.find(".dylib").or(name.find(".so")) { - Some(index) => &name[0..index], - None => &name, - }; - - println!("cargo:rustc-link-lib=dylib={}", name); - } -} +// Copyright 2018 Kyle Mayes +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::env; +use std::fs::File; +use std::io::{self, Error, ErrorKind, Read, Seek, SeekFrom}; +use std::path::{Path, PathBuf}; + +use super::common; + +/// Returns the ELF class from the ELF header in the supplied file. +fn parse_elf_header(path: &Path) -> io::Result { + let mut file = File::open(path)?; + let mut buffer = [0; 5]; + file.read_exact(&mut buffer)?; + if buffer[..4] == [127, 69, 76, 70] { + Ok(buffer[4]) + } else { + Err(Error::new(ErrorKind::InvalidData, "invalid ELF header")) + } +} + +/// Returns the magic number from the PE header in the supplied file. +fn parse_pe_header(path: &Path) -> io::Result { + let mut file = File::open(path)?; + + // Determine the header offset. + let mut buffer = [0; 4]; + let start = SeekFrom::Start(0x3C); + file.seek(start)?; + file.read_exact(&mut buffer)?; + let offset = i32::from_le_bytes(buffer); + + // Determine the validity of the header. + file.seek(SeekFrom::Start(offset as u64))?; + file.read_exact(&mut buffer)?; + if buffer != [80, 69, 0, 0] { + return Err(Error::new(ErrorKind::InvalidData, "invalid PE header")); + } + + // Find the magic number. + let mut buffer = [0; 2]; + file.seek(SeekFrom::Current(20))?; + file.read_exact(&mut buffer)?; + Ok(u16::from_le_bytes(buffer)) +} + +/// Validates the header for the supplied `libclang` shared library. +fn validate_header(path: &Path) -> Result<(), String> { + if cfg!(any(target_os = "freebsd", target_os = "linux")) { + let class = parse_elf_header(path).map_err(|e| e.to_string())?; + + if cfg!(target_pointer_width = "32") && class != 1 { + return Err("invalid ELF class (64-bit)".into()); + } + + if cfg!(target_pointer_width = "64") && class != 2 { + return Err("invalid ELF class (32-bit)".into()); + } + + Ok(()) + } else if cfg!(target_os = "windows") { + let magic = parse_pe_header(path).map_err(|e| e.to_string())?; + + if cfg!(target_pointer_width = "32") && magic != 267 { + return Err("invalid DLL (64-bit)".into()); + } + + if cfg!(target_pointer_width = "64") && magic != 523 { + return Err("invalid DLL (32-bit)".into()); + } + + Ok(()) + } else { + Ok(()) + } +} + +/// Returns the components of the version in the supplied `libclang` shared +// library filename. +fn parse_version(filename: &str) -> Vec { + let version = if filename.starts_with("libclang.so.") { + &filename[12..] + } else if filename.starts_with("libclang-") { + &filename[9..filename.len() - 3] + } else { + return vec![]; + }; + + version.split('.').map(|s| s.parse().unwrap_or(0)).collect() +} + +/// Returns the paths to, the filenames, and the versions of the `libclang` +// shared libraries. 
+fn search_libclang_directories(runtime: bool) -> Result)>, String> { + let mut files = vec![format!( + "{}clang{}", + env::consts::DLL_PREFIX, + env::consts::DLL_SUFFIX + )]; + + if cfg!(target_os = "linux") { + // Some Linux distributions don't create a `libclang.so` symlink, so we + // need to look for versioned files (e.g., `libclang-3.9.so`). + files.push("libclang-*.so".into()); + + // Some Linux distributions don't create a `libclang.so` symlink and + // don't have versioned files as described above, so we need to look for + // suffix versioned files (e.g., `libclang.so.1`). However, `ld` cannot + // link to these files, so this will only be included when linking at + // runtime. + if runtime { + files.push("libclang.so.*".into()); + files.push("libclang-*.so.*".into()); + } + } + + if cfg!(any( + target_os = "openbsd", + target_os = "freebsd", + target_os = "netbsd" + )) { + // Some BSD distributions don't create a `libclang.so` symlink either, + // but use a different naming scheme for versioned files (e.g., + // `libclang.so.7.0`). + files.push("libclang.so.*".into()); + } + + if cfg!(target_os = "windows") { + // The official LLVM build uses `libclang.dll` on Windows instead of + // `clang.dll`. However, unofficial builds such as MinGW use `clang.dll`. + files.push("libclang.dll".into()); + } + + // Validate the `libclang` shared libraries and collect the versions. + let mut valid = vec![]; + let mut invalid = vec![]; + for (directory, filename) in common::search_libclang_directories(&files, "LIBCLANG_PATH") { + let path = directory.join(&filename); + match validate_header(&path) { + Ok(()) => { + let version = parse_version(&filename); + valid.push((directory, filename, version)) + } + Err(message) => invalid.push(format!("({}: {})", path.display(), message)), + } + } + + if !valid.is_empty() { + return Ok(valid); + } + + let message = format!( + "couldn't find any valid shared libraries matching: [{}], set the \ + `LIBCLANG_PATH` environment variable to a path where one of these files \ + can be found (invalid: [{}])", + files + .iter() + .map(|f| format!("'{}'", f)) + .collect::>() + .join(", "), + invalid.join(", "), + ); + + Err(message) +} + +/// Returns the directory and filename of the "best" available `libclang` shared +/// library. +pub fn find(runtime: bool) -> Result<(PathBuf, String), String> { + search_libclang_directories(runtime)? + .iter() + .max_by_key(|f| &f.2) + .cloned() + .map(|(path, filename, _)| (path, filename)) + .ok_or_else(|| "unreachable".into()) +} + +/// Find and link to `libclang` dynamically. +#[cfg(not(feature = "runtime"))] +pub fn link() { + use std::fs; + + let (directory, filename) = find(false).unwrap(); + println!("cargo:rustc-link-search={}", directory.display()); + + if cfg!(all(target_os = "windows", target_env = "msvc")) { + // Find the `libclang` stub static library required for the MSVC + // toolchain. + let lib = if !directory.ends_with("bin") { + directory + } else { + directory.parent().unwrap().join("lib") + }; + + if lib.join("libclang.lib").exists() { + println!("cargo:rustc-link-search={}", lib.display()); + } else if lib.join("libclang.dll.a").exists() { + // MSYS and MinGW use `libclang.dll.a` instead of `libclang.lib`. + // It is linkable with the MSVC linker, but Rust doesn't recognize + // the `.a` suffix, so we need to copy it with a different name. + // + // FIXME: Maybe we can just hardlink or symlink it? 
+ let out = env::var("OUT_DIR").unwrap(); + fs::copy( + lib.join("libclang.dll.a"), + Path::new(&out).join("libclang.lib"), + ) + .unwrap(); + println!("cargo:rustc-link-search=native={}", out); + } else { + panic!( + "using '{}', so 'libclang.lib' or 'libclang.dll.a' must be \ + available in {}", + filename, + lib.display(), + ); + } + + println!("cargo:rustc-link-lib=dylib=libclang"); + } else { + let name = filename.trim_start_matches("lib"); + + // Strip extensions and trailing version numbers (e.g., the `.so.7.0` in + // `libclang.so.7.0`). + let name = match name.find(".dylib").or_else(|| name.find(".so")) { + Some(index) => &name[0..index], + None => &name, + }; + + println!("cargo:rustc-link-lib=dylib={}", name); + } +} diff --git a/third_party/rust/clang-sys/build/static.rs b/third_party/rust/clang-sys/build/static.rs index 6dd13e5a9c..0dfb9715d2 100644 --- a/third_party/rust/clang-sys/build/static.rs +++ b/third_party/rust/clang-sys/build/static.rs @@ -1,136 +1,136 @@ -// Copyright 2018 Kyle Mayes -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -extern crate glob; - -use std::path::{Path, PathBuf}; - -use common; - -/// Returns the name of an LLVM or Clang library from a path to such a library. -fn get_library_name(path: &Path) -> Option { - path.file_stem().map(|p| { - let string = p.to_string_lossy(); - if string.starts_with("lib") { - string[3..].to_owned() - } else { - string.to_string() - } - }) -} - -/// Returns the LLVM libraries required to link to `libclang` statically. -fn get_llvm_libraries() -> Vec { - common::run_llvm_config(&["--libs"]) - .unwrap() - .split_whitespace() - .filter_map(|p| { - // Depending on the version of `llvm-config` in use, listed - // libraries may be in one of two forms, a full path to the library - // or simply prefixed with `-l`. - if p.starts_with("-l") { - Some(p[2..].into()) - } else { - get_library_name(Path::new(p)) - } - }) - .collect() -} - -/// Clang libraries required to link to `libclang` 3.5 and later statically. -const CLANG_LIBRARIES: &[&str] = &[ - "clang", - "clangAST", - "clangAnalysis", - "clangBasic", - "clangDriver", - "clangEdit", - "clangFrontend", - "clangIndex", - "clangLex", - "clangParse", - "clangRewrite", - "clangSema", - "clangSerialization", -]; - -/// Returns the Clang libraries required to link to `libclang` statically. -fn get_clang_libraries>(directory: P) -> Vec { - let pattern = directory - .as_ref() - .join("libclang*.a") - .to_string_lossy() - .to_string(); - if let Ok(libraries) = glob::glob(&pattern) { - libraries - .filter_map(|l| l.ok().and_then(|l| get_library_name(&l))) - .collect() - } else { - CLANG_LIBRARIES.iter().map(|l| l.to_string()).collect() - } -} - -/// Returns a directory containing `libclang` static libraries. 
-fn find() -> PathBuf { - let name = if cfg!(target_os = "windows") { - "libclang.lib" - } else { - "libclang.a" - }; - - let files = common::search_libclang_directories(&[name.into()], "LIBCLANG_STATIC_PATH"); - if let Some((directory, _)) = files.into_iter().nth(0) { - directory - } else { - panic!("could not find any static libraries"); - } -} - -/// Find and link to `libclang` statically. -pub fn link() { - let directory = find(); - - // Specify required Clang static libraries. - println!("cargo:rustc-link-search=native={}", directory.display()); - for library in get_clang_libraries(directory) { - println!("cargo:rustc-link-lib=static={}", library); - } - - // Determine the shared mode used by LLVM. - let mode = common::run_llvm_config(&["--shared-mode"]).map(|m| m.trim().to_owned()); - let prefix = if mode.map_or(false, |m| m == "static") { - "static=" - } else { - "" - }; - - // Specify required LLVM static libraries. - println!( - "cargo:rustc-link-search=native={}", - common::run_llvm_config(&["--libdir"]).unwrap().trim_end() - ); - for library in get_llvm_libraries() { - println!("cargo:rustc-link-lib={}{}", prefix, library); - } - - // Specify required system libraries. - // MSVC doesn't need this, as it tracks dependencies inside `.lib` files. - if cfg!(target_os = "freebsd") { - println!("cargo:rustc-flags=-l ffi -l ncursesw -l c++ -l z"); - } else if cfg!(target_os = "linux") { - println!("cargo:rustc-flags=-l ffi -l ncursesw -l stdc++ -l z"); - } else if cfg!(target_os = "macos") { - println!("cargo:rustc-flags=-l ffi -l ncurses -l c++ -l z"); - } -} +// Copyright 2018 Kyle Mayes +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +extern crate glob; + +use std::path::{Path, PathBuf}; + +use common; + +/// Returns the name of an LLVM or Clang library from a path to such a library. +fn get_library_name(path: &Path) -> Option { + path.file_stem().map(|p| { + let string = p.to_string_lossy(); + if string.starts_with("lib") { + string[3..].to_owned() + } else { + string.to_string() + } + }) +} + +/// Returns the LLVM libraries required to link to `libclang` statically. +fn get_llvm_libraries() -> Vec { + common::run_llvm_config(&["--libs"]) + .unwrap() + .split_whitespace() + .filter_map(|p| { + // Depending on the version of `llvm-config` in use, listed + // libraries may be in one of two forms, a full path to the library + // or simply prefixed with `-l`. + if p.starts_with("-l") { + Some(p[2..].into()) + } else { + get_library_name(Path::new(p)) + } + }) + .collect() +} + +/// Clang libraries required to link to `libclang` 3.5 and later statically. +const CLANG_LIBRARIES: &[&str] = &[ + "clang", + "clangAST", + "clangAnalysis", + "clangBasic", + "clangDriver", + "clangEdit", + "clangFrontend", + "clangIndex", + "clangLex", + "clangParse", + "clangRewrite", + "clangSema", + "clangSerialization", +]; + +/// Returns the Clang libraries required to link to `libclang` statically. 
+fn get_clang_libraries>(directory: P) -> Vec { + let pattern = directory + .as_ref() + .join("libclang*.a") + .to_string_lossy() + .to_string(); + if let Ok(libraries) = glob::glob(&pattern) { + libraries + .filter_map(|l| l.ok().and_then(|l| get_library_name(&l))) + .collect() + } else { + CLANG_LIBRARIES.iter().map(|l| (*l).to_string()).collect() + } +} + +/// Returns a directory containing `libclang` static libraries. +fn find() -> PathBuf { + let name = if cfg!(target_os = "windows") { + "libclang.lib" + } else { + "libclang.a" + }; + + let files = common::search_libclang_directories(&[name.into()], "LIBCLANG_STATIC_PATH"); + if let Some((directory, _)) = files.into_iter().nth(0) { + directory + } else { + panic!("could not find any static libraries"); + } +} + +/// Find and link to `libclang` statically. +pub fn link() { + let directory = find(); + + // Specify required Clang static libraries. + println!("cargo:rustc-link-search=native={}", directory.display()); + for library in get_clang_libraries(directory) { + println!("cargo:rustc-link-lib=static={}", library); + } + + // Determine the shared mode used by LLVM. + let mode = common::run_llvm_config(&["--shared-mode"]).map(|m| m.trim().to_owned()); + let prefix = if mode.map_or(false, |m| m == "static") { + "static=" + } else { + "" + }; + + // Specify required LLVM static libraries. + println!( + "cargo:rustc-link-search=native={}", + common::run_llvm_config(&["--libdir"]).unwrap().trim_end() + ); + for library in get_llvm_libraries() { + println!("cargo:rustc-link-lib={}{}", prefix, library); + } + + // Specify required system libraries. + // MSVC doesn't need this, as it tracks dependencies inside `.lib` files. + if cfg!(target_os = "freebsd") { + println!("cargo:rustc-flags=-l ffi -l ncursesw -l c++ -l z"); + } else if cfg!(target_os = "linux") { + println!("cargo:rustc-flags=-l ffi -l ncursesw -l stdc++ -l z"); + } else if cfg!(target_os = "macos") { + println!("cargo:rustc-flags=-l ffi -l ncurses -l c++ -l z"); + } +} diff --git a/third_party/rust/clang-sys/ci/before_install.sh b/third_party/rust/clang-sys/ci/before_install.sh index a0de565f60..8976247270 100644 --- a/third_party/rust/clang-sys/ci/before_install.sh +++ b/third_party/rust/clang-sys/ci/before_install.sh @@ -1,57 +1,57 @@ -set -e -pushd ~ - -# Workaround for Travis CI macOS bug (https://github.com/travis-ci/travis-ci/issues/6307) -if [ "${TRAVIS_OS_NAME}" == "osx" ]; then - rvm get head || true -fi - -function llvm_version_triple() { - if [ "$1" == "3.5" ]; then - echo "3.5.2" - elif [ "$1" == "3.6" ]; then - echo "3.6.2" - elif [ "$1" == "3.7" ]; then - echo "3.7.1" - elif [ "$1" == "3.8" ]; then - echo "3.8.1" - elif [ "$1" == "3.9" ]; then - echo "3.9.0" - elif [ "$1" == "4.0" ]; then - echo "4.0.1" - elif [ "$1" == "5.0" ]; then - echo "5.0.2" - elif [ "$1" == "6.0" ]; then - echo "6.0.1" - elif [ "$1" == "7.0" ]; then - echo "7.0.0" - fi -} - -function llvm_download() { - export LLVM_VERSION_TRIPLE=`llvm_version_triple ${LLVM_VERSION}` - export LLVM=clang+llvm-${LLVM_VERSION_TRIPLE}-$1 - export LLVM_DIRECTORY="$HOME/.llvm/${LLVM}" - - if [ -d "${LLVM_DIRECTORY}" ]; then - echo "Using cached LLVM download for ${LLVM}..." 
- else - wget http://releases.llvm.org/${LLVM_VERSION_TRIPLE}/${LLVM}.tar.xz - mkdir -p "${LLVM_DIRECTORY}" - tar xf ${LLVM}.tar.xz -C "${LLVM_DIRECTORY}" --strip-components=1 - fi - - export LLVM_CONFIG_PATH="${LLVM_DIRECTORY}/bin/llvm-config" -} - -if [ "${TRAVIS_OS_NAME}" == "linux" ]; then - llvm_download x86_64-linux-gnu-ubuntu-14.04 - export LD_LIBRARY_PATH="${LLVM_DIRECTORY}/lib":$LD_LIBRARY_PATH -else - llvm_download x86_64-apple-darwin - cp "${LLVM_DIRECTORY}/lib/libclang.dylib" /usr/local/lib/libclang.dylib - export DYLD_LIBRARY_PATH="${LLVM_DIRECTORY}/lib":$DYLD_LIBRARY_PATH -fi - -popd -set +e +set -e +pushd ~ + +# Workaround for Travis CI macOS bug (https://github.com/travis-ci/travis-ci/issues/6307) +if [ "${TRAVIS_OS_NAME}" == "osx" ]; then + rvm get head || true +fi + +function llvm_version_triple() { + if [ "$1" == "3.5" ]; then + echo "3.5.2" + elif [ "$1" == "3.6" ]; then + echo "3.6.2" + elif [ "$1" == "3.7" ]; then + echo "3.7.1" + elif [ "$1" == "3.8" ]; then + echo "3.8.1" + elif [ "$1" == "3.9" ]; then + echo "3.9.0" + elif [ "$1" == "4.0" ]; then + echo "4.0.1" + elif [ "$1" == "5.0" ]; then + echo "5.0.2" + elif [ "$1" == "6.0" ]; then + echo "6.0.1" + elif [ "$1" == "7.0" ]; then + echo "7.0.0" + fi +} + +function llvm_download() { + export LLVM_VERSION_TRIPLE=`llvm_version_triple ${LLVM_VERSION}` + export LLVM=clang+llvm-${LLVM_VERSION_TRIPLE}-$1 + export LLVM_DIRECTORY="$HOME/.llvm/${LLVM}" + + if [ -d "${LLVM_DIRECTORY}" ]; then + echo "Using cached LLVM download for ${LLVM}..." + else + wget http://releases.llvm.org/${LLVM_VERSION_TRIPLE}/${LLVM}.tar.xz + mkdir -p "${LLVM_DIRECTORY}" + tar xf ${LLVM}.tar.xz -C "${LLVM_DIRECTORY}" --strip-components=1 + fi + + export LLVM_CONFIG_PATH="${LLVM_DIRECTORY}/bin/llvm-config" +} + +if [ "${TRAVIS_OS_NAME}" == "linux" ]; then + llvm_download x86_64-linux-gnu-ubuntu-14.04 + export LD_LIBRARY_PATH="${LLVM_DIRECTORY}/lib":$LD_LIBRARY_PATH +else + llvm_download x86_64-apple-darwin + cp "${LLVM_DIRECTORY}/lib/libclang.dylib" /usr/local/lib/libclang.dylib + export DYLD_LIBRARY_PATH="${LLVM_DIRECTORY}/lib":$DYLD_LIBRARY_PATH +fi + +popd +set +e diff --git a/third_party/rust/clang-sys/ci/install.bat b/third_party/rust/clang-sys/ci/install.bat index a3d54df5fe..4e3f315c5e 100644 --- a/third_party/rust/clang-sys/ci/install.bat +++ b/third_party/rust/clang-sys/ci/install.bat @@ -1,8 +1,8 @@ -curl -sSf https://static.rust-lang.org/dist/rust-1.36.0-i686-pc-windows-msvc.exe -o rust.exe -rust.exe /VERYSILENT /NORESTART /DIR="C:\Rust" -set PATH=%PATH%;C:\Rust\bin - -curl -sSf http://releases.llvm.org/%LLVM_VERSION%/LLVM-%LLVM_VERSION%-win32.exe -o LLVM.exe -7z x LLVM.exe -oC:\LLVM -set PATH=%PATH%;C:\LLVM\bin -set LIBCLANG_PATH=C:\LLVM\bin +curl -sSf https://static.rust-lang.org/dist/rust-1.36.0-i686-pc-windows-msvc.exe -o rust.exe +rust.exe /VERYSILENT /NORESTART /DIR="C:\Rust" +set PATH=%PATH%;C:\Rust\bin + +curl -sSf http://releases.llvm.org/%LLVM_VERSION%/LLVM-%LLVM_VERSION%-win32.exe -o LLVM.exe +7z x LLVM.exe -oC:\LLVM +set PATH=%PATH%;C:\LLVM\bin +set LIBCLANG_PATH=C:\LLVM\bin diff --git a/third_party/rust/clang-sys/ci/script.sh b/third_party/rust/clang-sys/ci/script.sh index 47ca43994b..1d7d902dc7 100644 --- a/third_party/rust/clang-sys/ci/script.sh +++ b/third_party/rust/clang-sys/ci/script.sh @@ -1,13 +1,13 @@ -if [ "${TRAVIS_OS_NAME}" == "osx" ]; then - rvm get head || true -fi - -set -e - -RUST_BACKTRACE=1 cargo test --verbose --features $CLANG_VERSION -- --nocapture - -if [ "${CLANG_VERSION}" \< "clang_3_7" ]; then - 
RUST_BACKTRACE=1 cargo test --verbose --features "$CLANG_VERSION static" -- --nocapture -fi - -RUST_BACKTRACE=1 cargo test --verbose --features "$CLANG_VERSION runtime" -- --nocapture +if [ "${TRAVIS_OS_NAME}" == "osx" ]; then + rvm get head || true +fi + +set -e + +RUST_BACKTRACE=1 cargo test --verbose --features $CLANG_VERSION -- --nocapture + +if [ "${CLANG_VERSION}" \< "clang_3_7" ]; then + RUST_BACKTRACE=1 cargo test --verbose --features "$CLANG_VERSION static" -- --nocapture +fi + +RUST_BACKTRACE=1 cargo test --verbose --features "$CLANG_VERSION runtime" -- --nocapture diff --git a/third_party/rust/clang-sys/ci/test_script.bat b/third_party/rust/clang-sys/ci/test_script.bat index 2928409f68..6f0f513afe 100644 --- a/third_party/rust/clang-sys/ci/test_script.bat +++ b/third_party/rust/clang-sys/ci/test_script.bat @@ -1,2 +1,2 @@ -set RUST_BACKTRACE=1 -cargo test --verbose --features %CLANG_VERSION% -- --nocapture +set RUST_BACKTRACE=1 +cargo test --verbose --features %CLANG_VERSION% -- --nocapture diff --git a/third_party/rust/clang-sys/clippy.toml b/third_party/rust/clang-sys/clippy.toml index 006bfc40b9..6f41284e10 100644 --- a/third_party/rust/clang-sys/clippy.toml +++ b/third_party/rust/clang-sys/clippy.toml @@ -1 +1 @@ -doc-valid-idents = ["FreeBSD"] +doc-valid-idents = ["FreeBSD"] diff --git a/third_party/rust/clang-sys/src/lib.rs b/third_party/rust/clang-sys/src/lib.rs index be4092f314..d8b6352972 100644 --- a/third_party/rust/clang-sys/src/lib.rs +++ b/third_party/rust/clang-sys/src/lib.rs @@ -1,2062 +1,2089 @@ -// Copyright 2016 Kyle Mayes -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Rust bindings for `libclang`. -//! -//! ## Supported Versions -//! -//! * 3.5 - [Documentation](https://kylemayes.github.io/clang-sys/3_5/clang_sys) -//! * 3.6 - [Documentation](https://kylemayes.github.io/clang-sys/3_6/clang_sys) -//! * 3.7 - [Documentation](https://kylemayes.github.io/clang-sys/3_7/clang_sys) -//! * 3.8 - [Documentation](https://kylemayes.github.io/clang-sys/3_8/clang_sys) -//! * 3.9 - [Documentation](https://kylemayes.github.io/clang-sys/3_9/clang_sys) -//! * 4.0 - [Documentation](https://kylemayes.github.io/clang-sys/4_0/clang_sys) -//! * 5.0 - [Documentation](https://kylemayes.github.io/clang-sys/5_0/clang_sys) -//! * 6.0 - [Documentation](https://kylemayes.github.io/clang-sys/6_0/clang_sys) -//! 
* 7.0 - [Documentation](https://kylemayes.github.io/clang-sys/7_0/clang_sys) - -#![allow(non_camel_case_types, non_snake_case, non_upper_case_globals)] -#![cfg_attr(feature = "cargo-clippy", allow(unreadable_literal))] - -extern crate glob; -extern crate libc; -#[cfg(feature = "runtime")] -extern crate libloading; - -pub mod support; - -#[macro_use] -mod link; - -use std::mem; - -use libc::*; - -pub type CXClientData = *mut c_void; -pub type CXCursorVisitor = extern "C" fn(CXCursor, CXCursor, CXClientData) -> CXChildVisitResult; -#[cfg(feature = "gte_clang_3_7")] -pub type CXFieldVisitor = extern "C" fn(CXCursor, CXClientData) -> CXVisitorResult; -pub type CXInclusionVisitor = extern "C" fn(CXFile, *mut CXSourceLocation, c_uint, CXClientData); - -//================================================ -// Macros -//================================================ - -/// Defines a C enum as a series of constants. -macro_rules! cenum { - ($(#[$meta:meta])* enum $name:ident { - $($(#[$vmeta:meta])* const $variant:ident = $value:expr), +, - }) => ( - pub type $name = c_int; - - $($(#[$vmeta])* pub const $variant: $name = $value;)+ - ); - ($(#[$meta:meta])* enum $name:ident { - $($(#[$vmeta:meta])* const $variant:ident = $value:expr); +; - }) => ( - pub type $name = c_int; - - $($(#[$vmeta])* pub const $variant: $name = $value;)+ - ); -} - -/// Implements a zeroing implementation of `Default` for the supplied type. -macro_rules! default { - (#[$meta:meta] $ty:ty) => { - #[$meta] - impl Default for $ty { - fn default() -> $ty { - unsafe { mem::zeroed() } - } - } - }; - - ($ty:ty) => { - impl Default for $ty { - fn default() -> $ty { - unsafe { mem::zeroed() } - } - } - }; -} - -//================================================ -// Enums -//================================================ - -cenum! { - enum CXAvailabilityKind { - const CXAvailability_Available = 0, - const CXAvailability_Deprecated = 1, - const CXAvailability_NotAvailable = 2, - const CXAvailability_NotAccessible = 3, - } -} - -cenum! { - enum CXCallingConv { - const CXCallingConv_Default = 0, - const CXCallingConv_C = 1, - const CXCallingConv_X86StdCall = 2, - const CXCallingConv_X86FastCall = 3, - const CXCallingConv_X86ThisCall = 4, - const CXCallingConv_X86Pascal = 5, - const CXCallingConv_AAPCS = 6, - const CXCallingConv_AAPCS_VFP = 7, - /// Only produced by `libclang` 4.0 and later. - const CXCallingConv_X86RegCall = 8, - const CXCallingConv_IntelOclBicc = 9, - const CXCallingConv_Win64 = 10, - const CXCallingConv_X86_64Win64 = 10, - const CXCallingConv_X86_64SysV = 11, - /// Only produced by `libclang` 3.6 and later. - const CXCallingConv_X86VectorCall = 12, - /// Only produced by `libclang` 3.9 and later. - const CXCallingConv_Swift = 13, - /// Only produced by `libclang` 3.9 and later. - const CXCallingConv_PreserveMost = 14, - /// Only produced by `libclang` 3.9 and later. - const CXCallingConv_PreserveAll = 15, - const CXCallingConv_Invalid = 100, - const CXCallingConv_Unexposed = 200, - } -} - -cenum! { - enum CXChildVisitResult { - const CXChildVisit_Break = 0, - const CXChildVisit_Continue = 1, - const CXChildVisit_Recurse = 2, - } -} - -cenum! { - enum CXCommentInlineCommandRenderKind { - const CXCommentInlineCommandRenderKind_Normal = 0, - const CXCommentInlineCommandRenderKind_Bold = 1, - const CXCommentInlineCommandRenderKind_Monospaced = 2, - const CXCommentInlineCommandRenderKind_Emphasized = 3, - } -} - -cenum! 
{ - enum CXCommentKind { - const CXComment_Null = 0, - const CXComment_Text = 1, - const CXComment_InlineCommand = 2, - const CXComment_HTMLStartTag = 3, - const CXComment_HTMLEndTag = 4, - const CXComment_Paragraph = 5, - const CXComment_BlockCommand = 6, - const CXComment_ParamCommand = 7, - const CXComment_TParamCommand = 8, - const CXComment_VerbatimBlockCommand = 9, - const CXComment_VerbatimBlockLine = 10, - const CXComment_VerbatimLine = 11, - const CXComment_FullComment = 12, - } -} - -cenum! { - enum CXCommentParamPassDirection { - const CXCommentParamPassDirection_In = 0, - const CXCommentParamPassDirection_Out = 1, - const CXCommentParamPassDirection_InOut = 2, - } -} - -cenum! { - enum CXCompilationDatabase_Error { - const CXCompilationDatabase_NoError = 0, - const CXCompilationDatabase_CanNotLoadDatabase = 1, - } -} - -cenum! { - enum CXCompletionChunkKind { - const CXCompletionChunk_Optional = 0, - const CXCompletionChunk_TypedText = 1, - const CXCompletionChunk_Text = 2, - const CXCompletionChunk_Placeholder = 3, - const CXCompletionChunk_Informative = 4, - const CXCompletionChunk_CurrentParameter = 5, - const CXCompletionChunk_LeftParen = 6, - const CXCompletionChunk_RightParen = 7, - const CXCompletionChunk_LeftBracket = 8, - const CXCompletionChunk_RightBracket = 9, - const CXCompletionChunk_LeftBrace = 10, - const CXCompletionChunk_RightBrace = 11, - const CXCompletionChunk_LeftAngle = 12, - const CXCompletionChunk_RightAngle = 13, - const CXCompletionChunk_Comma = 14, - const CXCompletionChunk_ResultType = 15, - const CXCompletionChunk_Colon = 16, - const CXCompletionChunk_SemiColon = 17, - const CXCompletionChunk_Equal = 18, - const CXCompletionChunk_HorizontalSpace = 19, - const CXCompletionChunk_VerticalSpace = 20, - } -} - -cenum! 
{ - enum CXCursorKind { - const CXCursor_UnexposedDecl = 1, - const CXCursor_StructDecl = 2, - const CXCursor_UnionDecl = 3, - const CXCursor_ClassDecl = 4, - const CXCursor_EnumDecl = 5, - const CXCursor_FieldDecl = 6, - const CXCursor_EnumConstantDecl = 7, - const CXCursor_FunctionDecl = 8, - const CXCursor_VarDecl = 9, - const CXCursor_ParmDecl = 10, - const CXCursor_ObjCInterfaceDecl = 11, - const CXCursor_ObjCCategoryDecl = 12, - const CXCursor_ObjCProtocolDecl = 13, - const CXCursor_ObjCPropertyDecl = 14, - const CXCursor_ObjCIvarDecl = 15, - const CXCursor_ObjCInstanceMethodDecl = 16, - const CXCursor_ObjCClassMethodDecl = 17, - const CXCursor_ObjCImplementationDecl = 18, - const CXCursor_ObjCCategoryImplDecl = 19, - const CXCursor_TypedefDecl = 20, - const CXCursor_CXXMethod = 21, - const CXCursor_Namespace = 22, - const CXCursor_LinkageSpec = 23, - const CXCursor_Constructor = 24, - const CXCursor_Destructor = 25, - const CXCursor_ConversionFunction = 26, - const CXCursor_TemplateTypeParameter = 27, - const CXCursor_NonTypeTemplateParameter = 28, - const CXCursor_TemplateTemplateParameter = 29, - const CXCursor_FunctionTemplate = 30, - const CXCursor_ClassTemplate = 31, - const CXCursor_ClassTemplatePartialSpecialization = 32, - const CXCursor_NamespaceAlias = 33, - const CXCursor_UsingDirective = 34, - const CXCursor_UsingDeclaration = 35, - const CXCursor_TypeAliasDecl = 36, - const CXCursor_ObjCSynthesizeDecl = 37, - const CXCursor_ObjCDynamicDecl = 38, - const CXCursor_CXXAccessSpecifier = 39, - const CXCursor_ObjCSuperClassRef = 40, - const CXCursor_ObjCProtocolRef = 41, - const CXCursor_ObjCClassRef = 42, - const CXCursor_TypeRef = 43, - const CXCursor_CXXBaseSpecifier = 44, - const CXCursor_TemplateRef = 45, - const CXCursor_NamespaceRef = 46, - const CXCursor_MemberRef = 47, - const CXCursor_LabelRef = 48, - const CXCursor_OverloadedDeclRef = 49, - const CXCursor_VariableRef = 50, - const CXCursor_InvalidFile = 70, - const CXCursor_NoDeclFound = 71, - const CXCursor_NotImplemented = 72, - const CXCursor_InvalidCode = 73, - const CXCursor_UnexposedExpr = 100, - const CXCursor_DeclRefExpr = 101, - const CXCursor_MemberRefExpr = 102, - const CXCursor_CallExpr = 103, - const CXCursor_ObjCMessageExpr = 104, - const CXCursor_BlockExpr = 105, - const CXCursor_IntegerLiteral = 106, - const CXCursor_FloatingLiteral = 107, - const CXCursor_ImaginaryLiteral = 108, - const CXCursor_StringLiteral = 109, - const CXCursor_CharacterLiteral = 110, - const CXCursor_ParenExpr = 111, - const CXCursor_UnaryOperator = 112, - const CXCursor_ArraySubscriptExpr = 113, - const CXCursor_BinaryOperator = 114, - const CXCursor_CompoundAssignOperator = 115, - const CXCursor_ConditionalOperator = 116, - const CXCursor_CStyleCastExpr = 117, - const CXCursor_CompoundLiteralExpr = 118, - const CXCursor_InitListExpr = 119, - const CXCursor_AddrLabelExpr = 120, - const CXCursor_StmtExpr = 121, - const CXCursor_GenericSelectionExpr = 122, - const CXCursor_GNUNullExpr = 123, - const CXCursor_CXXStaticCastExpr = 124, - const CXCursor_CXXDynamicCastExpr = 125, - const CXCursor_CXXReinterpretCastExpr = 126, - const CXCursor_CXXConstCastExpr = 127, - const CXCursor_CXXFunctionalCastExpr = 128, - const CXCursor_CXXTypeidExpr = 129, - const CXCursor_CXXBoolLiteralExpr = 130, - const CXCursor_CXXNullPtrLiteralExpr = 131, - const CXCursor_CXXThisExpr = 132, - const CXCursor_CXXThrowExpr = 133, - const CXCursor_CXXNewExpr = 134, - const CXCursor_CXXDeleteExpr = 135, - const CXCursor_UnaryExpr = 136, - const 
CXCursor_ObjCStringLiteral = 137, - const CXCursor_ObjCEncodeExpr = 138, - const CXCursor_ObjCSelectorExpr = 139, - const CXCursor_ObjCProtocolExpr = 140, - const CXCursor_ObjCBridgedCastExpr = 141, - const CXCursor_PackExpansionExpr = 142, - const CXCursor_SizeOfPackExpr = 143, - const CXCursor_LambdaExpr = 144, - const CXCursor_ObjCBoolLiteralExpr = 145, - const CXCursor_ObjCSelfExpr = 146, - /// Only produced by `libclang` 3.8 and later. - const CXCursor_OMPArraySectionExpr = 147, - /// Only produced by `libclang` 3.9 and later. - const CXCursor_ObjCAvailabilityCheckExpr = 148, - /// Only produced by `libclang` 7.0 and later. - const CXCursor_FixedPointLiteral = 149, - const CXCursor_UnexposedStmt = 200, - const CXCursor_LabelStmt = 201, - const CXCursor_CompoundStmt = 202, - const CXCursor_CaseStmt = 203, - const CXCursor_DefaultStmt = 204, - const CXCursor_IfStmt = 205, - const CXCursor_SwitchStmt = 206, - const CXCursor_WhileStmt = 207, - const CXCursor_DoStmt = 208, - const CXCursor_ForStmt = 209, - const CXCursor_GotoStmt = 210, - const CXCursor_IndirectGotoStmt = 211, - const CXCursor_ContinueStmt = 212, - const CXCursor_BreakStmt = 213, - const CXCursor_ReturnStmt = 214, - /// Duplicate of `CXCursor_GccAsmStmt`. - const CXCursor_AsmStmt = 215, - const CXCursor_ObjCAtTryStmt = 216, - const CXCursor_ObjCAtCatchStmt = 217, - const CXCursor_ObjCAtFinallyStmt = 218, - const CXCursor_ObjCAtThrowStmt = 219, - const CXCursor_ObjCAtSynchronizedStmt = 220, - const CXCursor_ObjCAutoreleasePoolStmt = 221, - const CXCursor_ObjCForCollectionStmt = 222, - const CXCursor_CXXCatchStmt = 223, - const CXCursor_CXXTryStmt = 224, - const CXCursor_CXXForRangeStmt = 225, - const CXCursor_SEHTryStmt = 226, - const CXCursor_SEHExceptStmt = 227, - const CXCursor_SEHFinallyStmt = 228, - const CXCursor_MSAsmStmt = 229, - const CXCursor_NullStmt = 230, - const CXCursor_DeclStmt = 231, - const CXCursor_OMPParallelDirective = 232, - const CXCursor_OMPSimdDirective = 233, - const CXCursor_OMPForDirective = 234, - const CXCursor_OMPSectionsDirective = 235, - const CXCursor_OMPSectionDirective = 236, - const CXCursor_OMPSingleDirective = 237, - const CXCursor_OMPParallelForDirective = 238, - const CXCursor_OMPParallelSectionsDirective = 239, - const CXCursor_OMPTaskDirective = 240, - const CXCursor_OMPMasterDirective = 241, - const CXCursor_OMPCriticalDirective = 242, - const CXCursor_OMPTaskyieldDirective = 243, - const CXCursor_OMPBarrierDirective = 244, - const CXCursor_OMPTaskwaitDirective = 245, - const CXCursor_OMPFlushDirective = 246, - const CXCursor_SEHLeaveStmt = 247, - /// Only produced by `libclang` 3.6 and later. - const CXCursor_OMPOrderedDirective = 248, - /// Only produced by `libclang` 3.6 and later. - const CXCursor_OMPAtomicDirective = 249, - /// Only produced by `libclang` 3.6 and later. - const CXCursor_OMPForSimdDirective = 250, - /// Only produced by `libclang` 3.6 and later. - const CXCursor_OMPParallelForSimdDirective = 251, - /// Only produced by `libclang` 3.6 and later. - const CXCursor_OMPTargetDirective = 252, - /// Only produced by `libclang` 3.6 and later. - const CXCursor_OMPTeamsDirective = 253, - /// Only produced by `libclang` 3.7 and later. - const CXCursor_OMPTaskgroupDirective = 254, - /// Only produced by `libclang` 3.7 and later. - const CXCursor_OMPCancellationPointDirective = 255, - /// Only produced by `libclang` 3.7 and later. - const CXCursor_OMPCancelDirective = 256, - /// Only produced by `libclang` 3.8 and later. 
- const CXCursor_OMPTargetDataDirective = 257, - /// Only produced by `libclang` 3.8 and later. - const CXCursor_OMPTaskLoopDirective = 258, - /// Only produced by `libclang` 3.8 and later. - const CXCursor_OMPTaskLoopSimdDirective = 259, - /// Only produced by `libclang` 3.8 and later. - const CXCursor_OMPDistributeDirective = 260, - /// Only produced by `libclang` 3.9 and later. - const CXCursor_OMPTargetEnterDataDirective = 261, - /// Only produced by `libclang` 3.9 and later. - const CXCursor_OMPTargetExitDataDirective = 262, - /// Only produced by `libclang` 3.9 and later. - const CXCursor_OMPTargetParallelDirective = 263, - /// Only produced by `libclang` 3.9 and later. - const CXCursor_OMPTargetParallelForDirective = 264, - /// Only produced by `libclang` 3.9 and later. - const CXCursor_OMPTargetUpdateDirective = 265, - /// Only produced by `libclang` 3.9 and later. - const CXCursor_OMPDistributeParallelForDirective = 266, - /// Only produced by `libclang` 3.9 and later. - const CXCursor_OMPDistributeParallelForSimdDirective = 267, - /// Only produced by `libclang` 3.9 and later. - const CXCursor_OMPDistributeSimdDirective = 268, - /// Only produced by `libclang` 3.9 and later. - const CXCursor_OMPTargetParallelForSimdDirective = 269, - /// Only produced by `libclang` 4.0 and later. - const CXCursor_OMPTargetSimdDirective = 270, - /// Only produced by `libclang` 4.0 and later. - const CXCursor_OMPTeamsDistributeDirective = 271, - /// Only produced by `libclang` 4.0 and later. - const CXCursor_OMPTeamsDistributeSimdDirective = 272, - /// Only produced by `libclang` 4.0 and later. - const CXCursor_OMPTeamsDistributeParallelForSimdDirective = 273, - /// Only produced by `libclang` 4.0 and later. - const CXCursor_OMPTeamsDistributeParallelForDirective = 274, - /// Only produced by `libclang` 4.0 and later. - const CXCursor_OMPTargetTeamsDirective = 275, - /// Only produced by `libclang` 4.0 and later. - const CXCursor_OMPTargetTeamsDistributeDirective = 276, - /// Only produced by `libclang` 4.0 and later. - const CXCursor_OMPTargetTeamsDistributeParallelForDirective = 277, - /// Only produced by `libclang` 4.0 and later. - const CXCursor_OMPTargetTeamsDistributeParallelForSimdDirective = 278, - /// Only producer by `libclang` 4.0 and later. - const CXCursor_OMPTargetTeamsDistributeSimdDirective = 279, - const CXCursor_TranslationUnit = 300, - const CXCursor_UnexposedAttr = 400, - const CXCursor_IBActionAttr = 401, - const CXCursor_IBOutletAttr = 402, - const CXCursor_IBOutletCollectionAttr = 403, - const CXCursor_CXXFinalAttr = 404, - const CXCursor_CXXOverrideAttr = 405, - const CXCursor_AnnotateAttr = 406, - const CXCursor_AsmLabelAttr = 407, - const CXCursor_PackedAttr = 408, - const CXCursor_PureAttr = 409, - const CXCursor_ConstAttr = 410, - const CXCursor_NoDuplicateAttr = 411, - const CXCursor_CUDAConstantAttr = 412, - const CXCursor_CUDADeviceAttr = 413, - const CXCursor_CUDAGlobalAttr = 414, - const CXCursor_CUDAHostAttr = 415, - /// Only produced by `libclang` 3.6 and later. - const CXCursor_CUDASharedAttr = 416, - /// Only produced by `libclang` 3.8 and later. - const CXCursor_VisibilityAttr = 417, - /// Only produced by `libclang` 3.8 and later. - const CXCursor_DLLExport = 418, - /// Only produced by `libclang` 3.8 and later. - const CXCursor_DLLImport = 419, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_NSReturnsRetained = 420, - /// Only produced by `libclang` 8.0 and later. 
- const CXCursor_NSReturnsNotRetained = 421, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_NSReturnsAutoreleased = 422, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_NSConsumesSelf = 423, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_NSConsumed = 424, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_ObjCException = 425, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_ObjCNSObject = 426, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_ObjCIndependentClass = 427, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_ObjCPreciseLifetime = 428, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_ObjCReturnsInnerPointer = 429, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_ObjCRequiresSuper = 430, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_ObjCRootClass = 431, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_ObjCSubclassingRestricted = 432, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_ObjCExplicitProtocolImpl = 433, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_ObjCDesignatedInitializer = 434, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_ObjCRuntimeVisible = 435, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_ObjCBoxable = 436, - /// Only produced by `libclang` 8.0 and later. - const CXCursor_FlagEnum = 437, - const CXCursor_PreprocessingDirective = 500, - const CXCursor_MacroDefinition = 501, - /// Duplicate of `CXCursor_MacroInstantiation`. - const CXCursor_MacroExpansion = 502, - const CXCursor_InclusionDirective = 503, - const CXCursor_ModuleImportDecl = 600, - /// Only produced by `libclang` 3.8 and later. - const CXCursor_TypeAliasTemplateDecl = 601, - /// Only produced by `libclang` 3.9 and later. - const CXCursor_StaticAssert = 602, - /// Only produced by `libclang` 4.0 and later. - const CXCursor_FriendDecl = 603, - /// Only produced by `libclang` 3.7 and later. - const CXCursor_OverloadCandidate = 700, - } -} - -cenum! { - #[cfg(feature="gte_clang_5_0")] - enum CXCursor_ExceptionSpecificationKind { - const CXCursor_ExceptionSpecificationKind_None = 0, - const CXCursor_ExceptionSpecificationKind_DynamicNone = 1, - const CXCursor_ExceptionSpecificationKind_Dynamic = 2, - const CXCursor_ExceptionSpecificationKind_MSAny = 3, - const CXCursor_ExceptionSpecificationKind_BasicNoexcept = 4, - const CXCursor_ExceptionSpecificationKind_ComputedNoexcept = 5, - const CXCursor_ExceptionSpecificationKind_Unevaluated = 6, - const CXCursor_ExceptionSpecificationKind_Uninstantiated = 7, - const CXCursor_ExceptionSpecificationKind_Unparsed = 8, - } -} - -cenum! { - enum CXDiagnosticSeverity { - const CXDiagnostic_Ignored = 0, - const CXDiagnostic_Note = 1, - const CXDiagnostic_Warning = 2, - const CXDiagnostic_Error = 3, - const CXDiagnostic_Fatal = 4, - } -} - -cenum! { - enum CXErrorCode { - const CXError_Success = 0, - const CXError_Failure = 1, - const CXError_Crashed = 2, - const CXError_InvalidArguments = 3, - const CXError_ASTReadError = 4, - } -} - -cenum! { - enum CXEvalResultKind { - const CXEval_UnExposed = 0, - const CXEval_Int = 1 , - const CXEval_Float = 2, - const CXEval_ObjCStrLiteral = 3, - const CXEval_StrLiteral = 4, - const CXEval_CFStr = 5, - const CXEval_Other = 6, - } -} - -cenum! 
{ - enum CXIdxAttrKind { - const CXIdxAttr_Unexposed = 0, - const CXIdxAttr_IBAction = 1, - const CXIdxAttr_IBOutlet = 2, - const CXIdxAttr_IBOutletCollection = 3, - } -} - -cenum! { - enum CXIdxEntityCXXTemplateKind { - const CXIdxEntity_NonTemplate = 0, - const CXIdxEntity_Template = 1, - const CXIdxEntity_TemplatePartialSpecialization = 2, - const CXIdxEntity_TemplateSpecialization = 3, - } -} - -cenum! { - enum CXIdxEntityKind { - const CXIdxEntity_Unexposed = 0, - const CXIdxEntity_Typedef = 1, - const CXIdxEntity_Function = 2, - const CXIdxEntity_Variable = 3, - const CXIdxEntity_Field = 4, - const CXIdxEntity_EnumConstant = 5, - const CXIdxEntity_ObjCClass = 6, - const CXIdxEntity_ObjCProtocol = 7, - const CXIdxEntity_ObjCCategory = 8, - const CXIdxEntity_ObjCInstanceMethod = 9, - const CXIdxEntity_ObjCClassMethod = 10, - const CXIdxEntity_ObjCProperty = 11, - const CXIdxEntity_ObjCIvar = 12, - const CXIdxEntity_Enum = 13, - const CXIdxEntity_Struct = 14, - const CXIdxEntity_Union = 15, - const CXIdxEntity_CXXClass = 16, - const CXIdxEntity_CXXNamespace = 17, - const CXIdxEntity_CXXNamespaceAlias = 18, - const CXIdxEntity_CXXStaticVariable = 19, - const CXIdxEntity_CXXStaticMethod = 20, - const CXIdxEntity_CXXInstanceMethod = 21, - const CXIdxEntity_CXXConstructor = 22, - const CXIdxEntity_CXXDestructor = 23, - const CXIdxEntity_CXXConversionFunction = 24, - const CXIdxEntity_CXXTypeAlias = 25, - const CXIdxEntity_CXXInterface = 26, - } -} - -cenum! { - enum CXIdxEntityLanguage { - const CXIdxEntityLang_None = 0, - const CXIdxEntityLang_C = 1, - const CXIdxEntityLang_ObjC = 2, - const CXIdxEntityLang_CXX = 3, - /// Only produced by `libclang` 5.0 and later. - const CXIdxEntityLang_Swift = 4, - } -} - -cenum! { - enum CXIdxEntityRefKind { - const CXIdxEntityRef_Direct = 1, - const CXIdxEntityRef_Implicit = 2, - } -} - -cenum! { - enum CXIdxObjCContainerKind { - const CXIdxObjCContainer_ForwardRef = 0, - const CXIdxObjCContainer_Interface = 1, - const CXIdxObjCContainer_Implementation = 2, - } -} - -cenum! { - enum CXLanguageKind { - const CXLanguage_Invalid = 0, - const CXLanguage_C = 1, - const CXLanguage_ObjC = 2, - const CXLanguage_CPlusPlus = 3, - } -} - -cenum! { - enum CXLinkageKind { - const CXLinkage_Invalid = 0, - const CXLinkage_NoLinkage = 1, - const CXLinkage_Internal = 2, - const CXLinkage_UniqueExternal = 3, - const CXLinkage_External = 4, - } -} - -cenum! { - enum CXLoadDiag_Error { - const CXLoadDiag_None = 0, - const CXLoadDiag_Unknown = 1, - const CXLoadDiag_CannotLoad = 2, - const CXLoadDiag_InvalidFile = 3, - } -} - -cenum! 
{ - #[cfg(feature="gte_clang_7_0")] - enum CXPrintingPolicyProperty { - const CXPrintingPolicy_Indentation = 0, - const CXPrintingPolicy_SuppressSpecifiers = 1, - const CXPrintingPolicy_SuppressTagKeyword = 2, - const CXPrintingPolicy_IncludeTagDefinition = 3, - const CXPrintingPolicy_SuppressScope = 4, - const CXPrintingPolicy_SuppressUnwrittenScope = 5, - const CXPrintingPolicy_SuppressInitializers = 6, - const CXPrintingPolicy_ConstantArraySizeAsWritten = 7, - const CXPrintingPolicy_AnonymousTagLocations = 8, - const CXPrintingPolicy_SuppressStrongLifetime = 9, - const CXPrintingPolicy_SuppressLifetimeQualifiers = 10, - const CXPrintingPolicy_SuppressTemplateArgsInCXXConstructors = 11, - const CXPrintingPolicy_Bool = 12, - const CXPrintingPolicy_Restrict = 13, - const CXPrintingPolicy_Alignof = 14, - const CXPrintingPolicy_UnderscoreAlignof = 15, - const CXPrintingPolicy_UseVoidForZeroParams = 16, - const CXPrintingPolicy_TerseOutput = 17, - const CXPrintingPolicy_PolishForDeclaration = 18, - const CXPrintingPolicy_Half = 19, - const CXPrintingPolicy_MSWChar = 20, - const CXPrintingPolicy_IncludeNewlines = 21, - const CXPrintingPolicy_MSVCFormatting = 22, - const CXPrintingPolicy_ConstantsAsWritten = 23, - const CXPrintingPolicy_SuppressImplicitBase = 24, - const CXPrintingPolicy_FullyQualifiedName = 25, - } -} - -cenum! { - enum CXRefQualifierKind { - const CXRefQualifier_None = 0, - const CXRefQualifier_LValue = 1, - const CXRefQualifier_RValue = 2, - } -} - -cenum! { - enum CXResult { - const CXResult_Success = 0, - const CXResult_Invalid = 1, - const CXResult_VisitBreak = 2, - } -} - -cenum! { - enum CXSaveError { - const CXSaveError_None = 0, - const CXSaveError_Unknown = 1, - const CXSaveError_TranslationErrors = 2, - const CXSaveError_InvalidTU = 3, - } -} - -cenum! { - #[cfg(feature="gte_clang_6_0")] - enum CXTLSKind { - const CXTLS_None = 0, - const CXTLS_Dynamic = 1, - const CXTLS_Static = 2, - } -} - -cenum! { - enum CXTUResourceUsageKind { - const CXTUResourceUsage_AST = 1, - const CXTUResourceUsage_Identifiers = 2, - const CXTUResourceUsage_Selectors = 3, - const CXTUResourceUsage_GlobalCompletionResults = 4, - const CXTUResourceUsage_SourceManagerContentCache = 5, - const CXTUResourceUsage_AST_SideTables = 6, - const CXTUResourceUsage_SourceManager_Membuffer_Malloc = 7, - const CXTUResourceUsage_SourceManager_Membuffer_MMap = 8, - const CXTUResourceUsage_ExternalASTSource_Membuffer_Malloc = 9, - const CXTUResourceUsage_ExternalASTSource_Membuffer_MMap = 10, - const CXTUResourceUsage_Preprocessor = 11, - const CXTUResourceUsage_PreprocessingRecord = 12, - const CXTUResourceUsage_SourceManager_DataStructures = 13, - const CXTUResourceUsage_Preprocessor_HeaderSearch = 14, - } -} - -cenum! { - #[cfg(feature="gte_clang_3_6")] - enum CXTemplateArgumentKind { - const CXTemplateArgumentKind_Null = 0, - const CXTemplateArgumentKind_Type = 1, - const CXTemplateArgumentKind_Declaration = 2, - const CXTemplateArgumentKind_NullPtr = 3, - const CXTemplateArgumentKind_Integral = 4, - const CXTemplateArgumentKind_Template = 5, - const CXTemplateArgumentKind_TemplateExpansion = 6, - const CXTemplateArgumentKind_Expression = 7, - const CXTemplateArgumentKind_Pack = 8, - const CXTemplateArgumentKind_Invalid = 9, - } -} - -cenum! { - enum CXTokenKind { - const CXToken_Punctuation = 0, - const CXToken_Keyword = 1, - const CXToken_Identifier = 2, - const CXToken_Literal = 3, - const CXToken_Comment = 4, - } -} - -cenum! 
{ - enum CXTypeKind { - const CXType_Invalid = 0, - const CXType_Unexposed = 1, - const CXType_Void = 2, - const CXType_Bool = 3, - const CXType_Char_U = 4, - const CXType_UChar = 5, - const CXType_Char16 = 6, - const CXType_Char32 = 7, - const CXType_UShort = 8, - const CXType_UInt = 9, - const CXType_ULong = 10, - const CXType_ULongLong = 11, - const CXType_UInt128 = 12, - const CXType_Char_S = 13, - const CXType_SChar = 14, - const CXType_WChar = 15, - const CXType_Short = 16, - const CXType_Int = 17, - const CXType_Long = 18, - const CXType_LongLong = 19, - const CXType_Int128 = 20, - const CXType_Float = 21, - const CXType_Double = 22, - const CXType_LongDouble = 23, - const CXType_NullPtr = 24, - const CXType_Overload = 25, - const CXType_Dependent = 26, - const CXType_ObjCId = 27, - const CXType_ObjCClass = 28, - const CXType_ObjCSel = 29, - /// Only produced by `libclang` 3.9 and later. - const CXType_Float128 = 30, - /// Only produced by `libclang` 5.0 and later. - const CXType_Half = 31, - /// Only produced by `libclang` 6.0 and later. - const CXType_Float16 = 32, - /// Only produced by `libclang` 7.0 and later. - const CXType_ShortAccum = 33, - /// Only produced by `libclang` 7.0 and later. - const CXType_Accum = 34, - /// Only produced by `libclang` 7.0 and later. - const CXType_LongAccum = 35, - /// Only produced by `libclang` 7.0 and later. - const CXType_UShortAccum = 36, - /// Only produced by `libclang` 7.0 and later. - const CXType_UAccum = 37, - /// Only produced by `libclang` 7.0 and later. - const CXType_ULongAccum = 38, - const CXType_Complex = 100, - const CXType_Pointer = 101, - const CXType_BlockPointer = 102, - const CXType_LValueReference = 103, - const CXType_RValueReference = 104, - const CXType_Record = 105, - const CXType_Enum = 106, - const CXType_Typedef = 107, - const CXType_ObjCInterface = 108, - const CXType_ObjCObjectPointer = 109, - const CXType_FunctionNoProto = 110, - const CXType_FunctionProto = 111, - const CXType_ConstantArray = 112, - const CXType_Vector = 113, - const CXType_IncompleteArray = 114, - const CXType_VariableArray = 115, - const CXType_DependentSizedArray = 116, - const CXType_MemberPointer = 117, - /// Only produced by `libclang` 3.8 and later. - const CXType_Auto = 118, - /// Only produced by `libclang` 3.9 and later. - const CXType_Elaborated = 119, - /// Only produced by `libclang` 5.0 and later. - const CXType_Pipe = 120, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage1dRO = 121, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage1dArrayRO = 122, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage1dBufferRO = 123, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dRO = 124, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dArrayRO = 125, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dDepthRO = 126, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dArrayDepthRO = 127, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dMSAARO = 128, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dArrayMSAARO = 129, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dMSAADepthRO = 130, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dArrayMSAADepthRO = 131, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage3dRO = 132, - /// Only produced by `libclang` 5.0 and later. 
- const CXType_OCLImage1dWO = 133, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage1dArrayWO = 134, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage1dBufferWO = 135, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dWO = 136, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dArrayWO = 137, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dDepthWO = 138, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dArrayDepthWO = 139, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dMSAAWO = 140, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dArrayMSAAWO = 141, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dMSAADepthWO = 142, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dArrayMSAADepthWO = 143, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage3dWO = 144, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage1dRW = 145, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage1dArrayRW = 146, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage1dBufferRW = 147, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dRW = 148, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dArrayRW = 149, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dDepthRW = 150, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dArrayDepthRW = 151, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dMSAARW = 152, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dArrayMSAARW = 153, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dMSAADepthRW = 154, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage2dArrayMSAADepthRW = 155, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLImage3dRW = 156, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLSampler = 157, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLEvent = 158, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLQueue = 159, - /// Only produced by `libclang` 5.0 and later. - const CXType_OCLReserveID = 160, - /// Only produced by `libclang` 8.0 and later. - const CXType_ObjCObject = 161, - /// Only produced by `libclang` 8.0 and later. - const CXType_ObjCTypeParam = 162, - /// Only produced by `libclang` 8.0 and later. - const CXType_Attributed = 163, - /// Only produced by `libclang` 8.0 and later. - const CXType_OCLIntelSubgroupAVCMcePayload = 164, - /// Only produced by `libclang` 8.0 and later. - const CXType_OCLIntelSubgroupAVCImePayload = 165, - /// Only produced by `libclang` 8.0 and later. - const CXType_OCLIntelSubgroupAVCRefPayload = 166, - /// Only produced by `libclang` 8.0 and later. - const CXType_OCLIntelSubgroupAVCSicPayload = 167, - /// Only produced by `libclang` 8.0 and later. - const CXType_OCLIntelSubgroupAVCMceResult = 168, - /// Only produced by `libclang` 8.0 and later. - const CXType_OCLIntelSubgroupAVCImeResult = 169, - /// Only produced by `libclang` 8.0 and later. - const CXType_OCLIntelSubgroupAVCRefResult = 170, - /// Only produced by `libclang` 8.0 and later. - const CXType_OCLIntelSubgroupAVCSicResult = 171, - /// Only produced by `libclang` 8.0 and later. 
- const CXType_OCLIntelSubgroupAVCImeResultSingleRefStreamout = 172, - /// Only produced by `libclang` 8.0 and later. - const CXType_OCLIntelSubgroupAVCImeResultDualRefStreamout = 173, - /// Only produced by `libclang` 8.0 and later. - const CXType_OCLIntelSubgroupAVCImeSingleRefStreamin = 174, - /// Only produced by `libclang` 8.0 and later. - const CXType_OCLIntelSubgroupAVCImeDualRefStreamin = 175, - } -} - -cenum! { - enum CXTypeLayoutError { - const CXTypeLayoutError_Invalid = -1, - const CXTypeLayoutError_Incomplete = -2, - const CXTypeLayoutError_Dependent = -3, - const CXTypeLayoutError_NotConstantSize = -4, - const CXTypeLayoutError_InvalidFieldName = -5, - } -} - -cenum! { - #[cfg(feature="gte_clang_3_8")] - enum CXVisibilityKind { - const CXVisibility_Invalid = 0, - const CXVisibility_Hidden = 1, - const CXVisibility_Protected = 2, - const CXVisibility_Default = 3, - } -} - -cenum! { - #[cfg(feature="gte_clang_8_0")] - enum CXTypeNullabilityKind { - const CXTypeNullability_NonNull = 0, - const CXTypeNullability_Nullable = 1, - const CXTypeNullability_Unspecified = 2, - const CXTypeNullability_Invalid = 3, - } -} - -cenum! { - enum CXVisitorResult { - const CXVisit_Break = 0, - const CXVisit_Continue = 1, - } -} - -cenum! { - enum CX_CXXAccessSpecifier { - const CX_CXXInvalidAccessSpecifier = 0, - const CX_CXXPublic = 1, - const CX_CXXProtected = 2, - const CX_CXXPrivate = 3, - } -} - -cenum! { - #[cfg(feature="gte_clang_3_6")] - enum CX_StorageClass { - const CX_SC_Invalid = 0, - const CX_SC_None = 1, - const CX_SC_Extern = 2, - const CX_SC_Static = 3, - const CX_SC_PrivateExtern = 4, - const CX_SC_OpenCLWorkGroupLocal = 5, - const CX_SC_Auto = 6, - const CX_SC_Register = 7, - } -} - -//================================================ -// Flags -//================================================ - -cenum! { - enum CXCodeComplete_Flags { - const CXCodeComplete_IncludeMacros = 1; - const CXCodeComplete_IncludeCodePatterns = 2; - const CXCodeComplete_IncludeBriefComments = 4; - const CXCodeComplete_SkipPreamble = 8; - const CXCodeComplete_IncludeCompletionsWithFixIts = 16; - } -} - -cenum! { - enum CXCompletionContext { - const CXCompletionContext_Unexposed = 0; - const CXCompletionContext_AnyType = 1; - const CXCompletionContext_AnyValue = 2; - const CXCompletionContext_ObjCObjectValue = 4; - const CXCompletionContext_ObjCSelectorValue = 8; - const CXCompletionContext_CXXClassTypeValue = 16; - const CXCompletionContext_DotMemberAccess = 32; - const CXCompletionContext_ArrowMemberAccess = 64; - const CXCompletionContext_ObjCPropertyAccess = 128; - const CXCompletionContext_EnumTag = 256; - const CXCompletionContext_UnionTag = 512; - const CXCompletionContext_StructTag = 1024; - const CXCompletionContext_ClassTag = 2048; - const CXCompletionContext_Namespace = 4096; - const CXCompletionContext_NestedNameSpecifier = 8192; - const CXCompletionContext_ObjCInterface = 16384; - const CXCompletionContext_ObjCProtocol = 32768; - const CXCompletionContext_ObjCCategory = 65536; - const CXCompletionContext_ObjCInstanceMessage = 131072; - const CXCompletionContext_ObjCClassMessage = 262144; - const CXCompletionContext_ObjCSelectorName = 524288; - const CXCompletionContext_MacroName = 1048576; - const CXCompletionContext_NaturalLanguage = 2097152; - const CXCompletionContext_IncludedFile = 4194304; - const CXCompletionContext_Unknown = 8388607; - } -} - -cenum! 
{ - enum CXDiagnosticDisplayOptions { - const CXDiagnostic_DisplaySourceLocation = 1; - const CXDiagnostic_DisplayColumn = 2; - const CXDiagnostic_DisplaySourceRanges = 4; - const CXDiagnostic_DisplayOption = 8; - const CXDiagnostic_DisplayCategoryId = 16; - const CXDiagnostic_DisplayCategoryName = 32; - } -} - -cenum! { - enum CXGlobalOptFlags { - const CXGlobalOpt_None = 0; - const CXGlobalOpt_ThreadBackgroundPriorityForIndexing = 1; - const CXGlobalOpt_ThreadBackgroundPriorityForEditing = 2; - const CXGlobalOpt_ThreadBackgroundPriorityForAll = 3; - } -} - -cenum! { - enum CXIdxDeclInfoFlags { - const CXIdxDeclFlag_Skipped = 1; - } -} - -cenum! { - enum CXIndexOptFlags { - const CXIndexOptNone = 0; - const CXIndexOptSuppressRedundantRefs = 1; - const CXIndexOptIndexFunctionLocalSymbols = 2; - const CXIndexOptIndexImplicitTemplateInstantiations = 4; - const CXIndexOptSuppressWarnings = 8; - const CXIndexOptSkipParsedBodiesInSession = 16; - } -} - -cenum! { - enum CXNameRefFlags { - const CXNameRange_WantQualifier = 1; - const CXNameRange_WantTemplateArgs = 2; - const CXNameRange_WantSinglePiece = 4; - } -} - -cenum! { - enum CXObjCDeclQualifierKind { - const CXObjCDeclQualifier_None = 0; - const CXObjCDeclQualifier_In = 1; - const CXObjCDeclQualifier_Inout = 2; - const CXObjCDeclQualifier_Out = 4; - const CXObjCDeclQualifier_Bycopy = 8; - const CXObjCDeclQualifier_Byref = 16; - const CXObjCDeclQualifier_Oneway = 32; - } -} - -cenum! { - enum CXObjCPropertyAttrKind { - const CXObjCPropertyAttr_noattr = 0; - const CXObjCPropertyAttr_readonly = 1; - const CXObjCPropertyAttr_getter = 2; - const CXObjCPropertyAttr_assign = 4; - const CXObjCPropertyAttr_readwrite = 8; - const CXObjCPropertyAttr_retain = 16; - const CXObjCPropertyAttr_copy = 32; - const CXObjCPropertyAttr_nonatomic = 64; - const CXObjCPropertyAttr_setter = 128; - const CXObjCPropertyAttr_atomic = 256; - const CXObjCPropertyAttr_weak = 512; - const CXObjCPropertyAttr_strong = 1024; - const CXObjCPropertyAttr_unsafe_unretained = 2048; - #[cfg(feature="gte_clang_3_9")] - const CXObjCPropertyAttr_class = 4096; - } -} - -cenum! { - enum CXReparse_Flags { - const CXReparse_None = 0; - } -} - -cenum! { - enum CXSaveTranslationUnit_Flags { - const CXSaveTranslationUnit_None = 0; - } -} - -cenum! { - #[cfg(feature="gte_clang_7_0")] - enum CXSymbolRole { - const CXSymbolRole_None = 0; - const CXSymbolRole_Declaration = 1; - const CXSymbolRole_Definition = 2; - const CXSymbolRole_Reference = 4; - const CXSymbolRole_Read = 8; - const CXSymbolRole_Write = 16; - const CXSymbolRole_Call = 32; - const CXSymbolRole_Dynamic = 64; - const CXSymbolRole_AddressOf = 128; - const CXSymbolRole_Implicit = 256; - } -} - -cenum! 
{ - enum CXTranslationUnit_Flags { - const CXTranslationUnit_None = 0; - const CXTranslationUnit_DetailedPreprocessingRecord = 1; - const CXTranslationUnit_Incomplete = 2; - const CXTranslationUnit_PrecompiledPreamble = 4; - const CXTranslationUnit_CacheCompletionResults = 8; - const CXTranslationUnit_ForSerialization = 16; - const CXTranslationUnit_CXXChainedPCH = 32; - const CXTranslationUnit_SkipFunctionBodies = 64; - const CXTranslationUnit_IncludeBriefCommentsInCodeCompletion = 128; - #[cfg(feature="gte_clang_3_8")] - const CXTranslationUnit_CreatePreambleOnFirstParse = 256; - #[cfg(feature="gte_clang_3_9")] - const CXTranslationUnit_KeepGoing = 512; - #[cfg(feature="gte_clang_5_0")] - const CXTranslationUnit_SingleFileParse = 1024; - #[cfg(feature="gte_clang_7_0")] - const CXTranslationUnit_LimitSkipFunctionBodiesToPreamble = 2048; - #[cfg(feature="gte_clang_8_0")] - const CXTranslationUnit_IncludeAttributedTypes = 4096; - #[cfg(feature="gte_clang_8_0")] - const CXTranslationUnit_VisitImplicitAttributes = 8192; - } -} - -//================================================ -// Structs -//================================================ - -// Opaque ________________________________________ - -macro_rules! opaque { - ($name:ident) => { - pub type $name = *mut c_void; - }; -} - -opaque!(CXCompilationDatabase); -opaque!(CXCompileCommand); -opaque!(CXCompileCommands); -opaque!(CXCompletionString); -opaque!(CXCursorSet); -opaque!(CXDiagnostic); -opaque!(CXDiagnosticSet); -#[cfg(feature = "gte_clang_3_9")] -opaque!(CXEvalResult); -opaque!(CXFile); -opaque!(CXIdxClientASTFile); -opaque!(CXIdxClientContainer); -opaque!(CXIdxClientEntity); -opaque!(CXIdxClientFile); -opaque!(CXIndex); -opaque!(CXIndexAction); -opaque!(CXModule); -#[cfg(feature = "gte_clang_7_0")] -opaque!(CXPrintingPolicy); -opaque!(CXRemapping); -#[cfg(feature = "gte_clang_5_0")] -opaque!(CXTargetInfo); -opaque!(CXTranslationUnit); - -// Transparent ___________________________________ - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXCodeCompleteResults { - pub Results: *mut CXCompletionResult, - pub NumResults: c_uint, -} - -default!(CXCodeCompleteResults); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXComment { - pub ASTNode: *const c_void, - pub TranslationUnit: CXTranslationUnit, -} - -default!(CXComment); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXCompletionResult { - pub CursorKind: CXCursorKind, - pub CompletionString: CXCompletionString, -} - -default!(CXCompletionResult); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXCursor { - pub kind: CXCursorKind, - pub xdata: c_int, - pub data: [*const c_void; 3], -} - -default!(CXCursor); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXCursorAndRangeVisitor { - pub context: *mut c_void, - pub visit: extern "C" fn(*mut c_void, CXCursor, CXSourceRange) -> CXVisitorResult, -} - -default!(CXCursorAndRangeVisitor); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXFileUniqueID { - pub data: [c_ulonglong; 3], -} - -default!(CXFileUniqueID); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxAttrInfo { - pub kind: CXIdxAttrKind, - pub cursor: CXCursor, - pub loc: CXIdxLoc, -} - -default!(CXIdxAttrInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxBaseClassInfo { - pub base: *const CXIdxEntityInfo, - pub cursor: CXCursor, - pub loc: CXIdxLoc, -} - -default!(CXIdxBaseClassInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxCXXClassDeclInfo { - pub declInfo: *const 
CXIdxDeclInfo, - pub bases: *const *const CXIdxBaseClassInfo, - pub numBases: c_uint, -} - -default!(CXIdxCXXClassDeclInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxContainerInfo { - pub cursor: CXCursor, -} - -default!(CXIdxContainerInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxDeclInfo { - pub entityInfo: *const CXIdxEntityInfo, - pub cursor: CXCursor, - pub loc: CXIdxLoc, - pub semanticContainer: *const CXIdxContainerInfo, - pub lexicalContainer: *const CXIdxContainerInfo, - pub isRedeclaration: c_int, - pub isDefinition: c_int, - pub isContainer: c_int, - pub declAsContainer: *const CXIdxContainerInfo, - pub isImplicit: c_int, - pub attributes: *const *const CXIdxAttrInfo, - pub numAttributes: c_uint, - pub flags: c_uint, -} - -default!(CXIdxDeclInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxEntityInfo { - pub kind: CXIdxEntityKind, - pub templateKind: CXIdxEntityCXXTemplateKind, - pub lang: CXIdxEntityLanguage, - pub name: *const c_char, - pub USR: *const c_char, - pub cursor: CXCursor, - pub attributes: *const *const CXIdxAttrInfo, - pub numAttributes: c_uint, -} - -default!(CXIdxEntityInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxEntityRefInfo { - pub kind: CXIdxEntityRefKind, - pub cursor: CXCursor, - pub loc: CXIdxLoc, - pub referencedEntity: *const CXIdxEntityInfo, - pub parentEntity: *const CXIdxEntityInfo, - pub container: *const CXIdxContainerInfo, - #[cfg(feature = "gte_clang_7_0")] - pub role: CXSymbolRole, -} - -default!(CXIdxEntityRefInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxIBOutletCollectionAttrInfo { - pub attrInfo: *const CXIdxAttrInfo, - pub objcClass: *const CXIdxEntityInfo, - pub classCursor: CXCursor, - pub classLoc: CXIdxLoc, -} - -default!(CXIdxIBOutletCollectionAttrInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxImportedASTFileInfo { - pub file: CXFile, - pub module: CXModule, - pub loc: CXIdxLoc, - pub isImplicit: c_int, -} - -default!(CXIdxImportedASTFileInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxIncludedFileInfo { - pub hashLoc: CXIdxLoc, - pub filename: *const c_char, - pub file: CXFile, - pub isImport: c_int, - pub isAngled: c_int, - pub isModuleImport: c_int, -} - -default!(CXIdxIncludedFileInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxLoc { - pub ptr_data: [*mut c_void; 2], - pub int_data: c_uint, -} - -default!(CXIdxLoc); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxObjCCategoryDeclInfo { - pub containerInfo: *const CXIdxObjCContainerDeclInfo, - pub objcClass: *const CXIdxEntityInfo, - pub classCursor: CXCursor, - pub classLoc: CXIdxLoc, - pub protocols: *const CXIdxObjCProtocolRefListInfo, -} - -default!(CXIdxObjCCategoryDeclInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxObjCContainerDeclInfo { - pub declInfo: *const CXIdxDeclInfo, - pub kind: CXIdxObjCContainerKind, -} - -default!(CXIdxObjCContainerDeclInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxObjCInterfaceDeclInfo { - pub containerInfo: *const CXIdxObjCContainerDeclInfo, - pub superInfo: *const CXIdxBaseClassInfo, - pub protocols: *const CXIdxObjCProtocolRefListInfo, -} - -default!(CXIdxObjCInterfaceDeclInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxObjCPropertyDeclInfo { - pub declInfo: *const CXIdxDeclInfo, - pub getter: *const CXIdxEntityInfo, - pub setter: *const CXIdxEntityInfo, -} - -default!(CXIdxObjCPropertyDeclInfo); - 
-#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxObjCProtocolRefInfo { - pub protocol: *const CXIdxEntityInfo, - pub cursor: CXCursor, - pub loc: CXIdxLoc, -} - -default!(CXIdxObjCProtocolRefInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXIdxObjCProtocolRefListInfo { - pub protocols: *const *const CXIdxObjCProtocolRefInfo, - pub numProtocols: c_uint, -} - -default!(CXIdxObjCProtocolRefListInfo); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXPlatformAvailability { - pub Platform: CXString, - pub Introduced: CXVersion, - pub Deprecated: CXVersion, - pub Obsoleted: CXVersion, - pub Unavailable: c_int, - pub Message: CXString, -} - -default!(CXPlatformAvailability); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXSourceLocation { - pub ptr_data: [*const c_void; 2], - pub int_data: c_uint, -} - -default!(CXSourceLocation); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXSourceRange { - pub ptr_data: [*const c_void; 2], - pub begin_int_data: c_uint, - pub end_int_data: c_uint, -} - -default!(CXSourceRange); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXSourceRangeList { - pub count: c_uint, - pub ranges: *mut CXSourceRange, -} - -default!(CXSourceRangeList); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXString { - pub data: *const c_void, - pub private_flags: c_uint, -} - -default!(CXString); - -#[cfg(feature = "gte_clang_3_8")] -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXStringSet { - pub Strings: *mut CXString, - pub Count: c_uint, -} - -#[cfg(feature = "gte_clang_3_8")] -default!(CXStringSet); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXTUResourceUsage { - pub data: *mut c_void, - pub numEntries: c_uint, - pub entries: *mut CXTUResourceUsageEntry, -} - -default!(CXTUResourceUsage); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXTUResourceUsageEntry { - pub kind: CXTUResourceUsageKind, - pub amount: c_ulong, -} - -default!(CXTUResourceUsageEntry); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXToken { - pub int_data: [c_uint; 4], - pub ptr_data: *mut c_void, -} - -default!(CXToken); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXType { - pub kind: CXTypeKind, - pub data: [*mut c_void; 2], -} - -default!(CXType); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXUnsavedFile { - pub Filename: *const c_char, - pub Contents: *const c_char, - pub Length: c_ulong, -} - -default!(CXUnsavedFile); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct CXVersion { - pub Major: c_int, - pub Minor: c_int, - pub Subminor: c_int, -} - -default!(CXVersion); - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -#[rustfmt::skip] -pub struct IndexerCallbacks { - pub abortQuery: extern "C" fn(CXClientData, *mut c_void) -> c_int, - pub diagnostic: extern "C" fn(CXClientData, CXDiagnosticSet, *mut c_void), - pub enteredMainFile: extern "C" fn(CXClientData, CXFile, *mut c_void) -> CXIdxClientFile, - pub ppIncludedFile: extern "C" fn(CXClientData, *const CXIdxIncludedFileInfo) -> CXIdxClientFile, - pub importedASTFile: extern "C" fn(CXClientData, *const CXIdxImportedASTFileInfo) -> CXIdxClientASTFile, - pub startedTranslationUnit: extern "C" fn(CXClientData, *mut c_void) -> CXIdxClientContainer, - pub indexDeclaration: extern "C" fn(CXClientData, *const CXIdxDeclInfo), - pub indexEntityReference: extern "C" fn(CXClientData, *const CXIdxEntityRefInfo), -} - -default!(IndexerCallbacks); - -//================================================ -// Functions 
-//================================================ - -link! { - pub fn clang_CXCursorSet_contains(set: CXCursorSet, cursor: CXCursor) -> c_uint; - pub fn clang_CXCursorSet_insert(set: CXCursorSet, cursor: CXCursor) -> c_uint; - pub fn clang_CXIndex_getGlobalOptions(index: CXIndex) -> CXGlobalOptFlags; - pub fn clang_CXIndex_setGlobalOptions(index: CXIndex, flags: CXGlobalOptFlags); - #[cfg(feature="gte_clang_6_0")] - pub fn clang_CXIndex_setInvocationEmissionPathOption(index: CXIndex, path: *const c_char); - #[cfg(feature="gte_clang_3_9")] - pub fn clang_CXXConstructor_isConvertingConstructor(cursor: CXCursor) -> c_uint; - #[cfg(feature="gte_clang_3_9")] - pub fn clang_CXXConstructor_isCopyConstructor(cursor: CXCursor) -> c_uint; - #[cfg(feature="gte_clang_3_9")] - pub fn clang_CXXConstructor_isDefaultConstructor(cursor: CXCursor) -> c_uint; - #[cfg(feature="gte_clang_3_9")] - pub fn clang_CXXConstructor_isMoveConstructor(cursor: CXCursor) -> c_uint; - #[cfg(feature="gte_clang_3_8")] - pub fn clang_CXXField_isMutable(cursor: CXCursor) -> c_uint; - pub fn clang_CXXMethod_isConst(cursor: CXCursor) -> c_uint; - #[cfg(feature="gte_clang_3_9")] - pub fn clang_CXXMethod_isDefaulted(cursor: CXCursor) -> c_uint; - pub fn clang_CXXMethod_isPureVirtual(cursor: CXCursor) -> c_uint; - pub fn clang_CXXMethod_isStatic(cursor: CXCursor) -> c_uint; - pub fn clang_CXXMethod_isVirtual(cursor: CXCursor) -> c_uint; - #[cfg(feature="gte_clang_6_0")] - pub fn clang_CXXRecord_isAbstract(cursor: CXCursor) -> c_uint; - pub fn clang_CompilationDatabase_dispose(database: CXCompilationDatabase); - pub fn clang_CompilationDatabase_fromDirectory(directory: *const c_char, error: *mut CXCompilationDatabase_Error) -> CXCompilationDatabase; - pub fn clang_CompilationDatabase_getAllCompileCommands(database: CXCompilationDatabase) -> CXCompileCommands; - pub fn clang_CompilationDatabase_getCompileCommands(database: CXCompilationDatabase, filename: *const c_char) -> CXCompileCommands; - pub fn clang_CompileCommand_getArg(command: CXCompileCommand, index: c_uint) -> CXString; - pub fn clang_CompileCommand_getDirectory(command: CXCompileCommand) -> CXString; - #[cfg(feature="gte_clang_3_8")] - pub fn clang_CompileCommand_getFilename(command: CXCompileCommand) -> CXString; - #[cfg(feature="gte_clang_3_8")] - pub fn clang_CompileCommand_getMappedSourceContent(command: CXCompileCommand, index: c_uint) -> CXString; - #[cfg(feature="gte_clang_3_8")] - pub fn clang_CompileCommand_getMappedSourcePath(command: CXCompileCommand, index: c_uint) -> CXString; - pub fn clang_CompileCommand_getNumArgs(command: CXCompileCommand) -> c_uint; - pub fn clang_CompileCommands_dispose(command: CXCompileCommands); - pub fn clang_CompileCommands_getCommand(command: CXCompileCommands, index: c_uint) -> CXCompileCommand; - pub fn clang_CompileCommands_getSize(command: CXCompileCommands) -> c_uint; - #[cfg(feature="gte_clang_3_9")] - pub fn clang_Cursor_Evaluate(cursor: CXCursor) -> CXEvalResult; - pub fn clang_Cursor_getArgument(cursor: CXCursor, index: c_uint) -> CXCursor; - pub fn clang_Cursor_getBriefCommentText(cursor: CXCursor) -> CXString; - #[cfg(feature="gte_clang_3_8")] - pub fn clang_Cursor_getCXXManglings(cursor: CXCursor) -> *mut CXStringSet; - pub fn clang_Cursor_getCommentRange(cursor: CXCursor) -> CXSourceRange; - #[cfg(feature="gte_clang_3_6")] - pub fn clang_Cursor_getMangling(cursor: CXCursor) -> CXString; - pub fn clang_Cursor_getModule(cursor: CXCursor) -> CXModule; - pub fn clang_Cursor_getNumArguments(cursor: CXCursor) -> c_int; - 
#[cfg(feature="gte_clang_3_6")] - pub fn clang_Cursor_getNumTemplateArguments(cursor: CXCursor) -> c_int; - pub fn clang_Cursor_getObjCDeclQualifiers(cursor: CXCursor) -> CXObjCDeclQualifierKind; - #[cfg(feature="gte_clang_6_0")] - pub fn clang_Cursor_getObjCManglings(cursor: CXCursor) -> *mut CXStringSet; - pub fn clang_Cursor_getObjCPropertyAttributes(cursor: CXCursor, reserved: c_uint) -> CXObjCPropertyAttrKind; - #[cfg(feature="gte_clang_8_0")] - pub fn clang_Cursor_getObjCPropertyGetterName(cursor: CXCursor) -> CXString; - #[cfg(feature="gte_clang_8_0")] - pub fn clang_Cursor_getObjCPropertySetterName(cursor: CXCursor) -> CXString; - pub fn clang_Cursor_getObjCSelectorIndex(cursor: CXCursor) -> c_int; - #[cfg(feature="gte_clang_3_7")] - pub fn clang_Cursor_getOffsetOfField(cursor: CXCursor) -> c_longlong; - pub fn clang_Cursor_getRawCommentText(cursor: CXCursor) -> CXString; - pub fn clang_Cursor_getReceiverType(cursor: CXCursor) -> CXType; - pub fn clang_Cursor_getSpellingNameRange(cursor: CXCursor, index: c_uint, reserved: c_uint) -> CXSourceRange; - #[cfg(feature="gte_clang_3_6")] - pub fn clang_Cursor_getStorageClass(cursor: CXCursor) -> CX_StorageClass; - #[cfg(feature="gte_clang_3_6")] - pub fn clang_Cursor_getTemplateArgumentKind(cursor: CXCursor, index: c_uint) -> CXTemplateArgumentKind; - #[cfg(feature="gte_clang_3_6")] - pub fn clang_Cursor_getTemplateArgumentType(cursor: CXCursor, index: c_uint) -> CXType; - #[cfg(feature="gte_clang_3_6")] - pub fn clang_Cursor_getTemplateArgumentUnsignedValue(cursor: CXCursor, index: c_uint) -> c_ulonglong; - #[cfg(feature="gte_clang_3_6")] - pub fn clang_Cursor_getTemplateArgumentValue(cursor: CXCursor, index: c_uint) -> c_longlong; - pub fn clang_Cursor_getTranslationUnit(cursor: CXCursor) -> CXTranslationUnit; - #[cfg(feature="gte_clang_3_9")] - pub fn clang_Cursor_hasAttrs(cursor: CXCursor) -> c_uint; - #[cfg(feature="gte_clang_3_7")] - pub fn clang_Cursor_isAnonymous(cursor: CXCursor) -> c_uint; - pub fn clang_Cursor_isBitField(cursor: CXCursor) -> c_uint; - pub fn clang_Cursor_isDynamicCall(cursor: CXCursor) -> c_int; - #[cfg(feature="gte_clang_5_0")] - pub fn clang_Cursor_isExternalSymbol(cursor: CXCursor, language: *mut CXString, from: *mut CXString, generated: *mut c_uint) -> c_uint; - #[cfg(feature="gte_clang_3_9")] - pub fn clang_Cursor_isFunctionInlined(cursor: CXCursor) -> c_uint; - #[cfg(feature="gte_clang_3_9")] - pub fn clang_Cursor_isMacroBuiltin(cursor: CXCursor) -> c_uint; - #[cfg(feature="gte_clang_3_9")] - pub fn clang_Cursor_isMacroFunctionLike(cursor: CXCursor) -> c_uint; - pub fn clang_Cursor_isNull(cursor: CXCursor) -> c_int; - pub fn clang_Cursor_isObjCOptional(cursor: CXCursor) -> c_uint; - pub fn clang_Cursor_isVariadic(cursor: CXCursor) -> c_uint; - #[cfg(feature="gte_clang_5_0")] - pub fn clang_EnumDecl_isScoped(cursor: CXCursor) -> c_uint; - #[cfg(feature="gte_clang_3_9")] - pub fn clang_EvalResult_dispose(result: CXEvalResult); - #[cfg(feature="gte_clang_3_9")] - pub fn clang_EvalResult_getAsDouble(result: CXEvalResult) -> libc::c_double; - #[cfg(feature="gte_clang_3_9")] - pub fn clang_EvalResult_getAsInt(result: CXEvalResult) -> c_int; - #[cfg(feature="gte_clang_4_0")] - pub fn clang_EvalResult_getAsLongLong(result: CXEvalResult) -> c_longlong; - #[cfg(feature="gte_clang_3_9")] - pub fn clang_EvalResult_getAsStr(result: CXEvalResult) -> *const c_char; - #[cfg(feature="gte_clang_4_0")] - pub fn clang_EvalResult_getAsUnsigned(result: CXEvalResult) -> c_ulonglong; - #[cfg(feature="gte_clang_3_9")] - pub fn 
clang_EvalResult_getKind(result: CXEvalResult) -> CXEvalResultKind; - #[cfg(feature="gte_clang_4_0")] - pub fn clang_EvalResult_isUnsignedInt(result: CXEvalResult) -> c_uint; - #[cfg(feature="gte_clang_3_6")] - pub fn clang_File_isEqual(left: CXFile, right: CXFile) -> c_int; - #[cfg(feature="gte_clang_7_0")] - pub fn clang_File_tryGetRealPathName(file: CXFile) -> CXString; - pub fn clang_IndexAction_create(index: CXIndex) -> CXIndexAction; - pub fn clang_IndexAction_dispose(index: CXIndexAction); - pub fn clang_Location_isFromMainFile(location: CXSourceLocation) -> c_int; - pub fn clang_Location_isInSystemHeader(location: CXSourceLocation) -> c_int; - pub fn clang_Module_getASTFile(module: CXModule) -> CXFile; - pub fn clang_Module_getFullName(module: CXModule) -> CXString; - pub fn clang_Module_getName(module: CXModule) -> CXString; - pub fn clang_Module_getNumTopLevelHeaders(tu: CXTranslationUnit, module: CXModule) -> c_uint; - pub fn clang_Module_getParent(module: CXModule) -> CXModule; - pub fn clang_Module_getTopLevelHeader(tu: CXTranslationUnit, module: CXModule, index: c_uint) -> CXFile; - pub fn clang_Module_isSystem(module: CXModule) -> c_int; - #[cfg(feature="gte_clang_7_0")] - pub fn clang_PrintingPolicy_dispose(policy: CXPrintingPolicy); - #[cfg(feature="gte_clang_7_0")] - pub fn clang_PrintingPolicy_getProperty(policy: CXPrintingPolicy, property: CXPrintingPolicyProperty) -> c_uint; - #[cfg(feature="gte_clang_7_0")] - pub fn clang_PrintingPolicy_setProperty(policy: CXPrintingPolicy, property: CXPrintingPolicyProperty, value: c_uint); - pub fn clang_Range_isNull(range: CXSourceRange) -> c_int; - #[cfg(feature="gte_clang_5_0")] - pub fn clang_TargetInfo_dispose(info: CXTargetInfo); - #[cfg(feature="gte_clang_5_0")] - pub fn clang_TargetInfo_getPointerWidth(info: CXTargetInfo) -> c_int; - #[cfg(feature="gte_clang_5_0")] - pub fn clang_TargetInfo_getTriple(info: CXTargetInfo) -> CXString; - pub fn clang_Type_getAlignOf(type_: CXType) -> c_longlong; - pub fn clang_Type_getCXXRefQualifier(type_: CXType) -> CXRefQualifierKind; - pub fn clang_Type_getClassType(type_: CXType) -> CXType; - #[cfg(feature="gte_clang_3_9")] - pub fn clang_Type_getNamedType(type_: CXType) -> CXType; - pub fn clang_Type_getNumTemplateArguments(type_: CXType) -> c_int; - #[cfg(feature="gte_clang_8_0")] - pub fn clang_Type_getObjCObjectBaseType(type_: CXType) -> CXType; - #[cfg(feature="gte_clang_8_0")] - pub fn clang_Type_getNumObjCProtocolRefs(type_: CXType) -> c_uint; - #[cfg(feature="gte_clang_8_0")] - pub fn clang_Type_getObjCProtocolDecl(type_: CXType, index: c_uint) -> CXCursor; - #[cfg(feature="gte_clang_8_0")] - pub fn clang_Type_getNumObjCTypeArgs(type_: CXType) -> c_uint; - #[cfg(feature="gte_clang_8_0")] - pub fn clang_Type_getObjCTypeArg(type_: CXType, index: c_uint) -> CXType; - #[cfg(feature="gte_clang_3_9")] - pub fn clang_Type_getObjCEncoding(type_: CXType) -> CXString; - pub fn clang_Type_getOffsetOf(type_: CXType, field: *const c_char) -> c_longlong; - #[cfg(feature="gte_clang_8_0")] - pub fn clang_Type_getModifiedType(type_: CXType) -> CXType; - pub fn clang_Type_getSizeOf(type_: CXType) -> c_longlong; - pub fn clang_Type_getTemplateArgumentAsType(type_: CXType, index: c_uint) -> CXType; - #[cfg(feature="gte_clang_5_0")] - pub fn clang_Type_isTransparentTagTypedef(type_: CXType) -> c_uint; - #[cfg(feature="gte_clang_8_0")] - pub fn clang_Type_getNullability(type_: CXType) -> CXTypeNullabilityKind; - #[cfg(feature="gte_clang_3_7")] - pub fn clang_Type_visitFields(type_: CXType, visitor: 
CXFieldVisitor, data: CXClientData) -> CXVisitorResult; - pub fn clang_annotateTokens(tu: CXTranslationUnit, tokens: *mut CXToken, n_tokens: c_uint, cursors: *mut CXCursor); - pub fn clang_codeCompleteAt(tu: CXTranslationUnit, file: *const c_char, line: c_uint, column: c_uint, unsaved: *mut CXUnsavedFile, n_unsaved: c_uint, flags: CXCodeComplete_Flags) -> *mut CXCodeCompleteResults; - pub fn clang_codeCompleteGetContainerKind(results: *mut CXCodeCompleteResults, incomplete: *mut c_uint) -> CXCursorKind; - pub fn clang_codeCompleteGetContainerUSR(results: *mut CXCodeCompleteResults) -> CXString; - pub fn clang_codeCompleteGetContexts(results: *mut CXCodeCompleteResults) -> c_ulonglong; - pub fn clang_codeCompleteGetDiagnostic(results: *mut CXCodeCompleteResults, index: c_uint) -> CXDiagnostic; - pub fn clang_codeCompleteGetNumDiagnostics(results: *mut CXCodeCompleteResults) -> c_uint; - pub fn clang_codeCompleteGetObjCSelector(results: *mut CXCodeCompleteResults) -> CXString; - pub fn clang_constructUSR_ObjCCategory(class: *const c_char, category: *const c_char) -> CXString; - pub fn clang_constructUSR_ObjCClass(class: *const c_char) -> CXString; - pub fn clang_constructUSR_ObjCIvar(name: *const c_char, usr: CXString) -> CXString; - pub fn clang_constructUSR_ObjCMethod(name: *const c_char, instance: c_uint, usr: CXString) -> CXString; - pub fn clang_constructUSR_ObjCProperty(property: *const c_char, usr: CXString) -> CXString; - pub fn clang_constructUSR_ObjCProtocol(protocol: *const c_char) -> CXString; - pub fn clang_createCXCursorSet() -> CXCursorSet; - pub fn clang_createIndex(exclude: c_int, display: c_int) -> CXIndex; - pub fn clang_createTranslationUnit(index: CXIndex, file: *const c_char) -> CXTranslationUnit; - pub fn clang_createTranslationUnit2(index: CXIndex, file: *const c_char, tu: *mut CXTranslationUnit) -> CXErrorCode; - pub fn clang_createTranslationUnitFromSourceFile(index: CXIndex, file: *const c_char, n_arguments: c_int, arguments: *const *const c_char, n_unsaved: c_uint, unsaved: *mut CXUnsavedFile) -> CXTranslationUnit; - pub fn clang_defaultCodeCompleteOptions() -> CXCodeComplete_Flags; - pub fn clang_defaultDiagnosticDisplayOptions() -> CXDiagnosticDisplayOptions; - pub fn clang_defaultEditingTranslationUnitOptions() -> CXTranslationUnit_Flags; - pub fn clang_defaultReparseOptions(tu: CXTranslationUnit) -> CXReparse_Flags; - pub fn clang_defaultSaveOptions(tu: CXTranslationUnit) -> CXSaveTranslationUnit_Flags; - pub fn clang_disposeCXCursorSet(set: CXCursorSet); - pub fn clang_disposeCXPlatformAvailability(availability: *mut CXPlatformAvailability); - pub fn clang_disposeCXTUResourceUsage(usage: CXTUResourceUsage); - pub fn clang_disposeCodeCompleteResults(results: *mut CXCodeCompleteResults); - pub fn clang_disposeDiagnostic(diagnostic: CXDiagnostic); - pub fn clang_disposeDiagnosticSet(diagnostic: CXDiagnosticSet); - pub fn clang_disposeIndex(index: CXIndex); - pub fn clang_disposeOverriddenCursors(cursors: *mut CXCursor); - pub fn clang_disposeSourceRangeList(list: *mut CXSourceRangeList); - pub fn clang_disposeString(string: CXString); - #[cfg(feature="gte_clang_3_8")] - pub fn clang_disposeStringSet(set: *mut CXStringSet); - pub fn clang_disposeTokens(tu: CXTranslationUnit, tokens: *mut CXToken, n_tokens: c_uint); - pub fn clang_disposeTranslationUnit(tu: CXTranslationUnit); - pub fn clang_enableStackTraces(); - pub fn clang_equalCursors(left: CXCursor, right: CXCursor) -> c_uint; - pub fn clang_equalLocations(left: CXSourceLocation, right: CXSourceLocation) -> 
c_uint; - pub fn clang_equalRanges(left: CXSourceRange, right: CXSourceRange) -> c_uint; - pub fn clang_equalTypes(left: CXType, right: CXType) -> c_uint; - pub fn clang_executeOnThread(function: extern fn(*mut c_void), data: *mut c_void, stack: c_uint); - pub fn clang_findIncludesInFile(tu: CXTranslationUnit, file: CXFile, cursor: CXCursorAndRangeVisitor) -> CXResult; - pub fn clang_findReferencesInFile(cursor: CXCursor, file: CXFile, visitor: CXCursorAndRangeVisitor) -> CXResult; - pub fn clang_formatDiagnostic(diagnostic: CXDiagnostic, flags: CXDiagnosticDisplayOptions) -> CXString; - #[cfg(feature="gte_clang_3_7")] - pub fn clang_free(buffer: *mut c_void); - #[cfg(feature="gte_clang_5_0")] - pub fn clang_getAddressSpace(type_: CXType) -> c_uint; - #[cfg(feature="gte_clang_4_0")] - pub fn clang_getAllSkippedRanges(tu: CXTranslationUnit) -> *mut CXSourceRangeList; - pub fn clang_getArgType(type_: CXType, index: c_uint) -> CXType; - pub fn clang_getArrayElementType(type_: CXType) -> CXType; - pub fn clang_getArraySize(type_: CXType) -> c_longlong; - pub fn clang_getCString(string: CXString) -> *const c_char; - pub fn clang_getCXTUResourceUsage(tu: CXTranslationUnit) -> CXTUResourceUsage; - pub fn clang_getCXXAccessSpecifier(cursor: CXCursor) -> CX_CXXAccessSpecifier; - pub fn clang_getCanonicalCursor(cursor: CXCursor) -> CXCursor; - pub fn clang_getCanonicalType(type_: CXType) -> CXType; - pub fn clang_getChildDiagnostics(diagnostic: CXDiagnostic) -> CXDiagnosticSet; - pub fn clang_getClangVersion() -> CXString; - pub fn clang_getCompletionAnnotation(string: CXCompletionString, index: c_uint) -> CXString; - pub fn clang_getCompletionAvailability(string: CXCompletionString) -> CXAvailabilityKind; - pub fn clang_getCompletionBriefComment(string: CXCompletionString) -> CXString; - pub fn clang_getCompletionChunkCompletionString(string: CXCompletionString, index: c_uint) -> CXCompletionString; - pub fn clang_getCompletionChunkKind(string: CXCompletionString, index: c_uint) -> CXCompletionChunkKind; - pub fn clang_getCompletionChunkText(string: CXCompletionString, index: c_uint) -> CXString; - #[cfg(feature="gte_clang_7_0")] - pub fn clang_getCompletionFixIt(results: *mut CXCodeCompleteResults, completion_index: c_uint, fixit_index: c_uint, range: *mut CXSourceRange) -> CXString; - pub fn clang_getCompletionNumAnnotations(string: CXCompletionString) -> c_uint; - #[cfg(feature="gte_clang_7_0")] - pub fn clang_getCompletionNumFixIts(results: *mut CXCodeCompleteResults, completion_index: c_uint) -> c_uint; - pub fn clang_getCompletionParent(string: CXCompletionString, kind: *mut CXCursorKind) -> CXString; - pub fn clang_getCompletionPriority(string: CXCompletionString) -> c_uint; - pub fn clang_getCursor(tu: CXTranslationUnit, location: CXSourceLocation) -> CXCursor; - pub fn clang_getCursorAvailability(cursor: CXCursor) -> CXAvailabilityKind; - pub fn clang_getCursorCompletionString(cursor: CXCursor) -> CXCompletionString; - pub fn clang_getCursorDefinition(cursor: CXCursor) -> CXCursor; - pub fn clang_getCursorDisplayName(cursor: CXCursor) -> CXString; - #[cfg(feature="gte_clang_5_0")] - pub fn clang_getCursorExceptionSpecificationType(cursor: CXCursor) -> CXCursor_ExceptionSpecificationKind; - pub fn clang_getCursorExtent(cursor: CXCursor) -> CXSourceRange; - pub fn clang_getCursorKind(cursor: CXCursor) -> CXCursorKind; - pub fn clang_getCursorKindSpelling(kind: CXCursorKind) -> CXString; - pub fn clang_getCursorLanguage(cursor: CXCursor) -> CXLanguageKind; - pub fn 
clang_getCursorLexicalParent(cursor: CXCursor) -> CXCursor; - pub fn clang_getCursorLinkage(cursor: CXCursor) -> CXLinkageKind; - pub fn clang_getCursorLocation(cursor: CXCursor) -> CXSourceLocation; - pub fn clang_getCursorPlatformAvailability(cursor: CXCursor, deprecated: *mut c_int, deprecated_message: *mut CXString, unavailable: *mut c_int, unavailable_message: *mut CXString, availability: *mut CXPlatformAvailability, n_availability: c_int) -> c_int; - #[cfg(feature="gte_clang_7_0")] - pub fn clang_getCursorPrettyPrinted(cursor: CXCursor, policy: CXPrintingPolicy) -> CXString; - #[cfg(feature="gte_clang_7_0")] - pub fn clang_getCursorPrintingPolicy(cursor: CXCursor) -> CXPrintingPolicy; - pub fn clang_getCursorReferenceNameRange(cursor: CXCursor, flags: CXNameRefFlags, index: c_uint) -> CXSourceRange; - pub fn clang_getCursorReferenced(cursor: CXCursor) -> CXCursor; - pub fn clang_getCursorResultType(cursor: CXCursor) -> CXType; - pub fn clang_getCursorSemanticParent(cursor: CXCursor) -> CXCursor; - pub fn clang_getCursorSpelling(cursor: CXCursor) -> CXString; - #[cfg(feature="gte_clang_6_0")] - pub fn clang_getCursorTLSKind(cursor: CXCursor) -> CXTLSKind; - pub fn clang_getCursorType(cursor: CXCursor) -> CXType; - pub fn clang_getCursorUSR(cursor: CXCursor) -> CXString; - #[cfg(feature="gte_clang_3_8")] - pub fn clang_getCursorVisibility(cursor: CXCursor) -> CXVisibilityKind; - pub fn clang_getDeclObjCTypeEncoding(cursor: CXCursor) -> CXString; - pub fn clang_getDefinitionSpellingAndExtent(cursor: CXCursor, start: *mut *const c_char, end: *mut *const c_char, start_line: *mut c_uint, start_column: *mut c_uint, end_line: *mut c_uint, end_column: *mut c_uint); - pub fn clang_getDiagnostic(tu: CXTranslationUnit, index: c_uint) -> CXDiagnostic; - pub fn clang_getDiagnosticCategory(diagnostic: CXDiagnostic) -> c_uint; - pub fn clang_getDiagnosticCategoryName(category: c_uint) -> CXString; - pub fn clang_getDiagnosticCategoryText(diagnostic: CXDiagnostic) -> CXString; - pub fn clang_getDiagnosticFixIt(diagnostic: CXDiagnostic, index: c_uint, range: *mut CXSourceRange) -> CXString; - pub fn clang_getDiagnosticInSet(diagnostic: CXDiagnosticSet, index: c_uint) -> CXDiagnostic; - pub fn clang_getDiagnosticLocation(diagnostic: CXDiagnostic) -> CXSourceLocation; - pub fn clang_getDiagnosticNumFixIts(diagnostic: CXDiagnostic) -> c_uint; - pub fn clang_getDiagnosticNumRanges(diagnostic: CXDiagnostic) -> c_uint; - pub fn clang_getDiagnosticOption(diagnostic: CXDiagnostic, option: *mut CXString) -> CXString; - pub fn clang_getDiagnosticRange(diagnostic: CXDiagnostic, index: c_uint) -> CXSourceRange; - pub fn clang_getDiagnosticSetFromTU(tu: CXTranslationUnit) -> CXDiagnosticSet; - pub fn clang_getDiagnosticSeverity(diagnostic: CXDiagnostic) -> CXDiagnosticSeverity; - pub fn clang_getDiagnosticSpelling(diagnostic: CXDiagnostic) -> CXString; - pub fn clang_getElementType(type_: CXType) -> CXType; - pub fn clang_getEnumConstantDeclUnsignedValue(cursor: CXCursor) -> c_ulonglong; - pub fn clang_getEnumConstantDeclValue(cursor: CXCursor) -> c_longlong; - pub fn clang_getEnumDeclIntegerType(cursor: CXCursor) -> CXType; - #[cfg(feature="gte_clang_5_0")] - pub fn clang_getExceptionSpecificationType(type_: CXType) -> CXCursor_ExceptionSpecificationKind; - pub fn clang_getExpansionLocation(location: CXSourceLocation, file: *mut CXFile, line: *mut c_uint, column: *mut c_uint, offset: *mut c_uint); - pub fn clang_getFieldDeclBitWidth(cursor: CXCursor) -> c_int; - pub fn clang_getFile(tu: CXTranslationUnit, file: 
*const c_char) -> CXFile; - #[cfg(feature="gte_clang_6_0")] - pub fn clang_getFileContents(tu: CXTranslationUnit, file: CXFile, size: *mut size_t) -> *const c_char; - pub fn clang_getFileLocation(location: CXSourceLocation, file: *mut CXFile, line: *mut c_uint, column: *mut c_uint, offset: *mut c_uint); - pub fn clang_getFileName(file: CXFile) -> CXString; - pub fn clang_getFileTime(file: CXFile) -> time_t; - pub fn clang_getFileUniqueID(file: CXFile, id: *mut CXFileUniqueID) -> c_int; - pub fn clang_getFunctionTypeCallingConv(type_: CXType) -> CXCallingConv; - pub fn clang_getIBOutletCollectionType(cursor: CXCursor) -> CXType; - pub fn clang_getIncludedFile(cursor: CXCursor) -> CXFile; - pub fn clang_getInclusions(tu: CXTranslationUnit, visitor: CXInclusionVisitor, data: CXClientData); - pub fn clang_getInstantiationLocation(location: CXSourceLocation, file: *mut CXFile, line: *mut c_uint, column: *mut c_uint, offset: *mut c_uint); - pub fn clang_getLocation(tu: CXTranslationUnit, file: CXFile, line: c_uint, column: c_uint) -> CXSourceLocation; - pub fn clang_getLocationForOffset(tu: CXTranslationUnit, file: CXFile, offset: c_uint) -> CXSourceLocation; - pub fn clang_getModuleForFile(tu: CXTranslationUnit, file: CXFile) -> CXModule; - pub fn clang_getNullCursor() -> CXCursor; - pub fn clang_getNullLocation() -> CXSourceLocation; - pub fn clang_getNullRange() -> CXSourceRange; - pub fn clang_getNumArgTypes(type_: CXType) -> c_int; - pub fn clang_getNumCompletionChunks(string: CXCompletionString) -> c_uint; - pub fn clang_getNumDiagnostics(tu: CXTranslationUnit) -> c_uint; - pub fn clang_getNumDiagnosticsInSet(diagnostic: CXDiagnosticSet) -> c_uint; - pub fn clang_getNumElements(type_: CXType) -> c_longlong; - pub fn clang_getNumOverloadedDecls(cursor: CXCursor) -> c_uint; - pub fn clang_getOverloadedDecl(cursor: CXCursor, index: c_uint) -> CXCursor; - pub fn clang_getOverriddenCursors(cursor: CXCursor, cursors: *mut *mut CXCursor, n_cursors: *mut c_uint); - pub fn clang_getPointeeType(type_: CXType) -> CXType; - pub fn clang_getPresumedLocation(location: CXSourceLocation, file: *mut CXString, line: *mut c_uint, column: *mut c_uint); - pub fn clang_getRange(start: CXSourceLocation, end: CXSourceLocation) -> CXSourceRange; - pub fn clang_getRangeEnd(range: CXSourceRange) -> CXSourceLocation; - pub fn clang_getRangeStart(range: CXSourceRange) -> CXSourceLocation; - pub fn clang_getRemappings(file: *const c_char) -> CXRemapping; - pub fn clang_getRemappingsFromFileList(files: *mut *const c_char, n_files: c_uint) -> CXRemapping; - pub fn clang_getResultType(type_: CXType) -> CXType; - pub fn clang_getSkippedRanges(tu: CXTranslationUnit, file: CXFile) -> *mut CXSourceRangeList; - pub fn clang_getSpecializedCursorTemplate(cursor: CXCursor) -> CXCursor; - pub fn clang_getSpellingLocation(location: CXSourceLocation, file: *mut CXFile, line: *mut c_uint, column: *mut c_uint, offset: *mut c_uint); - pub fn clang_getTUResourceUsageName(kind: CXTUResourceUsageKind) -> *const c_char; - #[cfg(feature="gte_clang_5_0")] - pub fn clang_getTranslationUnitTargetInfo(tu: CXTranslationUnit) -> CXTargetInfo; - pub fn clang_getTemplateCursorKind(cursor: CXCursor) -> CXCursorKind; - pub fn clang_getTokenExtent(tu: CXTranslationUnit, token: CXToken) -> CXSourceRange; - pub fn clang_getTokenKind(token: CXToken) -> CXTokenKind; - pub fn clang_getTokenLocation(tu: CXTranslationUnit, token: CXToken) -> CXSourceLocation; - pub fn clang_getTokenSpelling(tu: CXTranslationUnit, token: CXToken) -> CXString; - pub fn 
clang_getTranslationUnitCursor(tu: CXTranslationUnit) -> CXCursor; - pub fn clang_getTranslationUnitSpelling(tu: CXTranslationUnit) -> CXString; - pub fn clang_getTypeDeclaration(type_: CXType) -> CXCursor; - pub fn clang_getTypeKindSpelling(type_: CXTypeKind) -> CXString; - pub fn clang_getTypeSpelling(type_: CXType) -> CXString; - pub fn clang_getTypedefDeclUnderlyingType(cursor: CXCursor) -> CXType; - #[cfg(feature="gte_clang_5_0")] - pub fn clang_getTypedefName(type_: CXType) -> CXString; - pub fn clang_hashCursor(cursor: CXCursor) -> c_uint; - pub fn clang_indexLoc_getCXSourceLocation(location: CXIdxLoc) -> CXSourceLocation; - pub fn clang_indexLoc_getFileLocation(location: CXIdxLoc, index_file: *mut CXIdxClientFile, file: *mut CXFile, line: *mut c_uint, column: *mut c_uint, offset: *mut c_uint); - pub fn clang_indexSourceFile(index: CXIndexAction, data: CXClientData, callbacks: *mut IndexerCallbacks, n_callbacks: c_uint, index_flags: CXIndexOptFlags, file: *const c_char, arguments: *const *const c_char, n_arguments: c_int, unsaved: *mut CXUnsavedFile, n_unsaved: c_uint, tu: *mut CXTranslationUnit, tu_flags: CXTranslationUnit_Flags) -> CXErrorCode; - #[cfg(feature="gte_clang_3_8")] - pub fn clang_indexSourceFileFullArgv(index: CXIndexAction, data: CXClientData, callbacks: *mut IndexerCallbacks, n_callbacks: c_uint, index_flags: CXIndexOptFlags, file: *const c_char, arguments: *const *const c_char, n_arguments: c_int, unsaved: *mut CXUnsavedFile, n_unsaved: c_uint, tu: *mut CXTranslationUnit, tu_flags: CXTranslationUnit_Flags) -> CXErrorCode; - pub fn clang_indexTranslationUnit(index: CXIndexAction, data: CXClientData, callbacks: *mut IndexerCallbacks, n_callbacks: c_uint, flags: CXIndexOptFlags, tu: CXTranslationUnit) -> c_int; - pub fn clang_index_getCXXClassDeclInfo(info: *const CXIdxDeclInfo) -> *const CXIdxCXXClassDeclInfo; - pub fn clang_index_getClientContainer(info: *const CXIdxContainerInfo) -> CXIdxClientContainer; - pub fn clang_index_getClientEntity(info: *const CXIdxEntityInfo) -> CXIdxClientEntity; - pub fn clang_index_getIBOutletCollectionAttrInfo(info: *const CXIdxAttrInfo) -> *const CXIdxIBOutletCollectionAttrInfo; - pub fn clang_index_getObjCCategoryDeclInfo(info: *const CXIdxDeclInfo) -> *const CXIdxObjCCategoryDeclInfo; - pub fn clang_index_getObjCContainerDeclInfo(info: *const CXIdxDeclInfo) -> *const CXIdxObjCContainerDeclInfo; - pub fn clang_index_getObjCInterfaceDeclInfo(info: *const CXIdxDeclInfo) -> *const CXIdxObjCInterfaceDeclInfo; - pub fn clang_index_getObjCPropertyDeclInfo(info: *const CXIdxDeclInfo) -> *const CXIdxObjCPropertyDeclInfo; - pub fn clang_index_getObjCProtocolRefListInfo(info: *const CXIdxDeclInfo) -> *const CXIdxObjCProtocolRefListInfo; - pub fn clang_index_isEntityObjCContainerKind(info: CXIdxEntityKind) -> c_int; - pub fn clang_index_setClientContainer(info: *const CXIdxContainerInfo, container: CXIdxClientContainer); - pub fn clang_index_setClientEntity(info: *const CXIdxEntityInfo, entity: CXIdxClientEntity); - pub fn clang_isAttribute(kind: CXCursorKind) -> c_uint; - pub fn clang_isConstQualifiedType(type_: CXType) -> c_uint; - pub fn clang_isCursorDefinition(cursor: CXCursor) -> c_uint; - pub fn clang_isDeclaration(kind: CXCursorKind) -> c_uint; - pub fn clang_isExpression(kind: CXCursorKind) -> c_uint; - pub fn clang_isFileMultipleIncludeGuarded(tu: CXTranslationUnit, file: CXFile) -> c_uint; - pub fn clang_isFunctionTypeVariadic(type_: CXType) -> c_uint; - pub fn clang_isInvalid(kind: CXCursorKind) -> c_uint; - 
#[cfg(feature="gte_clang_7_0")] - pub fn clang_isInvalidDeclaration(cursor: CXCursor) -> c_uint; - pub fn clang_isPODType(type_: CXType) -> c_uint; - pub fn clang_isPreprocessing(kind: CXCursorKind) -> c_uint; - pub fn clang_isReference(kind: CXCursorKind) -> c_uint; - pub fn clang_isRestrictQualifiedType(type_: CXType) -> c_uint; - pub fn clang_isStatement(kind: CXCursorKind) -> c_uint; - pub fn clang_isTranslationUnit(kind: CXCursorKind) -> c_uint; - pub fn clang_isUnexposed(kind: CXCursorKind) -> c_uint; - pub fn clang_isVirtualBase(cursor: CXCursor) -> c_uint; - pub fn clang_isVolatileQualifiedType(type_: CXType) -> c_uint; - pub fn clang_loadDiagnostics(file: *const c_char, error: *mut CXLoadDiag_Error, message: *mut CXString) -> CXDiagnosticSet; - pub fn clang_parseTranslationUnit(index: CXIndex, file: *const c_char, arguments: *const *const c_char, n_arguments: c_int, unsaved: *mut CXUnsavedFile, n_unsaved: c_uint, flags: CXTranslationUnit_Flags) -> CXTranslationUnit; - pub fn clang_parseTranslationUnit2(index: CXIndex, file: *const c_char, arguments: *const *const c_char, n_arguments: c_int, unsaved: *mut CXUnsavedFile, n_unsaved: c_uint, flags: CXTranslationUnit_Flags, tu: *mut CXTranslationUnit) -> CXErrorCode; - #[cfg(feature="gte_clang_3_8")] - pub fn clang_parseTranslationUnit2FullArgv(index: CXIndex, file: *const c_char, arguments: *const *const c_char, n_arguments: c_int, unsaved: *mut CXUnsavedFile, n_unsaved: c_uint, flags: CXTranslationUnit_Flags, tu: *mut CXTranslationUnit) -> CXErrorCode; - pub fn clang_remap_dispose(remapping: CXRemapping); - pub fn clang_remap_getFilenames(remapping: CXRemapping, index: c_uint, original: *mut CXString, transformed: *mut CXString); - pub fn clang_remap_getNumFiles(remapping: CXRemapping) -> c_uint; - pub fn clang_reparseTranslationUnit(tu: CXTranslationUnit, n_unsaved: c_uint, unsaved: *mut CXUnsavedFile, flags: CXReparse_Flags) -> CXErrorCode; - pub fn clang_saveTranslationUnit(tu: CXTranslationUnit, file: *const c_char, options: CXSaveTranslationUnit_Flags) -> CXSaveError; - pub fn clang_sortCodeCompletionResults(results: *mut CXCompletionResult, n_results: c_uint); - #[cfg(feature="gte_clang_5_0")] - pub fn clang_suspendTranslationUnit(tu: CXTranslationUnit) -> c_uint; - pub fn clang_toggleCrashRecovery(recovery: c_uint); - pub fn clang_tokenize(tu: CXTranslationUnit, range: CXSourceRange, tokens: *mut *mut CXToken, n_tokens: *mut c_uint); - pub fn clang_visitChildren(cursor: CXCursor, visitor: CXCursorVisitor, data: CXClientData) -> c_uint; - - // Documentation - pub fn clang_BlockCommandComment_getArgText(comment: CXComment, index: c_uint) -> CXString; - pub fn clang_BlockCommandComment_getCommandName(comment: CXComment) -> CXString; - pub fn clang_BlockCommandComment_getNumArgs(comment: CXComment) -> c_uint; - pub fn clang_BlockCommandComment_getParagraph(comment: CXComment) -> CXComment; - pub fn clang_Comment_getChild(comment: CXComment, index: c_uint) -> CXComment; - pub fn clang_Comment_getKind(comment: CXComment) -> CXCommentKind; - pub fn clang_Comment_getNumChildren(comment: CXComment) -> c_uint; - pub fn clang_Comment_isWhitespace(comment: CXComment) -> c_uint; - pub fn clang_Cursor_getParsedComment(C: CXCursor) -> CXComment; - pub fn clang_FullComment_getAsHTML(comment: CXComment) -> CXString; - pub fn clang_FullComment_getAsXML(comment: CXComment) -> CXString; - pub fn clang_HTMLStartTagComment_isSelfClosing(comment: CXComment) -> c_uint; - pub fn clang_HTMLStartTag_getAttrName(comment: CXComment, index: c_uint) -> 
CXString; - pub fn clang_HTMLStartTag_getAttrValue(comment: CXComment, index: c_uint) -> CXString; - pub fn clang_HTMLStartTag_getNumAttrs(comment: CXComment) -> c_uint; - pub fn clang_HTMLTagComment_getAsString(comment: CXComment) -> CXString; - pub fn clang_HTMLTagComment_getTagName(comment: CXComment) -> CXString; - pub fn clang_InlineCommandComment_getArgText(comment: CXComment, index: c_uint) -> CXString; - pub fn clang_InlineCommandComment_getCommandName(comment: CXComment) -> CXString; - pub fn clang_InlineCommandComment_getNumArgs(comment: CXComment) -> c_uint; - pub fn clang_InlineCommandComment_getRenderKind(comment: CXComment) -> CXCommentInlineCommandRenderKind; - pub fn clang_InlineContentComment_hasTrailingNewline(comment: CXComment) -> c_uint; - pub fn clang_ParamCommandComment_getDirection(comment: CXComment) -> CXCommentParamPassDirection; - pub fn clang_ParamCommandComment_getParamIndex(comment: CXComment) -> c_uint; - pub fn clang_ParamCommandComment_getParamName(comment: CXComment) -> CXString; - pub fn clang_ParamCommandComment_isDirectionExplicit(comment: CXComment) -> c_uint; - pub fn clang_ParamCommandComment_isParamIndexValid(comment: CXComment) -> c_uint; - pub fn clang_TParamCommandComment_getDepth(comment: CXComment) -> c_uint; - pub fn clang_TParamCommandComment_getIndex(comment: CXComment, depth: c_uint) -> c_uint; - pub fn clang_TParamCommandComment_getParamName(comment: CXComment) -> CXString; - pub fn clang_TParamCommandComment_isParamPositionValid(comment: CXComment) -> c_uint; - pub fn clang_TextComment_getText(comment: CXComment) -> CXString; - pub fn clang_VerbatimBlockLineComment_getText(comment: CXComment) -> CXString; - pub fn clang_VerbatimLineComment_getText(comment: CXComment) -> CXString; -} +// Copyright 2016 Kyle Mayes +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Rust bindings for `libclang`. +//! +//! ## Supported Versions +//! +//! * 3.5 - [Documentation](https://kylemayes.github.io/clang-sys/3_5/clang_sys) +//! * 3.6 - [Documentation](https://kylemayes.github.io/clang-sys/3_6/clang_sys) +//! * 3.7 - [Documentation](https://kylemayes.github.io/clang-sys/3_7/clang_sys) +//! * 3.8 - [Documentation](https://kylemayes.github.io/clang-sys/3_8/clang_sys) +//! * 3.9 - [Documentation](https://kylemayes.github.io/clang-sys/3_9/clang_sys) +//! * 4.0 - [Documentation](https://kylemayes.github.io/clang-sys/4_0/clang_sys) +//! * 5.0 - [Documentation](https://kylemayes.github.io/clang-sys/5_0/clang_sys) +//! * 6.0 - [Documentation](https://kylemayes.github.io/clang-sys/6_0/clang_sys) +//! * 7.0 - [Documentation](https://kylemayes.github.io/clang-sys/7_0/clang_sys) +//! * 8.0 - [Documentation](https://kylemayes.github.io/clang-sys/8_0/clang_sys) +//! 
* 9.0 - [Documentation](https://kylemayes.github.io/clang-sys/9_0/clang_sys) + +#![allow(non_camel_case_types, non_snake_case, non_upper_case_globals)] +#![cfg_attr(feature = "cargo-clippy", allow(clippy::unreadable_literal))] + +extern crate glob; +extern crate libc; +#[cfg(feature = "runtime")] +extern crate libloading; + +pub mod support; + +#[macro_use] +mod link; + +use std::mem; + +use libc::*; + +pub type CXClientData = *mut c_void; +pub type CXCursorVisitor = extern "C" fn(CXCursor, CXCursor, CXClientData) -> CXChildVisitResult; +#[cfg(feature = "gte_clang_3_7")] +pub type CXFieldVisitor = extern "C" fn(CXCursor, CXClientData) -> CXVisitorResult; +pub type CXInclusionVisitor = extern "C" fn(CXFile, *mut CXSourceLocation, c_uint, CXClientData); + +//================================================ +// Macros +//================================================ + +/// Defines a C enum as a series of constants. +macro_rules! cenum { + ($(#[$meta:meta])* enum $name:ident { + $($(#[$vmeta:meta])* const $variant:ident = $value:expr), +, + }) => ( + pub type $name = c_int; + + $($(#[$vmeta])* pub const $variant: $name = $value;)+ + ); + ($(#[$meta:meta])* enum $name:ident { + $($(#[$vmeta:meta])* const $variant:ident = $value:expr); +; + }) => ( + pub type $name = c_int; + + $($(#[$vmeta])* pub const $variant: $name = $value;)+ + ); +} + +/// Implements a zeroing implementation of `Default` for the supplied type. +macro_rules! default { + (#[$meta:meta] $ty:ty) => { + #[$meta] + impl Default for $ty { + fn default() -> $ty { + unsafe { mem::zeroed() } + } + } + }; + + ($ty:ty) => { + impl Default for $ty { + fn default() -> $ty { + unsafe { mem::zeroed() } + } + } + }; +} + +//================================================ +// Enums +//================================================ + +cenum! { + enum CXAvailabilityKind { + const CXAvailability_Available = 0, + const CXAvailability_Deprecated = 1, + const CXAvailability_NotAvailable = 2, + const CXAvailability_NotAccessible = 3, + } +} + +cenum! { + enum CXCallingConv { + const CXCallingConv_Default = 0, + const CXCallingConv_C = 1, + const CXCallingConv_X86StdCall = 2, + const CXCallingConv_X86FastCall = 3, + const CXCallingConv_X86ThisCall = 4, + const CXCallingConv_X86Pascal = 5, + const CXCallingConv_AAPCS = 6, + const CXCallingConv_AAPCS_VFP = 7, + /// Only produced by `libclang` 4.0 and later. + const CXCallingConv_X86RegCall = 8, + const CXCallingConv_IntelOclBicc = 9, + const CXCallingConv_Win64 = 10, + const CXCallingConv_X86_64Win64 = 10, + const CXCallingConv_X86_64SysV = 11, + /// Only produced by `libclang` 3.6 and later. + const CXCallingConv_X86VectorCall = 12, + /// Only produced by `libclang` 3.9 and later. + const CXCallingConv_Swift = 13, + /// Only produced by `libclang` 3.9 and later. + const CXCallingConv_PreserveMost = 14, + /// Only produced by `libclang` 3.9 and later. + const CXCallingConv_PreserveAll = 15, + /// Only produced by `libclang` 8.0 and later. + const CXCallingConv_AArch64VectorCall = 16, + const CXCallingConv_Invalid = 100, + const CXCallingConv_Unexposed = 200, + } +} + +cenum! { + enum CXChildVisitResult { + const CXChildVisit_Break = 0, + const CXChildVisit_Continue = 1, + const CXChildVisit_Recurse = 2, + } +} + +cenum! { + enum CXCommentInlineCommandRenderKind { + const CXCommentInlineCommandRenderKind_Normal = 0, + const CXCommentInlineCommandRenderKind_Bold = 1, + const CXCommentInlineCommandRenderKind_Monospaced = 2, + const CXCommentInlineCommandRenderKind_Emphasized = 3, + } +} + +cenum! 
{ + enum CXCommentKind { + const CXComment_Null = 0, + const CXComment_Text = 1, + const CXComment_InlineCommand = 2, + const CXComment_HTMLStartTag = 3, + const CXComment_HTMLEndTag = 4, + const CXComment_Paragraph = 5, + const CXComment_BlockCommand = 6, + const CXComment_ParamCommand = 7, + const CXComment_TParamCommand = 8, + const CXComment_VerbatimBlockCommand = 9, + const CXComment_VerbatimBlockLine = 10, + const CXComment_VerbatimLine = 11, + const CXComment_FullComment = 12, + } +} + +cenum! { + enum CXCommentParamPassDirection { + const CXCommentParamPassDirection_In = 0, + const CXCommentParamPassDirection_Out = 1, + const CXCommentParamPassDirection_InOut = 2, + } +} + +cenum! { + enum CXCompilationDatabase_Error { + const CXCompilationDatabase_NoError = 0, + const CXCompilationDatabase_CanNotLoadDatabase = 1, + } +} + +cenum! { + enum CXCompletionChunkKind { + const CXCompletionChunk_Optional = 0, + const CXCompletionChunk_TypedText = 1, + const CXCompletionChunk_Text = 2, + const CXCompletionChunk_Placeholder = 3, + const CXCompletionChunk_Informative = 4, + const CXCompletionChunk_CurrentParameter = 5, + const CXCompletionChunk_LeftParen = 6, + const CXCompletionChunk_RightParen = 7, + const CXCompletionChunk_LeftBracket = 8, + const CXCompletionChunk_RightBracket = 9, + const CXCompletionChunk_LeftBrace = 10, + const CXCompletionChunk_RightBrace = 11, + const CXCompletionChunk_LeftAngle = 12, + const CXCompletionChunk_RightAngle = 13, + const CXCompletionChunk_Comma = 14, + const CXCompletionChunk_ResultType = 15, + const CXCompletionChunk_Colon = 16, + const CXCompletionChunk_SemiColon = 17, + const CXCompletionChunk_Equal = 18, + const CXCompletionChunk_HorizontalSpace = 19, + const CXCompletionChunk_VerticalSpace = 20, + } +} + +cenum! 
{ + enum CXCursorKind { + const CXCursor_UnexposedDecl = 1, + const CXCursor_StructDecl = 2, + const CXCursor_UnionDecl = 3, + const CXCursor_ClassDecl = 4, + const CXCursor_EnumDecl = 5, + const CXCursor_FieldDecl = 6, + const CXCursor_EnumConstantDecl = 7, + const CXCursor_FunctionDecl = 8, + const CXCursor_VarDecl = 9, + const CXCursor_ParmDecl = 10, + const CXCursor_ObjCInterfaceDecl = 11, + const CXCursor_ObjCCategoryDecl = 12, + const CXCursor_ObjCProtocolDecl = 13, + const CXCursor_ObjCPropertyDecl = 14, + const CXCursor_ObjCIvarDecl = 15, + const CXCursor_ObjCInstanceMethodDecl = 16, + const CXCursor_ObjCClassMethodDecl = 17, + const CXCursor_ObjCImplementationDecl = 18, + const CXCursor_ObjCCategoryImplDecl = 19, + const CXCursor_TypedefDecl = 20, + const CXCursor_CXXMethod = 21, + const CXCursor_Namespace = 22, + const CXCursor_LinkageSpec = 23, + const CXCursor_Constructor = 24, + const CXCursor_Destructor = 25, + const CXCursor_ConversionFunction = 26, + const CXCursor_TemplateTypeParameter = 27, + const CXCursor_NonTypeTemplateParameter = 28, + const CXCursor_TemplateTemplateParameter = 29, + const CXCursor_FunctionTemplate = 30, + const CXCursor_ClassTemplate = 31, + const CXCursor_ClassTemplatePartialSpecialization = 32, + const CXCursor_NamespaceAlias = 33, + const CXCursor_UsingDirective = 34, + const CXCursor_UsingDeclaration = 35, + const CXCursor_TypeAliasDecl = 36, + const CXCursor_ObjCSynthesizeDecl = 37, + const CXCursor_ObjCDynamicDecl = 38, + const CXCursor_CXXAccessSpecifier = 39, + const CXCursor_ObjCSuperClassRef = 40, + const CXCursor_ObjCProtocolRef = 41, + const CXCursor_ObjCClassRef = 42, + const CXCursor_TypeRef = 43, + const CXCursor_CXXBaseSpecifier = 44, + const CXCursor_TemplateRef = 45, + const CXCursor_NamespaceRef = 46, + const CXCursor_MemberRef = 47, + const CXCursor_LabelRef = 48, + const CXCursor_OverloadedDeclRef = 49, + const CXCursor_VariableRef = 50, + const CXCursor_InvalidFile = 70, + const CXCursor_NoDeclFound = 71, + const CXCursor_NotImplemented = 72, + const CXCursor_InvalidCode = 73, + const CXCursor_UnexposedExpr = 100, + const CXCursor_DeclRefExpr = 101, + const CXCursor_MemberRefExpr = 102, + const CXCursor_CallExpr = 103, + const CXCursor_ObjCMessageExpr = 104, + const CXCursor_BlockExpr = 105, + const CXCursor_IntegerLiteral = 106, + const CXCursor_FloatingLiteral = 107, + const CXCursor_ImaginaryLiteral = 108, + const CXCursor_StringLiteral = 109, + const CXCursor_CharacterLiteral = 110, + const CXCursor_ParenExpr = 111, + const CXCursor_UnaryOperator = 112, + const CXCursor_ArraySubscriptExpr = 113, + const CXCursor_BinaryOperator = 114, + const CXCursor_CompoundAssignOperator = 115, + const CXCursor_ConditionalOperator = 116, + const CXCursor_CStyleCastExpr = 117, + const CXCursor_CompoundLiteralExpr = 118, + const CXCursor_InitListExpr = 119, + const CXCursor_AddrLabelExpr = 120, + const CXCursor_StmtExpr = 121, + const CXCursor_GenericSelectionExpr = 122, + const CXCursor_GNUNullExpr = 123, + const CXCursor_CXXStaticCastExpr = 124, + const CXCursor_CXXDynamicCastExpr = 125, + const CXCursor_CXXReinterpretCastExpr = 126, + const CXCursor_CXXConstCastExpr = 127, + const CXCursor_CXXFunctionalCastExpr = 128, + const CXCursor_CXXTypeidExpr = 129, + const CXCursor_CXXBoolLiteralExpr = 130, + const CXCursor_CXXNullPtrLiteralExpr = 131, + const CXCursor_CXXThisExpr = 132, + const CXCursor_CXXThrowExpr = 133, + const CXCursor_CXXNewExpr = 134, + const CXCursor_CXXDeleteExpr = 135, + const CXCursor_UnaryExpr = 136, + const 
CXCursor_ObjCStringLiteral = 137, + const CXCursor_ObjCEncodeExpr = 138, + const CXCursor_ObjCSelectorExpr = 139, + const CXCursor_ObjCProtocolExpr = 140, + const CXCursor_ObjCBridgedCastExpr = 141, + const CXCursor_PackExpansionExpr = 142, + const CXCursor_SizeOfPackExpr = 143, + const CXCursor_LambdaExpr = 144, + const CXCursor_ObjCBoolLiteralExpr = 145, + const CXCursor_ObjCSelfExpr = 146, + /// Only produced by `libclang` 3.8 and later. + const CXCursor_OMPArraySectionExpr = 147, + /// Only produced by `libclang` 3.9 and later. + const CXCursor_ObjCAvailabilityCheckExpr = 148, + /// Only produced by `libclang` 7.0 and later. + const CXCursor_FixedPointLiteral = 149, + const CXCursor_UnexposedStmt = 200, + const CXCursor_LabelStmt = 201, + const CXCursor_CompoundStmt = 202, + const CXCursor_CaseStmt = 203, + const CXCursor_DefaultStmt = 204, + const CXCursor_IfStmt = 205, + const CXCursor_SwitchStmt = 206, + const CXCursor_WhileStmt = 207, + const CXCursor_DoStmt = 208, + const CXCursor_ForStmt = 209, + const CXCursor_GotoStmt = 210, + const CXCursor_IndirectGotoStmt = 211, + const CXCursor_ContinueStmt = 212, + const CXCursor_BreakStmt = 213, + const CXCursor_ReturnStmt = 214, + /// Duplicate of `CXCursor_GccAsmStmt`. + const CXCursor_AsmStmt = 215, + const CXCursor_ObjCAtTryStmt = 216, + const CXCursor_ObjCAtCatchStmt = 217, + const CXCursor_ObjCAtFinallyStmt = 218, + const CXCursor_ObjCAtThrowStmt = 219, + const CXCursor_ObjCAtSynchronizedStmt = 220, + const CXCursor_ObjCAutoreleasePoolStmt = 221, + const CXCursor_ObjCForCollectionStmt = 222, + const CXCursor_CXXCatchStmt = 223, + const CXCursor_CXXTryStmt = 224, + const CXCursor_CXXForRangeStmt = 225, + const CXCursor_SEHTryStmt = 226, + const CXCursor_SEHExceptStmt = 227, + const CXCursor_SEHFinallyStmt = 228, + const CXCursor_MSAsmStmt = 229, + const CXCursor_NullStmt = 230, + const CXCursor_DeclStmt = 231, + const CXCursor_OMPParallelDirective = 232, + const CXCursor_OMPSimdDirective = 233, + const CXCursor_OMPForDirective = 234, + const CXCursor_OMPSectionsDirective = 235, + const CXCursor_OMPSectionDirective = 236, + const CXCursor_OMPSingleDirective = 237, + const CXCursor_OMPParallelForDirective = 238, + const CXCursor_OMPParallelSectionsDirective = 239, + const CXCursor_OMPTaskDirective = 240, + const CXCursor_OMPMasterDirective = 241, + const CXCursor_OMPCriticalDirective = 242, + const CXCursor_OMPTaskyieldDirective = 243, + const CXCursor_OMPBarrierDirective = 244, + const CXCursor_OMPTaskwaitDirective = 245, + const CXCursor_OMPFlushDirective = 246, + const CXCursor_SEHLeaveStmt = 247, + /// Only produced by `libclang` 3.6 and later. + const CXCursor_OMPOrderedDirective = 248, + /// Only produced by `libclang` 3.6 and later. + const CXCursor_OMPAtomicDirective = 249, + /// Only produced by `libclang` 3.6 and later. + const CXCursor_OMPForSimdDirective = 250, + /// Only produced by `libclang` 3.6 and later. + const CXCursor_OMPParallelForSimdDirective = 251, + /// Only produced by `libclang` 3.6 and later. + const CXCursor_OMPTargetDirective = 252, + /// Only produced by `libclang` 3.6 and later. + const CXCursor_OMPTeamsDirective = 253, + /// Only produced by `libclang` 3.7 and later. + const CXCursor_OMPTaskgroupDirective = 254, + /// Only produced by `libclang` 3.7 and later. + const CXCursor_OMPCancellationPointDirective = 255, + /// Only produced by `libclang` 3.7 and later. + const CXCursor_OMPCancelDirective = 256, + /// Only produced by `libclang` 3.8 and later. 
+ const CXCursor_OMPTargetDataDirective = 257, + /// Only produced by `libclang` 3.8 and later. + const CXCursor_OMPTaskLoopDirective = 258, + /// Only produced by `libclang` 3.8 and later. + const CXCursor_OMPTaskLoopSimdDirective = 259, + /// Only produced by `libclang` 3.8 and later. + const CXCursor_OMPDistributeDirective = 260, + /// Only produced by `libclang` 3.9 and later. + const CXCursor_OMPTargetEnterDataDirective = 261, + /// Only produced by `libclang` 3.9 and later. + const CXCursor_OMPTargetExitDataDirective = 262, + /// Only produced by `libclang` 3.9 and later. + const CXCursor_OMPTargetParallelDirective = 263, + /// Only produced by `libclang` 3.9 and later. + const CXCursor_OMPTargetParallelForDirective = 264, + /// Only produced by `libclang` 3.9 and later. + const CXCursor_OMPTargetUpdateDirective = 265, + /// Only produced by `libclang` 3.9 and later. + const CXCursor_OMPDistributeParallelForDirective = 266, + /// Only produced by `libclang` 3.9 and later. + const CXCursor_OMPDistributeParallelForSimdDirective = 267, + /// Only produced by `libclang` 3.9 and later. + const CXCursor_OMPDistributeSimdDirective = 268, + /// Only produced by `libclang` 3.9 and later. + const CXCursor_OMPTargetParallelForSimdDirective = 269, + /// Only produced by `libclang` 4.0 and later. + const CXCursor_OMPTargetSimdDirective = 270, + /// Only produced by `libclang` 4.0 and later. + const CXCursor_OMPTeamsDistributeDirective = 271, + /// Only produced by `libclang` 4.0 and later. + const CXCursor_OMPTeamsDistributeSimdDirective = 272, + /// Only produced by `libclang` 4.0 and later. + const CXCursor_OMPTeamsDistributeParallelForSimdDirective = 273, + /// Only produced by `libclang` 4.0 and later. + const CXCursor_OMPTeamsDistributeParallelForDirective = 274, + /// Only produced by `libclang` 4.0 and later. + const CXCursor_OMPTargetTeamsDirective = 275, + /// Only produced by `libclang` 4.0 and later. + const CXCursor_OMPTargetTeamsDistributeDirective = 276, + /// Only produced by `libclang` 4.0 and later. + const CXCursor_OMPTargetTeamsDistributeParallelForDirective = 277, + /// Only produced by `libclang` 4.0 and later. + const CXCursor_OMPTargetTeamsDistributeParallelForSimdDirective = 278, + /// Only produced by `libclang` 4.0 and later. + const CXCursor_OMPTargetTeamsDistributeSimdDirective = 279, + /// Only produced by `libclang` 9.0 and later. + const CXCursor_BuiltinBitCastExpr = 280, + const CXCursor_TranslationUnit = 300, + const CXCursor_UnexposedAttr = 400, + const CXCursor_IBActionAttr = 401, + const CXCursor_IBOutletAttr = 402, + const CXCursor_IBOutletCollectionAttr = 403, + const CXCursor_CXXFinalAttr = 404, + const CXCursor_CXXOverrideAttr = 405, + const CXCursor_AnnotateAttr = 406, + const CXCursor_AsmLabelAttr = 407, + const CXCursor_PackedAttr = 408, + const CXCursor_PureAttr = 409, + const CXCursor_ConstAttr = 410, + const CXCursor_NoDuplicateAttr = 411, + const CXCursor_CUDAConstantAttr = 412, + const CXCursor_CUDADeviceAttr = 413, + const CXCursor_CUDAGlobalAttr = 414, + const CXCursor_CUDAHostAttr = 415, + /// Only produced by `libclang` 3.6 and later. + const CXCursor_CUDASharedAttr = 416, + /// Only produced by `libclang` 3.8 and later. + const CXCursor_VisibilityAttr = 417, + /// Only produced by `libclang` 3.8 and later. + const CXCursor_DLLExport = 418, + /// Only produced by `libclang` 3.8 and later. + const CXCursor_DLLImport = 419, + /// Only produced by `libclang` 8.0 and later. 
+ const CXCursor_NSReturnsRetained = 420, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_NSReturnsNotRetained = 421, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_NSReturnsAutoreleased = 422, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_NSConsumesSelf = 423, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_NSConsumed = 424, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_ObjCException = 425, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_ObjCNSObject = 426, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_ObjCIndependentClass = 427, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_ObjCPreciseLifetime = 428, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_ObjCReturnsInnerPointer = 429, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_ObjCRequiresSuper = 430, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_ObjCRootClass = 431, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_ObjCSubclassingRestricted = 432, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_ObjCExplicitProtocolImpl = 433, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_ObjCDesignatedInitializer = 434, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_ObjCRuntimeVisible = 435, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_ObjCBoxable = 436, + /// Only produced by `libclang` 8.0 and later. + const CXCursor_FlagEnum = 437, + /// Only produced by `libclang` 9.0 and later. + const CXCursor_ConvergentAttr = 438, + /// Only produced by `libclang` 9.0 and later. + const CXCursor_WarnUnusedAttr = 439, + /// Only produced by `libclang` 9.0 and later. + const CXCursor_WarnUnusedResultAttr = 440, + /// Only produced by `libclang` 9.0 and later. + const CXCursor_AlignedAttr = 441, + const CXCursor_PreprocessingDirective = 500, + const CXCursor_MacroDefinition = 501, + /// Duplicate of `CXCursor_MacroInstantiation`. + const CXCursor_MacroExpansion = 502, + const CXCursor_InclusionDirective = 503, + const CXCursor_ModuleImportDecl = 600, + /// Only produced by `libclang` 3.8 and later. + const CXCursor_TypeAliasTemplateDecl = 601, + /// Only produced by `libclang` 3.9 and later. + const CXCursor_StaticAssert = 602, + /// Only produced by `libclang` 4.0 and later. + const CXCursor_FriendDecl = 603, + /// Only produced by `libclang` 3.7 and later. + const CXCursor_OverloadCandidate = 700, + } +} + +cenum! { + #[cfg(feature="gte_clang_5_0")] + enum CXCursor_ExceptionSpecificationKind { + const CXCursor_ExceptionSpecificationKind_None = 0, + const CXCursor_ExceptionSpecificationKind_DynamicNone = 1, + const CXCursor_ExceptionSpecificationKind_Dynamic = 2, + const CXCursor_ExceptionSpecificationKind_MSAny = 3, + const CXCursor_ExceptionSpecificationKind_BasicNoexcept = 4, + const CXCursor_ExceptionSpecificationKind_ComputedNoexcept = 5, + const CXCursor_ExceptionSpecificationKind_Unevaluated = 6, + const CXCursor_ExceptionSpecificationKind_Uninstantiated = 7, + const CXCursor_ExceptionSpecificationKind_Unparsed = 8, + #[cfg(feature="gte_clang_9_0")] + const CXCursor_ExceptionSpecificationKind_NoThrow = 9, + } +} + +cenum! { + enum CXDiagnosticSeverity { + const CXDiagnostic_Ignored = 0, + const CXDiagnostic_Note = 1, + const CXDiagnostic_Warning = 2, + const CXDiagnostic_Error = 3, + const CXDiagnostic_Fatal = 4, + } +} + +cenum! 
{ + enum CXErrorCode { + const CXError_Success = 0, + const CXError_Failure = 1, + const CXError_Crashed = 2, + const CXError_InvalidArguments = 3, + const CXError_ASTReadError = 4, + } +} + +cenum! { + enum CXEvalResultKind { + const CXEval_UnExposed = 0, + const CXEval_Int = 1 , + const CXEval_Float = 2, + const CXEval_ObjCStrLiteral = 3, + const CXEval_StrLiteral = 4, + const CXEval_CFStr = 5, + const CXEval_Other = 6, + } +} + +cenum! { + enum CXIdxAttrKind { + const CXIdxAttr_Unexposed = 0, + const CXIdxAttr_IBAction = 1, + const CXIdxAttr_IBOutlet = 2, + const CXIdxAttr_IBOutletCollection = 3, + } +} + +cenum! { + enum CXIdxEntityCXXTemplateKind { + const CXIdxEntity_NonTemplate = 0, + const CXIdxEntity_Template = 1, + const CXIdxEntity_TemplatePartialSpecialization = 2, + const CXIdxEntity_TemplateSpecialization = 3, + } +} + +cenum! { + enum CXIdxEntityKind { + const CXIdxEntity_Unexposed = 0, + const CXIdxEntity_Typedef = 1, + const CXIdxEntity_Function = 2, + const CXIdxEntity_Variable = 3, + const CXIdxEntity_Field = 4, + const CXIdxEntity_EnumConstant = 5, + const CXIdxEntity_ObjCClass = 6, + const CXIdxEntity_ObjCProtocol = 7, + const CXIdxEntity_ObjCCategory = 8, + const CXIdxEntity_ObjCInstanceMethod = 9, + const CXIdxEntity_ObjCClassMethod = 10, + const CXIdxEntity_ObjCProperty = 11, + const CXIdxEntity_ObjCIvar = 12, + const CXIdxEntity_Enum = 13, + const CXIdxEntity_Struct = 14, + const CXIdxEntity_Union = 15, + const CXIdxEntity_CXXClass = 16, + const CXIdxEntity_CXXNamespace = 17, + const CXIdxEntity_CXXNamespaceAlias = 18, + const CXIdxEntity_CXXStaticVariable = 19, + const CXIdxEntity_CXXStaticMethod = 20, + const CXIdxEntity_CXXInstanceMethod = 21, + const CXIdxEntity_CXXConstructor = 22, + const CXIdxEntity_CXXDestructor = 23, + const CXIdxEntity_CXXConversionFunction = 24, + const CXIdxEntity_CXXTypeAlias = 25, + const CXIdxEntity_CXXInterface = 26, + } +} + +cenum! { + enum CXIdxEntityLanguage { + const CXIdxEntityLang_None = 0, + const CXIdxEntityLang_C = 1, + const CXIdxEntityLang_ObjC = 2, + const CXIdxEntityLang_CXX = 3, + /// Only produced by `libclang` 5.0 and later. + const CXIdxEntityLang_Swift = 4, + } +} + +cenum! { + enum CXIdxEntityRefKind { + const CXIdxEntityRef_Direct = 1, + const CXIdxEntityRef_Implicit = 2, + } +} + +cenum! { + enum CXIdxObjCContainerKind { + const CXIdxObjCContainer_ForwardRef = 0, + const CXIdxObjCContainer_Interface = 1, + const CXIdxObjCContainer_Implementation = 2, + } +} + +cenum! { + enum CXLanguageKind { + const CXLanguage_Invalid = 0, + const CXLanguage_C = 1, + const CXLanguage_ObjC = 2, + const CXLanguage_CPlusPlus = 3, + } +} + +cenum! { + enum CXLinkageKind { + const CXLinkage_Invalid = 0, + const CXLinkage_NoLinkage = 1, + const CXLinkage_Internal = 2, + const CXLinkage_UniqueExternal = 3, + const CXLinkage_External = 4, + } +} + +cenum! { + enum CXLoadDiag_Error { + const CXLoadDiag_None = 0, + const CXLoadDiag_Unknown = 1, + const CXLoadDiag_CannotLoad = 2, + const CXLoadDiag_InvalidFile = 3, + } +} + +cenum! 
{ + #[cfg(feature="gte_clang_7_0")] + enum CXPrintingPolicyProperty { + const CXPrintingPolicy_Indentation = 0, + const CXPrintingPolicy_SuppressSpecifiers = 1, + const CXPrintingPolicy_SuppressTagKeyword = 2, + const CXPrintingPolicy_IncludeTagDefinition = 3, + const CXPrintingPolicy_SuppressScope = 4, + const CXPrintingPolicy_SuppressUnwrittenScope = 5, + const CXPrintingPolicy_SuppressInitializers = 6, + const CXPrintingPolicy_ConstantArraySizeAsWritten = 7, + const CXPrintingPolicy_AnonymousTagLocations = 8, + const CXPrintingPolicy_SuppressStrongLifetime = 9, + const CXPrintingPolicy_SuppressLifetimeQualifiers = 10, + const CXPrintingPolicy_SuppressTemplateArgsInCXXConstructors = 11, + const CXPrintingPolicy_Bool = 12, + const CXPrintingPolicy_Restrict = 13, + const CXPrintingPolicy_Alignof = 14, + const CXPrintingPolicy_UnderscoreAlignof = 15, + const CXPrintingPolicy_UseVoidForZeroParams = 16, + const CXPrintingPolicy_TerseOutput = 17, + const CXPrintingPolicy_PolishForDeclaration = 18, + const CXPrintingPolicy_Half = 19, + const CXPrintingPolicy_MSWChar = 20, + const CXPrintingPolicy_IncludeNewlines = 21, + const CXPrintingPolicy_MSVCFormatting = 22, + const CXPrintingPolicy_ConstantsAsWritten = 23, + const CXPrintingPolicy_SuppressImplicitBase = 24, + const CXPrintingPolicy_FullyQualifiedName = 25, + } +} + +cenum! { + enum CXRefQualifierKind { + const CXRefQualifier_None = 0, + const CXRefQualifier_LValue = 1, + const CXRefQualifier_RValue = 2, + } +} + +cenum! { + enum CXResult { + const CXResult_Success = 0, + const CXResult_Invalid = 1, + const CXResult_VisitBreak = 2, + } +} + +cenum! { + enum CXSaveError { + const CXSaveError_None = 0, + const CXSaveError_Unknown = 1, + const CXSaveError_TranslationErrors = 2, + const CXSaveError_InvalidTU = 3, + } +} + +cenum! { + #[cfg(feature="gte_clang_6_0")] + enum CXTLSKind { + const CXTLS_None = 0, + const CXTLS_Dynamic = 1, + const CXTLS_Static = 2, + } +} + +cenum! { + enum CXTUResourceUsageKind { + const CXTUResourceUsage_AST = 1, + const CXTUResourceUsage_Identifiers = 2, + const CXTUResourceUsage_Selectors = 3, + const CXTUResourceUsage_GlobalCompletionResults = 4, + const CXTUResourceUsage_SourceManagerContentCache = 5, + const CXTUResourceUsage_AST_SideTables = 6, + const CXTUResourceUsage_SourceManager_Membuffer_Malloc = 7, + const CXTUResourceUsage_SourceManager_Membuffer_MMap = 8, + const CXTUResourceUsage_ExternalASTSource_Membuffer_Malloc = 9, + const CXTUResourceUsage_ExternalASTSource_Membuffer_MMap = 10, + const CXTUResourceUsage_Preprocessor = 11, + const CXTUResourceUsage_PreprocessingRecord = 12, + const CXTUResourceUsage_SourceManager_DataStructures = 13, + const CXTUResourceUsage_Preprocessor_HeaderSearch = 14, + } +} + +cenum! { + #[cfg(feature="gte_clang_3_6")] + enum CXTemplateArgumentKind { + const CXTemplateArgumentKind_Null = 0, + const CXTemplateArgumentKind_Type = 1, + const CXTemplateArgumentKind_Declaration = 2, + const CXTemplateArgumentKind_NullPtr = 3, + const CXTemplateArgumentKind_Integral = 4, + const CXTemplateArgumentKind_Template = 5, + const CXTemplateArgumentKind_TemplateExpansion = 6, + const CXTemplateArgumentKind_Expression = 7, + const CXTemplateArgumentKind_Pack = 8, + const CXTemplateArgumentKind_Invalid = 9, + } +} + +cenum! { + enum CXTokenKind { + const CXToken_Punctuation = 0, + const CXToken_Keyword = 1, + const CXToken_Identifier = 2, + const CXToken_Literal = 3, + const CXToken_Comment = 4, + } +} + +cenum! 
{ + enum CXTypeKind { + const CXType_Invalid = 0, + const CXType_Unexposed = 1, + const CXType_Void = 2, + const CXType_Bool = 3, + const CXType_Char_U = 4, + const CXType_UChar = 5, + const CXType_Char16 = 6, + const CXType_Char32 = 7, + const CXType_UShort = 8, + const CXType_UInt = 9, + const CXType_ULong = 10, + const CXType_ULongLong = 11, + const CXType_UInt128 = 12, + const CXType_Char_S = 13, + const CXType_SChar = 14, + const CXType_WChar = 15, + const CXType_Short = 16, + const CXType_Int = 17, + const CXType_Long = 18, + const CXType_LongLong = 19, + const CXType_Int128 = 20, + const CXType_Float = 21, + const CXType_Double = 22, + const CXType_LongDouble = 23, + const CXType_NullPtr = 24, + const CXType_Overload = 25, + const CXType_Dependent = 26, + const CXType_ObjCId = 27, + const CXType_ObjCClass = 28, + const CXType_ObjCSel = 29, + /// Only produced by `libclang` 3.9 and later. + const CXType_Float128 = 30, + /// Only produced by `libclang` 5.0 and later. + const CXType_Half = 31, + /// Only produced by `libclang` 6.0 and later. + const CXType_Float16 = 32, + /// Only produced by `libclang` 7.0 and later. + const CXType_ShortAccum = 33, + /// Only produced by `libclang` 7.0 and later. + const CXType_Accum = 34, + /// Only produced by `libclang` 7.0 and later. + const CXType_LongAccum = 35, + /// Only produced by `libclang` 7.0 and later. + const CXType_UShortAccum = 36, + /// Only produced by `libclang` 7.0 and later. + const CXType_UAccum = 37, + /// Only produced by `libclang` 7.0 and later. + const CXType_ULongAccum = 38, + const CXType_Complex = 100, + const CXType_Pointer = 101, + const CXType_BlockPointer = 102, + const CXType_LValueReference = 103, + const CXType_RValueReference = 104, + const CXType_Record = 105, + const CXType_Enum = 106, + const CXType_Typedef = 107, + const CXType_ObjCInterface = 108, + const CXType_ObjCObjectPointer = 109, + const CXType_FunctionNoProto = 110, + const CXType_FunctionProto = 111, + const CXType_ConstantArray = 112, + const CXType_Vector = 113, + const CXType_IncompleteArray = 114, + const CXType_VariableArray = 115, + const CXType_DependentSizedArray = 116, + const CXType_MemberPointer = 117, + /// Only produced by `libclang` 3.8 and later. + const CXType_Auto = 118, + /// Only produced by `libclang` 3.9 and later. + const CXType_Elaborated = 119, + /// Only produced by `libclang` 5.0 and later. + const CXType_Pipe = 120, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage1dRO = 121, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage1dArrayRO = 122, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage1dBufferRO = 123, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dRO = 124, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dArrayRO = 125, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dDepthRO = 126, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dArrayDepthRO = 127, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dMSAARO = 128, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dArrayMSAARO = 129, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dMSAADepthRO = 130, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dArrayMSAADepthRO = 131, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage3dRO = 132, + /// Only produced by `libclang` 5.0 and later. 
+ const CXType_OCLImage1dWO = 133, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage1dArrayWO = 134, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage1dBufferWO = 135, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dWO = 136, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dArrayWO = 137, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dDepthWO = 138, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dArrayDepthWO = 139, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dMSAAWO = 140, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dArrayMSAAWO = 141, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dMSAADepthWO = 142, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dArrayMSAADepthWO = 143, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage3dWO = 144, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage1dRW = 145, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage1dArrayRW = 146, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage1dBufferRW = 147, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dRW = 148, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dArrayRW = 149, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dDepthRW = 150, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dArrayDepthRW = 151, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dMSAARW = 152, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dArrayMSAARW = 153, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dMSAADepthRW = 154, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage2dArrayMSAADepthRW = 155, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLImage3dRW = 156, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLSampler = 157, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLEvent = 158, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLQueue = 159, + /// Only produced by `libclang` 5.0 and later. + const CXType_OCLReserveID = 160, + /// Only produced by `libclang` 8.0 and later. + const CXType_ObjCObject = 161, + /// Only produced by `libclang` 8.0 and later. + const CXType_ObjCTypeParam = 162, + /// Only produced by `libclang` 8.0 and later. + const CXType_Attributed = 163, + /// Only produced by `libclang` 8.0 and later. + const CXType_OCLIntelSubgroupAVCMcePayload = 164, + /// Only produced by `libclang` 8.0 and later. + const CXType_OCLIntelSubgroupAVCImePayload = 165, + /// Only produced by `libclang` 8.0 and later. + const CXType_OCLIntelSubgroupAVCRefPayload = 166, + /// Only produced by `libclang` 8.0 and later. + const CXType_OCLIntelSubgroupAVCSicPayload = 167, + /// Only produced by `libclang` 8.0 and later. + const CXType_OCLIntelSubgroupAVCMceResult = 168, + /// Only produced by `libclang` 8.0 and later. + const CXType_OCLIntelSubgroupAVCImeResult = 169, + /// Only produced by `libclang` 8.0 and later. + const CXType_OCLIntelSubgroupAVCRefResult = 170, + /// Only produced by `libclang` 8.0 and later. + const CXType_OCLIntelSubgroupAVCSicResult = 171, + /// Only produced by `libclang` 8.0 and later. 
+ const CXType_OCLIntelSubgroupAVCImeResultSingleRefStreamout = 172, + /// Only produced by `libclang` 8.0 and later. + const CXType_OCLIntelSubgroupAVCImeResultDualRefStreamout = 173, + /// Only produced by `libclang` 8.0 and later. + const CXType_OCLIntelSubgroupAVCImeSingleRefStreamin = 174, + /// Only produced by `libclang` 8.0 and later. + const CXType_OCLIntelSubgroupAVCImeDualRefStreamin = 175, + /// Only produced by `libclang` 9.0 and later. + const CXType_ExtVector = 176, + } +} + +cenum! { + enum CXTypeLayoutError { + const CXTypeLayoutError_Invalid = -1, + const CXTypeLayoutError_Incomplete = -2, + const CXTypeLayoutError_Dependent = -3, + const CXTypeLayoutError_NotConstantSize = -4, + const CXTypeLayoutError_InvalidFieldName = -5, + /// Only produced by `libclang` 9.0 and later. + const CXTypeLayoutError_Undeduced = -6, + } +} + +cenum! { + #[cfg(feature="gte_clang_3_8")] + enum CXVisibilityKind { + const CXVisibility_Invalid = 0, + const CXVisibility_Hidden = 1, + const CXVisibility_Protected = 2, + const CXVisibility_Default = 3, + } +} + +cenum! { + #[cfg(feature="gte_clang_8_0")] + enum CXTypeNullabilityKind { + const CXTypeNullability_NonNull = 0, + const CXTypeNullability_Nullable = 1, + const CXTypeNullability_Unspecified = 2, + const CXTypeNullability_Invalid = 3, + } +} + +cenum! { + enum CXVisitorResult { + const CXVisit_Break = 0, + const CXVisit_Continue = 1, + } +} + +cenum! { + enum CX_CXXAccessSpecifier { + const CX_CXXInvalidAccessSpecifier = 0, + const CX_CXXPublic = 1, + const CX_CXXProtected = 2, + const CX_CXXPrivate = 3, + } +} + +cenum! { + #[cfg(feature="gte_clang_3_6")] + enum CX_StorageClass { + const CX_SC_Invalid = 0, + const CX_SC_None = 1, + const CX_SC_Extern = 2, + const CX_SC_Static = 3, + const CX_SC_PrivateExtern = 4, + const CX_SC_OpenCLWorkGroupLocal = 5, + const CX_SC_Auto = 6, + const CX_SC_Register = 7, + } +} + +//================================================ +// Flags +//================================================ + +cenum! { + enum CXCodeComplete_Flags { + const CXCodeComplete_IncludeMacros = 1; + const CXCodeComplete_IncludeCodePatterns = 2; + const CXCodeComplete_IncludeBriefComments = 4; + const CXCodeComplete_SkipPreamble = 8; + const CXCodeComplete_IncludeCompletionsWithFixIts = 16; + } +} + +cenum! 
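// The flag-style enums in this section are bit masks rather than plain enumerations.
// Assuming they expand to integer constants (as `cenum!` does for the other enums in
// this module), the CXCodeComplete_Flags values above are OR'd together and passed to
// clang_codeCompleteAt (declared below), e.g.
//   CXCodeComplete_IncludeMacros | CXCodeComplete_IncludeBriefComments
// while the CXCompletionContext_* bits that follow are tested against the c_ulonglong
// bitset returned by clang_codeCompleteGetContexts.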
{ + enum CXCompletionContext { + const CXCompletionContext_Unexposed = 0; + const CXCompletionContext_AnyType = 1; + const CXCompletionContext_AnyValue = 2; + const CXCompletionContext_ObjCObjectValue = 4; + const CXCompletionContext_ObjCSelectorValue = 8; + const CXCompletionContext_CXXClassTypeValue = 16; + const CXCompletionContext_DotMemberAccess = 32; + const CXCompletionContext_ArrowMemberAccess = 64; + const CXCompletionContext_ObjCPropertyAccess = 128; + const CXCompletionContext_EnumTag = 256; + const CXCompletionContext_UnionTag = 512; + const CXCompletionContext_StructTag = 1024; + const CXCompletionContext_ClassTag = 2048; + const CXCompletionContext_Namespace = 4096; + const CXCompletionContext_NestedNameSpecifier = 8192; + const CXCompletionContext_ObjCInterface = 16384; + const CXCompletionContext_ObjCProtocol = 32768; + const CXCompletionContext_ObjCCategory = 65536; + const CXCompletionContext_ObjCInstanceMessage = 131072; + const CXCompletionContext_ObjCClassMessage = 262144; + const CXCompletionContext_ObjCSelectorName = 524288; + const CXCompletionContext_MacroName = 1048576; + const CXCompletionContext_NaturalLanguage = 2097152; + const CXCompletionContext_IncludedFile = 4194304; + const CXCompletionContext_Unknown = 8388607; + } +} + +cenum! { + enum CXDiagnosticDisplayOptions { + const CXDiagnostic_DisplaySourceLocation = 1; + const CXDiagnostic_DisplayColumn = 2; + const CXDiagnostic_DisplaySourceRanges = 4; + const CXDiagnostic_DisplayOption = 8; + const CXDiagnostic_DisplayCategoryId = 16; + const CXDiagnostic_DisplayCategoryName = 32; + } +} + +cenum! { + enum CXGlobalOptFlags { + const CXGlobalOpt_None = 0; + const CXGlobalOpt_ThreadBackgroundPriorityForIndexing = 1; + const CXGlobalOpt_ThreadBackgroundPriorityForEditing = 2; + const CXGlobalOpt_ThreadBackgroundPriorityForAll = 3; + } +} + +cenum! { + enum CXIdxDeclInfoFlags { + const CXIdxDeclFlag_Skipped = 1; + } +} + +cenum! { + enum CXIndexOptFlags { + const CXIndexOptNone = 0; + const CXIndexOptSuppressRedundantRefs = 1; + const CXIndexOptIndexFunctionLocalSymbols = 2; + const CXIndexOptIndexImplicitTemplateInstantiations = 4; + const CXIndexOptSuppressWarnings = 8; + const CXIndexOptSkipParsedBodiesInSession = 16; + } +} + +cenum! { + enum CXNameRefFlags { + const CXNameRange_WantQualifier = 1; + const CXNameRange_WantTemplateArgs = 2; + const CXNameRange_WantSinglePiece = 4; + } +} + +cenum! { + enum CXObjCDeclQualifierKind { + const CXObjCDeclQualifier_None = 0; + const CXObjCDeclQualifier_In = 1; + const CXObjCDeclQualifier_Inout = 2; + const CXObjCDeclQualifier_Out = 4; + const CXObjCDeclQualifier_Bycopy = 8; + const CXObjCDeclQualifier_Byref = 16; + const CXObjCDeclQualifier_Oneway = 32; + } +} + +cenum! { + enum CXObjCPropertyAttrKind { + const CXObjCPropertyAttr_noattr = 0; + const CXObjCPropertyAttr_readonly = 1; + const CXObjCPropertyAttr_getter = 2; + const CXObjCPropertyAttr_assign = 4; + const CXObjCPropertyAttr_readwrite = 8; + const CXObjCPropertyAttr_retain = 16; + const CXObjCPropertyAttr_copy = 32; + const CXObjCPropertyAttr_nonatomic = 64; + const CXObjCPropertyAttr_setter = 128; + const CXObjCPropertyAttr_atomic = 256; + const CXObjCPropertyAttr_weak = 512; + const CXObjCPropertyAttr_strong = 1024; + const CXObjCPropertyAttr_unsafe_unretained = 2048; + #[cfg(feature="gte_clang_3_9")] + const CXObjCPropertyAttr_class = 4096; + } +} + +cenum! { + enum CXReparse_Flags { + const CXReparse_None = 0; + } +} + +cenum! 
{ + enum CXSaveTranslationUnit_Flags { + const CXSaveTranslationUnit_None = 0; + } +} + +cenum! { + #[cfg(feature="gte_clang_7_0")] + enum CXSymbolRole { + const CXSymbolRole_None = 0; + const CXSymbolRole_Declaration = 1; + const CXSymbolRole_Definition = 2; + const CXSymbolRole_Reference = 4; + const CXSymbolRole_Read = 8; + const CXSymbolRole_Write = 16; + const CXSymbolRole_Call = 32; + const CXSymbolRole_Dynamic = 64; + const CXSymbolRole_AddressOf = 128; + const CXSymbolRole_Implicit = 256; + } +} + +cenum! { + enum CXTranslationUnit_Flags { + const CXTranslationUnit_None = 0; + const CXTranslationUnit_DetailedPreprocessingRecord = 1; + const CXTranslationUnit_Incomplete = 2; + const CXTranslationUnit_PrecompiledPreamble = 4; + const CXTranslationUnit_CacheCompletionResults = 8; + const CXTranslationUnit_ForSerialization = 16; + const CXTranslationUnit_CXXChainedPCH = 32; + const CXTranslationUnit_SkipFunctionBodies = 64; + const CXTranslationUnit_IncludeBriefCommentsInCodeCompletion = 128; + #[cfg(feature="gte_clang_3_8")] + const CXTranslationUnit_CreatePreambleOnFirstParse = 256; + #[cfg(feature="gte_clang_3_9")] + const CXTranslationUnit_KeepGoing = 512; + #[cfg(feature="gte_clang_5_0")] + const CXTranslationUnit_SingleFileParse = 1024; + #[cfg(feature="gte_clang_7_0")] + const CXTranslationUnit_LimitSkipFunctionBodiesToPreamble = 2048; + #[cfg(feature="gte_clang_8_0")] + const CXTranslationUnit_IncludeAttributedTypes = 4096; + #[cfg(feature="gte_clang_8_0")] + const CXTranslationUnit_VisitImplicitAttributes = 8192; + #[cfg(feature="gte_clang_9_0")] + const CXTranslationUnit_IgnoreNonErrorsFromIncludedFiles = 16384; + } +} + +//================================================ +// Structs +//================================================ + +// Opaque ________________________________________ + +macro_rules! 
opaque { + ($name:ident) => { + pub type $name = *mut c_void; + }; +} + +opaque!(CXCompilationDatabase); +opaque!(CXCompileCommand); +opaque!(CXCompileCommands); +opaque!(CXCompletionString); +opaque!(CXCursorSet); +opaque!(CXDiagnostic); +opaque!(CXDiagnosticSet); +#[cfg(feature = "gte_clang_3_9")] +opaque!(CXEvalResult); +opaque!(CXFile); +opaque!(CXIdxClientASTFile); +opaque!(CXIdxClientContainer); +opaque!(CXIdxClientEntity); +opaque!(CXIdxClientFile); +opaque!(CXIndex); +opaque!(CXIndexAction); +opaque!(CXModule); +#[cfg(feature = "gte_clang_7_0")] +opaque!(CXPrintingPolicy); +opaque!(CXRemapping); +#[cfg(feature = "gte_clang_5_0")] +opaque!(CXTargetInfo); +opaque!(CXTranslationUnit); + +// Transparent ___________________________________ + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXCodeCompleteResults { + pub Results: *mut CXCompletionResult, + pub NumResults: c_uint, +} + +default!(CXCodeCompleteResults); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXComment { + pub ASTNode: *const c_void, + pub TranslationUnit: CXTranslationUnit, +} + +default!(CXComment); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXCompletionResult { + pub CursorKind: CXCursorKind, + pub CompletionString: CXCompletionString, +} + +default!(CXCompletionResult); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXCursor { + pub kind: CXCursorKind, + pub xdata: c_int, + pub data: [*const c_void; 3], +} + +default!(CXCursor); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXCursorAndRangeVisitor { + pub context: *mut c_void, + pub visit: Option CXVisitorResult>, +} + +default!(CXCursorAndRangeVisitor); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXFileUniqueID { + pub data: [c_ulonglong; 3], +} + +default!(CXFileUniqueID); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxAttrInfo { + pub kind: CXIdxAttrKind, + pub cursor: CXCursor, + pub loc: CXIdxLoc, +} + +default!(CXIdxAttrInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxBaseClassInfo { + pub base: *const CXIdxEntityInfo, + pub cursor: CXCursor, + pub loc: CXIdxLoc, +} + +default!(CXIdxBaseClassInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxCXXClassDeclInfo { + pub declInfo: *const CXIdxDeclInfo, + pub bases: *const *const CXIdxBaseClassInfo, + pub numBases: c_uint, +} + +default!(CXIdxCXXClassDeclInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxContainerInfo { + pub cursor: CXCursor, +} + +default!(CXIdxContainerInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxDeclInfo { + pub entityInfo: *const CXIdxEntityInfo, + pub cursor: CXCursor, + pub loc: CXIdxLoc, + pub semanticContainer: *const CXIdxContainerInfo, + pub lexicalContainer: *const CXIdxContainerInfo, + pub isRedeclaration: c_int, + pub isDefinition: c_int, + pub isContainer: c_int, + pub declAsContainer: *const CXIdxContainerInfo, + pub isImplicit: c_int, + pub attributes: *const *const CXIdxAttrInfo, + pub numAttributes: c_uint, + pub flags: c_uint, +} + +default!(CXIdxDeclInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxEntityInfo { + pub kind: CXIdxEntityKind, + pub templateKind: CXIdxEntityCXXTemplateKind, + pub lang: CXIdxEntityLanguage, + pub name: *const c_char, + pub USR: *const c_char, + pub cursor: CXCursor, + pub attributes: *const *const CXIdxAttrInfo, + pub numAttributes: c_uint, +} + +default!(CXIdxEntityInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxEntityRefInfo { + pub kind: 
CXIdxEntityRefKind, + pub cursor: CXCursor, + pub loc: CXIdxLoc, + pub referencedEntity: *const CXIdxEntityInfo, + pub parentEntity: *const CXIdxEntityInfo, + pub container: *const CXIdxContainerInfo, + #[cfg(feature = "gte_clang_7_0")] + pub role: CXSymbolRole, +} + +default!(CXIdxEntityRefInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxIBOutletCollectionAttrInfo { + pub attrInfo: *const CXIdxAttrInfo, + pub objcClass: *const CXIdxEntityInfo, + pub classCursor: CXCursor, + pub classLoc: CXIdxLoc, +} + +default!(CXIdxIBOutletCollectionAttrInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxImportedASTFileInfo { + pub file: CXFile, + pub module: CXModule, + pub loc: CXIdxLoc, + pub isImplicit: c_int, +} + +default!(CXIdxImportedASTFileInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxIncludedFileInfo { + pub hashLoc: CXIdxLoc, + pub filename: *const c_char, + pub file: CXFile, + pub isImport: c_int, + pub isAngled: c_int, + pub isModuleImport: c_int, +} + +default!(CXIdxIncludedFileInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxLoc { + pub ptr_data: [*mut c_void; 2], + pub int_data: c_uint, +} + +default!(CXIdxLoc); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxObjCCategoryDeclInfo { + pub containerInfo: *const CXIdxObjCContainerDeclInfo, + pub objcClass: *const CXIdxEntityInfo, + pub classCursor: CXCursor, + pub classLoc: CXIdxLoc, + pub protocols: *const CXIdxObjCProtocolRefListInfo, +} + +default!(CXIdxObjCCategoryDeclInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxObjCContainerDeclInfo { + pub declInfo: *const CXIdxDeclInfo, + pub kind: CXIdxObjCContainerKind, +} + +default!(CXIdxObjCContainerDeclInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxObjCInterfaceDeclInfo { + pub containerInfo: *const CXIdxObjCContainerDeclInfo, + pub superInfo: *const CXIdxBaseClassInfo, + pub protocols: *const CXIdxObjCProtocolRefListInfo, +} + +default!(CXIdxObjCInterfaceDeclInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxObjCPropertyDeclInfo { + pub declInfo: *const CXIdxDeclInfo, + pub getter: *const CXIdxEntityInfo, + pub setter: *const CXIdxEntityInfo, +} + +default!(CXIdxObjCPropertyDeclInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxObjCProtocolRefInfo { + pub protocol: *const CXIdxEntityInfo, + pub cursor: CXCursor, + pub loc: CXIdxLoc, +} + +default!(CXIdxObjCProtocolRefInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXIdxObjCProtocolRefListInfo { + pub protocols: *const *const CXIdxObjCProtocolRefInfo, + pub numProtocols: c_uint, +} + +default!(CXIdxObjCProtocolRefListInfo); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXPlatformAvailability { + pub Platform: CXString, + pub Introduced: CXVersion, + pub Deprecated: CXVersion, + pub Obsoleted: CXVersion, + pub Unavailable: c_int, + pub Message: CXString, +} + +default!(CXPlatformAvailability); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXSourceLocation { + pub ptr_data: [*const c_void; 2], + pub int_data: c_uint, +} + +default!(CXSourceLocation); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXSourceRange { + pub ptr_data: [*const c_void; 2], + pub begin_int_data: c_uint, + pub end_int_data: c_uint, +} + +default!(CXSourceRange); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXSourceRangeList { + pub count: c_uint, + pub ranges: *mut CXSourceRange, +} + +default!(CXSourceRangeList); + +#[derive(Copy, Clone, 
Debug)] +#[repr(C)] +pub struct CXString { + pub data: *const c_void, + pub private_flags: c_uint, +} + +default!(CXString); + +#[cfg(feature = "gte_clang_3_8")] +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXStringSet { + pub Strings: *mut CXString, + pub Count: c_uint, +} + +#[cfg(feature = "gte_clang_3_8")] +default!(CXStringSet); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXTUResourceUsage { + pub data: *mut c_void, + pub numEntries: c_uint, + pub entries: *mut CXTUResourceUsageEntry, +} + +default!(CXTUResourceUsage); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXTUResourceUsageEntry { + pub kind: CXTUResourceUsageKind, + pub amount: c_ulong, +} + +default!(CXTUResourceUsageEntry); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXToken { + pub int_data: [c_uint; 4], + pub ptr_data: *mut c_void, +} + +default!(CXToken); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXType { + pub kind: CXTypeKind, + pub data: [*mut c_void; 2], +} + +default!(CXType); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXUnsavedFile { + pub Filename: *const c_char, + pub Contents: *const c_char, + pub Length: c_ulong, +} + +default!(CXUnsavedFile); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CXVersion { + pub Major: c_int, + pub Minor: c_int, + pub Subminor: c_int, +} + +default!(CXVersion); + +#[derive(Copy, Clone, Debug)] +#[repr(C)] +#[rustfmt::skip] +pub struct IndexerCallbacks { + pub abortQuery: Option c_int>, + pub diagnostic: Option, + pub enteredMainFile: Option CXIdxClientFile>, + pub ppIncludedFile: Option CXIdxClientFile>, + pub importedASTFile: Option CXIdxClientASTFile>, + pub startedTranslationUnit: Option CXIdxClientContainer>, + pub indexDeclaration: Option, + pub indexEntityReference: Option, +} + +default!(IndexerCallbacks); + +//================================================ +// Functions +//================================================ + +link! 
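// How the declarations in the following `link!` block bind to `libclang` depends on
// the `runtime` Cargo feature: without it the macro expands to an ordinary `extern`
// block resolved at link time, and with it each function is looked up by name in a
// dynamically loaded shared library (see the `link!` macro in src/link.rs later in
// this diff). Either way these are raw, unsafe FFI calls: owned return values such as
// CXString, CXTranslationUnit, and CXIndex must be released with the matching
// clang_dispose* function, and under the `runtime` feature a shared library must be
// loaded on the calling thread before any of them is invoked.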
{ + pub fn clang_CXCursorSet_contains(set: CXCursorSet, cursor: CXCursor) -> c_uint; + pub fn clang_CXCursorSet_insert(set: CXCursorSet, cursor: CXCursor) -> c_uint; + pub fn clang_CXIndex_getGlobalOptions(index: CXIndex) -> CXGlobalOptFlags; + pub fn clang_CXIndex_setGlobalOptions(index: CXIndex, flags: CXGlobalOptFlags); + #[cfg(feature="gte_clang_6_0")] + pub fn clang_CXIndex_setInvocationEmissionPathOption(index: CXIndex, path: *const c_char); + #[cfg(feature="gte_clang_3_9")] + pub fn clang_CXXConstructor_isConvertingConstructor(cursor: CXCursor) -> c_uint; + #[cfg(feature="gte_clang_3_9")] + pub fn clang_CXXConstructor_isCopyConstructor(cursor: CXCursor) -> c_uint; + #[cfg(feature="gte_clang_3_9")] + pub fn clang_CXXConstructor_isDefaultConstructor(cursor: CXCursor) -> c_uint; + #[cfg(feature="gte_clang_3_9")] + pub fn clang_CXXConstructor_isMoveConstructor(cursor: CXCursor) -> c_uint; + #[cfg(feature="gte_clang_3_8")] + pub fn clang_CXXField_isMutable(cursor: CXCursor) -> c_uint; + pub fn clang_CXXMethod_isConst(cursor: CXCursor) -> c_uint; + #[cfg(feature="gte_clang_3_9")] + pub fn clang_CXXMethod_isDefaulted(cursor: CXCursor) -> c_uint; + pub fn clang_CXXMethod_isPureVirtual(cursor: CXCursor) -> c_uint; + pub fn clang_CXXMethod_isStatic(cursor: CXCursor) -> c_uint; + pub fn clang_CXXMethod_isVirtual(cursor: CXCursor) -> c_uint; + #[cfg(feature="gte_clang_6_0")] + pub fn clang_CXXRecord_isAbstract(cursor: CXCursor) -> c_uint; + pub fn clang_CompilationDatabase_dispose(database: CXCompilationDatabase); + pub fn clang_CompilationDatabase_fromDirectory(directory: *const c_char, error: *mut CXCompilationDatabase_Error) -> CXCompilationDatabase; + pub fn clang_CompilationDatabase_getAllCompileCommands(database: CXCompilationDatabase) -> CXCompileCommands; + pub fn clang_CompilationDatabase_getCompileCommands(database: CXCompilationDatabase, filename: *const c_char) -> CXCompileCommands; + pub fn clang_CompileCommand_getArg(command: CXCompileCommand, index: c_uint) -> CXString; + pub fn clang_CompileCommand_getDirectory(command: CXCompileCommand) -> CXString; + #[cfg(feature="gte_clang_3_8")] + pub fn clang_CompileCommand_getFilename(command: CXCompileCommand) -> CXString; + #[cfg(feature="gte_clang_3_8")] + pub fn clang_CompileCommand_getMappedSourceContent(command: CXCompileCommand, index: c_uint) -> CXString; + #[cfg(feature="gte_clang_3_8")] + pub fn clang_CompileCommand_getMappedSourcePath(command: CXCompileCommand, index: c_uint) -> CXString; + pub fn clang_CompileCommand_getNumArgs(command: CXCompileCommand) -> c_uint; + pub fn clang_CompileCommand_getNumMappedSources(command: CXCompileCommand) -> c_uint; + pub fn clang_CompileCommands_dispose(command: CXCompileCommands); + pub fn clang_CompileCommands_getCommand(command: CXCompileCommands, index: c_uint) -> CXCompileCommand; + pub fn clang_CompileCommands_getSize(command: CXCompileCommands) -> c_uint; + #[cfg(feature="gte_clang_3_9")] + pub fn clang_Cursor_Evaluate(cursor: CXCursor) -> CXEvalResult; + pub fn clang_Cursor_getArgument(cursor: CXCursor, index: c_uint) -> CXCursor; + pub fn clang_Cursor_getBriefCommentText(cursor: CXCursor) -> CXString; + #[cfg(feature="gte_clang_3_8")] + pub fn clang_Cursor_getCXXManglings(cursor: CXCursor) -> *mut CXStringSet; + pub fn clang_Cursor_getCommentRange(cursor: CXCursor) -> CXSourceRange; + #[cfg(feature="gte_clang_3_6")] + pub fn clang_Cursor_getMangling(cursor: CXCursor) -> CXString; + pub fn clang_Cursor_getModule(cursor: CXCursor) -> CXModule; + pub fn 
clang_Cursor_getNumArguments(cursor: CXCursor) -> c_int; + #[cfg(feature="gte_clang_3_6")] + pub fn clang_Cursor_getNumTemplateArguments(cursor: CXCursor) -> c_int; + pub fn clang_Cursor_getObjCDeclQualifiers(cursor: CXCursor) -> CXObjCDeclQualifierKind; + #[cfg(feature="gte_clang_6_0")] + pub fn clang_Cursor_getObjCManglings(cursor: CXCursor) -> *mut CXStringSet; + pub fn clang_Cursor_getObjCPropertyAttributes(cursor: CXCursor, reserved: c_uint) -> CXObjCPropertyAttrKind; + #[cfg(feature="gte_clang_8_0")] + pub fn clang_Cursor_getObjCPropertyGetterName(cursor: CXCursor) -> CXString; + #[cfg(feature="gte_clang_8_0")] + pub fn clang_Cursor_getObjCPropertySetterName(cursor: CXCursor) -> CXString; + pub fn clang_Cursor_getObjCSelectorIndex(cursor: CXCursor) -> c_int; + #[cfg(feature="gte_clang_3_7")] + pub fn clang_Cursor_getOffsetOfField(cursor: CXCursor) -> c_longlong; + #[cfg(feature="gte_clang_9_0")] + pub fn clang_Cursor_isAnonymousRecordDecl(cursor: CXCursor) -> c_uint; + #[cfg(feature="gte_clang_9_0")] + pub fn clang_Cursor_isInlineNamespace(cursor: CXCursor) -> c_uint; + pub fn clang_Cursor_getRawCommentText(cursor: CXCursor) -> CXString; + pub fn clang_Cursor_getReceiverType(cursor: CXCursor) -> CXType; + pub fn clang_Cursor_getSpellingNameRange(cursor: CXCursor, index: c_uint, reserved: c_uint) -> CXSourceRange; + #[cfg(feature="gte_clang_3_6")] + pub fn clang_Cursor_getStorageClass(cursor: CXCursor) -> CX_StorageClass; + #[cfg(feature="gte_clang_3_6")] + pub fn clang_Cursor_getTemplateArgumentKind(cursor: CXCursor, index: c_uint) -> CXTemplateArgumentKind; + #[cfg(feature="gte_clang_3_6")] + pub fn clang_Cursor_getTemplateArgumentType(cursor: CXCursor, index: c_uint) -> CXType; + #[cfg(feature="gte_clang_3_6")] + pub fn clang_Cursor_getTemplateArgumentUnsignedValue(cursor: CXCursor, index: c_uint) -> c_ulonglong; + #[cfg(feature="gte_clang_3_6")] + pub fn clang_Cursor_getTemplateArgumentValue(cursor: CXCursor, index: c_uint) -> c_longlong; + pub fn clang_Cursor_getTranslationUnit(cursor: CXCursor) -> CXTranslationUnit; + #[cfg(feature="gte_clang_3_9")] + pub fn clang_Cursor_hasAttrs(cursor: CXCursor) -> c_uint; + #[cfg(feature="gte_clang_3_7")] + pub fn clang_Cursor_isAnonymous(cursor: CXCursor) -> c_uint; + pub fn clang_Cursor_isBitField(cursor: CXCursor) -> c_uint; + pub fn clang_Cursor_isDynamicCall(cursor: CXCursor) -> c_int; + #[cfg(feature="gte_clang_5_0")] + pub fn clang_Cursor_isExternalSymbol(cursor: CXCursor, language: *mut CXString, from: *mut CXString, generated: *mut c_uint) -> c_uint; + #[cfg(feature="gte_clang_3_9")] + pub fn clang_Cursor_isFunctionInlined(cursor: CXCursor) -> c_uint; + #[cfg(feature="gte_clang_3_9")] + pub fn clang_Cursor_isMacroBuiltin(cursor: CXCursor) -> c_uint; + #[cfg(feature="gte_clang_3_9")] + pub fn clang_Cursor_isMacroFunctionLike(cursor: CXCursor) -> c_uint; + pub fn clang_Cursor_isNull(cursor: CXCursor) -> c_int; + pub fn clang_Cursor_isObjCOptional(cursor: CXCursor) -> c_uint; + pub fn clang_Cursor_isVariadic(cursor: CXCursor) -> c_uint; + #[cfg(feature="gte_clang_5_0")] + pub fn clang_EnumDecl_isScoped(cursor: CXCursor) -> c_uint; + #[cfg(feature="gte_clang_3_9")] + pub fn clang_EvalResult_dispose(result: CXEvalResult); + #[cfg(feature="gte_clang_3_9")] + pub fn clang_EvalResult_getAsDouble(result: CXEvalResult) -> libc::c_double; + #[cfg(feature="gte_clang_3_9")] + pub fn clang_EvalResult_getAsInt(result: CXEvalResult) -> c_int; + #[cfg(feature="gte_clang_4_0")] + pub fn clang_EvalResult_getAsLongLong(result: CXEvalResult) -> 
c_longlong; + #[cfg(feature="gte_clang_3_9")] + pub fn clang_EvalResult_getAsStr(result: CXEvalResult) -> *const c_char; + #[cfg(feature="gte_clang_4_0")] + pub fn clang_EvalResult_getAsUnsigned(result: CXEvalResult) -> c_ulonglong; + #[cfg(feature="gte_clang_3_9")] + pub fn clang_EvalResult_getKind(result: CXEvalResult) -> CXEvalResultKind; + #[cfg(feature="gte_clang_4_0")] + pub fn clang_EvalResult_isUnsignedInt(result: CXEvalResult) -> c_uint; + #[cfg(feature="gte_clang_3_6")] + pub fn clang_File_isEqual(left: CXFile, right: CXFile) -> c_int; + #[cfg(feature="gte_clang_7_0")] + pub fn clang_File_tryGetRealPathName(file: CXFile) -> CXString; + pub fn clang_IndexAction_create(index: CXIndex) -> CXIndexAction; + pub fn clang_IndexAction_dispose(index: CXIndexAction); + pub fn clang_Location_isFromMainFile(location: CXSourceLocation) -> c_int; + pub fn clang_Location_isInSystemHeader(location: CXSourceLocation) -> c_int; + pub fn clang_Module_getASTFile(module: CXModule) -> CXFile; + pub fn clang_Module_getFullName(module: CXModule) -> CXString; + pub fn clang_Module_getName(module: CXModule) -> CXString; + pub fn clang_Module_getNumTopLevelHeaders(tu: CXTranslationUnit, module: CXModule) -> c_uint; + pub fn clang_Module_getParent(module: CXModule) -> CXModule; + pub fn clang_Module_getTopLevelHeader(tu: CXTranslationUnit, module: CXModule, index: c_uint) -> CXFile; + pub fn clang_Module_isSystem(module: CXModule) -> c_int; + #[cfg(feature="gte_clang_7_0")] + pub fn clang_PrintingPolicy_dispose(policy: CXPrintingPolicy); + #[cfg(feature="gte_clang_7_0")] + pub fn clang_PrintingPolicy_getProperty(policy: CXPrintingPolicy, property: CXPrintingPolicyProperty) -> c_uint; + #[cfg(feature="gte_clang_7_0")] + pub fn clang_PrintingPolicy_setProperty(policy: CXPrintingPolicy, property: CXPrintingPolicyProperty, value: c_uint); + pub fn clang_Range_isNull(range: CXSourceRange) -> c_int; + #[cfg(feature="gte_clang_5_0")] + pub fn clang_TargetInfo_dispose(info: CXTargetInfo); + #[cfg(feature="gte_clang_5_0")] + pub fn clang_TargetInfo_getPointerWidth(info: CXTargetInfo) -> c_int; + #[cfg(feature="gte_clang_5_0")] + pub fn clang_TargetInfo_getTriple(info: CXTargetInfo) -> CXString; + pub fn clang_Type_getAlignOf(type_: CXType) -> c_longlong; + pub fn clang_Type_getCXXRefQualifier(type_: CXType) -> CXRefQualifierKind; + pub fn clang_Type_getClassType(type_: CXType) -> CXType; + #[cfg(feature="gte_clang_3_9")] + pub fn clang_Type_getNamedType(type_: CXType) -> CXType; + pub fn clang_Type_getNumTemplateArguments(type_: CXType) -> c_int; + #[cfg(feature="gte_clang_8_0")] + pub fn clang_Type_getObjCObjectBaseType(type_: CXType) -> CXType; + #[cfg(feature="gte_clang_8_0")] + pub fn clang_Type_getNumObjCProtocolRefs(type_: CXType) -> c_uint; + #[cfg(feature="gte_clang_8_0")] + pub fn clang_Type_getObjCProtocolDecl(type_: CXType, index: c_uint) -> CXCursor; + #[cfg(feature="gte_clang_8_0")] + pub fn clang_Type_getNumObjCTypeArgs(type_: CXType) -> c_uint; + #[cfg(feature="gte_clang_8_0")] + pub fn clang_Type_getObjCTypeArg(type_: CXType, index: c_uint) -> CXType; + #[cfg(feature="gte_clang_3_9")] + pub fn clang_Type_getObjCEncoding(type_: CXType) -> CXString; + pub fn clang_Type_getOffsetOf(type_: CXType, field: *const c_char) -> c_longlong; + #[cfg(feature="gte_clang_8_0")] + pub fn clang_Type_getModifiedType(type_: CXType) -> CXType; + pub fn clang_Type_getSizeOf(type_: CXType) -> c_longlong; + pub fn clang_Type_getTemplateArgumentAsType(type_: CXType, index: c_uint) -> CXType; + 
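// The layout queries above (clang_Type_getAlignOf, clang_Type_getOffsetOf,
// clang_Type_getSizeOf) overload their c_longlong result: non-negative values are the
// requested size or offset, while negative values are the CXTypeLayoutError_* codes
// defined earlier (e.g. CXTypeLayoutError_Incomplete == -2). Callers need to check the
// sign before using the value:
//   let size = clang_Type_getSizeOf(ty); // < 0 means a CXTypeLayoutError code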
#[cfg(feature="gte_clang_5_0")] + pub fn clang_Type_isTransparentTagTypedef(type_: CXType) -> c_uint; + #[cfg(feature="gte_clang_8_0")] + pub fn clang_Type_getNullability(type_: CXType) -> CXTypeNullabilityKind; + #[cfg(feature="gte_clang_3_7")] + pub fn clang_Type_visitFields(type_: CXType, visitor: CXFieldVisitor, data: CXClientData) -> CXVisitorResult; + pub fn clang_annotateTokens(tu: CXTranslationUnit, tokens: *mut CXToken, n_tokens: c_uint, cursors: *mut CXCursor); + pub fn clang_codeCompleteAt(tu: CXTranslationUnit, file: *const c_char, line: c_uint, column: c_uint, unsaved: *mut CXUnsavedFile, n_unsaved: c_uint, flags: CXCodeComplete_Flags) -> *mut CXCodeCompleteResults; + pub fn clang_codeCompleteGetContainerKind(results: *mut CXCodeCompleteResults, incomplete: *mut c_uint) -> CXCursorKind; + pub fn clang_codeCompleteGetContainerUSR(results: *mut CXCodeCompleteResults) -> CXString; + pub fn clang_codeCompleteGetContexts(results: *mut CXCodeCompleteResults) -> c_ulonglong; + pub fn clang_codeCompleteGetDiagnostic(results: *mut CXCodeCompleteResults, index: c_uint) -> CXDiagnostic; + pub fn clang_codeCompleteGetNumDiagnostics(results: *mut CXCodeCompleteResults) -> c_uint; + pub fn clang_codeCompleteGetObjCSelector(results: *mut CXCodeCompleteResults) -> CXString; + pub fn clang_constructUSR_ObjCCategory(class: *const c_char, category: *const c_char) -> CXString; + pub fn clang_constructUSR_ObjCClass(class: *const c_char) -> CXString; + pub fn clang_constructUSR_ObjCIvar(name: *const c_char, usr: CXString) -> CXString; + pub fn clang_constructUSR_ObjCMethod(name: *const c_char, instance: c_uint, usr: CXString) -> CXString; + pub fn clang_constructUSR_ObjCProperty(property: *const c_char, usr: CXString) -> CXString; + pub fn clang_constructUSR_ObjCProtocol(protocol: *const c_char) -> CXString; + pub fn clang_createCXCursorSet() -> CXCursorSet; + pub fn clang_createIndex(exclude: c_int, display: c_int) -> CXIndex; + pub fn clang_createTranslationUnit(index: CXIndex, file: *const c_char) -> CXTranslationUnit; + pub fn clang_createTranslationUnit2(index: CXIndex, file: *const c_char, tu: *mut CXTranslationUnit) -> CXErrorCode; + pub fn clang_createTranslationUnitFromSourceFile(index: CXIndex, file: *const c_char, n_arguments: c_int, arguments: *const *const c_char, n_unsaved: c_uint, unsaved: *mut CXUnsavedFile) -> CXTranslationUnit; + pub fn clang_defaultCodeCompleteOptions() -> CXCodeComplete_Flags; + pub fn clang_defaultDiagnosticDisplayOptions() -> CXDiagnosticDisplayOptions; + pub fn clang_defaultEditingTranslationUnitOptions() -> CXTranslationUnit_Flags; + pub fn clang_defaultReparseOptions(tu: CXTranslationUnit) -> CXReparse_Flags; + pub fn clang_defaultSaveOptions(tu: CXTranslationUnit) -> CXSaveTranslationUnit_Flags; + pub fn clang_disposeCXCursorSet(set: CXCursorSet); + pub fn clang_disposeCXPlatformAvailability(availability: *mut CXPlatformAvailability); + pub fn clang_disposeCXTUResourceUsage(usage: CXTUResourceUsage); + pub fn clang_disposeCodeCompleteResults(results: *mut CXCodeCompleteResults); + pub fn clang_disposeDiagnostic(diagnostic: CXDiagnostic); + pub fn clang_disposeDiagnosticSet(diagnostic: CXDiagnosticSet); + pub fn clang_disposeIndex(index: CXIndex); + pub fn clang_disposeOverriddenCursors(cursors: *mut CXCursor); + pub fn clang_disposeSourceRangeList(list: *mut CXSourceRangeList); + pub fn clang_disposeString(string: CXString); + #[cfg(feature="gte_clang_3_8")] + pub fn clang_disposeStringSet(set: *mut CXStringSet); + pub fn clang_disposeTokens(tu: 
CXTranslationUnit, tokens: *mut CXToken, n_tokens: c_uint); + pub fn clang_disposeTranslationUnit(tu: CXTranslationUnit); + pub fn clang_enableStackTraces(); + pub fn clang_equalCursors(left: CXCursor, right: CXCursor) -> c_uint; + pub fn clang_equalLocations(left: CXSourceLocation, right: CXSourceLocation) -> c_uint; + pub fn clang_equalRanges(left: CXSourceRange, right: CXSourceRange) -> c_uint; + pub fn clang_equalTypes(left: CXType, right: CXType) -> c_uint; + pub fn clang_executeOnThread(function: extern fn(*mut c_void), data: *mut c_void, stack: c_uint); + pub fn clang_findIncludesInFile(tu: CXTranslationUnit, file: CXFile, cursor: CXCursorAndRangeVisitor) -> CXResult; + pub fn clang_findReferencesInFile(cursor: CXCursor, file: CXFile, visitor: CXCursorAndRangeVisitor) -> CXResult; + pub fn clang_formatDiagnostic(diagnostic: CXDiagnostic, flags: CXDiagnosticDisplayOptions) -> CXString; + #[cfg(feature="gte_clang_3_7")] + pub fn clang_free(buffer: *mut c_void); + #[cfg(feature="gte_clang_5_0")] + pub fn clang_getAddressSpace(type_: CXType) -> c_uint; + #[cfg(feature="gte_clang_4_0")] + pub fn clang_getAllSkippedRanges(tu: CXTranslationUnit) -> *mut CXSourceRangeList; + pub fn clang_getArgType(type_: CXType, index: c_uint) -> CXType; + pub fn clang_getArrayElementType(type_: CXType) -> CXType; + pub fn clang_getArraySize(type_: CXType) -> c_longlong; + pub fn clang_getCString(string: CXString) -> *const c_char; + pub fn clang_getCXTUResourceUsage(tu: CXTranslationUnit) -> CXTUResourceUsage; + pub fn clang_getCXXAccessSpecifier(cursor: CXCursor) -> CX_CXXAccessSpecifier; + pub fn clang_getCanonicalCursor(cursor: CXCursor) -> CXCursor; + pub fn clang_getCanonicalType(type_: CXType) -> CXType; + pub fn clang_getChildDiagnostics(diagnostic: CXDiagnostic) -> CXDiagnosticSet; + pub fn clang_getClangVersion() -> CXString; + pub fn clang_getCompletionAnnotation(string: CXCompletionString, index: c_uint) -> CXString; + pub fn clang_getCompletionAvailability(string: CXCompletionString) -> CXAvailabilityKind; + pub fn clang_getCompletionBriefComment(string: CXCompletionString) -> CXString; + pub fn clang_getCompletionChunkCompletionString(string: CXCompletionString, index: c_uint) -> CXCompletionString; + pub fn clang_getCompletionChunkKind(string: CXCompletionString, index: c_uint) -> CXCompletionChunkKind; + pub fn clang_getCompletionChunkText(string: CXCompletionString, index: c_uint) -> CXString; + #[cfg(feature="gte_clang_7_0")] + pub fn clang_getCompletionFixIt(results: *mut CXCodeCompleteResults, completion_index: c_uint, fixit_index: c_uint, range: *mut CXSourceRange) -> CXString; + pub fn clang_getCompletionNumAnnotations(string: CXCompletionString) -> c_uint; + #[cfg(feature="gte_clang_7_0")] + pub fn clang_getCompletionNumFixIts(results: *mut CXCodeCompleteResults, completion_index: c_uint) -> c_uint; + pub fn clang_getCompletionParent(string: CXCompletionString, kind: *mut CXCursorKind) -> CXString; + pub fn clang_getCompletionPriority(string: CXCompletionString) -> c_uint; + pub fn clang_getCursor(tu: CXTranslationUnit, location: CXSourceLocation) -> CXCursor; + pub fn clang_getCursorAvailability(cursor: CXCursor) -> CXAvailabilityKind; + pub fn clang_getCursorCompletionString(cursor: CXCursor) -> CXCompletionString; + pub fn clang_getCursorDefinition(cursor: CXCursor) -> CXCursor; + pub fn clang_getCursorDisplayName(cursor: CXCursor) -> CXString; + #[cfg(feature="gte_clang_5_0")] + pub fn clang_getCursorExceptionSpecificationType(cursor: CXCursor) -> 
CXCursor_ExceptionSpecificationKind; + pub fn clang_getCursorExtent(cursor: CXCursor) -> CXSourceRange; + pub fn clang_getCursorKind(cursor: CXCursor) -> CXCursorKind; + pub fn clang_getCursorKindSpelling(kind: CXCursorKind) -> CXString; + pub fn clang_getCursorLanguage(cursor: CXCursor) -> CXLanguageKind; + pub fn clang_getCursorLexicalParent(cursor: CXCursor) -> CXCursor; + pub fn clang_getCursorLinkage(cursor: CXCursor) -> CXLinkageKind; + pub fn clang_getCursorLocation(cursor: CXCursor) -> CXSourceLocation; + pub fn clang_getCursorPlatformAvailability(cursor: CXCursor, deprecated: *mut c_int, deprecated_message: *mut CXString, unavailable: *mut c_int, unavailable_message: *mut CXString, availability: *mut CXPlatformAvailability, n_availability: c_int) -> c_int; + #[cfg(feature="gte_clang_7_0")] + pub fn clang_getCursorPrettyPrinted(cursor: CXCursor, policy: CXPrintingPolicy) -> CXString; + #[cfg(feature="gte_clang_7_0")] + pub fn clang_getCursorPrintingPolicy(cursor: CXCursor) -> CXPrintingPolicy; + pub fn clang_getCursorReferenceNameRange(cursor: CXCursor, flags: CXNameRefFlags, index: c_uint) -> CXSourceRange; + pub fn clang_getCursorReferenced(cursor: CXCursor) -> CXCursor; + pub fn clang_getCursorResultType(cursor: CXCursor) -> CXType; + pub fn clang_getCursorSemanticParent(cursor: CXCursor) -> CXCursor; + pub fn clang_getCursorSpelling(cursor: CXCursor) -> CXString; + #[cfg(feature="gte_clang_6_0")] + pub fn clang_getCursorTLSKind(cursor: CXCursor) -> CXTLSKind; + pub fn clang_getCursorType(cursor: CXCursor) -> CXType; + pub fn clang_getCursorUSR(cursor: CXCursor) -> CXString; + #[cfg(feature="gte_clang_3_8")] + pub fn clang_getCursorVisibility(cursor: CXCursor) -> CXVisibilityKind; + pub fn clang_getDeclObjCTypeEncoding(cursor: CXCursor) -> CXString; + pub fn clang_getDefinitionSpellingAndExtent(cursor: CXCursor, start: *mut *const c_char, end: *mut *const c_char, start_line: *mut c_uint, start_column: *mut c_uint, end_line: *mut c_uint, end_column: *mut c_uint); + pub fn clang_getDiagnostic(tu: CXTranslationUnit, index: c_uint) -> CXDiagnostic; + pub fn clang_getDiagnosticCategory(diagnostic: CXDiagnostic) -> c_uint; + pub fn clang_getDiagnosticCategoryName(category: c_uint) -> CXString; + pub fn clang_getDiagnosticCategoryText(diagnostic: CXDiagnostic) -> CXString; + pub fn clang_getDiagnosticFixIt(diagnostic: CXDiagnostic, index: c_uint, range: *mut CXSourceRange) -> CXString; + pub fn clang_getDiagnosticInSet(diagnostic: CXDiagnosticSet, index: c_uint) -> CXDiagnostic; + pub fn clang_getDiagnosticLocation(diagnostic: CXDiagnostic) -> CXSourceLocation; + pub fn clang_getDiagnosticNumFixIts(diagnostic: CXDiagnostic) -> c_uint; + pub fn clang_getDiagnosticNumRanges(diagnostic: CXDiagnostic) -> c_uint; + pub fn clang_getDiagnosticOption(diagnostic: CXDiagnostic, option: *mut CXString) -> CXString; + pub fn clang_getDiagnosticRange(diagnostic: CXDiagnostic, index: c_uint) -> CXSourceRange; + pub fn clang_getDiagnosticSetFromTU(tu: CXTranslationUnit) -> CXDiagnosticSet; + pub fn clang_getDiagnosticSeverity(diagnostic: CXDiagnostic) -> CXDiagnosticSeverity; + pub fn clang_getDiagnosticSpelling(diagnostic: CXDiagnostic) -> CXString; + pub fn clang_getElementType(type_: CXType) -> CXType; + pub fn clang_getEnumConstantDeclUnsignedValue(cursor: CXCursor) -> c_ulonglong; + pub fn clang_getEnumConstantDeclValue(cursor: CXCursor) -> c_longlong; + pub fn clang_getEnumDeclIntegerType(cursor: CXCursor) -> CXType; + #[cfg(feature="gte_clang_5_0")] + pub fn 
clang_getExceptionSpecificationType(type_: CXType) -> CXCursor_ExceptionSpecificationKind; + pub fn clang_getExpansionLocation(location: CXSourceLocation, file: *mut CXFile, line: *mut c_uint, column: *mut c_uint, offset: *mut c_uint); + pub fn clang_getFieldDeclBitWidth(cursor: CXCursor) -> c_int; + pub fn clang_getFile(tu: CXTranslationUnit, file: *const c_char) -> CXFile; + #[cfg(feature="gte_clang_6_0")] + pub fn clang_getFileContents(tu: CXTranslationUnit, file: CXFile, size: *mut size_t) -> *const c_char; + pub fn clang_getFileLocation(location: CXSourceLocation, file: *mut CXFile, line: *mut c_uint, column: *mut c_uint, offset: *mut c_uint); + pub fn clang_getFileName(file: CXFile) -> CXString; + pub fn clang_getFileTime(file: CXFile) -> time_t; + pub fn clang_getFileUniqueID(file: CXFile, id: *mut CXFileUniqueID) -> c_int; + pub fn clang_getFunctionTypeCallingConv(type_: CXType) -> CXCallingConv; + pub fn clang_getIBOutletCollectionType(cursor: CXCursor) -> CXType; + pub fn clang_getIncludedFile(cursor: CXCursor) -> CXFile; + pub fn clang_getInclusions(tu: CXTranslationUnit, visitor: CXInclusionVisitor, data: CXClientData); + pub fn clang_getInstantiationLocation(location: CXSourceLocation, file: *mut CXFile, line: *mut c_uint, column: *mut c_uint, offset: *mut c_uint); + pub fn clang_getLocation(tu: CXTranslationUnit, file: CXFile, line: c_uint, column: c_uint) -> CXSourceLocation; + pub fn clang_getLocationForOffset(tu: CXTranslationUnit, file: CXFile, offset: c_uint) -> CXSourceLocation; + pub fn clang_getModuleForFile(tu: CXTranslationUnit, file: CXFile) -> CXModule; + pub fn clang_getNullCursor() -> CXCursor; + pub fn clang_getNullLocation() -> CXSourceLocation; + pub fn clang_getNullRange() -> CXSourceRange; + pub fn clang_getNumArgTypes(type_: CXType) -> c_int; + pub fn clang_getNumCompletionChunks(string: CXCompletionString) -> c_uint; + pub fn clang_getNumDiagnostics(tu: CXTranslationUnit) -> c_uint; + pub fn clang_getNumDiagnosticsInSet(diagnostic: CXDiagnosticSet) -> c_uint; + pub fn clang_getNumElements(type_: CXType) -> c_longlong; + pub fn clang_getNumOverloadedDecls(cursor: CXCursor) -> c_uint; + pub fn clang_getOverloadedDecl(cursor: CXCursor, index: c_uint) -> CXCursor; + pub fn clang_getOverriddenCursors(cursor: CXCursor, cursors: *mut *mut CXCursor, n_cursors: *mut c_uint); + pub fn clang_getPointeeType(type_: CXType) -> CXType; + pub fn clang_getPresumedLocation(location: CXSourceLocation, file: *mut CXString, line: *mut c_uint, column: *mut c_uint); + pub fn clang_getRange(start: CXSourceLocation, end: CXSourceLocation) -> CXSourceRange; + pub fn clang_getRangeEnd(range: CXSourceRange) -> CXSourceLocation; + pub fn clang_getRangeStart(range: CXSourceRange) -> CXSourceLocation; + pub fn clang_getRemappings(file: *const c_char) -> CXRemapping; + pub fn clang_getRemappingsFromFileList(files: *mut *const c_char, n_files: c_uint) -> CXRemapping; + pub fn clang_getResultType(type_: CXType) -> CXType; + pub fn clang_getSkippedRanges(tu: CXTranslationUnit, file: CXFile) -> *mut CXSourceRangeList; + pub fn clang_getSpecializedCursorTemplate(cursor: CXCursor) -> CXCursor; + pub fn clang_getSpellingLocation(location: CXSourceLocation, file: *mut CXFile, line: *mut c_uint, column: *mut c_uint, offset: *mut c_uint); + pub fn clang_getTUResourceUsageName(kind: CXTUResourceUsageKind) -> *const c_char; + #[cfg(feature="gte_clang_5_0")] + pub fn clang_getTranslationUnitTargetInfo(tu: CXTranslationUnit) -> CXTargetInfo; + pub fn clang_getTemplateCursorKind(cursor: CXCursor) -> 
CXCursorKind; + pub fn clang_getTokenExtent(tu: CXTranslationUnit, token: CXToken) -> CXSourceRange; + pub fn clang_getTokenKind(token: CXToken) -> CXTokenKind; + pub fn clang_getTokenLocation(tu: CXTranslationUnit, token: CXToken) -> CXSourceLocation; + pub fn clang_getTokenSpelling(tu: CXTranslationUnit, token: CXToken) -> CXString; + pub fn clang_getTranslationUnitCursor(tu: CXTranslationUnit) -> CXCursor; + pub fn clang_getTranslationUnitSpelling(tu: CXTranslationUnit) -> CXString; + pub fn clang_getTypeDeclaration(type_: CXType) -> CXCursor; + pub fn clang_getTypeKindSpelling(type_: CXTypeKind) -> CXString; + pub fn clang_getTypeSpelling(type_: CXType) -> CXString; + pub fn clang_getTypedefDeclUnderlyingType(cursor: CXCursor) -> CXType; + #[cfg(feature="gte_clang_5_0")] + pub fn clang_getTypedefName(type_: CXType) -> CXString; + pub fn clang_hashCursor(cursor: CXCursor) -> c_uint; + pub fn clang_indexLoc_getCXSourceLocation(location: CXIdxLoc) -> CXSourceLocation; + pub fn clang_indexLoc_getFileLocation(location: CXIdxLoc, index_file: *mut CXIdxClientFile, file: *mut CXFile, line: *mut c_uint, column: *mut c_uint, offset: *mut c_uint); + pub fn clang_indexSourceFile(index: CXIndexAction, data: CXClientData, callbacks: *mut IndexerCallbacks, n_callbacks: c_uint, index_flags: CXIndexOptFlags, file: *const c_char, arguments: *const *const c_char, n_arguments: c_int, unsaved: *mut CXUnsavedFile, n_unsaved: c_uint, tu: *mut CXTranslationUnit, tu_flags: CXTranslationUnit_Flags) -> CXErrorCode; + #[cfg(feature="gte_clang_3_8")] + pub fn clang_indexSourceFileFullArgv(index: CXIndexAction, data: CXClientData, callbacks: *mut IndexerCallbacks, n_callbacks: c_uint, index_flags: CXIndexOptFlags, file: *const c_char, arguments: *const *const c_char, n_arguments: c_int, unsaved: *mut CXUnsavedFile, n_unsaved: c_uint, tu: *mut CXTranslationUnit, tu_flags: CXTranslationUnit_Flags) -> CXErrorCode; + pub fn clang_indexTranslationUnit(index: CXIndexAction, data: CXClientData, callbacks: *mut IndexerCallbacks, n_callbacks: c_uint, flags: CXIndexOptFlags, tu: CXTranslationUnit) -> c_int; + pub fn clang_index_getCXXClassDeclInfo(info: *const CXIdxDeclInfo) -> *const CXIdxCXXClassDeclInfo; + pub fn clang_index_getClientContainer(info: *const CXIdxContainerInfo) -> CXIdxClientContainer; + pub fn clang_index_getClientEntity(info: *const CXIdxEntityInfo) -> CXIdxClientEntity; + pub fn clang_index_getIBOutletCollectionAttrInfo(info: *const CXIdxAttrInfo) -> *const CXIdxIBOutletCollectionAttrInfo; + pub fn clang_index_getObjCCategoryDeclInfo(info: *const CXIdxDeclInfo) -> *const CXIdxObjCCategoryDeclInfo; + pub fn clang_index_getObjCContainerDeclInfo(info: *const CXIdxDeclInfo) -> *const CXIdxObjCContainerDeclInfo; + pub fn clang_index_getObjCInterfaceDeclInfo(info: *const CXIdxDeclInfo) -> *const CXIdxObjCInterfaceDeclInfo; + pub fn clang_index_getObjCPropertyDeclInfo(info: *const CXIdxDeclInfo) -> *const CXIdxObjCPropertyDeclInfo; + pub fn clang_index_getObjCProtocolRefListInfo(info: *const CXIdxDeclInfo) -> *const CXIdxObjCProtocolRefListInfo; + pub fn clang_index_isEntityObjCContainerKind(info: CXIdxEntityKind) -> c_int; + pub fn clang_index_setClientContainer(info: *const CXIdxContainerInfo, container: CXIdxClientContainer); + pub fn clang_index_setClientEntity(info: *const CXIdxEntityInfo, entity: CXIdxClientEntity); + pub fn clang_isAttribute(kind: CXCursorKind) -> c_uint; + pub fn clang_isConstQualifiedType(type_: CXType) -> c_uint; + pub fn clang_isCursorDefinition(cursor: CXCursor) -> c_uint; + pub fn 
clang_isDeclaration(kind: CXCursorKind) -> c_uint; + pub fn clang_isExpression(kind: CXCursorKind) -> c_uint; + pub fn clang_isFileMultipleIncludeGuarded(tu: CXTranslationUnit, file: CXFile) -> c_uint; + pub fn clang_isFunctionTypeVariadic(type_: CXType) -> c_uint; + pub fn clang_isInvalid(kind: CXCursorKind) -> c_uint; + #[cfg(feature="gte_clang_7_0")] + pub fn clang_isInvalidDeclaration(cursor: CXCursor) -> c_uint; + pub fn clang_isPODType(type_: CXType) -> c_uint; + pub fn clang_isPreprocessing(kind: CXCursorKind) -> c_uint; + pub fn clang_isReference(kind: CXCursorKind) -> c_uint; + pub fn clang_isRestrictQualifiedType(type_: CXType) -> c_uint; + pub fn clang_isStatement(kind: CXCursorKind) -> c_uint; + pub fn clang_isTranslationUnit(kind: CXCursorKind) -> c_uint; + pub fn clang_isUnexposed(kind: CXCursorKind) -> c_uint; + pub fn clang_isVirtualBase(cursor: CXCursor) -> c_uint; + pub fn clang_isVolatileQualifiedType(type_: CXType) -> c_uint; + pub fn clang_loadDiagnostics(file: *const c_char, error: *mut CXLoadDiag_Error, message: *mut CXString) -> CXDiagnosticSet; + pub fn clang_parseTranslationUnit(index: CXIndex, file: *const c_char, arguments: *const *const c_char, n_arguments: c_int, unsaved: *mut CXUnsavedFile, n_unsaved: c_uint, flags: CXTranslationUnit_Flags) -> CXTranslationUnit; + pub fn clang_parseTranslationUnit2(index: CXIndex, file: *const c_char, arguments: *const *const c_char, n_arguments: c_int, unsaved: *mut CXUnsavedFile, n_unsaved: c_uint, flags: CXTranslationUnit_Flags, tu: *mut CXTranslationUnit) -> CXErrorCode; + #[cfg(feature="gte_clang_3_8")] + pub fn clang_parseTranslationUnit2FullArgv(index: CXIndex, file: *const c_char, arguments: *const *const c_char, n_arguments: c_int, unsaved: *mut CXUnsavedFile, n_unsaved: c_uint, flags: CXTranslationUnit_Flags, tu: *mut CXTranslationUnit) -> CXErrorCode; + pub fn clang_remap_dispose(remapping: CXRemapping); + pub fn clang_remap_getFilenames(remapping: CXRemapping, index: c_uint, original: *mut CXString, transformed: *mut CXString); + pub fn clang_remap_getNumFiles(remapping: CXRemapping) -> c_uint; + pub fn clang_reparseTranslationUnit(tu: CXTranslationUnit, n_unsaved: c_uint, unsaved: *mut CXUnsavedFile, flags: CXReparse_Flags) -> CXErrorCode; + pub fn clang_saveTranslationUnit(tu: CXTranslationUnit, file: *const c_char, options: CXSaveTranslationUnit_Flags) -> CXSaveError; + pub fn clang_sortCodeCompletionResults(results: *mut CXCompletionResult, n_results: c_uint); + #[cfg(feature="gte_clang_5_0")] + pub fn clang_suspendTranslationUnit(tu: CXTranslationUnit) -> c_uint; + pub fn clang_toggleCrashRecovery(recovery: c_uint); + pub fn clang_tokenize(tu: CXTranslationUnit, range: CXSourceRange, tokens: *mut *mut CXToken, n_tokens: *mut c_uint); + pub fn clang_visitChildren(cursor: CXCursor, visitor: CXCursorVisitor, data: CXClientData) -> c_uint; + + // Documentation + pub fn clang_BlockCommandComment_getArgText(comment: CXComment, index: c_uint) -> CXString; + pub fn clang_BlockCommandComment_getCommandName(comment: CXComment) -> CXString; + pub fn clang_BlockCommandComment_getNumArgs(comment: CXComment) -> c_uint; + pub fn clang_BlockCommandComment_getParagraph(comment: CXComment) -> CXComment; + pub fn clang_Comment_getChild(comment: CXComment, index: c_uint) -> CXComment; + pub fn clang_Comment_getKind(comment: CXComment) -> CXCommentKind; + pub fn clang_Comment_getNumChildren(comment: CXComment) -> c_uint; + pub fn clang_Comment_isWhitespace(comment: CXComment) -> c_uint; + pub fn clang_Cursor_getParsedComment(C: 
CXCursor) -> CXComment; + pub fn clang_FullComment_getAsHTML(comment: CXComment) -> CXString; + pub fn clang_FullComment_getAsXML(comment: CXComment) -> CXString; + pub fn clang_HTMLStartTagComment_isSelfClosing(comment: CXComment) -> c_uint; + pub fn clang_HTMLStartTag_getAttrName(comment: CXComment, index: c_uint) -> CXString; + pub fn clang_HTMLStartTag_getAttrValue(comment: CXComment, index: c_uint) -> CXString; + pub fn clang_HTMLStartTag_getNumAttrs(comment: CXComment) -> c_uint; + pub fn clang_HTMLTagComment_getAsString(comment: CXComment) -> CXString; + pub fn clang_HTMLTagComment_getTagName(comment: CXComment) -> CXString; + pub fn clang_InlineCommandComment_getArgText(comment: CXComment, index: c_uint) -> CXString; + pub fn clang_InlineCommandComment_getCommandName(comment: CXComment) -> CXString; + pub fn clang_InlineCommandComment_getNumArgs(comment: CXComment) -> c_uint; + pub fn clang_InlineCommandComment_getRenderKind(comment: CXComment) -> CXCommentInlineCommandRenderKind; + pub fn clang_InlineContentComment_hasTrailingNewline(comment: CXComment) -> c_uint; + pub fn clang_ParamCommandComment_getDirection(comment: CXComment) -> CXCommentParamPassDirection; + pub fn clang_ParamCommandComment_getParamIndex(comment: CXComment) -> c_uint; + pub fn clang_ParamCommandComment_getParamName(comment: CXComment) -> CXString; + pub fn clang_ParamCommandComment_isDirectionExplicit(comment: CXComment) -> c_uint; + pub fn clang_ParamCommandComment_isParamIndexValid(comment: CXComment) -> c_uint; + pub fn clang_TParamCommandComment_getDepth(comment: CXComment) -> c_uint; + pub fn clang_TParamCommandComment_getIndex(comment: CXComment, depth: c_uint) -> c_uint; + pub fn clang_TParamCommandComment_getParamName(comment: CXComment) -> CXString; + pub fn clang_TParamCommandComment_isParamPositionValid(comment: CXComment) -> c_uint; + pub fn clang_TextComment_getText(comment: CXComment) -> CXString; + pub fn clang_VerbatimBlockLineComment_getText(comment: CXComment) -> CXString; + pub fn clang_VerbatimLineComment_getText(comment: CXComment) -> CXString; +} diff --git a/third_party/rust/clang-sys/src/link.rs b/third_party/rust/clang-sys/src/link.rs index 050681bed7..0bbd168930 100644 --- a/third_party/rust/clang-sys/src/link.rs +++ b/third_party/rust/clang-sys/src/link.rs @@ -1,198 +1,198 @@ -// Copyright 2016 Kyle Mayes -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//================================================ -// Macros -//================================================ - -#[cfg(feature = "runtime")] -macro_rules! 
link { - (@LOAD: #[cfg($cfg:meta)] fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)*) => ( - #[cfg($cfg)] - pub fn $name(library: &mut super::SharedLibrary) { - let symbol = unsafe { library.library.get(stringify!($name).as_bytes()) }.ok(); - library.functions.$name = match symbol { - Some(s) => *s, - None => None, - }; - } - - #[cfg(not($cfg))] - pub fn $name(_: &mut super::SharedLibrary) {} - ); - - (@LOAD: fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)*) => ( - link!(@LOAD: #[cfg(feature="runtime")] fn $name($($pname: $pty), *) $(-> $ret)*); - ); - - ($($(#[cfg($cfg:meta)])* pub fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)*;)+) => ( - use std::cell::{RefCell}; - use std::sync::{Arc}; - use std::path::{Path, PathBuf}; - - /// The set of functions loaded dynamically. - #[derive(Debug, Default)] - pub struct Functions { - $($(#[cfg($cfg)])* pub $name: Option $ret)*>,)+ - } - - /// A dynamically loaded instance of the `libclang` library. - #[derive(Debug)] - pub struct SharedLibrary { - library: libloading::Library, - path: PathBuf, - pub functions: Functions, - } - - impl SharedLibrary { - fn new(library: libloading::Library, path: PathBuf) -> Self { - Self { library, path, functions: Functions::default() } - } - - pub fn path(&self) -> &Path { - &self.path - } - } - - thread_local!(static LIBRARY: RefCell>> = RefCell::new(None)); - - /// Returns whether a `libclang` shared library is loaded on this thread. - pub fn is_loaded() -> bool { - LIBRARY.with(|l| l.borrow().is_some()) - } - - fn with_library(f: F) -> Option where F: FnOnce(&SharedLibrary) -> T { - LIBRARY.with(|l| { - match l.borrow().as_ref() { - Some(library) => Some(f(&library)), - _ => None, - } - }) - } - - $( - #[cfg_attr(feature="cargo-clippy", allow(too_many_arguments))] - $(#[cfg($cfg)])* - pub unsafe fn $name($($pname: $pty), *) $(-> $ret)* { - let f = with_library(|l| { - match l.functions.$name { - Some(f) => f, - _ => panic!(concat!("function not loaded: ", stringify!($name))), - } - }).expect("a `libclang` shared library is not loaded on this thread"); - f($($pname), *) - } - - $(#[cfg($cfg)])* - pub mod $name { - pub fn is_loaded() -> bool { - super::with_library(|l| l.functions.$name.is_some()).unwrap_or(false) - } - } - )+ - - mod load { - $(link!(@LOAD: $(#[cfg($cfg)])* fn $name($($pname: $pty), *) $(-> $ret)*);)+ - } - - /// Loads a `libclang` shared library and returns the library instance. - /// - /// This function does not attempt to load any functions from the shared library. The caller - /// is responsible for loading the functions they require. - /// - /// # Failures - /// - /// * a `libclang` shared library could not be found - /// * the `libclang` shared library could not be opened - pub fn load_manually() -> Result { - mod build { - pub mod common { include!(concat!(env!("OUT_DIR"), "/common.rs")); } - pub mod dynamic { include!(concat!(env!("OUT_DIR"), "/dynamic.rs")); } - } - - let (directory, filename) = try!(build::dynamic::find(true)); - let path = directory.join(filename); - - let library = libloading::Library::new(&path).map_err(|e| { - format!( - "the `libclang` shared library at {} could not be opened: {}", - path.display(), - e, - ) - }); - - let mut library = SharedLibrary::new(try!(library), path); - $(load::$name(&mut library);)+ - Ok(library) - } - - /// Loads a `libclang` shared library for use in the current thread. - /// - /// This functions attempts to load all the functions in the shared library. 
Whether a - /// function has been loaded can be tested by calling the `is_loaded` function on the - /// module with the same name as the function (e.g., `clang_createIndex::is_loaded()` for - /// the `clang_createIndex` function). - /// - /// # Failures - /// - /// * a `libclang` shared library could not be found - /// * the `libclang` shared library could not be opened - #[allow(dead_code)] - pub fn load() -> Result<(), String> { - let library = Arc::new(try!(load_manually())); - LIBRARY.with(|l| *l.borrow_mut() = Some(library)); - Ok(()) - } - - /// Unloads the `libclang` shared library in use in the current thread. - /// - /// # Failures - /// - /// * a `libclang` shared library is not in use in the current thread - pub fn unload() -> Result<(), String> { - let library = set_library(None); - if library.is_some() { - Ok(()) - } else { - Err("a `libclang` shared library is not in use in the current thread".into()) - } - } - - /// Returns the library instance stored in TLS. - /// - /// This functions allows for sharing library instances between threads. - pub fn get_library() -> Option> { - LIBRARY.with(|l| l.borrow_mut().clone()) - } - - /// Sets the library instance stored in TLS and returns the previous library. - /// - /// This functions allows for sharing library instances between threads. - pub fn set_library(library: Option>) -> Option> { - LIBRARY.with(|l| mem::replace(&mut *l.borrow_mut(), library)) - } - ) -} - -#[cfg(not(feature = "runtime"))] -macro_rules! link { - ($($(#[cfg($cfg:meta)])* pub fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)*;)+) => ( - extern { $($(#[cfg($cfg)])* pub fn $name($($pname: $pty), *) $(-> $ret)*;)+ } - - $($(#[cfg($cfg)])* - pub mod $name { - pub fn is_loaded() -> bool { true } - })+ - ) -} +// Copyright 2016 Kyle Mayes +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//================================================ +// Macros +//================================================ + +#[cfg(feature = "runtime")] +macro_rules! link { + (@LOAD: #[cfg($cfg:meta)] fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)*) => ( + #[cfg($cfg)] + pub fn $name(library: &mut super::SharedLibrary) { + let symbol = unsafe { library.library.get(stringify!($name).as_bytes()) }.ok(); + library.functions.$name = match symbol { + Some(s) => *s, + None => None, + }; + } + + #[cfg(not($cfg))] + pub fn $name(_: &mut super::SharedLibrary) {} + ); + + (@LOAD: fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)*) => ( + link!(@LOAD: #[cfg(feature="runtime")] fn $name($($pname: $pty), *) $(-> $ret)*); + ); + + ($($(#[cfg($cfg:meta)])* pub fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)*;)+) => ( + use std::cell::{RefCell}; + use std::sync::{Arc}; + use std::path::{Path, PathBuf}; + + /// The set of functions loaded dynamically. + #[derive(Debug, Default)] + pub struct Functions { + $($(#[cfg($cfg)])* pub $name: Option $ret)*>,)+ + } + + /// A dynamically loaded instance of the `libclang` library. 
+ #[derive(Debug)] + pub struct SharedLibrary { + library: libloading::Library, + path: PathBuf, + pub functions: Functions, + } + + impl SharedLibrary { + fn new(library: libloading::Library, path: PathBuf) -> Self { + Self { library, path, functions: Functions::default() } + } + + pub fn path(&self) -> &Path { + &self.path + } + } + + thread_local!(static LIBRARY: RefCell>> = RefCell::new(None)); + + /// Returns whether a `libclang` shared library is loaded on this thread. + pub fn is_loaded() -> bool { + LIBRARY.with(|l| l.borrow().is_some()) + } + + fn with_library(f: F) -> Option where F: FnOnce(&SharedLibrary) -> T { + LIBRARY.with(|l| { + match l.borrow().as_ref() { + Some(library) => Some(f(&library)), + _ => None, + } + }) + } + + $( + #[cfg_attr(feature="cargo-clippy", allow(too_many_arguments))] + $(#[cfg($cfg)])* + pub unsafe fn $name($($pname: $pty), *) $(-> $ret)* { + let f = with_library(|l| { + match l.functions.$name { + Some(f) => f, + _ => panic!(concat!("function not loaded: ", stringify!($name))), + } + }).expect("a `libclang` shared library is not loaded on this thread"); + f($($pname), *) + } + + $(#[cfg($cfg)])* + pub mod $name { + pub fn is_loaded() -> bool { + super::with_library(|l| l.functions.$name.is_some()).unwrap_or(false) + } + } + )+ + + mod load { + $(link!(@LOAD: $(#[cfg($cfg)])* fn $name($($pname: $pty), *) $(-> $ret)*);)+ + } + + /// Loads a `libclang` shared library and returns the library instance. + /// + /// This function does not attempt to load any functions from the shared library. The caller + /// is responsible for loading the functions they require. + /// + /// # Failures + /// + /// * a `libclang` shared library could not be found + /// * the `libclang` shared library could not be opened + pub fn load_manually() -> Result { + mod build { + pub mod common { include!(concat!(env!("OUT_DIR"), "/common.rs")); } + pub mod dynamic { include!(concat!(env!("OUT_DIR"), "/dynamic.rs")); } + } + + let (directory, filename) = try!(build::dynamic::find(true)); + let path = directory.join(filename); + + let library = libloading::Library::new(&path).map_err(|e| { + format!( + "the `libclang` shared library at {} could not be opened: {}", + path.display(), + e, + ) + }); + + let mut library = SharedLibrary::new(try!(library), path); + $(load::$name(&mut library);)+ + Ok(library) + } + + /// Loads a `libclang` shared library for use in the current thread. + /// + /// This functions attempts to load all the functions in the shared library. Whether a + /// function has been loaded can be tested by calling the `is_loaded` function on the + /// module with the same name as the function (e.g., `clang_createIndex::is_loaded()` for + /// the `clang_createIndex` function). + /// + /// # Failures + /// + /// * a `libclang` shared library could not be found + /// * the `libclang` shared library could not be opened + #[allow(dead_code)] + pub fn load() -> Result<(), String> { + let library = Arc::new(try!(load_manually())); + LIBRARY.with(|l| *l.borrow_mut() = Some(library)); + Ok(()) + } + + /// Unloads the `libclang` shared library in use in the current thread. + /// + /// # Failures + /// + /// * a `libclang` shared library is not in use in the current thread + pub fn unload() -> Result<(), String> { + let library = set_library(None); + if library.is_some() { + Ok(()) + } else { + Err("a `libclang` shared library is not in use in the current thread".into()) + } + } + + /// Returns the library instance stored in TLS. 
+ /// + /// This functions allows for sharing library instances between threads. + pub fn get_library() -> Option> { + LIBRARY.with(|l| l.borrow_mut().clone()) + } + + /// Sets the library instance stored in TLS and returns the previous library. + /// + /// This functions allows for sharing library instances between threads. + pub fn set_library(library: Option>) -> Option> { + LIBRARY.with(|l| mem::replace(&mut *l.borrow_mut(), library)) + } + ) +} + +#[cfg(not(feature = "runtime"))] +macro_rules! link { + ($($(#[cfg($cfg:meta)])* pub fn $name:ident($($pname:ident: $pty:ty), *) $(-> $ret:ty)*;)+) => ( + extern { $($(#[cfg($cfg)])* pub fn $name($($pname: $pty), *) $(-> $ret)*;)+ } + + $($(#[cfg($cfg)])* + pub mod $name { + pub fn is_loaded() -> bool { true } + })+ + ) +} diff --git a/third_party/rust/clang-sys/src/support.rs b/third_party/rust/clang-sys/src/support.rs index acf6522b0c..3e8181a1e2 100644 --- a/third_party/rust/clang-sys/src/support.rs +++ b/third_party/rust/clang-sys/src/support.rs @@ -1,210 +1,210 @@ -// Copyright 2016 Kyle Mayes -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Provides helper functionality. - -use std::path::{Path, PathBuf}; -use std::process::Command; -use std::{env, io}; - -use glob; - -use libc::c_int; - -use super::CXVersion; - -//================================================ -// Macros -//================================================ - -macro_rules! try_opt { - ($option:expr) => {{ - match $option { - Some(some) => some, - None => return None, - } - }}; -} - -//================================================ -// Structs -//================================================ - -/// A `clang` executable. -#[derive(Clone, Debug)] -pub struct Clang { - /// The path to this `clang` executable. - pub path: PathBuf, - /// The version of this `clang` executable if it could be parsed. - pub version: Option, - /// The directories searched by this `clang` executable for C headers if - /// they could be parsed. - pub c_search_paths: Option>, - /// The directories searched by this `clang` executable for C++ headers if - /// they could be parsed. - pub cpp_search_paths: Option>, -} - -impl Clang { - fn new(path: impl AsRef, args: &[String]) -> Self { - Self { - path: path.as_ref().into(), - version: parse_version(path.as_ref()), - c_search_paths: parse_search_paths(path.as_ref(), "c", args), - cpp_search_paths: parse_search_paths(path.as_ref(), "c++", args), - } - } - - /// Returns a `clang` executable if one can be found. - /// - /// If the `CLANG_PATH` environment variable is set, that is the instance of - /// `clang` used. Otherwise, a series of directories are searched. First, if - /// a path is supplied, that is the first directory searched. Then, the - /// directory returned by `llvm-config --bindir` is searched. On macOS - /// systems, `xcodebuild -find clang` will next be queried. Last, the - /// directories in the system's `PATH` are searched. 
-    pub fn find(path: Option<&Path>, args: &[String]) -> Option<Clang> {
-        if let Ok(path) = env::var("CLANG_PATH") {
-            return Some(Clang::new(path, args));
-        }
-
-        let mut paths = vec![];
-        if let Some(path) = path {
-            paths.push(path.into());
-        }
-        if let Ok(path) = run_llvm_config(&["--bindir"]) {
-            paths.push(path.into());
-        }
-        if cfg!(target_os = "macos") {
-            if let Ok((path, _)) = run("xcodebuild", &["-find", "clang"]) {
-                paths.push(path.into());
-            }
-        }
-        paths.extend(env::split_paths(&env::var("PATH").unwrap()));
-
-        let default = format!("clang{}", env::consts::EXE_SUFFIX);
-        let versioned = format!("clang-[0-9]*{}", env::consts::EXE_SUFFIX);
-        let patterns = &[&default[..], &versioned[..]];
-        for path in paths {
-            if let Some(path) = find(&path, patterns) {
-                return Some(Clang::new(path, args));
-            }
-        }
-
-        None
-    }
-}
-
-//================================================
-// Functions
-//================================================
-
-/// Returns the first match to the supplied glob patterns in the supplied
-/// directory if there are any matches.
-fn find(directory: &Path, patterns: &[&str]) -> Option<PathBuf> {
-    for pattern in patterns {
-        let pattern = directory.join(pattern).to_string_lossy().into_owned();
-        if let Some(path) = try_opt!(glob::glob(&pattern).ok())
-            .filter_map(|p| p.ok())
-            .next()
-        {
-            if path.is_file() && is_executable(&path).unwrap_or(false) {
-                return Some(path);
-            }
-        }
-    }
-
-    None
-}
-
-#[cfg(unix)]
-fn is_executable(path: &Path) -> io::Result<bool> {
-    use std::ffi::CString;
-    use std::os::unix::ffi::OsStrExt;
-
-    let path = CString::new(path.as_os_str().as_bytes())?;
-    unsafe { Ok(libc::access(path.as_ptr(), libc::X_OK) == 0) }
-}
-
-#[cfg(not(unix))]
-fn is_executable(_: &Path) -> io::Result<bool> {
-    Ok(true)
-}
-
-/// Attempts to run an executable, returning the `stdout` and `stderr` output if
-/// successful.
-fn run(executable: &str, arguments: &[&str]) -> Result<(String, String), String> {
-    Command::new(executable)
-        .args(arguments)
-        .output()
-        .map(|o| {
-            let stdout = String::from_utf8_lossy(&o.stdout).into_owned();
-            let stderr = String::from_utf8_lossy(&o.stderr).into_owned();
-            (stdout, stderr)
-        })
-        .map_err(|e| format!("could not run executable `{}`: {}", executable, e))
-}
-
-/// Runs `clang`, returning the `stdout` and `stderr` output.
-fn run_clang(path: &Path, arguments: &[&str]) -> (String, String) {
-    run(&path.to_string_lossy().into_owned(), arguments).unwrap()
-}
-
-/// Runs `llvm-config`, returning the `stdout` output if successful.
-fn run_llvm_config(arguments: &[&str]) -> Result<String, String> {
-    let config = env::var("LLVM_CONFIG_PATH").unwrap_or_else(|_| "llvm-config".to_string());
-    run(&config, arguments).map(|(o, _)| o)
-}
-
-/// Parses a version number if possible, ignoring trailing non-digit characters.
-fn parse_version_number(number: &str) -> Option<c_int> {
-    number
-        .chars()
-        .take_while(|c| c.is_digit(10))
-        .collect::<String>()
-        .parse()
-        .ok()
-}
-
-/// Parses the version from the output of a `clang` executable if possible.
-fn parse_version(path: &Path) -> Option { - let output = run_clang(path, &["--version"]).0; - let start = try_opt!(output.find("version ")) + 8; - let mut numbers = try_opt!(output[start..].split_whitespace().nth(0)).split('.'); - let major = try_opt!(numbers.next().and_then(parse_version_number)); - let minor = try_opt!(numbers.next().and_then(parse_version_number)); - let subminor = numbers.next().and_then(parse_version_number).unwrap_or(0); - Some(CXVersion { - Major: major, - Minor: minor, - Subminor: subminor, - }) -} - -/// Parses the search paths from the output of a `clang` executable if possible. -fn parse_search_paths(path: &Path, language: &str, args: &[String]) -> Option> { - let mut clang_args = vec!["-E", "-x", language, "-", "-v"]; - clang_args.extend(args.iter().map(|s| &**s)); - let output = run_clang(path, &clang_args).1; - let start = try_opt!(output.find("#include <...> search starts here:")) + 34; - let end = try_opt!(output.find("End of search list.")); - let paths = output[start..end].replace("(framework directory)", ""); - Some( - paths - .lines() - .filter(|l| !l.is_empty()) - .map(|l| Path::new(l.trim()).into()) - .collect(), - ) -} +// Copyright 2016 Kyle Mayes +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Provides helper functionality. + +use std::path::{Path, PathBuf}; +use std::process::Command; +use std::{env, io}; + +use glob; + +use libc::c_int; + +use super::CXVersion; + +//================================================ +// Macros +//================================================ + +macro_rules! try_opt { + ($option:expr) => {{ + match $option { + Some(some) => some, + None => return None, + } + }}; +} + +//================================================ +// Structs +//================================================ + +/// A `clang` executable. +#[derive(Clone, Debug)] +pub struct Clang { + /// The path to this `clang` executable. + pub path: PathBuf, + /// The version of this `clang` executable if it could be parsed. + pub version: Option, + /// The directories searched by this `clang` executable for C headers if + /// they could be parsed. + pub c_search_paths: Option>, + /// The directories searched by this `clang` executable for C++ headers if + /// they could be parsed. + pub cpp_search_paths: Option>, +} + +impl Clang { + fn new(path: impl AsRef, args: &[String]) -> Self { + Self { + path: path.as_ref().into(), + version: parse_version(path.as_ref()), + c_search_paths: parse_search_paths(path.as_ref(), "c", args), + cpp_search_paths: parse_search_paths(path.as_ref(), "c++", args), + } + } + + /// Returns a `clang` executable if one can be found. + /// + /// If the `CLANG_PATH` environment variable is set, that is the instance of + /// `clang` used. Otherwise, a series of directories are searched. First, if + /// a path is supplied, that is the first directory searched. Then, the + /// directory returned by `llvm-config --bindir` is searched. On macOS + /// systems, `xcodebuild -find clang` will next be queried. 
Last, the + /// directories in the system's `PATH` are searched.
+    pub fn find(path: Option<&Path>, args: &[String]) -> Option<Clang> {
+        if let Ok(path) = env::var("CLANG_PATH") {
+            return Some(Clang::new(path, args));
+        }
+
+        let mut paths = vec![];
+        if let Some(path) = path {
+            paths.push(path.into());
+        }
+        if let Ok(path) = run_llvm_config(&["--bindir"]) {
+            paths.push(path.into());
+        }
+        if cfg!(target_os = "macos") {
+            if let Ok((path, _)) = run("xcodebuild", &["-find", "clang"]) {
+                paths.push(path.into());
+            }
+        }
+        paths.extend(env::split_paths(&env::var("PATH").unwrap()));
+
+        let default = format!("clang{}", env::consts::EXE_SUFFIX);
+        let versioned = format!("clang-[0-9]*{}", env::consts::EXE_SUFFIX);
+        let patterns = &[&default[..], &versioned[..]];
+        for path in paths {
+            if let Some(path) = find(&path, patterns) {
+                return Some(Clang::new(path, args));
+            }
+        }
+
+        None
+    }
+}
+
+//================================================
+// Functions
+//================================================
+
+/// Returns the first match to the supplied glob patterns in the supplied
+/// directory if there are any matches.
+fn find(directory: &Path, patterns: &[&str]) -> Option<PathBuf> {
+    for pattern in patterns {
+        let pattern = directory.join(pattern).to_string_lossy().into_owned();
+        if let Some(path) = try_opt!(glob::glob(&pattern).ok())
+            .filter_map(|p| p.ok())
+            .next()
+        {
+            if path.is_file() && is_executable(&path).unwrap_or(false) {
+                return Some(path);
+            }
+        }
+    }
+
+    None
+}
+
+#[cfg(unix)]
+fn is_executable(path: &Path) -> io::Result<bool> {
+    use std::ffi::CString;
+    use std::os::unix::ffi::OsStrExt;
+
+    let path = CString::new(path.as_os_str().as_bytes())?;
+    unsafe { Ok(libc::access(path.as_ptr(), libc::X_OK) == 0) }
+}
+
+#[cfg(not(unix))]
+fn is_executable(_: &Path) -> io::Result<bool> {
+    Ok(true)
+}
+
+/// Attempts to run an executable, returning the `stdout` and `stderr` output if
+/// successful.
+fn run(executable: &str, arguments: &[&str]) -> Result<(String, String), String> {
+    Command::new(executable)
+        .args(arguments)
+        .output()
+        .map(|o| {
+            let stdout = String::from_utf8_lossy(&o.stdout).into_owned();
+            let stderr = String::from_utf8_lossy(&o.stderr).into_owned();
+            (stdout, stderr)
+        })
+        .map_err(|e| format!("could not run executable `{}`: {}", executable, e))
+}
+
+/// Runs `clang`, returning the `stdout` and `stderr` output.
+fn run_clang(path: &Path, arguments: &[&str]) -> (String, String) {
+    run(&path.to_string_lossy().into_owned(), arguments).unwrap()
+}
+
+/// Runs `llvm-config`, returning the `stdout` output if successful.
+fn run_llvm_config(arguments: &[&str]) -> Result<String, String> {
+    let config = env::var("LLVM_CONFIG_PATH").unwrap_or_else(|_| "llvm-config".to_string());
+    run(&config, arguments).map(|(o, _)| o)
+}
+
+/// Parses a version number if possible, ignoring trailing non-digit characters.
+fn parse_version_number(number: &str) -> Option<c_int> {
+    number
+        .chars()
+        .take_while(|c| c.is_digit(10))
+        .collect::<String>()
+        .parse()
+        .ok()
+}
+
+/// Parses the version from the output of a `clang` executable if possible.
+fn parse_version(path: &Path) -> Option { + let output = run_clang(path, &["--version"]).0; + let start = try_opt!(output.find("version ")) + 8; + let mut numbers = try_opt!(output[start..].split_whitespace().nth(0)).split('.'); + let major = try_opt!(numbers.next().and_then(parse_version_number)); + let minor = try_opt!(numbers.next().and_then(parse_version_number)); + let subminor = numbers.next().and_then(parse_version_number).unwrap_or(0); + Some(CXVersion { + Major: major, + Minor: minor, + Subminor: subminor, + }) +} + +/// Parses the search paths from the output of a `clang` executable if possible. +fn parse_search_paths(path: &Path, language: &str, args: &[String]) -> Option> { + let mut clang_args = vec!["-E", "-x", language, "-", "-v"]; + clang_args.extend(args.iter().map(|s| &**s)); + let output = run_clang(path, &clang_args).1; + let start = try_opt!(output.find("#include <...> search starts here:")) + 34; + let end = try_opt!(output.find("End of search list.")); + let paths = output[start..end].replace("(framework directory)", ""); + Some( + paths + .lines() + .filter(|l| !l.is_empty()) + .map(|l| Path::new(l.trim()).into()) + .collect(), + ) +} diff --git a/third_party/rust/clang-sys/tests/header.h b/third_party/rust/clang-sys/tests/header.h index 3767c8e754..5c392d3145 100644 --- a/third_party/rust/clang-sys/tests/header.h +++ b/third_party/rust/clang-sys/tests/header.h @@ -1,6 +1,6 @@ -#ifndef HEADER_H_ -#define HEADER_H_ - -int add(int a, int b); - -#endif +#ifndef HEADER_H_ +#define HEADER_H_ + +int add(int a, int b); + +#endif diff --git a/third_party/rust/clang-sys/tests/lib.rs b/third_party/rust/clang-sys/tests/lib.rs index 29f7386e15..23cb6271a5 100644 --- a/third_party/rust/clang-sys/tests/lib.rs +++ b/third_party/rust/clang-sys/tests/lib.rs @@ -1,46 +1,46 @@ -extern crate clang_sys; -extern crate libc; - -use std::ptr; - -use clang_sys::*; - -use libc::c_char; - -fn parse() { - unsafe { - let index = clang_createIndex(0, 0); - assert!(!index.is_null()); - - let tu = clang_parseTranslationUnit( - index, - "tests/header.h\0".as_ptr() as *const c_char, - ptr::null_mut(), - 0, - ptr::null_mut(), - 0, - 0, - ); - assert!(!tu.is_null()); - } -} - -#[cfg(feature = "runtime")] -#[test] -fn test() { - load().unwrap(); - parse(); - unload().unwrap(); -} - -#[cfg(not(feature = "runtime"))] -#[test] -fn test() { - parse(); -} - -#[test] -fn test_support() { - let clang = support::Clang::find(None, &[]).unwrap(); - println!("{:?}", clang); -} +extern crate clang_sys; +extern crate libc; + +use std::ptr; + +use clang_sys::*; + +use libc::c_char; + +fn parse() { + unsafe { + let index = clang_createIndex(0, 0); + assert!(!index.is_null()); + + let tu = clang_parseTranslationUnit( + index, + "tests/header.h\0".as_ptr() as *const c_char, + ptr::null_mut(), + 0, + ptr::null_mut(), + 0, + 0, + ); + assert!(!tu.is_null()); + } +} + +#[cfg(feature = "runtime")] +#[test] +fn test() { + load().unwrap(); + parse(); + unload().unwrap(); +} + +#[cfg(not(feature = "runtime"))] +#[test] +fn test() { + parse(); +} + +#[test] +fn test_support() { + let clang = support::Clang::find(None, &[]).unwrap(); + println!("{:?}", clang); +} diff --git a/third_party/rust/cranelift-codegen/Cargo.toml b/third_party/rust/cranelift-codegen/Cargo.toml index eb6a1094c8..9535983a10 100644 --- a/third_party/rust/cranelift-codegen/Cargo.toml +++ b/third_party/rust/cranelift-codegen/Cargo.toml @@ -19,10 +19,10 @@ cranelift-bforest = { path = "../cranelift-bforest", version = "0.44.0", default failure = { version = 
"0.1.1", default-features = false, features = ["derive"] } failure_derive = { version = "0.1.1", default-features = false } hashmap_core = { version = "0.1.9", optional = true } -target-lexicon = "0.8.1" +target-lexicon = "0.9.0" log = { version = "0.4.6", default-features = false } serde = { version = "1.0.94", features = ["derive"], optional = true } -smallvec = { version = "0.6.10" } +smallvec = { version = "1.2.0" } # It is a goal of the cranelift-codegen crate to have minimal external dependencies. # Please don't add any unless they are essential to the task of creating binary # machine code. Integration tests that need external dependencies can be diff --git a/third_party/rust/cranelift-frontend/Cargo.toml b/third_party/rust/cranelift-frontend/Cargo.toml index e30bde657e..49adb285ea 100644 --- a/third_party/rust/cranelift-frontend/Cargo.toml +++ b/third_party/rust/cranelift-frontend/Cargo.toml @@ -12,10 +12,10 @@ edition = "2018" [dependencies] cranelift-codegen = { path = "../cranelift-codegen", version = "0.44.0", default-features = false } -target-lexicon = "0.8.1" +target-lexicon = "0.9.0" log = { version = "0.4.6", default-features = false } hashmap_core = { version = "0.1.9", optional = true } -smallvec = { version = "0.6.10" } +smallvec = { version = "1.2.0" } [features] default = ["std"] diff --git a/third_party/rust/cranelift-wasm/Cargo.toml b/third_party/rust/cranelift-wasm/Cargo.toml index fd7f51608d..b49623c9da 100644 --- a/third_party/rust/cranelift-wasm/Cargo.toml +++ b/third_party/rust/cranelift-wasm/Cargo.toml @@ -23,7 +23,7 @@ serde = { version = "1.0.94", features = ["derive"], optional = true } [dev-dependencies] wabt = "0.9.1" -target-lexicon = "0.8.1" +target-lexicon = "0.9.0" [features] default = ["std"] diff --git a/third_party/rust/devd-rs/.cargo-checksum.json b/third_party/rust/devd-rs/.cargo-checksum.json index b9ac284777..df23cf3721 100644 --- a/third_party/rust/devd-rs/.cargo-checksum.json +++ b/third_party/rust/devd-rs/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"CODE_OF_CONDUCT.md":"62f073941a34756006851cef8d5d081f6332a986063e87deafeb621f3f6ff554","Cargo.toml":"5b63e110cc58510911b48f6ccfc5748d56d1b88bb9eefc1fb1806fa14213b030","README.md":"88550411d0440cc5931cff6e4265ec676291b2b18989f4fb5779c493e40392ae","UNLICENSE":"7e12e5df4bae12cb21581ba157ced20e1986a0508dd10d0e8a4ab9a4cf94e85c","examples/main.rs":"734a87846b61d09d2aaca444c69dc61765f66df34602f3a4acf1255f95404226","src/data.rs":"677b52a636deb1f0ffc623dbdc5ed7acd78d915117825ced7031c6fa6f0c861e","src/lib.rs":"b485b0fcc73deaad31c9b6e5762dd2d325ecebca100a127432596afeb9058d0e","src/parser.rs":"4d6fedcdc976e9d305e737ef23c643af3754cb3bb3e36ad3330fd8eee4923d69","src/result.rs":"4088fc879652c115a13d8a6e6a71fab8571a7982e740af6a91115f3a82aef236"},"package":"0d009f166c0d9e9f9909dc751630b3a6411ab7f85a153d32d01deb364ffe52a7"} \ No newline at end of file 
+{"files":{"CODE_OF_CONDUCT.md":"62f073941a34756006851cef8d5d081f6332a986063e87deafeb621f3f6ff554","Cargo.lock":"bc2219b06850f8a2660bfd2f8af2c30ec5893f51d9e067f4489fc342f42754ee","Cargo.toml":"1c0d31cba751ef65905017ac764bfb82682125d9fc08d39f9976301c4c365bd4","README.md":"88550411d0440cc5931cff6e4265ec676291b2b18989f4fb5779c493e40392ae","UNLICENSE":"7e12e5df4bae12cb21581ba157ced20e1986a0508dd10d0e8a4ab9a4cf94e85c","examples/main.rs":"734a87846b61d09d2aaca444c69dc61765f66df34602f3a4acf1255f95404226","src/data.rs":"677b52a636deb1f0ffc623dbdc5ed7acd78d915117825ced7031c6fa6f0c861e","src/lib.rs":"41f7a2a7170f238ce368e0b1c540f77afadf911cd141d3652302355babf7eefc","src/parser.rs":"b32cc1a50c598cdb810914792de8aa4bf52a28002e8f4df578a271bab8e02bc2","src/result.rs":"4088fc879652c115a13d8a6e6a71fab8571a7982e740af6a91115f3a82aef236"},"package":"1945ccb7caedabdfb9347766ead740fb1e0582b7425598325f546adbd832cce1"} \ No newline at end of file diff --git a/third_party/rust/devd-rs/Cargo.lock b/third_party/rust/devd-rs/Cargo.lock new file mode 100644 index 0000000000..4ba5bed600 --- /dev/null +++ b/third_party/rust/devd-rs/Cargo.lock @@ -0,0 +1,37 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +[[package]] +name = "devd-rs" +version = "0.3.1" +dependencies = [ + "libc", + "nom", +] + +[[package]] +name = "libc" +version = "0.2.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d515b1f41455adea1313a4a2ac8a8a477634fbae63cc6100e3aebb207ce61558" + +[[package]] +name = "memchr" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53445de381a1f436797497c61d851644d0e8e88e6140f22872ad33a704933978" + +[[package]] +name = "nom" +version = "5.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c433f4d505fe6ce7ff78523d2fa13a0b9f2690e181fc26168bcbe5ccc5d14e07" +dependencies = [ + "memchr", + "version_check", +] + +[[package]] +name = "version_check" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" diff --git a/third_party/rust/devd-rs/Cargo.toml b/third_party/rust/devd-rs/Cargo.toml index e6778f582b..4a7aa49735 100644 --- a/third_party/rust/devd-rs/Cargo.toml +++ b/third_party/rust/devd-rs/Cargo.toml @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. 
If you're @@ -12,7 +12,7 @@ [package] name = "devd-rs" -version = "0.3.0" +version = "0.3.1" authors = ["Greg V "] description = "An interface to devd, the device hotplug daemon on FreeBSD and DragonFlyBSD" homepage = "https://github.com/myfreeweb/devd-rs" @@ -25,4 +25,6 @@ repository = "https://github.com/myfreeweb/devd-rs" version = "0" [dependencies.nom] -version = "4" +version = "5" +features = ["std"] +default-features = false diff --git a/third_party/rust/devd-rs/src/lib.rs b/third_party/rust/devd-rs/src/lib.rs index 0dfb28fd77..f872167a73 100644 --- a/third_party/rust/devd-rs/src/lib.rs +++ b/third_party/rust/devd-rs/src/lib.rs @@ -2,28 +2,23 @@ extern crate libc; #[macro_use] extern crate nom; -pub mod result; pub mod data; pub mod parser; +pub mod result; -use libc::{ - c_int, nfds_t, - poll, pollfd, POLLIN, - socket, connect, sockaddr_un, AF_UNIX, SOCK_SEQPACKET -}; +use io::{BufRead, BufReader}; +use libc::{c_int, connect, nfds_t, poll, pollfd, sockaddr_un, socket, AF_UNIX, POLLIN, SOCK_SEQPACKET}; use std::os::unix::io::{FromRawFd, RawFd}; use std::os::unix::net::UnixStream; use std::{io, mem, ptr}; -use io::{BufRead, BufReader}; -pub use result::*; pub use data::*; -use nom::types::CompleteStr; +pub use result::*; const SOCKET_PATH: &'static str = "/var/run/devd.seqpacket.pipe"; pub fn parse_devd_event(e: String) -> Result { - match parser::event(CompleteStr(e.as_str())) { + match parser::event(e.as_str()) { Ok((_, x)) => Ok(x), _ => Err(Error::Parse), } @@ -42,18 +37,9 @@ impl Context { if sockfd < 0 { return Err(io::Error::last_os_error().into()); } - let mut sockaddr = sockaddr_un { - sun_family: AF_UNIX as _, - .. mem::zeroed() - }; - ptr::copy_nonoverlapping( - SOCKET_PATH.as_ptr(), - sockaddr.sun_path.as_mut_ptr() as *mut u8, - SOCKET_PATH.len()); - if connect( - sockfd, - &sockaddr as *const sockaddr_un as *const _, - (mem::size_of_val(&AF_UNIX) + SOCKET_PATH.len()) as _) < 0 { + let mut sockaddr = sockaddr_un { sun_family: AF_UNIX as _, ..mem::zeroed() }; + ptr::copy_nonoverlapping(SOCKET_PATH.as_ptr(), sockaddr.sun_path.as_mut_ptr() as *mut u8, SOCKET_PATH.len()); + if connect(sockfd, &sockaddr as *const sockaddr_un as *const _, (mem::size_of_val(&AF_UNIX) + SOCKET_PATH.len()) as _) < 0 { return Err(io::Error::last_os_error().into()); } Ok(Context { @@ -80,8 +66,7 @@ impl Context { /// Waits for an event using poll(), reads and parses it pub fn wait_for_event<'a>(&mut self, timeout_ms: usize) -> Result { - self.wait_for_event_raw(timeout_ms) - .and_then(parse_devd_event) + self.wait_for_event_raw(timeout_ms).and_then(parse_devd_event) } /// Returns the devd socket file descriptor in case you want to select/poll on it together with diff --git a/third_party/rust/devd-rs/src/parser.rs b/third_party/rust/devd-rs/src/parser.rs index 22ad8bb698..45738aa40c 100644 --- a/third_party/rust/devd-rs/src/parser.rs +++ b/third_party/rust/devd-rs/src/parser.rs @@ -1,83 +1,80 @@ -use nom::{alphanumeric, multispace}; -use nom::types::CompleteStr; use data::*; +use nom::branch::alt; +use nom::bytes::complete::take_while; +use nom::character::complete::{alphanumeric1, char, multispace1}; +use nom::sequence::delimited; -named!( - val, - alt!( - delimited!(char!('"'), take_while!(call!(|c| c != '"')), char!('"')) - | - take_while!(call!(|c| c != '\n' && c != ' ')) - ) -); +fn val(i: &str) -> nom::IResult<&str, &str> { + alt((delimited(char('"'), take_while(|c| c != '"'), char('"')), take_while(|c| c != '\n' && c != ' ')))(i) +} -named!(keyval , +named!(keyval <&str, (&str, &str)>, 
do_parse!( - key: alphanumeric + key: alphanumeric1 >> char!('=') >> val: val >> (key, val) ) ); -named!(keyvals >, +named!(keyvals <&str, BTreeMap >, map!( - many0!(terminated!(keyval, opt!(multispace))), + many0!(terminated!(keyval, opt!(multispace1))), |vec: Vec<_>| vec.into_iter().map(|(k, v)| (k.to_string(), v.to_string())).collect() ) ); -named!(pub event , +named!(pub event <&str, Event>, alt!( do_parse!( tag!("!") >> tag!("system=") >> sys: val >> - multispace >> + multispace1 >> tag!("subsystem=") >> subsys: val >> - multispace >> + multispace1 >> tag!("type=") >> kind: val >> - multispace >> + multispace1 >> data: keyvals >> (Event::Notify { system: sys.to_string(), subsystem: subsys.to_string(), kind: kind.to_string(), data: data }) ) | do_parse!( tag!("+") >> - dev: alphanumeric >> - multispace >> + dev: alphanumeric1 >> + multispace1 >> tag!("at") >> - multispace >> + multispace1 >> parent: keyvals >> tag!("on") >> - multispace >> + multispace1 >> loc: val >> (Event::Attach { dev: dev.to_string(), parent: parent, location: loc.to_string() }) ) | do_parse!( tag!("-") >> - dev: alphanumeric >> - multispace >> + dev: alphanumeric1 >> + multispace1 >> tag!("at") >> - multispace >> + multispace1 >> parent: keyvals >> tag!("on") >> - multispace >> + multispace1 >> loc: val >> (Event::Detach { dev: dev.to_string(), parent: parent, location: loc.to_string() }) ) | do_parse!( tag!("?") >> - multispace >> + multispace1 >> tag!("at") >> - multispace >> + multispace1 >> parent: keyvals >> tag!("on") >> - multispace >> + multispace1 >> loc: val >> (Event::Nomatch { parent: parent, location: loc.to_string() }) ) @@ -91,7 +88,7 @@ mod tests { #[test] fn test_notify() { let txt = "!system=USB subsystem=INTERFACE type=ATTACH ugen=ugen0.2 vendor=0x1050 sernum=\"\" mode=host\n"; - let res = event(CompleteStr(txt)); + let res = event(txt); let mut data = BTreeMap::new(); data.insert("ugen".to_owned(), "ugen0.2".to_owned()); data.insert("vendor".to_owned(), "0x1050".to_owned()); @@ -100,7 +97,7 @@ mod tests { assert_eq!( res, Ok(( - CompleteStr(""), + "", Event::Notify { system: "USB".to_owned(), subsystem: "INTERFACE".to_owned(), @@ -114,14 +111,14 @@ mod tests { #[test] fn test_attach() { let txt = "+uhid1 at bus=0 sernum=\"\" on uhub1"; - let res = event(CompleteStr(txt)); + let res = event(txt); let mut data = BTreeMap::new(); data.insert("bus".to_owned(), "0".to_owned()); data.insert("sernum".to_owned(), "".to_owned()); assert_eq!( res, Ok(( - CompleteStr(""), + "", Event::Attach { dev: "uhid1".to_owned(), parent: data, @@ -134,12 +131,12 @@ mod tests { #[test] fn test_detach() { let txt = "-uhid1 at on uhub1"; - let res = event(CompleteStr(txt)); + let res = event(txt); let data = BTreeMap::new(); assert_eq!( res, Ok(( - CompleteStr(""), + "", Event::Detach { dev: "uhid1".to_owned(), parent: data.to_owned(), @@ -152,14 +149,10 @@ mod tests { #[test] fn test_nomatch() { let txt = "? 
at bus=0 on uhub1"; - let res = event(CompleteStr(txt)); + let res = event(txt); let mut data = BTreeMap::new(); data.insert("bus".to_owned(), "0".to_owned()); - assert_eq!( - res, - Ok((CompleteStr(""), Event::Nomatch { parent: data, location: "uhub1".to_owned() })) - ) + assert_eq!(res, Ok(("", Event::Nomatch { parent: data, location: "uhub1".to_owned() }))) } - } diff --git a/third_party/rust/goblin/.cargo-checksum.json b/third_party/rust/goblin/.cargo-checksum.json index 43a900fbd0..578fa762d7 100644 --- a/third_party/rust/goblin/.cargo-checksum.json +++ b/third_party/rust/goblin/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"CHANGELOG.md":"2687aebcf734e1a8add45fb8b009cccc63d97db7820470a518ff5000634a93e4","Cargo.toml":"d7c562d7144ff9b2a72b0a8488156920ce64ae6cd0763b1ca5d055dd2b40aff1","LICENSE":"036bf6b6d6fd6dd1abda2ff6cdb672a63bdf32c468048720072910f2268a965f","README.md":"5ba373319553c5155cfd9d75a2fc962e17b39872a03a19eab34cc872c82b7a78","etc/crt1.rs":"50667c3066e00f92e038bfa4fe6968f49a31d7c6986e19c7c9e695ad9b836f74","etc/crt132.rs":"e69920ceeab57958a1890979158b57fc43f33c5a135b5798d43664c989c8c686","etc/crt1a.rs":"d158350f72aaf4fecd8501735d2f33a1a226e99bf370c794b90f7e9882c8ca64","examples/ar.rs":"e299cdc8478148b4d20788aa7d9cac04ea9587a405380e2bf889a998e68a1d02","examples/automagic.rs":"f202c7d3c6096a6c883c03d0352a750ad9185811e897046d028dba30aa1dcaf2","examples/dotnet_pe_analysis.rs":"b85ea80e45ac8b3fe9fc6111ab2dee4738878811abad3571192924237d5cd949","examples/dyldinfo.rs":"54b2e04f2f94d5f9c9a8b19cb84f768e339d509472bdcfa317eb9bc3c47caa5f","examples/lipo.rs":"3c2ebe95ac4e38d836f795c65b6e740ae08b751e5ee615bb3feb8620934ca2c9","examples/rdr.rs":"fb1442a6e4678c62983c9a963198a1e67a19169241c43aac840d41192c57a52b","examples/scroll.rs":"2cdb39c29dafd28ed6bfe99cc980480b49fc8e62bef14ab9eb7c45d0d66866d6","src/archive/mod.rs":"8f84e19cbac174b4f34d539755dd87b32b7ef029e5cef667c11b16652664eeef","src/elf/compression_header.rs":"bb6911bccd2d97af8ae721a410f28bc8a2bc6387c412a82909395676cf4a7364","src/elf/constants_header.rs":"cdd0eea9f4617f86f14b57dccf5124ae62376df6efe2bf617f95e4b24c176d1d","src/elf/constants_relocation.rs":"a010071cd2a25ab71e0c7181eb1d9f417daa2d1ec25a09c74bd12ad944892225","src/elf/dynamic.rs":"f2cd1c40257c597058f0ffab7786e7f6d6a18234ccc20e4fb267221ca5fcffce","src/elf/gnu_hash.rs":"718851196316077c270522f4b8e14454af30c364e0ec917f6eb5c2ed6f84f1af","src/elf/header.rs":"27e09865180b20718b30ea57bb60584958c05fdadb626053b0bbc5f733943f4b","src/elf/mod.rs":"e16fcb1c3ef2bcaa2e37168229876a4f085ed420852b93874e8af10694237e73","src/elf/note.rs":"3428ddd17d4ad840ee1f9831ae1913c94b2b82e85d63b45ff7402e3e45200113","src/elf/program_header.rs":"cb14ed59bcf92595ebefaa8752d4449a6f4ec38373ce5709248bd4883690de3c","src/elf/reloc.rs":"5205c33d897de1f8dfad6ba97b597c2a609a78e93fc231508f243ba4eff7ca78","src/elf/section_header.rs":"e379b303869e410060a0f7507bd8cb49dc268105826aae0f92cebb187386b0da","src/elf/sym.rs":"5f50b5eb5c22cc4d53641f8f37106c8c321a08ed90aaedc6e9fa6e1c4175743a","src/error.rs":"308448cc0f6c72f9da0f16ff11c50be98a94f024e86f4607dd6f084374dbaa00","src/lib.rs":"e43e08e9b7cfd0ee8341dd6ed2c718978f3ddab976cab98b5fdadbab315c197c","src/mach/bind_opcodes.rs":"2477021270083fd3d9200adebf5d2a1963b9014c7400be39fb68c8b309e4eebe","src/mach/constants.rs":"36d9011c2db6fac7b561b44350f08e56885fa721329d316e79d1e112e83ebbf5","src/mach/exports.rs":"16910411ca6e13cbe87154b653a9f74ecc3664d198c24b13a6d605ed00901e35","src/mach/fat.rs":"534b01ddeb803217a5e0a8b2bbd7306fd24f5a4ea99f7615714bbeee6b8fb194","src/mach/header.rs":"91d13e0a986
933eff9479aafcca46ba2f8fdb042ae6407800e02b3a240b8ae86","src/mach/imports.rs":"d0ecaa49219afa28613759eea0fdf1974d2b91f78502d137a09ccacf4f7c586a","src/mach/load_command.rs":"c04df81c03d1450be4ced724edb48c5b79e78b6c292397bed12e5f7b9109e2b4","src/mach/mod.rs":"9a776bdaf8b5f3f2e8f8a6a658d62266f3233c80c439d1c872334994e40866d1","src/mach/relocation.rs":"9e09da219bd78d9d5caba22a893622b426afa7548472686b7edb7f74aa115eb5","src/mach/segment.rs":"51d1fd608c5ca311c089bc5640daa6f2e6a2a224cca9e2c30d98c72b4b130701","src/mach/symbols.rs":"bd62ce00c94e8c5ce63293404fc51bde3965e2be3015fa0a84632506a4181bd0","src/pe/characteristic.rs":"4fa8a7e6de20795b6d70d80fc25568eb999bb3dd9f9735d1407302a8053b3dd1","src/pe/data_directories.rs":"a5de9ca2b4e23e7644a31554276f5f68eed12c8808617a3e480427cbe5df3504","src/pe/debug.rs":"5a5215f2f341eb476626c2fd457d23f82610b783819ac2667793272ebf78b650","src/pe/exception.rs":"952a4d8380a6f89592707a8c9ff4b152da2a620e53f248d378d6c49134140016","src/pe/export.rs":"720f701057ea92628828ef96f836f5f8024290f017fd9df4e8ef0b4b79ed3eda","src/pe/header.rs":"50c560d0712a2128ed3af6a93f5c7248fbcaa91d1aec58496e3d4446ec51d0ea","src/pe/import.rs":"300502b117279ea5eafea3b1a97a9809ed303bbefcdaed2abd332e4a80142c8e","src/pe/mod.rs":"95fb58479453acd6fae7dfab7d236dee2eeb7d8e9b21abc7e1cf5ccf9486c4a5","src/pe/optional_header.rs":"0d947e997d657f98e4cc737ef3c2cd7e6a4e4e5270a4403b9622ba44d1eda4cd","src/pe/relocation.rs":"c479b80bb1d6910f2168505dda4f2d8925b7edc34bed4e25d069546f88f52bb3","src/pe/section_table.rs":"92eb6ef848b701346181d9c5bd8382114bbcbfef74e67035311310ad1385bee8","src/pe/symbol.rs":"b9ac555f3ad652c39daba17afe9e0474d97ccbb34f24d5f363a9f6dbf483f6a0","src/pe/utils.rs":"624adb9e2baef91e915989ffa29433d09c8d08033b526bae697fe3cec91293ad","src/strtab.rs":"c157ab7b0033d1879ba6c991706dae397b1f202be4087b693d4879d64160ea1a","tests/archive.rs":"8736af2c5b4749067c9aa34ab03a7f063f0f850d77db2619bea4172ab725cee0","tests/compare_dyldinfos.rs":"bd5f3c22a8a7c1563bf23fa12d95868bd630f3ea5ba3ffe659a602de4ec26e39","tests/macho.rs":"4c892dd614646d3bce79c3bbae731e3a8df947ea3082498a3b7b381813d60123"},"package":"e3fa261d919c1ae9d1e4533c4a2f99e10938603c4208d56c05bec7a872b661b0"} \ No newline at end of file 
+{"files":{"CHANGELOG.md":"40429580e8d584d3e0c6a207fd6174c323a2fe527ea13fc289c3d3a078c9a813","Cargo.lock":"764aeda5cef59d0aeb05f21665de54159add83aaef4104d4b629711e5bdd0194","Cargo.toml":"e64c2898115cc6255f0da44a6f8ba02664c3e9922cdc4fc6b6755c0d6c012573","LICENSE":"036bf6b6d6fd6dd1abda2ff6cdb672a63bdf32c468048720072910f2268a965f","README.md":"f2fbf86aa968ca84d9b55f22b499750c765b2e6496110a8c1afbc9756f77fab9","etc/crt1.rs":"50667c3066e00f92e038bfa4fe6968f49a31d7c6986e19c7c9e695ad9b836f74","etc/crt132.rs":"e69920ceeab57958a1890979158b57fc43f33c5a135b5798d43664c989c8c686","etc/crt1a.rs":"d158350f72aaf4fecd8501735d2f33a1a226e99bf370c794b90f7e9882c8ca64","examples/ar.rs":"e299cdc8478148b4d20788aa7d9cac04ea9587a405380e2bf889a998e68a1d02","examples/automagic.rs":"f202c7d3c6096a6c883c03d0352a750ad9185811e897046d028dba30aa1dcaf2","examples/dotnet_pe_analysis.rs":"70f541958e4bc5d4eca91018211e4a3e9fe8194931e1ecbeb3a465dfc14694dd","examples/dyldinfo.rs":"54b2e04f2f94d5f9c9a8b19cb84f768e339d509472bdcfa317eb9bc3c47caa5f","examples/lipo.rs":"3c2ebe95ac4e38d836f795c65b6e740ae08b751e5ee615bb3feb8620934ca2c9","examples/rdr.rs":"fb1442a6e4678c62983c9a963198a1e67a19169241c43aac840d41192c57a52b","examples/scroll.rs":"2cdb39c29dafd28ed6bfe99cc980480b49fc8e62bef14ab9eb7c45d0d66866d6","src/archive/mod.rs":"ace6e75bdad542f87b859812b40de4d59ce397122e035ce252c3c2ee9adebe47","src/elf/compression_header.rs":"865343698ad343705ba1643c5e144db913282973c6e7aec52f2b11b5954718b0","src/elf/constants_header.rs":"cdd0eea9f4617f86f14b57dccf5124ae62376df6efe2bf617f95e4b24c176d1d","src/elf/constants_relocation.rs":"a010071cd2a25ab71e0c7181eb1d9f417daa2d1ec25a09c74bd12ad944892225","src/elf/dynamic.rs":"13eb19c0c83c223ef55c1f8b091a1cbecad8980f1e9827ec3e9852a1db32d841","src/elf/gnu_hash.rs":"0f5c9d89735388569fb56090efbdd32e98f76b2e74d9dd35dfc7a61d8af9fe69","src/elf/header.rs":"f333bde9b957dc398631115708337e980490e3f2605c125f83d10157f4e0913d","src/elf/mod.rs":"48b6ae783bbdcee183b441d66d87eeefb127d4b2c6223261aa2d7f6dd7514ec0","src/elf/note.rs":"7e1317893d5eb970cdfee3bd5affb91f959f29401fa8aedf8485f09aba2c9277","src/elf/program_header.rs":"9e36af989e884b691bf298a0176cef757c46c99520482fe1b989e8e529775c94","src/elf/reloc.rs":"7c575c1f9f5677d951111329dfbbc4b8a2dfd0c6f7233f9d224e9f97a9a3dc9d","src/elf/section_header.rs":"7c3c4ec822046dd67be54ca421618497fbc2b501e4275dfb03336e791a7328c3","src/elf/sym.rs":"e0858c5e74cde3eb0d9c59c7bb1d7ff1d2b5b6686ecefa5142ca56cd7ec37f4a","src/error.rs":"308448cc0f6c72f9da0f16ff11c50be98a94f024e86f4607dd6f084374dbaa00","src/lib.rs":"e43e08e9b7cfd0ee8341dd6ed2c718978f3ddab976cab98b5fdadbab315c197c","src/mach/bind_opcodes.rs":"2477021270083fd3d9200adebf5d2a1963b9014c7400be39fb68c8b309e4eebe","src/mach/constants.rs":"36d9011c2db6fac7b561b44350f08e56885fa721329d316e79d1e112e83ebbf5","src/mach/exports.rs":"16910411ca6e13cbe87154b653a9f74ecc3664d198c24b13a6d605ed00901e35","src/mach/fat.rs":"534b01ddeb803217a5e0a8b2bbd7306fd24f5a4ea99f7615714bbeee6b8fb194","src/mach/header.rs":"dc151e6d53c497838bf1e60f0586f6927d33a4d619bd74ce4e44ff6dd78ea5f4","src/mach/imports.rs":"d0ecaa49219afa28613759eea0fdf1974d2b91f78502d137a09ccacf4f7c586a","src/mach/load_command.rs":"ac3c853cf9bb1c7de4f20a9345c0d8a6a73037ceef979a6715a3d6101675a28d","src/mach/mod.rs":"9a776bdaf8b5f3f2e8f8a6a658d62266f3233c80c439d1c872334994e40866d1","src/mach/relocation.rs":"9e09da219bd78d9d5caba22a893622b426afa7548472686b7edb7f74aa115eb5","src/mach/segment.rs":"a38fbc0ffcdc16331730727ea63a291afda96553fe62b5a92d0be6b9b64b8bf8","src/mach/symbols.rs":"80db23a5a0b8e987d
192c7a96691e15696d1b417e4a9f81027f88b1b5140a61d","src/pe/characteristic.rs":"4fa8a7e6de20795b6d70d80fc25568eb999bb3dd9f9735d1407302a8053b3dd1","src/pe/data_directories.rs":"a5de9ca2b4e23e7644a31554276f5f68eed12c8808617a3e480427cbe5df3504","src/pe/debug.rs":"4cc117f1f9621fe11fab114c9d827483620efd367035f6dbcf1351571b4ce8fe","src/pe/exception.rs":"1ce70539eb36e3a85b4d6444136881202c9f7e82b524c7b4c33f1885e94500a7","src/pe/export.rs":"247d37e23cc23056dc52b467cabcc0f4845260f1a215bb8ec5e3c20d4cff1935","src/pe/header.rs":"50c560d0712a2128ed3af6a93f5c7248fbcaa91d1aec58496e3d4446ec51d0ea","src/pe/import.rs":"9379bacb0e1931c4bc3a3e1de4d7801e8d47e50a4db9f97eb86c4b6d88bc58c3","src/pe/mod.rs":"5c872ccf74e45f47063296b3aaff91861dbac0338e7a508bed2f214510d4e1f2","src/pe/optional_header.rs":"e329be7081cd7734a7ca2de486303034f179d2c2fd07095476e4f39e475e072a","src/pe/relocation.rs":"c479b80bb1d6910f2168505dda4f2d8925b7edc34bed4e25d069546f88f52bb3","src/pe/section_table.rs":"cd3be25fe5da05440c6873ed50b8b788acbe6377f3777d44c4f5f65d08818b31","src/pe/symbol.rs":"b9ac555f3ad652c39daba17afe9e0474d97ccbb34f24d5f363a9f6dbf483f6a0","src/pe/utils.rs":"6aad2b9ebbf1a7b5b1ad6c535691f35b234e4abff82bd0df16a4c7a2bdc084ee","src/strtab.rs":"c157ab7b0033d1879ba6c991706dae397b1f202be4087b693d4879d64160ea1a","tests/archive.rs":"9a633de16ab12fcb92be0dc237031460a69dbd63cfb59a5a46ba67c872ccc180","tests/compare_dyldinfos.rs":"bd5f3c22a8a7c1563bf23fa12d95868bd630f3ea5ba3ffe659a602de4ec26e39","tests/macho.rs":"4c892dd614646d3bce79c3bbae731e3a8df947ea3082498a3b7b381813d60123"},"package":"88a79ef1f0dad46fd78075b6f80f92d97710eddf87b3e18a15a66761e8942672"} \ No newline at end of file diff --git a/third_party/rust/goblin/CHANGELOG.md b/third_party/rust/goblin/CHANGELOG.md index e368511fb4..92b0e24ada 100644 --- a/third_party/rust/goblin/CHANGELOG.md +++ b/third_party/rust/goblin/CHANGELOG.md @@ -3,7 +3,25 @@ All notable changes to this project will be documented in this file. Before 1.0, this project does not adhere to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). -I'm sorry, I will try my best to ease breaking changes. We're almost to 1.0, don't worry! +Goblin is now 0.1, which means we will try our best to ease breaking changes. Tracking issue is here: https://github.com/m4b/goblin/issues/97 + +## [0.1.2] - 2019-12-02 +### Fixed +mach: don't return data for zerofill sections, @philipc https://github.com/m4b/goblin/pull/195 + +## [0.1.1] - 2019-11-10 +### Fixed +elf: Don't fail entire elf parse when interpreter is malformed string, @jsgf https://github.com/m4b/goblin/pull/192 + +## [0.1.0] - 2019-11-3 +### Added +- update to scroll 0.10 api +### Changed +- BREAKING: rename export to lib in Reexport::DLLOrdinal from @lzybkr +- pe: only parse ExceptionData for machine X86_64, thanks @wyxloading +### Fixed +pe: Fix resolution of redirect unwind info, thanks @jan-auer https://github.com/m4b/goblin/pull/183 +pe: fix reexport dll and ordinal, thanks @lzybkr: d62889f469846af0cceb789b415f1e14f5f9e402 ## [0.0.24] - 2019-7-13 ### Added diff --git a/third_party/rust/goblin/Cargo.lock b/third_party/rust/goblin/Cargo.lock new file mode 100644 index 0000000000..b32340ebaa --- /dev/null +++ b/third_party/rust/goblin/Cargo.lock @@ -0,0 +1,88 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "goblin" +version = "0.1.2" +dependencies = [ + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "plain 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "scroll 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "log" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "plain" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "proc-macro2" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "quote" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "scroll" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "scroll_derive 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "scroll_derive" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "syn" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unicode-xid" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[metadata] +"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" +"checksum plain 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" +"checksum proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "9c9e470a8dc4aeae2dee2f335e8f533e2d4b347e1434e5671afc49b054592f27" +"checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" +"checksum scroll 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "abb2332cb595d33f7edd5700f4cbf94892e680c7f0ae56adab58a35190b66cb1" +"checksum scroll_derive 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f8584eea9b9ff42825b46faf46a8c24d2cff13ec152fa2a50df788b87c07ee28" +"checksum syn 1.0.11 (registry+https://github.com/rust-lang/crates.io-index)" = "dff0acdb207ae2fe6d5976617f887eb1e35a2ba52c13c7234c790960cdad9238" +"checksum unicode-xid 0.2.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" diff --git a/third_party/rust/goblin/Cargo.toml b/third_party/rust/goblin/Cargo.toml index 0db947b7a5..6a7a88b30b 100644 --- a/third_party/rust/goblin/Cargo.toml +++ b/third_party/rust/goblin/Cargo.toml @@ -13,7 +13,7 @@ [package] edition = "2018" name = "goblin" -version = "0.0.24" +version = "0.1.2" authors = ["m4b ", "seu ", "Will Glynn ", "Philip Craig "] include = ["src/**/*", "Cargo.toml", "CHANGELOG.md", "LICENSE", "README.md", "etc/*", "examples/*", "tests/*", "fuzz/**/*"] description = "An impish, cross-platform, ELF, Mach-o, and PE binary parsing and loading crate" @@ -32,7 +32,7 @@ default-features = false version = "0.2.3" [dependencies.scroll] -version = "0.9" +version = "0.10" default_features = false [features] diff --git a/third_party/rust/goblin/README.md b/third_party/rust/goblin/README.md index 0cfbf9346f..100f5126a6 100644 --- a/third_party/rust/goblin/README.md +++ b/third_party/rust/goblin/README.md @@ -23,7 +23,7 @@ Add to your `Cargo.toml` ```toml [dependencies] -goblin = "0.0.24" +goblin = "0.1" ``` ### Features @@ -99,7 +99,7 @@ Here are some things you could do with this crate (or help to implement so they Thank you all :heart: ! -In alphabetic order: +In lexicographic order: - [@amanieu] - [@burjui] @@ -108,11 +108,14 @@ In alphabetic order: - [@jan-auer] - [@jdub] - [@jrmuizel] +- [@jsgf] - [@kjempelodott] - [@le-jzr] - [@lion128] - [@llogiq] - [@lzutao] +- [@lzybkr] +- [@m4b] - [@mitsuhiko] - [@mre] - [@pchickey] @@ -126,37 +129,41 @@ In alphabetic order: - [@ticki] - [@wickerwacka] - [@willglynn] +- [@wyxloading] - [@xcoldhandsx] -[@m4b]: https://github.com/m4b [@amanieu]: https://github.com/amanieu +[@burjui]: https://github.com/burjui [@flanfly]: https://github.com/flanfly [@ibabushkin]: https://github.com/ibabushkin [@jan-auer]: https://github.com/jan-auer [@jdub]: https://github.com/jdub [@jrmuizel]: https://github.com/jrmuizel +[@jsgf]: https://github.com/jsgf [@kjempelodott]: https://github.com/kjempelodott [@le-jzr]: https://github.com/le-jzr [@lion128]: https://github.com/lion128 [@llogiq]: https://github.com/llogiq +[@lzutao]: https://github.com/lzutao +[@lzybkr]: https://github.com/lzybkr +[@m4b]: https://github.com/m4b [@mitsuhiko]: https://github.com/mitsuhiko [@mre]: https://github.com/mre [@pchickey]: https://github.com/pchickey [@philipc]: https://github.com/philipc [@Pzixel]: https://github.com/Pzixel +[@raindev]: https://github.com/raindev [@rocallahan]: https://github.com/rocallahan [@sanxiyn]: https://github.com/sanxiyn [@tathanhdinh]: https://github.com/tathanhdinh [@Techno-coder]: https://github.com/Techno-coder [@ticki]: https://github.com/ticki +[@wickerwacka]: https://github.com/wickerwaka [@willglynn]: https://github.com/willglynn +[@wyxloading]: https://github.com/wyxloading [@xcoldhandsx]: https://github.com/xcoldhandsx -[@lzutao]: https://github.com/lzutao -[@wickerwacka]: https://github.com/wickerwaka -[@raindev]: https://github.com/raindev -[@burjui]: https://github.com/burjui ## Contributing diff --git a/third_party/rust/goblin/examples/dotnet_pe_analysis.rs b/third_party/rust/goblin/examples/dotnet_pe_analysis.rs index 63eae537fb..00568e9f50 100644 --- a/third_party/rust/goblin/examples/dotnet_pe_analysis.rs +++ b/third_party/rust/goblin/examples/dotnet_pe_analysis.rs @@ -31,9 +31,7 @@ struct MetadataRoot<'a> { impl<'a> TryFromCtx<'a, Endian> for MetadataRoot<'a> { type Error = scroll::Error; - type Size 
= usize; - - fn try_from_ctx(src: &'a [u8], endian: Endian) -> Result<(Self, Self::Size), Self::Error> { + fn try_from_ctx(src: &'a [u8], endian: Endian) -> Result<(Self, usize), Self::Error> { let offset = &mut 0; let signature = src.gread_with(offset, endian)?; let major_version = src.gread_with(offset, endian)?; diff --git a/third_party/rust/goblin/src/archive/mod.rs b/third_party/rust/goblin/src/archive/mod.rs index af1be19041..74b6bd9d74 100644 --- a/third_party/rust/goblin/src/archive/mod.rs +++ b/third_party/rust/goblin/src/archive/mod.rs @@ -464,7 +464,8 @@ impl<'a> Archive<'a> { // build the symbol index, translating symbol names into member indexes let mut symbol_index: BTreeMap<&str, usize> = BTreeMap::new(); for (member_offset, name) in index.symbol_indexes.iter().zip(index.strtab.iter()) { - let member_index = member_index_by_offset[member_offset]; + let member_index = *member_index_by_offset.get(member_offset) + .ok_or(Error::Malformed(format!("Could not get member {:?} at offset: {}", name, member_offset)))?; symbol_index.insert(&name, member_index); } diff --git a/third_party/rust/goblin/src/elf/compression_header.rs b/third_party/rust/goblin/src/elf/compression_header.rs index 18e6dc3278..823b28ec9e 100644 --- a/third_party/rust/goblin/src/elf/compression_header.rs +++ b/third_party/rust/goblin/src/elf/compression_header.rs @@ -213,8 +213,7 @@ if_alloc! { } impl ctx::SizeWith for CompressionHeader { - type Units = usize; - fn size_with( &Ctx { container, .. }: &Ctx) -> Self::Units { + fn size_with( &Ctx { container, .. }: &Ctx) -> usize { match container { Container::Little => { compression_header32::SIZEOF_CHDR @@ -228,8 +227,7 @@ if_alloc! { impl<'a> ctx::TryFromCtx<'a, Ctx> for CompressionHeader { type Error = crate::error::Error; - type Size = usize; - fn try_from_ctx(bytes: &'a [u8], Ctx {container, le}: Ctx) -> result::Result<(Self, Self::Size), Self::Error> { + fn try_from_ctx(bytes: &'a [u8], Ctx {container, le}: Ctx) -> result::Result<(Self, usize), Self::Error> { use scroll::Pread; let res = match container { Container::Little => { @@ -245,8 +243,7 @@ if_alloc! { impl ctx::TryIntoCtx for CompressionHeader { type Error = crate::error::Error; - type Size = usize; - fn try_into_ctx(self, bytes: &mut [u8], Ctx {container, le}: Ctx) -> result::Result { + fn try_into_ctx(self, bytes: &mut [u8], Ctx {container, le}: Ctx) -> result::Result { use scroll::Pwrite; match container { Container::Little => { diff --git a/third_party/rust/goblin/src/elf/dynamic.rs b/third_party/rust/goblin/src/elf/dynamic.rs index 52ca0bd025..94e8e5dc9f 100644 --- a/third_party/rust/goblin/src/elf/dynamic.rs +++ b/third_party/rust/goblin/src/elf/dynamic.rs @@ -1,11 +1,14 @@ - macro_rules! elf_dyn { ($size:ty) => { - #[cfg(feature = "alloc")] - use scroll::{Pread, Pwrite, SizeWith}; + // XXX: Do not import scroll traits here. + // See: https://github.com/rust-lang/rust/issues/65090#issuecomment-538668155 + #[repr(C)] #[derive(Copy, Clone, PartialEq, Default)] - #[cfg_attr(feature = "alloc", derive(Pread, Pwrite, SizeWith))] + #[cfg_attr( + feature = "alloc", + derive(scroll::Pread, scroll::Pwrite, scroll::SizeWith) + )] /// An entry in the dynamic array pub struct Dyn { /// Dynamic entry type @@ -16,7 +19,7 @@ macro_rules! elf_dyn { use plain; unsafe impl plain::Plain for Dyn {} - } + }; } // TODO: figure out what's the best, most friendly + safe API choice here - u32s or u64s @@ -305,7 +308,6 @@ if_alloc! { } impl ctx::SizeWith for Dyn { - type Units = usize; fn size_with(&Ctx { container, .. 
}: &Ctx) -> usize { match container { Container::Little => { @@ -320,8 +322,7 @@ if_alloc! { impl<'a> ctx::TryFromCtx<'a, Ctx> for Dyn { type Error = crate::error::Error; - type Size = usize; - fn try_from_ctx(bytes: &'a [u8], Ctx { container, le}: Ctx) -> result::Result<(Self, Self::Size), Self::Error> { + fn try_from_ctx(bytes: &'a [u8], Ctx { container, le}: Ctx) -> result::Result<(Self, usize), Self::Error> { use scroll::Pread; let dynamic = match container { Container::Little => { @@ -337,8 +338,7 @@ if_alloc! { impl ctx::TryIntoCtx for Dyn { type Error = crate::error::Error; - type Size = usize; - fn try_into_ctx(self, bytes: &mut [u8], Ctx { container, le}: Ctx) -> result::Result { + fn try_into_ctx(self, bytes: &mut [u8], Ctx { container, le}: Ctx) -> result::Result { use scroll::Pwrite; match container { Container::Little => { diff --git a/third_party/rust/goblin/src/elf/gnu_hash.rs b/third_party/rust/goblin/src/elf/gnu_hash.rs index 9f050e2638..b95bab82c4 100644 --- a/third_party/rust/goblin/src/elf/gnu_hash.rs +++ b/third_party/rust/goblin/src/elf/gnu_hash.rs @@ -31,11 +31,11 @@ mod tests { use super::hash; #[test] fn test_hash() { - assert_eq!(hash("") , 0x00001505); - assert_eq!(hash("printf") , 0x156b2bb8); - assert_eq!(hash("exit") , 0x7c967e3f); - assert_eq!(hash("syscall") , 0xbac212a0); - assert_eq!(hash("flapenguin.me"), 0x8ae9f18e); + assert_eq!(hash("") , 0x0000_1505); + assert_eq!(hash("printf") , 0x156b_2bb8); + assert_eq!(hash("exit") , 0x7c96_7e3f); + assert_eq!(hash("syscall") , 0xbac2_12a0); + assert_eq!(hash("flapenguin.me"), 0x8ae9_f18e); } } diff --git a/third_party/rust/goblin/src/elf/header.rs b/third_party/rust/goblin/src/elf/header.rs index 046ba84e74..efb3634354 100644 --- a/third_party/rust/goblin/src/elf/header.rs +++ b/third_party/rust/goblin/src/elf/header.rs @@ -276,7 +276,6 @@ if_alloc! { } impl ctx::SizeWith for Header { - type Units = usize; fn size_with(ctx: &crate::container::Ctx) -> usize { match ctx.container { Container::Little => { @@ -291,8 +290,7 @@ if_alloc! { impl<'a> ctx::TryFromCtx<'a, scroll::Endian> for Header { type Error = crate::error::Error; - type Size = usize; - fn try_from_ctx(bytes: &'a [u8], _ctx: scroll::Endian) -> error::Result<(Self, Self::Size)> { + fn try_from_ctx(bytes: &'a [u8], _ctx: scroll::Endian) -> error::Result<(Self, usize)> { use scroll::Pread; if bytes.len() < SIZEOF_IDENT { return Err(error::Error::Malformed("Too small".to_string())); @@ -317,11 +315,9 @@ if_alloc! { } } - // TODO: i think we should remove this forcing of the information in the header, it causes too many conflicts impl ctx::TryIntoCtx for Header { type Error = crate::error::Error; - type Size = usize; - fn try_into_ctx(self, bytes: &mut [u8], _ctx: scroll::Endian) -> Result { + fn try_into_ctx(self, bytes: &mut [u8], _ctx: scroll::Endian) -> Result { use scroll::Pwrite; match self.container()? { Container::Little => { @@ -410,8 +406,7 @@ macro_rules! elf_header_std_impl { impl<'a> ctx::TryFromCtx<'a, scroll::Endian> for Header { type Error = crate::error::Error; - type Size = usize; - fn try_from_ctx(bytes: &'a [u8], _: scroll::Endian) -> result::Result<(Self, Self::Size), Self::Error> { + fn try_from_ctx(bytes: &'a [u8], _: scroll::Endian) -> result::Result<(Self, usize), Self::Error> { let mut elf_header = Header::default(); let offset = &mut 0; bytes.gread_inout(offset, &mut elf_header.e_ident)?; @@ -440,9 +435,8 @@ macro_rules! 
elf_header_std_impl { impl ctx::TryIntoCtx for Header { type Error = crate::error::Error; - type Size = usize; /// a Pwrite impl for Header: **note** we use the endianness value in the header, and not a parameter - fn try_into_ctx(self, bytes: &mut [u8], _endianness: scroll::Endian) -> result::Result { + fn try_into_ctx(self, bytes: &mut [u8], _endianness: scroll::Endian) -> result::Result { use scroll::{Pwrite}; let offset = &mut 0; let endianness = diff --git a/third_party/rust/goblin/src/elf/mod.rs b/third_party/rust/goblin/src/elf/mod.rs index 0b2ab7f167..c984a9aa2a 100644 --- a/third_party/rust/goblin/src/elf/mod.rs +++ b/third_party/rust/goblin/src/elf/mod.rs @@ -225,7 +225,7 @@ if_sylvan! { if ph.p_type == program_header::PT_INTERP && ph.p_filesz != 0 { let count = (ph.p_filesz - 1) as usize; let offset = ph.p_offset as usize; - interpreter = Some(bytes.pread_with::<&str>(offset, ::scroll::ctx::StrCtx::Length(count))?); + interpreter = bytes.pread_with::<&str>(offset, ::scroll::ctx::StrCtx::Length(count)).ok(); } } @@ -339,8 +339,7 @@ if_sylvan! { impl<'a> ctx::TryFromCtx<'a, (usize, Endian)> for Elf<'a> { type Error = crate::error::Error; - type Size = usize; - fn try_from_ctx(src: &'a [u8], (_, _): (usize, Endian)) -> Result<(Elf<'a>, Self::Size), Self::Error> { + fn try_from_ctx(src: &'a [u8], (_, _): (usize, Endian)) -> Result<(Elf<'a>, usize), Self::Error> { let elf = Elf::parse(src)?; Ok((elf, src.len())) } diff --git a/third_party/rust/goblin/src/elf/note.rs b/third_party/rust/goblin/src/elf/note.rs index ea10a81001..c57d3d9ee3 100644 --- a/third_party/rust/goblin/src/elf/note.rs +++ b/third_party/rust/goblin/src/elf/note.rs @@ -179,8 +179,7 @@ if_alloc! { impl<'a> ctx::TryFromCtx<'a, (usize, container::Ctx)> for Note<'a> { type Error = error::Error; - type Size = usize; - fn try_from_ctx(bytes: &'a [u8], (alignment, ctx): (usize, container::Ctx)) -> Result<(Self, Self::Size), Self::Error> { + fn try_from_ctx(bytes: &'a [u8], (alignment, ctx): (usize, container::Ctx)) -> Result<(Self, usize), Self::Error> { let offset = &mut 0; let mut alignment = alignment; if alignment < 4 { diff --git a/third_party/rust/goblin/src/elf/program_header.rs b/third_party/rust/goblin/src/elf/program_header.rs index f8ab76381e..5812e9f3fe 100644 --- a/third_party/rust/goblin/src/elf/program_header.rs +++ b/third_party/rust/goblin/src/elf/program_header.rs @@ -179,7 +179,6 @@ if_alloc! { } impl ctx::SizeWith for ProgramHeader { - type Units = usize; fn size_with(ctx: &Ctx) -> usize { match ctx.container { Container::Little => { @@ -194,8 +193,7 @@ if_alloc! { impl<'a> ctx::TryFromCtx<'a, Ctx> for ProgramHeader { type Error = crate::error::Error; - type Size = usize; - fn try_from_ctx(bytes: &'a [u8], Ctx { container, le}: Ctx) -> result::Result<(Self, Self::Size), Self::Error> { + fn try_from_ctx(bytes: &'a [u8], Ctx { container, le}: Ctx) -> result::Result<(Self, usize), Self::Error> { use scroll::Pread; let res = match container { Container::Little => { @@ -211,8 +209,7 @@ if_alloc! 
{ impl ctx::TryIntoCtx for ProgramHeader { type Error = crate::error::Error; - type Size = usize; - fn try_into_ctx(self, bytes: &mut [u8], Ctx {container, le}: Ctx) -> result::Result { + fn try_into_ctx(self, bytes: &mut [u8], Ctx {container, le}: Ctx) -> result::Result { use scroll::Pwrite; match container { Container::Little => { diff --git a/third_party/rust/goblin/src/elf/reloc.rs b/third_party/rust/goblin/src/elf/reloc.rs index 512b8f62ea..630f08b2ee 100644 --- a/third_party/rust/goblin/src/elf/reloc.rs +++ b/third_party/rust/goblin/src/elf/reloc.rs @@ -308,8 +308,7 @@ if_alloc! { type RelocCtx = (bool, Ctx); impl ctx::SizeWith for Reloc { - type Units = usize; - fn size_with( &(is_rela, Ctx { container, .. }): &RelocCtx) -> Self::Units { + fn size_with( &(is_rela, Ctx { container, .. }): &RelocCtx) -> usize { match container { Container::Little => { if is_rela { reloc32::SIZEOF_RELA } else { reloc32::SIZEOF_REL } @@ -323,8 +322,7 @@ if_alloc! { impl<'a> ctx::TryFromCtx<'a, RelocCtx> for Reloc { type Error = crate::error::Error; - type Size = usize; - fn try_from_ctx(bytes: &'a [u8], (is_rela, Ctx { container, le }): RelocCtx) -> result::Result<(Self, Self::Size), Self::Error> { + fn try_from_ctx(bytes: &'a [u8], (is_rela, Ctx { container, le }): RelocCtx) -> result::Result<(Self, usize), Self::Error> { use scroll::Pread; let reloc = match container { Container::Little => { @@ -348,10 +346,8 @@ if_alloc! { impl ctx::TryIntoCtx for Reloc { type Error = crate::error::Error; - type Size = usize; - // TODO: I think this is a bad idea /// Writes the relocation into `bytes` - fn try_into_ctx(self, bytes: &mut [u8], (is_rela, Ctx {container, le}): RelocCtx) -> result::Result { + fn try_into_ctx(self, bytes: &mut [u8], (is_rela, Ctx {container, le}): RelocCtx) -> result::Result { use scroll::Pwrite; match container { Container::Little => { diff --git a/third_party/rust/goblin/src/elf/section_header.rs b/third_party/rust/goblin/src/elf/section_header.rs index 2eb9143c6b..ebd9ccb06e 100644 --- a/third_party/rust/goblin/src/elf/section_header.rs +++ b/third_party/rust/goblin/src/elf/section_header.rs @@ -1,10 +1,14 @@ macro_rules! elf_section_header { ($size:ident) => { - #[cfg(feature = "alloc")] - use scroll::{Pread, Pwrite, SizeWith}; + // XXX: Do not import scroll traits here. + // See: https://github.com/rust-lang/rust/issues/65090#issuecomment-538668155 + #[repr(C)] #[derive(Copy, Clone, Eq, PartialEq, Default)] - #[cfg_attr(feature = "alloc", derive(Pread, Pwrite, SizeWith))] + #[cfg_attr( + feature = "alloc", + derive(scroll::Pread, scroll::Pwrite, scroll::SizeWith) + )] /// Section Headers are typically used by humans and static linkers for additional information or how to relocate the object /// /// **NOTE** section headers are strippable from a binary without any loss of portability/executability; _do not_ rely on them being there! @@ -51,7 +55,7 @@ macro_rules! elf_section_header { .finish() } } - } + }; } /// Undefined section. @@ -490,8 +494,7 @@ if_alloc! { } impl ctx::SizeWith for SectionHeader { - type Units = usize; - fn size_with( &Ctx { container, .. }: &Ctx) -> Self::Units { + fn size_with( &Ctx { container, .. }: &Ctx) -> usize { match container { Container::Little => { section_header32::SIZEOF_SHDR @@ -505,8 +508,7 @@ if_alloc! 
{ impl<'a> ctx::TryFromCtx<'a, Ctx> for SectionHeader { type Error = crate::error::Error; - type Size = usize; - fn try_from_ctx(bytes: &'a [u8], Ctx {container, le}: Ctx) -> result::Result<(Self, Self::Size), Self::Error> { + fn try_from_ctx(bytes: &'a [u8], Ctx {container, le}: Ctx) -> result::Result<(Self, usize), Self::Error> { use scroll::Pread; let res = match container { Container::Little => { @@ -522,8 +524,7 @@ if_alloc! { impl ctx::TryIntoCtx for SectionHeader { type Error = crate::error::Error; - type Size = usize; - fn try_into_ctx(self, bytes: &mut [u8], Ctx {container, le}: Ctx) -> result::Result { + fn try_into_ctx(self, bytes: &mut [u8], Ctx {container, le}: Ctx) -> result::Result { use scroll::Pwrite; match container { Container::Little => { diff --git a/third_party/rust/goblin/src/elf/sym.rs b/third_party/rust/goblin/src/elf/sym.rs index 0a59490429..c0afde1249 100644 --- a/third_party/rust/goblin/src/elf/sym.rs +++ b/third_party/rust/goblin/src/elf/sym.rs @@ -408,7 +408,6 @@ if_alloc! { } impl ctx::SizeWith for Sym { - type Units = usize; #[inline] fn size_with(&Ctx {container, .. }: &Ctx) -> usize { match container { @@ -424,9 +423,8 @@ if_alloc! { impl<'a> ctx::TryFromCtx<'a, Ctx> for Sym { type Error = crate::error::Error; - type Size = usize; #[inline] - fn try_from_ctx(bytes: &'a [u8], Ctx { container, le}: Ctx) -> result::Result<(Self, Self::Size), Self::Error> { + fn try_from_ctx(bytes: &'a [u8], Ctx { container, le}: Ctx) -> result::Result<(Self, usize), Self::Error> { use scroll::Pread; let sym = match container { Container::Little => { @@ -442,9 +440,8 @@ if_alloc! { impl ctx::TryIntoCtx for Sym { type Error = crate::error::Error; - type Size = usize; #[inline] - fn try_into_ctx(self, bytes: &mut [u8], Ctx {container, le}: Ctx) -> result::Result { + fn try_into_ctx(self, bytes: &mut [u8], Ctx {container, le}: Ctx) -> result::Result { use scroll::Pwrite; match container { Container::Little => { diff --git a/third_party/rust/goblin/src/mach/header.rs b/third_party/rust/goblin/src/mach/header.rs index de7e03f3fe..d19aaccc74 100644 --- a/third_party/rust/goblin/src/mach/header.rs +++ b/third_party/rust/goblin/src/mach/header.rs @@ -341,7 +341,6 @@ impl Header { } impl ctx::SizeWith for Header { - type Units = usize; fn size_with(container: &container::Ctx) -> usize { match container.container { Container::Little => { @@ -355,7 +354,6 @@ impl ctx::SizeWith for Header { } impl ctx::SizeWith for Header { - type Units = usize; fn size_with(container: &Container) -> usize { match container { Container::Little => { @@ -370,8 +368,7 @@ impl ctx::SizeWith for Header { impl<'a> ctx::TryFromCtx<'a, container::Ctx> for Header { type Error = crate::error::Error; - type Size = usize; - fn try_from_ctx(bytes: &'a [u8], container::Ctx { le, container }: container::Ctx) -> error::Result<(Self, Self::Size)> { + fn try_from_ctx(bytes: &'a [u8], container::Ctx { le, container }: container::Ctx) -> error::Result<(Self, usize)> { let size = bytes.len(); if size < SIZEOF_HEADER_32 || size < SIZEOF_HEADER_64 { let error = error::Error::Malformed("bytes size is smaller than a Mach-o header".into()); @@ -393,8 +390,7 @@ impl<'a> ctx::TryFromCtx<'a, container::Ctx> for Header { impl ctx::TryIntoCtx for Header { type Error = crate::error::Error; - type Size = usize; - fn try_into_ctx(self, bytes: &mut [u8], ctx: container::Ctx) -> error::Result { + fn try_into_ctx(self, bytes: &mut [u8], ctx: container::Ctx) -> error::Result { match ctx.container { Container::Little => { 
bytes.pwrite_with(Header32::from(self), 0, ctx.le)?; diff --git a/third_party/rust/goblin/src/mach/load_command.rs b/third_party/rust/goblin/src/mach/load_command.rs index 3868b031c7..b64853fc2f 100644 --- a/third_party/rust/goblin/src/mach/load_command.rs +++ b/third_party/rust/goblin/src/mach/load_command.rs @@ -497,8 +497,7 @@ impl ThreadCommand { impl<'a> ctx::TryFromCtx<'a, Endian> for ThreadCommand { type Error = crate::error::Error; - type Size = usize; - fn try_from_ctx(bytes: &'a [u8], le: Endian) -> error::Result<(Self, Self::Size)> { + fn try_from_ctx(bytes: &'a [u8], le: Endian) -> error::Result<(Self, usize)> { let lc = bytes.pread_with::(0, le)?; // read the thread state flavor and length of the thread state @@ -1324,8 +1323,7 @@ pub enum CommandVariant { impl<'a> ctx::TryFromCtx<'a, Endian> for CommandVariant { type Error = crate::error::Error; - type Size = usize; - fn try_from_ctx(bytes: &'a [u8], le: Endian) -> error::Result<(Self, Self::Size)> { + fn try_from_ctx(bytes: &'a [u8], le: Endian) -> error::Result<(Self, usize)> { use self::CommandVariant::*; let lc = bytes.pread_with::(0, le)?; let size = lc.cmdsize as usize; diff --git a/third_party/rust/goblin/src/mach/segment.rs b/third_party/rust/goblin/src/mach/segment.rs index 9ee7a2bfc1..c070cb6732 100644 --- a/third_party/rust/goblin/src/mach/segment.rs +++ b/third_party/rust/goblin/src/mach/segment.rs @@ -13,6 +13,7 @@ use crate::error; use crate::mach::relocation::RelocationInfo; use crate::mach::load_command::{Section32, Section64, SegmentCommand32, SegmentCommand64, SIZEOF_SECTION_32, SIZEOF_SECTION_64, SIZEOF_SEGMENT_COMMAND_32, SIZEOF_SEGMENT_COMMAND_64, LC_SEGMENT, LC_SEGMENT_64}; +use crate::mach::constants::{SECTION_TYPE, S_ZEROFILL}; pub struct RelocationIterator<'a> { data: &'a [u8], @@ -170,8 +171,7 @@ impl From for Section { impl<'a> ctx::TryFromCtx<'a, container::Ctx> for Section { type Error = crate::error::Error; - type Size = usize; - fn try_from_ctx(bytes: &'a [u8], ctx: container::Ctx) -> Result<(Self, Self::Size), Self::Error> { + fn try_from_ctx(bytes: &'a [u8], ctx: container::Ctx) -> Result<(Self, usize), Self::Error> { match ctx.container { container::Container::Little => { let section = Section::from(bytes.pread_with::(0, ctx.le)?); @@ -186,7 +186,6 @@ impl<'a> ctx::TryFromCtx<'a, container::Ctx> for Section { } impl ctx::SizeWith for Section { - type Units = usize; fn size_with(ctx: &container::Ctx) -> usize { match ctx.container { container::Container::Little => SIZEOF_SECTION_32, @@ -197,8 +196,7 @@ impl ctx::SizeWith for Section { impl ctx::TryIntoCtx for Section { type Error = crate::error::Error; - type Size = usize; - fn try_into_ctx(self, bytes: &mut [u8], ctx: container::Ctx) -> Result { + fn try_into_ctx(self, bytes: &mut [u8], ctx: container::Ctx) -> Result { if ctx.is_big () { bytes.pwrite_with::(self.into(), 0, ctx.le)?; } else { @@ -239,22 +237,26 @@ impl<'a> Iterator for SectionIterator<'a> { self.idx += 1; match self.data.gread_with::
(&mut self.offset, self.ctx) { Ok(section) => { - // it's not uncommon to encounter macho files where files are - // truncated but the sections are still remaining in the header. - // Because of this we want to not panic here but instead just - // slice down to a empty data slice. This way only if code - // actually needs to access those sections it will fall over. - let data = self.data - .get(section.offset as usize..) - .unwrap_or_else(|| { - warn!("section #{} offset {} out of bounds", self.idx, section.offset); - &[] - }) - .get(..section.size as usize) - .unwrap_or_else(|| { - warn!("section #{} size {} out of bounds", self.idx, section.size); - &[] - }); + let data = if section.flags & SECTION_TYPE == S_ZEROFILL { + &[] + } else { + // it's not uncommon to encounter macho files where files are + // truncated but the sections are still remaining in the header. + // Because of this we want to not panic here but instead just + // slice down to a empty data slice. This way only if code + // actually needs to access those sections it will fall over. + self.data + .get(section.offset as usize..) + .unwrap_or_else(|| { + warn!("section #{} offset {} out of bounds", self.idx, section.offset); + &[] + }) + .get(..section.size as usize) + .unwrap_or_else(|| { + warn!("section #{} size {} out of bounds", self.idx, section.size); + &[] + }) + }; Some(Ok((section, data))) }, Err(e) => Some(Err(e)) @@ -355,7 +357,6 @@ impl<'a> fmt::Debug for Segment<'a> { } impl<'a> ctx::SizeWith for Segment<'a> { - type Units = usize; fn size_with(ctx: &container::Ctx) -> usize { match ctx.container { container::Container::Little => SIZEOF_SEGMENT_COMMAND_32, @@ -366,8 +367,7 @@ impl<'a> ctx::SizeWith for Segment<'a> { impl<'a> ctx::TryIntoCtx for Segment<'a> { type Error = crate::error::Error; - type Size = usize; - fn try_into_ctx(self, bytes: &mut [u8], ctx: container::Ctx) -> Result { + fn try_into_ctx(self, bytes: &mut [u8], ctx: container::Ctx) -> Result { let segment_size = Self::size_with(&ctx); // should be able to write the section data inline after this, but not working at the moment //let section_size = bytes.pwrite(data, segment_size)?; @@ -446,7 +446,7 @@ impl<'a> Segment<'a> { initprot: segment.initprot, nsects: segment.nsects, flags: segment.flags, - data: segment_data(bytes, segment.fileoff as u64, segment.filesize as u64)?, + data: segment_data(bytes, u64::from(segment.fileoff), u64::from(segment.filesize))?, offset, raw_data: bytes, ctx, @@ -512,7 +512,7 @@ impl<'a> Segments<'a> { } /// Get every section from every segment // thanks to SpaceManic for figuring out the 'b lifetimes here :) - pub fn sections<'b>(&'b self) -> Box> + 'b> { + pub fn sections<'b>(&'b self) -> Box> + 'b> { Box::new(self.segments.iter().map(|segment| segment.into_iter())) } } diff --git a/third_party/rust/goblin/src/mach/symbols.rs b/third_party/rust/goblin/src/mach/symbols.rs index 87e1708640..d5d73f3ac5 100644 --- a/third_party/rust/goblin/src/mach/symbols.rs +++ b/third_party/rust/goblin/src/mach/symbols.rs @@ -236,7 +236,6 @@ impl Nlist { } impl ctx::SizeWith for Nlist { - type Units = usize; fn size_with(ctx: &container::Ctx) -> usize { match ctx.container { Container::Little => { @@ -299,8 +298,7 @@ impl From for Nlist64 { impl<'a> ctx::TryFromCtx<'a, container::Ctx> for Nlist { type Error = crate::error::Error; - type Size = usize; - fn try_from_ctx(bytes: &'a [u8], container::Ctx { container, le }: container::Ctx) -> crate::error::Result<(Self, Self::Size)> { + fn try_from_ctx(bytes: &'a [u8], container::Ctx { 
container, le }: container::Ctx) -> crate::error::Result<(Self, usize)> { let nlist = match container { Container::Little => { (bytes.pread_with::(0, le)?.into(), SIZEOF_NLIST_32) @@ -315,9 +313,7 @@ impl<'a> ctx::TryFromCtx<'a, container::Ctx> for Nlist { impl ctx::TryIntoCtx for Nlist { type Error = crate::error::Error; - type Size = usize; - - fn try_into_ctx(self, bytes: &mut [u8], container::Ctx { container, le }: container::Ctx) -> Result { + fn try_into_ctx(self, bytes: &mut [u8], container::Ctx { container, le }: container::Ctx) -> Result { let size = match container { Container::Little => { (bytes.pwrite_with::(self.into(), 0, le)?) @@ -345,10 +341,9 @@ pub struct SymbolsCtx { impl<'a, T: ?Sized> ctx::TryFromCtx<'a, SymbolsCtx, T> for Symbols<'a> where T: AsRef<[u8]> { type Error = crate::error::Error; - type Size = usize; fn try_from_ctx(bytes: &'a T, SymbolsCtx { nsyms, strtab, ctx - }: SymbolsCtx) -> crate::error::Result<(Self, Self::Size)> { + }: SymbolsCtx) -> crate::error::Result<(Self, usize)> { let data = bytes.as_ref(); Ok ((Symbols { data, diff --git a/third_party/rust/goblin/src/pe/debug.rs b/third_party/rust/goblin/src/pe/debug.rs index 6f01c6133b..36f188729e 100644 --- a/third_party/rust/goblin/src/pe/debug.rs +++ b/third_party/rust/goblin/src/pe/debug.rs @@ -56,7 +56,7 @@ pub const IMAGE_DEBUG_TYPE_BORLAND: u32 = 9; impl ImageDebugDirectory { fn parse(bytes: &[u8], dd: data_directories::DataDirectory, sections: &[section_table::SectionTable], file_alignment: u32) -> error::Result { let rva = dd.virtual_address as usize; - let offset = utils::find_offset(rva, sections, file_alignment).ok_or_else(|| error::Error::Malformed(format!("Cannot map ImageDebugDirectory rva {:#x} into offset", rva)))?;; + let offset = utils::find_offset(rva, sections, file_alignment).ok_or_else(|| error::Error::Malformed(format!("Cannot map ImageDebugDirectory rva {:#x} into offset", rva)))?; let idd: Self = bytes.pread_with(offset, scroll::LE)?; Ok (idd) } diff --git a/third_party/rust/goblin/src/pe/exception.rs b/third_party/rust/goblin/src/pe/exception.rs index d52608ef5d..42c297ad65 100644 --- a/third_party/rust/goblin/src/pe/exception.rs +++ b/third_party/rust/goblin/src/pe/exception.rs @@ -345,13 +345,11 @@ pub struct UnwindCode { impl<'a> TryFromCtx<'a, UnwindOpContext> for UnwindCode { type Error = error::Error; - type Size = usize; - #[inline] fn try_from_ctx( bytes: &'a [u8], ctx: UnwindOpContext, - ) -> Result<(Self, Self::Size), Self::Error> { + ) -> Result<(Self, usize), Self::Error> { let mut read = 0; let code_offset = bytes.gread_with::(&mut read, scroll::LE)?; let operation = bytes.gread_with::(&mut read, scroll::LE)?; @@ -675,10 +673,10 @@ impl<'a> ExceptionData<'a> { let size = directory.size as usize; if size % RUNTIME_FUNCTION_SIZE != 0 { - Err(scroll::Error::BadInput { + return Err(error::Error::from(scroll::Error::BadInput { size, msg: "invalid exception directory table size", - })?; + })); } let rva = directory.virtual_address as usize; @@ -687,7 +685,7 @@ impl<'a> ExceptionData<'a> { })?; if offset % 4 != 0 { - Err(scroll::Error::BadOffset(offset))?; + return Err(error::Error::from(scroll::Error::BadOffset(offset))); } Ok(ExceptionData { @@ -723,7 +721,7 @@ impl<'a> ExceptionData<'a> { /// Returns the function at the given index. 
pub fn get_function(&self, index: usize) -> error::Result { - self.get_function_by_offset(index * RUNTIME_FUNCTION_SIZE) + self.get_function_by_offset(self.offset + index * RUNTIME_FUNCTION_SIZE) } /// Performs a binary search to find a function entry covering the given RVA relative to the @@ -797,10 +795,10 @@ impl<'a> ExceptionData<'a> { #[inline] fn get_function_by_offset(&self, offset: usize) -> error::Result { - debug_assert!(offset % RUNTIME_FUNCTION_SIZE == 0); - debug_assert!(offset < self.size); + debug_assert!((offset - self.offset) % RUNTIME_FUNCTION_SIZE == 0); + debug_assert!(offset < self.offset + self.size); - Ok(self.bytes.pread_with(self.offset + offset, scroll::LE)?) + Ok(self.bytes.pread_with(offset, scroll::LE)?) } } @@ -824,3 +822,197 @@ impl<'a> IntoIterator for &'_ ExceptionData<'a> { self.functions() } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_size_of_runtime_function() { + assert_eq!( + std::mem::size_of::(), + RUNTIME_FUNCTION_SIZE + ); + } + + // Tests disabled until there is a solution for handling binary test data + // See https://github.com/m4b/goblin/issues/185 + + // macro_rules! microsoft_symbol { + // ($name:literal, $id:literal) => {{ + // use std::fs::File; + // use std::path::Path; + + // let path = Path::new(concat!("cache/", $name)); + // if !path.exists() { + // let url = format!( + // "https://msdl.microsoft.com/download/symbols/{}/{}/{}", + // $name, $id, $name + // ); + + // let mut response = reqwest::get(&url).expect(concat!("get ", $name)); + // let mut target = File::create(path).expect(concat!("create ", $name)); + // response + // .copy_to(&mut target) + // .expect(concat!("download ", $name)); + // } + + // std::fs::read(path).expect(concat!("open ", $name)) + // }}; + // } + + // lazy_static::lazy_static! 
{ + // static ref PE_DATA: Vec = microsoft_symbol!("WSHTCPIP.DLL", "4a5be0b77000"); + // } + + // #[test] + // fn test_parse() { + // let pe = PE::parse(&PE_DATA).expect("parse PE"); + // let exception_data = pe.exception_data.expect("get exception data"); + + // assert_eq!(exception_data.len(), 19); + // assert!(!exception_data.is_empty()); + // } + + // #[test] + // fn test_iter_functions() { + // let pe = PE::parse(&PE_DATA).expect("parse PE"); + // let exception_data = pe.exception_data.expect("get exception data"); + + // let functions: Vec = exception_data + // .functions() + // .map(|result| result.expect("parse runtime function")) + // .collect(); + + // assert_eq!(functions.len(), 19); + + // let expected = RuntimeFunction { + // begin_address: 0x1355, + // end_address: 0x1420, + // unwind_info_address: 0x4019, + // }; + + // assert_eq!(functions[4], expected); + // } + + // #[test] + // fn test_get_function() { + // let pe = PE::parse(&PE_DATA).expect("parse PE"); + // let exception_data = pe.exception_data.expect("get exception data"); + + // let expected = RuntimeFunction { + // begin_address: 0x1355, + // end_address: 0x1420, + // unwind_info_address: 0x4019, + // }; + + // assert_eq!( + // exception_data.get_function(4).expect("find function"), + // expected + // ); + // } + + // #[test] + // fn test_find_function() { + // let pe = PE::parse(&PE_DATA).expect("parse PE"); + // let exception_data = pe.exception_data.expect("get exception data"); + + // let expected = RuntimeFunction { + // begin_address: 0x1355, + // end_address: 0x1420, + // unwind_info_address: 0x4019, + // }; + + // assert_eq!( + // exception_data.find_function(0x1400).expect("find function"), + // Some(expected) + // ); + // } + + // #[test] + // fn test_find_function_none() { + // let pe = PE::parse(&PE_DATA).expect("parse PE"); + // let exception_data = pe.exception_data.expect("get exception data"); + + // // 0x1d00 is the end address of the last function. + + // assert_eq!( + // exception_data.find_function(0x1d00).expect("find function"), + // None + // ); + // } + + // #[test] + // fn test_get_unwind_info() { + // let pe = PE::parse(&PE_DATA).expect("parse PE"); + // let exception_data = pe.exception_data.expect("get exception data"); + + // // runtime function #0 directly refers to unwind info + // let rt_function = RuntimeFunction { + // begin_address: 0x1010, + // end_address: 0x1090, + // unwind_info_address: 0x25d8, + // }; + + // let unwind_info = exception_data + // .get_unwind_info(rt_function, &pe.sections) + // .expect("get unwind info"); + + // // Unwind codes just used to assert that the right unwind info was resolved + // let expected = &[4, 98]; + + // assert_eq!(unwind_info.code_bytes, expected); + // } + + // #[test] + // fn test_get_unwind_info_redirect() { + // let pe = PE::parse(&PE_DATA).expect("parse PE"); + // let exception_data = pe.exception_data.expect("get exception data"); + + // // runtime function #4 has a redirect (unwind_info_address & 1). 
+ // let rt_function = RuntimeFunction { + // begin_address: 0x1355, + // end_address: 0x1420, + // unwind_info_address: 0x4019, + // }; + + // let unwind_info = exception_data + // .get_unwind_info(rt_function, &pe.sections) + // .expect("get unwind info"); + + // // Unwind codes just used to assert that the right unwind info was resolved + // let expected = &[ + // 28, 100, 15, 0, 28, 84, 14, 0, 28, 52, 12, 0, 28, 82, 24, 240, 22, 224, 20, 208, 18, + // 192, 16, 112, + // ]; + + // assert_eq!(unwind_info.code_bytes, expected); + // } + + #[test] + fn test_iter_unwind_codes() { + let unwind_info = UnwindInfo { + version: 1, + size_of_prolog: 4, + frame_register: Register(0), + frame_register_offset: 0, + chained_info: None, + handler: None, + code_bytes: &[4, 98], + }; + + let unwind_codes: Vec = unwind_info + .unwind_codes() + .map(|result| result.expect("parse unwind code")) + .collect(); + + assert_eq!(unwind_codes.len(), 1); + + let expected = UnwindCode { + code_offset: 4, + operation: UnwindOperation::Alloc(56), + }; + + assert_eq!(unwind_codes[0], expected); + } +} diff --git a/third_party/rust/goblin/src/pe/export.rs b/third_party/rust/goblin/src/pe/export.rs index f1df5d7395..976c6fc143 100644 --- a/third_party/rust/goblin/src/pe/export.rs +++ b/third_party/rust/goblin/src/pe/export.rs @@ -142,14 +142,13 @@ impl<'a> ExportData<'a> { /// PE binaries have two kinds of reexports, either specifying the dll's name, or the ordinal value of the dll pub enum Reexport<'a> { DLLName { export: &'a str, lib: &'a str }, - DLLOrdinal { export: &'a str, ordinal: usize } + DLLOrdinal { ordinal: usize, lib: &'a str } } impl<'a> scroll::ctx::TryFromCtx<'a, scroll::Endian> for Reexport<'a> { type Error = crate::error::Error; - type Size = usize; #[inline] - fn try_from_ctx(bytes: &'a [u8], _ctx: scroll::Endian) -> Result<(Self, Self::Size), Self::Error> { + fn try_from_ctx(bytes: &'a [u8], _ctx: scroll::Endian) -> Result<(Self, usize), Self::Error> { let reexport = bytes.pread::<&str>(0)?; let reexport_len = reexport.len(); debug!("reexport: {}", &reexport); @@ -157,23 +156,21 @@ impl<'a> scroll::ctx::TryFromCtx<'a, scroll::Endian> for Reexport<'a> { let c: u8 = bytes.pread(o)?; debug!("reexport offset: {:#x} char: {:#x}", o, c); if c == b'.' 
{ - let i = o - 1; - let dll: &'a str = bytes.pread_with(0, scroll::ctx::StrCtx::Length(i))?; + let dll: &'a str = bytes.pread_with(0, scroll::ctx::StrCtx::Length(o))?; debug!("dll: {:?}", &dll); - let len = reexport_len - i - 1; - let rest: &'a [u8] = bytes.pread_with(o, len)?; + if o + 1 == reexport_len { + break; + } + let len = reexport_len - o - 1; + let rest: &'a [u8] = bytes.pread_with(o + 1, len)?; debug!("rest: {:?}", &rest); - let len = rest.len() - 1; if rest[0] == b'#' { - // UNTESTED - let ordinal = rest.pread_with::<&str>(1, scroll::ctx::StrCtx::Length(len))?; + let ordinal = rest.pread_with::<&str>(1, scroll::ctx::StrCtx::Length(len - 1))?; let ordinal = ordinal.parse::().map_err(|_e| error::Error::Malformed(format!("Cannot parse reexport ordinal from {} bytes", bytes.len())))?; - // FIXME: return size - return Ok((Reexport::DLLOrdinal { export: dll, ordinal: ordinal as usize }, 0)) + return Ok((Reexport::DLLOrdinal { ordinal: ordinal as usize, lib: dll }, reexport_len + 1)) } else { - let export = rest.pread_with::<&str>(1, scroll::ctx::StrCtx::Length(len))?; - // FIXME: return size - return Ok((Reexport::DLLName { export, lib: dll }, 0)) + let export = rest.pread_with::<&str>(0, scroll::ctx::StrCtx::Length(len))?; + return Ok((Reexport::DLLName { export, lib: dll }, reexport_len + 1)) } } } @@ -209,9 +206,8 @@ struct ExportCtx<'a> { impl<'a, 'b> scroll::ctx::TryFromCtx<'a, ExportCtx<'b>> for Export<'a> { type Error = error::Error; - type Size = usize; #[inline] - fn try_from_ctx(bytes: &'a [u8], ExportCtx { ptr, idx, sections, file_alignment, addresses, ordinals }: ExportCtx<'b>) -> Result<(Self, Self::Size), Self::Error> { + fn try_from_ctx(bytes: &'a [u8], ExportCtx { ptr, idx, sections, file_alignment, addresses, ordinals }: ExportCtx<'b>) -> Result<(Self, usize), Self::Error> { use self::ExportAddressTableEntry::*; let name = utils::find_offset(ptr as usize, sections, file_alignment).and_then(|offset| bytes.pread::<&str>(offset).ok()); diff --git a/third_party/rust/goblin/src/pe/import.rs b/third_party/rust/goblin/src/pe/import.rs index 97e2196c52..2b226a5b2b 100644 --- a/third_party/rust/goblin/src/pe/import.rs +++ b/third_party/rust/goblin/src/pe/import.rs @@ -17,7 +17,7 @@ pub const IMPORT_BY_ORDINAL_64: u64 = 0x8000_0000_0000_0000; pub const IMPORT_RVA_MASK_32: u32 = 0x7fff_ffff; pub const IMPORT_RVA_MASK_64: u64 = 0x0000_0000_7fff_ffff; -pub trait Bitfield<'a>: Into + PartialEq + Eq + LowerHex + Debug + TryFromCtx<'a, scroll::Endian, Error=scroll::Error, Size=usize> { +pub trait Bitfield<'a>: Into + PartialEq + Eq + LowerHex + Debug + TryFromCtx<'a, scroll::Endian, Error=scroll::Error> { fn is_ordinal(&self) -> bool; fn to_ordinal(&self) -> u16; fn to_rva(&self) -> u32; @@ -189,7 +189,7 @@ impl<'a> ImportData<'a> { pub fn parse>(bytes: &'a[u8], dd: data_directories::DataDirectory, sections: &[section_table::SectionTable], file_alignment: u32) -> error::Result> { let import_directory_table_rva = dd.virtual_address as usize; debug!("import_directory_table_rva {:#x}", import_directory_table_rva); - let offset = &mut utils::find_offset(import_directory_table_rva, sections, file_alignment).ok_or_else(|| error::Error::Malformed(format!("Cannot create ImportData; cannot map import_directory_table_rva {:#x} into offset", import_directory_table_rva)))?;; + let offset = &mut utils::find_offset(import_directory_table_rva, sections, file_alignment).ok_or_else(|| error::Error::Malformed(format!("Cannot create ImportData; cannot map import_directory_table_rva {:#x} into 
offset", import_directory_table_rva)))?; debug!("import data offset {:#x}", offset); let mut import_data = Vec::new(); loop { diff --git a/third_party/rust/goblin/src/pe/mod.rs b/third_party/rust/goblin/src/pe/mod.rs index 986a4e208c..e3f72ffe3d 100644 --- a/third_party/rust/goblin/src/pe/mod.rs +++ b/third_party/rust/goblin/src/pe/mod.rs @@ -116,9 +116,12 @@ impl<'a> PE<'a> { debug_data = Some(debug::DebugData::parse(bytes, debug_table, §ions, file_alignment)?); } - debug!("exception data: {:#?}", exception_data); - if let Some(exception_table) = *optional_header.data_directories.get_exception_table() { - exception_data = Some(exception::ExceptionData::parse(bytes, exception_table, §ions, file_alignment)?); + if header.coff_header.machine == header::COFF_MACHINE_X86_64 { + // currently only x86_64 is supported + debug!("exception data: {:#?}", exception_data); + if let Some(exception_table) = *optional_header.data_directories.get_exception_table() { + exception_data = Some(exception::ExceptionData::parse(bytes, exception_table, §ions, file_alignment)?); + } } } Ok( PE { diff --git a/third_party/rust/goblin/src/pe/optional_header.rs b/third_party/rust/goblin/src/pe/optional_header.rs index 5da4d986c3..d2a55f3014 100644 --- a/third_party/rust/goblin/src/pe/optional_header.rs +++ b/third_party/rust/goblin/src/pe/optional_header.rs @@ -263,8 +263,7 @@ impl OptionalHeader { impl<'a> ctx::TryFromCtx<'a, Endian> for OptionalHeader { type Error = crate::error::Error; - type Size = usize; - fn try_from_ctx(bytes: &'a [u8], _: Endian) -> error::Result<(Self, Self::Size)> { + fn try_from_ctx(bytes: &'a [u8], _: Endian) -> error::Result<(Self, usize)> { let magic = bytes.pread_with::(0, LE)?; let offset = &mut 0; let (standard_fields, windows_fields): (StandardFields, WindowsFields) = match magic { diff --git a/third_party/rust/goblin/src/pe/section_table.rs b/third_party/rust/goblin/src/pe/section_table.rs index 1a42c0ebc7..5f33bc9f16 100644 --- a/third_party/rust/goblin/src/pe/section_table.rs +++ b/third_party/rust/goblin/src/pe/section_table.rs @@ -88,6 +88,7 @@ impl SectionTable { } } + #[allow(clippy::useless_let_if_seq)] pub fn set_name_offset(&mut self, mut idx: usize) -> error::Result<()> { if idx <= 9_999_999 { // 10^7 - 1 // write!(&mut self.name[1..], "{}", idx) without using io::Write. 
@@ -146,7 +147,6 @@ impl SectionTable { } impl ctx::SizeWith for SectionTable { - type Units = usize; fn size_with(_ctx: &scroll::Endian) -> usize { SIZEOF_SECTION_TABLE } @@ -154,8 +154,7 @@ impl ctx::SizeWith for SectionTable { impl ctx::TryIntoCtx for SectionTable { type Error = error::Error; - type Size = usize; - fn try_into_ctx(self, bytes: &mut [u8], ctx: scroll::Endian) -> Result { + fn try_into_ctx(self, bytes: &mut [u8], ctx: scroll::Endian) -> Result { let offset = &mut 0; bytes.gwrite(&self.name[..], offset)?; bytes.gwrite_with(self.virtual_size, offset, ctx)?; diff --git a/third_party/rust/goblin/src/pe/utils.rs b/third_party/rust/goblin/src/pe/utils.rs index 953d361ec9..bd376165f3 100644 --- a/third_party/rust/goblin/src/pe/utils.rs +++ b/third_party/rust/goblin/src/pe/utils.rs @@ -79,7 +79,7 @@ pub fn try_name<'a>(bytes: &'a [u8], rva: usize, sections: &[section_table::Sect } pub fn get_data<'a, T>(bytes: &'a [u8], sections: &[section_table::SectionTable], directory: DataDirectory, file_alignment: u32) -> error::Result - where T: scroll::ctx::TryFromCtx<'a, scroll::Endian, Size = usize, Error = scroll::Error> { + where T: scroll::ctx::TryFromCtx<'a, scroll::Endian, Error = scroll::Error> { let rva = directory.virtual_address as usize; let offset = find_offset(rva, sections, file_alignment) .ok_or_else(||error::Error::Malformed(directory.virtual_address.to_string()))?; diff --git a/third_party/rust/goblin/tests/archive.rs b/third_party/rust/goblin/tests/archive.rs index 455ea0bc16..a74007f4b8 100644 --- a/third_party/rust/goblin/tests/archive.rs +++ b/third_party/rust/goblin/tests/archive.rs @@ -59,7 +59,7 @@ fn parse_self() { path = Path::new("target").join("release").join("libgoblin.rlib"); } let buffer = { - let mut fd = File::open(path).expect("open file"); + let mut fd = File::open(path).expect("can open file; did you run cargo build first?"); let mut v = Vec::new(); fd.read_to_end(&mut v).expect("read file"); v diff --git a/third_party/rust/id-arena/.cargo-checksum.json b/third_party/rust/id-arena/.cargo-checksum.json new file mode 100644 index 0000000000..f28430a787 --- /dev/null +++ b/third_party/rust/id-arena/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"44d2bc9ae9829b9d80bbd64cf758a29c5b7a136e8049bde601f25b504ff5daf8","Cargo.toml":"68ffe09814502adc81ab77dacc5d76e5f439435a71b48ec9c575289193945cb7","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"74619b782c5085d5e12762a2a209555e90770d0e08048d95a31f95febac0b4c6","README.tpl":"ec385000e14590a306855e7893daed0168102f33166bdc1e5cf5fa5599dac03f","src/lib.rs":"ee705a8a93ccfa0f958e421a1e27440e5b92afd422ee6579f66282287cb9abe8","src/rayon.rs":"48807a5563e6c248bab2731b60b00148084db9c071cf3c47cdb12dc7ecfa84e0","tests/readme_up_to_date.rs":"8db3e41d803e2a10307e7e35cb2afa6733e1c39ad34789752a927564bf6795b6"},"package":"25a2bc672d1148e28034f176e01fffebb08b35768468cc954630da77a1449005"} \ No newline at end of file diff --git a/third_party/rust/id-arena/CHANGELOG.md b/third_party/rust/id-arena/CHANGELOG.md new file mode 100644 index 0000000000..0f3ecde738 --- /dev/null +++ b/third_party/rust/id-arena/CHANGELOG.md @@ -0,0 +1,65 @@ +# 2.2.1 + +Released 2019-02-15. + +* Make sure our rayon parallel iterators are exported. Previously instances of + them were returned by `pub` methods but the types themselves were not + exported. + +# 2.2.0 + +Released 2019-01-30. 
+ +* Add the `Arena::alloc_with_id` method. This is better than using + `Arena::next_id` directly most of the time (but is also not *quite* as + flexible). See [#9](https://github.com/fitzgen/id-arena/issues/9) and + [#10](https://github.com/fitzgen/id-arena/pull/10). + +-------------------------------------------------------------------------------- + +# 2.1.0 + +Released 2019-01-25. + +* Added optional support for `rayon` parallel iteration. Enable the `rayon` + Cargo feature to get access. + +-------------------------------------------------------------------------------- + +# 2.0.1 + +Released 2019-01-09. + +* Implemented `Ord` and `PartialOrd` for `Id`. +* Added an `Arena::with_capacity` constructor. +* Added `Arena::next_id` to get the id that will be used for the next + allocation. + +-------------------------------------------------------------------------------- + +# 2.0.0 + +Released 2018-11-28. + +* Introduces the `ArenaBehavior` trait, which allows one to customize identifier + types and do things like implement space optimizations or use identifiers for + many arenas at once. +* Implements `Clone`, `PartialEq` and `Eq` for arenas. + +-------------------------------------------------------------------------------- + +# 1.0.2 + +Released 2018-11-25. + +* `Id` now implements `Send` and `Sync` +* The `PartialEq` implementation for `Id` now correctly checks that two ids + are for the same arena when checking equality. + +-------------------------------------------------------------------------------- + +# 1.0.1 + +-------------------------------------------------------------------------------- + +# 1.0.0 diff --git a/third_party/rust/quote-0.6.11/Cargo.toml b/third_party/rust/id-arena/Cargo.toml similarity index 50% rename from third_party/rust/quote-0.6.11/Cargo.toml rename to third_party/rust/id-arena/Cargo.toml index 820c3a98f9..c8566221d4 100644 --- a/third_party/rust/quote-0.6.11/Cargo.toml +++ b/third_party/rust/id-arena/Cargo.toml @@ -11,23 +11,21 @@ # will likely look very different (and much more reasonable) [package] -name = "quote" -version = "0.6.11" -authors = ["David Tolnay "] -include = ["Cargo.toml", "src/**/*.rs", "tests/**/*.rs", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] -description = "Quasi-quoting macro quote!(...)" -documentation = "https://docs.rs/quote/" +name = "id-arena" +version = "2.2.1" +authors = ["Nick Fitzgerald ", "Aleksey Kladov "] +description = "A simple, id-based arena." 
+documentation = "https://docs.rs/id-arena" readme = "README.md" -keywords = ["syn"] -categories = ["development-tools::procedural-macro-helpers"] +categories = ["memory-management", "rust-patterns", "no-std"] license = "MIT/Apache-2.0" -repository = "https://github.com/dtolnay/quote" -[dependencies.proc-macro2] -version = "0.4.21" -default-features = false +repository = "https://github.com/fitzgen/id-arena" +[package.metadata.docs.rs] +features = ["rayon"] +[dependencies.rayon] +version = "1.0.3" +optional = true [features] -default = ["proc-macro"] -proc-macro = ["proc-macro2/proc-macro"] -[badges.travis-ci] -repository = "dtolnay/quote" +default = ["std"] +std = [] diff --git a/third_party/rust/log-0.3.9/LICENSE-APACHE b/third_party/rust/id-arena/LICENSE-APACHE similarity index 100% rename from third_party/rust/log-0.3.9/LICENSE-APACHE rename to third_party/rust/id-arena/LICENSE-APACHE diff --git a/third_party/rust/proc-macro2-0.4.27/LICENSE-MIT b/third_party/rust/id-arena/LICENSE-MIT similarity index 100% rename from third_party/rust/proc-macro2-0.4.27/LICENSE-MIT rename to third_party/rust/id-arena/LICENSE-MIT diff --git a/third_party/rust/id-arena/README.md b/third_party/rust/id-arena/README.md new file mode 100644 index 0000000000..a783f1ce0b --- /dev/null +++ b/third_party/rust/id-arena/README.md @@ -0,0 +1,100 @@ +# `id-arena` + +[![](https://img.shields.io/crates/v/id-arena.svg)](https://crates.io/crates/id-arena) +[![](https://img.shields.io/crates/d/id-arena.svg)](https://crates.io/crates/id-arena) +[![Travis CI Build Status](https://travis-ci.org/fitzgen/id-arena.svg?branch=master)](https://travis-ci.org/fitzgen/id-arena) + +A simple, id-based arena. + +### Id-based + +Allocate objects and get an identifier for that object back, *not* a +reference to the allocated object. Given an id, you can get a shared or +exclusive reference to the allocated object from the arena. This id-based +approach is useful for constructing mutable graph data structures. + +If you want allocation to return a reference, consider [the `typed-arena` +crate](https://github.com/SimonSapin/rust-typed-arena/) instead. + +### No Deletion + +This arena does not support deletion, which makes its implementation simple +and allocation fast. If you want deletion, you need a way to solve the ABA +problem. Consider using [the `generational-arena` +crate](https://github.com/fitzgen/generational-arena) instead. + +### Homogeneous + +This crate's arenas can only contain objects of a single type `T`. If you +need an arena of objects with heterogeneous types, consider another crate. + +### `#![no_std]` Support + +Requires the `alloc` nightly feature. Disable the on-by-default `"std"` feature: + +```toml +[dependencies.id-arena] +version = "2" +default-features = false +``` + +### `rayon` Support + +If the `rayon` feature of this crate is activated: + +```toml +[dependencies] +id-arena = { version = "2", features = ["rayon"] } +``` + +then you can use [`rayon`](https://crates.io/crates/rayon)'s support for +parallel iteration. The `Arena` type will have a `par_iter` family of +methods where appropriate. 
+ +### Example + +```rust +use id_arena::{Arena, Id}; + +type AstNodeId = Id; + +#[derive(Debug, Eq, PartialEq)] +pub enum AstNode { + Const(i64), + Var(String), + Add { + lhs: AstNodeId, + rhs: AstNodeId, + }, + Sub { + lhs: AstNodeId, + rhs: AstNodeId, + }, + Mul { + lhs: AstNodeId, + rhs: AstNodeId, + }, + Div { + lhs: AstNodeId, + rhs: AstNodeId, + }, +} + +let mut ast_nodes = Arena::::new(); + +// Create the AST for `a * (b + 3)`. +let three = ast_nodes.alloc(AstNode::Const(3)); +let b = ast_nodes.alloc(AstNode::Var("b".into())); +let b_plus_three = ast_nodes.alloc(AstNode::Add { + lhs: b, + rhs: three, +}); +let a = ast_nodes.alloc(AstNode::Var("a".into())); +let a_times_b_plus_three = ast_nodes.alloc(AstNode::Mul { + lhs: a, + rhs: b_plus_three, +}); + +// Can use indexing to access allocated nodes. +assert_eq!(ast_nodes[three], AstNode::Const(3)); +``` diff --git a/third_party/rust/id-arena/README.tpl b/third_party/rust/id-arena/README.tpl new file mode 100644 index 0000000000..56c38d5f08 --- /dev/null +++ b/third_party/rust/id-arena/README.tpl @@ -0,0 +1,3 @@ +# `{{crate}}` + +{{readme}} diff --git a/third_party/rust/id-arena/src/lib.rs b/third_party/rust/id-arena/src/lib.rs new file mode 100644 index 0000000000..0c0edd4e60 --- /dev/null +++ b/third_party/rust/id-arena/src/lib.rs @@ -0,0 +1,726 @@ +//! [![](https://img.shields.io/crates/v/id-arena.svg)](https://crates.io/crates/id-arena) +//! [![](https://img.shields.io/crates/d/id-arena.svg)](https://crates.io/crates/id-arena) +//! [![Travis CI Build Status](https://travis-ci.org/fitzgen/id-arena.svg?branch=master)](https://travis-ci.org/fitzgen/id-arena) +//! +//! A simple, id-based arena. +//! +//! ## Id-based +//! +//! Allocate objects and get an identifier for that object back, *not* a +//! reference to the allocated object. Given an id, you can get a shared or +//! exclusive reference to the allocated object from the arena. This id-based +//! approach is useful for constructing mutable graph data structures. +//! +//! If you want allocation to return a reference, consider [the `typed-arena` +//! crate](https://github.com/SimonSapin/rust-typed-arena/) instead. +//! +//! ## No Deletion +//! +//! This arena does not support deletion, which makes its implementation simple +//! and allocation fast. If you want deletion, you need a way to solve the ABA +//! problem. Consider using [the `generational-arena` +//! crate](https://github.com/fitzgen/generational-arena) instead. +//! +//! ## Homogeneous +//! +//! This crate's arenas can only contain objects of a single type `T`. If you +//! need an arena of objects with heterogeneous types, consider another crate. +//! +//! ## `#![no_std]` Support +//! +//! Requires the `alloc` nightly feature. Disable the on-by-default `"std"` feature: +//! +//! ```toml +//! [dependencies.id-arena] +//! version = "2" +//! default-features = false +//! ``` +//! +//! ## `rayon` Support +//! +//! If the `rayon` feature of this crate is activated: +//! +//! ```toml +//! [dependencies] +//! id-arena = { version = "2", features = ["rayon"] } +//! ``` +//! +//! then you can use [`rayon`](https://crates.io/crates/rayon)'s support for +//! parallel iteration. The `Arena` type will have a `par_iter` family of +//! methods where appropriate. +//! +//! ## Example +//! +//! ```rust +//! use id_arena::{Arena, Id}; +//! +//! type AstNodeId = Id; +//! +//! #[derive(Debug, Eq, PartialEq)] +//! pub enum AstNode { +//! Const(i64), +//! Var(String), +//! Add { +//! lhs: AstNodeId, +//! rhs: AstNodeId, +//! }, +//! 
Sub { +//! lhs: AstNodeId, +//! rhs: AstNodeId, +//! }, +//! Mul { +//! lhs: AstNodeId, +//! rhs: AstNodeId, +//! }, +//! Div { +//! lhs: AstNodeId, +//! rhs: AstNodeId, +//! }, +//! } +//! +//! let mut ast_nodes = Arena::::new(); +//! +//! // Create the AST for `a * (b + 3)`. +//! let three = ast_nodes.alloc(AstNode::Const(3)); +//! let b = ast_nodes.alloc(AstNode::Var("b".into())); +//! let b_plus_three = ast_nodes.alloc(AstNode::Add { +//! lhs: b, +//! rhs: three, +//! }); +//! let a = ast_nodes.alloc(AstNode::Var("a".into())); +//! let a_times_b_plus_three = ast_nodes.alloc(AstNode::Mul { +//! lhs: a, +//! rhs: b_plus_three, +//! }); +//! +//! // Can use indexing to access allocated nodes. +//! assert_eq!(ast_nodes[three], AstNode::Const(3)); +//! ``` + +#![forbid(unsafe_code)] +#![deny(missing_debug_implementations)] +#![deny(missing_docs)] +// In no-std mode, use the alloc crate to get `Vec`. +#![no_std] +#![cfg_attr(not(feature = "std"), feature(alloc))] + +use core::cmp::Ordering; +use core::fmt; +use core::hash::{Hash, Hasher}; +use core::iter; +use core::marker::PhantomData; +use core::ops; +use core::slice; +use core::sync::atomic::{self, AtomicUsize, ATOMIC_USIZE_INIT}; + +#[cfg(not(feature = "std"))] +extern crate alloc; +#[cfg(not(feature = "std"))] +use alloc::vec::{self, Vec}; + +#[cfg(feature = "std")] +extern crate std; +#[cfg(feature = "std")] +use std::vec::{self, Vec}; + +#[cfg(feature = "rayon")] +mod rayon; +#[cfg(feature = "rayon")] +pub use rayon::*; + +/// A trait representing the implementation behavior of an arena and how +/// identifiers are represented. +/// +/// ## When should I implement `ArenaBehavior` myself? +/// +/// Usually, you should just use `DefaultArenaBehavior`, which is simple and +/// correct. However, there are some scenarios where you might want to implement +/// `ArenaBehavior` yourself: +/// +/// * **Space optimizations:** The default identifier is two words in size, +/// which is larger than is usually necessary. For example, if you know that an +/// arena *cannot* contain more than 256 items, you could make your own +/// identifier type that stores the index as a `u8` and then you can save some +/// space. +/// +/// * **Trait Coherence:** If you need to implement an upstream crate's traits +/// for identifiers, then defining your own identifier type allows you to work +/// with trait coherence rules. +/// +/// * **Share identifiers across arenas:** You can coordinate and share +/// identifiers across different arenas to enable a "struct of arrays" style +/// data representation. +pub trait ArenaBehavior { + /// The identifier type. + type Id: Copy; + + /// Construct a new object identifier from the given index and arena + /// identifier. + /// + /// ## Panics + /// + /// Implementations are allowed to panic if the given index is larger than + /// the underlying storage (e.g. the implementation uses a `u8` for storing + /// indices and the given index value is larger than 255). + fn new_id(arena_id: u32, index: usize) -> Self::Id; + + /// Get the given identifier's index. + fn index(Self::Id) -> usize; + + /// Get the given identifier's arena id. + fn arena_id(Self::Id) -> u32; + + /// Construct a new arena identifier. + /// + /// This is used to disambiguate `Id`s across different arenas. To make + /// identifiers with the same index from different arenas compare false for + /// equality, return a unique `u32` on every invocation. This is the + /// default, provided implementation's behavior. 
+ /// + /// To make identifiers with the same index from different arenas compare + /// true for equality, return the same `u32` on every invocation. + fn new_arena_id() -> u32 { + static ARENA_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT; + ARENA_COUNTER.fetch_add(1, atomic::Ordering::SeqCst) as u32 + } +} + +/// An identifier for an object allocated within an arena. +pub struct Id { + idx: usize, + arena_id: u32, + _ty: PhantomData T>, +} + +impl fmt::Debug for Id { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Id").field("idx", &self.idx).finish() + } +} + +impl Copy for Id {} + +impl Clone for Id { + #[inline] + fn clone(&self) -> Id { + *self + } +} + +impl PartialEq for Id { + #[inline] + fn eq(&self, rhs: &Self) -> bool { + self.arena_id == rhs.arena_id && self.idx == rhs.idx + } +} + +impl Eq for Id {} + +impl Hash for Id { + #[inline] + fn hash(&self, h: &mut H) { + self.arena_id.hash(h); + self.idx.hash(h); + } +} + +impl PartialOrd for Id { + fn partial_cmp(&self, rhs: &Self) -> Option { + Some(self.cmp(rhs)) + } +} + +impl Ord for Id { + fn cmp(&self, rhs: &Self) -> Ordering { + self.arena_id + .cmp(&rhs.arena_id) + .then(self.idx.cmp(&rhs.idx)) + } +} + +impl Id { + /// Get the index within the arena that this id refers to. + #[inline] + pub fn index(&self) -> usize { + self.idx + } +} + +/// The default `ArenaBehavior` implementation. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct DefaultArenaBehavior { + _phantom: PhantomData T>, +} + +impl ArenaBehavior for DefaultArenaBehavior { + type Id = Id; + + #[inline] + fn new_id(arena_id: u32, idx: usize) -> Self::Id { + Id { + idx, + arena_id, + _ty: PhantomData, + } + } + + #[inline] + fn index(id: Self::Id) -> usize { + id.idx + } + + #[inline] + fn arena_id(id: Self::Id) -> u32 { + id.arena_id + } +} + +/// An arena of objects of type `T`. +/// +/// ``` +/// use id_arena::Arena; +/// +/// let mut arena = Arena::<&str>::new(); +/// +/// let a = arena.alloc("Albert"); +/// assert_eq!(arena[a], "Albert"); +/// +/// arena[a] = "Alice"; +/// assert_eq!(arena[a], "Alice"); +/// ``` +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct Arena> { + arena_id: u32, + items: Vec, + _phantom: PhantomData A>, +} + +impl Default for Arena +where + A: ArenaBehavior, +{ + #[inline] + fn default() -> Arena { + Arena { + arena_id: A::new_arena_id(), + items: Vec::new(), + _phantom: PhantomData, + } + } +} + +impl Arena +where + A: ArenaBehavior, +{ + /// Construct a new, empty `Arena`. + /// + /// ``` + /// use id_arena::Arena; + /// + /// let mut arena = Arena::::new(); + /// arena.alloc(42); + /// ``` + #[inline] + pub fn new() -> Arena { + Default::default() + } + + /// Construct a new, empty `Arena` with capacity for the given number of + /// elements. + /// + /// ``` + /// use id_arena::Arena; + /// + /// let mut arena = Arena::::with_capacity(100); + /// for x in 0..100 { + /// arena.alloc(x * x); + /// } + /// ``` + #[inline] + pub fn with_capacity(capacity: usize) -> Arena { + Arena { + arena_id: A::new_arena_id(), + items: Vec::with_capacity(capacity), + _phantom: PhantomData, + } + } + + /// Allocate `item` within this arena and return its id. + /// + /// ``` + /// use id_arena::Arena; + /// + /// let mut arena = Arena::::new(); + /// let _id = arena.alloc(42); + /// ``` + /// + /// ## Panics + /// + /// Panics if the number of elements in the arena overflows a `usize` or + /// `Id`'s index storage representation. 
+ #[inline] + pub fn alloc(&mut self, item: T) -> A::Id { + let id = self.next_id(); + self.items.push(item); + id + } + + /// Allocate an item with the id that it will be assigned. + /// + /// This is useful for structures that want to store their id as their own + /// member. + /// + /// ``` + /// use id_arena::{Arena, Id}; + /// + /// struct Cat { + /// id: Id, + /// } + /// + /// let mut arena = Arena::::new(); + /// + /// let kitty = arena.alloc_with_id(|id| Cat { id }); + /// assert_eq!(arena[kitty].id, kitty); + /// ``` + #[inline] + pub fn alloc_with_id(&mut self, f: impl FnOnce(A::Id) -> T) -> A::Id { + let id = self.next_id(); + let val = f(id); + self.alloc(val) + } + + /// Get the id that will be used for the next item allocated into this + /// arena. + /// + /// If you are allocating a `struct` that wants to have its id as a member + /// of itself, prefer the less error-prone `Arena::alloc_with_id` method. + #[inline] + pub fn next_id(&self) -> A::Id { + let arena_id = self.arena_id; + let idx = self.items.len(); + A::new_id(arena_id, idx) + } + + /// Get a shared reference to the object associated with the given `id` if + /// it exists. + /// + /// If there is no object associated with `id` (for example, it might + /// reference an object allocated within a different arena) then return + /// `None`. + /// + /// ``` + /// use id_arena::Arena; + /// + /// let mut arena = Arena::::new(); + /// let id = arena.alloc(42); + /// assert!(arena.get(id).is_some()); + /// + /// let other_arena = Arena::::new(); + /// assert!(other_arena.get(id).is_none()); + /// ``` + #[inline] + pub fn get(&self, id: A::Id) -> Option<&T> { + if A::arena_id(id) != self.arena_id { + None + } else { + self.items.get(A::index(id)) + } + } + + /// Get an exclusive reference to the object associated with the given `id` + /// if it exists. + /// + /// If there is no object associated with `id` (for example, it might + /// reference an object allocated within a different arena) then return + /// `None`. + /// + /// ``` + /// use id_arena::Arena; + /// + /// let mut arena = Arena::::new(); + /// let id = arena.alloc(42); + /// assert!(arena.get_mut(id).is_some()); + /// + /// let mut other_arena = Arena::::new(); + /// assert!(other_arena.get_mut(id).is_none()); + /// ``` + #[inline] + pub fn get_mut(&mut self, id: A::Id) -> Option<&mut T> { + if A::arena_id(id) != self.arena_id { + None + } else { + self.items.get_mut(A::index(id)) + } + } + + /// Iterate over this arena's items and their ids. + /// + /// ``` + /// use id_arena::Arena; + /// + /// let mut arena = Arena::<&str>::new(); + /// + /// arena.alloc("hello"); + /// arena.alloc("hi"); + /// arena.alloc("yo"); + /// + /// for (id, s) in arena.iter() { + /// assert_eq!(arena.get(id).unwrap(), s); + /// println!("{:?} -> {}", id, s); + /// } + /// ``` + #[inline] + pub fn iter(&self) -> Iter { + IntoIterator::into_iter(self) + } + + /// Iterate over this arena's items and their ids, allowing mutation of each + /// item. + #[inline] + pub fn iter_mut(&mut self) -> IterMut { + IntoIterator::into_iter(self) + } + + /// Get the number of objects allocated in this arena. 
+ /// + /// ``` + /// use id_arena::Arena; + /// + /// let mut arena = Arena::<&str>::new(); + /// + /// arena.alloc("hello"); + /// arena.alloc("hi"); + /// + /// assert_eq!(arena.len(), 2); + /// ``` + #[inline] + pub fn len(&self) -> usize { + self.items.len() + } +} + +impl ops::Index for Arena +where + A: ArenaBehavior, +{ + type Output = T; + + #[inline] + fn index(&self, id: A::Id) -> &T { + assert_eq!(self.arena_id, A::arena_id(id)); + &self.items[A::index(id)] + } +} + +impl ops::IndexMut for Arena +where + A: ArenaBehavior, +{ + #[inline] + fn index_mut(&mut self, id: A::Id) -> &mut T { + assert_eq!(self.arena_id, A::arena_id(id)); + &mut self.items[A::index(id)] + } +} + +fn add_id(item: Option<(usize, T)>, arena_id: u32) -> Option<(A::Id, T)> +where + A: ArenaBehavior, +{ + item.map(|(idx, item)| (A::new_id(arena_id, idx), item)) +} + +/// An iterator over `(Id, &T)` pairs in an arena. +/// +/// See [the `Arena::iter()` method](./struct.Arena.html#method.iter) for details. +#[derive(Debug)] +pub struct Iter<'a, T: 'a, A: 'a> { + arena_id: u32, + iter: iter::Enumerate>, + _phantom: PhantomData A>, +} + +impl<'a, T: 'a, A: 'a> Iterator for Iter<'a, T, A> +where + A: ArenaBehavior, +{ + type Item = (A::Id, &'a T); + + #[inline] + fn next(&mut self) -> Option { + add_id::(self.iter.next(), self.arena_id) + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +impl<'a, T: 'a, A: 'a> DoubleEndedIterator for Iter<'a, T, A> +where + A: ArenaBehavior, +{ + fn next_back(&mut self) -> Option { + add_id::(self.iter.next_back(), self.arena_id) + } +} + +impl<'a, T: 'a, A: 'a> ExactSizeIterator for Iter<'a, T, A> +where + A: ArenaBehavior, +{ + fn len(&self) -> usize { + self.iter.len() + } +} + +impl<'a, T, A> IntoIterator for &'a Arena +where + A: ArenaBehavior, +{ + type Item = (A::Id, &'a T); + type IntoIter = Iter<'a, T, A>; + + #[inline] + fn into_iter(self) -> Iter<'a, T, A> { + Iter { + arena_id: self.arena_id, + iter: self.items.iter().enumerate(), + _phantom: PhantomData, + } + } +} + +/// An iterator over `(Id, &mut T)` pairs in an arena. +/// +/// See [the `Arena::iter_mut()` method](./struct.Arena.html#method.iter_mut) +/// for details. +#[derive(Debug)] +pub struct IterMut<'a, T: 'a, A: 'a> { + arena_id: u32, + iter: iter::Enumerate>, + _phantom: PhantomData A>, +} + +impl<'a, T: 'a, A: 'a> Iterator for IterMut<'a, T, A> +where + A: ArenaBehavior, +{ + type Item = (A::Id, &'a mut T); + + #[inline] + fn next(&mut self) -> Option { + add_id::(self.iter.next(), self.arena_id) + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +impl<'a, T: 'a, A: 'a> DoubleEndedIterator for IterMut<'a, T, A> +where + A: ArenaBehavior, +{ + fn next_back(&mut self) -> Option { + add_id::(self.iter.next_back(), self.arena_id) + } +} + +impl<'a, T: 'a, A: 'a> ExactSizeIterator for IterMut<'a, T, A> +where + A: ArenaBehavior, +{ + fn len(&self) -> usize { + self.iter.len() + } +} + +impl<'a, T, A> IntoIterator for &'a mut Arena +where + A: ArenaBehavior, +{ + type Item = (A::Id, &'a mut T); + type IntoIter = IterMut<'a, T, A>; + + #[inline] + fn into_iter(self) -> IterMut<'a, T, A> { + IterMut { + arena_id: self.arena_id, + iter: self.items.iter_mut().enumerate(), + _phantom: PhantomData, + } + } +} + +/// An iterator over `(Id, T)` pairs in an arena. 
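// --- Editor's note: illustrative sketch, not part of the vendored diff. ---
// The typical use of the API above: store a tree or graph in an `Arena` and link
// nodes by `Id` instead of by reference, which avoids the borrowing problems a
// pointer-based structure would have. Only items shown above are used
// (`Arena::new`, `alloc`, `Copy` ids, `Index`).
use id_arena::{Arena, Id};

#[derive(Debug)]
struct Node {
    value: i32,
    children: Vec<Id<Node>>,
}

fn main() {
    let mut arena = Arena::<Node>::new();
    let leaf_a = arena.alloc(Node { value: 1, children: Vec::new() });
    let leaf_b = arena.alloc(Node { value: 2, children: Vec::new() });
    let root = arena.alloc(Node { value: 0, children: vec![leaf_a, leaf_b] });

    // Ids are small `Copy` handles; resolve them through the arena when needed.
    let sum: i32 = arena[root].children.iter().map(|&c| arena[c].value).sum();
    assert_eq!(sum, 3);
}
// --- end of editor's sketch ---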
+#[derive(Debug)] +pub struct IntoIter { + arena_id: u32, + iter: iter::Enumerate>, + _phantom: PhantomData A>, +} + +impl Iterator for IntoIter +where + A: ArenaBehavior, +{ + type Item = (A::Id, T); + + #[inline] + fn next(&mut self) -> Option { + add_id::(self.iter.next(), self.arena_id) + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +impl DoubleEndedIterator for IntoIter +where + A: ArenaBehavior, +{ + fn next_back(&mut self) -> Option { + add_id::(self.iter.next_back(), self.arena_id) + } +} + +impl ExactSizeIterator for IntoIter +where + A: ArenaBehavior, +{ + fn len(&self) -> usize { + self.iter.len() + } +} + +impl IntoIterator for Arena +where + A: ArenaBehavior, +{ + type Item = (A::Id, T); + type IntoIter = IntoIter; + + #[inline] + fn into_iter(self) -> IntoIter { + IntoIter { + arena_id: self.arena_id, + iter: self.items.into_iter().enumerate(), + _phantom: PhantomData, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn ids_are_send_sync() { + fn assert_send_sync() {} + struct Foo; + assert_send_sync::>(); + } +} diff --git a/third_party/rust/id-arena/src/rayon.rs b/third_party/rust/id-arena/src/rayon.rs new file mode 100644 index 0000000000..7bea702247 --- /dev/null +++ b/third_party/rust/id-arena/src/rayon.rs @@ -0,0 +1,282 @@ +extern crate rayon; + +use self::rayon::iter::plumbing::{Consumer, UnindexedConsumer}; +use self::rayon::iter::plumbing::ProducerCallback; +use self::rayon::prelude::*; +use super::*; + +impl Arena +where + A: ArenaBehavior, +{ + /// Returns an iterator of shared references which can be used to iterate + /// over this arena in parallel with the `rayon` crate. + /// + /// # Features + /// + /// This API requires the `rayon` feature of this crate to be enabled. + pub fn par_iter(&self) -> ParIter + where + T: Sync, + A::Id: Send, + { + ParIter { + arena_id: self.arena_id, + iter: self.items.par_iter().enumerate(), + _phantom: PhantomData, + } + } + + /// Returns an iterator of mutable references which can be used to iterate + /// over this arena in parallel with the `rayon` crate. + /// + /// # Features + /// + /// This API requires the `rayon` feature of this crate to be enabled. + pub fn par_iter_mut(&mut self) -> ParIterMut + where + T: Send + Sync, + A::Id: Send, + { + ParIterMut { + arena_id: self.arena_id, + iter: self.items.par_iter_mut().enumerate(), + _phantom: PhantomData, + } + } +} + +/// A parallel iterator over shared references in an arena. +/// +/// See `Arena::par_iter` for more information. 
+#[derive(Debug)] +pub struct ParIter<'a, T, A> +where + T: Sync, +{ + arena_id: u32, + iter: rayon::iter::Enumerate>, + _phantom: PhantomData A>, +} + +impl<'a, T, A> ParallelIterator for ParIter<'a, T, A> +where + T: Sync, + A: ArenaBehavior, + A::Id: Send, +{ + type Item = (A::Id, &'a T); + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + let arena_id = self.arena_id; + self.iter.map(|(i, item)| (A::new_id(arena_id, i), item)) + .drive_unindexed(consumer) + } + + fn opt_len(&self) -> Option { + self.iter.opt_len() + } +} + +impl<'a, T, A> IndexedParallelIterator for ParIter<'a, T, A> +where + T: Sync, + A: ArenaBehavior, + A::Id: Send, +{ + fn drive(self, consumer: C) -> C::Result + where + C: Consumer, + { + let arena_id = self.arena_id; + self.iter.map(|(i, item)| (A::new_id(arena_id, i), item)) + .drive(consumer) + } + + fn len(&self) -> usize { + self.iter.len() + } + + fn with_producer(self, callback: CB) -> CB::Output + where + CB: ProducerCallback, + { + let arena_id = self.arena_id; + self.iter.map(|(i, item)| (A::new_id(arena_id, i), item)) + .with_producer(callback) + } +} + +impl<'data, T, A> IntoParallelIterator for &'data Arena + where A: ArenaBehavior, + A::Id: Send, + T: Sync, +{ + type Item = (A::Id, &'data T); + type Iter = ParIter<'data, T, A>; + + fn into_par_iter(self) -> Self::Iter { + self.par_iter() + } +} + +/// A parallel iterator over mutable references in an arena. +/// +/// See `Arena::par_iter_mut` for more information. +#[derive(Debug)] +pub struct ParIterMut<'a, T, A> +where + T: Send + Sync, +{ + arena_id: u32, + iter: rayon::iter::Enumerate>, + _phantom: PhantomData A>, +} + +impl<'a, T, A> ParallelIterator for ParIterMut<'a, T, A> +where + T: Send + Sync, + A: ArenaBehavior, + A::Id: Send, +{ + type Item = (A::Id, &'a mut T); + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + let arena_id = self.arena_id; + self.iter.map(|(i, item)| (A::new_id(arena_id, i), item)) + .drive_unindexed(consumer) + } + + fn opt_len(&self) -> Option { + self.iter.opt_len() + } +} + +impl<'a, T, A> IndexedParallelIterator for ParIterMut<'a, T, A> +where + T: Send + Sync, + A: ArenaBehavior, + A::Id: Send, +{ + fn drive(self, consumer: C) -> C::Result + where + C: Consumer, + { + let arena_id = self.arena_id; + self.iter.map(|(i, item)| (A::new_id(arena_id, i), item)) + .drive(consumer) + } + + fn len(&self) -> usize { + self.iter.len() + } + + fn with_producer(self, callback: CB) -> CB::Output + where + CB: ProducerCallback, + { + let arena_id = self.arena_id; + self.iter.map(|(i, item)| (A::new_id(arena_id, i), item)) + .with_producer(callback) + } +} + +impl<'data, T, A> IntoParallelIterator for &'data mut Arena + where A: ArenaBehavior, + A::Id: Send, + T: Send + Sync, +{ + type Item = (A::Id, &'data mut T); + type Iter = ParIterMut<'data, T, A>; + + fn into_par_iter(self) -> Self::Iter { + self.par_iter_mut() + } +} + +/// A parallel iterator over items in an arena. +/// +/// See `Arena::into_par_iter` for more information. 
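// --- Editor's note: illustrative sketch, not part of the vendored diff. ---
// Typical use of the rayon integration above. Assumes the crate is built with its
// `rayon` feature (e.g. `id-arena = { version = "2", features = ["rayon"] }`) and
// that `rayon` itself is a dependency; the yielded items are `(Id, &T)` and
// `(Id, &mut T)` pairs, per the `ParallelIterator` impls above.
use id_arena::Arena;
use rayon::prelude::*;

fn main() {
    let mut arena = Arena::<u64>::new();
    for x in 0..1_000u64 {
        arena.alloc(x);
    }

    // Shared parallel iteration: sum every value in the arena.
    let total: u64 = arena.par_iter().map(|(_id, v)| *v).sum();
    assert_eq!(total, (0..1_000u64).sum());

    // Mutable parallel iteration: update every value in place.
    arena.par_iter_mut().for_each(|(_id, v)| *v *= 2);
    assert_eq!(arena.iter().map(|(_id, v)| *v).sum::<u64>(), 2 * total);
}
// --- end of editor's sketch ---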
+#[derive(Debug)] +pub struct IntoParIter +where + T: Send, +{ + arena_id: u32, + iter: rayon::iter::Enumerate>, + _phantom: PhantomData A>, +} + +impl ParallelIterator for IntoParIter +where + T: Send, + A: ArenaBehavior, + A::Id: Send, +{ + type Item = (A::Id, T); + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + let arena_id = self.arena_id; + self.iter.map(|(i, item)| (A::new_id(arena_id, i), item)) + .drive_unindexed(consumer) + } + + fn opt_len(&self) -> Option { + self.iter.opt_len() + } +} + +impl IndexedParallelIterator for IntoParIter +where + T: Send, + A: ArenaBehavior, + A::Id: Send, +{ + fn drive(self, consumer: C) -> C::Result + where + C: Consumer, + { + let arena_id = self.arena_id; + self.iter.map(|(i, item)| (A::new_id(arena_id, i), item)) + .drive(consumer) + } + + fn len(&self) -> usize { + self.iter.len() + } + + fn with_producer(self, callback: CB) -> CB::Output + where + CB: ProducerCallback, + { + let arena_id = self.arena_id; + self.iter.map(|(i, item)| (A::new_id(arena_id, i), item)) + .with_producer(callback) + } +} + +impl IntoParallelIterator for Arena + where A: ArenaBehavior, + A::Id: Send, + T: Send, +{ + type Item = (A::Id, T); + type Iter = IntoParIter; + + fn into_par_iter(self) -> Self::Iter { + IntoParIter { + arena_id: self.arena_id, + iter: self.items.into_par_iter().enumerate(), + _phantom: PhantomData, + } + } +} diff --git a/third_party/rust/id-arena/tests/readme_up_to_date.rs b/third_party/rust/id-arena/tests/readme_up_to_date.rs new file mode 100644 index 0000000000..38af909e31 --- /dev/null +++ b/third_party/rust/id-arena/tests/readme_up_to_date.rs @@ -0,0 +1,22 @@ +use std::fs; +use std::process::Command; + +#[test] +fn cargo_readme_up_to_date() { + println!("Checking that `cargo readme > README.md` is up to date..."); + + let expected = Command::new("cargo") + .arg("readme") + .current_dir(env!("CARGO_MANIFEST_DIR")) + .output() + .expect("should run `cargo readme` OK") + .stdout; + let expected = String::from_utf8_lossy(&expected); + + let actual = fs::read_to_string(concat!(env!("CARGO_MANIFEST_DIR"), "/README.md")) + .expect("should read README.md OK"); + + if actual != expected { + panic!("Run `cargo readme > README.md` to update README.md"); + } +} diff --git a/third_party/rust/lmdb-rkv-sys/.cargo-checksum.json b/third_party/rust/lmdb-rkv-sys/.cargo-checksum.json index 3c5449380b..f03ccacfcf 100644 --- a/third_party/rust/lmdb-rkv-sys/.cargo-checksum.json +++ b/third_party/rust/lmdb-rkv-sys/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{"Cargo.toml":"61c7799174c7f869ab05be4ef4fe139196b4b5dd0d11202284a70ca3777f3ce6","bindgen.rs":"bf1f032951cac23a18bd41063df739503bf040bf7c815e1e393238c86edbbb25","build.rs":"47042b2d98af39f06406ebdd2020ad15c01165195aef6a3bda30e09d8411834a","lmdb/libraries/liblmdb/CHANGES":"ba14b94dda8670db454275d2f5fb83510f810ccb3ccfca642176a0efef245e08","lmdb/libraries/liblmdb/COPYRIGHT":"fae797823b892c4b59913256b4d10b17d71f57d4bc45e46d901b84fd6dfc3d13","lmdb/libraries/liblmdb/Doxyfile":"5545f6b049040ce58e6d1a603eaea6b7fb8ae92459f2ab8d3bcbacabcce1014d","lmdb/libraries/liblmdb/LICENSE":"310fe25c858a9515fc8c8d7d1f24a67c9496f84a91e0a0e41ea9975b1371e569","lmdb/libraries/liblmdb/Makefile":"60b5f574e6642602f692a95956da61c588a265ad50b8059960c230b9e6aaf4fd","lmdb/libraries/liblmdb/intro.doc":"9442e0db4fc9c70f058c43545e710476d8d5a80b959d20f4381240fd50c6b843","lmdb/libraries/liblmdb/lmdb.h":"05abf244b621b2d14e838b0643e72d5075ce77d8df856b6dccde74ee51c9cf22","lmdb/libraries/liblmdb/mdb.c":"5c7a4e9269e1af7ddb8f10b07f5d2b7f0d111dd379826d5e4880f6101bff4efc","lmdb/libraries/liblmdb/mdb_copy.1":"3a6a8a7a91e1bd42dc4d2a0188ff62d699ff2b3b097a670f30681decf63f22f3","lmdb/libraries/liblmdb/mdb_copy.c":"d3d412a770a5c3afeb88c44b4acdde0f0b985cde22497198e8f38296281cdddd","lmdb/libraries/liblmdb/mdb_dump.1":"9257be883c7fcfcbd61003cc730f7c0900fa8f6feba074c8c1e46634a257b13a","lmdb/libraries/liblmdb/mdb_dump.c":"b046cffcd997254e6daea47a2d7fb74f9d23282174cbb1e3bf9f5fb51a90fe64","lmdb/libraries/liblmdb/mdb_load.1":"ea927473245a4a7777ba687aa26baf7f0951fb620daf82b8d730a090185b2bbc","lmdb/libraries/liblmdb/mdb_load.c":"4f722613c65350315db23060be98584fb572978108885dab271101ba7187dca4","lmdb/libraries/liblmdb/mdb_stat.1":"c0a70d96b4b2d32e73301383d9d5620bc0bbbefb019bfd54f32088dfd4bc921a","lmdb/libraries/liblmdb/mdb_stat.c":"e6405fa191d784ecfa8eb8d1f153a58facc49a8f5a2c891a93802e67acc4861e","lmdb/libraries/liblmdb/midl.c":"e19143db51dd606396c7eba765832e4b66167c0975614e576b950349f8f6cdfd","lmdb/libraries/liblmdb/midl.h":"52066a085aa0fc90799113fb1cc60ca78a5e35ca6191f5f5cb29488d4bd66dba","lmdb/libraries/liblmdb/mtest.c":"89ab9ac8bf1e14a9f32a33757c4b3254e4984e0f24e5a302e2d126eb2c86f6db","lmdb/libraries/liblmdb/mtest2.c":"076b00395fe1461dd9577f7bb5567908ce50cf470efbf652787e6fe1dc2fb68c","lmdb/libraries/liblmdb/mtest3.c":"51b9a055e123bd0757ee3082cc6864c836969cf630e646a9cc34e01398c20634","lmdb/libraries/liblmdb/mtest4.c":"b0a725405d80bda6ab95b3ecf410ae330ab8df7a081ca81dd6ea1f8db87642e9","lmdb/libraries/liblmdb/mtest5.c":"7f3b06ca3833315ea4c70d5e91feb1b677f6949f105f4f89d96c3ac35e104f2f","lmdb/libraries/liblmdb/mtest6.c":"e4d7880c36547ebf33bc020046730bf2c075c53aaacd5c876152cc5ae7ab5e6c","lmdb/libraries/liblmdb/sample-bdb.txt":"153d84f8fc49a3abba53ed52d5a41c8d6d4698753a10bbe0689a9e65d3513513","lmdb/libraries/liblmdb/sample-mdb.txt":"1f77385786cffdf72b33da06a91a444fe2827673c3627f89110903a8fe012795","lmdb/libraries/liblmdb/tooltag":"4734c6dc1fa7aec8c2e9646bd04bc5218ef6a03ad83a3b18de2ac4069eb94120","src/bindings.rs":"187b1cdf9c7cd91632030fb64f964113cf115155675cb12c4a6a8afb2292f79a","src/lib.rs":"f962f3e5139b63b25c9198d3ef74b05b4ee1569f9ac7fdbac7c4cb7adac78f6d","tests/fixtures/testdb/data.mdb":"8a0cf8ad63473ae63d437a646042b0d64c112a8fa33d5c916f0678ce4d23189b","tests/fixtures/testdb/lock.mdb":"0e734f65f82f39556cfd62f5da1cc02e56e1cc435f48fe39168e4dc21628e586","tests/lmdb.rs":"5086cb43f3a7b6a8aaa257084c1e0bea664f279ff260b99a8ad0d3c598867a45","tests/simple.rs":"a7ffaef9e3e499bc9372dca9b37b05e7b4e70b3c7d9aac63f79dd0cb8512a41f"},"package":"a1e4b19a1fdf5b74b
c802cc9aa7c0c86a775e8b872ba9d5a4e606ffc5d076953"} \ No newline at end of file +{"files":{"Cargo.toml":"8de5804dc3a9cb1f955d9ffdac7fd7f3e09c47b6aa060ea1a5698cdb90a0ffca","bindgen.rs":"4579cf8b217b9673fd08f8306bfe1b4bbac1b31cf11b2a395f81ddac04dfc10e","build.rs":"54abc550db966ce0479e1cf54ed992e3eca7e947357c54bf937b6048f0813c95","lmdb/libraries/liblmdb/CHANGES":"ba14b94dda8670db454275d2f5fb83510f810ccb3ccfca642176a0efef245e08","lmdb/libraries/liblmdb/COPYRIGHT":"fae797823b892c4b59913256b4d10b17d71f57d4bc45e46d901b84fd6dfc3d13","lmdb/libraries/liblmdb/Doxyfile":"5545f6b049040ce58e6d1a603eaea6b7fb8ae92459f2ab8d3bcbacabcce1014d","lmdb/libraries/liblmdb/LICENSE":"310fe25c858a9515fc8c8d7d1f24a67c9496f84a91e0a0e41ea9975b1371e569","lmdb/libraries/liblmdb/Makefile":"60b5f574e6642602f692a95956da61c588a265ad50b8059960c230b9e6aaf4fd","lmdb/libraries/liblmdb/intro.doc":"9442e0db4fc9c70f058c43545e710476d8d5a80b959d20f4381240fd50c6b843","lmdb/libraries/liblmdb/lmdb.h":"05abf244b621b2d14e838b0643e72d5075ce77d8df856b6dccde74ee51c9cf22","lmdb/libraries/liblmdb/mdb.c":"5c7a4e9269e1af7ddb8f10b07f5d2b7f0d111dd379826d5e4880f6101bff4efc","lmdb/libraries/liblmdb/mdb_copy.1":"3a6a8a7a91e1bd42dc4d2a0188ff62d699ff2b3b097a670f30681decf63f22f3","lmdb/libraries/liblmdb/mdb_copy.c":"d3d412a770a5c3afeb88c44b4acdde0f0b985cde22497198e8f38296281cdddd","lmdb/libraries/liblmdb/mdb_dump.1":"9257be883c7fcfcbd61003cc730f7c0900fa8f6feba074c8c1e46634a257b13a","lmdb/libraries/liblmdb/mdb_dump.c":"b046cffcd997254e6daea47a2d7fb74f9d23282174cbb1e3bf9f5fb51a90fe64","lmdb/libraries/liblmdb/mdb_load.1":"ea927473245a4a7777ba687aa26baf7f0951fb620daf82b8d730a090185b2bbc","lmdb/libraries/liblmdb/mdb_load.c":"4f722613c65350315db23060be98584fb572978108885dab271101ba7187dca4","lmdb/libraries/liblmdb/mdb_stat.1":"c0a70d96b4b2d32e73301383d9d5620bc0bbbefb019bfd54f32088dfd4bc921a","lmdb/libraries/liblmdb/mdb_stat.c":"e6405fa191d784ecfa8eb8d1f153a58facc49a8f5a2c891a93802e67acc4861e","lmdb/libraries/liblmdb/midl.c":"e19143db51dd606396c7eba765832e4b66167c0975614e576b950349f8f6cdfd","lmdb/libraries/liblmdb/midl.h":"52066a085aa0fc90799113fb1cc60ca78a5e35ca6191f5f5cb29488d4bd66dba","lmdb/libraries/liblmdb/mtest.c":"89ab9ac8bf1e14a9f32a33757c4b3254e4984e0f24e5a302e2d126eb2c86f6db","lmdb/libraries/liblmdb/mtest2.c":"076b00395fe1461dd9577f7bb5567908ce50cf470efbf652787e6fe1dc2fb68c","lmdb/libraries/liblmdb/mtest3.c":"51b9a055e123bd0757ee3082cc6864c836969cf630e646a9cc34e01398c20634","lmdb/libraries/liblmdb/mtest4.c":"b0a725405d80bda6ab95b3ecf410ae330ab8df7a081ca81dd6ea1f8db87642e9","lmdb/libraries/liblmdb/mtest5.c":"7f3b06ca3833315ea4c70d5e91feb1b677f6949f105f4f89d96c3ac35e104f2f","lmdb/libraries/liblmdb/mtest6.c":"e4d7880c36547ebf33bc020046730bf2c075c53aaacd5c876152cc5ae7ab5e6c","lmdb/libraries/liblmdb/sample-bdb.txt":"153d84f8fc49a3abba53ed52d5a41c8d6d4698753a10bbe0689a9e65d3513513","lmdb/libraries/liblmdb/sample-mdb.txt":"1f77385786cffdf72b33da06a91a444fe2827673c3627f89110903a8fe012795","lmdb/libraries/liblmdb/tooltag":"4734c6dc1fa7aec8c2e9646bd04bc5218ef6a03ad83a3b18de2ac4069eb94120","src/bindings.rs":"ab64073ce4ec64282e8f67cd8f148c83661810a662cb804813b3add0d92a4bf6","src/lib.rs":"b74706ae7901412da54caac7f0cc94f2b445e3a88741760b819659381d615ca8","tests/fixtures/testdb-32/data.mdb":"74d09a30a020789631ef5c64d60d34f6913cf63ad73c82327bd605c5a37849bb","tests/fixtures/testdb-32/lock.mdb":"bbfd0f5aa3eea8421b0a2c277de69b105789dbc744391d9a08d0d3332ae91f70","tests/fixtures/testdb/data.mdb":"8a0cf8ad63473ae63d437a646042b0d64c112a8fa33d5c916f0678ce4d23189b","tests/fixtur
es/testdb/lock.mdb":"16455695fa3d0229285720b2f16764d2f42d7477f14835bd513c9f2766b4ed22","tests/lmdb.rs":"5086cb43f3a7b6a8aaa257084c1e0bea664f279ff260b99a8ad0d3c598867a45","tests/simple.rs":"774a3edf589dd5fab3b90d5faabb8b8e06e51ec231a795ba17b1e35e65490848"},"package":"b27470ac25167b3afdfb6af8fcd3bc1be67de50ffbdaf4073378cfded6ae24a5"} \ No newline at end of file diff --git a/third_party/rust/lmdb-rkv-sys/Cargo.toml b/third_party/rust/lmdb-rkv-sys/Cargo.toml index 08036cc22f..9c91a6f499 100644 --- a/third_party/rust/lmdb-rkv-sys/Cargo.toml +++ b/third_party/rust/lmdb-rkv-sys/Cargo.toml @@ -12,7 +12,7 @@ [package] name = "lmdb-rkv-sys" -version = "0.9.5" +version = "0.11.0" authors = ["Dan Burkert ", "Victor Porof "] build = "build.rs" description = "Rust bindings for liblmdb." @@ -29,7 +29,9 @@ name = "lmdb_sys" [dependencies.libc] version = "0.2" [build-dependencies.bindgen] -version = "0.51.1-oldsyn" +version = "0.53.2" +features = ["runtime"] +optional = true default-features = false [build-dependencies.cc] diff --git a/third_party/rust/lmdb-rkv-sys/bindgen.rs b/third_party/rust/lmdb-rkv-sys/bindgen.rs index 5fc705a43c..3505fc5906 100644 --- a/third_party/rust/lmdb-rkv-sys/bindgen.rs +++ b/third_party/rust/lmdb-rkv-sys/bindgen.rs @@ -1,5 +1,3 @@ -extern crate bindgen; - use bindgen::callbacks::IntKind; use bindgen::callbacks::ParseCallbacks; use std::env; @@ -38,7 +36,7 @@ impl ParseCallbacks for Callbacks { } } -fn main() { +pub fn generate() { let mut lmdb = PathBuf::from(&env::var("CARGO_MANIFEST_DIR").unwrap()); lmdb.push("lmdb"); lmdb.push("libraries"); @@ -52,8 +50,11 @@ fn main() { .whitelist_var("^(MDB|mdb)_.*") .whitelist_type("^(MDB|mdb)_.*") .whitelist_function("^(MDB|mdb)_.*") + .size_t_is_usize(true) .ctypes_prefix("::libc") .blacklist_item("mode_t") + .blacklist_item("mdb_mode_t") + .blacklist_item("mdb_filehandle_t") .blacklist_item("^__.*") .parse_callbacks(Box::new(Callbacks {})) .layout_tests(false) diff --git a/third_party/rust/lmdb-rkv-sys/build.rs b/third_party/rust/lmdb-rkv-sys/build.rs index a6527d72ca..213e3cc90a 100644 --- a/third_party/rust/lmdb-rkv-sys/build.rs +++ b/third_party/rust/lmdb-rkv-sys/build.rs @@ -1,6 +1,13 @@ extern crate cc; extern crate pkg_config; +#[cfg(feature = "bindgen")] +extern crate bindgen; + +#[cfg(feature = "bindgen")] +#[path = "bindgen.rs"] +mod generate; + use std::env; use std::path::PathBuf; @@ -39,6 +46,9 @@ macro_rules! 
warn { } fn main() { + #[cfg(feature = "bindgen")] + generate::generate(); + let mut lmdb = PathBuf::from(&env::var("CARGO_MANIFEST_DIR").unwrap()); lmdb.push("lmdb"); lmdb.push("libraries"); diff --git a/third_party/rust/lmdb-rkv-sys/src/bindings.rs b/third_party/rust/lmdb-rkv-sys/src/bindings.rs index fc96b364c2..50d1e1e218 100644 --- a/third_party/rust/lmdb-rkv-sys/src/bindings.rs +++ b/third_party/rust/lmdb-rkv-sys/src/bindings.rs @@ -52,8 +52,6 @@ pub const MDB_BAD_TXN: ::libc::c_int = -30782; pub const MDB_BAD_VALSIZE: ::libc::c_int = -30781; pub const MDB_BAD_DBI: ::libc::c_int = -30780; pub const MDB_LAST_ERRCODE: ::libc::c_int = -30780; -pub type mdb_mode_t = mode_t; -pub type mdb_filehandle_t = ::libc::c_int; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct MDB_env { @@ -82,6 +80,7 @@ pub struct MDB_cursor { #[doc = " The same applies to data sizes in databases with the #MDB_DUPSORT flag."] #[doc = " Other data items can in theory be from 0 to 0xffffffff bytes long."] #[repr(C)] +#[derive(Debug, Copy, Clone)] pub struct MDB_val { #[doc = "< size of the data item"] pub mv_size: usize, @@ -167,6 +166,7 @@ pub const MDB_PREV_MULTIPLE: MDB_cursor_op = 18; pub type MDB_cursor_op = u32; #[doc = " @brief Statistics for a database in the environment"] #[repr(C)] +#[derive(Debug, Copy, Clone)] pub struct MDB_stat { #[doc = "< Size of a database page."] #[doc = "This is currently the same for all databases."] @@ -184,6 +184,7 @@ pub struct MDB_stat { } #[doc = " @brief Information about the environment"] #[repr(C)] +#[derive(Debug, Copy, Clone)] pub struct MDB_envinfo { #[doc = "< Address of map, if fixed"] pub me_mapaddr: *mut ::libc::c_void, diff --git a/third_party/rust/lmdb-rkv-sys/src/lib.rs b/third_party/rust/lmdb-rkv-sys/src/lib.rs index 804f5ac6d8..da85005538 100644 --- a/third_party/rust/lmdb-rkv-sys/src/lib.rs +++ b/third_party/rust/lmdb-rkv-sys/src/lib.rs @@ -1,15 +1,22 @@ #![deny(warnings)] #![allow(non_camel_case_types)] #![allow(clippy::all)] -#![doc(html_root_url = "https://docs.rs/lmdb-rkv-sys/0.9.5")] +#![doc(html_root_url = "https://docs.rs/lmdb-rkv-sys/0.11.0")] extern crate libc; #[cfg(unix)] #[allow(non_camel_case_types)] -pub type mode_t = ::libc::mode_t; +pub type mdb_mode_t = ::libc::mode_t; #[cfg(windows)] #[allow(non_camel_case_types)] -pub type mode_t = ::libc::c_int; +pub type mdb_mode_t = ::libc::c_int; + +#[cfg(unix)] +#[allow(non_camel_case_types)] +pub type mdb_filehandle_t = ::libc::c_int; +#[cfg(windows)] +#[allow(non_camel_case_types)] +pub type mdb_filehandle_t = *mut ::libc::c_void; include!("bindings.rs"); diff --git a/third_party/rust/lmdb-rkv-sys/tests/fixtures/testdb-32/data.mdb b/third_party/rust/lmdb-rkv-sys/tests/fixtures/testdb-32/data.mdb new file mode 100644 index 0000000000..c1a0bff0f4 Binary files /dev/null and b/third_party/rust/lmdb-rkv-sys/tests/fixtures/testdb-32/data.mdb differ diff --git a/third_party/rust/lmdb-rkv-sys/tests/fixtures/testdb-32/lock.mdb b/third_party/rust/lmdb-rkv-sys/tests/fixtures/testdb-32/lock.mdb new file mode 100644 index 0000000000..7219256523 Binary files /dev/null and b/third_party/rust/lmdb-rkv-sys/tests/fixtures/testdb-32/lock.mdb differ diff --git a/third_party/rust/lmdb-rkv-sys/tests/fixtures/testdb/lock.mdb b/third_party/rust/lmdb-rkv-sys/tests/fixtures/testdb/lock.mdb index fd45a7ae22..64042bd742 100644 Binary files a/third_party/rust/lmdb-rkv-sys/tests/fixtures/testdb/lock.mdb and b/third_party/rust/lmdb-rkv-sys/tests/fixtures/testdb/lock.mdb differ diff --git 
a/third_party/rust/lmdb-rkv-sys/tests/simple.rs b/third_party/rust/lmdb-rkv-sys/tests/simple.rs index eb2df05c7e..f15cc84c2d 100644 --- a/third_party/rust/lmdb-rkv-sys/tests/simple.rs +++ b/third_party/rust/lmdb-rkv-sys/tests/simple.rs @@ -3,6 +3,7 @@ extern crate lmdb_sys; use lmdb_sys::*; use std::ffi::{c_void, CString}; +use std::fs::File; use std::ptr; // https://github.com/victorporof/lmdb/blob/mdb.master/libraries/liblmdb/moz-test.c @@ -23,19 +24,30 @@ macro_rules! str { } #[test] -#[cfg(all(target_os = "windows", target_arch = "x86"))] -#[should_panic(expected = "Failed with code -30793")] -fn test_simple_win_32() { - test_simple() +#[cfg(target_pointer_width = "32")] +fn test_simple_32() { + test_simple("./tests/fixtures/testdb-32") } #[test] -#[cfg(not(all(target_os = "windows", target_arch = "x86")))] -fn test_simple_other() { - test_simple() +#[cfg(target_pointer_width = "64")] +fn test_simple_64() { + test_simple("./tests/fixtures/testdb") } -fn test_simple() { +#[cfg(windows)] +fn get_file_fd(file: &File) -> std::os::windows::io::RawHandle { + use std::os::windows::io::AsRawHandle; + file.as_raw_handle() +} + +#[cfg(unix)] +fn get_file_fd(file: &File) -> std::os::unix::io::RawFd { + use std::os::unix::io::AsRawFd; + file.as_raw_fd() +} + +fn test_simple(env_path: &str) { let mut env: *mut MDB_env = ptr::null_mut(); let mut dbi: MDB_dbi = 0; let mut key = MDB_val { @@ -53,7 +65,7 @@ fn test_simple() { unsafe { E!(mdb_env_create(&mut env)); E!(mdb_env_set_maxdbs(env, 2)); - E!(mdb_env_open(env, str!("./tests/fixtures/testdb"), 0, 0664)); + E!(mdb_env_open(env, str!(env_path), 0, 0664)); E!(mdb_txn_begin(env, ptr::null_mut(), 0, &mut txn)); E!(mdb_dbi_open(txn, str!("subdb"), MDB_CREATE, &mut dbi)); @@ -67,6 +79,13 @@ fn test_simple() { E!(mdb_txn_begin(env, ptr::null_mut(), 0, &mut txn)); E!(mdb_put(txn, dbi, &mut key, &mut data, 0)); E!(mdb_txn_commit(txn)); + } + + let file = File::create("./tests/fixtures/copytestdb.mdb").unwrap(); + + unsafe { + let fd = get_file_fd(&file); + E!(mdb_env_copyfd(env, fd)); mdb_dbi_close(env, dbi); mdb_env_close(env); diff --git a/third_party/rust/lmdb-rkv/.cargo-checksum.json b/third_party/rust/lmdb-rkv/.cargo-checksum.json index 0e7c5a2d06..d45ee560b1 100644 --- a/third_party/rust/lmdb-rkv/.cargo-checksum.json +++ b/third_party/rust/lmdb-rkv/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"6d06a849a674c6e18514d46e2f9cff156d7aaa7ae689a7a619432d5da46c537b","LICENSE":"db6d163be642e3b568c5fb2104013da632316ecd4e75935df1613af8b0b37933","README.md":"6d7b399235c2f09b4257c3b46369ab5dcd310b4fd2cb2cf6980eecbae6eceb2a","benches/cursor.rs":"3a3410940baf9cc9cfa552d23847ab39c71996477f38370803ed9edab3a45f03","benches/transaction.rs":"309cc7526d92274fc0748585200e1c4ff2e997514f8455f184360fe9dfb75035","benches/utils.rs":"e8c88b72cf7cc7a6ee331c03f630c6e52ec9f1a5462249ff5c25e53ddedc2f4f","src/cursor.rs":"fd7f1700c3e61bb1955e912d164fbe96cf7a7fc97918ef9f750af4f5c92b5469","src/database.rs":"9c52336a487e6141835607c04a4aa53fbe05b9329abb81a461d8f24dfd52f442","src/environment.rs":"6eadf5f1be000bdf7580c21f65ec966363f896fcce9ea004ff6722765eb1f060","src/error.rs":"19c97fee8b2a1bab25f047f34ec2c781062ad95b6dbac0830fdfd3c60e96307d","src/flags.rs":"40fd3d4d72c8db8f9ecb893420300a3585e2ca4c49073065ec9ebf24fe23c064","src/lib.rs":"e77abe7df39b44d2af2d796f027a3f03b6bc99582ae1f291f81e3b9c31d58ffe","src/transaction.rs":"dc81992311a0122bc1f138561b0431c36d8c1080cdf1c1dbf157a448ca3ba3ae"},"package":"605061e5465304475be2041f19967a900175ea1b6d8f47fbab84a84fb8c48452"} \ No newline 
at end of file +{"files":{"Cargo.toml":"99efe5a50cb9eedb2ec19f42a84c8d80c7ad12776fefa6fa971e6e8813a7f93a","LICENSE":"db6d163be642e3b568c5fb2104013da632316ecd4e75935df1613af8b0b37933","README.md":"6d7b399235c2f09b4257c3b46369ab5dcd310b4fd2cb2cf6980eecbae6eceb2a","benches/cursor.rs":"3a3410940baf9cc9cfa552d23847ab39c71996477f38370803ed9edab3a45f03","benches/transaction.rs":"309cc7526d92274fc0748585200e1c4ff2e997514f8455f184360fe9dfb75035","benches/utils.rs":"e8c88b72cf7cc7a6ee331c03f630c6e52ec9f1a5462249ff5c25e53ddedc2f4f","src/cursor.rs":"fd7f1700c3e61bb1955e912d164fbe96cf7a7fc97918ef9f750af4f5c92b5469","src/database.rs":"9c52336a487e6141835607c04a4aa53fbe05b9329abb81a461d8f24dfd52f442","src/environment.rs":"119dfd0d27b0202a68cb80f891acc0755e67afc9b379ca21c030760baad06965","src/error.rs":"19c97fee8b2a1bab25f047f34ec2c781062ad95b6dbac0830fdfd3c60e96307d","src/flags.rs":"40fd3d4d72c8db8f9ecb893420300a3585e2ca4c49073065ec9ebf24fe23c064","src/lib.rs":"738088722d641ebd1a5ce41576cef4b137b9863815bf9ee7a3909cb46a58e370","src/transaction.rs":"dc81992311a0122bc1f138561b0431c36d8c1080cdf1c1dbf157a448ca3ba3ae"},"package":"447a296f7aca299cfbb50f4e4f3d49451549af655fb7215d7f8c0c3d64bad42b"} \ No newline at end of file diff --git a/third_party/rust/lmdb-rkv/Cargo.toml b/third_party/rust/lmdb-rkv/Cargo.toml index e54e76f2e5..51274cf0f1 100644 --- a/third_party/rust/lmdb-rkv/Cargo.toml +++ b/third_party/rust/lmdb-rkv/Cargo.toml @@ -12,7 +12,7 @@ [package] name = "lmdb-rkv" -version = "0.12.3" +version = "0.14.0" authors = ["Dan Burkert ", "Victor Porof "] exclude = ["/.appveyor.yml", "/.travis.yml", "/azure-pipelines-template.yml", "/azure-pipelines.yml"] description = "Idiomatic and safe LMDB wrapper." @@ -36,7 +36,7 @@ version = "1" version = "0.2" [dependencies.lmdb-rkv-sys] -version = "0.9.3" +version = "0.11.0" [dev-dependencies.rand] version = "0.4" diff --git a/third_party/rust/lmdb-rkv/src/environment.rs b/third_party/rust/lmdb-rkv/src/environment.rs index cd16550ac3..87048ce00c 100644 --- a/third_party/rust/lmdb-rkv/src/environment.rs +++ b/third_party/rust/lmdb-rkv/src/environment.rs @@ -406,7 +406,7 @@ impl EnvironmentBuilder { /// /// The path may not contain the null character, Windows UNC (Uniform Naming Convention) /// paths are not supported either. - pub fn open_with_permissions(&self, path: &Path, mode: ffi::mode_t) -> Result { + pub fn open_with_permissions(&self, path: &Path, mode: ffi::mdb_mode_t) -> Result { let mut env: *mut ffi::MDB_env = ptr::null_mut(); unsafe { lmdb_try!(ffi::mdb_env_create(&mut env)); diff --git a/third_party/rust/lmdb-rkv/src/lib.rs b/third_party/rust/lmdb-rkv/src/lib.rs index 5e89a0ee03..2d42fd31c6 100644 --- a/third_party/rust/lmdb-rkv/src/lib.rs +++ b/third_party/rust/lmdb-rkv/src/lib.rs @@ -2,7 +2,7 @@ //! [Lightning Memory-mapped Database (LMDB)](https://symas.com/lmdb). 
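// --- Editor's note: illustrative sketch, not part of the vendored diff. ---
// Rough use of the lmdb-rkv builder API whose signature changes above:
// `open_with_permissions` now takes `mdb_mode_t`, the per-platform alias defined by
// the -sys crate, rather than the old `mode_t` re-export. On Unix this is still an
// octal file mode. The path handling and db count here are placeholders.
use lmdb::{Environment, Error};
use std::path::Path;

fn open_env(path: &Path) -> Result<Environment, Error> {
    Environment::new()
        .set_max_dbs(2)
        .open_with_permissions(path, 0o664)
}
// --- end of editor's sketch ---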
#![deny(missing_docs)] -#![doc(html_root_url = "https://docs.rs/lmdb-rkv/0.12.3")] +#![doc(html_root_url = "https://docs.rs/lmdb-rkv/0.14.0")] extern crate byteorder; extern crate libc; diff --git a/third_party/rust/lock_api/.cargo-checksum.json b/third_party/rust/lock_api/.cargo-checksum.json index 5507dd1f12..b192a2fbbf 100644 --- a/third_party/rust/lock_api/.cargo-checksum.json +++ b/third_party/rust/lock_api/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"62073f7e3f8dd1c6996e2d2310af31b3f0c4a9c6a58cbfd1296d5d4335329c03","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","src/lib.rs":"d9ed1f911f058d066ebfd024940da8a5c1ebbab6cfd65a633dfbc613573dd823","src/mutex.rs":"179f232bbbe8279365af427287566f1e1382ddbee13d611deccbce34705c447b","src/remutex.rs":"29e724285529bc4fdff5be3a1d3066253a6da493bdcebf024c4ccbdfdd94457c","src/rwlock.rs":"5661564ab948f3a71be008bf1abb5c40d5d5660ca2f6a7c57ae73e51f31ababf"},"package":"f8912e782533a93a167888781b836336a6ca5da6175c05944c86cf28c31104dc"} \ No newline at end of file +{"files":{"Cargo.toml":"6b931fdc231a35953748d244fc3ed1b6cd4d95c95c618058955df508f2e7e738","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","src/lib.rs":"3541bfbc3b6121af8fbeb4683ab28412ca46a9b9bdd83a9348778db76e7ea18f","src/mutex.rs":"d010fba6b466937cbc8c16ed4131c7a16753c61362e4be038c1748c2b9431340","src/remutex.rs":"541735f5675c78117cdec802b53df6ac5b7a834d18e0616cff073b7acc6cf02b","src/rwlock.rs":"992394f38f0bc5211fa1f4d7b7af3a1cc9afcec4d48734ded3b248897c7902d9"},"package":"c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75"} \ No newline at end of file diff --git a/third_party/rust/lock_api/Cargo.toml b/third_party/rust/lock_api/Cargo.toml index 0d16782902..7cbb4401fa 100644 --- a/third_party/rust/lock_api/Cargo.toml +++ b/third_party/rust/lock_api/Cargo.toml @@ -13,7 +13,7 @@ [package] edition = "2018" name = "lock_api" -version = "0.3.1" +version = "0.3.4" authors = ["Amanieu d'Antras "] description = "Wrappers to create fully-featured Mutex and RwLock types. Compatible with no_std." keywords = ["mutex", "rwlock", "lock", "no_std"] diff --git a/third_party/rust/lock_api/src/lib.rs b/third_party/rust/lock_api/src/lib.rs index d388d8a3ce..6576546320 100644 --- a/third_party/rust/lock_api/src/lib.rs +++ b/third_party/rust/lock_api/src/lib.rs @@ -47,7 +47,9 @@ //! } //! //! fn try_lock(&self) -> bool { -//! self.0.swap(true, Ordering::Acquire) +//! self.0 +//! .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed) +//! .is_ok() //! } //! //! fn unlock(&self) { diff --git a/third_party/rust/lock_api/src/mutex.rs b/third_party/rust/lock_api/src/mutex.rs index 95a46fb623..352ac31c6c 100644 --- a/third_party/rust/lock_api/src/mutex.rs +++ b/third_party/rust/lock_api/src/mutex.rs @@ -28,6 +28,9 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer}; /// exclusive: a lock can't be acquired while the mutex is already locked. pub unsafe trait RawMutex { /// Initial value for an unlocked mutex. + // A “non-constant” const item is a legacy way to supply an initialized value to downstream + // static items. Can hopefully be replaced with `const fn new() -> Self` at some point. + #[allow(clippy::declare_interior_mutable_const)] const INIT: Self; /// Marker type which determines whether a lock guard should be `Send`. 
Use @@ -37,7 +40,8 @@ pub unsafe trait RawMutex { /// Acquires this mutex, blocking the current thread until it is able to do so. fn lock(&self); - /// Attempts to acquire this mutex without blocking. + /// Attempts to acquire this mutex without blocking. Returns `true` + /// if the lock was successfully acquired and `false` otherwise. fn try_lock(&self) -> bool; /// Unlocks this mutex. @@ -91,40 +95,11 @@ pub unsafe trait RawMutexTimed: RawMutex { /// it is protecting. The data can only be accessed through the RAII guards /// returned from `lock` and `try_lock`, which guarantees that the data is only /// ever accessed when the mutex is locked. -pub struct Mutex { +pub struct Mutex { raw: R, data: UnsafeCell, } -// Copied and modified from serde -#[cfg(feature = "serde")] -impl Serialize for Mutex -where - R: RawMutex, - T: Serialize + ?Sized, -{ - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - self.lock().serialize(serializer) - } -} - -#[cfg(feature = "serde")] -impl<'de, R, T> Deserialize<'de> for Mutex -where - R: RawMutex, - T: Deserialize<'de> + ?Sized, -{ - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - Deserialize::deserialize(deserializer).map(Mutex::new) - } -} - unsafe impl Send for Mutex {} unsafe impl Sync for Mutex {} @@ -133,21 +108,39 @@ impl Mutex { #[cfg(feature = "nightly")] #[inline] pub const fn new(val: T) -> Mutex { - Mutex { data: UnsafeCell::new(val), raw: R::INIT } + Mutex { + raw: R::INIT, + data: UnsafeCell::new(val), + } } /// Creates a new mutex in an unlocked state ready for use. #[cfg(not(feature = "nightly"))] #[inline] pub fn new(val: T) -> Mutex { - Mutex { data: UnsafeCell::new(val), raw: R::INIT } + Mutex { + raw: R::INIT, + data: UnsafeCell::new(val), + } } /// Consumes this mutex, returning the underlying data. #[inline] - #[allow(unused_unsafe)] pub fn into_inner(self) -> T { - unsafe { self.data.into_inner() } + self.data.into_inner() + } +} + +impl Mutex { + /// Creates a new mutex based on a pre-existing raw mutex. + /// + /// This allows creating a mutex in a constant context on stable Rust. + #[inline] + pub const fn const_new(raw_mutex: R, val: T) -> Mutex { + Mutex { + raw: raw_mutex, + data: UnsafeCell::new(val), + } } } @@ -157,7 +150,10 @@ impl Mutex { /// The lock must be held when calling this method. #[inline] unsafe fn guard(&self) -> MutexGuard<'_, R, T> { - MutexGuard { mutex: self, marker: PhantomData } + MutexGuard { + mutex: self, + marker: PhantomData, + } } /// Acquires a mutex, blocking the current thread until it is able to do so. @@ -309,12 +305,43 @@ impl fmt::Debug for Mutex { } } - f.debug_struct("Mutex").field("data", &LockedPlaceholder).finish() + f.debug_struct("Mutex") + .field("data", &LockedPlaceholder) + .finish() } } } } +// Copied and modified from serde +#[cfg(feature = "serde")] +impl Serialize for Mutex +where + R: RawMutex, + T: Serialize + ?Sized, +{ + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + self.lock().serialize(serializer) + } +} + +#[cfg(feature = "serde")] +impl<'de, R, T> Deserialize<'de> for Mutex +where + R: RawMutex, + T: Deserialize<'de> + ?Sized, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + Deserialize::deserialize(deserializer).map(Mutex::new) + } +} + /// An RAII implementation of a "scoped lock" of a mutex. When this structure is /// dropped (falls out of scope), the lock will be unlocked. 
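// --- Editor's note: illustrative sketch, not part of the vendored diff. ---
// What the `Mutex::const_new` addition above enables: a `static` mutex on stable
// Rust, with no `nightly` feature. The raw lock below mirrors the crate's own doc
// example (including the corrected `compare_exchange`-based `try_lock`); assumes
// `lock_api = "0.3"` as a dependency.
use std::sync::atomic::{AtomicBool, Ordering};
use lock_api::{GuardSend, Mutex, RawMutex};

struct RawSpinlock(AtomicBool);

unsafe impl RawMutex for RawSpinlock {
    const INIT: RawSpinlock = RawSpinlock(AtomicBool::new(false));
    type GuardMarker = GuardSend;

    fn lock(&self) {
        // Naive spin; fine for a sketch.
        while !self.try_lock() {}
    }

    fn try_lock(&self) -> bool {
        self.0
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
    }

    fn unlock(&self) {
        self.0.store(false, Ordering::Release);
    }
}

// `const_new` is a `const fn`, so it can initialize a `static` directly.
static COUNTER: Mutex<RawSpinlock, u32> = Mutex::const_new(RawSpinlock::INIT, 0);

fn main() {
    *COUNTER.lock() += 1;
    assert_eq!(*COUNTER.lock(), 1);
}
// --- end of editor's sketch ---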
/// @@ -350,17 +377,21 @@ impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> { let raw = &s.mutex.raw; let data = f(unsafe { &mut *s.mutex.data.get() }); mem::forget(s); - MappedMutexGuard { raw, data, marker: PhantomData } + MappedMutexGuard { + raw, + data, + marker: PhantomData, + } } - /// Attempts to make a new `MappedMutexGuard` for a component of the - /// locked data. The original guard is return if the closure returns `None`. + /// Attempts to make a new `MappedMutexGuard` for a component of the + /// locked data. The original guard is returned if the closure returns `None`. /// /// This operation cannot fail as the `MutexGuard` passed /// in already locked the mutex. /// /// This is an associated function that needs to be - /// used as `MutexGuard::map(...)`. A method would interfere with methods of + /// used as `MutexGuard::try_map(...)`. A method would interfere with methods of /// the same name on the contents of the locked data. #[inline] pub fn try_map(s: Self, f: F) -> Result, Self> @@ -373,7 +404,11 @@ impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> { None => return Err(s), }; mem::forget(s); - Ok(MappedMutexGuard { raw, data, marker: PhantomData }) + Ok(MappedMutexGuard { + raw, + data, + marker: PhantomData, + }) } /// Temporarily unlocks the mutex to execute the given function. @@ -412,7 +447,7 @@ impl<'a, R: RawMutexFair + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> { /// Temporarily unlocks the mutex to execute the given function. /// - /// The mutex is unlocked a fair unlock protocol. + /// The mutex is unlocked using a fair unlock protocol. /// /// This is safe because `&mut` guarantees that there exist no other /// references to the data protected by the mutex. @@ -514,17 +549,21 @@ impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> MappedMutexGuard<'a, R, T> { let raw = s.raw; let data = f(unsafe { &mut *s.data }); mem::forget(s); - MappedMutexGuard { raw, data, marker: PhantomData } + MappedMutexGuard { + raw, + data, + marker: PhantomData, + } } - /// Attempts to make a new `MappedMutexGuard` for a component of the - /// locked data. The original guard is return if the closure returns `None`. + /// Attempts to make a new `MappedMutexGuard` for a component of the + /// locked data. The original guard is returned if the closure returns `None`. /// /// This operation cannot fail as the `MappedMutexGuard` passed /// in already locked the mutex. /// /// This is an associated function that needs to be - /// used as `MappedMutexGuard::map(...)`. A method would interfere with methods of + /// used as `MappedMutexGuard::try_map(...)`. A method would interfere with methods of /// the same name on the contents of the locked data. #[inline] pub fn try_map(s: Self, f: F) -> Result, Self> @@ -537,7 +576,11 @@ impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> MappedMutexGuard<'a, R, T> { None => return Err(s), }; mem::forget(s); - Ok(MappedMutexGuard { raw, data, marker: PhantomData }) + Ok(MappedMutexGuard { + raw, + data, + marker: PhantomData, + }) } } diff --git a/third_party/rust/lock_api/src/remutex.rs b/third_party/rust/lock_api/src/remutex.rs index 69dcf01b4c..bdfcc404ba 100644 --- a/third_party/rust/lock_api/src/remutex.rs +++ b/third_party/rust/lock_api/src/remutex.rs @@ -37,6 +37,9 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer}; /// re-used since that thread is no longer active. pub unsafe trait GetThreadId { /// Initial value. 
+ // A “non-constant” const item is a legacy way to supply an initialized value to downstream + // static items. Can hopefully be replaced with `const fn new() -> Self` at some point. + #[allow(clippy::declare_interior_mutable_const)] const INIT: Self; /// Returns a non-zero thread ID which identifies the current thread of @@ -44,7 +47,7 @@ pub unsafe trait GetThreadId { fn nonzero_thread_id(&self) -> NonZeroUsize; } -struct RawReentrantMutex { +struct RawReentrantMutex { owner: AtomicUsize, lock_count: Cell, mutex: R, @@ -57,7 +60,10 @@ impl RawReentrantMutex { let id = self.get_thread_id.nonzero_thread_id().get(); if self.owner.load(Ordering::Relaxed) == id { self.lock_count.set( - self.lock_count.get().checked_add(1).expect("ReentrantMutex lock count overflow"), + self.lock_count + .get() + .checked_add(1) + .expect("ReentrantMutex lock count overflow"), ); } else { if !try_lock() { @@ -139,42 +145,11 @@ impl RawReentrantMutex { /// /// See [`Mutex`](struct.Mutex.html) for more details about the underlying mutex /// primitive. -pub struct ReentrantMutex { +pub struct ReentrantMutex { raw: RawReentrantMutex, data: UnsafeCell, } -// Copied and modified from serde -#[cfg(feature = "serde")] -impl Serialize for ReentrantMutex -where - R: RawMutex, - G: GetThreadId, - T: Serialize + ?Sized, -{ - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - self.lock().serialize(serializer) - } -} - -#[cfg(feature = "serde")] -impl<'de, R, G, T> Deserialize<'de> for ReentrantMutex -where - R: RawMutex, - G: GetThreadId, - T: Deserialize<'de> + ?Sized, -{ - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - Deserialize::deserialize(deserializer).map(ReentrantMutex::new) - } -} - unsafe impl Send for ReentrantMutex { @@ -217,9 +192,28 @@ impl ReentrantMutex { /// Consumes this mutex, returning the underlying data. #[inline] - #[allow(unused_unsafe)] pub fn into_inner(self) -> T { - unsafe { self.data.into_inner() } + self.data.into_inner() + } +} + +impl ReentrantMutex { + /// Creates a new reentrant mutex based on a pre-existing raw mutex and a + /// helper to get the thread ID. + /// + /// This allows creating a reentrant mutex in a constant context on stable + /// Rust. + #[inline] + pub const fn const_new(raw_mutex: R, get_thread_id: G, val: T) -> ReentrantMutex { + ReentrantMutex { + data: UnsafeCell::new(val), + raw: RawReentrantMutex { + owner: AtomicUsize::new(0), + lock_count: Cell::new(0), + mutex: raw_mutex, + get_thread_id, + }, + } } } @@ -229,7 +223,10 @@ impl ReentrantMutex { /// The lock must be held when calling this method. 
#[inline] unsafe fn guard(&self) -> ReentrantMutexGuard<'_, R, G, T> { - ReentrantMutexGuard { remutex: &self, marker: PhantomData } + ReentrantMutexGuard { + remutex: &self, + marker: PhantomData, + } } /// Acquires a reentrant mutex, blocking the current thread until it is able @@ -373,7 +370,10 @@ impl From for ReentrantMutex { impl fmt::Debug for ReentrantMutex { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.try_lock() { - Some(guard) => f.debug_struct("ReentrantMutex").field("data", &&*guard).finish(), + Some(guard) => f + .debug_struct("ReentrantMutex") + .field("data", &&*guard) + .finish(), None => { struct LockedPlaceholder; impl fmt::Debug for LockedPlaceholder { @@ -382,12 +382,45 @@ impl fmt::Debug for Reentra } } - f.debug_struct("ReentrantMutex").field("data", &LockedPlaceholder).finish() + f.debug_struct("ReentrantMutex") + .field("data", &LockedPlaceholder) + .finish() } } } } +// Copied and modified from serde +#[cfg(feature = "serde")] +impl Serialize for ReentrantMutex +where + R: RawMutex, + G: GetThreadId, + T: Serialize + ?Sized, +{ + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + self.lock().serialize(serializer) + } +} + +#[cfg(feature = "serde")] +impl<'de, R, G, T> Deserialize<'de> for ReentrantMutex +where + R: RawMutex, + G: GetThreadId, + T: Deserialize<'de> + ?Sized, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + Deserialize::deserialize(deserializer).map(ReentrantMutex::new) + } +} + /// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure /// is dropped (falls out of scope), the lock will be unlocked. /// @@ -426,7 +459,11 @@ impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGu let raw = &s.remutex.raw; let data = f(unsafe { &*s.remutex.data.get() }); mem::forget(s); - MappedReentrantMutexGuard { raw, data, marker: PhantomData } + MappedReentrantMutexGuard { + raw, + data, + marker: PhantomData, + } } /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the @@ -452,7 +489,11 @@ impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGu None => return Err(s), }; mem::forget(s); - Ok(MappedReentrantMutexGuard { raw, data, marker: PhantomData }) + Ok(MappedReentrantMutexGuard { + raw, + data, + marker: PhantomData, + }) } /// Temporarily unlocks the mutex to execute the given function. @@ -597,7 +638,11 @@ impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> let raw = s.raw; let data = f(unsafe { &*s.data }); mem::forget(s); - MappedReentrantMutexGuard { raw, data, marker: PhantomData } + MappedReentrantMutexGuard { + raw, + data, + marker: PhantomData, + } } /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the @@ -623,7 +668,11 @@ impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> None => return Err(s), }; mem::forget(s); - Ok(MappedReentrantMutexGuard { raw, data, marker: PhantomData }) + Ok(MappedReentrantMutexGuard { + raw, + data, + marker: PhantomData, + }) } } diff --git a/third_party/rust/lock_api/src/rwlock.rs b/third_party/rust/lock_api/src/rwlock.rs index 874ebd60bb..892ba5240b 100644 --- a/third_party/rust/lock_api/src/rwlock.rs +++ b/third_party/rust/lock_api/src/rwlock.rs @@ -30,6 +30,9 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer}; /// exists. pub unsafe trait RawRwLock { /// Initial value for an unlocked `RwLock`. 
+ // A “non-constant” const item is a legacy way to supply an initialized value to downstream + // static items. Can hopefully be replaced with `const fn new() -> Self` at some point. + #[allow(clippy::declare_interior_mutable_const)] const INIT: Self; /// Marker type which determines whether a lock guard should be `Send`. Use @@ -228,7 +231,7 @@ pub unsafe trait RawRwLockUpgradeTimed: RawRwLockUpgrade + RawRwLockTimed { /// allow concurrent access through readers. The RAII guards returned from the /// locking methods implement `Deref` (and `DerefMut` for the `write` methods) /// to allow access to the contained of the lock. -pub struct RwLock { +pub struct RwLock { raw: R, data: UnsafeCell, } @@ -270,14 +273,20 @@ impl RwLock { #[cfg(feature = "nightly")] #[inline] pub const fn new(val: T) -> RwLock { - RwLock { data: UnsafeCell::new(val), raw: R::INIT } + RwLock { + data: UnsafeCell::new(val), + raw: R::INIT, + } } /// Creates a new instance of an `RwLock` which is unlocked. #[cfg(not(feature = "nightly"))] #[inline] pub fn new(val: T) -> RwLock { - RwLock { data: UnsafeCell::new(val), raw: R::INIT } + RwLock { + data: UnsafeCell::new(val), + raw: R::INIT, + } } /// Consumes this `RwLock`, returning the underlying data. @@ -288,13 +297,31 @@ impl RwLock { } } +impl RwLock { + /// Creates a new new instance of an `RwLock` based on a pre-existing + /// `RawRwLock`. + /// + /// This allows creating a `RwLock` in a constant context on stable + /// Rust. + #[inline] + pub const fn const_new(raw_rwlock: R, val: T) -> RwLock { + RwLock { + data: UnsafeCell::new(val), + raw: raw_rwlock, + } + } +} + impl RwLock { /// # Safety /// /// The lock must be held when calling this method. #[inline] unsafe fn read_guard(&self) -> RwLockReadGuard<'_, R, T> { - RwLockReadGuard { rwlock: self, marker: PhantomData } + RwLockReadGuard { + rwlock: self, + marker: PhantomData, + } } /// # Safety @@ -302,7 +329,10 @@ impl RwLock { /// The lock must be held when calling this method. #[inline] unsafe fn write_guard(&self) -> RwLockWriteGuard<'_, R, T> { - RwLockWriteGuard { rwlock: self, marker: PhantomData } + RwLockWriteGuard { + rwlock: self, + marker: PhantomData, + } } /// Locks this `RwLock` with shared read access, blocking the current thread @@ -623,7 +653,10 @@ impl RwLock { /// The lock must be held when calling this method. 
#[inline] unsafe fn upgradable_guard(&self) -> RwLockUpgradableReadGuard<'_, R, T> { - RwLockUpgradableReadGuard { rwlock: self, marker: PhantomData } + RwLockUpgradableReadGuard { + rwlock: self, + marker: PhantomData, + } } /// Locks this `RwLock` with upgradable read access, blocking the current thread @@ -726,7 +759,9 @@ impl fmt::Debug for RwLock { } } - f.debug_struct("RwLock").field("data", &LockedPlaceholder).finish() + f.debug_struct("RwLock") + .field("data", &LockedPlaceholder) + .finish() } } } @@ -764,7 +799,11 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> { let raw = &s.rwlock.raw; let data = f(unsafe { &*s.rwlock.data.get() }); mem::forget(s); - MappedRwLockReadGuard { raw, data, marker: PhantomData } + MappedRwLockReadGuard { + raw, + data, + marker: PhantomData, + } } /// Attempts to make a new `MappedRwLockReadGuard` for a component of the @@ -787,7 +826,11 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> { None => return Err(s), }; mem::forget(s); - Ok(MappedRwLockReadGuard { raw, data, marker: PhantomData }) + Ok(MappedRwLockReadGuard { + raw, + data, + marker: PhantomData, + }) } /// Temporarily unlocks the `RwLock` to execute the given function. @@ -917,7 +960,11 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> { let raw = &s.rwlock.raw; let data = f(unsafe { &mut *s.rwlock.data.get() }); mem::forget(s); - MappedRwLockWriteGuard { raw, data, marker: PhantomData } + MappedRwLockWriteGuard { + raw, + data, + marker: PhantomData, + } } /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the @@ -940,7 +987,11 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> { None => return Err(s), }; mem::forget(s); - Ok(MappedRwLockWriteGuard { raw, data, marker: PhantomData }) + Ok(MappedRwLockWriteGuard { + raw, + data, + marker: PhantomData, + }) } /// Temporarily unlocks the `RwLock` to execute the given function. @@ -969,7 +1020,10 @@ impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> s.rwlock.raw.downgrade(); let rwlock = s.rwlock; mem::forget(s); - RwLockReadGuard { rwlock, marker: PhantomData } + RwLockReadGuard { + rwlock, + marker: PhantomData, + } } } @@ -984,7 +1038,10 @@ impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, s.rwlock.raw.downgrade_to_upgradable(); let rwlock = s.rwlock; mem::forget(s); - RwLockUpgradableReadGuard { rwlock, marker: PhantomData } + RwLockUpgradableReadGuard { + rwlock, + marker: PhantomData, + } } } @@ -1112,7 +1169,10 @@ impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, s.rwlock.raw.upgrade(); let rwlock = s.rwlock; mem::forget(s); - RwLockWriteGuard { rwlock, marker: PhantomData } + RwLockWriteGuard { + rwlock, + marker: PhantomData, + } } /// Tries to atomically upgrade an upgradable read lock into a exclusive write lock. 
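// --- Editor's note: illustrative sketch, not part of the vendored diff. ---
// The upgradable-read flow behind the guards above, shown via the parking_lot crate,
// whose `RwLock` is built on these lock_api traits (parking_lot as a dependency is an
// assumption here). An upgradable read coexists with plain readers, at most one can
// be held at a time, and it can be upgraded to a write lock without being released.
use parking_lot::{RwLock, RwLockUpgradableReadGuard};

fn insert_if_missing(cache: &RwLock<Vec<String>>, value: &str) {
    // Cheap check under an upgradable read; upgrade only if we actually need to write.
    let guard = cache.upgradable_read();
    if !guard.iter().any(|v| v.as_str() == value) {
        let mut write_guard = RwLockUpgradableReadGuard::upgrade(guard);
        write_guard.push(value.to_string());
    }
}

fn main() {
    let cache = RwLock::new(Vec::new());
    insert_if_missing(&cache, "hello");
    insert_if_missing(&cache, "hello");
    assert_eq!(cache.read().len(), 1);
}
// --- end of editor's sketch ---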
@@ -1122,7 +1182,10 @@ impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, if s.rwlock.raw.try_upgrade() { let rwlock = s.rwlock; mem::forget(s); - Ok(RwLockWriteGuard { rwlock, marker: PhantomData }) + Ok(RwLockWriteGuard { + rwlock, + marker: PhantomData, + }) } else { Err(s) } @@ -1187,7 +1250,10 @@ impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockUpgradableRead s.rwlock.raw.downgrade_upgradable(); let rwlock = s.rwlock; mem::forget(s); - RwLockReadGuard { rwlock, marker: PhantomData } + RwLockReadGuard { + rwlock, + marker: PhantomData, + } } } @@ -1204,7 +1270,10 @@ impl<'a, R: RawRwLockUpgradeTimed + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuar if s.rwlock.raw.try_upgrade_for(timeout) { let rwlock = s.rwlock; mem::forget(s); - Ok(RwLockWriteGuard { rwlock, marker: PhantomData }) + Ok(RwLockWriteGuard { + rwlock, + marker: PhantomData, + }) } else { Err(s) } @@ -1223,7 +1292,10 @@ impl<'a, R: RawRwLockUpgradeTimed + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuar if s.rwlock.raw.try_upgrade_until(timeout) { let rwlock = s.rwlock; mem::forget(s); - Ok(RwLockWriteGuard { rwlock, marker: PhantomData }) + Ok(RwLockWriteGuard { + rwlock, + marker: PhantomData, + }) } else { Err(s) } @@ -1304,7 +1376,11 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> { let raw = s.raw; let data = f(unsafe { &*s.data }); mem::forget(s); - MappedRwLockReadGuard { raw, data, marker: PhantomData } + MappedRwLockReadGuard { + raw, + data, + marker: PhantomData, + } } /// Attempts to make a new `MappedRwLockReadGuard` for a component of the @@ -1327,7 +1403,11 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> { None => return Err(s), }; mem::forget(s); - Ok(MappedRwLockReadGuard { raw, data, marker: PhantomData }) + Ok(MappedRwLockReadGuard { + raw, + data, + marker: PhantomData, + }) } } @@ -1428,7 +1508,11 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> { let raw = s.raw; let data = f(unsafe { &mut *s.data }); mem::forget(s); - MappedRwLockWriteGuard { raw, data, marker: PhantomData } + MappedRwLockWriteGuard { + raw, + data, + marker: PhantomData, + } } /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the @@ -1451,7 +1535,11 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> { None => return Err(s), }; mem::forget(s); - Ok(MappedRwLockWriteGuard { raw, data, marker: PhantomData }) + Ok(MappedRwLockWriteGuard { + raw, + data, + marker: PhantomData, + }) } } @@ -1462,12 +1550,20 @@ impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, /// Note that if there are any writers currently waiting to take the lock /// then other readers may not be able to acquire the lock even if it was /// downgraded. 
+ #[deprecated( + since = "0.3.3", + note = "This function is unsound and will be removed in the future, see issue #198" + )] pub fn downgrade(s: Self) -> MappedRwLockReadGuard<'a, R, T> { s.raw.downgrade(); let raw = s.raw; let data = s.data; mem::forget(s); - MappedRwLockReadGuard { raw, data, marker: PhantomData } + MappedRwLockReadGuard { + raw, + data, + marker: PhantomData, + } } } diff --git a/third_party/rust/log-0.3.9/.cargo-checksum.json b/third_party/rust/log-0.3.9/.cargo-checksum.json deleted file mode 100644 index 115b30424d..0000000000 --- a/third_party/rust/log-0.3.9/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{".travis.yml":"2cdde67eec211928d7e667e5ade109cdf0f74c3417c47cc48905929c5c165230","Cargo.toml":"827b8cdf64e9652b178d6033fdcb4fc04a1382edf67ce4f2c6dce39943349f10","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"7c831cff45cfd33333cc246815dccb25bfa0b981053656e707828fe3f25151da","appveyor.yml":"c61473b8c780ad2626282ce2b2ba0ef278082b6afe151a62ff419f33eaf90221","src/lib.rs":"8238bde9046b1b4a0d21b5ae9029abd672a7d314581b1fd8d0890b2a3052d443","src/macros.rs":"9068d69d32e989ac273ce73659125d31cf4a166076eefdad74dfbdf9506cf9c4"},"package":"e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b"} \ No newline at end of file diff --git a/third_party/rust/log-0.3.9/.travis.yml b/third_party/rust/log-0.3.9/.travis.yml deleted file mode 100644 index 4799815828..0000000000 --- a/third_party/rust/log-0.3.9/.travis.yml +++ /dev/null @@ -1,30 +0,0 @@ -language: rust -sudo: false -rust: - - stable - - beta - - nightly -before_script: - - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH -script: - - cargo build --verbose - - ([ $TRAVIS_RUST_VERSION != nightly ] || cargo build --verbose --no-default-features) - - ([ $TRAVIS_RUST_VERSION != nightly ] || cargo build --verbose --features nightly) - - cargo test --verbose - - ([ $TRAVIS_RUST_VERSION != nightly ] || cargo test --verbose --no-default-features) - - cargo test --verbose --manifest-path log-test/Cargo.toml - - cargo test --verbose --manifest-path env/Cargo.toml - - cargo test --verbose --manifest-path env/Cargo.toml --no-default-features - - cargo run --verbose --manifest-path tests/max_level_features/Cargo.toml - - cargo run --verbose --manifest-path tests/max_level_features/Cargo.toml --release - - ([ $TRAVIS_RUST_VERSION != nightly ] || cargo doc --no-deps --features nightly) - - CARGO_TARGET_DIR=target cargo doc --no-deps --manifest-path env/Cargo.toml -after_success: - - travis-cargo --only nightly doc-upload -env: - global: - secure: "VPHgnszydMudYTY8cthHj/Dmxqp7OmTiu4Sa/705Udsx+tYblTv+8WdThkClo3C/asStVcxlaRWAp91UX32/k4SfkPz17XId3Wadyt03r73ANm6ZOWY+qty+3/LINm54kuTxYUDDTbD6NaFNPFQLIE0xCpJeiXUQTlaMk6z0W3M=" - -notifications: - email: - on_success: never diff --git a/third_party/rust/log-0.3.9/Cargo.toml b/third_party/rust/log-0.3.9/Cargo.toml deleted file mode 100644 index 1bea809c6b..0000000000 --- a/third_party/rust/log-0.3.9/Cargo.toml +++ /dev/null @@ -1,45 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. 
crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -name = "log" -version = "0.3.9" -authors = ["The Rust Project Developers"] -description = "A lightweight logging facade for Rust\n" -homepage = "https://github.com/rust-lang/log" -documentation = "https://doc.rust-lang.org/log" -readme = "README.md" -categories = ["development-tools::debugging"] -license = "MIT/Apache-2.0" -repository = "https://github.com/rust-lang/log" - -[lib] -doctest = false -[dependencies.log] -version = "0.4" - -[features] -default = ["use_std"] -max_level_debug = ["log/max_level_debug"] -max_level_error = ["log/max_level_error"] -max_level_info = ["log/max_level_info"] -max_level_off = ["log/max_level_off"] -max_level_trace = ["log/max_level_trace"] -max_level_warn = ["log/max_level_warn"] -nightly = [] -release_max_level_debug = ["log/release_max_level_debug"] -release_max_level_error = ["log/release_max_level_error"] -release_max_level_info = ["log/release_max_level_info"] -release_max_level_off = ["log/release_max_level_off"] -release_max_level_trace = ["log/release_max_level_trace"] -release_max_level_warn = ["log/release_max_level_warn"] -use_std = ["log/std"] diff --git a/third_party/rust/log-0.3.9/LICENSE-MIT b/third_party/rust/log-0.3.9/LICENSE-MIT deleted file mode 100644 index 39d4bdb5ac..0000000000 --- a/third_party/rust/log-0.3.9/LICENSE-MIT +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2014 The Rust Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/log-0.3.9/README.md b/third_party/rust/log-0.3.9/README.md deleted file mode 100644 index 0e9dc58ae7..0000000000 --- a/third_party/rust/log-0.3.9/README.md +++ /dev/null @@ -1,160 +0,0 @@ -log -=== - -A Rust library providing a lightweight logging *facade*. - -[![Build Status](https://travis-ci.org/rust-lang-nursery/log.svg?branch=master)](https://travis-ci.org/rust-lang-nursery/log) -[![Build status](https://ci.appveyor.com/api/projects/status/nopdjmmjt45xcrki?svg=true)](https://ci.appveyor.com/project/alexcrichton/log) - -* [`log` documentation](https://doc.rust-lang.org/log) -* [`env_logger` documentation](https://doc.rust-lang.org/log/env_logger) - -A logging facade provides a single logging API that abstracts over the actual -logging implementation. 
Libraries can use the logging API provided by this -crate, and the consumer of those libraries can choose the logging -implementation that is most suitable for its use case. - -## Usage - -## In libraries - -Libraries should link only to the `log` crate, and use the provided macros to -log whatever information will be useful to downstream consumers: - -```toml -[dependencies] -log = "0.3" -``` - -```rust -#[macro_use] -extern crate log; - -pub fn shave_the_yak(yak: &Yak) { - trace!("Commencing yak shaving"); - - loop { - match find_a_razor() { - Ok(razor) => { - info!("Razor located: {}", razor); - yak.shave(razor); - break; - } - Err(err) => { - warn!("Unable to locate a razor: {}, retrying", err); - } - } - } -} -``` - -## In executables - -Executables should choose a logger implementation and initialize it early in the -runtime of the program. Logger implementations will typically include a -function to do this. Any log messages generated before the logger is -initialized will be ignored. - -The executable itself may use the `log` crate to log as well. - -The `env_logger` crate provides a logger implementation that mirrors the -functionality of the old revision of the `log` crate. - -```toml -[dependencies] -log = "0.3" -env_logger = "0.3" -``` - -```rust -#[macro_use] -extern crate log; -extern crate env_logger; - -fn main() { - env_logger::init().unwrap(); - - info!("starting up"); - - // ... -} -``` - -## In tests - -Tests can use the `env_logger` crate to see log messages generated during that test: - -```toml -[dependencies] -log = "0.3" - -[dev-dependencies] -env_logger = "0.3" -``` - -```rust -#[macro_use] -extern crate log; - -fn add_one(num: i32) -> i32 { - info!("add_one called with {}", num); - num + 1 -} - -#[cfg(test)] -mod tests { - use super::*; - extern crate env_logger; - - #[test] - fn it_adds_one() { - let _ = env_logger::init(); - info!("can log from the test too"); - assert_eq!(3, add_one(2)); - } - - #[test] - fn it_handles_negative_numbers() { - let _ = env_logger::init(); - info!("logging from another test"); - assert_eq!(-7, add_one(-8)); - } -} -``` - -Assuming the module under test is called `my_lib`, running the tests with the -`RUST_LOG` filtering to info messages from this module looks like: - -```bash -$ RUST_LOG=my_lib=info cargo test - Running target/debug/my_lib-... - -running 2 tests -INFO:my_lib::tests: logging from another test -INFO:my_lib: add_one called with -8 -test tests::it_handles_negative_numbers ... ok -INFO:my_lib::tests: can log from the test too -INFO:my_lib: add_one called with 2 -test tests::it_adds_one ... ok - -test result: ok. 2 passed; 0 failed; 0 ignored; 0 measured -``` - -Note that `env_logger::init()` needs to be called in each test in which you -want to enable logging. Additionally, the default behavior of tests to -run in parallel means that logging output may be interleaved with test output. -Either run tests in a single thread by specifying `RUST_TEST_THREADS=1` or by -running one test by specifying its name as an argument to the test binaries as -directed by the `cargo test` help docs: - -```bash -$ RUST_LOG=my_lib=info cargo test it_adds_one - Running target/debug/my_lib-... - -running 1 test -INFO:my_lib::tests: can log from the test too -INFO:my_lib: add_one called with 2 -test tests::it_adds_one ... ok - -test result: ok. 
1 passed; 0 failed; 0 ignored; 0 measured -``` diff --git a/third_party/rust/log-0.3.9/appveyor.yml b/third_party/rust/log-0.3.9/appveyor.yml deleted file mode 100644 index 841011deb2..0000000000 --- a/third_party/rust/log-0.3.9/appveyor.yml +++ /dev/null @@ -1,18 +0,0 @@ -environment: - matrix: - - TARGET: x86_64-pc-windows-msvc - - TARGET: i686-pc-windows-msvc - - TARGET: i686-pc-windows-gnu -install: - - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-nightly-${env:TARGET}.exe" - - rust-nightly-%TARGET%.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust" - - SET PATH=%PATH%;C:\Program Files (x86)\Rust\bin - - SET PATH=%PATH%;C:\MinGW\bin - - rustc -V - - cargo -V - -build: false - -test_script: - - cargo test --verbose - - cargo test --manifest-path env/Cargo.toml diff --git a/third_party/rust/log-0.3.9/src/lib.rs b/third_party/rust/log-0.3.9/src/lib.rs deleted file mode 100644 index 1abdf31572..0000000000 --- a/third_party/rust/log-0.3.9/src/lib.rs +++ /dev/null @@ -1,1091 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! A lightweight logging facade. -//! -//! A logging facade provides a single logging API that abstracts over the -//! actual logging implementation. Libraries can use the logging API provided -//! by this crate, and the consumer of those libraries can choose the logging -//! framework that is most suitable for its use case. -//! -//! If no logging implementation is selected, the facade falls back to a "noop" -//! implementation that ignores all log messages. The overhead in this case -//! is very small - just an integer load, comparison and jump. -//! -//! A log request consists of a target, a level, and a body. A target is a -//! string which defaults to the module path of the location of the log -//! request, though that default may be overridden. Logger implementations -//! typically use the target to filter requests based on some user -//! configuration. -//! -//! # Use -//! -//! ## In libraries -//! -//! Libraries should link only to the `log` crate, and use the provided -//! macros to log whatever information will be useful to downstream consumers. -//! -//! ### Examples -//! -//! ```rust -//! # #![allow(unstable)] -//! #[macro_use] -//! extern crate log; -//! -//! # #[derive(Debug)] pub struct Yak(String); -//! # impl Yak { fn shave(&self, _: u32) {} } -//! # fn find_a_razor() -> Result { Ok(1) } -//! pub fn shave_the_yak(yak: &Yak) { -//! info!(target: "yak_events", "Commencing yak shaving for {:?}", yak); -//! -//! loop { -//! match find_a_razor() { -//! Ok(razor) => { -//! info!("Razor located: {}", razor); -//! yak.shave(razor); -//! break; -//! } -//! Err(err) => { -//! warn!("Unable to locate a razor: {}, retrying", err); -//! } -//! } -//! } -//! } -//! # fn main() {} -//! ``` -//! -//! ## In executables -//! -//! Executables should choose a logging framework and initialize it early in the -//! runtime of the program. Logging frameworks will typically include a -//! function to do this. Any log messages generated before the framework is -//! initialized will be ignored. -//! -//! The executable itself may use the `log` crate to log as well. -//! -//! ### Warning -//! -//! 
The logging system may only be initialized once. -//! -//! ### Examples -//! -//! ```rust,ignore -//! #[macro_use] -//! extern crate log; -//! extern crate my_logger; -//! -//! fn main() { -//! my_logger::init(); -//! -//! info!("starting up"); -//! -//! // ... -//! } -//! ``` -//! -//! # Logger implementations -//! -//! Loggers implement the `Log` trait. Here's a very basic example that simply -//! logs all messages at the `Error`, `Warn` or `Info` levels to stdout: -//! -//! ```rust -//! extern crate log; -//! -//! use log::{LogRecord, LogLevel, LogMetadata}; -//! -//! struct SimpleLogger; -//! -//! impl log::Log for SimpleLogger { -//! fn enabled(&self, metadata: &LogMetadata) -> bool { -//! metadata.level() <= LogLevel::Info -//! } -//! -//! fn log(&self, record: &LogRecord) { -//! if self.enabled(record.metadata()) { -//! println!("{} - {}", record.level(), record.args()); -//! } -//! } -//! } -//! -//! # fn main() {} -//! ``` -//! -//! Loggers are installed by calling the `set_logger` function. It takes a -//! closure which is provided a `MaxLogLevel` token and returns a `Log` trait -//! object. The `MaxLogLevel` token controls the global maximum log level. The -//! logging facade uses this as an optimization to improve performance of log -//! messages at levels that are disabled. In the case of our example logger, -//! we'll want to set the maximum log level to `Info`, since we ignore any -//! `Debug` or `Trace` level log messages. A logging framework should provide a -//! function that wraps a call to `set_logger`, handling initialization of the -//! logger: -//! -//! ```rust -//! # extern crate log; -//! # use log::{LogLevel, LogLevelFilter, SetLoggerError, LogMetadata}; -//! # struct SimpleLogger; -//! # impl log::Log for SimpleLogger { -//! # fn enabled(&self, _: &LogMetadata) -> bool { false } -//! # fn log(&self, _: &log::LogRecord) {} -//! # } -//! # fn main() {} -//! # #[cfg(feature = "use_std")] -//! pub fn init() -> Result<(), SetLoggerError> { -//! log::set_logger(|max_log_level| { -//! max_log_level.set(LogLevelFilter::Info); -//! Box::new(SimpleLogger) -//! }) -//! } -//! ``` -//! -//! # Use with `no_std` -//! -//! To use the `log` crate without depending on `libstd`, you need to specify -//! `default-features = false` when specifying the dependency in `Cargo.toml`. -//! This makes no difference to libraries using `log` since the logging API -//! remains the same. However executables will need to use the `set_logger_raw` -//! function to initialize a logger and the `shutdown_logger_raw` function to -//! shut down the global logger before exiting: -//! -//! ```rust -//! # extern crate log; -//! # use log::{LogLevel, LogLevelFilter, SetLoggerError, ShutdownLoggerError, -//! # LogMetadata}; -//! # struct SimpleLogger; -//! # impl log::Log for SimpleLogger { -//! # fn enabled(&self, _: &LogMetadata) -> bool { false } -//! # fn log(&self, _: &log::LogRecord) {} -//! # } -//! # impl SimpleLogger { -//! # fn flush(&self) {} -//! # } -//! # fn main() {} -//! pub fn init() -> Result<(), SetLoggerError> { -//! unsafe { -//! log::set_logger_raw(|max_log_level| { -//! static LOGGER: SimpleLogger = SimpleLogger; -//! max_log_level.set(LogLevelFilter::Info); -//! &SimpleLogger -//! }) -//! } -//! } -//! pub fn shutdown() -> Result<(), ShutdownLoggerError> { -//! log::shutdown_logger_raw().map(|logger| { -//! let logger = unsafe { &*(logger as *const SimpleLogger) }; -//! logger.flush(); -//! }) -//! } -//! 
``` - -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://www.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/log/")] -#![warn(missing_docs)] -#![cfg_attr(feature = "nightly", feature(panic_handler))] - -#![cfg_attr(not(feature = "use_std"), no_std)] - -// When compiled for the rustc compiler itself we want to make sure that this is -// an unstable crate -#![cfg_attr(rustbuild, feature(staged_api, rustc_private))] -#![cfg_attr(rustbuild, unstable(feature = "rustc_private", issue = "27812"))] - -#[cfg(not(feature = "use_std"))] -extern crate core as std; -extern crate log; - -use std::cmp; -#[cfg(feature = "use_std")] -use std::error; -use std::fmt; -use std::mem; -use std::ops::Deref; -use std::str::FromStr; -use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering}; -#[macro_use] -mod macros; - -// The setup here is a bit weird to make shutdown_logger_raw work. -// -// There are four different states that we care about: the logger's -// uninitialized, the logger's initializing (set_logger's been called but -// LOGGER hasn't actually been set yet), the logger's active, or the logger is -// shut down after calling shutdown_logger_raw. -// -// The LOGGER static holds a pointer to the global logger. It is protected by -// the STATE static which determines whether LOGGER has been initialized yet. -// -// The shutdown_logger_raw routine needs to make sure that no threads are -// actively logging before it returns. The number of actively logging threads is -// tracked in the REFCOUNT static. The routine first sets STATE back to -// INITIALIZING. All logging calls past that point will immediately return -// without accessing the logger. At that point, the at_exit routine just waits -// for the refcount to reach 0 before deallocating the logger. Note that the -// refcount does not necessarily monotonically decrease at this point, as new -// log calls still increment and decrement it, but the interval in between is -// small enough that the wait is really just for the active log calls to finish. - -static mut LOGGER: *const Log = &NopLogger; -static STATE: AtomicUsize = ATOMIC_USIZE_INIT; -static REFCOUNT: AtomicUsize = ATOMIC_USIZE_INIT; - -const INITIALIZING: usize = 1; -const INITIALIZED: usize = 2; - -static LOG_LEVEL_NAMES: [&'static str; 6] = ["OFF", "ERROR", "WARN", "INFO", - "DEBUG", "TRACE"]; - -/// An enum representing the available verbosity levels of the logging framework -/// -/// A `LogLevel` may be compared directly to a `LogLevelFilter`. -#[repr(usize)] -#[derive(Copy, Eq, Debug)] -pub enum LogLevel { - /// The "error" level. - /// - /// Designates very serious errors. - Error = 1, // This way these line up with the discriminants for LogLevelFilter below - /// The "warn" level. - /// - /// Designates hazardous situations. - Warn, - /// The "info" level. - /// - /// Designates useful information. - Info, - /// The "debug" level. - /// - /// Designates lower priority information. - Debug, - /// The "trace" level. - /// - /// Designates very low priority, often extremely verbose, information. 
- Trace, -} - -impl Clone for LogLevel { - #[inline] - fn clone(&self) -> LogLevel { - *self - } -} - -impl PartialEq for LogLevel { - #[inline] - fn eq(&self, other: &LogLevel) -> bool { - *self as usize == *other as usize - } -} - -impl PartialEq for LogLevel { - #[inline] - fn eq(&self, other: &LogLevelFilter) -> bool { - *self as usize == *other as usize - } -} - -impl PartialOrd for LogLevel { - #[inline] - fn partial_cmp(&self, other: &LogLevel) -> Option { - Some(self.cmp(other)) - } -} - -impl PartialOrd for LogLevel { - #[inline] - fn partial_cmp(&self, other: &LogLevelFilter) -> Option { - Some((*self as usize).cmp(&(*other as usize))) - } -} - -impl Ord for LogLevel { - #[inline] - fn cmp(&self, other: &LogLevel) -> cmp::Ordering { - (*self as usize).cmp(&(*other as usize)) - } -} - -fn ok_or(t: Option, e: E) -> Result { - match t { - Some(t) => Ok(t), - None => Err(e), - } -} - -// Reimplemented here because std::ascii is not available in libcore -fn eq_ignore_ascii_case(a: &str, b: &str) -> bool { - fn to_ascii_uppercase(c: u8) -> u8 { - if c >= b'a' && c <= b'z' { - c - b'a' + b'A' - } else { - c - } - } - - if a.len() == b.len() { - a.bytes() - .zip(b.bytes()) - .all(|(a, b)| to_ascii_uppercase(a) == to_ascii_uppercase(b)) - } else { - false - } -} - -impl FromStr for LogLevel { - type Err = (); - fn from_str(level: &str) -> Result { - ok_or(LOG_LEVEL_NAMES.iter() - .position(|&name| eq_ignore_ascii_case(name, level)) - .into_iter() - .filter(|&idx| idx != 0) - .map(|idx| LogLevel::from_usize(idx).unwrap()) - .next(), ()) - } -} - -impl fmt::Display for LogLevel { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.pad(LOG_LEVEL_NAMES[*self as usize]) - } -} - -impl LogLevel { - fn from_usize(u: usize) -> Option { - match u { - 1 => Some(LogLevel::Error), - 2 => Some(LogLevel::Warn), - 3 => Some(LogLevel::Info), - 4 => Some(LogLevel::Debug), - 5 => Some(LogLevel::Trace), - _ => None - } - } - - fn from_new(level: log::Level) -> LogLevel { - match level { - log::Level::Error => LogLevel::Error, - log::Level::Warn => LogLevel::Warn, - log::Level::Info => LogLevel::Info, - log::Level::Debug => LogLevel::Debug, - log::Level::Trace => LogLevel::Trace, - } - } - - fn to_new(&self) -> log::Level { - match *self { - LogLevel::Error => log::Level::Error, - LogLevel::Warn => log::Level::Warn, - LogLevel::Info => log::Level::Info, - LogLevel::Debug => log::Level::Debug, - LogLevel::Trace => log::Level::Trace, - } - } - - /// Returns the most verbose logging level. - #[inline] - pub fn max() -> LogLevel { - LogLevel::Trace - } - - /// Converts the `LogLevel` to the equivalent `LogLevelFilter`. - #[inline] - pub fn to_log_level_filter(&self) -> LogLevelFilter { - LogLevelFilter::from_usize(*self as usize).unwrap() - } -} - -/// An enum representing the available verbosity level filters of the logging -/// framework. -/// -/// A `LogLevelFilter` may be compared directly to a `LogLevel`. -#[repr(usize)] -#[derive(Copy, Eq, Debug)] -pub enum LogLevelFilter { - /// A level lower than all log levels. - Off, - /// Corresponds to the `Error` log level. - Error, - /// Corresponds to the `Warn` log level. - Warn, - /// Corresponds to the `Info` log level. - Info, - /// Corresponds to the `Debug` log level. - Debug, - /// Corresponds to the `Trace` log level. 
- Trace, -} - -// Deriving generates terrible impls of these traits - -impl Clone for LogLevelFilter { - #[inline] - fn clone(&self) -> LogLevelFilter { - *self - } -} - -impl PartialEq for LogLevelFilter { - #[inline] - fn eq(&self, other: &LogLevelFilter) -> bool { - *self as usize == *other as usize - } -} - -impl PartialEq for LogLevelFilter { - #[inline] - fn eq(&self, other: &LogLevel) -> bool { - other.eq(self) - } -} - -impl PartialOrd for LogLevelFilter { - #[inline] - fn partial_cmp(&self, other: &LogLevelFilter) -> Option { - Some(self.cmp(other)) - } -} - -impl PartialOrd for LogLevelFilter { - #[inline] - fn partial_cmp(&self, other: &LogLevel) -> Option { - other.partial_cmp(self).map(|x| x.reverse()) - } -} - -impl Ord for LogLevelFilter { - #[inline] - fn cmp(&self, other: &LogLevelFilter) -> cmp::Ordering { - (*self as usize).cmp(&(*other as usize)) - } -} - -impl FromStr for LogLevelFilter { - type Err = (); - fn from_str(level: &str) -> Result { - ok_or(LOG_LEVEL_NAMES.iter() - .position(|&name| eq_ignore_ascii_case(name, level)) - .map(|p| LogLevelFilter::from_usize(p).unwrap()), ()) - } -} - -impl fmt::Display for LogLevelFilter { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "{}", LOG_LEVEL_NAMES[*self as usize]) - } -} - -impl LogLevelFilter { - fn from_usize(u: usize) -> Option { - match u { - 0 => Some(LogLevelFilter::Off), - 1 => Some(LogLevelFilter::Error), - 2 => Some(LogLevelFilter::Warn), - 3 => Some(LogLevelFilter::Info), - 4 => Some(LogLevelFilter::Debug), - 5 => Some(LogLevelFilter::Trace), - _ => None - } - } - - fn from_new(filter: log::LevelFilter) -> LogLevelFilter { - match filter { - log::LevelFilter::Off => LogLevelFilter::Off, - log::LevelFilter::Error => LogLevelFilter::Error, - log::LevelFilter::Warn => LogLevelFilter::Warn, - log::LevelFilter::Info => LogLevelFilter::Info, - log::LevelFilter::Debug => LogLevelFilter::Debug, - log::LevelFilter::Trace => LogLevelFilter::Trace, - } - } - - fn to_new(&self) -> log::LevelFilter { - match *self { - LogLevelFilter::Off => log::LevelFilter::Off, - LogLevelFilter::Error => log::LevelFilter::Error, - LogLevelFilter::Warn => log::LevelFilter::Warn, - LogLevelFilter::Info => log::LevelFilter::Info, - LogLevelFilter::Debug => log::LevelFilter::Debug, - LogLevelFilter::Trace => log::LevelFilter::Trace, - } - } - - /// Returns the most verbose logging level filter. - #[inline] - pub fn max() -> LogLevelFilter { - LogLevelFilter::Trace - } - - /// Converts `self` to the equivalent `LogLevel`. - /// - /// Returns `None` if `self` is `LogLevelFilter::Off`. - #[inline] - pub fn to_log_level(&self) -> Option { - LogLevel::from_usize(*self as usize) - } -} - -/// The "payload" of a log message. -pub struct LogRecord<'a> { - metadata: LogMetadata<'a>, - location: &'a LogLocation, - args: fmt::Arguments<'a>, -} - -impl<'a> LogRecord<'a> { - /// The message body. - pub fn args(&self) -> &fmt::Arguments<'a> { - &self.args - } - - /// Metadata about the log directive. - pub fn metadata(&self) -> &LogMetadata { - &self.metadata - } - - /// The location of the log directive. - pub fn location(&self) -> &LogLocation { - self.location - } - - /// The verbosity level of the message. - pub fn level(&self) -> LogLevel { - self.metadata.level() - } - - /// The name of the target of the directive. - pub fn target(&self) -> &str { - self.metadata.target() - } -} - -/// Metadata about a log message. 
-pub struct LogMetadata<'a> { - level: LogLevel, - target: &'a str, -} - -impl<'a> LogMetadata<'a> { - /// The verbosity level of the message. - pub fn level(&self) -> LogLevel { - self.level - } - - /// The name of the target of the directive. - pub fn target(&self) -> &str { - self.target - } -} - -/// A trait encapsulating the operations required of a logger -pub trait Log: Sync+Send { - /// Determines if a log message with the specified metadata would be - /// logged. - /// - /// This is used by the `log_enabled!` macro to allow callers to avoid - /// expensive computation of log message arguments if the message would be - /// discarded anyway. - fn enabled(&self, metadata: &LogMetadata) -> bool; - - /// Logs the `LogRecord`. - /// - /// Note that `enabled` is *not* necessarily called before this method. - /// Implementations of `log` should perform all necessary filtering - /// internally. - fn log(&self, record: &LogRecord); -} - -// Just used as a dummy initial value for LOGGER -struct NopLogger; - -impl Log for NopLogger { - fn enabled(&self, _: &LogMetadata) -> bool { false } - - fn log(&self, _: &LogRecord) {} -} - -/// The location of a log message. -/// -/// # Warning -/// -/// The fields of this struct are public so that they may be initialized by the -/// `log!` macro. They are subject to change at any time and should never be -/// accessed directly. -#[derive(Copy, Clone, Debug)] -pub struct LogLocation { - #[doc(hidden)] - pub __module_path: &'static str, - #[doc(hidden)] - pub __file: &'static str, - #[doc(hidden)] - pub __line: u32, -} - -impl LogLocation { - /// The module path of the message. - pub fn module_path(&self) -> &str { - self.__module_path - } - - /// The source file containing the message. - pub fn file(&self) -> &str { - self.__file - } - - /// The line containing the message. - pub fn line(&self) -> u32 { - self.__line - } -} - -/// A token providing read and write access to the global maximum log level -/// filter. -/// -/// The maximum log level is used as an optimization to avoid evaluating log -/// messages that will be ignored by the logger. Any message with a level -/// higher than the maximum log level filter will be ignored. A logger should -/// make sure to keep the maximum log level filter in sync with its current -/// configuration. -pub struct MaxLogLevelFilter(()); - -impl fmt::Debug for MaxLogLevelFilter { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "MaxLogLevelFilter") - } -} - -impl MaxLogLevelFilter { - /// Gets the current maximum log level filter. - pub fn get(&self) -> LogLevelFilter { - max_log_level() - } - - /// Sets the maximum log level. - pub fn set(&self, level: LogLevelFilter) { - log::set_max_level(level.to_new()) - } -} - -/// Returns the current maximum log level. -/// -/// The `log!`, `error!`, `warn!`, `info!`, `debug!`, and `trace!` macros check -/// this value and discard any message logged at a higher level. The maximum -/// log level is set by the `MaxLogLevel` token passed to loggers. -#[inline(always)] -pub fn max_log_level() -> LogLevelFilter { - LogLevelFilter::from_new(log::max_level()) -} - -/// Sets the global logger. -/// -/// The `make_logger` closure is passed a `MaxLogLevel` object, which the -/// logger should use to keep the global maximum log level in sync with the -/// highest log level that the logger will not ignore. -/// -/// This function may only be called once in the lifetime of a program. 
Any log -/// events that occur before the call to `set_logger` completes will be -/// ignored. -/// -/// This function does not typically need to be called manually. Logger -/// implementations should provide an initialization method that calls -/// `set_logger` internally. -/// -/// Requires the `use_std` feature (enabled by default). -#[cfg(feature = "use_std")] -pub fn set_logger(make_logger: M) -> Result<(), SetLoggerError> - where M: FnOnce(MaxLogLevelFilter) -> Box { - unsafe { set_logger_raw(|max_level| mem::transmute(make_logger(max_level))) } -} - -/// Sets the global logger from a raw pointer. -/// -/// This function is similar to `set_logger` except that it is usable in -/// `no_std` code. -/// -/// The `make_logger` closure is passed a `MaxLogLevel` object, which the -/// logger should use to keep the global maximum log level in sync with the -/// highest log level that the logger will not ignore. -/// -/// This function may only be called once in the lifetime of a program. Any log -/// events that occur before the call to `set_logger_raw` completes will be -/// ignored. -/// -/// This function does not typically need to be called manually. Logger -/// implementations should provide an initialization method that calls -/// `set_logger_raw` internally. -/// -/// # Safety -/// -/// The pointer returned by `make_logger` must remain valid for the entire -/// duration of the program or until `shutdown_logger_raw` is called. In -/// addition, `shutdown_logger` *must not* be called after this function. -pub unsafe fn set_logger_raw(make_logger: M) -> Result<(), SetLoggerError> - where M: FnOnce(MaxLogLevelFilter) -> *const Log { - static ADAPTOR: LoggerAdaptor = LoggerAdaptor; - match log::set_logger(&ADAPTOR) { - Ok(()) => { - LOGGER = make_logger(MaxLogLevelFilter(())); - STATE.store(INITIALIZED, Ordering::SeqCst); - Ok(()) - } - Err(_) => Err(SetLoggerError(())), - } -} - -/// Shuts down the global logger. -/// -/// This function may only be called once in the lifetime of a program, and may -/// not be called before `set_logger`. Once the global logger has been shut -/// down, it can no longer be re-initialized by `set_logger`. Any log events -/// that occur after the call to `shutdown_logger` completes will be ignored. -/// -/// The logger that was originally created by the call to to `set_logger` is -/// returned on success. At that point it is guaranteed that no other threads -/// are concurrently accessing the logger object. -#[cfg(feature = "use_std")] -pub fn shutdown_logger() -> Result, ShutdownLoggerError> { - shutdown_logger_raw().map(|l| unsafe { mem::transmute(l) }) -} - -/// Shuts down the global logger. -/// -/// This function is similar to `shutdown_logger` except that it is usable in -/// `no_std` code. -/// -/// This function may only be called once in the lifetime of a program, and may -/// not be called before `set_logger_raw`. Once the global logger has been shut -/// down, it can no longer be re-initialized by `set_logger_raw`. Any log -/// events that occur after the call to `shutdown_logger_raw` completes will be -/// ignored. -/// -/// The pointer that was originally passed to `set_logger_raw` is returned on -/// success. At that point it is guaranteed that no other threads are -/// concurrently accessing the logger object. 
-pub fn shutdown_logger_raw() -> Result<*const Log, ShutdownLoggerError> { - // Set to INITIALIZING to prevent re-initialization after - if STATE.compare_and_swap(INITIALIZED, INITIALIZING, - Ordering::SeqCst) != INITIALIZED { - return Err(ShutdownLoggerError(())); - } - - while REFCOUNT.load(Ordering::SeqCst) != 0 { - // FIXME add a sleep here when it doesn't involve timers - } - - unsafe { - let logger = LOGGER; - LOGGER = &NopLogger; - Ok(logger) - } -} - -/// The type returned by `set_logger` if `set_logger` has already been called. -#[allow(missing_copy_implementations)] -#[derive(Debug)] -pub struct SetLoggerError(()); - -impl fmt::Display for SetLoggerError { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "attempted to set a logger after the logging system \ - was already initialized") - } -} - -// The Error trait is not available in libcore -#[cfg(feature = "use_std")] -impl error::Error for SetLoggerError { - fn description(&self) -> &str { "set_logger() called multiple times" } -} - -/// The type returned by `shutdown_logger_raw` if `shutdown_logger_raw` has -/// already been called or if `set_logger_raw` has not been called yet. -#[allow(missing_copy_implementations)] -#[derive(Debug)] -pub struct ShutdownLoggerError(()); - -impl fmt::Display for ShutdownLoggerError { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "attempted to shut down the logger without an active logger") - } -} - -// The Error trait is not available in libcore -#[cfg(feature = "use_std")] -impl error::Error for ShutdownLoggerError { - fn description(&self) -> &str { "shutdown_logger() called without an active logger" } -} - -/// Deprecated -/// -/// Use https://crates.io/crates/log-panics instead. -#[cfg(all(feature = "nightly", feature = "use_std"))] -pub fn log_panics() { - std::panic::set_hook(Box::new(panic::log)); -} - -// inner module so that the reporting module is log::panic instead of log -#[cfg(all(feature = "nightly", feature = "use_std"))] -mod panic { - use std::panic::PanicInfo; - use std::thread; - - pub fn log(info: &PanicInfo) { - let thread = thread::current(); - let thread = thread.name().unwrap_or(""); - - let msg = match info.payload().downcast_ref::<&'static str>() { - Some(s) => *s, - None => match info.payload().downcast_ref::() { - Some(s) => &s[..], - None => "Box", - } - }; - - match info.location() { - Some(location) => { - error!("thread '{}' panicked at '{}': {}:{}", - thread, - msg, - location.file(), - location.line()) - } - None => error!("thread '{}' panicked at '{}'", thread, msg), - } - } -} - -struct LoggerGuard(&'static Log); - -impl Drop for LoggerGuard { - fn drop(&mut self) { - REFCOUNT.fetch_sub(1, Ordering::SeqCst); - } -} - -impl Deref for LoggerGuard { - type Target = Log; - - fn deref(&self) -> &(Log + 'static) { - self.0 - } -} - -fn logger() -> Option { - REFCOUNT.fetch_add(1, Ordering::SeqCst); - if STATE.load(Ordering::SeqCst) != INITIALIZED { - REFCOUNT.fetch_sub(1, Ordering::SeqCst); - None - } else { - Some(LoggerGuard(unsafe { &*LOGGER })) - } -} - -struct LoggerAdaptor; - -impl log::Log for LoggerAdaptor { - fn log(&self, record: &log::Record) { - if let Some(logger) = logger() { - let record = LogRecord { - metadata: LogMetadata { - level: LogLevel::from_new(record.level()), - target: record.target(), - }, - // file and module path aren't static in 0.4 so we can't forward them. 
- location: &LogLocation { - __file: "", - __line: record.line().unwrap_or(0), - __module_path: "", - }, - args: *record.args(), - }; - logger.log(&record); - } - } - - fn enabled(&self, metadata: &log::Metadata) -> bool { - match logger() { - Some(logger) => { - let metadata = LogMetadata { - level: LogLevel::from_new(metadata.level()), - target: metadata.target(), - }; - logger.enabled(&metadata) - } - None => false - } - } - - fn flush(&self) {} -} - -// WARNING -// This is not considered part of the crate's public API. It is subject to -// change at any time. -#[doc(hidden)] -pub fn __enabled(level: LogLevel, target: &str) -> bool { - match logger() { - Some(logger) => { - let metadata = LogMetadata { - level: level, - target: target, - }; - logger.enabled(&metadata) - } - None => { - log::Log::enabled( - log::logger(), - &log::Metadata::builder() - .level(level.to_new()) - .target(target) - .build() - ) - } - } -} - -// WARNING -// This is not considered part of the crate's public API. It is subject to -// change at any time. -#[doc(hidden)] -pub fn __log(level: LogLevel, target: &str, loc: &LogLocation, - args: fmt::Arguments) { - match logger() { - Some(logger) => { - let record = LogRecord { - metadata: LogMetadata { - level: level, - target: target, - }, - location: loc, - args: args, - }; - logger.log(&record); - } - None => { - log::Log::log( - log::logger(), - &log::Record::builder() - .level(level.to_new()) - .target(target) - .file(Some(loc.__file)) - .line(Some(loc.__line)) - .module_path(Some(loc.__module_path)) - .args(args) - .build() - ) - } - } -} - -// WARNING -// This is not considered part of the crate's public API. It is subject to -// change at any time. -#[inline(always)] -#[doc(hidden)] -pub fn __static_max_level() -> LogLevelFilter { - LogLevelFilter::from_new(log::STATIC_MAX_LEVEL) -} - -#[cfg(test)] -mod tests { - extern crate std; - use tests::std::string::ToString; - use super::{LogLevel, LogLevelFilter}; - - #[test] - fn test_loglevelfilter_from_str() { - let tests = [ - ("off", Ok(LogLevelFilter::Off)), - ("error", Ok(LogLevelFilter::Error)), - ("warn", Ok(LogLevelFilter::Warn)), - ("info", Ok(LogLevelFilter::Info)), - ("debug", Ok(LogLevelFilter::Debug)), - ("trace", Ok(LogLevelFilter::Trace)), - ("OFF", Ok(LogLevelFilter::Off)), - ("ERROR", Ok(LogLevelFilter::Error)), - ("WARN", Ok(LogLevelFilter::Warn)), - ("INFO", Ok(LogLevelFilter::Info)), - ("DEBUG", Ok(LogLevelFilter::Debug)), - ("TRACE", Ok(LogLevelFilter::Trace)), - ("asdf", Err(())), - ]; - for &(s, ref expected) in &tests { - assert_eq!(expected, &s.parse()); - } - } - - #[test] - fn test_loglevel_from_str() { - let tests = [ - ("OFF", Err(())), - ("error", Ok(LogLevel::Error)), - ("warn", Ok(LogLevel::Warn)), - ("info", Ok(LogLevel::Info)), - ("debug", Ok(LogLevel::Debug)), - ("trace", Ok(LogLevel::Trace)), - ("ERROR", Ok(LogLevel::Error)), - ("WARN", Ok(LogLevel::Warn)), - ("INFO", Ok(LogLevel::Info)), - ("DEBUG", Ok(LogLevel::Debug)), - ("TRACE", Ok(LogLevel::Trace)), - ("asdf", Err(())), - ]; - for &(s, ref expected) in &tests { - assert_eq!(expected, &s.parse()); - } - } - - #[test] - fn test_loglevel_show() { - assert_eq!("INFO", LogLevel::Info.to_string()); - assert_eq!("ERROR", LogLevel::Error.to_string()); - } - - #[test] - fn test_loglevelfilter_show() { - assert_eq!("OFF", LogLevelFilter::Off.to_string()); - assert_eq!("ERROR", LogLevelFilter::Error.to_string()); - } - - #[test] - fn test_cross_cmp() { - assert!(LogLevel::Debug > LogLevelFilter::Error); - assert!(LogLevelFilter::Warn 
< LogLevel::Trace); - assert!(LogLevelFilter::Off < LogLevel::Error); - } - - #[test] - fn test_cross_eq() { - assert!(LogLevel::Error == LogLevelFilter::Error); - assert!(LogLevelFilter::Off != LogLevel::Error); - assert!(LogLevel::Trace == LogLevelFilter::Trace); - } - - #[test] - fn test_to_log_level() { - assert_eq!(Some(LogLevel::Error), LogLevelFilter::Error.to_log_level()); - assert_eq!(None, LogLevelFilter::Off.to_log_level()); - assert_eq!(Some(LogLevel::Debug), LogLevelFilter::Debug.to_log_level()); - } - - #[test] - fn test_to_log_level_filter() { - assert_eq!(LogLevelFilter::Error, LogLevel::Error.to_log_level_filter()); - assert_eq!(LogLevelFilter::Trace, LogLevel::Trace.to_log_level_filter()); - } - - #[test] - #[cfg(feature = "use_std")] - fn test_error_trait() { - use std::error::Error; - use super::SetLoggerError; - let e = SetLoggerError(()); - assert_eq!(e.description(), "set_logger() called multiple times"); - } -} diff --git a/third_party/rust/log-0.3.9/src/macros.rs b/third_party/rust/log-0.3.9/src/macros.rs deleted file mode 100644 index 7037f9ac0d..0000000000 --- a/third_party/rust/log-0.3.9/src/macros.rs +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. -/// The standard logging macro. -/// -/// This macro will generically log with the specified `LogLevel` and `format!` -/// based argument list. -/// -/// The `max_level_*` features can be used to statically disable logging at -/// various levels. -#[macro_export] -macro_rules! log { - (target: $target:expr, $lvl:expr, $($arg:tt)+) => ({ - static _LOC: $crate::LogLocation = $crate::LogLocation { - __line: line!(), - __file: file!(), - __module_path: module_path!(), - }; - let lvl = $lvl; - if lvl <= $crate::__static_max_level() && lvl <= $crate::max_log_level() { - $crate::__log(lvl, $target, &_LOC, format_args!($($arg)+)) - } - }); - ($lvl:expr, $($arg:tt)+) => (log!(target: module_path!(), $lvl, $($arg)+)) -} - -/// Logs a message at the error level. -/// -/// Logging at this level is disabled if the `max_level_off` feature is present. -#[macro_export] -macro_rules! error { - (target: $target:expr, $($arg:tt)*) => ( - log!(target: $target, $crate::LogLevel::Error, $($arg)*); - ); - ($($arg:tt)*) => ( - log!($crate::LogLevel::Error, $($arg)*); - ) -} - -/// Logs a message at the warn level. -/// -/// Logging at this level is disabled if any of the following features are -/// present: `max_level_off` or `max_level_error`. -/// -/// When building in release mode (i.e., without the `debug_assertions` option), -/// logging at this level is also disabled if any of the following features are -/// present: `release_max_level_off` or `max_level_error`. -#[macro_export] -macro_rules! warn { - (target: $target:expr, $($arg:tt)*) => ( - log!(target: $target, $crate::LogLevel::Warn, $($arg)*); - ); - ($($arg:tt)*) => ( - log!($crate::LogLevel::Warn, $($arg)*); - ) -} - -/// Logs a message at the info level. -/// -/// Logging at this level is disabled if any of the following features are -/// present: `max_level_off`, `max_level_error`, or `max_level_warn`. 
-/// -/// When building in release mode (i.e., without the `debug_assertions` option), -/// logging at this level is also disabled if any of the following features are -/// present: `release_max_level_off`, `release_max_level_error`, or -/// `release_max_level_warn`. -#[macro_export] -macro_rules! info { - (target: $target:expr, $($arg:tt)*) => ( - log!(target: $target, $crate::LogLevel::Info, $($arg)*); - ); - ($($arg:tt)*) => ( - log!($crate::LogLevel::Info, $($arg)*); - ) -} - -/// Logs a message at the debug level. -/// -/// Logging at this level is disabled if any of the following features are -/// present: `max_level_off`, `max_level_error`, `max_level_warn`, or -/// `max_level_info`. -/// -/// When building in release mode (i.e., without the `debug_assertions` option), -/// logging at this level is also disabled if any of the following features are -/// present: `release_max_level_off`, `release_max_level_error`, -/// `release_max_level_warn`, or `release_max_level_info`. -#[macro_export] -macro_rules! debug { - (target: $target:expr, $($arg:tt)*) => ( - log!(target: $target, $crate::LogLevel::Debug, $($arg)*); - ); - ($($arg:tt)*) => ( - log!($crate::LogLevel::Debug, $($arg)*); - ) -} - -/// Logs a message at the trace level. -/// -/// Logging at this level is disabled if any of the following features are -/// present: `max_level_off`, `max_level_error`, `max_level_warn`, -/// `max_level_info`, or `max_level_debug`. -/// -/// When building in release mode (i.e., without the `debug_assertions` option), -/// logging at this level is also disabled if any of the following features are -/// present: `release_max_level_off`, `release_max_level_error`, -/// `release_max_level_warn`, `release_max_level_info`, or -/// `release_max_level_debug`. -#[macro_export] -macro_rules! trace { - (target: $target:expr, $($arg:tt)*) => ( - log!(target: $target, $crate::LogLevel::Trace, $($arg)*); - ); - ($($arg:tt)*) => ( - log!($crate::LogLevel::Trace, $($arg)*); - ) -} - -/// Determines if a message logged at the specified level in that module will -/// be logged. -/// -/// This can be used to avoid expensive computation of log message arguments if -/// the message would be ignored anyway. -/// -/// # Examples -/// -/// ```rust -/// # #[macro_use] -/// # extern crate log; -/// use log::LogLevel::Debug; -/// -/// # fn foo() { -/// if log_enabled!(Debug) { -/// let data = expensive_call(); -/// debug!("expensive debug data: {} {}", data.x, data.y); -/// } -/// # } -/// # struct Data { x: u32, y: u32 } -/// # fn expensive_call() -> Data { Data { x: 0, y: 0 } } -/// # fn main() {} -/// ``` -#[macro_export] -macro_rules! 
log_enabled { - (target: $target:expr, $lvl:expr) => ({ - let lvl = $lvl; - lvl <= $crate::__static_max_level() && lvl <= $crate::max_log_level() && - $crate::__enabled(lvl, $target) - }); - ($lvl:expr) => (log_enabled!(target: module_path!(), $lvl)) -} diff --git a/third_party/rust/log/.cargo-checksum.json b/third_party/rust/log/.cargo-checksum.json index 31dc666b23..c9cef5d2c5 100644 --- a/third_party/rust/log/.cargo-checksum.json +++ b/third_party/rust/log/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"CHANGELOG.md":"817c711701c9925f2494c5a2e09c679efd4026e121395b0167e722eb827eb2bf","Cargo.toml":"86f094aa5d1cea5b682852ec8a80a3cb954fc3d5267bd201e2e956d89b17f4aa","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"ebfbd97d184c4474295e36e30e45779bb40bc7c07610734058dfa272a922927e","appveyor.yml":"d5376a881aef6dc3bed6a6b51f1f1c3801b88fc77a12895f6953190256e965bc","src/lib.rs":"43acb02c1d3440235e464b6ab51e52824a7d604390e278d098054e76e686b798","src/macros.rs":"728a5309f35c84895ff1935c712f57464e2239dddb09b6fd84ff289a66c34b46","src/serde.rs":"a5ae50d6f5ac538a5ca57ee58031a2582afa6989c741574584756632a4131ba7","tests/filters.rs":"6aee024d4594b4fde772e35c5d3318e5aa5d9aa4eaeafb04e4c7c922be4ac837"},"package":"c84ec4b527950aa83a329754b01dbe3f58361d1c5efacd1f6d68c494d08a17c6"} \ No newline at end of file +{"files":{"CHANGELOG.md":"4a384a648f3c5f9deed5463bbcdb92cf95dfe86d755767c0c21140d0539c1243","Cargo.toml":"cde739d8e087cc2515bc06d8ce327648606575e6dd51b74fb0ffd6a329381ba1","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"0a6436eede658249802b63f1e693b28c9f80a2da2697bb80a2677e6c253c3a7d","build.rs":"16a49ad8a5b11a610382f904c72435ce3201b0807c584128c7e61fbb59c76b09","src/kv/error.rs":"296f6af53062a4526bb75eb4dbf955c75aed3b6332c260dc5bd7f0bc68c2d8c7","src/kv/key.rs":"68077f9ad149b28ccdeacd9e95c44c292b68d7fa0823aac5e022e2f3df6120e6","src/kv/mod.rs":"3397573933689b11ca5ad526193166c1e4f665e5692c38cd8fdb63a5aa61f7bf","src/kv/source.rs":"3783ac96b54e24fe6525f9e3cec91425800267f842704d947fea27ee344025a2","src/kv/value/impls.rs":"c8d4cb4e746e7b5a6e1f1993145c2b5309ac556f7ffc757fb80bb10b89bfa40d","src/kv/value/internal.rs":"b7e7d94d018d0207dfb30350a2ce916dc124c48c2e056e7637c159132c707a59","src/kv/value/mod.rs":"a4732af6fb9b3fad8ddf9dd5f9da90689eb9023984ff1f455f871ed32fde4aef","src/kv/value/test.rs":"99e0b87dd8822e7f148a3acfd1dd050718e2aee3ecd9314849076bf043a0a3e9","src/lib.rs":"c594c4a71bff2118e120c9c64e1e2e866dc76489af007965b6c67e57bf1a1797","src/macros.rs":"68cbe1a49a3ac846d6b80d291f427a70dbad0f57dac8beecac7449723293bd99","src/serde.rs":"4677851fba9d43adcddeb94c416af9e06eb70d4fb86e2840256e1edfc4638fef","tests/filters.rs":"90cae6653ba6dc3a462bab2a0b96cb66d5d21882492118bfd6999c0baff1b0e5"},"package":"14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7"} \ No newline at end of file diff --git a/third_party/rust/log/CHANGELOG.md b/third_party/rust/log/CHANGELOG.md index edc161783f..e04562b155 100644 --- a/third_party/rust/log/CHANGELOG.md +++ b/third_party/rust/log/CHANGELOG.md @@ -2,6 +2,26 @@ ## [Unreleased] +## [0.4.8] - 2019-07-28 + +### New + +* Support attempting to get `Record` fields as static strings. + +## [0.4.7] - 2019-07-06 + +### New + +* Support for embedded environments with thread-unsafe initialization. 
+* Initial unstable support for capturing structured data under the `kv_unstable` +feature gate. This new API doesn't affect existing users and may change in future +patches (so those changes may not appear in the changelog until it stabilizes). + +### Improved + +* Docs for using `log` with the 2018 edition. +* Error messages for macros missing arguments. + ## [0.4.6] - 2018-10-27 ### Improved @@ -112,7 +132,9 @@ version using log 0.4.x to avoid losing module and file information. Look at the [release tags] for information about older releases. -[Unreleased]: https://github.com/rust-lang-nursery/log/compare/0.4.6...HEAD +[Unreleased]: https://github.com/rust-lang-nursery/log/compare/0.4.8...HEAD +[0.4.8]: https://github.com/rust-lang-nursery/log/compare/0.4.7...0.4.8 +[0.4.7]: https://github.com/rust-lang-nursery/log/compare/0.4.6...0.4.7 [0.4.6]: https://github.com/rust-lang-nursery/log/compare/0.4.5...0.4.6 [0.4.5]: https://github.com/rust-lang-nursery/log/compare/0.4.4...0.4.5 [0.4.4]: https://github.com/rust-lang-nursery/log/compare/0.4.3...0.4.4 diff --git a/third_party/rust/log/Cargo.toml b/third_party/rust/log/Cargo.toml index c589af93b5..146060f8fa 100644 --- a/third_party/rust/log/Cargo.toml +++ b/third_party/rust/log/Cargo.toml @@ -12,18 +12,19 @@ [package] name = "log" -version = "0.4.6" +version = "0.4.8" authors = ["The Rust Project Developers"] +build = "build.rs" +exclude = ["rfcs/**/*", "/.travis.yml", "/appveyor.yml"] description = "A lightweight logging facade for Rust\n" -homepage = "https://github.com/rust-lang/log" documentation = "https://docs.rs/log" readme = "README.md" keywords = ["logging"] categories = ["development-tools::debugging"] -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/rust-lang/log" [package.metadata.docs.rs] -features = ["std", "serde"] +features = ["std", "serde", "kv_unstable_sval"] [[test]] name = "filters" @@ -35,10 +36,21 @@ version = "0.1.2" version = "1.0" optional = true default-features = false + +[dependencies.sval] +version = "0.4.2" +optional = true +default-features = false [dev-dependencies.serde_test] version = "1.0" +[dev-dependencies.sval] +version = "0.4.2" +features = ["test"] + [features] +kv_unstable = [] +kv_unstable_sval = ["kv_unstable", "sval/fmt"] max_level_debug = [] max_level_error = [] max_level_info = [] diff --git a/third_party/rust/log/README.md b/third_party/rust/log/README.md index b7884ad834..5fbcc97012 100644 --- a/third_party/rust/log/README.md +++ b/third_party/rust/log/README.md @@ -3,7 +3,7 @@ log A Rust library providing a lightweight logging *facade*. -[![Build Status](https://travis-ci.org/rust-lang-nursery/log.svg?branch=master)](https://travis-ci.org/rust-lang-nursery/log) +[![Build Status](https://travis-ci.com/rust-lang-nursery/log.svg?branch=master)](https://travis-ci.com/rust-lang-nursery/log) [![Build status](https://ci.appveyor.com/api/projects/status/nopdjmmjt45xcrki?svg=true)](https://ci.appveyor.com/project/alexcrichton/log) [![Latest version](https://img.shields.io/crates/v/log.svg)](https://crates.io/crates/log) [![Documentation](https://docs.rs/log/badge.svg)](https://docs.rs/log) @@ -16,6 +16,13 @@ logging implementation. Libraries can use the logging API provided by this crate, and the consumer of those libraries can choose the logging implementation that is most suitable for its use case. + +## Minimum supported `rustc` + +`1.16.0+` + +This version is explicitly tested in CI and may be bumped in any release as needed. 
Maintaining compatibility with older compilers is a priority though, so the bar for bumping the minimum supported version is set very high. Any changes to the supported minimum version will be called out in the release notes. + ## Usage ## In libraries @@ -29,8 +36,7 @@ log = "0.4" ``` ```rust -#[macro_use] -extern crate log; +use log::{info, trace, warn}; pub fn shave_the_yak(yak: &mut Yak) { trace!("Commencing yak shaving"); @@ -50,19 +56,9 @@ pub fn shave_the_yak(yak: &mut Yak) { } ``` -If you use Rust 2018, you can use instead the following code to import the crate macros: - -```rust -use log::{info, trace, warn}; - -pub fn shave_the_yak(yak: &mut Yak) { - // … -} -``` - ## In executables -In order to produce log output executables have to use a logger implementation compatible with the facade. +In order to produce log output, executables have to use a logger implementation compatible with the facade. There are many available implementations to chose from, here are some of the most popular ones: * Simple minimal loggers: @@ -79,6 +75,8 @@ There are many available implementations to chose from, here are some of the mos * [`syslog`](https://docs.rs/syslog/*/syslog/) * [`slog-stdlog`](https://docs.rs/slog-stdlog/*/slog_stdlog/) * [`android_log`](https://docs.rs/android_log/*/android_log/) +* For WebAssembly binaries: + * [`console_log`](https://docs.rs/console_log/*/console_log/) Executables should choose a logger implementation and initialize it early in the runtime of the program. Logger implementations will typically include a diff --git a/third_party/rust/log/appveyor.yml b/third_party/rust/log/appveyor.yml deleted file mode 100644 index eb6786dd11..0000000000 --- a/third_party/rust/log/appveyor.yml +++ /dev/null @@ -1,19 +0,0 @@ -environment: - matrix: - - TARGET: x86_64-pc-windows-msvc - - TARGET: i686-pc-windows-msvc - - TARGET: i686-pc-windows-gnu -install: - - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-nightly-${env:TARGET}.exe" - - rust-nightly-%TARGET%.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust" - - SET PATH=%PATH%;C:\Program Files (x86)\Rust\bin - - SET PATH=%PATH%;C:\MinGW\bin - - rustc -V - - cargo -V - -build: false - -test_script: - - cargo test --verbose - - cargo test --verbose --features serde - - cargo test --verbose --features std diff --git a/third_party/rust/log/build.rs b/third_party/rust/log/build.rs new file mode 100644 index 0000000000..6717bf0f70 --- /dev/null +++ b/third_party/rust/log/build.rs @@ -0,0 +1,14 @@ +//! This build script detects target platforms that lack proper support for +//! atomics and sets `cfg` flags accordingly. + +use std::env; + +fn main() { + let target = env::var("TARGET").unwrap(); + + if !target.starts_with("thumbv6") { + println!("cargo:rustc-cfg=atomic_cas"); + } + + println!("cargo:rerun-if-changed=build.rs"); +} diff --git a/third_party/rust/log/src/kv/error.rs b/third_party/rust/log/src/kv/error.rs new file mode 100644 index 0000000000..0f5652f923 --- /dev/null +++ b/third_party/rust/log/src/kv/error.rs @@ -0,0 +1,88 @@ +use std::fmt; + +/// An error encountered while working with structured data. +#[derive(Debug)] +pub struct Error { + inner: Inner +} + +#[derive(Debug)] +enum Inner { + #[cfg(feature = "std")] + Boxed(std_support::BoxedError), + Msg(&'static str), + Fmt, +} + +impl Error { + /// Create an error from a message. 
+ pub fn msg(msg: &'static str) -> Self { + Error { + inner: Inner::Msg(msg), + } + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::Inner::*; + match &self.inner { + #[cfg(feature = "std")] + &Boxed(ref err) => err.fmt(f), + &Msg(ref msg) => msg.fmt(f), + &Fmt => fmt::Error.fmt(f), + } + } +} + +impl From for Error { + fn from(_: fmt::Error) -> Self { + Error { + inner: Inner::Fmt, + } + } +} + +impl From for fmt::Error { + fn from(_: Error) -> Self { + fmt::Error + } +} + +#[cfg(feature = "std")] +mod std_support { + use super::*; + use std::{error, io}; + + pub(super) type BoxedError = Box; + + impl Error { + /// Create an error from a standard error type. + pub fn boxed(err: E) -> Self + where + E: Into, + { + Error { + inner: Inner::Boxed(err.into()) + } + } + } + + impl error::Error for Error { + fn description(&self) -> &str { + "key values error" + } + } + + impl From for Error { + fn from(err: io::Error) -> Self { + Error::boxed(err) + } + } + + impl From for io::Error { + fn from(err: Error) -> Self { + io::Error::new(io::ErrorKind::Other, err) + } + } +} diff --git a/third_party/rust/log/src/kv/key.rs b/third_party/rust/log/src/kv/key.rs new file mode 100644 index 0000000000..82f1e85e62 --- /dev/null +++ b/third_party/rust/log/src/kv/key.rs @@ -0,0 +1,143 @@ +//! Structured keys. + +use std::fmt; +use std::cmp; +use std::hash; +use std::borrow::Borrow; + +/// A type that can be converted into a [`Key`](struct.Key.html). +pub trait ToKey { + /// Perform the conversion. + fn to_key(&self) -> Key; +} + +impl<'a, T> ToKey for &'a T +where + T: ToKey + ?Sized, +{ + fn to_key(&self) -> Key { + (**self).to_key() + } +} + +impl<'k> ToKey for Key<'k> { + fn to_key(&self) -> Key { + Key { + key: self.key, + } + } +} + +impl ToKey for str { + fn to_key(&self) -> Key { + Key::from_str(self) + } +} + +/// A key in a structured key-value pair. +#[derive(Clone)] +pub struct Key<'k> { + key: &'k str, +} + +impl<'k> Key<'k> { + /// Get a key from a borrowed string. + pub fn from_str(key: &'k str) -> Self { + Key { + key: key, + } + } + + /// Get a borrowed string from this key. 
+ pub fn as_str(&self) -> &str { + self.key + } +} + +impl<'k> fmt::Debug for Key<'k> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.key.fmt(f) + } +} + +impl<'k> fmt::Display for Key<'k> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.key.fmt(f) + } +} + +impl<'k> hash::Hash for Key<'k> { + fn hash(&self, state: &mut H) + where + H: hash::Hasher, + { + self.as_str().hash(state) + } +} + +impl<'k, 'ko> PartialEq> for Key<'k> { + fn eq(&self, other: &Key<'ko>) -> bool { + self.as_str().eq(other.as_str()) + } +} + +impl<'k> Eq for Key<'k> {} + +impl<'k, 'ko> PartialOrd> for Key<'k> { + fn partial_cmp(&self, other: &Key<'ko>) -> Option { + self.as_str().partial_cmp(other.as_str()) + } +} + +impl<'k> Ord for Key<'k> { + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.as_str().cmp(other.as_str()) + } +} + +impl<'k> AsRef for Key<'k> { + fn as_ref(&self) -> &str { + self.as_str() + } +} + +impl<'k> Borrow for Key<'k> { + fn borrow(&self) -> &str { + self.as_str() + } +} + +impl<'k> From<&'k str> for Key<'k> { + fn from(s: &'k str) -> Self { + Key::from_str(s) + } +} + +#[cfg(feature = "std")] +mod std_support { + use super::*; + + use std::borrow::Cow; + + impl ToKey for String { + fn to_key(&self) -> Key { + Key::from_str(self) + } + } + + impl<'a> ToKey for Cow<'a, str> { + fn to_key(&self) -> Key { + Key::from_str(self) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn key_from_string() { + assert_eq!("a key", Key::from_str("a key").as_str()); + } +} diff --git a/third_party/rust/log/src/kv/mod.rs b/third_party/rust/log/src/kv/mod.rs new file mode 100644 index 0000000000..17226b4cf6 --- /dev/null +++ b/third_party/rust/log/src/kv/mod.rs @@ -0,0 +1,23 @@ +//! **UNSTABLE:** Structured key-value pairs. +//! +//! This module is unstable and breaking changes may be made +//! at any time. See [the tracking issue](https://github.com/rust-lang-nursery/log/issues/328) +//! for more details. +//! +//! Add the `kv_unstable` feature to your `Cargo.toml` to enable +//! this module: +//! +//! ```toml +//! [dependencies.log] +//! features = ["kv_unstable"] +//! ``` + +mod error; +mod source; +mod key; +pub mod value; + +pub use self::error::Error; +pub use self::source::{Source, Visitor}; +pub use self::key::{Key, ToKey}; +pub use self::value::{Value, ToValue}; diff --git a/third_party/rust/log/src/kv/source.rs b/third_party/rust/log/src/kv/source.rs new file mode 100644 index 0000000000..0538c14f47 --- /dev/null +++ b/third_party/rust/log/src/kv/source.rs @@ -0,0 +1,396 @@ +//! Sources for key-value pairs. + +use std::fmt; +use kv::{Error, Key, ToKey, Value, ToValue}; + +/// A source of key-value pairs. +/// +/// The source may be a single pair, a set of pairs, or a filter over a set of pairs. +/// Use the [`Visitor`](trait.Visitor.html) trait to inspect the structured data +/// in a source. +pub trait Source { + /// Visit key-value pairs. + /// + /// A source doesn't have to guarantee any ordering or uniqueness of key-value pairs. + /// If the given visitor returns an error then the source may early-return with it, + /// even if there are more key-value pairs. + /// + /// # Implementation notes + /// + /// A source should yield the same key-value pairs to a subsequent visitor unless + /// that visitor itself fails. + fn visit<'kvs>(&'kvs self, visitor: &mut Visitor<'kvs>) -> Result<(), Error>; + + /// Get the value for a given key. 
+ /// + /// If the key appears multiple times in the source then which key is returned + /// is implementation specific. + /// + /// # Implementation notes + /// + /// A source that can provide a more efficient implementation of this method + /// should override it. + fn get<'v>(&'v self, key: Key) -> Option> { + struct Get<'k, 'v> { + key: Key<'k>, + found: Option>, + } + + impl<'k, 'kvs> Visitor<'kvs> for Get<'k, 'kvs> { + fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { + if self.key == key { + self.found = Some(value); + } + + Ok(()) + } + } + + let mut get = Get { + key, + found: None, + }; + + let _ = self.visit(&mut get); + get.found + } + + /// Count the number of key-value pairs that can be visited. + /// + /// # Implementation notes + /// + /// A source that knows the number of key-value pairs upfront may provide a more + /// efficient implementation. + /// + /// A subsequent call to `visit` should yield the same number of key-value pairs + /// to the visitor, unless that visitor fails part way through. + fn count(&self) -> usize { + struct Count(usize); + + impl<'kvs> Visitor<'kvs> for Count { + fn visit_pair(&mut self, _: Key<'kvs>, _: Value<'kvs>) -> Result<(), Error> { + self.0 += 1; + + Ok(()) + } + } + + let mut count = Count(0); + let _ = self.visit(&mut count); + count.0 + } +} + +impl<'a, T> Source for &'a T +where + T: Source + ?Sized, +{ + fn visit<'kvs>(&'kvs self, visitor: &mut Visitor<'kvs>) -> Result<(), Error> { + Source::visit(&**self, visitor) + } + + fn get<'v>(&'v self, key: Key) -> Option> { + Source::get(&**self, key) + } + + fn count(&self) -> usize { + Source::count(&**self) + } +} + +impl Source for (K, V) +where + K: ToKey, + V: ToValue, +{ + fn visit<'kvs>(&'kvs self, visitor: &mut Visitor<'kvs>) -> Result<(), Error> { + visitor.visit_pair(self.0.to_key(), self.1.to_value()) + } + + fn get<'v>(&'v self, key: Key) -> Option> { + if self.0.to_key() == key { + Some(self.1.to_value()) + } else { + None + } + } + + fn count(&self) -> usize { + 1 + } +} + +impl Source for [S] +where + S: Source, +{ + fn visit<'kvs>(&'kvs self, visitor: &mut Visitor<'kvs>) -> Result<(), Error> { + for source in self { + source.visit(visitor)?; + } + + Ok(()) + } + + fn count(&self) -> usize { + self.len() + } +} + +impl Source for Option +where + S: Source, +{ + fn visit<'kvs>(&'kvs self, visitor: &mut Visitor<'kvs>) -> Result<(), Error> { + if let Some(ref source) = *self { + source.visit(visitor)?; + } + + Ok(()) + } + + fn count(&self) -> usize { + self.as_ref().map(Source::count).unwrap_or(0) + } +} + +/// A visitor for the key-value pairs in a [`Source`](trait.Source.html). +pub trait Visitor<'kvs> { + /// Visit a key-value pair. 
+ fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error>; +} + +impl<'a, 'kvs, T> Visitor<'kvs> for &'a mut T +where + T: Visitor<'kvs> + ?Sized, +{ + fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { + (**self).visit_pair(key, value) + } +} + +impl<'a, 'b: 'a, 'kvs> Visitor<'kvs> for fmt::DebugMap<'a, 'b> { + fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { + self.entry(&key, &value); + Ok(()) + } +} + +impl<'a, 'b: 'a, 'kvs> Visitor<'kvs> for fmt::DebugList<'a, 'b> { + fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { + self.entry(&(key, value)); + Ok(()) + } +} + +impl<'a, 'b: 'a, 'kvs> Visitor<'kvs> for fmt::DebugSet<'a, 'b> { + fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { + self.entry(&(key, value)); + Ok(()) + } +} + +impl<'a, 'b: 'a, 'kvs> Visitor<'kvs> for fmt::DebugTuple<'a, 'b> { + fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { + self.field(&key); + self.field(&value); + Ok(()) + } +} + +#[cfg(feature = "std")] +mod std_support { + use super::*; + use std::borrow::Borrow; + use std::collections::{BTreeMap, HashMap}; + use std::hash::{BuildHasher, Hash}; + + impl Source for Box + where + S: Source + ?Sized, + { + fn visit<'kvs>(&'kvs self, visitor: &mut Visitor<'kvs>) -> Result<(), Error> { + Source::visit(&**self, visitor) + } + + fn get<'v>(&'v self, key: Key) -> Option> { + Source::get(&**self, key) + } + + fn count(&self) -> usize { + Source::count(&**self) + } + } + + impl Source for Vec + where + S: Source, + { + fn visit<'kvs>(&'kvs self, visitor: &mut Visitor<'kvs>) -> Result<(), Error> { + Source::visit(&**self, visitor) + } + + fn get<'v>(&'v self, key: Key) -> Option> { + Source::get(&**self, key) + } + + fn count(&self) -> usize { + Source::count(&**self) + } + } + + impl<'kvs, V> Visitor<'kvs> for Box + where + V: Visitor<'kvs> + ?Sized, + { + fn visit_pair(&mut self, key: Key<'kvs>, value: Value<'kvs>) -> Result<(), Error> { + (**self).visit_pair(key, value) + } + } + + impl Source for HashMap + where + K: ToKey + Borrow + Eq + Hash, + V: ToValue, + S: BuildHasher, + { + fn visit<'kvs>(&'kvs self, visitor: &mut Visitor<'kvs>) -> Result<(), Error> { + for (key, value) in self { + visitor.visit_pair(key.to_key(), value.to_value())?; + } + Ok(()) + } + + fn get<'v>(&'v self, key: Key) -> Option> { + HashMap::get(self, key.as_str()).map(|v| v.to_value()) + } + + fn count(&self) -> usize { + self.len() + } + } + + impl Source for BTreeMap + where + K: ToKey + Borrow + Ord, + V: ToValue, + { + fn visit<'kvs>(&'kvs self, visitor: &mut Visitor<'kvs>) -> Result<(), Error> { + for (key, value) in self { + visitor.visit_pair(key.to_key(), value.to_value())?; + } + Ok(()) + } + + fn get<'v>(&'v self, key: Key) -> Option> { + BTreeMap::get(self, key.as_str()).map(|v| v.to_value()) + } + + fn count(&self) -> usize { + self.len() + } + } + + #[cfg(test)] + mod tests { + use super::*; + use kv::value::test::Token; + use std::collections::{BTreeMap, HashMap}; + + #[test] + fn count() { + assert_eq!(1, Source::count(&Box::new(("a", 1)))); + assert_eq!(2, Source::count(&vec![("a", 1), ("b", 2)])); + } + + #[test] + fn get() { + let source = vec![("a", 1), ("b", 2), ("a", 1)]; + assert_eq!( + Token::I64(1), + Source::get(&source, Key::from_str("a")).unwrap().to_token() + ); + + let source = Box::new(Option::None::<(&str, i32)>); + assert!(Source::get(&source, 
Key::from_str("a")).is_none()); + } + + #[test] + fn hash_map() { + let mut map = HashMap::new(); + map.insert("a", 1); + map.insert("b", 2); + + assert_eq!(2, Source::count(&map)); + assert_eq!( + Token::I64(1), + Source::get(&map, Key::from_str("a")).unwrap().to_token() + ); + } + + #[test] + fn btree_map() { + let mut map = BTreeMap::new(); + map.insert("a", 1); + map.insert("b", 2); + + assert_eq!(2, Source::count(&map)); + assert_eq!( + Token::I64(1), + Source::get(&map, Key::from_str("a")).unwrap().to_token() + ); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use kv::value::test::Token; + + #[test] + fn source_is_object_safe() { + fn _check(_: &Source) {} + } + + #[test] + fn visitor_is_object_safe() { + fn _check(_: &Visitor) {} + } + + #[test] + fn count() { + struct OnePair { + key: &'static str, + value: i32, + } + + impl Source for OnePair { + fn visit<'kvs>(&'kvs self, visitor: &mut Visitor<'kvs>) -> Result<(), Error> { + visitor.visit_pair(self.key.to_key(), self.value.to_value()) + } + } + + assert_eq!(1, Source::count(&("a", 1))); + assert_eq!(2, Source::count(&[("a", 1), ("b", 2)] as &[_])); + assert_eq!(0, Source::count(&Option::None::<(&str, i32)>)); + assert_eq!(1, Source::count(&OnePair { key: "a", value: 1 })); + } + + #[test] + fn get() { + let source = &[("a", 1), ("b", 2), ("a", 1)] as &[_]; + assert_eq!( + Token::I64(1), + Source::get(source, Key::from_str("a")).unwrap().to_token() + ); + assert_eq!( + Token::I64(2), + Source::get(source, Key::from_str("b")).unwrap().to_token() + ); + assert!(Source::get(&source, Key::from_str("c")).is_none()); + + let source = Option::None::<(&str, i32)>; + assert!(Source::get(&source, Key::from_str("a")).is_none()); + } +} diff --git a/third_party/rust/log/src/kv/value/impls.rs b/third_party/rust/log/src/kv/value/impls.rs new file mode 100644 index 0000000000..ea181466b4 --- /dev/null +++ b/third_party/rust/log/src/kv/value/impls.rs @@ -0,0 +1,269 @@ +use std::fmt; + +use super::{ToValue, Value, Primitive}; + +impl ToValue for usize { + fn to_value(&self) -> Value { + Value::from(*self) + } +} + +impl<'v> From for Value<'v> { + fn from(value: usize) -> Self { + Value::from_primitive(Primitive::Unsigned(value as u64)) + } +} + +impl ToValue for isize { + fn to_value(&self) -> Value { + Value::from(*self) + } +} + +impl<'v> From for Value<'v> { + fn from(value: isize) -> Self { + Value::from_primitive(Primitive::Signed(value as i64)) + } +} + +impl ToValue for u8 { + fn to_value(&self) -> Value { + Value::from(*self) + } +} + +impl<'v> From for Value<'v> { + fn from(value: u8) -> Self { + Value::from_primitive(Primitive::Unsigned(value as u64)) + } +} + +impl ToValue for u16 { + fn to_value(&self) -> Value { + Value::from(*self) + } +} + +impl<'v> From for Value<'v> { + fn from(value: u16) -> Self { + Value::from_primitive(Primitive::Unsigned(value as u64)) + } +} + +impl ToValue for u32 { + fn to_value(&self) -> Value { + Value::from(*self) + } +} + +impl<'v> From for Value<'v> { + fn from(value: u32) -> Self { + Value::from_primitive(Primitive::Unsigned(value as u64)) + } +} + +impl ToValue for u64 { + fn to_value(&self) -> Value { + Value::from(*self) + } +} + +impl<'v> From for Value<'v> { + fn from(value: u64) -> Self { + Value::from_primitive(Primitive::Unsigned(value)) + } +} + +impl ToValue for i8 { + fn to_value(&self) -> Value { + Value::from(*self) + } +} + +impl<'v> From for Value<'v> { + fn from(value: i8) -> Self { + Value::from_primitive(Primitive::Signed(value as i64)) + } +} + +impl ToValue for i16 { + 
fn to_value(&self) -> Value { + Value::from(*self) + } +} + +impl<'v> From for Value<'v> { + fn from(value: i16) -> Self { + Value::from_primitive(Primitive::Signed(value as i64)) + } +} + +impl ToValue for i32 { + fn to_value(&self) -> Value { + Value::from(*self) + } +} + +impl<'v> From for Value<'v> { + fn from(value: i32) -> Self { + Value::from_primitive(Primitive::Signed(value as i64)) + } +} + +impl ToValue for i64 { + fn to_value(&self) -> Value { + Value::from(*self) + } +} + +impl<'v> From for Value<'v> { + fn from(value: i64) -> Self { + Value::from_primitive(Primitive::Signed(value)) + } +} + +impl ToValue for f32 { + fn to_value(&self) -> Value { + Value::from(*self) + } +} + +impl<'v> From for Value<'v> { + fn from(value: f32) -> Self { + Value::from_primitive(Primitive::Float(value as f64)) + } +} + +impl ToValue for f64 { + fn to_value(&self) -> Value { + Value::from(*self) + } +} + +impl<'v> From for Value<'v> { + fn from(value: f64) -> Self { + Value::from_primitive(Primitive::Float(value)) + } +} + +impl ToValue for bool { + fn to_value(&self) -> Value { + Value::from(*self) + } +} + +impl<'v> From for Value<'v> { + fn from(value: bool) -> Self { + Value::from_primitive(Primitive::Bool(value)) + } +} + +impl ToValue for char { + fn to_value(&self) -> Value { + Value::from(*self) + } +} + +impl<'v> From for Value<'v> { + fn from(value: char) -> Self { + Value::from_primitive(Primitive::Char(value)) + } +} + +impl<'v> ToValue for &'v str { + fn to_value(&self) -> Value { + Value::from(*self) + } +} + +impl<'v> From<&'v str> for Value<'v> { + fn from(value: &'v str) -> Self { + Value::from_primitive(Primitive::Str(value)) + } +} + +impl ToValue for () { + fn to_value(&self) -> Value { + Value::from_primitive(Primitive::None) + } +} + +impl ToValue for Option +where + T: ToValue, +{ + fn to_value(&self) -> Value { + match *self { + Some(ref value) => value.to_value(), + None => Value::from_primitive(Primitive::None), + } + } +} + +impl<'v> ToValue for fmt::Arguments<'v> { + fn to_value(&self) -> Value { + Value::from_debug(self) + } +} + +#[cfg(feature = "std")] +mod std_support { + use super::*; + + use std::borrow::Cow; + + impl ToValue for Box + where + T: ToValue + ?Sized, + { + fn to_value(&self) -> Value { + (**self).to_value() + } + } + + impl ToValue for String { + fn to_value(&self) -> Value { + Value::from_primitive(Primitive::Str(&*self)) + } + } + + impl<'v> ToValue for Cow<'v, str> { + fn to_value(&self) -> Value { + Value::from_primitive(Primitive::Str(&*self)) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use kv::value::test::Token; + + #[test] + fn test_to_value_display() { + assert_eq!(42u64.to_value().to_string(), "42"); + assert_eq!(42i64.to_value().to_string(), "42"); + assert_eq!(42.01f64.to_value().to_string(), "42.01"); + assert_eq!(true.to_value().to_string(), "true"); + assert_eq!('a'.to_value().to_string(), "'a'"); + assert_eq!(format_args!("a {}", "value").to_value().to_string(), "a value"); + assert_eq!("a loong string".to_value().to_string(), "\"a loong string\""); + assert_eq!(Some(true).to_value().to_string(), "true"); + assert_eq!(().to_value().to_string(), "None"); + assert_eq!(Option::None::.to_value().to_string(), "None"); + } + + #[test] + fn test_to_value_structured() { + assert_eq!(42u64.to_value().to_token(), Token::U64(42)); + assert_eq!(42i64.to_value().to_token(), Token::I64(42)); + assert_eq!(42.01f64.to_value().to_token(), Token::F64(42.01)); + assert_eq!(true.to_value().to_token(), Token::Bool(true)); + 
assert_eq!('a'.to_value().to_token(), Token::Char('a')); + assert_eq!(format_args!("a {}", "value").to_value().to_token(), Token::Str("a value".into())); + assert_eq!("a loong string".to_value().to_token(), Token::Str("a loong string".into())); + assert_eq!(Some(true).to_value().to_token(), Token::Bool(true)); + assert_eq!(().to_value().to_token(), Token::None); + assert_eq!(Option::None::.to_value().to_token(), Token::None); + } +} diff --git a/third_party/rust/log/src/kv/value/internal.rs b/third_party/rust/log/src/kv/value/internal.rs new file mode 100644 index 0000000000..5f01a317b1 --- /dev/null +++ b/third_party/rust/log/src/kv/value/internal.rs @@ -0,0 +1,264 @@ +use std::fmt; + +use super::{Fill, Slot, Error}; +use kv; + +// `Visitor` is an internal API for visiting the structure of a value. +// It's not intended to be public (at this stage). + +/// A container for a structured value for a specific kind of visitor. +#[derive(Clone, Copy)] +pub(super) enum Inner<'v> { + /// A simple primitive value that can be copied without allocating. + Primitive(Primitive<'v>), + /// A value that can be filled. + Fill(&'v Fill), + /// A debuggable value. + Debug(&'v fmt::Debug), + /// A displayable value. + Display(&'v fmt::Display), + + #[cfg(feature = "kv_unstable_sval")] + /// A structured value from `sval`. + Sval(&'v sval_support::Value), +} + +impl<'v> Inner<'v> { + pub(super) fn visit(&self, visitor: &mut Visitor) -> Result<(), Error> { + match *self { + Inner::Primitive(value) => match value { + Primitive::Signed(value) => visitor.i64(value), + Primitive::Unsigned(value) => visitor.u64(value), + Primitive::Float(value) => visitor.f64(value), + Primitive::Bool(value) => visitor.bool(value), + Primitive::Char(value) => visitor.char(value), + Primitive::Str(value) => visitor.str(value), + Primitive::None => visitor.none(), + }, + Inner::Fill(value) => value.fill(&mut Slot::new(visitor)), + Inner::Debug(value) => visitor.debug(value), + Inner::Display(value) => visitor.display(value), + + #[cfg(feature = "kv_unstable_sval")] + Inner::Sval(value) => visitor.sval(value), + } + } +} + +/// The internal serialization contract. +pub(super) trait Visitor { + fn debug(&mut self, v: &fmt::Debug) -> Result<(), Error>; + fn display(&mut self, v: &fmt::Display) -> Result<(), Error> { + self.debug(&format_args!("{}", v)) + } + + fn u64(&mut self, v: u64) -> Result<(), Error>; + fn i64(&mut self, v: i64) -> Result<(), Error>; + fn f64(&mut self, v: f64) -> Result<(), Error>; + fn bool(&mut self, v: bool) -> Result<(), Error>; + fn char(&mut self, v: char) -> Result<(), Error>; + fn str(&mut self, v: &str) -> Result<(), Error>; + fn none(&mut self) -> Result<(), Error>; + + #[cfg(feature = "kv_unstable_sval")] + fn sval(&mut self, v: &sval_support::Value) -> Result<(), Error>; +} + +#[derive(Clone, Copy)] +pub(super) enum Primitive<'v> { + Signed(i64), + Unsigned(u64), + Float(f64), + Bool(bool), + Char(char), + Str(&'v str), + None, +} + +mod fmt_support { + use super::*; + + impl<'v> kv::Value<'v> { + /// Get a value from a debuggable type. + pub fn from_debug(value: &'v T) -> Self + where + T: fmt::Debug, + { + kv::Value { + inner: Inner::Debug(value), + } + } + + /// Get a value from a displayable type. 
+ pub fn from_display(value: &'v T) -> Self + where + T: fmt::Display, + { + kv::Value { + inner: Inner::Display(value), + } + } + } + + impl<'v> fmt::Debug for kv::Value<'v> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.visit(&mut FmtVisitor(f))?; + + Ok(()) + } + } + + impl<'v> fmt::Display for kv::Value<'v> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.visit(&mut FmtVisitor(f))?; + + Ok(()) + } + } + + struct FmtVisitor<'a, 'b: 'a>(&'a mut fmt::Formatter<'b>); + + impl<'a, 'b: 'a> Visitor for FmtVisitor<'a, 'b> { + fn debug(&mut self, v: &fmt::Debug) -> Result<(), Error> { + v.fmt(self.0)?; + + Ok(()) + } + + fn u64(&mut self, v: u64) -> Result<(), Error> { + self.debug(&format_args!("{:?}", v)) + } + + fn i64(&mut self, v: i64) -> Result<(), Error> { + self.debug(&format_args!("{:?}", v)) + } + + fn f64(&mut self, v: f64) -> Result<(), Error> { + self.debug(&format_args!("{:?}", v)) + } + + fn bool(&mut self, v: bool) -> Result<(), Error> { + self.debug(&format_args!("{:?}", v)) + } + + fn char(&mut self, v: char) -> Result<(), Error> { + self.debug(&format_args!("{:?}", v)) + } + + fn str(&mut self, v: &str) -> Result<(), Error> { + self.debug(&format_args!("{:?}", v)) + } + + fn none(&mut self) -> Result<(), Error> { + self.debug(&format_args!("None")) + } + + #[cfg(feature = "kv_unstable_sval")] + fn sval(&mut self, v: &sval_support::Value) -> Result<(), Error> { + sval_support::fmt(self.0, v) + } + } +} + +#[cfg(feature = "kv_unstable_sval")] +pub(super) mod sval_support { + use super::*; + + extern crate sval; + + impl<'v> kv::Value<'v> { + /// Get a value from a structured type. + pub fn from_sval(value: &'v T) -> Self + where + T: sval::Value, + { + kv::Value { + inner: Inner::Sval(value), + } + } + } + + impl<'v> sval::Value for kv::Value<'v> { + fn stream(&self, s: &mut sval::value::Stream) -> sval::value::Result { + self.visit(&mut SvalVisitor(s)).map_err(Error::into_sval)?; + + Ok(()) + } + } + + pub(in kv::value) use self::sval::Value; + + pub(super) fn fmt(f: &mut fmt::Formatter, v: &sval::Value) -> Result<(), Error> { + sval::fmt::debug(f, v)?; + Ok(()) + } + + impl Error { + fn from_sval(_: sval::value::Error) -> Self { + Error::msg("`sval` serialization failed") + } + + fn into_sval(self) -> sval::value::Error { + sval::value::Error::msg("`sval` serialization failed") + } + } + + struct SvalVisitor<'a, 'b: 'a>(&'a mut sval::value::Stream<'b>); + + impl<'a, 'b: 'a> Visitor for SvalVisitor<'a, 'b> { + fn debug(&mut self, v: &fmt::Debug) -> Result<(), Error> { + self.0.fmt(format_args!("{:?}", v)).map_err(Error::from_sval) + } + + fn u64(&mut self, v: u64) -> Result<(), Error> { + self.0.u64(v).map_err(Error::from_sval) + } + + fn i64(&mut self, v: i64) -> Result<(), Error> { + self.0.i64(v).map_err(Error::from_sval) + } + + fn f64(&mut self, v: f64) -> Result<(), Error> { + self.0.f64(v).map_err(Error::from_sval) + } + + fn bool(&mut self, v: bool) -> Result<(), Error> { + self.0.bool(v).map_err(Error::from_sval) + } + + fn char(&mut self, v: char) -> Result<(), Error> { + self.0.char(v).map_err(Error::from_sval) + } + + fn str(&mut self, v: &str) -> Result<(), Error> { + self.0.str(v).map_err(Error::from_sval) + } + + fn none(&mut self) -> Result<(), Error> { + self.0.none().map_err(Error::from_sval) + } + + fn sval(&mut self, v: &sval::Value) -> Result<(), Error> { + self.0.any(v).map_err(Error::from_sval) + } + } + + #[cfg(test)] + mod tests { + use super::*; + use kv::value::test::Token; + + #[test] + fn test_from_sval() { + 
assert_eq!(kv::Value::from_sval(&42u64).to_token(), Token::Sval); + } + + #[test] + fn test_sval_structured() { + let value = kv::Value::from(42u64); + let expected = vec![sval::test::Token::Unsigned(42)]; + + assert_eq!(sval::test::tokens(value), expected); + } + } +} diff --git a/third_party/rust/log/src/kv/value/mod.rs b/third_party/rust/log/src/kv/value/mod.rs new file mode 100644 index 0000000000..1695afbd16 --- /dev/null +++ b/third_party/rust/log/src/kv/value/mod.rs @@ -0,0 +1,155 @@ +//! Structured values. + +use std::fmt; + +mod internal; +mod impls; + +#[cfg(test)] +pub(in kv) mod test; + +pub use kv::Error; + +use self::internal::{Inner, Visitor, Primitive}; + +/// A type that can be converted into a [`Value`](struct.Value.html). +pub trait ToValue { + /// Perform the conversion. + fn to_value(&self) -> Value; +} + +impl<'a, T> ToValue for &'a T +where + T: ToValue + ?Sized, +{ + fn to_value(&self) -> Value { + (**self).to_value() + } +} + +impl<'v> ToValue for Value<'v> { + fn to_value(&self) -> Value { + Value { + inner: self.inner, + } + } +} + +/// A type that requires extra work to convert into a [`Value`](struct.Value.html). +/// +/// This trait is a more advanced initialization API than [`ToValue`](trait.ToValue.html). +/// It's intended for erased values coming from other logging frameworks that may need +/// to perform extra work to determine the concrete type to use. +pub trait Fill { + /// Fill a value. + fn fill(&self, slot: &mut Slot) -> Result<(), Error>; +} + +impl<'a, T> Fill for &'a T +where + T: Fill + ?Sized, +{ + fn fill(&self, slot: &mut Slot) -> Result<(), Error> { + (**self).fill(slot) + } +} + +/// A value slot to fill using the [`Fill`](trait.Fill.html) trait. +pub struct Slot<'a> { + filled: bool, + visitor: &'a mut Visitor, +} + +impl<'a> fmt::Debug for Slot<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Slot").finish() + } +} + +impl<'a> Slot<'a> { + fn new(visitor: &'a mut Visitor) -> Self { + Slot { + visitor, + filled: false, + } + } + + /// Fill the slot with a value. + /// + /// The given value doesn't need to satisfy any particular lifetime constraints. + /// + /// # Panics + /// + /// Calling `fill` more than once will panic. + pub fn fill(&mut self, value: Value) -> Result<(), Error> { + assert!(!self.filled, "the slot has already been filled"); + self.filled = true; + + value.visit(self.visitor) + } +} + +/// A value in a structured key-value pair. +pub struct Value<'v> { + inner: Inner<'v>, +} + +impl<'v> Value<'v> { + /// Get a value from an internal `Visit`. + fn from_primitive(value: Primitive<'v>) -> Self { + Value { + inner: Inner::Primitive(value), + } + } + + /// Get a value from a fillable slot. 
+ pub fn from_fill(value: &'v T) -> Self + where + T: Fill, + { + Value { + inner: Inner::Fill(value), + } + } + + fn visit(&self, visitor: &mut Visitor) -> Result<(), Error> { + self.inner.visit(visitor) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn fill_value() { + struct TestFill; + + impl Fill for TestFill { + fn fill(&self, slot: &mut Slot) -> Result<(), Error> { + let dbg: &fmt::Debug = &1; + + slot.fill(Value::from_debug(&dbg)) + } + } + + assert_eq!("1", Value::from_fill(&TestFill).to_string()); + } + + #[test] + #[should_panic] + fn fill_multiple_times_panics() { + struct BadFill; + + impl Fill for BadFill { + fn fill(&self, slot: &mut Slot) -> Result<(), Error> { + slot.fill(42.into())?; + slot.fill(6789.into())?; + + Ok(()) + } + } + + let _ = Value::from_fill(&BadFill).to_string(); + } +} diff --git a/third_party/rust/log/src/kv/value/test.rs b/third_party/rust/log/src/kv/value/test.rs new file mode 100644 index 0000000000..c9c03dc420 --- /dev/null +++ b/third_party/rust/log/src/kv/value/test.rs @@ -0,0 +1,81 @@ +// Test support for inspecting Values + +use std::fmt; +use std::str; + +use super::{Value, Error}; +use super::internal; + +#[derive(Debug, PartialEq)] +pub(in kv) enum Token { + U64(u64), + I64(i64), + F64(f64), + Char(char), + Bool(bool), + Str(String), + None, + + #[cfg(feature = "kv_unstable_sval")] + Sval, +} + +#[cfg(test)] +impl<'v> Value<'v> { + pub(in kv) fn to_token(&self) -> Token { + struct TestVisitor(Option); + + impl internal::Visitor for TestVisitor { + fn debug(&mut self, v: &fmt::Debug) -> Result<(), Error> { + self.0 = Some(Token::Str(format!("{:?}", v))); + Ok(()) + } + + fn u64(&mut self, v: u64) -> Result<(), Error> { + self.0 = Some(Token::U64(v)); + Ok(()) + } + + fn i64(&mut self, v: i64) -> Result<(), Error> { + self.0 = Some(Token::I64(v)); + Ok(()) + } + + fn f64(&mut self, v: f64) -> Result<(), Error> { + self.0 = Some(Token::F64(v)); + Ok(()) + } + + fn bool(&mut self, v: bool) -> Result<(), Error> { + self.0 = Some(Token::Bool(v)); + Ok(()) + } + + fn char(&mut self, v: char) -> Result<(), Error> { + self.0 = Some(Token::Char(v)); + Ok(()) + } + + fn str(&mut self, v: &str) -> Result<(), Error> { + self.0 = Some(Token::Str(v.into())); + Ok(()) + } + + fn none(&mut self) -> Result<(), Error> { + self.0 = Some(Token::None); + Ok(()) + } + + #[cfg(feature = "kv_unstable_sval")] + fn sval(&mut self, _: &internal::sval_support::Value) -> Result<(), Error> { + self.0 = Some(Token::Sval); + Ok(()) + } + } + + let mut visitor = TestVisitor(None); + self.visit(&mut visitor).unwrap(); + + visitor.0.unwrap() + } +} diff --git a/third_party/rust/log/src/lib.rs b/third_party/rust/log/src/lib.rs index af4de3a7f8..c79219dc64 100644 --- a/third_party/rust/log/src/lib.rs +++ b/third_party/rust/log/src/lib.rs @@ -48,14 +48,12 @@ //! //! ### Examples //! -//! ```rust -//! # #![allow(unstable)] -//! #[macro_use] -//! extern crate log; -//! +//! ```edition2018 //! # #[derive(Debug)] pub struct Yak(String); //! # impl Yak { fn shave(&mut self, _: u32) {} } //! # fn find_a_razor() -> Result { Ok(1) } +//! use log::{info, warn}; +//! //! pub fn shave_the_yak(yak: &mut Yak) { //! info!(target: "yak_events", "Commencing yak shaving for {:?}", yak); //! @@ -115,9 +113,7 @@ //! logs all messages at the [`Error`][level_link], [`Warn`][level_link] or //! [`Info`][level_link] levels to stdout: //! -//! ```rust -//! extern crate log; -//! +//! ```edition2018 //! use log::{Record, Level, Metadata}; //! //! 
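The `Fill` and `Slot` pair introduced in `kv/value/mod.rs` above lets a caller defer building a `Value` until a visitor actually asks for it. A minimal usage sketch, assuming a consumer crate that enables the `kv_unstable` feature and uses only the items this patch exports (`LazyPid` is an illustrative name, not part of the patch):

```rust
use log::kv::value::{Fill, Slot, Value};
use log::kv::Error;

// Illustrative filler that only produces its value when visited.
struct LazyPid;

impl Fill for LazyPid {
    fn fill(&self, slot: &mut Slot) -> Result<(), Error> {
        // `Slot::fill` may only be called once; a second call panics.
        slot.fill(Value::from(std::process::id() as u64))
    }
}

fn main() {
    // `from_fill` wraps the filler without evaluating it; the pid is
    // only read when the `Display` impl visits the value.
    let value = Value::from_fill(&LazyPid);
    println!("pid = {}", value);
}
```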
struct SimpleLogger; @@ -150,8 +146,7 @@ //! provide a function that wraps a call to [`set_logger`] and //! [`set_max_level`], handling initialization of the logger: //! -//! ```rust -//! # extern crate log; +//! ```edition2018 //! # use log::{Level, Metadata}; //! # struct SimpleLogger; //! # impl log::Log for SimpleLogger { @@ -181,8 +176,7 @@ //! identical to `set_logger` except that it takes a `Box` rather than a //! `&'static Log`: //! -//! ```rust -//! # extern crate log; +//! ```edition2018 //! # use log::{Level, LevelFilter, Log, SetLoggerError, Metadata}; //! # struct SimpleLogger; //! # impl log::Log for SimpleLogger { @@ -221,6 +215,9 @@ //! These features control the value of the `STATIC_MAX_LEVEL` constant. The logging macros check //! this value before logging a message. By default, no levels are disabled. //! +//! Libraries should avoid using the max level features because they're global and can't be changed +//! once they're set. +//! //! For example, a crate can disable trace level logs in debug builds and trace, debug, and info //! level logs in release builds with the following configuration: //! @@ -270,17 +267,17 @@ #![doc( html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://www.rust-lang.org/favicon.ico", - html_root_url = "https://docs.rs/log/0.4.6" + html_root_url = "https://docs.rs/log/0.4.8" )] #![warn(missing_docs)] #![deny(missing_debug_implementations)] -#![cfg_attr(not(feature = "std"), no_std)] +#![cfg_attr(all(not(feature = "std"), not(test)), no_std)] // When compiled for the rustc compiler itself we want to make sure that this is // an unstable crate #![cfg_attr(rustbuild, feature(staged_api, rustc_private))] #![cfg_attr(rustbuild, unstable(feature = "rustc_private", issue = "27812"))] -#[cfg(not(feature = "std"))] +#[cfg(all(not(feature = "std"), not(test)))] extern crate core as std; #[macro_use] @@ -292,15 +289,26 @@ use std::error; use std::fmt; use std::mem; use std::str::FromStr; -use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT}; +use std::sync::atomic::{AtomicUsize, Ordering}; + +// FIXME: ATOMIC_USIZE_INIT was deprecated in rust 1.34. Silence the +// deprecation warning until our MSRV >= 1.24, where we can use the +// replacement const fn `AtomicUsize::new` +#[allow(deprecated)] +use std::sync::atomic::ATOMIC_USIZE_INIT; #[macro_use] mod macros; mod serde; +#[cfg(feature = "kv_unstable")] +pub mod kv; + // The LOGGER static holds a pointer to the global logger. It is protected by // the STATE static which determines whether LOGGER has been initialized yet. 
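With the `kv` module wired into `lib.rs` behind the `kv_unstable` feature, structured pairs travel as a `Source` that a `Visitor` walks. A small sketch of driving those two traits by hand, under the same feature assumption (`CountPairs` and `count` are illustrative names):

```rust
use log::kv::{Error, Key, Source, Value, Visitor};

// Illustrative visitor that just counts the pairs it is shown.
struct CountPairs(usize);

impl<'kvs> Visitor<'kvs> for CountPairs {
    fn visit_pair(&mut self, _key: Key<'kvs>, _value: Value<'kvs>) -> Result<(), Error> {
        self.0 += 1;
        Ok(())
    }
}

fn count(source: &dyn Source) -> usize {
    let mut visitor = CountPairs(0);
    let _ = source.visit(&mut visitor);
    visitor.0
}

fn main() {
    // Tuples and slices of tuples implement `Source` in this patch.
    assert_eq!(count(&("a", 1)), 1);
    assert_eq!(count(&[("a", 1), ("b", 2)][..]), 2);
}
```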
static mut LOGGER: &'static Log = &NopLogger; + +#[allow(deprecated)] static STATE: AtomicUsize = ATOMIC_USIZE_INIT; // There are three different states that we care about: the logger's @@ -310,6 +318,7 @@ const UNINITIALIZED: usize = 0; const INITIALIZING: usize = 1; const INITIALIZED: usize = 2; +#[allow(deprecated)] static MAX_LOG_LEVEL_FILTER: AtomicUsize = ATOMIC_USIZE_INIT; static LOG_LEVEL_NAMES: [&'static str; 6] = ["OFF", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"]; @@ -629,7 +638,7 @@ impl FromStr for LevelFilter { impl fmt::Display for LevelFilter { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "{}", LOG_LEVEL_NAMES[*self as usize]) + fmt.pad(LOG_LEVEL_NAMES[*self as usize]) } } @@ -660,6 +669,22 @@ impl LevelFilter { } } +#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)] +enum MaybeStaticStr<'a> { + Static(&'static str), + Borrowed(&'a str), +} + +impl<'a> MaybeStaticStr<'a> { + #[inline] + fn get(&self) -> &'a str { + match *self { + MaybeStaticStr::Static(s) => s, + MaybeStaticStr::Borrowed(s) => s, + } + } +} + /// The "payload" of a log message. /// /// # Use @@ -678,8 +703,7 @@ impl LevelFilter { /// The following example shows a simple logger that displays the level, /// module path, and message of any `Record` that is passed to it. /// -/// ```rust -/// # extern crate log; +/// ```edition2018 /// struct SimpleLogger; /// /// impl log::Log for SimpleLogger { @@ -710,9 +734,28 @@ impl LevelFilter { pub struct Record<'a> { metadata: Metadata<'a>, args: fmt::Arguments<'a>, - module_path: Option<&'a str>, - file: Option<&'a str>, + module_path: Option>, + file: Option>, line: Option, + #[cfg(feature = "kv_unstable")] + key_values: KeyValues<'a>, +} + +// This wrapper type is only needed so we can +// `#[derive(Debug)]` on `Record`. It also +// provides a useful `Debug` implementation for +// the underlying `Source`. +#[cfg(feature = "kv_unstable")] +#[derive(Clone)] +struct KeyValues<'a>(&'a kv::Source); + +#[cfg(feature = "kv_unstable")] +impl<'a> fmt::Debug for KeyValues<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut visitor = f.debug_map(); + self.0.visit(&mut visitor)?; + visitor.finish() + } } impl<'a> Record<'a> { @@ -749,13 +792,31 @@ impl<'a> Record<'a> { /// The module path of the message. #[inline] pub fn module_path(&self) -> Option<&'a str> { - self.module_path + self.module_path.map(|s| s.get()) + } + + /// The module path of the message, if it is a `'static` string. + #[inline] + pub fn module_path_static(&self) -> Option<&'static str> { + match self.module_path { + Some(MaybeStaticStr::Static(s)) => Some(s), + _ => None, + } } /// The source file containing the message. #[inline] pub fn file(&self) -> Option<&'a str> { - self.file + self.file.map(|s| s.get()) + } + + /// The module path of the message, if it is a `'static` string. + #[inline] + pub fn file_static(&self) -> Option<&'static str> { + match self.file { + Some(MaybeStaticStr::Static(s)) => Some(s), + _ => None, + } } /// The line containing the message. @@ -763,6 +824,32 @@ impl<'a> Record<'a> { pub fn line(&self) -> Option { self.line } + + /// The structued key-value pairs associated with the message. + #[cfg(feature = "kv_unstable")] + #[inline] + pub fn key_values(&self) -> &kv::Source { + self.key_values.0 + } + + /// Create a new [`Builder`](struct.Builder.html) based on this record. 
+ #[cfg(feature = "kv_unstable")] + #[inline] + pub fn to_builder(&self) -> RecordBuilder { + RecordBuilder { + record: Record { + metadata: Metadata { + level: self.metadata.level, + target: self.metadata.target, + }, + args: self.args, + module_path: self.module_path, + file: self.file, + line: self.line, + key_values: self.key_values.clone(), + } + } + } } /// Builder for [`Record`](struct.Record.html). @@ -774,7 +861,7 @@ impl<'a> Record<'a> { /// # Examples /// /// -/// ```rust +/// ```edition2018 /// use log::{Level, Record}; /// /// let record = Record::builder() @@ -789,7 +876,7 @@ impl<'a> Record<'a> { /// /// Alternatively, use [`MetadataBuilder`](struct.MetadataBuilder.html): /// -/// ```rust +/// ```edition2018 /// use log::{Record, Level, MetadataBuilder}; /// /// let error_metadata = MetadataBuilder::new() @@ -825,15 +912,28 @@ impl<'a> RecordBuilder<'a> { /// [`Metadata::builder().build()`]: struct.MetadataBuilder.html#method.build #[inline] pub fn new() -> RecordBuilder<'a> { - RecordBuilder { + #[cfg(feature = "kv_unstable")] + return RecordBuilder { record: Record { args: format_args!(""), metadata: Metadata::builder().build(), module_path: None, file: None, line: None, + key_values: KeyValues(&Option::None::<(kv::Key, kv::Value)>), }, - } + }; + + #[cfg(not(feature = "kv_unstable"))] + return RecordBuilder { + record: Record { + args: format_args!(""), + metadata: Metadata::builder().build(), + module_path: None, + file: None, + line: None, + }, + }; } /// Set [`args`](struct.Record.html#method.args). @@ -867,14 +967,28 @@ impl<'a> RecordBuilder<'a> { /// Set [`module_path`](struct.Record.html#method.module_path) #[inline] pub fn module_path(&mut self, path: Option<&'a str>) -> &mut RecordBuilder<'a> { - self.record.module_path = path; + self.record.module_path = path.map(MaybeStaticStr::Borrowed); + self + } + + /// Set [`module_path`](struct.Record.html#method.module_path) to a `'static` string + #[inline] + pub fn module_path_static(&mut self, path: Option<&'static str>) -> &mut RecordBuilder<'a> { + self.record.module_path = path.map(MaybeStaticStr::Static); self } /// Set [`file`](struct.Record.html#method.file) #[inline] pub fn file(&mut self, file: Option<&'a str>) -> &mut RecordBuilder<'a> { - self.record.file = file; + self.record.file = file.map(MaybeStaticStr::Borrowed); + self + } + + /// Set [`file`](struct.Record.html#method.file) to a `'static` string. + #[inline] + pub fn file_static(&mut self, file: Option<&'static str>) -> &mut RecordBuilder<'a> { + self.record.file = file.map(MaybeStaticStr::Static); self } @@ -885,6 +999,14 @@ impl<'a> RecordBuilder<'a> { self } + /// Set [`key_values`](struct.Record.html#method.key_values) + #[cfg(feature = "kv_unstable")] + #[inline] + pub fn key_values(&mut self, kvs: &'a kv::Source) -> &mut RecordBuilder<'a> { + self.record.key_values = KeyValues(kvs); + self + } + /// Invoke the builder and return a `Record` #[inline] pub fn build(&self) -> Record<'a> { @@ -910,10 +1032,7 @@ impl<'a> RecordBuilder<'a> { /// /// # Examples /// -/// ```rust -/// # #[macro_use] -/// # extern crate log; -/// # +/// ```edition2018 /// use log::{Record, Level, Metadata}; /// /// struct MyLogger; @@ -967,7 +1086,7 @@ impl<'a> Metadata<'a> { /// /// # Example /// -/// ```rust +/// ```edition2018 /// let target = "myApp"; /// use log::{Level, MetadataBuilder}; /// let metadata = MetadataBuilder::new() @@ -1090,7 +1209,7 @@ pub fn max_level() -> LevelFilter { /// An error is returned if a logger has already been set. 
/// /// [`set_logger`]: fn.set_logger.html -#[cfg(feature = "std")] +#[cfg(all(feature = "std", atomic_cas))] pub fn set_boxed_logger(logger: Box) -> Result<(), SetLoggerError> { set_logger_inner(|| unsafe { &*Box::into_raw(logger) }) } @@ -1104,17 +1223,21 @@ pub fn set_boxed_logger(logger: Box) -> Result<(), SetLoggerError> { /// implementations should provide an initialization method that installs the /// logger internally. /// +/// # Availability +/// +/// This method is available even when the `std` feature is disabled. However, +/// it is currently unavailable on `thumbv6` targets, which lack support for +/// some atomic operations which are used by this function. Even on those +/// targets, [`set_logger_racy`] will be available. +/// /// # Errors /// /// An error is returned if a logger has already been set. /// /// # Examples /// -/// ```rust -/// # #[macro_use] -/// # extern crate log; -/// # -/// use log::{Record, Level, Metadata, LevelFilter}; +/// ```edition2018 +/// use log::{error, info, warn, Record, Level, Metadata, LevelFilter}; /// /// static MY_LOGGER: MyLogger = MyLogger; /// @@ -1142,10 +1265,14 @@ pub fn set_boxed_logger(logger: Box) -> Result<(), SetLoggerError> { /// error!("oops"); /// # } /// ``` +/// +/// [`set_logger_racy`]: fn.set_logger_racy.html +#[cfg(atomic_cas)] pub fn set_logger(logger: &'static Log) -> Result<(), SetLoggerError> { set_logger_inner(|| logger) } +#[cfg(atomic_cas)] fn set_logger_inner(make_logger: F) -> Result<(), SetLoggerError> where F: FnOnce() -> &'static Log, @@ -1166,6 +1293,40 @@ where } } +/// A thread-unsafe version of [`set_logger`]. +/// +/// This function is available on all platforms, even those that do not have +/// support for atomics that is needed by [`set_logger`]. +/// +/// In almost all cases, [`set_logger`] should be preferred. +/// +/// # Safety +/// +/// This function is only safe to call when no other logger initialization +/// function is called while this function still executes. +/// +/// This can be upheld by (for example) making sure that **there are no other +/// threads**, and (on embedded) that **interrupts are disabled**. +/// +/// It is safe to use other logging functions while this function runs +/// (including all logging macros). +/// +/// [`set_logger`]: fn.set_logger.html +pub unsafe fn set_logger_racy(logger: &'static Log) -> Result<(), SetLoggerError> { + match STATE.load(Ordering::SeqCst) { + UNINITIALIZED => { + LOGGER = logger; + STATE.store(INITIALIZED, Ordering::SeqCst); + Ok(()) + } + INITIALIZING => { + // This is just plain UB, since we were racing another initialization function + unreachable!("set_logger_racy must not be used with other initialization functions") + } + _ => Err(SetLoggerError(())), + } +} + /// The type returned by [`set_logger`] if [`set_logger`] has already been called. 
/// /// [`set_logger`]: fn.set_logger.html @@ -1227,15 +1388,15 @@ pub fn logger() -> &'static Log { pub fn __private_api_log( args: fmt::Arguments, level: Level, - &(target, module_path, file, line): &(&str, &str, &str, u32), + &(target, module_path, file, line): &(&str, &'static str, &'static str, u32), ) { logger().log( &Record::builder() .args(args) .level(level) .target(target) - .module_path(Some(module_path)) - .file(Some(file)) + .module_path_static(Some(module_path)) + .file_static(Some(file)) .line(Some(line)) .build(), ); @@ -1466,4 +1627,42 @@ mod tests { assert_eq!(record_test.file(), Some("bar")); assert_eq!(record_test.line(), Some(30)); } + + #[test] + #[cfg(feature = "kv_unstable")] + fn test_record_key_values_builder() { + use super::Record; + use kv::{self, Visitor}; + + struct TestVisitor { + seen_pairs: usize, + } + + impl<'kvs> Visitor<'kvs> for TestVisitor { + fn visit_pair( + &mut self, + _: kv::Key<'kvs>, + _: kv::Value<'kvs> + ) -> Result<(), kv::Error> { + self.seen_pairs += 1; + Ok(()) + } + } + + let kvs: &[(&str, i32)] = &[ + ("a", 1), + ("b", 2) + ]; + let record_test = Record::builder() + .key_values(&kvs) + .build(); + + let mut visitor = TestVisitor { + seen_pairs: 0, + }; + + record_test.key_values().visit(&mut visitor).unwrap(); + + assert_eq!(2, visitor.seen_pairs); + } } diff --git a/third_party/rust/log/src/macros.rs b/third_party/rust/log/src/macros.rs index 06ce65a576..bb40b71072 100644 --- a/third_party/rust/log/src/macros.rs +++ b/third_party/rust/log/src/macros.rs @@ -15,10 +15,8 @@ /// /// # Examples /// -/// ```rust -/// # #[macro_use] -/// # extern crate log; -/// use log::Level; +/// ```edition2018 +/// use log::{log, Level}; /// /// # fn main() { /// let data = (42, "Forty-two"); @@ -48,9 +46,9 @@ macro_rules! log { /// /// # Examples /// -/// ```rust -/// # #[macro_use] -/// # extern crate log; +/// ```edition2018 +/// use log::error; +/// /// # fn main() { /// let (err_info, port) = ("No connection", 22); /// @@ -60,11 +58,11 @@ macro_rules! log { /// ``` #[macro_export(local_inner_macros)] macro_rules! error { - (target: $target:expr, $($arg:tt)*) => ( - log!(target: $target, $crate::Level::Error, $($arg)*); + (target: $target:expr, $($arg:tt)+) => ( + log!(target: $target, $crate::Level::Error, $($arg)+); ); - ($($arg:tt)*) => ( - log!($crate::Level::Error, $($arg)*); + ($($arg:tt)+) => ( + log!($crate::Level::Error, $($arg)+); ) } @@ -72,9 +70,9 @@ macro_rules! error { /// /// # Examples /// -/// ```rust -/// # #[macro_use] -/// # extern crate log; +/// ```edition2018 +/// use log::warn; +/// /// # fn main() { /// let warn_description = "Invalid Input"; /// @@ -84,11 +82,11 @@ macro_rules! error { /// ``` #[macro_export(local_inner_macros)] macro_rules! warn { - (target: $target:expr, $($arg:tt)*) => ( - log!(target: $target, $crate::Level::Warn, $($arg)*); + (target: $target:expr, $($arg:tt)+) => ( + log!(target: $target, $crate::Level::Warn, $($arg)+); ); - ($($arg:tt)*) => ( - log!($crate::Level::Warn, $($arg)*); + ($($arg:tt)+) => ( + log!($crate::Level::Warn, $($arg)+); ) } @@ -96,9 +94,9 @@ macro_rules! warn { /// /// # Examples /// -/// ```rust -/// # #[macro_use] -/// # extern crate log; +/// ```edition2018 +/// use log::info; +/// /// # fn main() { /// # struct Connection { port: u32, speed: f32 } /// let conn_info = Connection { port: 40, speed: 3.20 }; @@ -110,11 +108,11 @@ macro_rules! warn { /// ``` #[macro_export(local_inner_macros)] macro_rules! 
info { - (target: $target:expr, $($arg:tt)*) => ( - log!(target: $target, $crate::Level::Info, $($arg)*); + (target: $target:expr, $($arg:tt)+) => ( + log!(target: $target, $crate::Level::Info, $($arg)+); ); - ($($arg:tt)*) => ( - log!($crate::Level::Info, $($arg)*); + ($($arg:tt)+) => ( + log!($crate::Level::Info, $($arg)+); ) } @@ -122,9 +120,9 @@ macro_rules! info { /// /// # Examples /// -/// ```rust -/// # #[macro_use] -/// # extern crate log; +/// ```edition2018 +/// use log::debug; +/// /// # fn main() { /// # struct Position { x: f32, y: f32 } /// let pos = Position { x: 3.234, y: -1.223 }; @@ -135,11 +133,11 @@ macro_rules! info { /// ``` #[macro_export(local_inner_macros)] macro_rules! debug { - (target: $target:expr, $($arg:tt)*) => ( - log!(target: $target, $crate::Level::Debug, $($arg)*); + (target: $target:expr, $($arg:tt)+) => ( + log!(target: $target, $crate::Level::Debug, $($arg)+); ); - ($($arg:tt)*) => ( - log!($crate::Level::Debug, $($arg)*); + ($($arg:tt)+) => ( + log!($crate::Level::Debug, $($arg)+); ) } @@ -147,9 +145,9 @@ macro_rules! debug { /// /// # Examples /// -/// ```rust -/// # #[macro_use] -/// # extern crate log; +/// ```edition2018 +/// use log::trace; +/// /// # fn main() { /// # struct Position { x: f32, y: f32 } /// let pos = Position { x: 3.234, y: -1.223 }; @@ -162,11 +160,11 @@ macro_rules! debug { /// ``` #[macro_export(local_inner_macros)] macro_rules! trace { - (target: $target:expr, $($arg:tt)*) => ( - log!(target: $target, $crate::Level::Trace, $($arg)*); + (target: $target:expr, $($arg:tt)+) => ( + log!(target: $target, $crate::Level::Trace, $($arg)+); ); - ($($arg:tt)*) => ( - log!($crate::Level::Trace, $($arg)*); + ($($arg:tt)+) => ( + log!($crate::Level::Trace, $($arg)+); ) } @@ -178,10 +176,9 @@ macro_rules! 
trace { /// /// # Examples /// -/// ```rust -/// # #[macro_use] -/// # extern crate log; +/// ```edition2018 /// use log::Level::Debug; +/// use log::{debug, log_enabled}; /// /// # fn foo() { /// if log_enabled!(Debug) { diff --git a/third_party/rust/log/src/serde.rs b/third_party/rust/log/src/serde.rs index 176f9f4112..efc9f14a40 100644 --- a/third_party/rust/log/src/serde.rs +++ b/third_party/rust/log/src/serde.rs @@ -1,9 +1,11 @@ #![cfg(feature = "serde")] extern crate serde; +use self::serde::de::{ + Deserialize, DeserializeSeed, Deserializer, EnumAccess, Error, Unexpected, VariantAccess, + Visitor, +}; use self::serde::ser::{Serialize, Serializer}; -use self::serde::de::{Deserialize, DeserializeSeed, Deserializer, Visitor, EnumAccess, - Unexpected, VariantAccess, Error}; use {Level, LevelFilter, LOG_LEVEL_NAMES}; diff --git a/third_party/rust/log/tests/filters.rs b/third_party/rust/log/tests/filters.rs index 84449ccfe8..e4d21a87fa 100644 --- a/third_party/rust/log/tests/filters.rs +++ b/third_party/rust/log/tests/filters.rs @@ -1,8 +1,8 @@ #[macro_use] extern crate log; +use log::{Level, LevelFilter, Log, Metadata, Record}; use std::sync::{Arc, Mutex}; -use log::{Level, LevelFilter, Log, Record, Metadata}; #[cfg(feature = "std")] use log::set_boxed_logger; @@ -30,7 +30,9 @@ impl Log for Logger { } fn main() { - let me = Arc::new(State { last_log: Mutex::new(None) }); + let me = Arc::new(State { + last_log: Mutex::new(None), + }); let a = me.clone(); set_boxed_logger(Box::new(Logger(me))).unwrap(); @@ -56,7 +58,11 @@ fn test(a: &State, filter: LevelFilter) { last(&a, t(Level::Trace, filter)); fn t(lvl: Level, filter: LevelFilter) -> Option { - if lvl <= filter { Some(lvl) } else { None } + if lvl <= filter { + Some(lvl) + } else { + None + } } } diff --git a/third_party/rust/mime/.cargo-checksum.json b/third_party/rust/mime/.cargo-checksum.json index ce2ac48140..220a471312 100644 --- a/third_party/rust/mime/.cargo-checksum.json +++ b/third_party/rust/mime/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"CONTRIBUTING.md":"7a8f1d12eb98bd09c290d31f25b03c71ff78027d9fc468e8782efa7dd3e69f1c","Cargo.toml":"0392e7aaa8950e923802ba61c27e95817f78cf7913a2df9eecf1f602e3428c1b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"df9cfd06d8a44d9a671eadd39ffd97f166481da015a30f45dfd27886209c5922","README.md":"4ac32f1d6d7e1ac9f89f0a6d7d0cbc26f20ef9defdc7b206ef3a77616f493bbf","benches/cmp.rs":"9deb7c222eb69e7c5160aa82d361d4883792be3b557fbf8f7c807b398ba951a1","benches/fmt.rs":"46ec1e7c7970a3eed84b303309a2395ac16d16534ea691db7f361d0016ef0673","benches/parse.rs":"af2b35fc314e39c7fb3fbe6a77b65e54d0f4bd8956950330700028a98513b7d8","src/lib.rs":"3157308653b79f181dbe5c3c04a5efea5b59644908b7c5237fa8d3b06a11fe5f","src/parse.rs":"ab8d8ba0d4a3748844dc09da05167966637c70a97254d1c14cf23271c93277d7"},"package":"3e27ca21f40a310bd06d9031785f4801710d566c184a6e15bad4f1d9b65f9425"} \ No newline at end of file 
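The logging macros above were tightened from `$($arg:tt)*` to `$($arg:tt)+`, so every invocation now has to carry a message, and the 2018-edition doc examples import them as ordinary items instead of using `#[macro_use]`. A short usage sketch (the target name and values are illustrative):

```rust
use log::{debug, info, warn};

fn report(disk_used: u8) {
    // A message is now mandatory; a bare `info!()` no longer matches any rule.
    info!(target: "storage", "scan finished");
    debug!("disk usage at {}%", disk_used);

    if disk_used > 90 {
        warn!("disk almost full: {}%", disk_used);
    }
}

fn main() {
    // Without an installed logger these calls are cheap no-ops.
    report(93);
}
```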
+{"files":{"CONTRIBUTING.md":"7a8f1d12eb98bd09c290d31f25b03c71ff78027d9fc468e8782efa7dd3e69f1c","Cargo.toml":"75e36b40187c8edad0baae326a0903b6b462f1acd0d68102a8e4f006b8802041","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"df9cfd06d8a44d9a671eadd39ffd97f166481da015a30f45dfd27886209c5922","README.md":"4ac32f1d6d7e1ac9f89f0a6d7d0cbc26f20ef9defdc7b206ef3a77616f493bbf","benches/cmp.rs":"9deb7c222eb69e7c5160aa82d361d4883792be3b557fbf8f7c807b398ba951a1","benches/fmt.rs":"46ec1e7c7970a3eed84b303309a2395ac16d16534ea691db7f361d0016ef0673","benches/parse.rs":"af2b35fc314e39c7fb3fbe6a77b65e54d0f4bd8956950330700028a98513b7d8","src/lib.rs":"c848e55a49ae4ed6451e94c8c120451b5031ba2ab87170ed389eeb4731679446","src/parse.rs":"cfe11f611901a581245b091942bb28ef2eec57645b981e1699d247f11c9e6fe3"},"package":"2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d"} \ No newline at end of file diff --git a/third_party/rust/mime/Cargo.toml b/third_party/rust/mime/Cargo.toml index 8c46fca9ed..1f34190e7e 100644 --- a/third_party/rust/mime/Cargo.toml +++ b/third_party/rust/mime/Cargo.toml @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're @@ -12,12 +12,12 @@ [package] name = "mime" -version = "0.3.13" +version = "0.3.16" authors = ["Sean McArthur "] description = "Strongly Typed Mimes" documentation = "https://docs.rs/mime" keywords = ["mime", "media-extensions", "media-types"] license = "MIT/Apache-2.0" repository = "https://github.com/hyperium/mime" -[dependencies.unicase] -version = "2.0" + +[dependencies] diff --git a/third_party/rust/mime/src/lib.rs b/third_party/rust/mime/src/lib.rs index 6a12857ff6..1f24fb1cf0 100644 --- a/third_party/rust/mime/src/lib.rs +++ b/third_party/rust/mime/src/lib.rs @@ -23,14 +23,12 @@ //! } //! ``` -#![doc(html_root_url = "https://docs.rs/mime/0.3.13")] +#![doc(html_root_url = "https://docs.rs/mime/0.3.16")] #![deny(warnings)] #![deny(missing_docs)] #![deny(missing_debug_implementations)] -extern crate unicase; - use std::cmp::Ordering; use std::error::Error; use std::fmt; @@ -75,15 +73,23 @@ pub struct FromStrError { inner: parse::ParseError, } -impl Error for FromStrError { - fn description(&self) -> &str { - "an error occurred while parsing a MIME type" +impl FromStrError { + fn s(&self) -> &str { + "mime parse error" } } impl fmt::Display for FromStrError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}: {}", self.description(), self.inner) + write!(f, "{}: {}", self.s(), self.inner) + } +} + +impl Error for FromStrError { + // Minimum Rust is 1.15, Error::description was still required then + #[allow(deprecated)] + fn description(&self) -> &str { + self.s() } } @@ -206,6 +212,15 @@ impl Mime { Params(inner) } + /// Return a `&str` of the Mime's ["essence"][essence]. 
+ /// + /// [essence]: https://mimesniff.spec.whatwg.org/#mime-type-essence + pub fn essence_str(&self) -> &str { + let end = self.semicolon().unwrap_or(self.source.as_ref().len()); + + &self.source.as_ref()[..end] + } + #[cfg(test)] fn has_params(&self) -> bool { match self.params { @@ -233,24 +248,33 @@ impl Mime { // Mime ============ +fn eq_ascii(a: &str, b: &str) -> bool { + // str::eq_ignore_ascii_case didn't stabilize until Rust 1.23. + // So while our MSRV is 1.15, gotta import this trait. + #[allow(deprecated, unused)] + use std::ascii::AsciiExt; + + a.eq_ignore_ascii_case(b) +} + fn mime_eq_str(mime: &Mime, s: &str) -> bool { if let ParamSource::Utf8(semicolon) = mime.params { if mime.source.as_ref().len() == s.len() { - unicase::eq_ascii(mime.source.as_ref(), s) + eq_ascii(mime.source.as_ref(), s) } else { params_eq(semicolon, mime.source.as_ref(), s) } } else if let Some(semicolon) = mime.semicolon() { params_eq(semicolon, mime.source.as_ref(), s) } else { - unicase::eq_ascii(mime.source.as_ref(), s) + eq_ascii(mime.source.as_ref(), s) } } fn params_eq(semicolon: usize, a: &str, b: &str) -> bool { if b.len() < semicolon + 1 { false - } else if !unicase::eq_ascii(&a[..semicolon], &b[..semicolon]) { + } else if !eq_ascii(&a[..semicolon], &b[..semicolon]) { false } else { // gotta check for quotes, LWS, and for case senstive names @@ -280,7 +304,7 @@ fn params_eq(semicolon: usize, a: &str, b: &str) -> bool { #[allow(deprecated)] { b[..b_idx].trim_left() } }; - if !unicase::eq_ascii(a_name, b_name) { + if !eq_ascii(a_name, b_name) { return false; } sensitive = a_name != CHARSET; @@ -327,7 +351,7 @@ fn params_eq(semicolon: usize, a: &str, b: &str) -> bool { }; if sensitive { - if !unicase::eq_ascii(&a[..a_end], &b[..b_end]) { + if !eq_ascii(&a[..a_end], &b[..b_end]) { return false; } } else { @@ -349,7 +373,7 @@ impl PartialEq for Mime { // This could optimize for when there are no customs parameters. // Any parsed mime has already been lowercased, so if there aren't // any parameters that are case sensistive, this can skip the - // unicase::eq_ascii, and just use a memcmp instead. + // eq_ascii, and just use a memcmp instead. 
(0, _) | (_, 0) => mime_eq_str(self, other.source.as_ref()), (a, b) => a == b, @@ -424,7 +448,7 @@ impl fmt::Display for Mime { fn name_eq_str(name: &Name, s: &str) -> bool { if name.insensitive { - unicase::eq_ascii(name.source, s) + eq_ascii(name.source, s) } else { name.source == s } @@ -903,4 +927,11 @@ mod tests { assert_ne!(param, "abc"); assert_ne!("abc", param); } + + #[test] + fn test_essence_str() { + assert_eq!(TEXT_PLAIN.essence_str(), "text/plain"); + assert_eq!(TEXT_PLAIN_UTF_8.essence_str(), "text/plain"); + assert_eq!(IMAGE_SVG.essence_str(), "image/svg+xml"); + } } diff --git a/third_party/rust/mime/src/parse.rs b/third_party/rust/mime/src/parse.rs index 426537ae5b..d55e5494cb 100644 --- a/third_party/rust/mime/src/parse.rs +++ b/third_party/rust/mime/src/parse.rs @@ -18,8 +18,8 @@ pub enum ParseError { }, } -impl Error for ParseError { - fn description(&self) -> &str { +impl ParseError { + fn s(&self) -> &str { use self::ParseError::*; match *self { @@ -34,13 +34,21 @@ impl Error for ParseError { impl fmt::Display for ParseError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if let ParseError::InvalidToken { pos, byte } = *self { - write!(f, "{}, {:X} at position {}", self.description(), byte, pos) + write!(f, "{}, {:X} at position {}", self.s(), byte, pos) } else { - f.write_str(self.description()) + f.write_str(self.s()) } } } +impl Error for ParseError { + // Minimum Rust is 1.15, Error::description was still required then + #[allow(deprecated)] + fn description(&self) -> &str { + self.s() + } +} + pub fn parse(s: &str) -> Result { if s == "*/*" { return Ok(::STAR_STAR); @@ -95,7 +103,7 @@ pub fn parse(s: &str) -> Result { } // params - let params = try!(params_from_str(s, &mut iter, start)); + let params = params_from_str(s, &mut iter, start)?; let src = match params { ParamSource::Utf8(_) => s.to_ascii_lowercase(), @@ -325,6 +333,7 @@ fn is_restricted_quoted_char(c: u8) -> bool { } #[test] +#[allow(warnings)] // ... 
ranges deprecated fn test_lookup_tables() { for (i, &valid) in TOKEN_MAP.iter().enumerate() { let i = i as u8; diff --git a/third_party/rust/mime_guess/.cargo-checksum.json b/third_party/rust/mime_guess/.cargo-checksum.json index 10673507ad..5652db3fcf 100644 --- a/third_party/rust/mime_guess/.cargo-checksum.json +++ b/third_party/rust/mime_guess/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.lock":"cc465f0deff8c0a8505addc685cfc82f4121446cfec6e781284cbd34ddba4079","Cargo.toml":"c2390b459898eaad40c1c726cb0c7665d1a3abc0ae26693ce4b3fddb117510fc","LICENSE":"6919f1acec82afc721be2d9907b993267f433a44d25d8aedf1003b5f59ebfd46","README.md":"f238798faadf016145e6718fdb2ad1e9607471f10707424135a63c015ac80f5a","benches/benchmark.rs":"2287c7233cf78a9af98b2b53cd20e221e4a785209bd70a87030f1ade34a02507","build.rs":"8e47254c0a927eeb243058cfc0da413f34a958226707c941e8634e4514562f04","examples/rev_map.rs":"0bab6cc20b9eace741c6cc4a8c67ebfa124df1bf213038c5e34976081ffd2ebc","src/impl_bin_search.rs":"32a89409690d57f074f11eb93a9d6e422f73788af63fc29f980e13d1c2d2fa6f","src/impl_phf.rs":"321028cc657364c6f7c5b5f538b0033bcfd6f3b5ef711304d806c0644466a964","src/lib.rs":"8c8db560605b46a13062a168283e8b0f459242718682642b1f167bec686e5606","src/mime_types.rs":"eb75a90bc41baa8169fb85ec8ddf82e99660f2c1aa3a47e29ef9a2af3796b3bc"},"package":"1a0ed03949aef72dbdf3116a383d7b38b4768e6f960528cd6a6044aa9ed68599"} \ No newline at end of file +{"files":{"Cargo.lock":"68df26849ead62e10cc255ca2009ddba651d393be3499aefa24d25538922768f","Cargo.toml":"b7877455bb621228f87ed556f04c007d2704ad3193d1aabeb7526d4006008699","LICENSE":"6919f1acec82afc721be2d9907b993267f433a44d25d8aedf1003b5f59ebfd46","README.md":"8e1db7c6f134caf08edd05732f61de07a322446c58f081cfc0220e7447ab051c","benches/benchmark.rs":"2287c7233cf78a9af98b2b53cd20e221e4a785209bd70a87030f1ade34a02507","build.rs":"8e47254c0a927eeb243058cfc0da413f34a958226707c941e8634e4514562f04","examples/rev_map.rs":"0bab6cc20b9eace741c6cc4a8c67ebfa124df1bf213038c5e34976081ffd2ebc","src/impl_bin_search.rs":"32a89409690d57f074f11eb93a9d6e422f73788af63fc29f980e13d1c2d2fa6f","src/impl_phf.rs":"321028cc657364c6f7c5b5f538b0033bcfd6f3b5ef711304d806c0644466a964","src/lib.rs":"8c8db560605b46a13062a168283e8b0f459242718682642b1f167bec686e5606","src/mime_types.rs":"06223533119bc845dece004db99b2cdc6d43ba5bad19f65628438bc36adb9b2d"},"package":"2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212"} \ No newline at end of file diff --git a/third_party/rust/mime_guess/Cargo.lock b/third_party/rust/mime_guess/Cargo.lock index 5f8d2d9193..cc4a1eae48 100644 --- a/third_party/rust/mime_guess/Cargo.lock +++ b/third_party/rust/mime_guess/Cargo.lock @@ -1,25 +1,23 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
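Back in `mime/src/lib.rs`, the new `essence_str` accessor returns the `type/subtype` portion of a parsed MIME type with any parameters stripped. A minimal usage sketch against the vendored `mime` 0.3.16:

```rust
use mime::Mime;

fn main() {
    // Parameters such as `charset` are not part of the essence.
    let with_params: Mime = "text/plain; charset=utf-8".parse().unwrap();
    assert_eq!(with_params.essence_str(), "text/plain");

    // The predefined constants expose the same accessor.
    assert_eq!(mime::IMAGE_SVG.essence_str(), "image/svg+xml");
}
```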
[[package]] -name = "arrayvec" -version = "0.4.11" +name = "anyhow" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] name = "atty" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ + "hermit-abi 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "autocfg" -version = "0.1.5" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -29,24 +27,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "bstr" -version = "0.2.6" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "regex-automata 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "bumpalo" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "byteorder" -version = "1.3.2" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "cast" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", +] [[package]] name = "cfg-if" @@ -60,138 +66,150 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "cloudabi" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-width 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "criterion" -version = "0.2.11" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", - "cast 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", + "cast 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", - "criterion-plot 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "csv 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.60 
(registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_xoshiro 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rayon 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rayon-core 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)", - "tinytemplate 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "walkdir 2.2.9 (registry+https://github.com/rust-lang/crates.io-index)", + "criterion-plot 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "csv 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "itertools 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "oorandom 11.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "plotters 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", + "rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", + "tinytemplate 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "criterion-plot" -version = "0.3.1" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "cast 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cast 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "itertools 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "crossbeam-deque" -version = "0.6.3" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-epoch 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-epoch 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "crossbeam-epoch" -version = "0.7.2" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.6.6 
(registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "crossbeam-queue" -version = "0.1.2" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "crossbeam-utils" -version = "0.6.6" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ + "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "csv" -version = "1.1.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bstr 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", - "csv-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "ryu 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)", + "bstr 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "csv-core 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "ryu 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "csv-core" -version = "0.1.6" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "either" -version = "1.5.2" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "fuchsia-cprng" -version = "0.1.1" +name = "heck" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unicode-segmentation 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "hermit-abi" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", +] [[package]] name = "itertools" -version = "0.8.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "either 1.5.2 
(registry+https://github.com/rust-lang/crates.io-index)", + "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "itoa" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "js-sys" +version = "0.3.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "lazy_static" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -200,16 +218,26 @@ version = "0.2.60" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] -name = "memchr" -version = "2.2.1" +name = "log" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "maybe-uninit" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "memchr" +version = "2.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "memoffset" -version = "0.5.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -217,121 +245,104 @@ dependencies = [ [[package]] name = "mime" -version = "0.3.13" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "unicase 2.4.0 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] name = "mime_guess" -version = "2.0.1" +version = "2.0.3" dependencies = [ - "criterion 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "mime 0.3.13 (registry+https://github.com/rust-lang/crates.io-index)", - "unicase 2.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "criterion 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", + "unicase 2.6.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "nodrop" -version = "0.1.13" +name = "nom" +version = "4.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", +] [[package]] name = "num-traits" -version = "0.2.8" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "autocfg 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "num_cpus" -version = "1.10.1" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ + "hermit-abi 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "proc-macro2" -version = "0.4.30" +name = "oorandom" +version = "11.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] -name = "quote" -version = 
"0.6.13" +name = "plotters" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", + "js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "web-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "rand_core" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand_core" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "rand_os" -version = "0.1.3" +name = "proc-macro2" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "rand_xoshiro" -version = "0.1.0" +name = "quote" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rayon" -version = "1.1.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-deque 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", - "either 1.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rayon-core 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", + "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "rayon-core 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rayon-core" -version = "1.5.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-deque 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.12.0 
(registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "rdrand" -version = "0.4.0" +name = "regex" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -339,9 +350,14 @@ name = "regex-automata" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "regex-syntax" +version = "0.6.14" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "rustc_version" version = "0.2.3" @@ -352,20 +368,20 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "same-file" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "scopeguard" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -383,37 +399,42 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "serde" -version = "1.0.98" +version = "1.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "serde_derive" -version = "1.0.98" +version = "1.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.15.42 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "serde_json" -version = "1.0.40" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "ryu 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "ryu 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "sourcefile" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "syn" -version = "0.15.42" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 
(registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -421,34 +442,39 @@ name = "textwrap" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-width 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "tinytemplate" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "unicase" -version = "2.4.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "unicode-segmentation" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "unicode-width" -version = "0.1.5" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "unicode-xid" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -456,14 +482,103 @@ name = "version_check" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "version_check" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "walkdir" -version = "2.2.9" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "same-file 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", + "same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-macro 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bumpalo 3.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-shared 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.58" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-macro-support 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-backend 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-shared 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.58" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "wasm-bindgen-webidl" +version = "0.2.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "anyhow 1.0.26 (registry+https://github.com/rust-lang/crates.io-index)", + "heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-backend 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "weedle 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "web-sys" +version = "0.3.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "anyhow 1.0.26 (registry+https://github.com/rust-lang/crates.io-index)", + "js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", + "sourcefile 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", + "wasm-bindgen-webidl 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "weedle" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "nom 4.2.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -482,7 +597,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "winapi-util" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -494,64 +609,78 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" [metadata] -"checksum arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)" = "b8d73f9beda665eaa98ab9e4f7442bd4e7de6652587de55b2525e52e29c1b0ba" -"checksum atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)" = "1803c647a3ec87095e7ae7acfca019e98de5ec9a7d01343f611cf3152ed71a90" -"checksum autocfg 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "22130e92352b948e7e82a49cdb0aa94f2211761117f29e052dd397c1ac33542b" +"checksum anyhow 1.0.26 (registry+https://github.com/rust-lang/crates.io-index)" = "7825f6833612eb2414095684fcf6c635becf3ce97fe48cf6421321e93bfbd53c" +"checksum atty 0.2.14 
(registry+https://github.com/rust-lang/crates.io-index)" = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" "checksum bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3d155346769a6855b86399e9bc3814ab343cd3d62c7e985113d46a0ec3c281fd" -"checksum bstr 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "e0a692f1c740e7e821ca71a22cf99b9b2322dfa94d10f71443befb1797b3946a" -"checksum byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c3dd8985a7111efc5c80b44e23ecdd8c007de8ade3b96595387e812b957cf5" -"checksum cast 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "926013f2860c46252efceabb19f4a6b308197505082c609025aa6706c011d427" +"checksum bstr 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "502ae1441a0a5adb8fbd38a5955a6416b9493e92b465de5e4a9bde6a539c2c48" +"checksum bumpalo 3.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1f359dc14ff8911330a51ef78022d376f25ed00248912803b58f00cb1c27f742" +"checksum byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" +"checksum cast 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4b9434b9a5aa1450faa3f9cb14ea0e8c53bb5d2b3c1bfd1ab4fc03e9f33fbfb0" "checksum cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "b486ce3ccf7ffd79fdeb678eac06a9e6c09fc88d33836340becb8fffe87c5e33" "checksum clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9" -"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -"checksum criterion 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "0363053954f3e679645fc443321ca128b7b950a6fe288cf5f9335cc22ee58394" -"checksum criterion-plot 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "76f9212ddf2f4a9eb2d401635190600656a1f88a932ef53d06e7fa4c7e02fb8e" -"checksum crossbeam-deque 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "05e44b8cf3e1a625844d1750e1f7820da46044ff6d28f4d43e455ba3e5bb2c13" -"checksum crossbeam-epoch 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "fedcd6772e37f3da2a9af9bf12ebe046c0dfe657992377b4df982a2b54cd37a9" -"checksum crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b" -"checksum crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6" -"checksum csv 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "37519ccdfd73a75821cac9319d4fce15a81b9fcf75f951df5b9988aa3a0af87d" -"checksum csv-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "9b5cadb6b25c77aeff80ba701712494213f4a8418fcda2ee11b6560c3ad0bf4c" -"checksum either 1.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5527cfe0d098f36e3f8839852688e63c8fff1c90b2b405aef730615f9a7bcf7b" -"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" -"checksum itertools 0.8.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "5b8467d9c1cebe26feb08c640139247fac215782d35371ade9a2136ed6085358" -"checksum itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "501266b7edd0174f8530248f87f99c88fbe60ca4ef3dd486835b8d8d53136f7f" -"checksum lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bc5729f27f159ddd61f4df6228e827e86643d4d3e7c32183cb30a1c08f604a14" +"checksum criterion 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1fc755679c12bda8e5523a71e4d654b6bf2e14bd838dfc48cde6559a05caf7d1" +"checksum criterion-plot 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a01e15e0ea58e8234f96146b1f91fa9d0e4dd7a38da93ff7a75d42c0b9d3a545" +"checksum crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285" +"checksum crossbeam-epoch 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" +"checksum crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c695eeca1e7173472a32221542ae469b3e9aac3a4fc81f7696bcad82029493db" +"checksum crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" +"checksum csv 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "00affe7f6ab566df61b4be3ce8cf16bc2576bca0963ceb0955e45d514bf9a279" +"checksum csv-core 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" +"checksum either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3" +"checksum heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" +"checksum hermit-abi 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "1010591b26bbfe835e9faeabeb11866061cc7dcebffd56ad7d0942d0e61aefd8" +"checksum itertools 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f56a2d0bc861f9165be4eb3442afd3c236d8a98afd426f65d92324ae1091a484" +"checksum itoa 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e" +"checksum js-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)" = "7889c7c36282151f6bf465be4700359318aef36baa951462382eae49e9577cf9" +"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" "checksum libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)" = "d44e80633f007889c7eff624b709ab43c92d708caad982295768a7b13ca3b5eb" -"checksum memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "88579771288728879b57485cc7d6b07d648c9f0141eb955f8ab7f9d45394468e" -"checksum memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ce6075db033bbbb7ee5a0bbd3a3186bbae616f57fb001c485c7ff77955f8177f" -"checksum mime 0.3.13 (registry+https://github.com/rust-lang/crates.io-index)" = "3e27ca21f40a310bd06d9031785f4801710d566c184a6e15bad4f1d9b65f9425" -"checksum nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9667ddcc6cc8a43afc9b7917599d7216aa09c463919ea32c59ed6cac8bc945" -"checksum num-traits 0.2.8 
(registry+https://github.com/rust-lang/crates.io-index)" = "6ba9a427cfca2be13aa6f6403b0b7e7368fe982bfa16fccc450ce74c46cd9b32" -"checksum num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bcef43580c035376c0705c42792c294b66974abbfd2789b511784023f71f3273" -"checksum proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)" = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" -"checksum quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" -"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -"checksum rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d0e7a549d590831370895ab7ba4ea0c1b6b011d106b5ff2da6eee112615e6dc0" -"checksum rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" -"checksum rand_xoshiro 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "03b418169fb9c46533f326efd6eed2576699c44ca92d3052a066214a8d828929" -"checksum rayon 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a4b0186e22767d5b9738a05eab7c6ac90b15db17e5b5f9bd87976dd7d89a10a4" -"checksum rayon-core 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ebbe0df8435ac0c397d467b6cad6d25543d06e8a019ef3f6af3c384597515bd2" -"checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" +"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" +"checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" +"checksum memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" +"checksum memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "75189eb85871ea5c2e2c15abbdd541185f63b408415e5051f5cac122d8c774b9" +"checksum mime 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)" = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +"checksum nom 4.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2ad2a91a8e869eeb30b9cb3119ae87773a8f4ae617f41b1eb9c154b2905f7bd6" +"checksum num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096" +"checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6" +"checksum oorandom 11.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ebcec7c9c2a95cacc7cd0ecb89d8a8454eca13906f6deb55258ffff0adeb9405" +"checksum plotters 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "4e3bb8da247d27ae212529352020f3e5ee16e83c0c258061d27b08ab92675eeb" +"checksum proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)" = "6c09721c6781493a2a492a96b5a5bf19b65917fe6728884e7c44dd0c60ca3435" +"checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" +"checksum rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"db6ce3297f9c85e16621bb8cca38a06779ffc31bb8184e1be4bed2be4678a098" +"checksum rayon-core 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "08a89b46efaf957e52b18062fb2f4660f8b8a4dde1807ca002690868ef2c85a9" +"checksum regex 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "322cf97724bea3ee221b78fe25ac9c46114ebb51747ad5babd51a2fc6a8235a8" "checksum regex-automata 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "92b73c2a1770c255c240eaa4ee600df1704a38dc3feaa6e949e7fcd4f8dc09f9" +"checksum regex-syntax 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)" = "b28dfe3fe9badec5dbf0a79a9cccad2cfc2ab5484bdb3e44cbd1ae8b3ba2be06" "checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -"checksum ryu 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c92464b447c0ee8c4fb3824ecc8383b81717b9f1e74ba2e72540aef7b9f82997" -"checksum same-file 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "585e8ddcedc187886a30fa705c47985c3fa88d06624095856b36ca0b82ff4421" -"checksum scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d" +"checksum ryu 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bfa8506c1de11c9c4e4c38863ccbe02a305c8188e85a05a784c9e11e1c3910c8" +"checksum same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +"checksum scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" "checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" "checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" -"checksum serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)" = "7fe5626ac617da2f2d9c48af5515a21d5a480dbd151e01bb1c355e26a3e68113" -"checksum serde_derive 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)" = "01e69e1b8a631f245467ee275b8c757b818653c6d704cdbcaeb56b56767b529c" -"checksum serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)" = "051c49229f282f7c6f3813f8286cc1e3323e8051823fce42c7ea80fe13521704" -"checksum syn 0.15.42 (registry+https://github.com/rust-lang/crates.io-index)" = "eadc09306ca51a40555dd6fc2b415538e9e18bc9f870e47b1a524a79fe2dcf5e" +"checksum serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)" = "414115f25f818d7dfccec8ee535d76949ae78584fc4f79a6f45a904bf8ab4449" +"checksum serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)" = "128f9e303a5a29922045a830221b8f78ec74a5f544944f3d5984f8ec3895ef64" +"checksum serde_json 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)" = "9371ade75d4c2d6cb154141b9752cf3781ec9c05e0e5cf35060e1e70ee7b9c25" +"checksum sourcefile 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4bf77cb82ba8453b42b6ae1d692e4cdc92f9a47beaf89a847c8be83f4e328ad3" +"checksum syn 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)" = "123bd9499cfb380418d509322d7a6d52e5315f064fe4b3ad18a53d6b92c07859" "checksum textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -"checksum tinytemplate 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4574b75faccaacddb9b284faecdf0b544b80b6b294f3d062d325c5726a209c20" -"checksum unicase 2.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a84e5511b2a947f3ae965dcb29b13b7b1691b6e7332cf5dbc1744138d5acb7f6" -"checksum unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "882386231c45df4700b275c7ff55b6f3698780a650026380e72dabe76fa46526" -"checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" +"checksum tinytemplate 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "57a3c6667d3e65eb1bc3aed6fd14011c6cbc3a0665218ab7f5daf040b9ec371a" +"checksum unicase 2.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +"checksum unicode-segmentation 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" +"checksum unicode-width 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "caaa9d531767d1ff2150b9332433f32a24622147e5ebb1f26409d5da67afd479" +"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" "checksum version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" -"checksum walkdir 2.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "9658c94fa8b940eab2250bd5a457f9c48b748420d71293b165c8cdbe2f55f71e" +"checksum version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce" +"checksum walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d" +"checksum wasm-bindgen 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "5205e9afdf42282b192e2310a5b463a6d1c1d774e30dc3c791ac37ab42d2616c" +"checksum wasm-bindgen-backend 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "11cdb95816290b525b32587d76419facd99662a07e59d3cdb560488a819d9a45" +"checksum wasm-bindgen-macro 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "574094772ce6921576fb6f2e3f7497b8a76273b6db092be18fc48a082de09dc3" +"checksum wasm-bindgen-macro-support 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "e85031354f25eaebe78bb7db1c3d86140312a911a106b2e29f9cc440ce3e7668" +"checksum wasm-bindgen-shared 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "f5e7e61fc929f4c0dddb748b102ebf9f632e2b8d739f2016542b4de2965a9601" +"checksum wasm-bindgen-webidl 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "ef012a0d93fc0432df126a8eaf547b2dce25a8ce9212e1d3cbeef5c11157975d" +"checksum web-sys 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)" = "aaf97caf6aa8c2b1dac90faf0db529d9d63c93846cca4911856f78a83cebf53b" +"checksum weedle 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3bb43f70885151e629e2a19ce9e50bd730fd436cfd4b666894c9ce4de9141164" "checksum winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "f10e386af2b13e47c89e7236a7a14a086791a2b88ebad6df9bf42040195cf770" "checksum 
winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -"checksum winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7168bab6e1daee33b4557efd0e95d5ca70a03706d39fa5f3fe7a236f584b03c9" +"checksum winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4ccfbf554c6ad11084fb7517daca16cfdcaccbdadba4fc336f032a8b12c2ad80" "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/third_party/rust/mime_guess/Cargo.toml b/third_party/rust/mime_guess/Cargo.toml index 5061b79060..2718d6dda0 100644 --- a/third_party/rust/mime_guess/Cargo.toml +++ b/third_party/rust/mime_guess/Cargo.toml @@ -12,7 +12,7 @@ [package] name = "mime_guess" -version = "2.0.1" +version = "2.0.3" authors = ["Austin Bonander "] description = "A simple crate for detection of a file's MIME type by its extension." documentation = "https://docs.rs/mime_guess/" @@ -34,7 +34,7 @@ version = "0.3" [dependencies.unicase] version = "2.4.0" [dev-dependencies.criterion] -version = "0.2" +version = "0.3" [build-dependencies.unicase] version = "2.4.0" diff --git a/third_party/rust/mime_guess/README.md b/third_party/rust/mime_guess/README.md index 9723c00fba..c1436ec3a2 100644 --- a/third_party/rust/mime_guess/README.md +++ b/third_party/rust/mime_guess/README.md @@ -12,7 +12,7 @@ Uses a static map of known file extension -> MIME type mappings. ### Versioning Due to a mistaken premature release, `mime_guess` currently publicly depends on a pre-1.0 `mime`, -which means `mime` upgrades are breaking changes and necessitates a major version bump. +which means `mime` upgrades are breaking changes and necessitate a major version bump. Refer to the following table to find a version of `mime_guess` which matches your version of `mime`: | `mime` version | `mime_guess` version | @@ -49,30 +49,26 @@ The latter is only for consistency's sake; the search is case-insensitive. Simply add or update the appropriate string pair(s) to make the correction(s) needed. Run `cargo test` to make sure the library continues to work correctly. -#### (Important! Updated as of 2.0.0) Citing the corrected MIME type +#### Important! Citing the corrected MIME type When opening a pull request, please include a link to an official document or RFC noting -the correct MIME type for the file type in question **as a comment next to the addition**. -The latter is a new requirement as of 2.0.0 which is intended to make auditing easier in the future. -Bulk additions may request to omit this, although please provide a good reason. +the correct MIME type for the file type in question **in the commit message** so +that the commit history can be used as an audit trail. Though we're only guessing here, we like to be as correct as we can. It makes it much easier to vet your contribution if we don't have to search for corroborating material. -#### Providing citations for existing types -Historically, citations were only required in pull requests for additions -or corrections to media types; they are now required to be provided in-line -next to the mapping for easier auditing. - -If anyone is looking for busy work, finding and adding citations for existing mappings would be an easy -way to get a few pull requests in. See the issue tracker for more information. 
- #### Multiple MIME types per extension -As of `2.0.0`, multiple MIME types per extension are supported. The first MIME type in the list for a given -extension should be the most "correct" so users who only care about getting a single MIME type can use the `first*()` methods. +As of `2.0.0`, multiple MIME types per extension are supported. The first MIME type in the list for +a given extension should be the most "correct" so users who only care about getting a single MIME +type can use the `first*()` methods. + +The definition of "correct" is open to debate, however. In the author's opinion this should be +whatever is defined by the latest IETF RFC for the given file format, or otherwise explicitly +supercedes all others. -The defintion of "correct" is open to debate, however. In the author's opinion this should be whatever is defined by the latest IETF RFC -for the given file format, or otherwise explicitly supercedes all others. +If an official IANA registration replaces an older "experimental" style media type, please +place the new type before the old type in the list, but keep the old type for reference. #### Changes to the API or operation of the crate diff --git a/third_party/rust/mime_guess/src/mime_types.rs b/third_party/rust/mime_guess/src/mime_types.rs index 79479322f5..244b4c819c 100644 --- a/third_party/rust/mime_guess/src/mime_types.rs +++ b/third_party/rust/mime_guess/src/mime_types.rs @@ -1,14 +1,15 @@ -// Probably not exhaustive; keep updated. /// A mapping of known file extensions and their MIME types. /// /// Required to be sorted lexicographically by extension for ease of maintenance. /// -/// Multiple MIME types per extension are allowed but MUST be adjacent to each other; the -/// order is arbitrary but the first should be the most prevalent by most recent RFC declaration -/// or explicit succession of other media types. +/// Multiple MIME types per extension are supported; the order is arbitrary but the first should be +/// the most prevalent by most recent RFC declaration or explicit succession of other media types. +/// +/// NOTE: when adding or modifying entries, please include a citation in the commit message. +/// If a media type for an extension changed, please keep the old entry but add the new one before +/// it in the slice literal, e.g.: +/// /// -/// As of release 2.0.0, new and modified mappings should have citations provided inline -/// in order to provide an audit trail that's easier to follow than Git commit history. /// /// Sourced from: /// https://github.com/samuelneff/MimeTypeMap/blob/master/src/MimeTypes/MimeTypeMap.cs @@ -649,7 +650,7 @@ pub static MIME_TYPES: &[(&str, &[&str])] = &[ ("mc1", &["application/vnd.medcalcdata"]), ("mcd", &["application/vnd.mcd"]), ("mcurl", &["text/vnd.curl.mcurl"]), - ("md", &["text/x-markdown"]), + ("md", &["text/markdown", "text/x-markdown"]), ("mda", &["application/msaccess"]), ("mdb", &["application/x-msaccess"]), ("mde", &["application/msaccess"]), diff --git a/third_party/rust/mio-named-pipes/Cargo.toml b/third_party/rust/mio-named-pipes/Cargo.toml index 3af4ea2eb8..56bee020f3 100644 --- a/third_party/rust/mio-named-pipes/Cargo.toml +++ b/third_party/rust/mio-named-pipes/Cargo.toml @@ -13,7 +13,7 @@ Windows named pipe bindings for mio. 
[target.'cfg(windows)'.dependencies] kernel32-sys = "0.2" -log = "0.3" +log = "0.4" mio = "0.6.5" miow = "0.2" winapi = "0.2" diff --git a/third_party/rust/mp4parse_fallible/.cargo-checksum.json b/third_party/rust/mp4parse_fallible/.cargo-checksum.json index e1dfba567c..f0dc856fb7 100644 --- a/third_party/rust/mp4parse_fallible/.cargo-checksum.json +++ b/third_party/rust/mp4parse_fallible/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"7e0306d977c63a491bea39dde354455f23c7eb283abf874d0122a664bbc082d3","README":"1ff38c3749ba83c9b364c6bb35b95525dd956449ec7386a50faf8fcfdd5a7de4","lib.rs":"3c54447b497f808ab7fa7659f1379acf4c5c3def37fc4cc7778281273b84eabb"},"package":"6626c2aef76eb8f984eef02e475883d3fe9112e114720446c5810fc5f045cd30"} \ No newline at end of file +{"files":{"CODE_OF_CONDUCT.md":"902d5357af363426631d907e641e220b3ec89039164743f8442b3f120479b7cf","Cargo.toml":"2edecc4249f6ff011255fe3c92892050e466246b23ee6d002243b0df760625c7","LICENSE":"fab3dd6bdab226f1c08630b1dd917e11fcb4ec5e1e020e2c16f83a0a13863e85","README":"6f28a5c89ff7c018760402038a991a581771b8f66869268a7288f64915f192a6","lib.rs":"70f5bec52c586809882a1edce407552e8a5c3d0f126eaa92abd676484395cfc8"},"package":"704f773471ac3e7110427b6bdf93184932b19319c9b7717688da5424e519b10a"} \ No newline at end of file diff --git a/third_party/rust/mp4parse_fallible/CODE_OF_CONDUCT.md b/third_party/rust/mp4parse_fallible/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..498baa3fb0 --- /dev/null +++ b/third_party/rust/mp4parse_fallible/CODE_OF_CONDUCT.md @@ -0,0 +1,15 @@ +# Community Participation Guidelines + +This repository is governed by Mozilla's code of conduct and etiquette guidelines. +For more details, please read the +[Mozilla Community Participation Guidelines](https://www.mozilla.org/about/governance/policies/participation/). + +## How to Report +For more information on how to report violations of the Community Participation Guidelines, please read our '[How to Report](https://www.mozilla.org/about/governance/policies/participation/reporting/)' page. + + diff --git a/third_party/rust/mp4parse_fallible/Cargo.toml b/third_party/rust/mp4parse_fallible/Cargo.toml index 27b0974fb5..c1e1cf4b24 100644 --- a/third_party/rust/mp4parse_fallible/Cargo.toml +++ b/third_party/rust/mp4parse_fallible/Cargo.toml @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're @@ -12,7 +12,7 @@ [package] name = "mp4parse_fallible" -version = "0.0.1" +version = "0.0.3" authors = ["The Servo Project Developers"] description = "Fallible replacement for Vec" documentation = "https://docs.rs/mp4parse_fallible/" diff --git a/third_party/rust/mp4parse_fallible/LICENSE b/third_party/rust/mp4parse_fallible/LICENSE new file mode 100644 index 0000000000..14e2f777f6 --- /dev/null +++ b/third_party/rust/mp4parse_fallible/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. 
"Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. 
Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. 
If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. 
If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/third_party/rust/mp4parse_fallible/README b/third_party/rust/mp4parse_fallible/README index 35bcb75cb1..fa27b5daeb 100644 --- a/third_party/rust/mp4parse_fallible/README +++ b/third_party/rust/mp4parse_fallible/README @@ -1,5 +1,5 @@ This is from https://github.com/servo/servo/tree/master/components/fallible -with modificaion for mp4 demuxer. +with modification for mp4 demuxer. The purpose of this crate is to solve infallible memory allocation problem which causes OOM easily on win32. This is more like a temporary solution. diff --git a/third_party/rust/mp4parse_fallible/lib.rs b/third_party/rust/mp4parse_fallible/lib.rs index a3ab075c8a..d82267882c 100644 --- a/third_party/rust/mp4parse_fallible/lib.rs +++ b/third_party/rust/mp4parse_fallible/lib.rs @@ -15,9 +15,17 @@ pub trait FallibleVec { /// Err(()) if it fails, which can only be due to lack of memory. fn try_push(&mut self, value: T) -> Result<(), ()>; - /// Expand the vector size. Return Ok(()) on success, Err(()) if it - /// fails. - fn try_reserve(&mut self, new_cap: usize) -> Result<(), ()>; + /// Reserves capacity for at least `additional` more elements to + /// be inserted in the vector. Does nothing if capacity is already + /// sufficient. 
Return Ok(()) on success, Err(()) if it fails either + /// due to lack of memory, or overflowing the `usize` used to store + /// the capacity. + fn try_reserve(&mut self, additional: usize) -> Result<(), ()>; + + /// Clones and appends all elements in a slice to the Vec. + /// Returns Ok(()) on success, Err(()) if it fails, which can + /// only be due to lack of memory. + fn try_extend_from_slice(&mut self, other: &[T]) -> Result<(), ()> where T: Clone; } ///////////////////////////////////////////////////////////////// @@ -39,10 +47,21 @@ impl FallibleVec for Vec { } #[inline] - fn try_reserve(&mut self, cap: usize) -> Result<(), ()> { - let new_cap = cap + self.capacity(); - try_extend_vec(self, new_cap)?; - debug_assert!(self.capacity() == new_cap); + fn try_reserve(&mut self, additional: usize) -> Result<(), ()> { + let available = self.capacity().checked_sub(self.len()).expect("capacity >= len"); + if additional > available { + let increase = additional.checked_sub(available).expect("additional > available"); + let new_cap = self.capacity().checked_add(increase).ok_or(())?; + try_extend_vec(self, new_cap)?; + debug_assert!(self.capacity() == new_cap); + } + Ok(()) + } + + #[inline] + fn try_extend_from_slice(&mut self, other: &[T]) -> Result<(), ()> where T: Clone { + FallibleVec::try_reserve(self, other.len())?; + self.extend_from_slice(other); Ok(()) } } @@ -83,10 +102,46 @@ fn try_extend_vec(vec: &mut Vec, new_cap: usize) -> Result<(), ()> { } #[test] -fn oom_test() { +fn oom() { let mut vec: Vec = Vec::new(); - match vec.try_reserve(std::usize::MAX) { + match FallibleVec::try_reserve(&mut vec, std::usize::MAX) { Ok(_) => panic!("it should be OOM"), _ => (), } } + +#[test] +fn try_reserve() { + let mut vec = vec![1]; + let old_cap = vec.capacity(); + let new_cap = old_cap + 1; + FallibleVec::try_reserve(&mut vec, new_cap).unwrap(); + assert!(vec.capacity() >= new_cap); +} + +#[test] +fn try_reserve_idempotent() { + let mut vec = vec![1]; + let old_cap = vec.capacity(); + let new_cap = old_cap + 1; + FallibleVec::try_reserve(&mut vec, new_cap).unwrap(); + let cap_after_reserve = vec.capacity(); + FallibleVec::try_reserve(&mut vec, new_cap).unwrap(); + assert_eq!(cap_after_reserve, vec.capacity()); +} + +#[test] +fn capacity_overflow() { + let mut vec = vec![1]; + match FallibleVec::try_reserve(&mut vec, std::usize::MAX) { + Ok(_) => panic!("capacity calculation should overflow"), + _ => (), + } +} + +#[test] +fn extend_from_slice() { + let mut vec = b"foo".to_vec(); + FallibleVec::try_extend_from_slice(&mut vec, b"bar").unwrap(); + assert_eq!(&vec, b"foobar"); +} diff --git a/third_party/rust/nom/.cargo-checksum.json b/third_party/rust/nom/.cargo-checksum.json index 3493fcb4ab..b236aa3bb5 100644 --- a/third_party/rust/nom/.cargo-checksum.json +++ b/third_party/rust/nom/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{"CHANGELOG.md":"cb50e17fee44939d130a69cce1ceedd2db80bb3034578d45e8a22380e6efa2c0","Cargo.toml":"33304918465e1a777631d680fccbc023a58809b951681ff10913d0cfe498f98c","LICENSE":"568d0ae12e18bf9dda98fcd838732321852abdb557c24900d474e71f8fd29f4e","src/bits.rs":"1d6d9a683548caa09d7cec76d47f69ae4bb895fc69b1fad3799c27181713ba20","src/branch.rs":"4d4202f1211bb9b5cfc5970d604389d57a64fdfd222fd228d86f669e5bcc9d3c","src/bytes.rs":"78bf6b07638d387870e588e874f664a473f1e99ded1725b60ccc2dc2d8c2d850","src/character.rs":"9c85a19751287838d950f5571b6e03b4a3c864fd64c077f6a12c1d04822d28be","src/internal.rs":"b29292b7d8edd9bc78317d1b0ba588ce09b3b61326789773fb0388edb260f504","src/lib.rs":"61193663cf53b51d62be70ec64cbf73c3705a9c8caf6b2cf3cca1045e76328e1","src/macros.rs":"27b2c8aabf7f560f379dbacfba4426eb4e7c2c71d2be2ee4081786722e67aeb0","src/methods.rs":"bf9710423ffaccce5c511de992465ac589e4bb3d573b60d64f2cd884933964f1","src/multi.rs":"89057e25becebc6c6869a21261ebbee3c42671d034a369bc630531c575b2f72c","src/nom.rs":"bdd5d435289250a7ef98a69e2f6c023a2fd0797be590b069826876caa04f9530","src/regexp.rs":"b88585dc652dca12174ecaebc1ffe6446c784bfd2f2899c8c0748afcc557b051","src/sequence.rs":"20ad36dfa8f0b8838125bd516b5ef0eeee70861d2ff2051da532e363a559db20","src/simple_errors.rs":"5cd1df94219baa35a82b3b3da0f5f9714a112d7f583e7248813a020b68d67310","src/str.rs":"ad6194cd55e87989bfa73764db6307203db190d4e0d99f14f39ac5960b1f5970","src/traits.rs":"fcd2f72c0cb067afb9e94ea481781b17af0cd9436006c64846396d67aee5e1eb","src/types.rs":"748ab405c75994130c30e1003ec44a754be6b2e35106530c1b75ec1c4ba56479","src/util.rs":"a2083616a17f19f1592d95a3f67fbe3ad9d3e822643bbb9b3010ab3ef721d23d","src/verbose_errors.rs":"02350485ee6dd71712fafdc17e1e28ce830b3ce46e7a9780ab8a16cdd9f9a130","src/whitespace.rs":"783be5b626f4d62a95899a011ccd95d3d95e806a535ab0b08a34f8b1394bf403","tests/arithmetic.rs":"07aea9907e064460edf0e2428aaeb2eeb4d96c63e6652a420327bf583665c079","tests/arithmetic_ast.rs":"8323b7a299c9506d21ceb798e11a0101d10f9941f03786274c9b20b12694beac","tests/blockbuf-arithmetic.rs":"099fdf75da97ae032006d8c82ea2207265c5161a08d1370e1ddddb01e73afaf4","tests/complete_arithmetic.rs":"0066d3e5793dcb1441e84303d31ffd3d3c26976e1662929cdde00b3573c9e01c","tests/complete_float.rs":"72898d6aab275270665296a40fea969ffcdfdb2aa4af6600c7f535a634e3d7eb","tests/css.rs":"2eb6e94dd7e639a78d92f716edee223f4c23f8a39610727af57baaad75f80f7f","tests/custom_errors.rs":"d88ecb97f85b289eb2b7daec2d0b8040419eee587b11f06342a868e16d39ef97","tests/float.rs":"205648feac5af962d542c7b3d2ca3085a11b6e804ec6fe3ec94b7dc8822dd5f8","tests/inference.rs":"55a4ea61fdc524b509c92edae5c3c9a5632e5421d44773a8f654928470d6e34c","tests/ini.rs":"6e808885f7842620b6fdafe36e1f0ef5efb0bdbdb4c494cb207f82564a23804d","tests/ini_str.rs":"c1e38ebb1b3cdf8a79495d7ce858f3f1a105a981120d250ae8ad704244a4b42b","tests/issues.rs":"bf3af0c5af2322f8ef99c2c864294d45a2bae42b5f77cbfec476d55139a8a7ca","tests/json.rs":"84343598f6461226c4aea8316274ab8e0eddcb142efdb23d83a55e2920842a92","tests/mp4.rs":"5108a2fb52bf19684608a4dbc1b3538e5ddbb43a3543308e080091c917bd3c72","tests/multiline.rs":"76c9f0f17af0dcb9f0be0cf918d8731e4286514086008eb9bd57438ccf557280","tests/named_args.rs":"aaa0f30c01006fb599b0ad239b8d97cac3ed73e1e4390f1460dff5a7985050ec","tests/overflow.rs":"b330741e593b9107666b3eb63b8758de715586f42a10650960ed60449a2914e4","tests/reborrow_fold.rs":"362d34561b0278f163d21d0ee451f6e0b938156535f4bb08791df5709532d9e7","tests/test1.rs":"06fc9e52638f16bfc3ef69cd26b927e0cf55706d6f132ab7c0f1072208475853"},"package":"9c349f68f25f596b9f44cf0e7c6975
2a5c633b0550c3ff849518bfba0233774a"} \ No newline at end of file +{"files":{".travis.yml":"124c5613de02fe2088a7410a7057b0e0ed49dcbc8509704b690de98e83b289e0","CHANGELOG.md":"f4015ae48bb8f7c672c4fdf1ca4bdde5910f1b423ddc12e9fe80395911759d3e","Cargo.lock":"d7985e783bf7275009ab524f13aac37adc7291838a1956e1f092413ff3b8ea04","Cargo.toml":"72afa6607971579c7009dff34b6b334d4893b1a9f1983cd80364c5e318e4ec2b","LICENSE":"4dbda04344456f09a7a588140455413a9ac59b6b26a1ef7cdf9c800c012d87f0","build.rs":"fd66799ca3bd6a83b10f18a62e6ffc3b1ac94074fe65de4e4c1c447bf71d6ebb","src/bits/complete.rs":"8a60ae4cd6aaf32cb232b598b1a3dda7858c619944ba12ebdb01a31c75243293","src/bits/macros.rs":"8d9ba23f237b4fc01e3b2106002d0f6d59930a42f34d80662c61e0057dfb4a5b","src/bits/mod.rs":"4ca0148b4ef2de4e88796da7831eaa5c4fcbc5515a101eae1b0bc4853c80b5e7","src/bits/streaming.rs":"7c587808476edee57caeccca7dccd744bdfdbb13ae1156400fb4961980fa325d","src/branch/macros.rs":"8b885185725c16369d90954fed8450070bcd4bf8ae7de1df1bb46bb378450d71","src/branch/mod.rs":"d0c871ad7b74428ddccef445a10254249c3907528005a00275e8eb6516255f2f","src/bytes/complete.rs":"107f776885161e48a596962925d2b1628f20fd4bbe5b3777bb34ec175b97e280","src/bytes/macros.rs":"914a821761bbf49f04425068b7426bdd60394e8cc30e7c2193160b16e06c266f","src/bytes/mod.rs":"577231e6e6bd51726a877a73292d8f1c626f6b32ebe4a57943acaed8a8a2975d","src/bytes/streaming.rs":"4b2e577e6057fda932d1edc2ebe53c5df71a21417a304b35d5230dd7221411f3","src/character/complete.rs":"bb80656f3405eca79ba93c674cae7cd296733d784928e5c45754ee27963b7325","src/character/macros.rs":"f330ab60d469802b1664dcbbccd4bfe3e1ca87b348e92609ef34e25b4d475983","src/character/mod.rs":"9f520d535a88849726eac8648aa5c8b86193ab2f3a984d63c8371b846cc0b72c","src/character/streaming.rs":"9be7051951e8d0a54e294542baaf115aeb6efb818521f045386cd3a1777ca6a0","src/combinator/macros.rs":"df9ba1157bda21313a9c23826baeefd99f54e92c47d60c8dbb25697fe4d52686","src/combinator/mod.rs":"d1b2073683be1c9c4a06d1d3764ac789027ad7401ec726805d1505c1ad8ab1fd","src/error.rs":"ef7feb06b9689aa2f4b11a367b6f7058df8fd151b673c7482edd6600e55e38da","src/internal.rs":"c4029b0e32d41eb6407e517f3102a41a3a80a6a69df4767008ac5656b28d7ab0","src/lib.rs":"f01cdc23cc17201f978796d2d80fb6bba5a9b81ffb4653286e1e53f904079911","src/methods.rs":"56099c30123e92f9f6bacb16017f29fcdbc6546afbf0f80cf4951d2d8093ba83","src/multi/macros.rs":"01d15ae913921bd1ed0ff579a868ea39a314a866e6b5a365ef6b448a75f9b3a8","src/multi/mod.rs":"342c30e0c601558c867df215f7094bc9e43534a7793f2e8b19c114fe07cfea41","src/number/complete.rs":"560dfb2ffbbfe7fe276389b60eec2d717fec20eab68cc72d10d87ff6e2195352","src/number/macros.rs":"e614ee142658c126902a466f29ef250a77016fa126b8dfd59db0c6657a0ef205","src/number/mod.rs":"e432c317ee839a2f928cd0e8e583acadb165ed84fb649e681688a0fcd84b0a75","src/number/streaming.rs":"4c6fbce64e0d535f69d2c928314e1d0a019480db5a09e4f47a654a2e8fd56e8c","src/regexp.rs":"ac6fc61c2e7b629e6786638b44d63d15139c50f9d6a38acd044e4c2b3a699e08","src/sequence/macros.rs":"0e72871cdb2f1bf804f7f637def117c8e531f78cc7da5a6a0e343f0dbfb04271","src/sequence/mod.rs":"ec34b969462703252c4f00c27a03928c9580da612c71b7100e0902190a633ab9","src/str.rs":"fcae4d6f2f7bc921cafe3d0ce682d0566618cbe5f3b3e4b51ca34d11cb0e3e93","src/traits.rs":"2a84c3aa40f1cf78e0149e344da8b85486f7b6c0ddff8f23689bccce0def2260","src/util.rs":"bcedca3c88ac24f11f73e836efd8fe00014562163cc3d43d0cec9d726a4687c3","src/whitespace.rs":"53bddd9d559dc7793effcb828f9c196a7f098338edb0877940d1246834761308","tests/arithmetic.rs":"c57bc547110e498e7bddc973a94202f22356bc525fed55dac4f03daf16eb54f7","
tests/arithmetic_ast.rs":"8fbc4c5d8850fa1cf0a16f97e8718d5427b2657f12ca4a0b3b6c1b47fd9e67d4","tests/blockbuf-arithmetic.rs":"099fdf75da97ae032006d8c82ea2207265c5161a08d1370e1ddddb01e73afaf4","tests/css.rs":"b13466eb6a0831f98ede83ffdd752ba821f7d03db724fd92b5bfbc0b9f804a65","tests/custom_errors.rs":"3d2511f8a8d0eb20d9efc19f29ae4ab34389bdd33214a421989d0c47b540b7fd","tests/escaped.rs":"03ecb10828472e4de2ace05a12cb49997b47a113b6a3b0eea3d56bc2bafd8446","tests/float.rs":"92947cc112a6b865f5e19d80edbf300ddc0d0ca4b4e4543eda15337b5c60eedf","tests/inference.rs":"fe476d1367fce9f0baf82295dc037c79321ededf12b9bcc0c5acdc7cefff4720","tests/ini.rs":"04ebf3ead0008974b3bedc387e889bab5942efd0c9c8563fe47e056a9b90bbab","tests/ini_str.rs":"2831a4ee26b37734dba8862cc2972a3a1433abf4fcab253b6cf4fb43f120301d","tests/issues.rs":"142c8d206089b04cf2fd0cbd90f87421ded435ce300516c029294b056354e00f","tests/json.rs":"25476ec2daca19295f5f99b621eecc859a5db5789ac35be381eaf8703a70bce8","tests/mp4.rs":"d0e61bfc93ff40676ca7e9d7813a5ad7c73b1db874599d8f3ea784115bfcab87","tests/multiline.rs":"6a5321cb53c7f88778fa100499533abfa602bada7a6b1d0fbba7ef77b9c110f5","tests/named_args.rs":"bd8095c3abc6fb806c9181c6025c0111d1e7f3b7269ea89ae122bf3bb8ed7e7d","tests/overflow.rs":"d1d6d8ce9b34ed47b42a5f7250ce711805a397691dc6cad3cc8945ec230da161","tests/reborrow_fold.rs":"9328deafc2143c2a2d1a0be86e2448b644cffcb5f0935c8b24eb469f1f9477c0","tests/test1.rs":"06fc9e52638f16bfc3ef69cd26b927e0cf55706d6f132ab7c0f1072208475853"},"package":"0b471253da97532da4b61552249c521e01e736071f71c1a4f7ebbfbf0a06aad6"} \ No newline at end of file diff --git a/third_party/rust/nom/.travis.yml b/third_party/rust/nom/.travis.yml new file mode 100644 index 0000000000..e18b14079d --- /dev/null +++ b/third_party/rust/nom/.travis.yml @@ -0,0 +1,101 @@ +language: rust +# sudo is required to enable kcov to use the personality syscall +sudo: required +dist: trusty +cache: cargo + +rust: + - nightly + - beta + - stable + - 1.31.0 + +env: + matrix: + - FEATURES='--features "regexp regexp_macros"' + +before_script: + - eval git pull --rebase https://github.com/Geal/nom master + - eval git log --pretty=oneline HEAD~5..HEAD + +matrix: + include: + - rust: nightly + env: FEATURES='--no-default-features' + - rust: nightly + env: FEATURES='--no-default-features --features "alloc"' + - rust: stable + env: FEATURES='' + - rust: nightly + env: DOC_FEATURES='--features "std lexical regexp regexp_macros" --no-default-features' + before_script: + - export PATH=$HOME/.cargo/bin:$PATH + script: + - eval cargo doc --verbose $DOC_FEATURES + - rust: nightly + env: FEATURES='' + before_script: + - export PATH=$HOME/.cargo/bin:$PATH + - cargo install cargo-update || echo "cargo-update already installed" + - cargo install cargo-travis || echo "cargo-travis already installed" + - cargo install-update -a + - mkdir -p target/kcov-master + script: + cargo coveralls --verbose --all-features + allow_failures: + - rust: stable + env: FEATURES='' + before_script: + - export PATH=$HOME/.cargo/bin:$PATH + - rustup component add rustfmt-preview + script: + - eval cargo fmt -- --write-mode=diff + +notifications: + webhooks: + urls: + - https://webhooks.gitter.im/e/9c035a194ac4fd4cc061 + on_success: change + on_failure: always + on_start: false + + +addons: + apt: + packages: + - libcurl4-openssl-dev + - libelf-dev + - libdw-dev + - binutils-dev + - cmake + sources: + - kalakris-cmake + +cache: + directories: + - /home/travis/.cargo + +before_cache: + - rm -rf /home/travis/.cargo/registry + +script: + - eval cargo build 
--verbose $FEATURES + - eval cargo test --verbose $FEATURES + +after_success: | + case "$TRAVIS_RUST_VERSION" in + nightly) + if [ "${TRAVIS_PULL_REQUEST_BRANCH:-$TRAVIS_BRANCH}" != "master" ]; then + git fetch && + git checkout master && + cargo bench --verbose + fi + + if [ "$FEATURES" == '--features "regexp regexp_macros"' ]; then + cargo bench --verbose + fi + ;; + + *) + ;; + esac diff --git a/third_party/rust/nom/CHANGELOG.md b/third_party/rust/nom/CHANGELOG.md index 0c6cb6fb34..33d2a782d0 100644 --- a/third_party/rust/nom/CHANGELOG.md +++ b/third_party/rust/nom/CHANGELOG.md @@ -8,6 +8,176 @@ ### Fixed +## 5.1.1 - 2020-02-24 + +### Thanks + +- @Alexhuszagh for float fixes +- @AlexanderEkdahl, @JoshOrndorff, @akitsu-sanae for docs fixes +- @ignatenkobrain: dependency update +- @derekdreery: `map` implementation for errors +- @Lucretiel for docs fixes and compilation fixes +- adytzu2007: warning fixes +- @lo48576: error management fixes + +### Fixed + +- C symbols compilation errors due to old lexical-core version + +### Added + +- `Err` now has a `map` function + +### Changed + +- Make `error::context()` available without `alloc` feature + +## 5.1.0 - 2020-01-07 + +### Thanks + +- @Hywan, @nickmooney, @jplatte, @ngortheone, @ejmg, @SirWindfield, @demurgos, @spazm, @nyarly, @guedou, @adamnemecek, for docs fixes +- @Alxandr for error management bugfixes +- @Lucretiel for example fixes and optimizations +- @adytzu2007 for optimizations +- @audunhalland for utf8 fixes + +### Fixed + +- panic in `convert_error` +- `compile_error` macro usage + +### Added + +- `std::error::Error`, `std::fmt::Display`, `Eq`, `ToOwned` implementations for errors +- inline attribute for `ToUsize` + +### Changed + +- `convert_error` optimization +- `alt` optimization + +## 5.0.1 - 2019-08-22 + +### Thanks + +- @waywardmonkeys, @phaazon, @dalance for docs fixes +- @kali for `many0_m_n` fixes +- @ia0 for macros fixes + +### Fixed + +- `many0_m_n` now supports the n=1 case +- relaxed trait requirements in `cut` +- `peek!` macro reimplementation +- type inference in `value!` + +## 5.0.0 - 2019-06-24 + +This version comes with a complete rewrite of nom internals to use functions as a base +for parsers, instead of macros. Macros have been updated to use functions under +the hood, so that most existing parsers will work directly or require minimal changes. + +The `CompleteByteSlice` and `CompleteStr` input types were removed. To get different +behaviour related to streaming or complete input, there are different versions of some +parsers in different submodules, like `nom::character::streaming::alpha0` and +`nom::character::complete::alpha0`. + +The `verbose-errors` feature is gone, now the error type is decided through a generic +bound. 
To get equivalent behaviour to `verbose-errors`, check out `nom::error::VerboseError` + +### Thanks + +- @lowenheim helped in refactoring and error management +- @Keruspe helped in refactoring and fixing tests +- @pingiun, @Songbird0, @jeremystucki, @BeatButton, @NamsooCho, @Waelwindows, @rbtcollins, @MarkMcCaskey for a lot of help in rewriting the documentation and adding code examples +- @GuillaumeGomez for documentation rewriting and checking +- @iosmanthus for bug fixes +- @lo48576 for error management fixes +- @vaffeine for macros visibility fixes +- @webholik and @Havvy for `escaped` and `escaped_transform` fixes +- @proman21 for help on porting bits parsers + +### Added + +- the `VerboseError` type accumulates position info and error codes, and can generate a trace with span information +- the `lexical-core` crate is now used by default (through the `lexical` compilation feature) to parse floats from text +- documentation and code examples for all functions and macros + +### Changed + +- nom now uses functions instead of macros to generate parsers +- macros now use the functions under the hood +- the minimal Rust version is now 1.31 +- the verify combinator's condition function now takes its argument by reference +- `cond` will now return the error of the parser instead of None +- `alpha*`, `digit*`, `hex_digit*`, `alphanumeric*` now recognize only ASCII characters + +### Removed + +- deprecated string parsers (with the `_s` suffix), the normal version can be used instead +- `verbose-errors` is not needed anymore, now the error type can be decided when writing the parsers, and parsers provided by nom are generic over the error type +- `AtEof`, `CompleteByteSlice` and `CompleteStr` are gone, instead some parsers are specialized to work on streaming or complete input, and provided in different modules +- character parsers that were aliases to their `*1` version: eol, alpha, digit, hex_digit, oct_digit, alphanumeric, space, multispace +- `count_fixed` macro +- `whitespace::sp` can be replaced by `character::complete::multispace0` +- method combinators are now in the nom-methods crate +- `take_until_either`, `take_until_either1`, `take_until_either_and_consume` and `take_until_either_and_consume1`: they can be replaced with `is_not` (possibly combined with something else) +- `take_until_and_consume`, `take_until_and_consume1`: they can be replaced with `take_until` combined with `take` +- `sized_buffer` and `length_bytes!`: they can be replaced with the `length_data` function +- `non_empty`, `begin` and `rest_s` function +- `cond_reduce!`, `cond_with_error!`, `closure!`, `apply`, `map_res_err!`, `expr_opt!`, `expr_res!` +- `alt_complete`, `separated_list_complete`, `separated_nonempty_list_complete` + +## 4.2.3 - 2019-03-23 + +### Fixed + +- add missing `build.rs` file to the package +- fix code comparison links in changelog + +## 4.2.2 - 2019-03-04 + +### Fixed + +- regression in do_parse macro import for edition 2018 + +## 4.2.1 - 2019-02-27 + +### Fixed + +- macro expansion error in `do_parse` due to `compile_error` macro usage + +## 4.2.0 - 2019-01-29 + +### Thanks + +- @JoshMcguigan for unit test fixes +- @oza for documentation fixes +- @wackywendell for better error conversion +- @Zebradil for documentation fixes +- @tsraom for new combinators +- @hcpl for minimum Rust version tests +- @KellerFuchs for removing some unsafe uses in float parsing + +### Changed + +- macro import in edition 2018 code should work without importing internal macros now +- the regex parsers do not 
require the calling code to have imported the regex crate anymore +- error conversions are more ergonomic +- method combinators are now deprecated. They might be moved to a separate crate +- nom now specifies Rust 1.24.1 as minimum version. This was already the case before, now it is made explicit + +### Added + +- `many0_count` and `many1_count` to count applications of a parser instead of +accumulating its results in a `Vec` + +### Fixed + +- overflow in the byte wrapper for bit level parsers +- `f64` parsing does not use `transmute` anymore + ## 4.1.1 - 2018-10-14 ### Fixed @@ -20,7 +190,7 @@ - @xfix for fixing warnings, simplifying examples and performance fixes - @dvberkel for documentation fixes -- @chifflier for fixinf warnings +- @chifflier for fixing warnings - @myrrlyn for dead code elimination - @petrochenkov for removing redundant test macros - @tbelaire for documentation fixes @@ -96,8 +266,8 @@ - @passy for typo fixes - @ayrat555 for typo fixes - @GuillaumeGomez for documentation fixes -- @jrakow for documentation fixes and fiwes for `switch!` -- @phlosioneer for dicumentation fixes +- @jrakow for documentation fixes and fixes for `switch!` +- @phlosioneer for documentation fixes - @creativcoder for typo fixes - @derekdreery for typo fixes - @lucasem for implementing `Deref` on `CompleteStr` and `CompleteByteSlice` @@ -946,47 +1116,55 @@ Considering the number of changes since the last release, this version can conta ## Compare code -* [unreleased]: https://github.com/Geal/nom/compare/4.1.1...HEAD -* [4.1.1]: https://github.com/Geal/nom/compare/4.1.0...4.1.1 -* [4.1.0]: https://github.com/Geal/nom/compare/4.0.0...4.1.0 -* [4.0.0]: https://github.com/Geal/nom/compare/3.2.1...4.0.0 -* [3.2.1]: https://github.com/Geal/nom/compare/3.2.0...3.2.1 -* [3.2.0]: https://github.com/Geal/nom/compare/3.1.0...3.2.0 -* [3.1.0]: https://github.com/Geal/nom/compare/3.0.0...3.1.0 -* [3.0.0]: https://github.com/Geal/nom/compare/2.2.1...3.0.0 -* [2.2.1]: https://github.com/Geal/nom/compare/2.2.0...2.2.1 -* [2.2.0]: https://github.com/Geal/nom/compare/2.1.0...2.2.0 -* [2.1.0]: https://github.com/Geal/nom/compare/2.0.1...2.1.0 -* [2.0.1]: https://github.com/Geal/nom/compare/2.0.0...2.0.1 -* [2.0.0]: https://github.com/Geal/nom/compare/1.2.4...2.0.0 -* [1.2.4]: https://github.com/Geal/nom/compare/1.2.3...1.2.4 -* [1.2.3]: https://github.com/Geal/nom/compare/1.2.2...1.2.3 -* [1.2.2]: https://github.com/Geal/nom/compare/1.2.1...1.2.2 -* [1.2.1]: https://github.com/Geal/nom/compare/1.2.0...1.2.1 -* [1.2.0]: https://github.com/Geal/nom/compare/1.1.0...1.2.0 -* [1.1.0]: https://github.com/Geal/nom/compare/1.0.1...1.1.0 -* [1.0.1]: https://github.com/Geal/nom/compare/1.0.0...1.0.1 -* [1.0.0]: https://github.com/Geal/nom/compare/0.5.0...1.0.0 -* [0.5.0]: https://github.com/geal/nom/compare/0.4.0...0.5.0 -* [0.4.0]: https://github.com/geal/nom/compare/0.3.11...0.4.0 -* [0.3.11]: https://github.com/geal/nom/compare/0.3.10...0.3.11 -* [0.3.10]: https://github.com/geal/nom/compare/0.3.9...0.3.10 -* [0.3.9]: https://github.com/geal/nom/compare/0.3.8...0.3.9 -* [0.3.8]: https://github.com/Geal/nom/compare/0.3.7...0.3.8 -* [0.3.7]: https://github.com/Geal/nom/compare/0.3.6...0.3.7 -* [0.3.6]: https://github.com/Geal/nom/compare/0.3.5...0.3.6 -* [0.3.5]: https://github.com/Geal/nom/compare/0.3.4...0.3.5 -* [0.3.4]: https://github.com/Geal/nom/compare/0.3.3...0.3.4 -* [0.3.3]: https://github.com/Geal/nom/compare/0.3.2...0.3.3 -* [0.3.2]: https://github.com/Geal/nom/compare/0.3.1...0.3.2 -* [0.3.1]: 
https://github.com/Geal/nom/compare/0.3.0...0.3.1 -* [0.3.0]: https://github.com/Geal/nom/compare/0.2.2...0.3.0 -* [0.2.2]: https://github.com/Geal/nom/compare/0.2.1...0.2.2 -* [0.2.1]: https://github.com/Geal/nom/compare/0.2.0...0.2.1 -* [0.2.0]: https://github.com/Geal/nom/compare/0.1.6...0.2.0 -* [0.1.6]: https://github.com/Geal/nom/compare/0.1.5...0.1.6 -* [0.1.5]: https://github.com/Geal/nom/compare/0.1.4...0.1.5 -* [0.1.4]: https://github.com/Geal/nom/compare/0.1.3...0.1.4 -* [0.1.3]: https://github.com/Geal/nom/compare/0.1.2...0.1.3 -* [0.1.2]: https://github.com/Geal/nom/compare/0.1.1...0.1.2 -* [0.1.1]: https://github.com/Geal/nom/compare/0.1.0...0.1.1 +* [unreleased](https://github.com/Geal/nom/compare/5.1.1...HEAD) +* [5.1.1](https://github.com/Geal/nom/compare/5.1.0...5.1.1) +* [5.1.0](https://github.com/Geal/nom/compare/5.0.1...5.1.0) +* [5.0.1](https://github.com/Geal/nom/compare/5.0.0...5.0.1) +* [5.0.0](https://github.com/Geal/nom/compare/4.2.3...5.0.0) +* [4.2.3](https://github.com/Geal/nom/compare/4.2.2...4.2.3) +* [4.2.2](https://github.com/Geal/nom/compare/4.2.1...4.2.2) +* [4.2.1](https://github.com/Geal/nom/compare/4.2.0...4.2.1) +* [4.2.0](https://github.com/Geal/nom/compare/4.1.1...4.2.0) +* [4.1.1](https://github.com/Geal/nom/compare/4.1.0...4.1.1) +* [4.1.0](https://github.com/Geal/nom/compare/4.0.0...4.1.0) +* [4.0.0](https://github.com/Geal/nom/compare/3.2.1...4.0.0) +* [3.2.1](https://github.com/Geal/nom/compare/3.2.0...3.2.1) +* [3.2.0](https://github.com/Geal/nom/compare/3.1.0...3.2.0) +* [3.1.0](https://github.com/Geal/nom/compare/3.0.0...3.1.0) +* [3.0.0](https://github.com/Geal/nom/compare/2.2.1...3.0.0) +* [2.2.1](https://github.com/Geal/nom/compare/2.2.0...2.2.1) +* [2.2.0](https://github.com/Geal/nom/compare/2.1.0...2.2.0) +* [2.1.0](https://github.com/Geal/nom/compare/2.0.1...2.1.0) +* [2.0.1](https://github.com/Geal/nom/compare/2.0.0...2.0.1) +* [2.0.0](https://github.com/Geal/nom/compare/1.2.4...2.0.0) +* [1.2.4](https://github.com/Geal/nom/compare/1.2.3...1.2.4) +* [1.2.3](https://github.com/Geal/nom/compare/1.2.2...1.2.3) +* [1.2.2](https://github.com/Geal/nom/compare/1.2.1...1.2.2) +* [1.2.1](https://github.com/Geal/nom/compare/1.2.0...1.2.1) +* [1.2.0](https://github.com/Geal/nom/compare/1.1.0...1.2.0) +* [1.1.0](https://github.com/Geal/nom/compare/1.0.1...1.1.0) +* [1.0.1](https://github.com/Geal/nom/compare/1.0.0...1.0.1) +* [1.0.0](https://github.com/Geal/nom/compare/0.5.0...1.0.0) +* [0.5.0](https://github.com/geal/nom/compare/0.4.0...0.5.0) +* [0.4.0](https://github.com/geal/nom/compare/0.3.11...0.4.0) +* [0.3.11](https://github.com/geal/nom/compare/0.3.10...0.3.11) +* [0.3.10](https://github.com/geal/nom/compare/0.3.9...0.3.10) +* [0.3.9](https://github.com/geal/nom/compare/0.3.8...0.3.9) +* [0.3.8](https://github.com/Geal/nom/compare/0.3.7...0.3.8) +* [0.3.7](https://github.com/Geal/nom/compare/0.3.6...0.3.7) +* [0.3.6](https://github.com/Geal/nom/compare/0.3.5...0.3.6) +* [0.3.5](https://github.com/Geal/nom/compare/0.3.4...0.3.5) +* [0.3.4](https://github.com/Geal/nom/compare/0.3.3...0.3.4) +* [0.3.3](https://github.com/Geal/nom/compare/0.3.2...0.3.3) +* [0.3.2](https://github.com/Geal/nom/compare/0.3.1...0.3.2) +* [0.3.1](https://github.com/Geal/nom/compare/0.3.0...0.3.1) +* [0.3.0](https://github.com/Geal/nom/compare/0.2.2...0.3.0) +* [0.2.2](https://github.com/Geal/nom/compare/0.2.1...0.2.2) +* [0.2.1](https://github.com/Geal/nom/compare/0.2.0...0.2.1) +* [0.2.0](https://github.com/Geal/nom/compare/0.1.6...0.2.0) +* 
[0.1.6](https://github.com/Geal/nom/compare/0.1.5...0.1.6) +* [0.1.5](https://github.com/Geal/nom/compare/0.1.4...0.1.5) +* [0.1.4](https://github.com/Geal/nom/compare/0.1.3...0.1.4) +* [0.1.3](https://github.com/Geal/nom/compare/0.1.2...0.1.3) +* [0.1.2](https://github.com/Geal/nom/compare/0.1.1...0.1.2) +* [0.1.1](https://github.com/Geal/nom/compare/0.1.0...0.1.1) diff --git a/third_party/rust/nom/Cargo.lock b/third_party/rust/nom/Cargo.lock new file mode 100644 index 0000000000..0f59f087d7 --- /dev/null +++ b/third_party/rust/nom/Cargo.lock @@ -0,0 +1,639 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +[[package]] +name = "aho-corasick" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "arrayvec" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "atty" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "autocfg" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "bitflags" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "bstr" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-automata 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.99 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "byteorder" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "cast" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "cc" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "cfg-if" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "clap" +version = "2.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-width 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "cloudabi" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "criterion" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", + "cast 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", + "criterion-plot 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + 
"csv 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_xoshiro 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rayon 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rayon-core 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.99 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.99 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)", + "tinytemplate 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "walkdir 2.2.9 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "criterion-plot" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "cast 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "crossbeam-deque" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-epoch 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "crossbeam-queue" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "crossbeam-utils" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "csv" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bstr 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "csv-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "ryu 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.99 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "csv-core" +version = "0.1.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "doc-comment" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "either" +version = "1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "fs_extra" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "fuchsia-cprng" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "itertools" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "either 1.5.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "itoa" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "jemalloc-sys" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cc 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)", + "fs_extra 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "jemallocator" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "jemalloc-sys 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "lazy_static" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "lexical-core" +version = "0.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "ryu 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "static_assertions 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "libc" +version = "0.2.62" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "memchr" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "memoffset" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "nodrop" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "nom" +version = "5.1.1" +dependencies = [ + "criterion 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "doc-comment 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "jemallocator 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lexical-core 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.2.1 
(registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "num-traits" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "num_cpus" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "proc-macro2" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "quote" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rand_core" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rand_core" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rand_os" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rand_xoshiro" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rayon" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-deque 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", + "either 1.5.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rayon-core 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rayon-core" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-deque 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rdrand" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "regex" +version = "1.2.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)", + "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "regex-automata" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "regex-syntax" +version = "0.6.11" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ryu" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "same-file" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "scopeguard" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "serde" +version = "1.0.99" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "serde_derive" +version = "1.0.99" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde_json" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "ryu 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.99 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "static_assertions" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "syn" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "textwrap" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unicode-width 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "thread_local" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] 
+name = "tinytemplate" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "serde 1.0.99 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unicode-width" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "unicode-xid" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "version_check" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "walkdir" +version = "2.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "same-file 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "winapi" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winapi-util" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[metadata] +"checksum aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "58fb5e95d83b38284460a5fda7d6470aa0b8844d283a0b614b8535e880800d2d" +"checksum arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)" = "b8d73f9beda665eaa98ab9e4f7442bd4e7de6652587de55b2525e52e29c1b0ba" +"checksum atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)" = "1803c647a3ec87095e7ae7acfca019e98de5ec9a7d01343f611cf3152ed71a90" +"checksum autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "b671c8fb71b457dd4ae18c4ba1e59aa81793daacc361d82fcd410cef0d491875" +"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +"checksum bstr 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "94cdf78eb7e94c566c1f5dbe2abf8fc70a548fc902942a48c4b3a98b48ca9ade" +"checksum byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c3dd8985a7111efc5c80b44e23ecdd8c007de8ade3b96595387e812b957cf5" +"checksum cast 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "926013f2860c46252efceabb19f4a6b308197505082c609025aa6706c011d427" +"checksum cc 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)" = "b548a4ee81fccb95919d4e22cfea83c7693ebfd78f0495493178db20b3139da7" +"checksum cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "b486ce3ccf7ffd79fdeb678eac06a9e6c09fc88d33836340becb8fffe87c5e33" +"checksum clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9" +"checksum cloudabi 0.0.3 
(registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" +"checksum criterion 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "0363053954f3e679645fc443321ca128b7b950a6fe288cf5f9335cc22ee58394" +"checksum criterion-plot 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "76f9212ddf2f4a9eb2d401635190600656a1f88a932ef53d06e7fa4c7e02fb8e" +"checksum crossbeam-deque 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "05e44b8cf3e1a625844d1750e1f7820da46044ff6d28f4d43e455ba3e5bb2c13" +"checksum crossbeam-epoch 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "fedcd6772e37f3da2a9af9bf12ebe046c0dfe657992377b4df982a2b54cd37a9" +"checksum crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b" +"checksum crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6" +"checksum csv 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "37519ccdfd73a75821cac9319d4fce15a81b9fcf75f951df5b9988aa3a0af87d" +"checksum csv-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "9b5cadb6b25c77aeff80ba701712494213f4a8418fcda2ee11b6560c3ad0bf4c" +"checksum doc-comment 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "923dea538cea0aa3025e8685b20d6ee21ef99c4f77e954a30febbaac5ec73a97" +"checksum either 1.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5527cfe0d098f36e3f8839852688e63c8fff1c90b2b405aef730615f9a7bcf7b" +"checksum fs_extra 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5f2a4a2034423744d2cc7ca2068453168dcdb82c438419e639a26bd87839c674" +"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" +"checksum itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5b8467d9c1cebe26feb08c640139247fac215782d35371ade9a2136ed6085358" +"checksum itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "501266b7edd0174f8530248f87f99c88fbe60ca4ef3dd486835b8d8d53136f7f" +"checksum jemalloc-sys 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "bfc62c8e50e381768ce8ee0428ee53741929f7ebd73e4d83f669bcf7693e00ae" +"checksum jemallocator 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "9f0cd42ac65f758063fea55126b0148b1ce0a6354ff78e07a4d6806bc65c4ab3" +"checksum lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bc5729f27f159ddd61f4df6228e827e86643d4d3e7c32183cb30a1c08f604a14" +"checksum lexical-core 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)" = "f86d66d380c9c5a685aaac7a11818bdfa1f733198dfd9ec09c70b762cd12ad6f" +"checksum libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)" = "34fcd2c08d2f832f376f4173a231990fa5aef4e99fb569867318a227ef4c06ba" +"checksum memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "88579771288728879b57485cc7d6b07d648c9f0141eb955f8ab7f9d45394468e" +"checksum memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ce6075db033bbbb7ee5a0bbd3a3186bbae616f57fb001c485c7ff77955f8177f" +"checksum nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9667ddcc6cc8a43afc9b7917599d7216aa09c463919ea32c59ed6cac8bc945" +"checksum num-traits 0.2.8 
(registry+https://github.com/rust-lang/crates.io-index)" = "6ba9a427cfca2be13aa6f6403b0b7e7368fe982bfa16fccc450ce74c46cd9b32" +"checksum num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bcef43580c035376c0705c42792c294b66974abbfd2789b511784023f71f3273" +"checksum proc-macro2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4c5c2380ae88876faae57698be9e9775e3544decad214599c3a6266cca6ac802" +"checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" +"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" +"checksum rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" +"checksum rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" +"checksum rand_xoshiro 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "03b418169fb9c46533f326efd6eed2576699c44ca92d3052a066214a8d828929" +"checksum rayon 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a4b0186e22767d5b9738a05eab7c6ac90b15db17e5b5f9bd87976dd7d89a10a4" +"checksum rayon-core 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ebbe0df8435ac0c397d467b6cad6d25543d06e8a019ef3f6af3c384597515bd2" +"checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" +"checksum regex 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "88c3d9193984285d544df4a30c23a4e62ead42edf70a4452ceb76dac1ce05c26" +"checksum regex-automata 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "92b73c2a1770c255c240eaa4ee600df1704a38dc3feaa6e949e7fcd4f8dc09f9" +"checksum regex-syntax 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)" = "b143cceb2ca5e56d5671988ef8b15615733e7ee16cd348e064333b251b89343f" +"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +"checksum ryu 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c92464b447c0ee8c4fb3824ecc8383b81717b9f1e74ba2e72540aef7b9f82997" +"checksum same-file 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "585e8ddcedc187886a30fa705c47985c3fa88d06624095856b36ca0b82ff4421" +"checksum scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d" +"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" +"checksum serde 1.0.99 (registry+https://github.com/rust-lang/crates.io-index)" = "fec2851eb56d010dc9a21b89ca53ee75e6528bab60c11e89d38390904982da9f" +"checksum serde_derive 1.0.99 (registry+https://github.com/rust-lang/crates.io-index)" = "cb4dc18c61206b08dc98216c98faa0232f4337e1e1b8574551d5bad29ea1b425" +"checksum serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)" = "051c49229f282f7c6f3813f8286cc1e3323e8051823fce42c7ea80fe13521704" +"checksum static_assertions 0.3.4 
(registry+https://github.com/rust-lang/crates.io-index)" = "7f3eb36b47e512f8f1c9e3d10c2c1965bc992bd9cdb024fa581e2194501c83d3" +"checksum syn 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "158521e6f544e7e3dcfc370ac180794aa38cb34a1b1e07609376d4adcf429b93" +"checksum textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +"checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b" +"checksum tinytemplate 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4574b75faccaacddb9b284faecdf0b544b80b6b294f3d062d325c5726a209c20" +"checksum unicode-width 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7007dbd421b92cc6e28410fe7362e2e0a2503394908f417b68ec8d1c364c4e20" +"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" +"checksum version_check 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce" +"checksum walkdir 2.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "9658c94fa8b940eab2250bd5a457f9c48b748420d71293b165c8cdbe2f55f71e" +"checksum winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "f10e386af2b13e47c89e7236a7a14a086791a2b88ebad6df9bf42040195cf770" +"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +"checksum winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7168bab6e1daee33b4557efd0e95d5ca70a03706d39fa5f3fe7a236f584b03c9" +"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/third_party/rust/nom/Cargo.toml b/third_party/rust/nom/Cargo.toml index d71dc3beea..91894a9746 100644 --- a/third_party/rust/nom/Cargo.toml +++ b/third_party/rust/nom/Cargo.toml @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. 
If you're @@ -11,10 +11,12 @@ # will likely look very different (and much more reasonable) [package] +edition = "2018" name = "nom" -version = "4.1.1" +version = "5.1.1" authors = ["contact@geoffroycouprie.com"] -include = ["CHANGELOG.md", "LICENSE", ".gitignore", ".travis.yml", "Cargo.toml", "src/*.rs", "tests/*.rs"] +include = ["CHANGELOG.md", "LICENSE", ".gitignore", ".travis.yml", "Cargo.toml", "src/*.rs", "src/*/*.rs", "tests/*.rs", "build.rs"] +autoexamples = false description = "A byte-oriented, zero-copy, parser combinators library" documentation = "https://docs.rs/nom" readme = "README.md" @@ -24,7 +26,29 @@ license = "MIT" repository = "https://github.com/Geal/nom" [package.metadata.docs.rs] all-features = true -features = ["alloc", "std", "regexp", "regexp_macros", "verbose-errors"] +features = ["alloc", "std", "regexp", "regexp_macros", "lexical"] +[profile.bench] +lto = true +codegen-units = 1 +debug = true + +[lib] +bench = false + +[[example]] +name = "json" +path = "examples/json.rs" +required-features = ["alloc"] + +[[example]] +name = "s_expression" +path = "examples/s_expression.rs" +required-features = ["alloc"] + +[[example]] +name = "string" +path = "examples/string.rs" +required-features = ["alloc"] [[test]] name = "arithmetic" @@ -36,12 +60,6 @@ required-features = ["alloc"] [[test]] name = "blockbuf-arithmetic" -[[test]] -name = "complete_arithmetic" - -[[test]] -name = "complete_float" - [[test]] name = "css" @@ -71,6 +89,7 @@ name = "json" [[test]] name = "mp4" +required-features = ["alloc"] [[test]] name = "multiline" @@ -87,10 +106,44 @@ name = "reborrow_fold" [[test]] name = "test1" + +[[bench]] +name = "arithmetic" +path = "benches/arithmetic.rs" +harness = false + +[[bench]] +name = "http" +path = "benches/http.rs" +harness = false + +[[bench]] +name = "ini" +path = "benches/ini.rs" +harness = false + +[[bench]] +name = "ini_complete" +path = "benches/ini_complete.rs" +harness = false + +[[bench]] +name = "ini_str" +path = "benches/ini_str.rs" +harness = false + +[[bench]] +name = "json" +path = "benches/json.rs" +harness = false [dependencies.lazy_static] version = "^1.0" optional = true +[dependencies.lexical-core] +version = "^0.6.0" +optional = true + [dependencies.memchr] version = "^2.0" default-features = false @@ -98,14 +151,24 @@ default-features = false [dependencies.regex] version = "^1.0" optional = true +[dev-dependencies.criterion] +version = "0.2" + +[dev-dependencies.doc-comment] +version = "0.3" + +[dev-dependencies.jemallocator] +version = "^0.1" +[build-dependencies.version_check] +version = "0.9" [features] alloc = [] -default = ["std"] +default = ["std", "lexical"] +lexical = ["lexical-core"] regexp = ["regex"] regexp_macros = ["regexp", "lazy_static"] std = ["alloc", "memchr/use_std"] -verbose-errors = ["alloc"] [badges.coveralls] branch = "master" repository = "Geal/nom" diff --git a/third_party/rust/nom/LICENSE b/third_party/rust/nom/LICENSE index a885458136..88557e44e3 100644 --- a/third_party/rust/nom/LICENSE +++ b/third_party/rust/nom/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2015-2016 Geoffroy Couprie +Copyright (c) 2014-2019 Geoffroy Couprie Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the diff --git a/third_party/rust/nom/build.rs b/third_party/rust/nom/build.rs new file mode 100644 index 0000000000..9743c905f7 --- /dev/null +++ b/third_party/rust/nom/build.rs @@ -0,0 +1,7 @@ +extern crate version_check; + +fn main() { + if 
version_check::is_min_version("1.28.0").unwrap_or(true) { + println!("cargo:rustc-cfg=stable_i128"); + } +} diff --git a/third_party/rust/nom/src/bits.rs b/third_party/rust/nom/src/bits.rs deleted file mode 100644 index ffb7531708..0000000000 --- a/third_party/rust/nom/src/bits.rs +++ /dev/null @@ -1,489 +0,0 @@ -//! Bit level parsers and combinators -//! -//! Bit parsing is handled by tweaking the input in most macros. -//! In byte level parsing, the input is generally a `&[u8]` passed from combinator -//! to combinator as the slices are manipulated. -//! -//! Bit parsers take a `(&[u8], usize)` as input. The first part of the tuple is a byte slice, -//! the second part is a bit offset in the first byte of the slice. -//! -//! By passing a pair like this, we can leverage most of the existing combinators, and avoid -//! transforming the whole slice to a vector of booleans. This should make it easy -//! to see a byte slice as a bit stream, and parse code points of arbitrary bit length. -//! - -/// Transforms its byte slice input into a bit stream for the underlying parser. This allows the -/// given bit stream parser to work on a byte slice input. -/// -/// Signature: -/// `bits!( parser ) => ( &[u8], (&[u8], usize) -> IResult<(&[u8], usize), T> ) -> IResult<&[u8], T>` -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!( take_4_bits, bits!( take_bits!( u8, 4 ) ) ); -/// -/// let input = vec![0xAB, 0xCD, 0xEF, 0x12]; -/// let sl = &input[..]; -/// -/// assert_eq!(take_4_bits( sl ), Ok( (&sl[1..], 0xA) )); -/// # } -#[macro_export] -macro_rules! bits ( - ($i:expr, $submac:ident!( $($args:tt)* )) => ( - bits_impl!($i, $submac!($($args)*)); - ); - ($i:expr, $f:expr) => ( - bits_impl!($i, call!($f)); - ); -); - -#[cfg(feature = "verbose-errors")] -/// Internal parser, do not use directly -#[doc(hidden)] -#[macro_export] -macro_rules! bits_impl ( - ($i:expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Context,Err,Needed}; - - let input = ($i, 0usize); - match $submac!(input, $($args)*) { - Err(Err::Error(e)) => { - let err = match e { - Context::Code((i,b), kind) => Context::Code(&i[b/8..], kind), - Context::List(mut v) => { - Context::List(v.drain(..).map(|((i,b), kind)| (&i[b/8..], kind)).collect()) - } - }; - Err(Err::Error(err)) - }, - Err(Err::Failure(e)) => { - let err = match e { - Context::Code((i,b), kind) => Context::Code(&i[b/8..], kind), - Context::List(mut v) => { - Context::List(v.drain(..).map(|((i,b), kind)| (&i[b/8..], kind)).collect()) - } - }; - Err(Err::Failure(err)) - }, - Err(Err::Incomplete(Needed::Unknown)) => Err(Err::Incomplete(Needed::Unknown)), - Err(Err::Incomplete(Needed::Size(i))) => { - //println!("bits parser returned Needed::Size({})", i); - Err(Err::Incomplete(Needed::Size(i / 8 + 1))) - }, - Ok(((i, bit_index), o)) => { - let byte_index = bit_index / 8 + if bit_index % 8 == 0 { 0 } else { 1 } ; - //println!("bit index=={} => byte index=={}", bit_index, byte_index); - Ok((&i[byte_index..], o)) - } - } - } - ); -); - -#[cfg(not(feature = "verbose-errors"))] -/// Internal parser, do not use directly -#[doc(hidden)] -#[macro_export] -macro_rules! 
bits_impl ( - ($i:expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,Needed,Context}; - - let input = ($i, 0usize); - match $submac!(input, $($args)*) { - Err(Err::Error(e)) => { - let Context::Code(_,err) = e; - Err(Err::Error(error_position!($i, err))) - }, - Err(Err::Failure(e)) => { - let Context::Code(_,err) = e; - Err(Err::Failure(error_position!($i, err))) - }, - Err(Err::Incomplete(Needed::Unknown)) => Err(Err::Incomplete(Needed::Unknown)), - Err(Err::Incomplete(Needed::Size(i))) => { - //println!("bits parser returned Needed::Size({})", i); - $crate::need_more($i, $crate::Needed::Size(i / 8 + 1)) - }, - Ok(((i, bit_index), o)) => { - let byte_index = bit_index / 8 + if bit_index % 8 == 0 { 0 } else { 1 } ; - //println!("bit index=={} => byte index=={}", bit_index, byte_index); - Ok((&i[byte_index..], o)) - } - } - } - ); -); - -/// Counterpart to bits, bytes! transforms its bit stream input into a byte slice for the underlying -/// parser, allowing byte-slice parsers to work on bit streams. -/// -/// Signature: -/// `bytes!( parser ) => ( (&[u8], usize), &[u8] -> IResult<&[u8], T> ) -> IResult<(&[u8], usize), T>`, -/// -/// A partial byte remaining in the input will be ignored and the given parser will start parsing -/// at the next full byte. -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::rest; -/// # fn main() { -/// named!( parse<(u8, u8, &[u8])>, bits!( tuple!( -/// take_bits!(u8, 4), -/// take_bits!(u8, 8), -/// bytes!(rest) -/// ))); -/// -/// let input = &[0xde, 0xad, 0xbe, 0xaf]; -/// -/// assert_eq!(parse( input ), Ok(( &[][..], (0xd, 0xea, &[0xbe, 0xaf][..]) ))); -/// # } -#[macro_export] -macro_rules! bytes ( - ($i:expr, $submac:ident!( $($args:tt)* )) => ( - bytes_impl!($i, $submac!($($args)*)); - ); - ($i:expr, $f:expr) => ( - bytes_impl!($i, call!($f)); - ); -); - -#[cfg(feature = "verbose-errors")] -/// Internal parser, do not use directly -#[doc(hidden)] -#[macro_export] -macro_rules! bytes_impl ( - ($macro_i:expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,Needed,Context}; - - let inp; - if $macro_i.1 % 8 != 0 { - inp = & $macro_i.0[1 + $macro_i.1 / 8 ..]; - } - else { - inp = & $macro_i.0[$macro_i.1 / 8 ..]; - } - - let sub = $submac!(inp, $($args)*); - let res = match sub { - Err(Err::Incomplete(Needed::Size(i))) => { - Err(Err::Incomplete(Needed::Size(i * 8))) - }, - Err(Err::Incomplete(Needed::Unknown)) => Err(Err::Incomplete(Needed::Unknown)), - Ok((i, o)) => { - Ok(((i, 0), o)) - }, - Err(Err::Error(e)) => { - let err = match e { - Context::Code(i, c) => Context::Code((i,0), c), - Context::List(mut v) => { - let (i, c) = v.remove(0); - Context::Code((i,0), c) - } - }; - Err(Err::Error(err)) - }, - Err(Err::Failure(e)) => { - let err = match e { - Context::Code(i, c) => Context::Code((i,0), c), - Context::List(mut v) => { - let (i, c) = v.remove(0); - Context::Code((i,0), c) - } - }; - Err(Err::Error(err)) - }, - Err(Err::Incomplete(Needed::Unknown)) => Err(Err::Incomplete(Needed::Unknown)), - Err(Err::Incomplete(Needed::Size(i))) => { - Err(Err::Incomplete(Needed::Size(i * 8))) - }, - Ok((i, o)) => { - Ok(((i, 0), o)) - } - }; - res - } - ); -); - -#[cfg(not(feature = "verbose-errors"))] -/// Internal parser, do not use directly -#[doc(hidden)] -#[macro_export] -macro_rules! 
bytes_impl ( - ($macro_i:expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,Needed,Context}; - - let inp; - if $macro_i.1 % 8 != 0 { - inp = & $macro_i.0[1 + $macro_i.1 / 8 ..]; - } - else { - inp = & $macro_i.0[$macro_i.1 / 8 ..]; - } - - let sub = $submac!(inp, $($args)*); - let res = match sub { - Err(Err::Incomplete(Needed::Size(i))) => { - Err(Err::Incomplete(Needed::Size(i * 8))) - }, - Err(Err::Incomplete(Needed::Unknown)) => Err(Err::Incomplete(Needed::Unknown)), - Ok((i, o)) => { - Ok(((i, 0), o)) - }, - Err(Err::Error(e)) => { - let Context::Code(i, c) = e; - Err(Err::Error(Context::Code((i,0), c))) - }, - Err(Err::Failure(e)) => { - let Context::Code(i, c) = e; - Err(Err::Failure(Context::Code((i,0), c))) - }, - }; - res - } - ); -); - -/// Consumes the specified number of bits and returns them as the specified type. -/// -/// Signature: -/// `take_bits!(type, count) => ( (&[T], usize), U, usize) -> IResult<(&[T], usize), U>` -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!( take_pair<(u8, u8)>, bits!( pair!( take_bits!(u8, 4), take_bits!(u8, 4) ) ) ); -/// -/// let input = vec![0xAB, 0xCD, 0xEF]; -/// let sl = &input[..]; -/// -/// assert_eq!(take_pair( sl ), Ok((&sl[1..], (0xA, 0xB))) ); -/// assert_eq!(take_pair( &sl[1..] ), Ok((&sl[2..], (0xC, 0xD))) ); -/// # } -/// ``` -#[macro_export] -macro_rules! take_bits ( - ($i:expr, $t:ty, $count:expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Needed,IResult}; - - use $crate::lib::std::ops::Div; - use $crate::lib::std::convert::Into; - //println!("taking {} bits from {:?}", $count, $i); - let (input, bit_offset) = $i; - let res : IResult<(&[u8],usize), $t> = if $count == 0 { - Ok(( (input, bit_offset), (0 as u8).into())) - } else { - let cnt = ($count as usize + bit_offset).div(8); - if input.len() * 8 < $count as usize + bit_offset { - //println!("returning incomplete: {}", $count as usize + bit_offset); - $crate::need_more($i, Needed::Size($count as usize)) - } else { - let mut acc:$t = (0 as u8).into(); - let mut offset: usize = bit_offset; - let mut remaining: usize = $count; - let mut end_offset: usize = 0; - - for byte in input.iter().take(cnt + 1) { - if remaining == 0 { - break; - } - let val: $t = if offset == 0 { - (*byte as u8).into() - } else { - (((*byte as u8) << offset) as u8 >> offset).into() - }; - - if remaining < 8 - offset { - acc += val >> (8 - offset - remaining); - end_offset = remaining + offset; - break; - } else { - acc += val << (remaining - (8 - offset)); - remaining -= 8 - offset; - offset = 0; - } - } - Ok(( (&input[cnt..], end_offset) , acc)) - } - }; - res - } - ); -); - -/// Matches the given bit pattern. -/// -/// Signature: -/// `tag_bits!(type, count, pattern) => ( (&[T], usize), U, usize, U) -> IResult<(&[T], usize), U>` -/// -/// The caller must specify the number of bits to consume. The matched value is included in the -/// result on success. -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!( take_a, bits!( tag_bits!(u8, 4, 0xA) ) ); -/// -/// let input = vec![0xAB, 0xCD, 0xEF]; -/// let sl = &input[..]; -/// -/// assert_eq!(take_a( sl ), Ok((&sl[1..], 0xA)) ); -/// # } -/// ``` -#[macro_export] -macro_rules! 
tag_bits ( - ($i:expr, $t:ty, $count:expr, $p: pat) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,IResult}; - - match take_bits!($i, $t, $count) { - Err(Err::Incomplete(i)) => Err(Err::Incomplete(i)), - Ok((i, o)) => { - if let $p = o { - let res: IResult<(&[u8],usize),$t> = Ok((i, o)); - res - } else { - let e: $crate::ErrorKind = $crate::ErrorKind::TagBits; - Err(Err::Error(error_position!($i, e))) - } - }, - _ => { - let e: $crate::ErrorKind = $crate::ErrorKind::TagBits; - Err(Err::Error(error_position!($i, e))) - } - } - } - ) -); - -#[cfg(test)] -mod tests { - use lib::std::ops::{AddAssign, Shl, Shr}; - use internal::{Err, Needed}; - use util::ErrorKind; - - #[test] - fn take_bits() { - let input = [0b10_10_10_10, 0b11_11_00_00, 0b00_11_00_11]; - let sl = &input[..]; - - assert_eq!(take_bits!((sl, 0), u8, 0), Ok(((sl, 0), 0))); - assert_eq!(take_bits!((sl, 0), u8, 8), Ok(((&sl[1..], 0), 170))); - assert_eq!(take_bits!((sl, 0), u8, 3), Ok(((&sl[0..], 3), 5))); - assert_eq!(take_bits!((sl, 0), u8, 6), Ok(((&sl[0..], 6), 42))); - assert_eq!(take_bits!((sl, 1), u8, 1), Ok(((&sl[0..], 2), 0))); - assert_eq!(take_bits!((sl, 1), u8, 2), Ok(((&sl[0..], 3), 1))); - assert_eq!(take_bits!((sl, 1), u8, 3), Ok(((&sl[0..], 4), 2))); - assert_eq!(take_bits!((sl, 6), u8, 3), Ok(((&sl[1..], 1), 5))); - assert_eq!(take_bits!((sl, 0), u16, 10), Ok(((&sl[1..], 2), 683))); - assert_eq!(take_bits!((sl, 0), u16, 8), Ok(((&sl[1..], 0), 170))); - assert_eq!(take_bits!((sl, 6), u16, 10), Ok(((&sl[2..], 0), 752))); - assert_eq!(take_bits!((sl, 6), u16, 11), Ok(((&sl[2..], 1), 1504))); - assert_eq!(take_bits!((sl, 0), u32, 20), Ok(((&sl[2..], 4), 700_163))); - assert_eq!(take_bits!((sl, 4), u32, 20), Ok(((&sl[3..], 0), 716_851))); - assert_eq!( - take_bits!((sl, 4), u32, 22), - Err(Err::Incomplete(Needed::Size(22))) - ); - } - - #[test] - fn tag_bits() { - let input = [0b10_10_10_10, 0b11_11_00_00, 0b00_11_00_11]; - let sl = &input[..]; - - assert_eq!(tag_bits!((sl, 0), u8, 3, 0b101), Ok(((&sl[0..], 3), 5))); - assert_eq!(tag_bits!((sl, 0), u8, 4, 0b1010), Ok(((&sl[0..], 4), 10))); - } - - named!(ch<(&[u8],usize),(u8,u8)>, - do_parse!( - tag_bits!(u8, 3, 0b101) >> - x: take_bits!(u8, 4) >> - y: take_bits!(u8, 5) >> - (x,y) - ) - ); - - #[test] - fn chain_bits() { - let input = [0b10_10_10_10, 0b11_11_00_00, 0b00_11_00_11]; - let sl = &input[..]; - assert_eq!(ch((&input[..], 0)), Ok(((&sl[1..], 4), (5, 15)))); - assert_eq!(ch((&input[..], 4)), Ok(((&sl[2..], 0), (7, 16)))); - assert_eq!(ch((&input[..1], 0)), Err(Err::Incomplete(Needed::Size(5)))); - } - - named!(ch_bytes<(u8, u8)>, bits!(ch)); - #[test] - fn bits_to_bytes() { - let input = [0b10_10_10_10, 0b11_11_00_00, 0b00_11_00_11]; - assert_eq!(ch_bytes(&input[..]), Ok((&input[2..], (5, 15)))); - assert_eq!(ch_bytes(&input[..1]), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!( - ch_bytes(&input[1..]), - Err(Err::Error(error_position!(&input[1..], ErrorKind::TagBits))) - ); - } - - #[derive(PartialEq, Debug)] - struct FakeUint(u32); - - impl AddAssign for FakeUint { - fn add_assign(&mut self, other: FakeUint) { - *self = FakeUint(self.0 + other.0); - } - } - - impl Shr for FakeUint { - type Output = FakeUint; - - fn shr(self, shift: usize) -> FakeUint { - FakeUint(self.0 >> shift) - } - } - - impl Shl for FakeUint { - type Output = FakeUint; - - fn shl(self, shift: usize) -> FakeUint { - FakeUint(self.0 << shift) - } - } - - impl From for FakeUint { - fn from(i: u8) -> FakeUint { - FakeUint(u32::from(i)) - } - } - - #[test] - fn 
non_privitive_type() { - let input = [0b10_10_10_10, 0b11_11_00_00, 0b00_11_00_11]; - let sl = &input[..]; - - assert_eq!( - take_bits!((sl, 0), FakeUint, 20), - Ok(((&sl[2..], 4), FakeUint(700_163))) - ); - assert_eq!( - take_bits!((sl, 4), FakeUint, 20), - Ok(((&sl[3..], 0), FakeUint(716_851))) - ); - assert_eq!( - take_bits!((sl, 4), FakeUint, 22), - Err(Err::Incomplete(Needed::Size(22))) - ); - } -} diff --git a/third_party/rust/nom/src/bits/complete.rs b/third_party/rust/nom/src/bits/complete.rs new file mode 100644 index 0000000000..f868ec2643 --- /dev/null +++ b/third_party/rust/nom/src/bits/complete.rs @@ -0,0 +1,75 @@ +//! bit level parsers +//! + +use crate::error::{ErrorKind, ParseError}; +use crate::internal::{Err, IResult}; +use crate::lib::std::ops::{AddAssign, RangeFrom, Shl, Shr, Div}; +use crate::traits::{InputIter, InputLength, Slice, ToUsize}; + +/// generates a parser taking `count` bits +pub fn take>(count: C) -> impl Fn((I, usize)) -> IResult<(I, usize), O, E> +where + I: Slice> + InputIter + InputLength, + C: ToUsize, + O: From + AddAssign + Shl + Shr, +{ + let count = count.to_usize(); + move |(input, bit_offset): (I, usize)| { + if count == 0 { + Ok(((input, bit_offset), 0u8.into())) + } else { + let cnt = (count + bit_offset).div(8); + if input.input_len() * 8 < count + bit_offset { + Err(Err::Error(E::from_error_kind((input, bit_offset), ErrorKind::Eof))) + } else { + let mut acc:O = (0 as u8).into(); + let mut offset: usize = bit_offset; + let mut remaining: usize = count; + let mut end_offset: usize = 0; + + for byte in input.iter_elements().take(cnt + 1) { + if remaining == 0 { + break; + } + let val: O = if offset == 0 { + byte.into() + } else { + ((byte << offset) as u8 >> offset).into() + }; + + if remaining < 8 - offset { + acc += val >> (8 - offset - remaining); + end_offset = remaining + offset; + break; + } else { + acc += val << (remaining - (8 - offset)); + remaining -= 8 - offset; + offset = 0; + } + } + Ok(( (input.slice(cnt..), end_offset) , acc)) + } + } + } +} + +/// generates a parser taking `count` bits and comparing them to `pattern` +pub fn tag>(pattern: O, count: C) -> impl Fn((I, usize)) -> IResult<(I, usize), O, E> +where + I: Slice> + InputIter + InputLength + Clone, + C: ToUsize, + O: From + AddAssign + Shl + Shr + PartialEq, +{ + let count = count.to_usize(); + move |input: (I, usize)| { + let inp = input.clone(); + + take(count)(input).and_then(|(i, o)| { + if pattern == o { + Ok((i, o)) + } else { + Err(Err::Error(error_position!(inp, ErrorKind::TagBits))) + } + }) + } +} diff --git a/third_party/rust/nom/src/bits/macros.rs b/third_party/rust/nom/src/bits/macros.rs new file mode 100644 index 0000000000..b056503fc6 --- /dev/null +++ b/third_party/rust/nom/src/bits/macros.rs @@ -0,0 +1,263 @@ +//! Bit level parsers and combinators +//! +//! Bit parsing is handled by tweaking the input in most macros. +//! In byte level parsing, the input is generally a `&[u8]` passed from combinator +//! to combinator as the slices are manipulated. +//! +//! Bit parsers take a `(&[u8], usize)` as input. The first part of the tuple is a byte slice, +//! the second part is a bit offset in the first byte of the slice. +//! +//! By passing a pair like this, we can leverage most of the existing combinators, and avoid +//! transforming the whole slice to a vector of booleans. This should make it easy +//! to see a byte slice as a bit stream, and parse code points of arbitrary bit length. +//! 
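> Editor's note (not part of the vendored patch): the module documentation above, carried over from the deleted `bits.rs` into the new `src/bits/macros.rs`, describes the `(&[u8], usize)` byte-slice-plus-bit-offset input convention. As a quick, hedged illustration of how a downstream crate might drive the function-style combinators this update adds in `src/bits/mod.rs` and `src/bits/complete.rs`, here is a minimal sketch; it assumes nom 5.x with default features, and the `two_nibbles` helper name is hypothetical.

```rust
// Editor's illustration only; not part of the vendored diff. Assumes nom 5.x.
use nom::bits::{bits, complete::take};
use nom::error::ErrorKind;
use nom::sequence::pair;
use nom::IResult;

// `bits` adapts a plain byte-slice input into the `(&[u8], usize)` bit-stream
// input expected by `take`, then converts back, discarding any partially
// consumed byte. The explicit error type names the intermediate bit-level
// error `((&[u8], usize), ErrorKind)` so inference has nothing to guess.
fn two_nibbles(input: &[u8]) -> IResult<&[u8], (u8, u8)> {
    bits::<_, _, ((&[u8], usize), ErrorKind), _, _>(pair(take(4usize), take(4usize)))(input)
}

fn main() {
    let data = [0xAB, 0xCD];
    // The high and low nibbles of 0xAB are parsed; 0xCD is left untouched.
    let (rest, (hi, lo)) = two_nibbles(&data[..]).unwrap();
    assert_eq!((hi, lo), (0xA, 0xB));
    assert_eq!(rest, &data[1..]);
}
```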
+ +/// Transforms its byte slice input into a bit stream for the underlying parser. This allows the +/// given bit stream parser to work on a byte slice input. +/// +/// Signature: +/// `bits!( parser ) => ( &[u8], (&[u8], usize) -> IResult<(&[u8], usize), T> ) -> IResult<&[u8], T>` +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, Needed}; +/// # fn main() { +/// named!( take_4_bits, bits!( take_bits!( 4u8 ) ) ); +/// +/// let input = vec![0xAB, 0xCD, 0xEF, 0x12]; +/// let sl = &input[..]; +/// +/// assert_eq!(take_4_bits( sl ), Ok( (&sl[1..], 0xA) )); +/// assert_eq!(take_4_bits( &b""[..] ), Err(Err::Incomplete(Needed::Size(1)))); +/// # } +#[macro_export(local_inner_macros)] +macro_rules! bits ( + ($i:expr, $submac:ident!( $($args:tt)* )) => ({ + $crate::bits::bitsc($i, move |i| { $submac!(i, $($args)*) }) + }); + ($i:expr, $f:expr) => ( + bits!($i, call!($f)) + ); +); + +/// Counterpart to bits, bytes! transforms its bit stream input into a byte slice for the underlying +/// parser, allowing byte-slice parsers to work on bit streams. +/// +/// Signature: +/// `bytes!( parser ) => ( (&[u8], usize), &[u8] -> IResult<&[u8], T> ) -> IResult<(&[u8], usize), T>`, +/// +/// A partial byte remaining in the input will be ignored and the given parser will start parsing +/// at the next full byte. +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::combinator::rest; +/// # use nom::error::ErrorKind; +/// # fn main() { +/// +/// named!( parse<(u8, u8, &[u8])>, bits!( tuple!( +/// take_bits!(4u8), +/// take_bits!(8u8), +/// bytes!(rest::<_, (_, ErrorKind)>) +/// ))); +/// +/// let input = &[0xde, 0xad, 0xbe, 0xaf]; +/// +/// assert_eq!(parse( input ), Ok(( &[][..], (0xd, 0xea, &[0xbe, 0xaf][..]) ))); +/// # } +#[macro_export(local_inner_macros)] +macro_rules! bytes ( + ($i:expr, $submac:ident!( $($args:tt)* )) => ({ + $crate::bits::bytesc($i, move |i| { $submac!(i, $($args)*) }) + }); + ($i:expr, $f:expr) => ( + bytes!($i, call!($f)) + ); +); + +/// Consumes the specified number of bits and returns them as the specified type. +/// +/// Signature: +/// `take_bits!(type, count) => ( (&[T], usize), U, usize) -> IResult<(&[T], usize), U>` +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # fn main() { +/// named!(bits_pair<(&[u8], usize), (u8, u8)>, pair!( take_bits!(4u8), take_bits!(4u8) ) ); +/// named!( take_pair<(u8, u8)>, bits!( bits_pair ) ); +/// +/// let input = vec![0xAB, 0xCD, 0xEF]; +/// let sl = &input[..]; +/// +/// assert_eq!(take_pair( sl ), Ok((&sl[1..], (0xA, 0xB))) ); +/// assert_eq!(take_pair( &sl[1..] ), Ok((&sl[2..], (0xC, 0xD))) ); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! take_bits ( + ($i:expr, $count:expr) => ( + { + let res: $crate::IResult<_, _> = $crate::bits::streaming::take($count)($i); + res + } + ); +); + +/// Matches the given bit pattern. +/// +/// Signature: +/// `tag_bits!(type, count, pattern) => ( (&[T], usize), U, usize, U) -> IResult<(&[T], usize), U>` +/// +/// The caller must specify the number of bits to consume. The matched value is included in the +/// result on success. +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # fn main() { +/// named!( take_a, bits!( tag_bits!(4usize, 0xA) ) ); +/// +/// let input = vec![0xAB, 0xCD, 0xEF]; +/// let sl = &input[..]; +/// +/// assert_eq!(take_a( sl ), Ok((&sl[1..], 0xA)) ); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! 
tag_bits ( + ($i:expr, $count:expr, $p: expr) => ( + { + let res: $crate::IResult<_, _> = $crate::bits::streaming::tag($p, $count)($i); + res + } + ) +); + +#[cfg(test)] +mod tests { + use crate::lib::std::ops::{AddAssign, Shl, Shr}; + use crate::internal::{Err, Needed, IResult}; + use crate::error::ErrorKind; + + #[test] + fn take_bits() { + let input = [0b10_10_10_10, 0b11_11_00_00, 0b00_11_00_11]; + let sl = &input[..]; + + assert_eq!(take_bits!((sl, 0), 0u8), Ok(((sl, 0), 0))); + assert_eq!(take_bits!((sl, 0), 8u8), Ok(((&sl[1..], 0), 170))); + assert_eq!(take_bits!((sl, 0), 3u8), Ok(((&sl[0..], 3), 5))); + assert_eq!(take_bits!((sl, 0), 6u8), Ok(((&sl[0..], 6), 42))); + assert_eq!(take_bits!((sl, 1), 1u8), Ok(((&sl[0..], 2), 0))); + assert_eq!(take_bits!((sl, 1), 2u8), Ok(((&sl[0..], 3), 1))); + assert_eq!(take_bits!((sl, 1), 3u8), Ok(((&sl[0..], 4), 2))); + assert_eq!(take_bits!((sl, 6), 3u8), Ok(((&sl[1..], 1), 5))); + assert_eq!(take_bits!((sl, 0), 10u8), Ok(((&sl[1..], 2), 683))); + assert_eq!(take_bits!((sl, 0), 8u8), Ok(((&sl[1..], 0), 170))); + assert_eq!(take_bits!((sl, 6), 10u8), Ok(((&sl[2..], 0), 752))); + assert_eq!(take_bits!((sl, 6), 11u8), Ok(((&sl[2..], 1), 1504))); + assert_eq!(take_bits!((sl, 0), 20u8), Ok(((&sl[2..], 4), 700_163))); + assert_eq!(take_bits!((sl, 4), 20u8), Ok(((&sl[3..], 0), 716_851))); + let r: IResult<_,u32> = take_bits!((sl, 4), 22u8); + assert_eq!( + r, + Err(Err::Incomplete(Needed::Size(22))) + ); + } + + #[test] + fn tag_bits() { + let input = [0b10_10_10_10, 0b11_11_00_00, 0b00_11_00_11]; + let sl = &input[..]; + + assert_eq!(tag_bits!((sl, 0), 3u8, 0b101), Ok(((&sl[0..], 3), 5))); + assert_eq!(tag_bits!((sl, 0), 4u8, 0b1010), Ok(((&sl[0..], 4), 10))); + } + + named!(ch<(&[u8],usize),(u8,u8)>, + do_parse!( + tag_bits!(3u8, 0b101) >> + x: take_bits!(4u8) >> + y: take_bits!(5u8) >> + (x,y) + ) + ); + + #[test] + fn chain_bits() { + let input = [0b10_10_10_10, 0b11_11_00_00, 0b00_11_00_11]; + let sl = &input[..]; + assert_eq!(ch((&input[..], 0)), Ok(((&sl[1..], 4), (5, 15)))); + assert_eq!(ch((&input[..], 4)), Ok(((&sl[2..], 0), (7, 16)))); + assert_eq!(ch((&input[..1], 0)), Err(Err::Incomplete(Needed::Size(5)))); + } + + named!(ch_bytes<(u8, u8)>, bits!(ch)); + #[test] + fn bits_to_bytes() { + let input = [0b10_10_10_10, 0b11_11_00_00, 0b00_11_00_11]; + assert_eq!(ch_bytes(&input[..]), Ok((&input[2..], (5, 15)))); + assert_eq!(ch_bytes(&input[..1]), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!( + ch_bytes(&input[1..]), + Err(Err::Error(error_position!(&input[1..], ErrorKind::TagBits))) + ); + } + + named!(bits_bytes_bs, bits!(bytes!(crate::combinator::rest::<_, (&[u8], ErrorKind)>))); + #[test] + fn bits_bytes() { + let input = [0b10_10_10_10]; + assert_eq!(bits_bytes_bs(&input[..]), Ok((&[][..], &[0b10_10_10_10][..]))); + } + + #[derive(PartialEq, Debug)] + struct FakeUint(u32); + + impl AddAssign for FakeUint { + fn add_assign(&mut self, other: FakeUint) { + *self = FakeUint(self.0 + other.0); + } + } + + impl Shr for FakeUint { + type Output = FakeUint; + + fn shr(self, shift: usize) -> FakeUint { + FakeUint(self.0 >> shift) + } + } + + impl Shl for FakeUint { + type Output = FakeUint; + + fn shl(self, shift: usize) -> FakeUint { + FakeUint(self.0 << shift) + } + } + + impl From for FakeUint { + fn from(i: u8) -> FakeUint { + FakeUint(u32::from(i)) + } + } + + #[test] + fn non_privitive_type() { + let input = [0b10_10_10_10, 0b11_11_00_00, 0b00_11_00_11]; + let sl = &input[..]; + + assert_eq!( + take_bits!((sl, 0), 20u8), + 
Ok(((&sl[2..], 4), FakeUint(700_163))) + ); + assert_eq!( + take_bits!((sl, 4), 20u8), + Ok(((&sl[3..], 0), FakeUint(716_851))) + ); + let r3: IResult<_, FakeUint> = take_bits!((sl, 4), 22u8); + assert_eq!( + r3, + Err(Err::Incomplete(Needed::Size(22))) + ); + } +} diff --git a/third_party/rust/nom/src/bits/mod.rs b/third_party/rust/nom/src/bits/mod.rs new file mode 100644 index 0000000000..a6b12f1a74 --- /dev/null +++ b/third_party/rust/nom/src/bits/mod.rs @@ -0,0 +1,119 @@ +//! bit level parsers +//! + +#[macro_use] +mod macros; + +pub mod streaming; +pub mod complete; + +use crate::error::{ParseError, ErrorKind}; +use crate::internal::{Err, IResult, Needed}; +use crate::lib::std::ops::RangeFrom; +use crate::traits::{Slice, ErrorConvert}; + + +/// Converts a byte-level input to a bit-level input, for consumption by a parser that uses bits. +/// +/// Afterwards, the input is converted back to a byte-level parser, with any remaining bits thrown +/// away. +/// +/// # Example +/// ```ignore +/// # #[macro_use] extern crate nom; +/// # use nom::IResult; +/// use nom::bits::bits; +/// use nom::bits::complete::take; +/// +/// fn take_4_bits(input: &[u8]) -> IResult<&[u8], u64> { +/// bits(take::<_, _, _, (_, _)>(4usize))(input) +/// } +/// +/// let input = vec![0xAB, 0xCD, 0xEF, 0x12]; +/// let sl = &input[..]; +/// +/// assert_eq!(take_4_bits( sl ), Ok( (&sl[1..], 0xA) )); +/// ``` +pub fn bits+ErrorConvert, E2: ParseError, P>(parser: P) -> impl Fn(I) -> IResult +where + I: Slice>, + P: Fn((I, usize)) -> IResult<(I, usize), O, E1>, +{ + move |input: I| match parser((input, 0)) { + Ok(((rest, offset), res)) => { + let byte_index = offset / 8 + if offset % 8 == 0 { 0 } else { 1 }; + Ok((rest.slice(byte_index..), res)) + } + Err(Err::Incomplete(n)) => Err(Err::Incomplete(n.map(|u| u / 8 + 1))), + Err(Err::Error(e)) => Err(Err::Error(e.convert())), + Err(Err::Failure(e)) => Err(Err::Failure(e.convert())), + } +} + +#[doc(hidden)] +pub fn bitsc+ErrorConvert, E2: ParseError, P>(input: I, parser: P) -> IResult +where + I: Slice>, + P: Fn((I, usize)) -> IResult<(I, usize), O, E1>, +{ + bits(parser)(input) +} + +/// Counterpart to bits, bytes transforms its bit stream input into a byte slice for the underlying +/// parser, allowing byte-slice parsers to work on bit streams. +/// +/// A partial byte remaining in the input will be ignored and the given parser will start parsing +/// at the next full byte. +/// +/// ```ignore +/// # #[macro_use] extern crate nom; +/// # use nom::IResult; +/// # use nom::combinator::rest; +/// # use nom::sequence::tuple; +/// use nom::bits::{bits, bytes, streaming::take_bits}; +/// +/// fn parse(input: &[u8]) -> IResult<&[u8], (u8, u8, &[u8])> { +/// bits(tuple(( +/// take_bits(4usize), +/// take_bits(8usize), +/// bytes(rest) +/// )))(input) +/// } +/// +/// let input = &[0xde, 0xad, 0xbe, 0xaf]; +/// +/// assert_eq!(parse( input ), Ok(( &[][..], (0xd, 0xea, &[0xbe, 0xaf][..]) ))); +/// ``` +pub fn bytes+ErrorConvert, E2: ParseError<(I, usize)>, P>(parser: P) -> impl Fn((I, usize)) -> IResult<(I, usize), O, E2> +where + I: Slice> + Clone, + P: Fn(I) -> IResult, +{ + move |(input, offset): (I, usize)| { + let inner = if offset % 8 != 0 { + input.slice((1 + offset / 8)..) + } else { + input.slice((offset / 8)..) 
+ }; + let i = (input.clone(), offset); + match parser(inner) { + Ok((rest, res)) => Ok(((rest, 0), res)), + Err(Err::Incomplete(Needed::Unknown)) => Err(Err::Incomplete(Needed::Unknown)), + Err(Err::Incomplete(Needed::Size(sz))) => Err(match sz.checked_mul(8) { + Some(v) => Err::Incomplete(Needed::Size(v)), + None => Err::Failure(E2::from_error_kind(i, ErrorKind::TooLarge)), + }), + Err(Err::Error(e)) => Err(Err::Error(e.convert())), + Err(Err::Failure(e)) => Err(Err::Failure(e.convert())), + } + } +} + +#[doc(hidden)] +pub fn bytesc+ErrorConvert, E2: ParseError<(I, usize)>, P>(input: (I, usize), parser: P) -> IResult<(I, usize), O, E2> +where + I: Slice> + Clone, + P: Fn(I) -> IResult, +{ + bytes(parser)(input) +} diff --git a/third_party/rust/nom/src/bits/streaming.rs b/third_party/rust/nom/src/bits/streaming.rs new file mode 100644 index 0000000000..5ab75961bb --- /dev/null +++ b/third_party/rust/nom/src/bits/streaming.rs @@ -0,0 +1,75 @@ +//! bit level parsers +//! + +use crate::error::{ErrorKind, ParseError}; +use crate::internal::{Err, IResult, Needed}; +use crate::lib::std::ops::{AddAssign, RangeFrom, Shl, Shr, Div}; +use crate::traits::{InputIter, InputLength, Slice, ToUsize}; + +/// generates a parser taking `count` bits +pub fn take>(count: C) -> impl Fn((I, usize)) -> IResult<(I, usize), O, E> +where + I: Slice> + InputIter + InputLength, + C: ToUsize, + O: From + AddAssign + Shl + Shr, +{ + let count = count.to_usize(); + move |(input, bit_offset): (I, usize)| { + if count == 0 { + Ok(((input, bit_offset), 0u8.into())) + } else { + let cnt = (count + bit_offset).div(8); + if input.input_len() * 8 < count + bit_offset { + Err(Err::Incomplete(Needed::Size(count as usize))) + } else { + let mut acc:O = (0 as u8).into(); + let mut offset: usize = bit_offset; + let mut remaining: usize = count; + let mut end_offset: usize = 0; + + for byte in input.iter_elements().take(cnt + 1) { + if remaining == 0 { + break; + } + let val: O = if offset == 0 { + byte.into() + } else { + ((byte << offset) as u8 >> offset).into() + }; + + if remaining < 8 - offset { + acc += val >> (8 - offset - remaining); + end_offset = remaining + offset; + break; + } else { + acc += val << (remaining - (8 - offset)); + remaining -= 8 - offset; + offset = 0; + } + } + Ok(( (input.slice(cnt..), end_offset) , acc)) + } + } + } +} + +/// generates a parser taking `count` bits and comparing them to `pattern` +pub fn tag>(pattern: O, count: C) -> impl Fn((I, usize)) -> IResult<(I, usize), O, E> +where + I: Slice> + InputIter + InputLength + Clone, + C: ToUsize, + O: From + AddAssign + Shl + Shr + PartialEq, +{ + let count = count.to_usize(); + move |input: (I, usize)| { + let inp = input.clone(); + + take(count)(input).and_then(|(i, o)| { + if pattern == o { + Ok((i, o)) + } else { + Err(Err::Error(error_position!(inp, ErrorKind::TagBits))) + } + }) + } +} diff --git a/third_party/rust/nom/src/branch.rs b/third_party/rust/nom/src/branch/macros.rs similarity index 79% rename from third_party/rust/nom/src/branch.rs rename to third_party/rust/nom/src/branch/macros.rs index c7589ede66..4c02461c04 100644 --- a/third_party/rust/nom/src/branch.rs +++ b/third_party/rust/nom/src/branch/macros.rs @@ -57,7 +57,7 @@ /// // Let's make a helper function that returns true when not a space /// // we are required to do this because the `take_while!` macro is limited /// // to idents, so we can't negate `ìs_space` at the call site -/// fn is_not_space(c: u8) -> bool { ! nom::is_space(c) } +/// fn is_not_space(c: u8) -> bool { ! 
nom::character::is_space(c) } /// /// // Our parser will return the `Dragon` variant when matching "dragon", /// // the `Beast` variant when matching "beast" and otherwise it will consume @@ -82,7 +82,7 @@ /// assert_eq!(result, Creature::Beast); /// assert_eq!(rest, b" of Gevaudan"); /// -/// // Given the input "demon hunter" the parser will return `Creature::Unkown(5)` +/// // Given the input "demon hunter" the parser will return `Creature::Unknown(5)` /// // and the rest will be " hunter" /// let (rest, result) = creature(b"demon hunter").unwrap(); /// assert_eq!(result, Creature::Unknown(5)); @@ -131,9 +131,6 @@ /// ); /// ``` /// -/// If you want the `complete!` combinator to be applied to all rules then use the convenience -/// `alt_complete!` macro (see below). -/// /// This behaviour of `alt!` can get especially confusing if multiple alternatives have different /// sizes but a common prefix, like this: /// @@ -165,10 +162,10 @@ /// or empty input (End Of File). If none of them work, `preceded!` will fail and /// "ef" will be tested. /// -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! alt ( (__impl $i:expr, $submac:ident!( $($args:tt)* ), $($rest:tt)* ) => ( - compile_error!("alt uses '|' as separator, not ',': + nom_compile_error!("alt uses '|' as separator, not ',': alt!( tag!(\"abcd\") | @@ -242,7 +239,7 @@ macro_rules! alt ( (__impl $i:expr, __end) => ( { - use $crate::{Err,ErrorKind}; + use $crate::{Err,error::ErrorKind}; let e2 = ErrorKind::Alt; let err = Err::Error(error_position!($i, e2)); @@ -257,108 +254,6 @@ macro_rules! alt ( ); ); -/// Is equivalent to the `alt!` combinator, except that it will not return `Incomplete` -/// when one of the constituting parsers returns `Incomplete`. Instead, it will try the -/// next alternative in the chain. -/// -/// You should use this combinator only if you know you -/// will not receive partial input for the rules you're trying to match (this -/// is almost always the case for parsing programming languages). -/// -/// ```rust,ignore -/// alt_complete!(I -> IResult | I -> IResult | ... | I -> IResult ) => I -> IResult -/// ``` -/// All the parsers must have the same return type. -/// -/// If one of the parsers return `Incomplete`, `alt_complete!` will try the next alternative. -/// If there is no other parser left to try, an `Error` will be returned. -/// -/// ```rust,ignore -/// alt_complete!(parser_1 | parser_2 | ... | parser_n) -/// ``` -/// **For more in depth examples, refer to the documentation of `alt!`** -#[macro_export] -macro_rules! alt_complete ( - // Recursive rules (must include `complete!` around the head) - - ($i:expr, $e:path | $($rest:tt)*) => ( - alt_complete!($i, complete!(call!($e)) | $($rest)*); - ); - - ($i:expr, $subrule:ident!( $($args:tt)*) | $($rest:tt)*) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::Err; - - let i_ = $i.clone(); - let res = complete!(i_, $subrule!($($args)*)); - match res { - Ok((_,_)) => res, - Err(Err::Failure(e)) => Err(Err::Failure(e)), - e => { - let out = alt_complete!($i, $($rest)*); - - if let (&Err(Err::Error(ref e1)), &Err(Err::Error(ref e2))) = (&e, &out) { - // Compile-time hack to ensure that res's E type is not under-specified. - // This all has no effect at runtime. 
- fn unify_types(_: &T, _: &T) {} - unify_types(e1, e2); - } - - out - }, - } - } - ); - - ($i:expr, $subrule:ident!( $($args:tt)* ) => { $gen:expr } | $($rest:tt)+) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::Err; - - let i_ = $i.clone(); - match complete!(i_, $subrule!($($args)*)) { - Ok((i,o)) => Ok((i,$gen(o))), - Err(Err::Failure(e)) => Err(Err::Failure(e)), - e => { - let out = alt_complete!($i, $($rest)*); - - if let (&Err(Err::Error(ref e1)), &Err(Err::Error(ref e2))) = (&e, &out) { - // Compile-time hack to ensure that res's E type is not under-specified. - // This all has no effect at runtime. - fn unify_types(_: &T, _: &T) {} - unify_types(e1, e2); - } - - out - }, - } - } - ); - - ($i:expr, $e:path => { $gen:expr } | $($rest:tt)*) => ( - alt_complete!($i, complete!(call!($e)) => { $gen } | $($rest)*); - ); - - // Tail (non-recursive) rules - - ($i:expr, $e:path => { $gen:expr }) => ( - alt_complete!($i, call!($e) => { $gen }); - ); - - ($i:expr, $subrule:ident!( $($args:tt)* ) => { $gen:expr }) => ( - alt!(__impl $i, complete!($subrule!($($args)*)) => { $gen } | __end) - ); - - ($i:expr, $e:path) => ( - alt_complete!($i, call!($e)); - ); - - ($i:expr, $subrule:ident!( $($args:tt)*)) => ( - alt!(__impl $i, complete!($subrule!($($args)*)) | __end) - ); -); - /// `switch!(I -> IResult, P => I -> IResult | ... | P => I -> IResult ) => I -> IResult` /// choose the next parser depending on the result of the first one, if successful, /// and returns the result of the second parser @@ -366,7 +261,7 @@ macro_rules! alt_complete ( /// ``` /// # #[macro_use] extern crate nom; /// # use nom::Err; -/// # use nom::ErrorKind; +/// # use nom::error::ErrorKind; /// # fn main() { /// named!(sw, /// switch!(take!(4), @@ -435,13 +330,13 @@ macro_rules! alt_complete ( /// ); /// ``` /// -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! switch ( (__impl $i:expr, $submac:ident!( $($args:tt)* ), $( $($p:pat)|+ => $subrule:ident!( $($args2:tt)* ))|* ) => ( { use $crate::lib::std::result::Result::*; use $crate::lib::std::option::Option::*; - use $crate::{Err,Convert,ErrorKind}; + use $crate::{Err,error::ErrorKind}; let i_ = $i.clone(); match map!(i_, $submac!($($args)*), Some) { @@ -469,7 +364,7 @@ macro_rules! switch ( Ok(o) => Ok(o), Err(e) => Err(e), }),*, - _ => Err(Err::convert(Err::Error(error_position!($i, ErrorKind::Switch::)))) + _ => Err(Err::convert(Err::Error(error_position!($i, ErrorKind::Switch)))) } } } @@ -497,7 +392,7 @@ macro_rules! switch ( /// /// ``` /// # #[macro_use] extern crate nom; -/// # use nom::{Err,ErrorKind,Needed}; +/// # use nom::{Err,error::ErrorKind,Needed}; /// # fn main() { /// named!(perm<(&[u8], &[u8], &[u8])>, /// permutation!(tag!("abcd"), tag!("efg"), tag!("hi")) @@ -528,7 +423,7 @@ macro_rules! switch ( /// /// ``` /// # #[macro_use] extern crate nom; -/// # use nom::{Err,ErrorKind,Needed}; +/// # use nom::{Err,error::ErrorKind,Needed}; /// # fn main() { /// named!(perm<&str, (Option<&str>, &str, &str)>, /// permutation!(tag!("abcd")?, tag!("efg"), tag!("hi")) @@ -559,13 +454,13 @@ macro_rules! switch ( /// assert_eq!(perm(e), Err(Err::Incomplete(Needed::Size(4)))); /// # } /// ``` -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! 
permutation ( ($i:expr, $($rest:tt)*) => ( { use $crate::lib::std::result::Result::*; use $crate::lib::std::option::Option::*; - use $crate::{Err,Convert,ErrorKind}; + use $crate::{Err,error::ErrorKind}; let mut res = permutation_init!((), $($rest)*); let mut input = $i; @@ -602,7 +497,7 @@ macro_rules! permutation ( ); #[doc(hidden)] -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! permutation_init ( ((), $e:ident?, $($rest:tt)*) => ( permutation_init!(($crate::lib::std::option::Option::None), $($rest)*) @@ -651,7 +546,7 @@ macro_rules! permutation_init ( ); #[doc(hidden)] -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! succ ( (0, $submac:ident ! ($($rest:tt)*)) => ($submac!(1, $($rest)*)); (1, $submac:ident ! ($($rest:tt)*)) => ($submac!(2, $($rest)*)); @@ -675,43 +570,14 @@ macro_rules! succ ( (19, $submac:ident ! ($($rest:tt)*)) => ($submac!(20, $($rest)*)); ); -// HACK: for some reason, Rust 1.11 does not accept $res.$it in -// permutation_unwrap. This is a bit ugly, but it will have no -// impact on the generated code -#[doc(hidden)] -#[macro_export] -macro_rules! acc ( - (0, $tup:expr) => ($tup.0); - (1, $tup:expr) => ($tup.1); - (2, $tup:expr) => ($tup.2); - (3, $tup:expr) => ($tup.3); - (4, $tup:expr) => ($tup.4); - (5, $tup:expr) => ($tup.5); - (6, $tup:expr) => ($tup.6); - (7, $tup:expr) => ($tup.7); - (8, $tup:expr) => ($tup.8); - (9, $tup:expr) => ($tup.9); - (10, $tup:expr) => ($tup.10); - (11, $tup:expr) => ($tup.11); - (12, $tup:expr) => ($tup.12); - (13, $tup:expr) => ($tup.13); - (14, $tup:expr) => ($tup.14); - (15, $tup:expr) => ($tup.15); - (16, $tup:expr) => ($tup.16); - (17, $tup:expr) => ($tup.17); - (18, $tup:expr) => ($tup.18); - (19, $tup:expr) => ($tup.19); - (20, $tup:expr) => ($tup.20); -); - #[doc(hidden)] -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! permutation_unwrap ( ($it:tt, (), $res:ident, $e:ident?, $($rest:tt)*) => ( - succ!($it, permutation_unwrap!((acc!($it, $res)), $res, $($rest)*)); + succ!($it, permutation_unwrap!(($res.$it), $res, $($rest)*)); ); ($it:tt, (), $res:ident, $e:ident, $($rest:tt)*) => ({ - let res = acc!($it, $res); + let res = $res.$it; if res.is_some() { succ!($it, permutation_unwrap!((res.unwrap()), $res, $($rest)*)) } else { @@ -720,10 +586,10 @@ macro_rules! permutation_unwrap ( }); ($it:tt, (), $res:ident, $submac:ident!( $($args:tt)* )?, $($rest:tt)*) => ( - succ!($it, permutation_unwrap!((acc!($it, $res)), $res, $($rest)*)); + succ!($it, permutation_unwrap!(($res.$it), $res, $($rest)*)); ); ($it:tt, (), $res:ident, $submac:ident!( $($args:tt)* ), $($rest:tt)*) => ({ - let res = acc!($it, $res); + let res = $res.$it; if res.is_some() { succ!($it, permutation_unwrap!((res.unwrap()), $res, $($rest)*)) } else { @@ -732,10 +598,10 @@ macro_rules! permutation_unwrap ( }); ($it:tt, ($($parsed:expr),*), $res:ident, $e:ident?, $($rest:tt)*) => ( - succ!($it, permutation_unwrap!(($($parsed),* , acc!($it, $res)), $res, $($rest)*)); + succ!($it, permutation_unwrap!(($($parsed),* , $res.$it), $res, $($rest)*)); ); ($it:tt, ($($parsed:expr),*), $res:ident, $e:ident, $($rest:tt)*) => ({ - let res = acc!($it, $res); + let res = $res.$it; if res.is_some() { succ!($it, permutation_unwrap!(($($parsed),* , res.unwrap()), $res, $($rest)*)) } else { @@ -744,10 +610,10 @@ macro_rules! 
permutation_unwrap ( }); ($it:tt, ($($parsed:expr),*), $res:ident, $submac:ident!( $($args:tt)* )?, $($rest:tt)*) => ( - succ!($it, permutation_unwrap!(($($parsed),* , acc!($it, $res)), $res, $($rest)*)); + succ!($it, permutation_unwrap!(($($parsed),* , $res.$it), $res, $($rest)*)); ); ($it:tt, ($($parsed:expr),*), $res:ident, $submac:ident!( $($args:tt)* ), $($rest:tt)*) => ({ - let res = acc!($it, $res); + let res = $res.$it; if res.is_some() { succ!($it, permutation_unwrap!(($($parsed),* , res.unwrap()), $res, $($rest)*)) } else { @@ -756,10 +622,10 @@ macro_rules! permutation_unwrap ( }); ($it:tt, ($($parsed:expr),*), $res:ident?, $e:ident) => ( - $crate::lib::std::option::Option::Some(($($parsed),* , { acc!($it, $res) })) + $crate::lib::std::option::Option::Some(($($parsed),* , { $res.$it })) ); ($it:tt, ($($parsed:expr),*), $res:ident, $e:ident) => ({ - let res = acc!($it, $res); + let res = $res.$it; if res.is_some() { $crate::lib::std::option::Option::Some(($($parsed),* , res.unwrap() )) } else { @@ -768,10 +634,10 @@ macro_rules! permutation_unwrap ( }); ($it:tt, ($($parsed:expr),*), $res:ident, $submac:ident!( $($args:tt)* )?) => ( - $crate::lib::std::option::Option::Some(($($parsed),* , { acc!($it, $res) })) + $crate::lib::std::option::Option::Some(($($parsed),* , { $res.$it })) ); ($it:tt, ($($parsed:expr),*), $res:ident, $submac:ident!( $($args:tt)* )) => ({ - let res = acc!($it, $res); + let res = $res.$it; if res.is_some() { $crate::lib::std::option::Option::Some(($($parsed),* , res.unwrap() )) } else { @@ -781,7 +647,7 @@ macro_rules! permutation_unwrap ( ); #[doc(hidden)] -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! permutation_iterator ( ($it:tt,$i:expr, $all_done:expr, $needed:expr, $res:expr, $e:ident?, $($rest:tt)*) => ( permutation_iterator!($it, $i, $all_done, $needed, $res, call!($e), $($rest)*); @@ -798,11 +664,11 @@ macro_rules! permutation_iterator ( use $crate::lib::std::option::Option::*; use $crate::Err; - if acc!($it, $res).is_none() { + if $res.$it.is_none() { match $submac!($i, $($args)*) { Ok((i,o)) => { $i = i; - acc!($it, $res) = Some(o); + $res.$it = Some(o); continue; }, Err(Err::Error(_)) => { @@ -832,11 +698,11 @@ macro_rules! permutation_iterator ( use $crate::lib::std::option::Option::*; use $crate::Err; - if acc!($it, $res).is_none() { + if $res.$it.is_none() { match $submac!($i, $($args)*) { Ok((i,o)) => { $i = i; - acc!($it, $res) = Some(o); + $res.$it = Some(o); continue; }, Err(Err::Error(_)) => { @@ -853,10 +719,16 @@ macro_rules! permutation_iterator ( #[cfg(test)] mod tests { + use crate::error::ErrorKind; #[cfg(feature = "alloc")] - use lib::std::string::{String, ToString}; - use internal::{Err, IResult, Needed}; - use util::ErrorKind; + use crate::{ + error::ParseError, + lib::std::{ + fmt::Debug, + string::{String, ToString} + } + }; + use crate::internal::{Err, IResult, Needed}; // reproduce the tag and take macros, because of module import order macro_rules! tag ( @@ -878,7 +750,6 @@ mod tests { macro_rules! 
tag_bytes ( ($i:expr, $bytes: expr) => ( { - use $crate::need_more; use $crate::lib::std::cmp::min; let len = $i.len(); @@ -887,13 +758,11 @@ mod tests { let reduced = &$i[..m]; let b = &$bytes[..m]; - let res: IResult<_,_,u32> = if reduced != b { - let e: ErrorKind = ErrorKind::Tag::; + let res: IResult<_,_,_> = if reduced != b { + let e: ErrorKind = ErrorKind::Tag; Err(Err::Error(error_position!($i, e))) } else if m < blen { - //let e:Err<&[u8], u32> = need_more($i, Needed::Size(blen)); - //Err(e) - need_more($i, Needed::Size(blen)) + Err(Err::Incomplete(Needed::Size(blen))) } else { Ok((&$i[blen..], reduced)) }; @@ -905,11 +774,9 @@ mod tests { macro_rules! take( ($i:expr, $count:expr) => ( { - use $crate::need_more; - let cnt = $count as usize; - let res:IResult<&[u8],&[u8],u32> = if $i.len() < cnt { - need_more($i, Needed::Size(cnt)) + let res:IResult<&[u8],&[u8],_> = if $i.len() < cnt { + Err(Err::Incomplete(Needed::Size(cnt))) } else { Ok((&$i[cnt..],&$i[0..cnt])) }; @@ -936,6 +803,17 @@ mod tests { } } + #[cfg(feature = "alloc")] + impl ParseError for ErrorStr { + fn from_error_kind(input: I, kind: ErrorKind) -> Self { + ErrorStr(format!("custom error message: ({:?}, {:?})", input, kind)) + } + + fn append(input: I, kind: ErrorKind, other: Self) -> Self { + ErrorStr(format!("custom error message: ({:?}, {:?}) - {:?}", input, kind, other)) + } + } + #[cfg(feature = "alloc")] #[test] fn alt() { @@ -945,11 +823,7 @@ mod tests { #[allow(unused_variables)] fn dont_work(input: &[u8]) -> IResult<&[u8], &[u8], ErrorStr> { - use Context; - Err(Err::Error(Context::Code( - &b""[..], - ErrorKind::Custom(ErrorStr("abcd".to_string())), - ))) + Err(Err::Error(ErrorStr("abcd".to_string()))) } fn work2(input: &[u8]) -> IResult<&[u8], &[u8], ErrorStr> { @@ -980,10 +854,7 @@ mod tests { assert_eq!(alt4(b), Ok((&b""[..], b))); // test the alternative syntax - named!( - alt5, - alt!(tag!("abcd") => { |_| false } | tag!("efgh") => { |_| true }) - ); + named!(alt5, alt!(tag!("abcd") => { |_| false } | tag!("efgh") => { |_| true })); assert_eq!(alt5(a), Ok((&b""[..], false))); assert_eq!(alt5(b), Ok((&b""[..], true))); @@ -1011,20 +882,6 @@ mod tests { assert_eq!(alt1(a), Ok((&b"g"[..], &b"def"[..]))); } - #[test] - fn alt_complete() { - named!(ac<&[u8], &[u8]>, - alt_complete!(tag!("abcd") | tag!("ef") | tag!("ghi") | tag!("kl")) - ); - - let a = &b""[..]; - assert_eq!(ac(a), Err(Err::Error(error_position!(a, ErrorKind::Alt)))); - let a = &b"ef"[..]; - assert_eq!(ac(a), Ok((&b""[..], &b"ef"[..]))); - let a = &b"cde"[..]; - assert_eq!(ac(a), Err(Err::Error(error_position!(a, ErrorKind::Alt)))); - } - #[allow(unused_variables)] #[test] fn switch() { @@ -1042,13 +899,7 @@ mod tests { let b = &b"efghijkl"[..]; assert_eq!(sw(b), Ok((&b""[..], &b"ijkl"[..]))); let c = &b"afghijkl"[..]; - assert_eq!( - sw(c), - Err(Err::Error(error_position!( - &b"afghijkl"[..], - ErrorKind::Switch - ))) - ); + assert_eq!(sw(c), Err(Err::Error(error_position!(&b"afghijkl"[..], ErrorKind::Switch)))); let a = &b"xxxxefgh"[..]; assert_eq!(sw(a), Ok((&b"gh"[..], &b"ef"[..]))); @@ -1056,10 +907,7 @@ mod tests { #[test] fn permutation() { - named!( - perm<(&[u8], &[u8], &[u8])>, - permutation!(tag!("abcd"), tag!("efg"), tag!("hi")) - ); + named!(perm<(&[u8], &[u8], &[u8])>, permutation!(tag!("abcd"), tag!("efg"), tag!("hi"))); let expected = (&b"abcd"[..], &b"efg"[..], &b"hi"[..]); diff --git a/third_party/rust/nom/src/branch/mod.rs b/third_party/rust/nom/src/branch/mod.rs new file mode 100644 index 0000000000..55d3633c8f --- 
/dev/null +++ b/third_party/rust/nom/src/branch/mod.rs @@ -0,0 +1,263 @@ +//! choice combinators + +#[macro_use] +mod macros; + +use crate::error::ErrorKind; +use crate::error::ParseError; +use crate::internal::{Err, IResult}; + +/// helper trait for the [alt()] combinator +/// +/// this trait is implemented for tuples of up to 21 elements +pub trait Alt { + /// tests each parser in the tuple and returns the result of the first one that succeeds + fn choice(&self, input: I) -> IResult; +} + +/// tests a list of parsers one by one until one succeeds +/// +/// It takes as argument a tuple of parsers. +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err,error::ErrorKind, Needed, IResult}; +/// use nom::character::complete::{alpha1, digit1}; +/// use nom::branch::alt; +/// # fn main() { +/// fn parser(input: &str) -> IResult<&str, &str> { +/// alt((alpha1, digit1))(input) +/// }; +/// +/// // the first parser, alpha1, recognizes the input +/// assert_eq!(parser("abc"), Ok(("", "abc"))); +/// +/// // the first parser returns an error, so alt tries the second one +/// assert_eq!(parser("123456"), Ok(("", "123456"))); +/// +/// // both parsers failed, and with the default error type, alt will return the last error +/// assert_eq!(parser(" "), Err(Err::Error(error_position!(" ", ErrorKind::Digit)))); +/// # } +/// ``` +/// +/// with a custom error type, it is possible to have alt return the error of the parser +/// that went the farthest in the input data +pub fn alt, List: Alt>(l: List) -> impl Fn(I) -> IResult { + move |i: I| l.choice(i) +} + +/// helper trait for the [permutation()] combinator +/// +/// this trait is implemented for tuples of up to 21 elements +pub trait Permutation { + /// tries to apply all parsers in the tuple in various orders until all of them succeed + fn permutation(&self, input: I) -> IResult; +} + +/// applies a list of parsers in any order +/// +/// permutation will succeed if all of the child parsers succeeded. +/// It takes as argument a tuple of parsers, and returns a +/// tuple of the parser results. +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err,error::ErrorKind, Needed, IResult}; +/// use nom::character::complete::{alpha1, digit1}; +/// use nom::branch::permutation; +/// # fn main() { +/// fn parser(input: &str) -> IResult<&str, (&str, &str)> { +/// permutation((alpha1, digit1))(input) +/// }; +/// +/// // permutation recognizes alphabetic characters then digit +/// assert_eq!(parser("abc123"), Ok(("", ("abc", "123")))); +/// +/// // but also in inverse order +/// assert_eq!(parser("123abc"), Ok(("", ("abc", "123")))); +/// +/// // it will fail if one of the parsers failed +/// assert_eq!(parser("abc;"), Err(Err::Error(error_position!(";", ErrorKind::Permutation)))); +/// # } +/// ``` +pub fn permutation, List: Permutation>(l: List) -> impl Fn(I) -> IResult { + move |i: I| l.permutation(i) +} + +macro_rules! alt_trait( + ($first:ident $second:ident $($id: ident)+) => ( + alt_trait!(__impl $first $second; $($id)+); + ); + (__impl $($current:ident)*; $head:ident $($id: ident)+) => ( + alt_trait_impl!($($current)*); + + alt_trait!(__impl $($current)* $head; $($id)+); + ); + (__impl $($current:ident)*; $head:ident) => ( + alt_trait_impl!($($current)*); + alt_trait_impl!($($current)* $head); + ); +); + +macro_rules! 
alt_trait_impl( + ($($id:ident)+) => ( + impl< + Input: Clone, Output, Error: ParseError, + $($id: Fn(Input) -> IResult),+ + > Alt for ( $($id),+ ) { + + fn choice(&self, input: Input) -> IResult { + let mut err: Option = None; + alt_trait_inner!(0, self, input, err, $($id)+); + + Err(Err::Error(Error::append(input, ErrorKind::Alt, err.unwrap()))) + } + } + ); +); + +macro_rules! alt_trait_inner( + ($it:tt, $self:expr, $input:expr, $err:expr, $head:ident $($id:ident)+) => ( + match $self.$it($input.clone()) { + Err(Err::Error(e)) => { + $err = Some(match $err.take() { + None => e, + Some(prev) => prev.or(e), + }); + succ!($it, alt_trait_inner!($self, $input, $err, $($id)+)) + }, + res => return res, + } + ); + ($it:tt, $self:expr, $input:expr, $err:expr, $head:ident) => ( + match $self.$it($input.clone()) { + Err(Err::Error(e)) => { + $err = Some(match $err.take() { + None => e, + Some(prev) => prev.or(e), + }); + }, + res => return res, + } + ); +); + +alt_trait!(A B C D E F G H I J K L M N O P Q R S T U); + +macro_rules! permutation_trait( + ($name1:ident $ty1:ident, $name2:ident $ty2:ident) => ( + permutation_trait_impl!($name1 $ty1, $name2 $ty2); + ); + ($name1:ident $ty1:ident, $name2: ident $ty2:ident, $($name:ident $ty:ident),*) => ( + permutation_trait!(__impl $name1 $ty1, $name2 $ty2; $($name $ty),*); + ); + (__impl $($name:ident $ty: ident),+; $name1:ident $ty1:ident, $($name2:ident $ty2:ident),*) => ( + permutation_trait_impl!($($name $ty),+); + permutation_trait!(__impl $($name $ty),+ , $name1 $ty1; $($name2 $ty2),*); + ); + (__impl $($name:ident $ty: ident),+; $name1:ident $ty1:ident) => ( + permutation_trait_impl!($($name $ty),+); + permutation_trait_impl!($($name $ty),+, $name1 $ty1); + ); +); + +macro_rules! permutation_trait_impl( + ($($name:ident $ty: ident),+) => ( + impl< + Input: Clone, $($ty),+ , Error: ParseError, + $($name: Fn(Input) -> IResult),+ + > Permutation for ( $($name),+ ) { + + fn permutation(&self, mut input: Input) -> IResult { + let mut res = permutation_init!((), $($name),+); + + loop { + let mut all_done = true; + permutation_trait_inner!(0, self, input, res, all_done, $($name)+); + + //if we reach that part, it means none of the parsers were able to read anything + if !all_done { + //FIXME: should wrap the error returned by the child parser + return Err(Err::Error(error_position!(input, ErrorKind::Permutation))); + } + break; + } + + if let Some(unwrapped_res) = { permutation_trait_unwrap!(0, (), res, $($name),+) } { + Ok((input, unwrapped_res)) + } else { + Err(Err::Error(error_position!(input, ErrorKind::Permutation))) + } + } + } + ); +); + +macro_rules! permutation_trait_inner( + ($it:tt, $self:expr, $input:ident, $res:expr, $all_done:expr, $head:ident $($id:ident)+) => ({ + if $res.$it.is_none() { + match $self.$it($input.clone()) { + Ok((i,o)) => { + $input = i; + $res.$it = Some(o); + continue; + }, + Err(Err::Error(_)) => { + $all_done = false; + }, + Err(e) => { + return Err(e); + } + }; + } + succ!($it, permutation_trait_inner!($self, $input, $res, $all_done, $($id)+)); + }); + ($it:tt, $self:expr, $input:ident, $res:expr, $all_done:expr, $head:ident) => ({ + if $res.$it.is_none() { + match $self.$it($input.clone()) { + Ok((i,o)) => { + $input = i; + $res.$it = Some(o); + continue; + }, + Err(Err::Error(_)) => { + $all_done = false; + }, + Err(e) => { + return Err(e); + } + }; + } + }); +); + +macro_rules! 
permutation_trait_unwrap ( + ($it:tt, (), $res:ident, $e:ident, $($name:ident),+) => ({ + let res = $res.$it; + if res.is_some() { + succ!($it, permutation_trait_unwrap!((res.unwrap()), $res, $($name),+)) + } else { + $crate::lib::std::option::Option::None + } + }); + + ($it:tt, ($($parsed:expr),*), $res:ident, $e:ident, $($name:ident),+) => ({ + let res = $res.$it; + if res.is_some() { + succ!($it, permutation_trait_unwrap!(($($parsed),* , res.unwrap()), $res, $($name),+)) + } else { + $crate::lib::std::option::Option::None + } + }); + + ($it:tt, ($($parsed:expr),*), $res:ident, $name:ident) => ({ + let res = $res.$it; + if res.is_some() { + $crate::lib::std::option::Option::Some(($($parsed),* , res.unwrap() )) + } else { + $crate::lib::std::option::Option::None + } + }); +); + +permutation_trait!(FnA A, FnB B, FnC C, FnD D, FnE E, FnF F, FnG G, FnH H, FnI I, FnJ J, FnK K, FnL L, FnM M, FnN N, FnO O, FnP P, FnQ Q, FnR R, FnS S, FnT T, FnU U); diff --git a/third_party/rust/nom/src/bytes.rs b/third_party/rust/nom/src/bytes.rs deleted file mode 100644 index d5c0a47d4d..0000000000 --- a/third_party/rust/nom/src/bytes.rs +++ /dev/null @@ -1,1980 +0,0 @@ -//! Byte level parsers and combinators -//! -#[allow(unused_variables)] - -/// `tag!(&[T]: nom::AsBytes) => &[T] -> IResult<&[T], &[T]>` -/// declares a byte array as a suite to recognize -/// -/// consumes the recognized characters -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!(x, tag!("abcd")); -/// let r = x(&b"abcdefgh"[..]); -/// assert_eq!(r, Ok((&b"efgh"[..], &b"abcd"[..]))); -/// # } -/// ``` -#[macro_export] -macro_rules! tag ( - ($i:expr, $tag: expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,Needed,IResult,ErrorKind}; - use $crate::{Compare,CompareResult,InputLength,need_more,InputTake}; - - let res: IResult<_,_> = match ($i).compare($tag) { - CompareResult::Ok => { - let blen = $tag.input_len(); - Ok($i.take_split(blen)) - }, - CompareResult::Incomplete => { - need_more($i, Needed::Size($tag.input_len())) - }, - CompareResult::Error => { - let e:ErrorKind = ErrorKind::Tag; - Err(Err::Error($crate::Context::Code($i, e))) - } - }; - res - } - ); -); - -/// `tag_no_case!(&[T]) => &[T] -> IResult<&[T], &[T]>` -/// declares a case insensitive ascii string as a suite to recognize -/// -/// consumes the recognized characters -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!(test, tag_no_case!("ABcd")); -/// -/// let r = test(&b"aBCdefgh"[..]); -/// assert_eq!(r, Ok((&b"efgh"[..], &b"aBCd"[..]))); -/// # } -/// ``` -#[macro_export] -macro_rules! 
tag_no_case ( - ($i:expr, $tag: expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,Needed,IResult,ErrorKind}; - use $crate::{Compare,CompareResult,InputLength,InputTake}; - - let res: IResult<_,_> = match ($i).compare_no_case($tag) { - CompareResult::Ok => { - let blen = $tag.input_len(); - Ok($i.take_split(blen)) - }, - CompareResult::Incomplete => { - $crate::need_more($i, Needed::Size($tag.input_len())) - }, - CompareResult::Error => { - let e:ErrorKind = ErrorKind::Tag; - Err(Err::Error($crate::Context::Code($i, e))) - } - }; - res - } - ); -); - -/// `is_not!(&[T:AsBytes]) => &[T] -> IResult<&[T], &[T]>` -/// returns the longest list of bytes that do not appear in the provided array -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!( not_space, is_not!( " \t\r\n" ) ); -/// -/// let r = not_space(&b"abcdefgh\nijkl"[..]); -/// assert_eq!(r, Ok((&b"\nijkl"[..], &b"abcdefgh"[..]))); -/// # } -/// ``` -#[macro_export] -macro_rules! is_not ( - ($input:expr, $arr:expr) => ( - { - use $crate::ErrorKind; - use $crate::FindToken; - use $crate::InputTakeAtPosition; - let input = $input; - input.split_at_position1(|c| $arr.find_token(c), ErrorKind::IsNot) - } - ); -); - -/// `is_a!(&[T]) => &[T] -> IResult<&[T], &[T]>` -/// returns the longest list of bytes that appear in the provided array -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!(abcd, is_a!( "abcd" )); -/// -/// let r1 = abcd(&b"aaaaefgh"[..]); -/// assert_eq!(r1, Ok((&b"efgh"[..], &b"aaaa"[..]))); -/// -/// let r2 = abcd(&b"dcbaefgh"[..]); -/// assert_eq!(r2, Ok((&b"efgh"[..], &b"dcba"[..]))); -/// # } -/// ``` -#[macro_export] -macro_rules! is_a ( - ($input:expr, $arr:expr) => ( - { - use $crate::ErrorKind; - use $crate::FindToken; - use $crate::InputTakeAtPosition; - let input = $input; - input.split_at_position1(|c| !$arr.find_token(c), ErrorKind::IsA) - } - ); -); - -/// `escaped!(T -> IResult, U, T -> IResult) => T -> IResult where T: InputIter, -/// U: AsChar` -/// matches a byte string with escaped characters. -/// -/// The first argument matches the normal characters (it must not accept the control character), -/// the second argument is the control character (like `\` in most languages), -/// the third argument matches the escaped characters -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::alpha; -/// # fn main() { -/// named!(esc, escaped!(call!(alpha), '\\', one_of!("\"n\\"))); -/// assert_eq!(esc(&b"abcd;"[..]), Ok((&b";"[..], &b"abcd"[..]))); -/// assert_eq!(esc(&b"ab\\\"cd;"[..]), Ok((&b";"[..], &b"ab\\\"cd"[..]))); -/// # } -/// ``` -#[macro_export] -macro_rules! 
escaped ( - // Internal parser, do not use directly - (__impl $i: expr, $normal:ident!( $($args:tt)* ), $control_char: expr, $escapable:ident!( $($args2:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,Needed,IResult,ErrorKind,need_more}; - use $crate::AsChar; - use $crate::InputIter; - use $crate::InputLength; - use $crate::InputTake; - use $crate::Slice; - - let cl = || -> IResult<_,_,u32> { - use $crate::Offset; - let mut input = $i.clone(); - let control_char = $control_char.as_char(); - - while input.input_len() > 0 { - match $normal!(input, $($args)*) { - Ok((i, _)) => { - if i.input_len() == 0 { - return Ok(($i.slice($i.input_len()..), $i)) - } else { - input = i; - } - }, - Err(Err::Failure(e)) => { - return Err(Err::Failure(e)); - }, - Err(Err::Incomplete(i)) => { - return Err(Err::Incomplete(i)); - }, - Err(Err::Error(_)) => { - // unwrap() should be safe here since index < $i.input_len() - if input.iter_elements().next().unwrap().as_char() == control_char { - let next = control_char.len_utf8(); - if next >= input.input_len() { - return need_more($i, Needed::Size(next - input.input_len() + 1)); - } else { - match $escapable!(input.slice(next..), $($args2)*) { - Ok((i,_)) => { - if i.input_len() == 0 { - return Ok(($i.slice($i.input_len()..), $i)) - } else { - input = i; - } - }, - Err(e) => return Err(e) - } - } - } else { - let index = $i.offset(&input); - return Ok($i.take_split(index)); - } - }, - } - } - let index = $i.offset(&input); - Ok($i.take_split(index)) - }; - - match cl() { - Err(Err::Incomplete(x)) => Err(Err::Incomplete(x)), - Ok((i, o)) => Ok((i, o)), - Err(Err::Error(e)) => { - let e2 = ErrorKind::Escaped::; - Err(Err::Error(error_node_position!($i, e2, e))) - }, - Err(Err::Failure(e)) => { - let e2 = ErrorKind::Escaped::; - Err(Err::Failure(error_node_position!($i, e2, e))) - } - } - } - ); - // Internal parser, do not use directly - (__impl_1 $i:expr, $submac1:ident!( $($args:tt)* ), $control_char: expr, $submac2:ident!( $($args2:tt)*) ) => ( - { - escaped!(__impl $i, $submac1!($($args)*), $control_char, $submac2!($($args2)*)) - } - ); - // Internal parser, do not use directly - (__impl_1 $i:expr, $submac1:ident!( $($args:tt)* ), $control_char: expr, $g:expr) => ( - escaped!(__impl $i, $submac1!($($args)*), $control_char, call!($g)) - ); - ($i:expr, $submac:ident!( $($args:tt)* ), $control_char: expr, $($rest:tt)+) => ( - { - escaped!(__impl_1 $i, $submac!($($args)*), $control_char, $($rest)*) - } - ); - - ($i:expr, $f:expr, $control_char: expr, $($rest:tt)+) => ( - escaped!(__impl_1 $i, call!($f), $control_char, $($rest)*) - ); -); - -/// `escaped_transform!(&[T] -> IResult<&[T], &[T]>, T, &[T] -> IResult<&[T], &[T]>) => &[T] -> IResult<&[T], Vec>` -/// matches a byte string with escaped characters. -/// -/// The first argument matches the normal characters (it must not match the control character), -/// the second argument is the control character (like `\` in most languages), -/// the third argument matches the escaped characters and transforms them. 
-/// -/// As an example, the chain `abc\tdef` could be `abc def` (it also consumes the control character) -/// -/// WARNING: if you do not use the `verbose-errors` feature, this combinator will currently fail to build -/// because of a type inference error -/// -/// # Example -/// ```ignore -/// # #[macro_use] extern crate nom; -/// # use nom::alpha; -/// # use $crate::lib::std::str::from_utf8; -/// # fn main() { -/// fn to_s(i:Vec) -> String { -/// String::from_utf8_lossy(&i).into_owned() -/// } - -/// named!(transform < String >, -/// map!( -/// escaped_transform!(call!(alpha), '\\', -/// alt!( -/// tag!("\\") => { |_| &b"\\"[..] } -/// | tag!("\"") => { |_| &b"\""[..] } -/// | tag!("n") => { |_| &b"\n"[..] } -/// ) -/// ), to_s -/// ) -/// ); -/// assert_eq!(transform(&b"ab\\\"cd"[..]), Ok((&b""[..], String::from("ab\"cd")))); -/// # } -/// ``` -#[macro_export] -macro_rules! escaped_transform ( - // Internal parser, do not use directly - (__impl $i: expr, $normal:ident!( $($args:tt)* ), $control_char: expr, $transform:ident!( $($args2:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind}; - use $crate::AsChar; - use $crate::ExtendInto; - use $crate::InputIter; - use $crate::InputLength; - use $crate::Needed; - use $crate::Slice; - use $crate::need_more; - - let cl = || -> $crate::IResult<_,_,_> { - use $crate::Offset; - let mut index = 0; - let mut res = $i.new_builder(); - let control_char = $control_char.as_char(); - - while index < $i.input_len() { - let remainder = $i.slice(index..); - match $normal!(remainder, $($args)*) { - Ok((i,o)) => { - o.extend_into(&mut res); - if i.input_len() == 0 { - return Ok(($i.slice($i.input_len()..), res)); - } else { - index = $i.offset(&i); - } - }, - Err(Err::Incomplete(i)) => { - return Err(Err::Incomplete(i)) - }, - Err(Err::Failure(e)) => { - return Err(Err::Failure(e)) - }, - Err(Err::Error(_)) => { - // unwrap() should be safe here since index < $i.input_len() - if remainder.iter_elements().next().unwrap().as_char() == control_char { - let next = index + control_char.len_utf8(); - let input_len = $i.input_len(); - - if next >= input_len { - return need_more($i, Needed::Size(next - input_len + 1)); - } else { - match $transform!($i.slice(next..), $($args2)*) { - Ok((i,o)) => { - o.extend_into(&mut res); - if i.input_len() == 0 { - return Ok(($i.slice($i.input_len()..), res)) - } else { - index = $i.offset(&i); - } - }, - Err(Err::Error(e)) => { - return Err(Err::Error(e)) - }, - Err(Err::Incomplete(i)) => { - return Err(Err::Incomplete(i)) - }, - Err(Err::Failure(e)) => { - return Err(Err::Failure(e)) - }, - } - } - } else { - return Ok((remainder, res)) - } - } - } - } - Ok(($i.slice(index..), res)) - }; - match cl() { - Err(Err::Incomplete(x)) => Err(Err::Incomplete(x)), - Ok((i, o)) => Ok((i, o)), - Err(Err::Error(e)) => { - let e2 = ErrorKind::EscapedTransform::; - Err(Err::Error(error_node_position!($i, e2, e))) - }, - Err(Err::Failure(e)) => { - let e2 = ErrorKind::EscapedTransform::; - Err(Err::Failure(error_node_position!($i, e2, e))) - } - } - } - ); - - // Internal parser, do not use directly - (__impl_1 $i:expr, $submac1:ident!( $($args:tt)* ), $control_char: expr, $submac2:ident!( $($args2:tt)*) ) => ( - { - escaped_transform!(__impl $i, $submac1!($($args)*), $control_char, $submac2!($($args2)*)) - } - ); - // Internal parser, do not use directly - (__impl_1 $i:expr, $submac1:ident!( $($args:tt)* ), $control_char: expr, $g:expr) => ( - escaped_transform!(__impl $i, $submac1!($($args)*), 
$control_char, call!($g)) - ); - - ($i:expr, $submac:ident!( $($args:tt)* ), $control_char: expr, $($rest:tt)+) => ( - { - escaped_transform!(__impl_1 $i, $submac!($($args)*), $control_char, $($rest)*) - } - ); - - ($i:expr, $f:expr, $control_char: expr, $($rest:tt)+) => ( - escaped_transform!(__impl_1 $i, call!($f), $control_char, $($rest)*) - ); -); - -/// `take_while!(T -> bool) => &[T] -> IResult<&[T], &[T]>` -/// returns the longest list of bytes until the provided function fails. -/// -/// The argument is either a function `T -> bool` or a macro returning a `bool`. -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::is_alphanumeric; -/// # fn main() { -/// named!( alpha, take_while!( is_alphanumeric ) ); -/// -/// let r = alpha(&b"abcd\nefgh"[..]); -/// assert_eq!(r, Ok((&b"\nefgh"[..], &b"abcd"[..]))); -/// # } -/// ``` -#[macro_export] -macro_rules! take_while ( - ($input:expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::InputTakeAtPosition; - let input = $input; - input.split_at_position(|c| !$submac!(c, $($args)*)) - } - ); - ($input:expr, $f:expr) => ( - take_while!($input, call!($f)); - ); -); - -/// `take_while1!(T -> bool) => &[T] -> IResult<&[T], &[T]>` -/// returns the longest (non empty) list of bytes until the provided function fails. -/// -/// The argument is either a function `&[T] -> bool` or a macro returning a `bool` -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::{Err,ErrorKind}; -/// # use nom::is_alphanumeric; -/// # fn main() { -/// named!( alpha, take_while1!( is_alphanumeric ) ); -/// -/// let r = alpha(&b"abcd\nefgh"[..]); -/// assert_eq!(r, Ok((&b"\nefgh"[..], &b"abcd"[..]))); -/// let r = alpha(&b"\nefgh"[..]); -/// assert_eq!(r, Err(Err::Error(error_position!(&b"\nefgh"[..], ErrorKind::TakeWhile1)))); -/// # } -/// ``` -#[macro_export] -macro_rules! take_while1 ( - ($input:expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::ErrorKind; - use $crate::InputTakeAtPosition; - - let input = $input; - input.split_at_position1(|c| !$submac!(c, $($args)*), ErrorKind::TakeWhile1) - } - ); - ($input:expr, $f:expr) => ( - take_while1!($input, call!($f)); - ); -); - -/// `take_while_m_n!(m: usize, n: usize, T -> bool) => &[T] -> IResult<&[T], &[T]>` -/// returns a list of bytes or characters for which the provided function returns true. -/// the returned list's size will be at least m, and at most n -/// -/// The argument is either a function `T -> bool` or a macro returning a `bool`. -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::is_alphanumeric; -/// # fn main() { -/// named!( alpha, take_while!( is_alphanumeric ) ); -/// -/// let r = alpha(&b"abcd\nefgh"[..]); -/// assert_eq!(r, Ok((&b"\nefgh"[..], &b"abcd"[..]))); -/// # } -/// ``` -#[macro_export] -macro_rules! 
take_while_m_n ( - ($input:expr, $m:expr, $n:expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::lib::std::option::Option::*; - use $crate::IResult; - use $crate::ErrorKind; - - use $crate::{InputLength,InputIter,Slice,Err,Needed,AtEof,InputTake}; - let input = $input; - let m = $m; - let n = $n; - - match input.position(|c| !$submac!(c, $($args)*)) { - Some(idx) => { - if idx >= m { - if idx <= n { - let res:IResult<_,_> = Ok(input.take_split(idx)); - res - } else { - let res:IResult<_,_> = Ok(input.take_split(n)); - res - } - } else { - let e = ErrorKind::TakeWhileMN::; - Err(Err::Error(error_position!(input, e))) - } - }, - None => { - let len = input.input_len(); - if len >= n { - let res:IResult<_,_> = Ok(input.take_split(n)); - res - } else { - if input.at_eof() { - if len >= $m && len <= $n { - let res:IResult<_,_> = Ok((input.slice(len..), input)); - res - } else { - let e = ErrorKind::TakeWhileMN::; - Err(Err::Error(error_position!(input, e))) - } - } else { - let needed = if m > len { - m - len - } else { - 1 - }; - Err(Err::Incomplete(Needed::Size(needed))) - } - } - } - } - } - ); - ($input:expr, $m:expr, $n: expr, $f:expr) => ( - take_while_m_n!($input, $m, $n, call!($f)); - ); -); -/// `take_till!(T -> bool) => &[T] -> IResult<&[T], &[T]>` -/// returns the longest list of bytes until the provided function succeeds -/// -/// The argument is either a function `&[T] -> bool` or a macro returning a `bool`. -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!( till_colon, take_till!(|ch| ch == b':') ); -/// -/// let r = till_colon(&b"abcd:efgh"[..]); -/// assert_eq!(r, Ok((&b":efgh"[..], &b"abcd"[..]))); -/// let r2 = till_colon(&b":abcdefgh"[..]); // empty match is allowed -/// assert_eq!(r2, Ok((&b":abcdefgh"[..], &b""[..]))); -/// # } -/// ``` -#[macro_export] -macro_rules! take_till ( - ($input:expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::InputTakeAtPosition; - let input = $input; - input.split_at_position(|c| $submac!(c, $($args)*)) - } - ); - ($input:expr, $f:expr) => ( - take_till!($input, call!($f)); - ); -); - -/// `take_till1!(T -> bool) => &[T] -> IResult<&[T], &[T]>` -/// returns the longest non empty list of bytes until the provided function succeeds -/// -/// The argument is either a function `&[T] -> bool` or a macro returning a `bool`. -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::{Err,ErrorKind}; -/// # fn main() { -/// named!( till1_colon, take_till1!(|ch| ch == b':') ); -/// -/// let r = till1_colon(&b"abcd:efgh"[..]); -/// assert_eq!(r, Ok((&b":efgh"[..], &b"abcd"[..]))); -/// -/// let r2 = till1_colon(&b":abcdefgh"[..]); // empty match is error -/// assert_eq!(r2, Err(Err::Error(error_position!(&b":abcdefgh"[..], ErrorKind::TakeTill1)))); -/// # } -/// ``` -#[macro_export] -macro_rules! 
take_till1 ( - ($input:expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::{ErrorKind, InputTakeAtPosition}; - let input = $input; - input.split_at_position1(|c| $submac!(c, $($args)*), ErrorKind::TakeTill1) - } - ); - ($input:expr, $f:expr) => ( - take_till1!($input, call!($f)); - ); -); - -/// `take!(nb) => &[T] -> IResult<&[T], &[T]>` -/// generates a parser consuming the specified number of bytes -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// // Desmond parser -/// named!(take5, take!( 5 ) ); -/// -/// let a = b"abcdefgh"; -/// -/// assert_eq!(take5(&a[..]), Ok((&b"fgh"[..], &b"abcde"[..]))); -/// # } -/// ``` -#[macro_export] -macro_rules! take ( - ($i:expr, $count:expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::lib::std::option::Option::*; - use $crate::{Needed,IResult}; - - use $crate::InputIter; - use $crate::InputTake; - let input = $i; - - let cnt = $count as usize; - - let res: IResult<_,_,u32> = match input.slice_index(cnt) { - None => $crate::need_more($i, Needed::Size(cnt)), - Some(index) => Ok(input.take_split(index)) - }; - res - } - ); -); - -/// `take_str!(nb) => &[T] -> IResult<&[T], &str>` -/// same as take! but returning a &str -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!(take5( &[u8] ) -> &str, take_str!( 5 ) ); -/// -/// let a = b"abcdefgh"; -/// -/// assert_eq!(take5(&a[..]), Ok((&b"fgh"[..], "abcde"))); -/// # } -/// ``` -#[macro_export] -macro_rules! take_str ( - ( $i:expr, $size:expr ) => ( - { - let input: &[u8] = $i; - - map_res!(input, take!($size), $crate::lib::std::str::from_utf8) - } - ); -); - -/// `take_until_and_consume!(tag) => &[T] -> IResult<&[T], &[T]>` -/// generates a parser consuming bytes until the specified byte sequence is found, and consumes it -/// -/// The parsed input and the tag are removed from the remainder. -/// (As opposed to `take_until!` that does not remove the tag from the remainder.) -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!(x, take_until_and_consume!("foo")); -/// let r = x(&b"abcd foo efgh"[..]); -/// assert_eq!(r, Ok((&b" efgh"[..], &b"abcd "[..]))); -/// # } -/// ``` -#[macro_export] -macro_rules! take_until_and_consume ( - ($i:expr, $substr:expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::lib::std::option::Option::*; - use $crate::{Needed,IResult,ErrorKind,need_more_err}; - use $crate::InputLength; - use $crate::FindSubstring; - use $crate::Slice; - - let input = $i; - - let res: IResult<_,_> = match input.find_substring($substr) { - None => { - need_more_err(input, Needed::Size($substr.input_len()), ErrorKind::TakeUntilAndConsume::) - }, - Some(index) => { - Ok(($i.slice(index+$substr.input_len()..), $i.slice(0..index))) - }, - }; - res - } - ); -); - -/// `take_until_and_consume1!(tag) => &[T] -> IResult<&[T], &[T]>` -/// generates a parser consuming bytes (at least 1) until the specified byte sequence is found, and consumes it -/// -/// The parsed input and the tag are removed from the remainder. -/// (As opposed to `take_until1!` that does not remove the tag from the remainder.) -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!(x, take_until_and_consume!("foo")); -/// let r = x(&b"abcd foo efgh"[..]); -/// assert_eq!(r, Ok((&b" efgh"[..], &b"abcd "[..]))); -/// # } -/// ``` -#[macro_export] -macro_rules! 
take_until_and_consume1 ( - ($i:expr, $substr:expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::lib::std::option::Option::*; - use $crate::{Err,Needed,IResult,ErrorKind,need_more_err}; - - use $crate::InputLength; - use $crate::FindSubstring; - use $crate::Slice; - let input = $i; - - let res: IResult<_,_> = match input.find_substring($substr) { - None => { - need_more_err(input, Needed::Size(1+$substr.input_len()), ErrorKind::TakeUntilAndConsume1::) - }, - Some(0) => { - let e = ErrorKind::TakeUntilAndConsume1::; - Err(Err::Error(error_position!($i, e))) - } - Some(index) => { - Ok(($i.slice(index+$substr.input_len()..), $i.slice(0..index))) - }, - }; - res - } - ); -); - -/// `take_until!(tag) => &[T] -> IResult<&[T], &[T]>` -/// consumes data until it finds the specified tag. -/// -/// The remainder still contains the tag. -/// (As opposed to `take_until_and_consume!` which removes it from the remainder.) -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!(x, take_until!("foo")); -/// let r = x(&b"abcd foo efgh"[..]); -/// assert_eq!(r, Ok((&b"foo efgh"[..], &b"abcd "[..]))); -/// # } -/// ``` -#[macro_export] -macro_rules! take_until ( - ($i:expr, $substr:expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::lib::std::option::Option::*; - use $crate::{Needed,IResult,need_more_err, ErrorKind}; - - use $crate::InputLength; - use $crate::FindSubstring; - use $crate::InputTake; - let input = $i; - - let res: IResult<_,_> = match input.find_substring($substr) { - None => { - need_more_err($i, Needed::Size($substr.input_len()), ErrorKind::TakeUntil::) - }, - Some(index) => { - Ok($i.take_split(index)) - }, - }; - res - } - ); -); - -/// `take_until1!(tag) => &[T] -> IResult<&[T], &[T]>` -/// consumes data (at least one byte) until it finds the specified tag -/// -/// The remainder still contains the tag. -/// (As opposed to `take_until_and_consume1!` which removes it from the remainder.) -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!(x, take_until1!("foo")); -/// -/// let r = x(&b"abcd foo efgh"[..]); -/// -/// assert_eq!(r, Ok((&b"foo efgh"[..], &b"abcd "[..]))); -/// # } -/// ``` -#[macro_export] -macro_rules! take_until1 ( - ($i:expr, $substr:expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::lib::std::option::Option::*; - use $crate::{Err,Needed,IResult,need_more_err,ErrorKind}; - use $crate::InputLength; - use $crate::FindSubstring; - use $crate::InputTake; - let input = $i; - - let res: IResult<_,_> = match input.find_substring($substr) { - None => { - need_more_err($i, Needed::Size(1 + $substr.input_len()), ErrorKind::TakeUntil::) - }, - Some(0) => { - let e = ErrorKind::TakeUntil::; - Err(Err::Error(error_position!($i, e))) - }, - Some(index) => { - Ok($i.take_split(index)) - }, - }; - res - } - ); -); - -/// `take_until_either_and_consume!(chars) => &[T] -> IResult<&[T], &[T]>` -/// consumes data until it finds any of the specified characters, and consume it -/// -/// The parsed input and the tag are removed from the remainder. -/// (As opposed to `take_until_either!` that does not remove the tag from the remainder.) -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!(x, take_until_either_and_consume!("012")); -/// let r = x(&b"abcd2efgh"[..]); -/// assert_eq!(r, Ok((&b"efgh"[..], &b"abcd"[..]))); -/// # } -/// ``` -#[macro_export] -macro_rules! 
take_until_either_and_consume ( - ($input:expr, $arr:expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::lib::std::option::Option::*; - use $crate::{Needed,IResult,need_more_err,ErrorKind}; - - use $crate::InputLength; - use $crate::InputIter; - use $crate::FindToken; - use $crate::Slice; - let input = $input; - - let res: IResult<_,_> = match input.position(|c| { - $arr.find_token(c) - }) { - Some(n) => { - let mut it = input.slice(n..).iter_indices(); - - // this unwrap() should be safe, since we already know there's a char there - let r1 = it.next().unwrap(); - let r2 = it.next(); - - match r2 { - None => { - // r1 was the last character of the input, and we consumed it - Ok(( input.slice(input.input_len()..), input.slice(..n) )) - }, - Some(l) => { - // index like this because the character we consume might be more than one byte - Ok((input.slice(n+r1.0+l.0..), input.slice(..n))) - } - } - }, - None => { - need_more_err(input, Needed::Size(1), ErrorKind::TakeUntilEitherAndConsume::) - } - }; - res - } - ); -); - -/// `take_until_either_and_consume1!(chars) => &[T] -> IResult<&[T], &[T]>` -/// consumes data (at least one byte) until it finds any of the specified characters, and consume it -/// -/// The parsed input and the tag are removed from the remainder. -/// (As opposed to `take_until_either!` that does not remove the tag from the remainder.) -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!(x, take_until_either_and_consume!("012")); -/// let r = x(&b"abcd2efgh"[..]); -/// assert_eq!(r, Ok((&b"efgh"[..], &b"abcd"[..]))); -/// # } -/// ``` -#[macro_export] -macro_rules! take_until_either_and_consume1 ( - ($input:expr, $arr:expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::lib::std::option::Option::*; - use $crate::{Err,Needed,IResult,need_more_err,ErrorKind}; - - use $crate::InputLength; - use $crate::InputIter; - use $crate::FindToken; - use $crate::Slice; - let input = $input; - - let res: IResult<_,_> = match $input.position(|c| { - $arr.find_token(c) - }) { - Some(0) => Err(Err::Error(error_position!($input, ErrorKind::TakeUntilEitherAndConsume::))), - Some(n) => { - let mut it = input.slice(n..).iter_indices(); - - // this unwrap() should be safe, since we already know there's a char there - let r1 = it.next().unwrap(); - let r2 = it.next(); - - match r2 { - None => { - // r1 was the last character of the input, and we consumed it - Ok(( input.slice(input.input_len()..), input.slice(..n) )) - }, - Some(l) => { - // index like this because the character we consume might be more than one byte - Ok((input.slice(n+r1.0+l.0..), input.slice(..n))) - } - } - }, - None => { - need_more_err($input, Needed::Size(1), ErrorKind::TakeUntilEitherAndConsume::) - } - }; - res - } - ); -); - -/// `take_until_either!(tag) => &[T] -> IResult<&[T], &[T]>` -/// consumes data until it finds any of the specified characters -/// -/// The remainder still contains the tag. -/// (As opposed to `take_until_either_and_consume!` which removes it from the remainder.) -/// -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!(x, take_until_either!("012")); -/// let r = x(&b"abcd2efgh"[..]); -/// assert_eq!(r, Ok((&b"2efgh"[..], &b"abcd"[..]))); -/// # } -/// ``` -#[macro_export] -macro_rules! 
take_until_either ( - ($input:expr, $arr:expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::lib::std::option::Option::*; - use $crate::{Needed,IResult,need_more_err,ErrorKind}; - - use $crate::InputIter; - use $crate::FindToken; - use $crate::InputTake; - - let res: IResult<_,_> = match $input.position(|c| { - $arr.find_token(c) - }) { - Some(n) => { - Ok($input.take_split(n)) - }, - None => { - need_more_err($input, Needed::Size(1), ErrorKind::TakeUntilEither::) - } - }; - res - } - ); -); - -/// `take_until_either1!(tag) => &[T] -> IResult<&[T], &[T]>` -/// consumes data (at least one byte) until it finds any of the specified characters -/// -/// The remainder still contains the tag. -/// (As opposed to `take_until_either_and_consume!` which removes it from the remainder.) -/// -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!(x, take_until_either!("012")); -/// let r = x(&b"abcd2efgh"[..]); -/// assert_eq!(r, Ok((&b"2efgh"[..], &b"abcd"[..]))); -/// # } -/// ``` -#[macro_export] -macro_rules! take_until_either1 ( - ($input:expr, $arr:expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::lib::std::option::Option::*; - use $crate::{Err,Needed,IResult,need_more_err,ErrorKind}; - - use $crate::InputIter; - use $crate::InputTake; - use $crate::FindToken; - - let res: IResult<_,_> = match $input.position(|c| { - $arr.find_token(c) - }) { - Some(0) => Err(Err::Error(error_position!($input, ErrorKind::TakeUntilEither::))), - Some(n) => { - Ok($input.take_split(n)) - }, - None => { - need_more_err($input, Needed::Size(1), ErrorKind::TakeUntilEither::) - } - }; - res - } - ); -); - -/// `length_bytes!(&[T] -> IResult<&[T], nb>) => &[T] -> IResult<&[T], &[T]>` -/// Gets a number from the first parser, then extracts that many bytes from the -/// remaining stream -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::be_u8; -/// # fn main() { -/// named!(with_length, length_bytes!( be_u8 )); -/// let r = with_length(&b"\x05abcdefgh"[..]); -/// assert_eq!(r, Ok((&b"fgh"[..], &b"abcde"[..]))); -/// # } -/// ``` -#[macro_export] -macro_rules! length_bytes( - ($i:expr, $submac:ident!( $($args:tt)* )) => ( - { - length_data!($i, $submac!($($args)*)) - } - ); - ($i:expr, $f:expr) => ( - length_data!($i, call!($f)) - ) -); - -#[cfg(test)] -mod tests { - use internal::{Err, Needed}; - use nom::{alpha, alphanumeric, digit, hex_digit, multispace, oct_digit, space}; - use types::{CompleteByteSlice, CompleteStr}; - use util::ErrorKind; - #[cfg(feature = "alloc")] - #[cfg(feature = "verbose-errors")] - use lib::std::string::String; - #[cfg(feature = "alloc")] - #[cfg(feature = "verbose-errors")] - use lib::std::vec::Vec; - - macro_rules! 
one_of ( - ($i:expr, $inp: expr) => ( - { - use $crate::Err; - use $crate::Slice; - use $crate::AsChar; - use $crate::FindToken; - use $crate::InputIter; - - match ($i).iter_elements().next().map(|c| { - $inp.find_token(c) - }) { - None => Err::<_,_>(Err::Incomplete(Needed::Size(1))), - Some(false) => Err(Err::Error(error_position!($i, ErrorKind::OneOf::))), - //the unwrap should be safe here - Some(true) => Ok(($i.slice(1..), $i.iter_elements().next().unwrap().as_char())) - } - } - ); - ); - - #[test] - fn is_a() { - named!(a_or_b, is_a!(&b"ab"[..])); - - let a = &b"abcd"[..]; - assert_eq!(a_or_b(a), Ok((&b"cd"[..], &b"ab"[..]))); - - let b = &b"bcde"[..]; - assert_eq!(a_or_b(b), Ok((&b"cde"[..], &b"b"[..]))); - - let c = &b"cdef"[..]; - assert_eq!( - a_or_b(c), - Err(Err::Error(error_position!(c, ErrorKind::IsA::))) - ); - - let d = &b"bacdef"[..]; - assert_eq!(a_or_b(d), Ok((&b"cdef"[..], &b"ba"[..]))); - } - - #[test] - fn is_not() { - named!(a_or_b, is_not!(&b"ab"[..])); - - let a = &b"cdab"[..]; - assert_eq!(a_or_b(a), Ok((&b"ab"[..], &b"cd"[..]))); - - let b = &b"cbde"[..]; - assert_eq!(a_or_b(b), Ok((&b"bde"[..], &b"c"[..]))); - - let c = &b"abab"[..]; - assert_eq!( - a_or_b(c), - Err(Err::Error(error_position!(c, ErrorKind::IsNot))) - ); - - let d = &b"cdefba"[..]; - assert_eq!(a_or_b(d), Ok((&b"ba"[..], &b"cdef"[..]))); - - let e = &b"e"[..]; - assert_eq!(a_or_b(e), Err(Err::Incomplete(Needed::Size(1)))); - } - - #[cfg(feature = "alloc")] - #[allow(unused_variables)] - #[test] - fn escaping() { - named!(esc, escaped!(call!(alpha), '\\', one_of!("\"n\\"))); - assert_eq!(esc(&b"abcd;"[..]), Ok((&b";"[..], &b"abcd"[..]))); - assert_eq!(esc(&b"ab\\\"cd;"[..]), Ok((&b";"[..], &b"ab\\\"cd"[..]))); - assert_eq!(esc(&b"\\\"abcd;"[..]), Ok((&b";"[..], &b"\\\"abcd"[..]))); - assert_eq!(esc(&b"\\n;"[..]), Ok((&b";"[..], &b"\\n"[..]))); - assert_eq!(esc(&b"ab\\\"12"[..]), Ok((&b"12"[..], &b"ab\\\""[..]))); - assert_eq!(esc(&b"AB\\"[..]), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!( - esc(&b"AB\\A"[..]), - Err(Err::Error(error_node_position!( - &b"AB\\A"[..], - ErrorKind::Escaped, - error_position!(&b"A"[..], ErrorKind::OneOf) - ))) - ); - - named!(esc2, escaped!(call!(digit), '\\', one_of!("\"n\\"))); - assert_eq!(esc2(&b"12\\nnn34"[..]), Ok((&b"nn34"[..], &b"12\\n"[..]))); - } - - #[cfg(feature = "alloc")] - #[test] - fn escaping_str() { - named!(esc<&str, &str>, escaped!(call!(alpha), '\\', one_of!("\"n\\"))); - assert_eq!(esc("abcd;"), Ok((";", "abcd"))); - assert_eq!(esc("ab\\\"cd;"), Ok((";", "ab\\\"cd"))); - assert_eq!(esc("\\\"abcd;"), Ok((";", "\\\"abcd"))); - assert_eq!(esc("\\n;"), Ok((";", "\\n"))); - assert_eq!(esc("ab\\\"12"), Ok(("12", "ab\\\""))); - assert_eq!(esc("AB\\"), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!( - esc("AB\\A"), - Err(Err::Error(error_node_position!( - "AB\\A", - ErrorKind::Escaped, - error_position!("A", ErrorKind::OneOf) - ))) - ); - - named!(esc2<&str, &str>, escaped!(call!(digit), '\\', one_of!("\"n\\"))); - assert_eq!(esc2("12\\nnn34"), Ok(("nn34", "12\\n"))); - - named!(esc3<&str, &str>, escaped!(call!(alpha), '\u{241b}', one_of!("\"n"))); - assert_eq!(esc3("ab␛ncd;"), Ok((";", "ab␛ncd"))); - } - - #[cfg(feature = "alloc")] - #[test] - fn escaping_complete_str() { - named!(esc, escaped!(call!(alpha), '\\', one_of!("\"n\\"))); - assert_eq!( - esc(CompleteStr("abcd;")), - Ok((CompleteStr(";"), CompleteStr("abcd"))) - ); - assert_eq!( - esc(CompleteStr("ab\\\"cd;")), - Ok((CompleteStr(";"), CompleteStr("ab\\\"cd"))) - ); - 
//assert_eq!(esc("\\\"abcd;"), Ok((";", "\\\"abcd"))); - //assert_eq!(esc("\\n;"), Ok((";", "\\n"))); - //assert_eq!(esc("ab\\\"12"), Ok(("12", "ab\\\""))); - assert_eq!( - esc(CompleteStr("AB\\")), - Err(Err::Error(error_node_position!( - CompleteStr("AB\\"), - ErrorKind::Escaped, - error_position!(CompleteStr("AB\\"), ErrorKind::Eof) - ))) - ); - assert_eq!(esc(CompleteStr("")), Ok((CompleteStr(""), CompleteStr("")))); - /*assert_eq!( - esc("AB\\A"), - Err(Err::Error(error_node_position!( - "AB\\A", - ErrorKind::Escaped, - error_position!("A", ErrorKind::OneOf) - ))) - ); - - named!(esc2<&str, &str>, escaped!(call!(digit), '\\', one_of!("\"n\\"))); - assert_eq!(esc2("12\\nnn34"), Ok(("nn34", "12\\n"))); - - named!(esc3<&str, &str>, escaped!(call!(alpha), '\u{241b}', one_of!("\"n"))); - assert_eq!(esc3("ab␛ncd;"), Ok((";", "ab␛ncd"))); - */ - } - - #[cfg(feature = "alloc")] - #[cfg(feature = "verbose-errors")] - fn to_s(i: Vec) -> String { - String::from_utf8_lossy(&i).into_owned() - } - - #[cfg(feature = "alloc")] - #[cfg(feature = "verbose-errors")] - #[test] - fn escape_transform() { - use lib::std::str; - - named!( - esc, - map!( - escaped_transform!( - alpha, - '\\', - alt!( - tag!("\\") => { |_| &b"\\"[..] } - | tag!("\"") => { |_| &b"\""[..] } - | tag!("n") => { |_| &b"\n"[..] } - ) - ), - to_s - ) - ); - - assert_eq!(esc(&b"abcd;"[..]), Ok((&b";"[..], String::from("abcd")))); - assert_eq!( - esc(&b"ab\\\"cd;"[..]), - Ok((&b";"[..], String::from("ab\"cd"))) - ); - assert_eq!( - esc(&b"\\\"abcd;"[..]), - Ok((&b";"[..], String::from("\"abcd"))) - ); - assert_eq!(esc(&b"\\n;"[..]), Ok((&b";"[..], String::from("\n")))); - assert_eq!( - esc(&b"ab\\\"12"[..]), - Ok((&b"12"[..], String::from("ab\""))) - ); - assert_eq!(esc(&b"AB\\"[..]), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!( - esc(&b"AB\\A"[..]), - Err(Err::Error(error_node_position!( - &b"AB\\A"[..], - ErrorKind::EscapedTransform, - error_position!(&b"A"[..], ErrorKind::Alt) - ))) - ); - - named!( - esc2, - map!( - escaped_transform!( - call!(alpha), - '&', - alt!( - tag!("egrave;") => { |_| str::as_bytes("è") } - | tag!("agrave;") => { |_| str::as_bytes("à") } - ) - ), - to_s - ) - ); - assert_eq!( - esc2(&b"abèDEF;"[..]), - Ok((&b";"[..], String::from("abèDEF"))) - ); - assert_eq!( - esc2(&b"abèDàEF;"[..]), - Ok((&b";"[..], String::from("abèDàEF"))) - ); - } - - #[cfg(feature = "verbose-errors")] - #[test] - fn issue_84() { - let r0 = is_a!(&b"aaaaefgh"[..], "abcd"); - assert_eq!(r0, Ok((&b"efgh"[..], &b"aaaa"[..]))); - let r1 = is_a!(&b"aaaa;"[..], "abcd"); - assert_eq!(r1, Ok((&b";"[..], &b"aaaa"[..]))); - let r2 = is_a!(&b"1;"[..], "123456789"); - assert_eq!(r2, Ok((&b";"[..], &b"1"[..]))); - } - - #[cfg(feature = "std")] - #[test] - fn escape_transform_str() { - named!(esc<&str, String>, escaped_transform!(alpha, '\\', - alt!( - tag!("\\") => { |_| "\\" } - | tag!("\"") => { |_| "\"" } - | tag!("n") => { |_| "\n" } - )) - ); - - assert_eq!(esc("abcd;"), Ok((";", String::from("abcd")))); - assert_eq!(esc("ab\\\"cd;"), Ok((";", String::from("ab\"cd")))); - assert_eq!(esc("\\\"abcd;"), Ok((";", String::from("\"abcd")))); - assert_eq!(esc("\\n;"), Ok((";", String::from("\n")))); - assert_eq!(esc("ab\\\"12"), Ok(("12", String::from("ab\"")))); - assert_eq!(esc("AB\\"), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!( - esc("AB\\A"), - Err(Err::Error(error_node_position!( - "AB\\A", - ErrorKind::EscapedTransform, - error_position!("A", ErrorKind::Alt) - ))) - ); - - named!(esc2<&str, String>, 
escaped_transform!(alpha, '&', - alt!( - tag!("egrave;") => { |_| "è" } - | tag!("agrave;") => { |_| "à" } - )) - ); - assert_eq!(esc2("abèDEF;"), Ok((";", String::from("abèDEF")))); - assert_eq!( - esc2("abèDàEF;"), - Ok((";", String::from("abèDàEF"))) - ); - - named!(esc3<&str, String>, escaped_transform!(alpha, '␛', - alt!( - tag!("0") => { |_| "\0" } | - tag!("n") => { |_| "\n" }))); - assert_eq!(esc3("a␛0bc␛n"), Ok(("", String::from("a\0bc\n")))); - } - - #[test] - fn take_str_test() { - let a = b"omnomnom"; - - assert_eq!(take_str!(&a[..], 5), Ok((&b"nom"[..], "omnom"))); - assert_eq!(take_str!(&a[..], 9), Err(Err::Incomplete(Needed::Size(9)))); - } - - #[test] - fn take_until_and_consume() { - named!(x, take_until_and_consume!("efgh")); - let r = x(&b"abcdabcdefghijkl"[..]); - assert_eq!(r, Ok((&b"ijkl"[..], &b"abcdabcd"[..]))); - - let r2 = x(&b"abcdabcdefgh"[..]); - assert_eq!(r2, Ok((&b""[..], &b"abcdabcd"[..]))); - - let r3 = x(&b"abcefg"[..]); - assert_eq!(r3, Err(Err::Incomplete(Needed::Size(4)))); - - assert_eq!(x(&b"ab"[..]), Err(Err::Incomplete(Needed::Size(4)))); - } - - #[test] - fn take_until_and_consume_complete() { - named!(x, take_until_and_consume!("efgh")); - let r = x(CompleteStr(&"abcdabcdefghijkl"[..])); - assert_eq!( - r, - Ok((CompleteStr(&"ijkl"[..]), CompleteStr(&"abcdabcd"[..]))) - ); - - let r2 = x(CompleteStr(&"abcdabcdefgh"[..])); - assert_eq!(r2, Ok((CompleteStr(&""[..]), CompleteStr(&"abcdabcd"[..])))); - - let r3 = x(CompleteStr(&"abcefg"[..])); - assert_eq!( - r3, - Err(Err::Error(error_position!( - CompleteStr(&"abcefg"[..]), - ErrorKind::TakeUntilAndConsume - ))) - ); - - assert_eq!( - x(CompleteStr(&"ab"[..])), - Err(Err::Error(error_position!( - CompleteStr(&"ab"[..]), - ErrorKind::TakeUntilAndConsume - ))) - ); - } - - #[test] - fn take_until_and_consume1() { - named!(x, take_until_and_consume1!("efgh")); - let r = x(&b"abcdabcdefghijkl"[..]); - assert_eq!(r, Ok((&b"ijkl"[..], &b"abcdabcd"[..]))); - - let r2 = x(&b"abcdabcdefgh"[..]); - assert_eq!(r2, Ok((&b""[..], &b"abcdabcd"[..]))); - - let r3 = x(&b"abcefg"[..]); - assert_eq!(r3, Err(Err::Incomplete(Needed::Size(5)))); - - let r4 = x(&b"efgh"[..]); - assert_eq!( - r4, - Err(Err::Error(error_position!( - &b"efgh"[..], - ErrorKind::TakeUntilAndConsume1 - ))) - ); - - named!(x2, take_until_and_consume1!("")); - let r5 = x2(&b""[..]); - assert_eq!( - r5, - Err(Err::Error(error_position!( - &b""[..], - ErrorKind::TakeUntilAndConsume1 - ))) - ); - - let r6 = x2(&b"a"[..]); - assert_eq!( - r6, - Err(Err::Error(error_position!( - &b"a"[..], - ErrorKind::TakeUntilAndConsume1 - ))) - ); - - let r7 = x(&b"efghi"[..]); - assert_eq!( - r7, - Err(Err::Error(error_position!( - &b"efghi"[..], - ErrorKind::TakeUntilAndConsume1 - ))) - ); - } - - #[test] - fn take_until_and_consume1_complete() { - named!(x, take_until_and_consume1!("efgh")); - let r = x(CompleteStr(&"abcdabcdefghijkl"[..])); - assert_eq!( - r, - Ok((CompleteStr(&"ijkl"[..]), CompleteStr(&"abcdabcd"[..]))) - ); - - let r2 = x(CompleteStr(&"abcdabcdefgh"[..])); - assert_eq!(r2, Ok((CompleteStr(&""[..]), CompleteStr(&"abcdabcd"[..])))); - - let r3 = x(CompleteStr(&"abcefg"[..])); - assert_eq!( - r3, - Err(Err::Error(error_position!( - CompleteStr("abcefg"), - ErrorKind::TakeUntilAndConsume1 - ))) - ); - - let r4 = x(CompleteStr(&"efgh"[..])); - assert_eq!( - r4, - Err(Err::Error(error_position!( - CompleteStr("efgh"), - ErrorKind::TakeUntilAndConsume1 - ))) - ); - - named!(x2, take_until_and_consume1!("")); - let r5 = x2(CompleteStr("")); - 
assert_eq!( - r5, - Err(Err::Error(error_position!( - CompleteStr(""), - ErrorKind::TakeUntilAndConsume1 - ))) - ); - - let r6 = x2(CompleteStr("a")); - assert_eq!( - r6, - Err(Err::Error(error_position!( - CompleteStr("a"), - ErrorKind::TakeUntilAndConsume1 - ))) - ); - - let r7 = x(CompleteStr("efghi")); - assert_eq!( - r7, - Err(Err::Error(error_position!( - CompleteStr("efghi"), - ErrorKind::TakeUntilAndConsume1 - ))) - ); - } - - #[test] - fn take_until_either() { - named!(x, take_until_either!("!.")); - assert_eq!(x(&b"123!abc"[..]), Ok((&b"!abc"[..], &b"123"[..]))); - assert_eq!(x(&b"123"[..]), Err(Err::Incomplete(Needed::Size(1)))); - } - - #[test] - fn take_until_either_complete() { - named!(x, take_until_either!("!.")); - assert_eq!( - x(CompleteStr("123!abc")), - Ok((CompleteStr("!abc"), CompleteStr("123"))) - ); - assert_eq!( - x(CompleteStr("123")), - Err(Err::Error(error_position!( - CompleteStr("123"), - ErrorKind::TakeUntilEither - ))) - ); - } - - #[test] - fn take_until_either_and_consume() { - named!(x, take_until_either_and_consume!("!.")); - assert_eq!(x(&b"123.abc"[..]), Ok((&b"abc"[..], &b"123"[..]))); - } - - #[test] - fn take_until_incomplete() { - named!(y, take_until!("end")); - assert_eq!(y(&b"nd"[..]), Err(Err::Incomplete(Needed::Size(3)))); - assert_eq!(y(&b"123"[..]), Err(Err::Incomplete(Needed::Size(3)))); - assert_eq!(y(&b"123en"[..]), Err(Err::Incomplete(Needed::Size(3)))); - } - - #[test] - fn take_until_complete() { - named!(y, take_until!("end")); - assert_eq!( - y(CompleteStr("nd")), - Err(Err::Error(error_position!( - CompleteStr("nd"), - ErrorKind::TakeUntil - ))) - ); - assert_eq!( - y(CompleteStr("123")), - Err(Err::Error(error_position!( - CompleteStr("123"), - ErrorKind::TakeUntil - ))) - ); - assert_eq!( - y(CompleteStr("123en")), - Err(Err::Error(error_position!( - CompleteStr("123en"), - ErrorKind::TakeUntil - ))) - ); - assert_eq!( - y(CompleteStr("123end")), - Ok((CompleteStr("end"), CompleteStr("123"))) - ); - } - - #[test] - fn take_until_incomplete_s() { - named!(ys<&str, &str>, take_until!("end")); - assert_eq!(ys("123en"), Err(Err::Incomplete(Needed::Size(3)))); - } - - #[test] - fn recognize() { - named!( - x, - recognize!(delimited!(tag!(""))) - ); - let r = x(&b" aaa"[..]); - assert_eq!(r, Ok((&b" aaa"[..], &b""[..]))); - - let semicolon = &b";"[..]; - - named!(ya, recognize!(alpha)); - let ra = ya(&b"abc;"[..]); - assert_eq!(ra, Ok((semicolon, &b"abc"[..]))); - - named!(yd, recognize!(digit)); - let rd = yd(&b"123;"[..]); - assert_eq!(rd, Ok((semicolon, &b"123"[..]))); - - named!(yhd, recognize!(hex_digit)); - let rhd = yhd(&b"123abcDEF;"[..]); - assert_eq!(rhd, Ok((semicolon, &b"123abcDEF"[..]))); - - named!(yod, recognize!(oct_digit)); - let rod = yod(&b"1234567;"[..]); - assert_eq!(rod, Ok((semicolon, &b"1234567"[..]))); - - named!(yan, recognize!(alphanumeric)); - let ran = yan(&b"123abc;"[..]); - assert_eq!(ran, Ok((semicolon, &b"123abc"[..]))); - - named!(ys, recognize!(space)); - let rs = ys(&b" \t;"[..]); - assert_eq!(rs, Ok((semicolon, &b" \t"[..]))); - - named!(yms, recognize!(multispace)); - let rms = yms(&b" \t\r\n;"[..]); - assert_eq!(rms, Ok((semicolon, &b" \t\r\n"[..]))); - } - - #[test] - fn take_while() { - use nom::is_alphabetic; - named!(f, take_while!(is_alphabetic)); - let a = b""; - let b = b"abcd"; - let c = b"abcd123"; - let d = b"123"; - - assert_eq!(f(&a[..]), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!(f(&b[..]), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!(f(&c[..]), Ok((&d[..], 
&b[..]))); - assert_eq!(f(&d[..]), Ok((&d[..], &a[..]))); - } - - #[test] - fn take_while1() { - use nom::is_alphabetic; - named!(f, take_while1!(is_alphabetic)); - let a = b""; - let b = b"abcd"; - let c = b"abcd123"; - let d = b"123"; - - assert_eq!(f(&a[..]), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!(f(&b[..]), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!(f(&c[..]), Ok((&b"123"[..], &b[..]))); - assert_eq!( - f(&d[..]), - Err(Err::Error(error_position!(&d[..], ErrorKind::TakeWhile1))) - ); - } - - #[test] - fn take_while_m_n() { - use nom::is_alphabetic; - named!(x, take_while_m_n!(2, 4, is_alphabetic)); - let a = b""; - let b = b"a"; - let c = b"abc"; - let d = b"abc123"; - let e = b"abcde"; - let f = b"123"; - - assert_eq!(x(&a[..]), Err(Err::Incomplete(Needed::Size(2)))); - assert_eq!(x(&b[..]), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!(x(&c[..]), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!(x(&d[..]), Ok((&b"123"[..], &c[..]))); - assert_eq!(x(&e[..]), Ok((&b"e"[..], &b"abcd"[..]))); - assert_eq!( - x(&f[..]), - Err(Err::Error(error_position!(&f[..], ErrorKind::TakeWhileMN))) - ); - } - - #[test] - fn take_while_m_n_complete() { - use nom::is_alphabetic; - named!(x, take_while_m_n!(2, 4, is_alphabetic)); - let a = CompleteByteSlice(b""); - let b = CompleteByteSlice(b"a"); - let c = CompleteByteSlice(b"abc"); - let d = CompleteByteSlice(b"abc123"); - let e = CompleteByteSlice(b"abcde"); - let f = CompleteByteSlice(b"123"); - - assert_eq!( - x(a), - Err(Err::Error(error_position!(a, ErrorKind::TakeWhileMN))) - ); - assert_eq!( - x(b), - Err(Err::Error(error_position!(b, ErrorKind::TakeWhileMN))) - ); - assert_eq!(x(c), Ok((CompleteByteSlice(b""), c))); - assert_eq!( - x(d), - Ok((CompleteByteSlice(b"123"), CompleteByteSlice(b"abc"))) - ); - assert_eq!( - x(e), - Ok((CompleteByteSlice(b"e"), CompleteByteSlice(b"abcd"))) - ); - assert_eq!( - x(f), - Err(Err::Error(error_position!(f, ErrorKind::TakeWhileMN))) - ); - } - - #[test] - fn take_while1_complete() { - use nom::is_alphabetic; - named!(f, take_while1!(is_alphabetic)); - let a = CompleteByteSlice(b""); - let b = CompleteByteSlice(b"abcd"); - let c = CompleteByteSlice(b"abcd123"); - let d = CompleteByteSlice(b"123"); - - assert_eq!( - f(a), - Err(Err::Error(error_position!(a, ErrorKind::TakeWhile1))) - ); - assert_eq!(f(b), Ok((CompleteByteSlice(b""), b))); - assert_eq!(f(c), Ok((CompleteByteSlice(b"123"), b))); - assert_eq!( - f(d), - Err(Err::Error(error_position!(d, ErrorKind::TakeWhile1))) - ); - - named!(f2, take_while1!(|c: char| c.is_alphabetic())); - let a2 = CompleteStr(""); - assert_eq!( - f2(a2), - Err(Err::Error(error_position!(a2, ErrorKind::TakeWhile1))) - ); - } - - #[test] - fn take_till() { - use nom::is_alphabetic; - named!(f, take_till!(is_alphabetic)); - let a = b""; - let b = b"abcd"; - let c = b"123abcd"; - let d = b"123"; - - assert_eq!(f(&a[..]), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!(f(&b[..]), Ok((&b"abcd"[..], &b""[..]))); - assert_eq!(f(&c[..]), Ok((&b"abcd"[..], &b"123"[..]))); - assert_eq!(f(&d[..]), Err(Err::Incomplete(Needed::Size(1)))); - } - - #[test] - fn take_till_complete() { - use nom::is_alphabetic; - named!(f, take_till!(is_alphabetic)); - let a = CompleteByteSlice(b""); - let b = CompleteByteSlice(b"abcd"); - let c = CompleteByteSlice(b"123abcd"); - let d = CompleteByteSlice(b"123"); - - assert_eq!(f(a), Ok((a, a))); - assert_eq!( - f(b), - Ok((CompleteByteSlice(b"abcd"), CompleteByteSlice(b""))) - ); - assert_eq!( - f(c), - 
Ok((CompleteByteSlice(b"abcd"), CompleteByteSlice(b"123"))) - ); - assert_eq!(f(d), Ok((a, d))); - } - - #[test] - fn take_till1() { - use nom::is_alphabetic; - named!(f, take_till1!(is_alphabetic)); - let a = b""; - let b = b"abcd"; - let c = b"123abcd"; - let d = b"123"; - - assert_eq!(f(&a[..]), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!( - f(&b[..]), - Err(Err::Error(error_position!(&b[..], ErrorKind::TakeTill1))) - ); - assert_eq!(f(&c[..]), Ok((&b"abcd"[..], &b"123"[..]))); - assert_eq!(f(&d[..]), Err(Err::Incomplete(Needed::Size(1)))); - } - - #[test] - fn take_while_utf8() { - named!(f<&str,&str>, take_while!(|c:char| { c != '點' })); - - assert_eq!(f(""), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!(f("abcd"), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!(f("abcd點"), Ok(("點", "abcd"))); - assert_eq!(f("abcd點a"), Ok(("點a", "abcd"))); - - named!(g<&str,&str>, take_while!(|c:char| { c == '點' })); - - assert_eq!(g(""), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!(g("點abcd"), Ok(("abcd", "點"))); - assert_eq!(g("點點點a"), Ok(("a", "點點點"))); - } - - #[test] - fn take_till_utf8() { - named!(f<&str,&str>, take_till!(|c:char| { c == '點' })); - - assert_eq!(f(""), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!(f("abcd"), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!(f("abcd點"), Ok(("點", "abcd"))); - assert_eq!(f("abcd點a"), Ok(("點a", "abcd"))); - - named!(g<&str,&str>, take_till!(|c:char| { c != '點' })); - - assert_eq!(g(""), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!(g("點abcd"), Ok(("abcd", "點"))); - assert_eq!(g("點點點a"), Ok(("a", "點點點"))); - } - - #[test] - fn take_until_either_and_consume_utf8() { - named!(f<&str,&str>, take_until_either_and_consume!("é點")); - - assert_eq!(f(""), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!(f("abcd"), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!(f("abcd點"), Ok(("", "abcd"))); - assert_eq!(f("abcdéa"), Ok(("a", "abcd"))); - assert_eq!(f("點a"), Ok(("a", ""))); - - named!(g<&str,&str>, take_until_either_and_consume1!("é點")); - - assert_eq!(g(""), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!(g("xabcd"), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!(g("xabcd點"), Ok(("", "xabcd"))); - assert_eq!(g("xabcdéa"), Ok(("a", "xabcd"))); - assert_eq!( - g("點xa"), - Err(Err::Error(error_position!( - "點xa", - ErrorKind::TakeUntilEitherAndConsume - ))) - ); - } - - #[test] - fn take_utf8() { - named!(f<&str,&str>, take!(3)); - - assert_eq!(f(""), Err(Err::Incomplete(Needed::Size(3)))); - assert_eq!(f("ab"), Err(Err::Incomplete(Needed::Size(3)))); - assert_eq!(f("點"), Err(Err::Incomplete(Needed::Size(3)))); - assert_eq!(f("ab點cd"), Ok(("cd", "ab點"))); - assert_eq!(f("a點bcd"), Ok(("cd", "a點b"))); - assert_eq!(f("a點b"), Ok(("", "a點b"))); - - named!(g<&str,&str>, take_while!(|c:char| { c == '點' })); - - assert_eq!(g(""), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!(g("點abcd"), Ok(("abcd", "點"))); - assert_eq!(g("點點點a"), Ok(("a", "點點點"))); - } - - #[cfg(nightly)] - use test::Bencher; - - #[cfg(nightly)] - #[bench] - fn take_while_bench(b: &mut Bencher) { - use nom::is_alphabetic; - named!(f, take_while!(is_alphabetic)); - b.iter(|| f(&b"abcdefghijklABCDEejfrfrjgro12aa"[..])); - } - - #[test] - #[cfg(feature = "std")] - fn recognize_take_while() { - use nom::is_alphanumeric; - named!(x, take_while!(is_alphanumeric)); - named!(y, recognize!(x)); - assert_eq!(x(&b"ab."[..]), Ok((&b"."[..], &b"ab"[..]))); - println!("X: {:?}", x(&b"ab"[..])); - assert_eq!(y(&b"ab."[..]), Ok((&b"."[..], 
&b"ab"[..]))); - } - - #[test] - fn length_bytes() { - use nom::le_u8; - named!(x, length_bytes!(le_u8)); - assert_eq!(x(b"\x02..>>"), Ok((&b">>"[..], &b".."[..]))); - assert_eq!(x(b"\x02.."), Ok((&[][..], &b".."[..]))); - assert_eq!(x(b"\x02."), Err(Err::Incomplete(Needed::Size(2)))); - assert_eq!(x(b"\x02"), Err(Err::Incomplete(Needed::Size(2)))); - - named!( - y, - do_parse!(tag!("magic") >> b: length_bytes!(le_u8) >> (b)) - ); - assert_eq!(y(b"magic\x02..>>"), Ok((&b">>"[..], &b".."[..]))); - assert_eq!(y(b"magic\x02.."), Ok((&[][..], &b".."[..]))); - assert_eq!(y(b"magic\x02."), Err(Err::Incomplete(Needed::Size(2)))); - assert_eq!(y(b"magic\x02"), Err(Err::Incomplete(Needed::Size(2)))); - } - - #[cfg(feature = "alloc")] - #[test] - fn case_insensitive() { - named!(test, tag_no_case!("ABcd")); - assert_eq!(test(&b"aBCdefgh"[..]), Ok((&b"efgh"[..], &b"aBCd"[..]))); - assert_eq!(test(&b"abcdefgh"[..]), Ok((&b"efgh"[..], &b"abcd"[..]))); - assert_eq!(test(&b"ABCDefgh"[..]), Ok((&b"efgh"[..], &b"ABCD"[..]))); - assert_eq!(test(&b"ab"[..]), Err(Err::Incomplete(Needed::Size(4)))); - assert_eq!( - test(&b"Hello"[..]), - Err(Err::Error(error_position!(&b"Hello"[..], ErrorKind::Tag))) - ); - assert_eq!( - test(&b"Hel"[..]), - Err(Err::Error(error_position!(&b"Hel"[..], ErrorKind::Tag))) - ); - - named!(test2<&str, &str>, tag_no_case!("ABcd")); - assert_eq!(test2("aBCdefgh"), Ok(("efgh", "aBCd"))); - assert_eq!(test2("abcdefgh"), Ok(("efgh", "abcd"))); - assert_eq!(test2("ABCDefgh"), Ok(("efgh", "ABCD"))); - assert_eq!(test2("ab"), Err(Err::Incomplete(Needed::Size(4)))); - assert_eq!( - test2("Hello"), - Err(Err::Error(error_position!(&"Hello"[..], ErrorKind::Tag))) - ); - assert_eq!( - test2("Hel"), - Err(Err::Error(error_position!(&"Hel"[..], ErrorKind::Tag))) - ); - } - - #[test] - fn tag_fixed_size_array() { - named!(test, tag!([0x42])); - named!(test2, tag!(&[0x42])); - let input = [0x42, 0x00]; - assert_eq!(test(&input), Ok((&b"\x00"[..], &b"\x42"[..]))); - assert_eq!(test2(&input), Ok((&b"\x00"[..], &b"\x42"[..]))); - } -} diff --git a/third_party/rust/nom/src/bytes/complete.rs b/third_party/rust/nom/src/bytes/complete.rs new file mode 100644 index 0000000000..8bf42204ba --- /dev/null +++ b/third_party/rust/nom/src/bytes/complete.rs @@ -0,0 +1,658 @@ +//! 
parsers recognizing bytes streams, complete input version + +use crate::error::ErrorKind; +use crate::error::ParseError; +use crate::internal::{Err, IResult}; +use crate::lib::std::ops::RangeFrom; +use crate::lib::std::result::Result::*; +use crate::traits::{Compare, CompareResult, FindSubstring, FindToken, InputIter, InputLength, InputTake, InputTakeAtPosition, Slice, ToUsize}; + +/// Recognizes a pattern +/// +/// The input data will be compared to the tag combinator's argument and will return the part of +/// the input that matches the argument +/// +/// It will return `Err(Err::Error((_, ErrorKind::Tag)))` if the input doesn't match the pattern +/// # Example +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, &str> { +/// tag("Hello")(s) +/// } +/// +/// assert_eq!(parser("Hello, World!"), Ok((", World!", "Hello"))); +/// assert_eq!(parser("Something"), Err(Err::Error(("Something", ErrorKind::Tag)))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Tag)))); +/// ``` +pub fn tag<'a, T: 'a, Input: 'a, Error: ParseError>(tag: T) -> impl Fn(Input) -> IResult +where + Input: InputTake + Compare, + T: InputLength + Clone, +{ + move |i: Input| { + let tag_len = tag.input_len(); + let t = tag.clone(); + let res: IResult<_, _, Error> = match i.compare(t) { + CompareResult::Ok => Ok(i.take_split(tag_len)), + _ => { + let e: ErrorKind = ErrorKind::Tag; + Err(Err::Error(Error::from_error_kind(i, e))) + } + }; + res + } +} + +/// Recognizes a case insensitive pattern +/// +/// The input data will be compared to the tag combinator's argument and will return the part of +/// the input that matches the argument with no regard to case +/// +/// It will return `Err(Err::Error((_, ErrorKind::Tag)))` if the input doesn't match the pattern +/// # Example +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::bytes::complete::tag_no_case; +/// +/// fn parser(s: &str) -> IResult<&str, &str> { +/// tag_no_case("hello")(s) +/// } +/// +/// assert_eq!(parser("Hello, World!"), Ok((", World!", "Hello"))); +/// assert_eq!(parser("hello, World!"), Ok((", World!", "hello"))); +/// assert_eq!(parser("HeLlO, World!"), Ok((", World!", "HeLlO"))); +/// assert_eq!(parser("Something"), Err(Err::Error(("Something", ErrorKind::Tag)))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Tag)))); +/// ``` +pub fn tag_no_case>(tag: T) -> impl Fn(Input) -> IResult +where + Input: InputTake + Compare, + T: InputLength + Clone, +{ + move |i: Input| { + let tag_len = tag.input_len(); + let t = tag.clone(); + + let res: IResult<_, _, Error> = match (i).compare_no_case(t) { + CompareResult::Ok => Ok(i.take_split(tag_len)), + _ => { + let e: ErrorKind = ErrorKind::Tag; + Err(Err::Error(Error::from_error_kind(i, e))) + } + }; + res + } +} + +/// Parse till certain characters are met +/// +/// The parser will return the longest slice till one of the characters of the combinator's argument are met. 
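As a side note on the two combinators above: `tag` and `tag_no_case` are generic over the input type, so the same code also works on byte slices, not just `&str`. A minimal sketch, assuming the function-combinator API added in this file; the `http_get` parser and its inputs are made up for illustration:

```rust
use nom::bytes::complete::tag_no_case;
use nom::IResult;

// Hypothetical example: accept the method name regardless of case,
// returning the matched bytes in their original casing.
fn http_get(input: &[u8]) -> IResult<&[u8], &[u8]> {
    tag_no_case("GET")(input)
}

fn main() {
    assert_eq!(http_get(b"get /index"), Ok((&b" /index"[..], &b"get"[..])));
    assert_eq!(http_get(b"GeT /index"), Ok((&b" /index"[..], &b"GeT"[..])));
    assert!(http_get(b"POST /index").is_err());
}
```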
+/// +/// It doesn't consume the matched character, +/// +/// It will return a `Err::Error(("", ErrorKind::IsNot))` if the pattern wasn't met +/// # Example +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::bytes::complete::is_not; +/// +/// fn not_space(s: &str) -> IResult<&str, &str> { +/// is_not(" \t\r\n")(s) +/// } +/// +/// assert_eq!(not_space("Hello, World!"), Ok((" World!", "Hello,"))); +/// assert_eq!(not_space("Sometimes\t"), Ok(("\t", "Sometimes"))); +/// assert_eq!(not_space("Nospace"), Ok(("", "Nospace"))); +/// assert_eq!(not_space(""), Err(Err::Error(("", ErrorKind::IsNot)))); +/// ``` +pub fn is_not>(arr: T) -> impl Fn(Input) -> IResult +where + Input: InputTakeAtPosition, + T: InputLength + FindToken<::Item>, +{ + move |i: Input| { + let e: ErrorKind = ErrorKind::IsNot; + i.split_at_position1_complete(|c| arr.find_token(c), e) + } +} + +/// Returns the longest slice of the matches the pattern +/// +/// The parser will return the longest slice consisting of the characters in provided in the +/// combinator's argument +/// +/// It will return a `Err(Err::Error((_, ErrorKind::IsA)))` if the pattern wasn't met +/// +/// # Example +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::bytes::complete::is_a; +/// +/// fn hex(s: &str) -> IResult<&str, &str> { +/// is_a("1234567890ABCDEF")(s) +/// } +/// +/// assert_eq!(hex("123 and voila"), Ok((" and voila", "123"))); +/// assert_eq!(hex("DEADBEEF and others"), Ok((" and others", "DEADBEEF"))); +/// assert_eq!(hex("BADBABEsomething"), Ok(("something", "BADBABE"))); +/// assert_eq!(hex("D15EA5E"), Ok(("", "D15EA5E"))); +/// assert_eq!(hex(""), Err(Err::Error(("", ErrorKind::IsA)))); +/// ``` +pub fn is_a>(arr: T) -> impl Fn(Input) -> IResult +where + Input: InputTakeAtPosition, + T: InputLength + FindToken<::Item>, +{ + move |i: Input| { + let e: ErrorKind = ErrorKind::IsA; + i.split_at_position1_complete(|c| !arr.find_token(c), e) + } +} + +/// Returns the longest input slice (if any) that matches the predicate +/// +/// The parser will return the longest slice that matches the given predicate *(a function that +/// takes the input and returns a bool)* +/// +/// # Example +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::bytes::complete::take_while; +/// use nom::character::is_alphabetic; +/// +/// fn alpha(s: &[u8]) -> IResult<&[u8], &[u8]> { +/// take_while(is_alphabetic)(s) +/// } +/// +/// assert_eq!(alpha(b"latin123"), Ok((&b"123"[..], &b"latin"[..]))); +/// assert_eq!(alpha(b"12345"), Ok((&b"12345"[..], &b""[..]))); +/// assert_eq!(alpha(b"latin"), Ok((&b""[..], &b"latin"[..]))); +/// assert_eq!(alpha(b""), Ok((&b""[..], &b""[..]))); +/// ``` +pub fn take_while>(cond: F) -> impl Fn(Input) -> IResult +where + Input: InputTakeAtPosition, + F: Fn(::Item) -> bool, +{ + move |i: Input| i.split_at_position_complete(|c| !cond(c)) +} + +/// Returns the longest (atleast 1) input slice that matches the predicate +/// +/// The parser will return the longest slice that matches the given predicate *(a function that +/// takes the input and returns a bool)* +/// +/// It will return an `Err(Err::Error((_, ErrorKind::TakeWhile1)))` if the pattern wasn't met +/// +/// # Example +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::bytes::complete::take_while1; 
+/// use nom::character::is_alphabetic; +/// +/// fn alpha(s: &[u8]) -> IResult<&[u8], &[u8]> { +/// take_while1(is_alphabetic)(s) +/// } +/// +/// assert_eq!(alpha(b"latin123"), Ok((&b"123"[..], &b"latin"[..]))); +/// assert_eq!(alpha(b"latin"), Ok((&b""[..], &b"latin"[..]))); +/// assert_eq!(alpha(b"12345"), Err(Err::Error((&b"12345"[..], ErrorKind::TakeWhile1)))); +/// ``` +pub fn take_while1>(cond: F) -> impl Fn(Input) -> IResult +where + Input: InputTakeAtPosition, + F: Fn(::Item) -> bool, +{ + move |i: Input| { + let e: ErrorKind = ErrorKind::TakeWhile1; + i.split_at_position1_complete(|c| !cond(c), e) + } +} + +/// Returns the longest (m <= len <= n) input slice that matches the predicate +/// +/// The parser will return the longest slice that matches the given predicate *(a function that +/// takes the input and returns a bool)* +/// +/// It will return an `Err::Error((_, ErrorKind::TakeWhileMN))` if the pattern wasn't met or is out +/// of range (m <= len <= n) +/// +/// # Example +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::bytes::complete::take_while_m_n; +/// use nom::character::is_alphabetic; +/// +/// fn short_alpha(s: &[u8]) -> IResult<&[u8], &[u8]> { +/// take_while_m_n(3, 6, is_alphabetic)(s) +/// } +/// +/// assert_eq!(short_alpha(b"latin123"), Ok((&b"123"[..], &b"latin"[..]))); +/// assert_eq!(short_alpha(b"lengthy"), Ok((&b"y"[..], &b"length"[..]))); +/// assert_eq!(short_alpha(b"latin"), Ok((&b""[..], &b"latin"[..]))); +/// assert_eq!(short_alpha(b"ed"), Err(Err::Error((&b"ed"[..], ErrorKind::TakeWhileMN)))); +/// assert_eq!(short_alpha(b"12345"), Err(Err::Error((&b"12345"[..], ErrorKind::TakeWhileMN)))); +/// ``` +pub fn take_while_m_n>(m: usize, n: usize, cond: F) -> impl Fn(Input) -> IResult +where + Input: InputTake + InputIter + InputLength + Slice>, + F: Fn(::Item) -> bool, +{ + move |i: Input| { + let input = i; + + match input.position(|c| !cond(c)) { + Some(idx) => { + if idx >= m { + if idx <= n { + let res: IResult<_, _, Error> = if let Some(index) = input.slice_index(idx) { + Ok(input.take_split(index)) + } else { + Err(Err::Error(Error::from_error_kind(input, ErrorKind::TakeWhileMN))) + }; + res + } else { + let res: IResult<_, _, Error> = if let Some(index) = input.slice_index(n) { + Ok(input.take_split(index)) + } else { + Err(Err::Error(Error::from_error_kind(input, ErrorKind::TakeWhileMN))) + }; + res + } + } else { + let e = ErrorKind::TakeWhileMN; + Err(Err::Error(Error::from_error_kind(input, e))) + } + } + None => { + let len = input.input_len(); + if len >= n { + match input.slice_index(n) { + Some(index) => Ok(input.take_split(index)), + None => Err(Err::Error(Error::from_error_kind(input, ErrorKind::TakeWhileMN))) + } + } else { + if len >= m && len <= n { + let res: IResult<_, _, Error> = Ok((input.slice(len..), input)); + res + } else { + let e = ErrorKind::TakeWhileMN; + Err(Err::Error(Error::from_error_kind(input, e))) + } + } + } + } + } +} + +/// Returns the longest input slice (if any) till a predicate is met +/// +/// The parser will return the longest slice till the given predicate *(a function that +/// takes the input and returns a bool)* +/// +/// # Example +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::bytes::complete::take_till; +/// +/// fn till_colon(s: &str) -> IResult<&str, &str> { +/// take_till(|c| c == ':')(s) +/// } +/// +/// assert_eq!(till_colon("latin:123"), Ok((":123", 
"latin"))); +/// assert_eq!(till_colon(":empty matched"), Ok((":empty matched", ""))); //allowed +/// assert_eq!(till_colon("12345"), Ok(("", "12345"))); +/// assert_eq!(till_colon(""), Ok(("", ""))); +/// ``` +pub fn take_till>(cond: F) -> impl Fn(Input) -> IResult +where + Input: InputTakeAtPosition, + F: Fn(::Item) -> bool, +{ + move |i: Input| i.split_at_position_complete(|c| cond(c)) +} + +/// Returns the longest (atleast 1) input slice till a predicate is met +/// +/// The parser will return the longest slice till the given predicate *(a function that +/// takes the input and returns a bool)* +/// +/// It will return `Err(Err::Error((_, ErrorKind::TakeTill1)))` if the input is empty or the +/// predicate matches the first input +/// +/// # Example +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::bytes::complete::take_till1; +/// +/// fn till_colon(s: &str) -> IResult<&str, &str> { +/// take_till1(|c| c == ':')(s) +/// } +/// +/// assert_eq!(till_colon("latin:123"), Ok((":123", "latin"))); +/// assert_eq!(till_colon(":empty matched"), Err(Err::Error((":empty matched", ErrorKind::TakeTill1)))); +/// assert_eq!(till_colon("12345"), Ok(("", "12345"))); +/// assert_eq!(till_colon(""), Err(Err::Error(("", ErrorKind::TakeTill1)))); +/// ``` +pub fn take_till1>(cond: F) -> impl Fn(Input) -> IResult +where + Input: InputTakeAtPosition, + F: Fn(::Item) -> bool, +{ + move |i: Input| { + let e: ErrorKind = ErrorKind::TakeTill1; + i.split_at_position1_complete(|c| cond(c), e) + } +} + +/// Returns an input slice containing the first N input elements (Input[..N]) +/// +/// It will return `Err(Err::Error((_, ErrorKind::Eof)))` if the input is shorter than the argument +/// +/// # Example +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::bytes::complete::take; +/// +/// fn take6(s: &str) -> IResult<&str, &str> { +/// take(6usize)(s) +/// } +/// +/// assert_eq!(take6("1234567"), Ok(("7", "123456"))); +/// assert_eq!(take6("things"), Ok(("", "things"))); +/// assert_eq!(take6("short"), Err(Err::Error(("short", ErrorKind::Eof)))); +/// assert_eq!(take6(""), Err(Err::Error(("", ErrorKind::Eof)))); +/// ``` +pub fn take>(count: C) -> impl Fn(Input) -> IResult +where + Input: InputIter + InputTake, + C: ToUsize, +{ + let c = count.to_usize(); + move |i: Input| match i.slice_index(c) { + None => Err(Err::Error(Error::from_error_kind(i, ErrorKind::Eof))), + Some(index) => Ok(i.take_split(index)), + } +} + +/// Returns the longest input slice till it matches the pattern. +/// +/// It doesn't consume the pattern. 
It will return `Err(Err::Error((_, ErrorKind::TakeUntil)))` +/// if the pattern wasn't met +/// +/// # Example +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::bytes::complete::take_until; +/// +/// fn until_eof(s: &str) -> IResult<&str, &str> { +/// take_until("eof")(s) +/// } +/// +/// assert_eq!(until_eof("hello, worldeof"), Ok(("eof", "hello, world"))); +/// assert_eq!(until_eof("hello, world"), Err(Err::Error(("hello, world", ErrorKind::TakeUntil)))); +/// assert_eq!(until_eof(""), Err(Err::Error(("", ErrorKind::TakeUntil)))); +/// ``` +pub fn take_until>(tag: T) -> impl Fn(Input) -> IResult +where + Input: InputTake + FindSubstring, + T: InputLength + Clone, +{ + move |i: Input| { + let t = tag.clone(); + let res: IResult<_, _, Error> = match i.find_substring(t) { + None => Err(Err::Error(Error::from_error_kind(i, ErrorKind::TakeUntil))), + Some(index) => Ok(i.take_split(index)), + }; + res + } +} + +/// Matches a byte string with escaped characters. +/// +/// * The first argument matches the normal characters (it must not accept the control character), +/// * the second argument is the control character (like `\` in most languages), +/// * the third argument matches the escaped characters +/// +/// # Example +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// # use nom::character::complete::digit1; +/// use nom::bytes::complete::escaped; +/// use nom::character::complete::one_of; +/// +/// fn esc(s: &str) -> IResult<&str, &str> { +/// escaped(digit1, '\\', one_of(r#""n\"#))(s) +/// } +/// +/// assert_eq!(esc("123;"), Ok((";", "123"))); +/// assert_eq!(esc(r#"12\"34;"#), Ok((";", r#"12\"34"#))); +/// ``` +/// +pub fn escaped(normal: F, control_char: char, escapable: G) -> impl Fn(Input) -> IResult +where + Input: Clone + crate::traits::Offset + InputLength + InputTake + InputTakeAtPosition + Slice> + InputIter, + ::Item: crate::traits::AsChar, + F: Fn(Input) -> IResult, + G: Fn(Input) -> IResult, + Error: ParseError, +{ + use crate::traits::AsChar; + + move |input: Input| { + let mut i = input.clone(); + + while i.input_len() > 0 { + match normal(i.clone()) { + Ok((i2, _)) => { + if i2.input_len() == 0 { + return Ok((input.slice(input.input_len()..), input)); + } else { + i = i2; + } + } + Err(Err::Error(_)) => { + // unwrap() should be safe here since index < $i.input_len() + if i.iter_elements().next().unwrap().as_char() == control_char { + let next = control_char.len_utf8(); + if next >= i.input_len() { + return Err(Err::Error(Error::from_error_kind(input, ErrorKind::Escaped))); + } else { + match escapable(i.slice(next..)) { + Ok((i2, _)) => { + if i2.input_len() == 0 { + return Ok((input.slice(input.input_len()..), input)); + } else { + i = i2; + } + } + Err(e) => return Err(e), + } + } + } else { + let index = input.offset(&i); + if index == 0 { + return Err(Err::Error(Error::from_error_kind(input, ErrorKind::Escaped))); + } + return Ok(input.take_split(index)); + } + } + Err(e) => { + return Err(e); + } + } + } + + Ok((input.slice(input.input_len()..), input)) + } +} + +#[doc(hidden)] +pub fn escapedc(i: Input, normal: F, control_char: char, escapable: G) -> IResult +where + Input: Clone + crate::traits::Offset + InputLength + InputTake + InputTakeAtPosition + Slice> + InputIter, + ::Item: crate::traits::AsChar, + F: Fn(Input) -> IResult, + G: Fn(Input) -> IResult, + Error: ParseError, +{ + escaped(normal, control_char, escapable)(i) +} + +/// 
Matches a byte string with escaped characters. +/// +/// * The first argument matches the normal characters (it must not match the control character), +/// * the second argument is the control character (like `\` in most languages), +/// * the third argument matches the escaped characters and transforms them. +/// +/// As an example, the chain `abc\tdef` could be `abc def` (it also consumes the control character) +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// # use std::str::from_utf8; +/// use nom::bytes::complete::escaped_transform; +/// use nom::character::complete::alpha1; +/// +/// fn parser(input: &str) -> IResult<&str, String> { +/// escaped_transform( +/// alpha1, +/// '\\', +/// |i:&str| alt!(i, +/// tag!("\\") => { |_| "\\" } +/// | tag!("\"") => { |_| "\"" } +/// | tag!("n") => { |_| "\n" } +/// ) +/// )(input) +/// } +/// +/// assert_eq!(parser("ab\\\"cd"), Ok(("", String::from("ab\"cd")))); +/// ``` +#[cfg(feature = "alloc")] +pub fn escaped_transform( + normal: F, + control_char: char, + transform: G, +) -> impl Fn(Input) -> IResult +where + Input: Clone + crate::traits::Offset + InputLength + InputTake + InputTakeAtPosition + Slice> + InputIter, + Input: crate::traits::ExtendInto, + O1: crate::traits::ExtendInto, + O2: crate::traits::ExtendInto, + Output: core::iter::Extend<::Item>, + Output: core::iter::Extend<::Item>, + Output: core::iter::Extend<::Item>, + ::Item: crate::traits::AsChar, + F: Fn(Input) -> IResult, + G: Fn(Input) -> IResult, + Error: ParseError, +{ + use crate::traits::AsChar; + + move |input: Input| { + let mut index = 0; + let mut res = input.new_builder(); + + let i = input.clone(); + + while index < i.input_len() { + let remainder = i.slice(index..); + match normal(remainder.clone()) { + Ok((i2, o)) => { + o.extend_into(&mut res); + if i2.input_len() == 0 { + return Ok((i.slice(i.input_len()..), res)); + } else { + index = input.offset(&i2); + } + } + Err(Err::Error(_)) => { + // unwrap() should be safe here since index < $i.input_len() + if remainder.iter_elements().next().unwrap().as_char() == control_char { + let next = index + control_char.len_utf8(); + let input_len = input.input_len(); + + if next >= input_len { + return Err(Err::Error(Error::from_error_kind(remainder, ErrorKind::EscapedTransform))); + } else { + match transform(i.slice(next..)) { + Ok((i2, o)) => { + o.extend_into(&mut res); + if i2.input_len() == 0 { + return Ok((i.slice(i.input_len()..), res)); + } else { + index = input.offset(&i2); + } + } + Err(e) => return Err(e), + } + } + } else { + if index == 0 { + return Err(Err::Error(Error::from_error_kind(remainder, ErrorKind::EscapedTransform))); + } + return Ok((remainder, res)); + } + } + Err(e) => return Err(e), + } + } + Ok((input.slice(index..), res)) + } +} + +#[doc(hidden)] +#[cfg(feature = "alloc")] +pub fn escaped_transformc( + i: Input, + normal: F, + control_char: char, + transform: G, +) -> IResult +where + Input: Clone + crate::traits::Offset + InputLength + InputTake + InputTakeAtPosition + Slice> + InputIter, + Input: crate::traits::ExtendInto, + O1: crate::traits::ExtendInto, + O2: crate::traits::ExtendInto, + Output: core::iter::Extend<::Item>, + Output: core::iter::Extend<::Item>, + Output: core::iter::Extend<::Item>, + ::Item: crate::traits::AsChar, + F: Fn(Input) -> IResult, + G: Fn(Input) -> IResult, + Error: ParseError, +{ + escaped_transform(normal, control_char, transform)(i) + +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn 
complete_take_while_m_n_utf8_all_matching() { + let result: IResult<&str, &str> = super::take_while_m_n(1, 4, |c: char| c.is_alphabetic())("øn"); + assert_eq!(result, Ok(("", "øn"))); + } + + #[test] + fn complete_take_while_m_n_utf8_all_matching_substring() { + let result: IResult<&str, &str> = super::take_while_m_n(1, 1, |c: char| c.is_alphabetic())("øn"); + assert_eq!(result, Ok(("n", "ø"))); + } +} diff --git a/third_party/rust/nom/src/bytes/macros.rs b/third_party/rust/nom/src/bytes/macros.rs new file mode 100644 index 0000000000..1e24ba8851 --- /dev/null +++ b/third_party/rust/nom/src/bytes/macros.rs @@ -0,0 +1,938 @@ +//! Byte level parsers and combinators +//! +#[allow(unused_variables)] + +/// `tag!(&[T]: nom::AsBytes) => &[T] -> IResult<&[T], &[T]>` +/// declares a byte array as a suite to recognize +/// +/// consumes the recognized characters +/// +/// # Example +/// ``` +/// # #[macro_use] extern crate nom; +/// # fn main() { +/// named!(x, tag!("abcd")); +/// let r = x(&b"abcdefgh"[..]); +/// assert_eq!(r, Ok((&b"efgh"[..], &b"abcd"[..]))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! tag ( + ($i:expr, $tag: expr) => ({ + $crate::bytes::streaming::tag($tag)($i) + }); +); + +/// `tag_no_case!(&[T]) => &[T] -> IResult<&[T], &[T]>` +/// declares a case insensitive ascii string as a suite to recognize +/// +/// consumes the recognized characters +/// +/// # Example +/// ``` +/// # #[macro_use] extern crate nom; +/// # fn main() { +/// named!(test, tag_no_case!("ABcd")); +/// +/// let r = test(&b"aBCdefgh"[..]); +/// assert_eq!(r, Ok((&b"efgh"[..], &b"aBCd"[..]))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! tag_no_case ( + ($i:expr, $tag: expr) => ({ + $crate::bytes::streaming::tag_no_case($tag)($i) + }); +); + +/// `is_not!(&[T:AsBytes]) => &[T] -> IResult<&[T], &[T]>` +/// returns the longest list of bytes that do not appear in the provided array +/// +/// # Example +/// ``` +/// # #[macro_use] extern crate nom; +/// # fn main() { +/// named!( not_space, is_not!( " \t\r\n" ) ); +/// +/// let r = not_space(&b"abcdefgh\nijkl"[..]); +/// assert_eq!(r, Ok((&b"\nijkl"[..], &b"abcdefgh"[..]))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! is_not ( + ($input:expr, $arr:expr) => ({ + $crate::bytes::streaming::is_not($arr)($input) + }); +); + +/// `is_a!(&[T]) => &[T] -> IResult<&[T], &[T]>` +/// returns the longest list of bytes that appear in the provided array +/// +/// # Example +/// ``` +/// # #[macro_use] extern crate nom; +/// # fn main() { +/// named!(abcd, is_a!( "abcd" )); +/// +/// let r1 = abcd(&b"aaaaefgh"[..]); +/// assert_eq!(r1, Ok((&b"efgh"[..], &b"aaaa"[..]))); +/// +/// let r2 = abcd(&b"dcbaefgh"[..]); +/// assert_eq!(r2, Ok((&b"efgh"[..], &b"dcba"[..]))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! is_a ( + ($input:expr, $arr:expr) => ({ + $crate::bytes::streaming::is_a($arr)($input) + }); +); + +/// `escaped!(T -> IResult, U, T -> IResult) => T -> IResult where T: InputIter, +/// U: AsChar` +/// matches a byte string with escaped characters. 
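Each macro in this file is a thin wrapper that forwards to the corresponding function combinator, so the macro form and the function form of a parser behave identically. A minimal sketch of that equivalence; the `prefix_macro` and `prefix_fn` names are made up for illustration:

```rust
#[macro_use]
extern crate nom;

use nom::IResult;

// `tag!` expands to a call to the streaming function combinator,
// so both parsers below accept and reject exactly the same inputs.
named!(prefix_macro, tag!("HTTP/"));

fn prefix_fn(input: &[u8]) -> IResult<&[u8], &[u8]> {
    nom::bytes::streaming::tag("HTTP/")(input)
}

fn main() {
    let input = &b"HTTP/1.1"[..];
    assert_eq!(prefix_macro(input), Ok((&b"1.1"[..], &b"HTTP/"[..])));
    assert_eq!(prefix_fn(input), Ok((&b"1.1"[..], &b"HTTP/"[..])));
    // On truncated input both forms report Err::Incomplete rather than an
    // error, because the macro wraps the streaming implementation.
}
```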
+/// +/// The first argument matches the normal characters (it must not accept the control character), +/// the second argument is the control character (like `\` in most languages), +/// the third argument matches the escaped characters +/// +/// # Example +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::character::complete::digit1; +/// # fn main() { +/// named!(esc, escaped!(call!(digit1), '\\', one_of!("\"n\\"))); +/// assert_eq!(esc(&b"123;"[..]), Ok((&b";"[..], &b"123"[..]))); +/// assert_eq!(esc(&b"12\\\"34;"[..]), Ok((&b";"[..], &b"12\\\"34"[..]))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! escaped ( + ($i:expr, $submac1:ident!( $($args:tt)* ), $control_char: expr, $submac2:ident!( $($args2:tt)*) ) => ( + { + escaped!($i, |i| $submac1!(i, $($args)*), $control_char, |i| $submac2!(i, $($args2)*)) + } + ); + ($i:expr, $normal:expr, $control_char: expr, $submac2:ident!( $($args2:tt)*) ) => ( + { + escaped!($i, $normal, $control_char, |i| $submac2!(i, $($args2)*)) + } + ); + ($i:expr, $submac1:ident!( $($args:tt)* ), $control_char: expr, $escapable:expr ) => ( + { + escaped!($i, |i| $submac1!(i, $($args)*), $control_char, $escapable) + } + ); + ($i:expr, $normal:expr, $control_char: expr, $escapable:expr) => ( + { + $crate::bytes::complete::escapedc($i, $normal, $control_char, $escapable) + } + ); +); + +/// `escaped_transform!(&[T] -> IResult<&[T], &[T]>, T, &[T] -> IResult<&[T], &[T]>) => &[T] -> IResult<&[T], Vec>` +/// matches a byte string with escaped characters. +/// +/// The first argument matches the normal characters (it must not match the control character), +/// the second argument is the control character (like `\` in most languages), +/// the third argument matches the escaped characters and transforms them. +/// +/// As an example, the chain `abc\tdef` could be `abc def` (it also consumes the control character) +/// +/// # Example +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::character::complete::alpha1; +/// # use nom::lib::std::str::from_utf8; +/// # fn main() { +/// fn to_s(i:Vec) -> String { +/// String::from_utf8_lossy(&i).into_owned() +/// } +/// +/// named!(transform < String >, +/// map!( +/// escaped_transform!(call!(alpha1), '\\', +/// alt!( +/// tag!("\\") => { |_| &b"\\"[..] } +/// | tag!("\"") => { |_| &b"\""[..] } +/// | tag!("n") => { |_| &b"\n"[..] } +/// ) +/// ), to_s +/// ) +/// ); +/// assert_eq!(transform(&b"ab\\\"cd"[..]), Ok((&b""[..], String::from("ab\"cd")))); +/// # } +/// ``` +#[cfg(feature = "alloc")] +#[macro_export(local_inner_macros)] +macro_rules! escaped_transform ( + ($i:expr, $submac1:ident!( $($args:tt)* ), $control_char: expr, $submac2:ident!( $($args2:tt)*) ) => ( + { + escaped_transform!($i, |i| $submac1!(i, $($args)*), $control_char, |i| $submac2!(i, $($args2)*)) + } + ); + ($i:expr, $normal:expr, $control_char: expr, $submac2:ident!( $($args2:tt)*) ) => ( + { + escaped_transform!($i, $normal, $control_char, |i| $submac2!(i, $($args2)*)) + } + ); + ($i:expr, $submac1:ident!( $($args:tt)* ), $control_char: expr, $transform:expr ) => ( + { + escaped_transform!($i, |i| $submac1!(i, $($args)*), $control_char, $transform) + } + ); + ($i:expr, $normal:expr, $control_char: expr, $transform:expr) => ( + { + $crate::bytes::complete::escaped_transformc($i, $normal, $control_char, $transform) + } + ); +); + +/// `take_while!(T -> bool) => &[T] -> IResult<&[T], &[T]>` +/// returns the longest list of bytes until the provided function fails. 
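One behaviour of these wrappers worth calling out: because `take_while!` delegates to the streaming implementation, a run of matching bytes that extends to the end of the input yields `Incomplete` rather than a success. A minimal sketch; the `word` parser is made up for illustration:

```rust
#[macro_use]
extern crate nom;

use nom::character::is_alphanumeric;
use nom::{Err, Needed};

// Hypothetical parser: consume a run of alphanumeric bytes.
named!(word, take_while!(is_alphanumeric));

fn main() {
    // A non-matching byte terminates the run, so the parser can succeed.
    assert_eq!(word(&b"abc1 rest"[..]), Ok((&b" rest"[..], &b"abc1"[..])));
    // If the input ends while the predicate still holds, the streaming
    // implementation asks for more data instead of returning what it has.
    assert_eq!(word(&b"abc1"[..]), Err(Err::Incomplete(Needed::Size(1))));
}
```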
+/// +/// The argument is either a function `T -> bool` or a macro returning a `bool`. +/// +/// # Example +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::character::is_alphanumeric; +/// # fn main() { +/// named!( alpha, take_while!( is_alphanumeric ) ); +/// +/// let r = alpha(&b"abcd\nefgh"[..]); +/// assert_eq!(r, Ok((&b"\nefgh"[..], &b"abcd"[..]))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! take_while ( + ($input:expr, $submac:ident!( $($args:tt)* )) => ({ + let res: $crate::IResult<_, _, _> = take_while!($input, (|c| $submac!(c, $($args)*))); + res + }); + ($input:expr, $f:expr) => ( + $crate::bytes::streaming::take_while($f)($input) + ); +); + +/// `take_while1!(T -> bool) => &[T] -> IResult<&[T], &[T]>` +/// returns the longest (non empty) list of bytes until the provided function fails. +/// +/// The argument is either a function `&[T] -> bool` or a macro returning a `bool` +/// +/// # Example +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::{Err,error::ErrorKind}; +/// # use nom::character::is_alphanumeric; +/// # fn main() { +/// named!( alpha, take_while1!( is_alphanumeric ) ); +/// +/// let r = alpha(&b"abcd\nefgh"[..]); +/// assert_eq!(r, Ok((&b"\nefgh"[..], &b"abcd"[..]))); +/// let r = alpha(&b"\nefgh"[..]); +/// assert_eq!(r, Err(Err::Error(error_position!(&b"\nefgh"[..], ErrorKind::TakeWhile1)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! take_while1 ( + ($input:expr, $submac:ident!( $($args:tt)* )) => ({ + let res: $crate::IResult<_, _, _> = take_while1!($input, (|c| $submac!(c, $($args)*))); + res + }); + ($input:expr, $f:expr) => ( + $crate::bytes::streaming::take_while1($f)($input) + ); +); + +/// `take_while_m_n!(m: usize, n: usize, T -> bool) => &[T] -> IResult<&[T], &[T]>` +/// returns a list of bytes or characters for which the provided function returns true. +/// the returned list's size will be at least m, and at most n +/// +/// The argument is either a function `T -> bool` or a macro returning a `bool`. +/// +/// # Example +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::character::is_alphanumeric; +/// # fn main() { +/// named!( alpha, take_while_m_n!(3, 6, is_alphanumeric ) ); +/// +/// let r = alpha(&b"abcd\nefgh"[..]); +/// assert_eq!(r, Ok((&b"\nefgh"[..], &b"abcd"[..]))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! take_while_m_n ( + ($input:expr, $m:expr, $n: expr, $submac:ident!( $($args:tt)* )) => ({ + let res: $crate::IResult<_, _, _> = take_while_m_n!($input, $m, $n, (|c| $submac!(c, $($args)*))); + res + }); + ($input:expr, $m:expr, $n:expr, $f:expr) => ( + $crate::bytes::streaming::take_while_m_n($m, $n, $f)($input) + ); +); + +/// `take_till!(T -> bool) => &[T] -> IResult<&[T], &[T]>` +/// returns the longest list of bytes until the provided function succeeds +/// +/// The argument is either a function `&[T] -> bool` or a macro returning a `bool`. +/// +/// # Example +/// ``` +/// # #[macro_use] extern crate nom; +/// # fn main() { +/// named!( till_colon, take_till!(|ch| ch == b':') ); +/// +/// let r = till_colon(&b"abcd:efgh"[..]); +/// assert_eq!(r, Ok((&b":efgh"[..], &b"abcd"[..]))); +/// let r2 = till_colon(&b":abcdefgh"[..]); // empty match is allowed +/// assert_eq!(r2, Ok((&b":abcdefgh"[..], &b""[..]))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! 
take_till ( + ($input:expr, $submac:ident!( $($args:tt)* )) => ({ + let res: $crate::IResult<_, _, _> = take_till!($input, (|c| $submac!(c, $($args)*))); + res + }); + ($input:expr, $f:expr) => ( + $crate::bytes::streaming::take_till($f)($input) + ); +); + +/// `take_till1!(T -> bool) => &[T] -> IResult<&[T], &[T]>` +/// returns the longest non empty list of bytes until the provided function succeeds +/// +/// The argument is either a function `&[T] -> bool` or a macro returning a `bool`. +/// +/// # Example +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind}; +/// # fn main() { +/// named!( till1_colon, take_till1!(|ch| ch == b':') ); +/// +/// let r = till1_colon(&b"abcd:efgh"[..]); +/// assert_eq!(r, Ok((&b":efgh"[..], &b"abcd"[..]))); +/// +/// let r2 = till1_colon(&b":abcdefgh"[..]); // empty match is error +/// assert_eq!(r2, Err(Err::Error(error_position!(&b":abcdefgh"[..], ErrorKind::TakeTill1)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! take_till1 ( + ($input:expr, $submac:ident!( $($args:tt)* )) => ({ + let res: $crate::IResult<_, _, _> = take_till1!($input, (|c| $submac!(c, $($args)*))); + res + }); + ($input:expr, $f:expr) => ( + $crate::bytes::streaming::take_till1($f)($input) + ); +); + +/// `take!(nb) => &[T] -> IResult<&[T], &[T]>` +/// generates a parser consuming the specified number of bytes +/// +/// # Example +/// ``` +/// # #[macro_use] extern crate nom; +/// # fn main() { +/// // Desmond parser +/// named!(take5, take!( 5 ) ); +/// +/// let a = b"abcdefgh"; +/// +/// assert_eq!(take5(&a[..]), Ok((&b"fgh"[..], &b"abcde"[..]))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! take ( + ($i:expr, $count:expr) => ({ + let c = $count as usize; + let res: $crate::IResult<_,_,_> = $crate::bytes::streaming::take(c)($i); + res + }); +); + +/// `take_str!(nb) => &[T] -> IResult<&[T], &str>` +/// same as take! but returning a &str +/// +/// # Example +/// ``` +/// # #[macro_use] extern crate nom; +/// # fn main() { +/// named!(take5( &[u8] ) -> &str, take_str!( 5 ) ); +/// +/// let a = b"abcdefgh"; +/// +/// assert_eq!(take5(&a[..]), Ok((&b"fgh"[..], "abcde"))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! take_str ( + ( $i:expr, $size:expr ) => ( + { + let input: &[u8] = $i; + + map_res!(input, take!($size), $crate::lib::std::str::from_utf8) + } + ); +); + +/// `take_until!(tag) => &[T] -> IResult<&[T], &[T]>` +/// consumes data until it finds the specified tag. +/// +/// The remainder still contains the tag. +/// +/// # Example +/// ``` +/// # #[macro_use] extern crate nom; +/// # fn main() { +/// named!(x, take_until!("foo")); +/// let r = x(&b"abcd foo efgh"[..]); +/// assert_eq!(r, Ok((&b"foo efgh"[..], &b"abcd "[..]))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! take_until ( + ($i:expr, $substr:expr) => ({ + let res: $crate::IResult<_,_,_> = $crate::bytes::streaming::take_until($substr)($i); + res + }); +); + +/// `take_until1!(tag) => &[T] -> IResult<&[T], &[T]>` +/// consumes data (at least one byte) until it finds the specified tag +/// +/// The remainder still contains the tag. +/// +/// # Example +/// ``` +/// # #[macro_use] extern crate nom; +/// # fn main() { +/// named!(x, take_until1!("foo")); +/// +/// let r = x(&b"abcd foo efgh"[..]); +/// +/// assert_eq!(r, Ok((&b"foo efgh"[..], &b"abcd "[..]))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! 
take_until1 ( + ($i:expr, $substr:expr) => ( + { + use $crate::lib::std::result::Result::*; + use $crate::lib::std::option::Option::*; + use $crate::{Err,Needed,IResult,error::ErrorKind}; + use $crate::InputLength; + use $crate::FindSubstring; + use $crate::InputTake; + let input = $i; + + let res: IResult<_,_> = match input.find_substring($substr) { + None => { + Err(Err::Incomplete(Needed::Size(1 + $substr.input_len()))) + }, + Some(0) => { + let e = ErrorKind::TakeUntil; + Err(Err::Error(error_position!($i, e))) + }, + Some(index) => { + Ok($i.take_split(index)) + }, + }; + res + } + ); +); + +#[cfg(test)] +mod tests { + use crate::internal::{Err, Needed, IResult}; + #[cfg(feature = "alloc")] + use crate::lib::std::string::String; + #[cfg(feature = "alloc")] + use crate::lib::std::vec::Vec; + use crate::character::streaming::{alpha1 as alpha, alphanumeric1 as alphanumeric, digit1 as digit, hex_digit1 as hex_digit, multispace1 as multispace, oct_digit1 as oct_digit, space1 as space}; + use crate::error::ErrorKind; + use crate::character::is_alphabetic; + + #[cfg(feature = "alloc")] + macro_rules! one_of ( + ($i:expr, $inp: expr) => ( + { + use $crate::Err; + use $crate::Slice; + use $crate::AsChar; + use $crate::FindToken; + use $crate::InputIter; + + match ($i).iter_elements().next().map(|c| { + $inp.find_token(c) + }) { + None => Err::<_,_>(Err::Incomplete(Needed::Size(1))), + Some(false) => Err(Err::Error(error_position!($i, ErrorKind::OneOf))), + //the unwrap should be safe here + Some(true) => Ok(($i.slice(1..), $i.iter_elements().next().unwrap().as_char())) + } + } + ); + ); + + #[test] + fn is_a() { + named!(a_or_b, is_a!(&b"ab"[..])); + + let a = &b"abcd"[..]; + assert_eq!(a_or_b(a), Ok((&b"cd"[..], &b"ab"[..]))); + + let b = &b"bcde"[..]; + assert_eq!(a_or_b(b), Ok((&b"cde"[..], &b"b"[..]))); + + let c = &b"cdef"[..]; + assert_eq!(a_or_b(c), Err(Err::Error(error_position!(c, ErrorKind::IsA)))); + + let d = &b"bacdef"[..]; + assert_eq!(a_or_b(d), Ok((&b"cdef"[..], &b"ba"[..]))); + } + + #[test] + fn is_not() { + named!(a_or_b, is_not!(&b"ab"[..])); + + let a = &b"cdab"[..]; + assert_eq!(a_or_b(a), Ok((&b"ab"[..], &b"cd"[..]))); + + let b = &b"cbde"[..]; + assert_eq!(a_or_b(b), Ok((&b"bde"[..], &b"c"[..]))); + + let c = &b"abab"[..]; + assert_eq!(a_or_b(c), Err(Err::Error(error_position!(c, ErrorKind::IsNot)))); + + let d = &b"cdefba"[..]; + assert_eq!(a_or_b(d), Ok((&b"ba"[..], &b"cdef"[..]))); + + let e = &b"e"[..]; + assert_eq!(a_or_b(e), Err(Err::Incomplete(Needed::Size(1)))); + } + + #[cfg(feature = "alloc")] + #[allow(unused_variables)] + #[test] + fn escaping() { + named!(esc, escaped!(call!(alpha), '\\', one_of!("\"n\\"))); + assert_eq!(esc(&b"abcd;"[..]), Ok((&b";"[..], &b"abcd"[..]))); + assert_eq!(esc(&b"ab\\\"cd;"[..]), Ok((&b";"[..], &b"ab\\\"cd"[..]))); + assert_eq!(esc(&b"\\\"abcd;"[..]), Ok((&b";"[..], &b"\\\"abcd"[..]))); + assert_eq!(esc(&b"\\n;"[..]), Ok((&b";"[..], &b"\\n"[..]))); + assert_eq!(esc(&b"ab\\\"12"[..]), Ok((&b"12"[..], &b"ab\\\""[..]))); + assert_eq!(esc(&b"AB\\"[..]), Err(Err::Error(error_position!(&b"AB\\"[..], ErrorKind::Escaped)))); + assert_eq!( + esc(&b"AB\\A"[..]), + Err(Err::Error(error_node_position!( + &b"AB\\A"[..], + ErrorKind::Escaped, + error_position!(&b"A"[..], ErrorKind::OneOf) + ))) + ); + + named!(esc2, escaped!(call!(digit), '\\', one_of!("\"n\\"))); + assert_eq!(esc2(&b"12\\nnn34"[..]), Ok((&b"nn34"[..], &b"12\\n"[..]))); + } + + #[cfg(feature = "alloc")] + #[test] + fn escaping_str() { + named!(esc<&str, &str>, 
escaped!(call!(alpha), '\\', one_of!("\"n\\"))); + assert_eq!(esc("abcd;"), Ok((";", "abcd"))); + assert_eq!(esc("ab\\\"cd;"), Ok((";", "ab\\\"cd"))); + assert_eq!(esc("\\\"abcd;"), Ok((";", "\\\"abcd"))); + assert_eq!(esc("\\n;"), Ok((";", "\\n"))); + assert_eq!(esc("ab\\\"12"), Ok(("12", "ab\\\""))); + assert_eq!(esc("AB\\"), Err(Err::Error(error_position!("AB\\", ErrorKind::Escaped)))); + assert_eq!( + esc("AB\\A"), + Err(Err::Error(error_node_position!( + "AB\\A", + ErrorKind::Escaped, + error_position!("A", ErrorKind::OneOf) + ))) + ); + + named!(esc2<&str, &str>, escaped!(call!(digit), '\\', one_of!("\"n\\"))); + assert_eq!(esc2("12\\nnn34"), Ok(("nn34", "12\\n"))); + + named!(esc3<&str, &str>, escaped!(call!(alpha), '\u{241b}', one_of!("\"n"))); + assert_eq!(esc3("ab␛ncd;"), Ok((";", "ab␛ncd"))); + } + + #[cfg(feature = "alloc")] + fn to_s(i: Vec) -> String { + String::from_utf8_lossy(&i).into_owned() + } + + #[cfg(feature = "alloc")] + #[test] + fn escape_transform() { + use crate::lib::std::str; + + named!( + esc, + map!( + escaped_transform!( + alpha, + '\\', + alt!( + tag!("\\") => { |_| &b"\\"[..] } + | tag!("\"") => { |_| &b"\""[..] } + | tag!("n") => { |_| &b"\n"[..] } + ) + ), + to_s + ) + ); + + assert_eq!(esc(&b"abcd;"[..]), Ok((&b";"[..], String::from("abcd")))); + assert_eq!(esc(&b"ab\\\"cd;"[..]), Ok((&b";"[..], String::from("ab\"cd")))); + assert_eq!(esc(&b"\\\"abcd;"[..]), Ok((&b";"[..], String::from("\"abcd")))); + assert_eq!(esc(&b"\\n;"[..]), Ok((&b";"[..], String::from("\n")))); + assert_eq!(esc(&b"ab\\\"12"[..]), Ok((&b"12"[..], String::from("ab\"")))); + assert_eq!(esc(&b"AB\\"[..]), Err(Err::Error(error_position!(&b"\\"[..], ErrorKind::EscapedTransform)))); + assert_eq!( + esc(&b"AB\\A"[..]), + Err(Err::Error(error_node_position!( + &b"AB\\A"[..], + ErrorKind::EscapedTransform, + error_position!(&b"A"[..], ErrorKind::Alt) + ))) + ); + + named!( + esc2, + map!( + escaped_transform!( + call!(alpha), + '&', + alt!( + tag!("egrave;") => { |_| str::as_bytes("è") } + | tag!("agrave;") => { |_| str::as_bytes("à") } + ) + ), + to_s + ) + ); + assert_eq!(esc2(&b"abèDEF;"[..]), Ok((&b";"[..], String::from("abèDEF")))); + assert_eq!(esc2(&b"abèDàEF;"[..]), Ok((&b";"[..], String::from("abèDàEF")))); + } + + #[cfg(feature = "std")] + #[test] + fn escape_transform_str() { + named!(esc<&str, String>, escaped_transform!(alpha, '\\', + alt!( + tag!("\\") => { |_| "\\" } + | tag!("\"") => { |_| "\"" } + | tag!("n") => { |_| "\n" } + )) + ); + + assert_eq!(esc("abcd;"), Ok((";", String::from("abcd")))); + assert_eq!(esc("ab\\\"cd;"), Ok((";", String::from("ab\"cd")))); + assert_eq!(esc("\\\"abcd;"), Ok((";", String::from("\"abcd")))); + assert_eq!(esc("\\n;"), Ok((";", String::from("\n")))); + assert_eq!(esc("ab\\\"12"), Ok(("12", String::from("ab\"")))); + assert_eq!(esc("AB\\"), Err(Err::Error(error_position!("\\", ErrorKind::EscapedTransform)))); + assert_eq!( + esc("AB\\A"), + Err(Err::Error(error_node_position!( + "AB\\A", + ErrorKind::EscapedTransform, + error_position!("A", ErrorKind::Alt) + ))) + ); + + named!(esc2<&str, String>, escaped_transform!(alpha, '&', + alt!( + tag!("egrave;") => { |_| "è" } + | tag!("agrave;") => { |_| "à" } + )) + ); + assert_eq!(esc2("abèDEF;"), Ok((";", String::from("abèDEF")))); + assert_eq!(esc2("abèDàEF;"), Ok((";", String::from("abèDàEF")))); + + named!(esc3<&str, String>, escaped_transform!(alpha, '␛', + alt!( + tag!("0") => { |_| "\0" } | + tag!("n") => { |_| "\n" }))); + assert_eq!(esc3("a␛0bc␛n"), Ok(("", String::from("a\0bc\n")))); + 
} + + #[test] + fn take_str_test() { + let a = b"omnomnom"; + + let res: IResult<_,_,(&[u8], ErrorKind)> = take_str!(&a[..], 5u32); + assert_eq!(res, Ok((&b"nom"[..], "omnom"))); + + let res: IResult<_,_,(&[u8], ErrorKind)> = take_str!(&a[..], 9u32); + assert_eq!(res, Err(Err::Incomplete(Needed::Size(9)))); + } + + #[test] + fn take_until_incomplete() { + named!(y, take_until!("end")); + assert_eq!(y(&b"nd"[..]), Err(Err::Incomplete(Needed::Size(3)))); + assert_eq!(y(&b"123"[..]), Err(Err::Incomplete(Needed::Size(3)))); + assert_eq!(y(&b"123en"[..]), Err(Err::Incomplete(Needed::Size(3)))); + } + + #[test] + fn take_until_incomplete_s() { + named!(ys<&str, &str>, take_until!("end")); + assert_eq!(ys("123en"), Err(Err::Incomplete(Needed::Size(3)))); + } + + #[test] + fn recognize() { + named!(x, recognize!(delimited!(tag!("")))); + let r = x(&b" aaa"[..]); + assert_eq!(r, Ok((&b" aaa"[..], &b""[..]))); + + let semicolon = &b";"[..]; + + named!(ya, recognize!(alpha)); + let ra = ya(&b"abc;"[..]); + assert_eq!(ra, Ok((semicolon, &b"abc"[..]))); + + named!(yd, recognize!(digit)); + let rd = yd(&b"123;"[..]); + assert_eq!(rd, Ok((semicolon, &b"123"[..]))); + + named!(yhd, recognize!(hex_digit)); + let rhd = yhd(&b"123abcDEF;"[..]); + assert_eq!(rhd, Ok((semicolon, &b"123abcDEF"[..]))); + + named!(yod, recognize!(oct_digit)); + let rod = yod(&b"1234567;"[..]); + assert_eq!(rod, Ok((semicolon, &b"1234567"[..]))); + + named!(yan, recognize!(alphanumeric)); + let ran = yan(&b"123abc;"[..]); + assert_eq!(ran, Ok((semicolon, &b"123abc"[..]))); + + named!(ys, recognize!(space)); + let rs = ys(&b" \t;"[..]); + assert_eq!(rs, Ok((semicolon, &b" \t"[..]))); + + named!(yms, recognize!(multispace)); + let rms = yms(&b" \t\r\n;"[..]); + assert_eq!(rms, Ok((semicolon, &b" \t\r\n"[..]))); + } + + #[test] + fn take_while() { + named!(f, take_while!(is_alphabetic)); + let a = b""; + let b = b"abcd"; + let c = b"abcd123"; + let d = b"123"; + + assert_eq!(f(&a[..]), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!(f(&b[..]), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!(f(&c[..]), Ok((&d[..], &b[..]))); + assert_eq!(f(&d[..]), Ok((&d[..], &a[..]))); + } + + #[test] + fn take_while1() { + named!(f, take_while1!(is_alphabetic)); + let a = b""; + let b = b"abcd"; + let c = b"abcd123"; + let d = b"123"; + + assert_eq!(f(&a[..]), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!(f(&b[..]), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!(f(&c[..]), Ok((&b"123"[..], &b[..]))); + assert_eq!(f(&d[..]), Err(Err::Error(error_position!(&d[..], ErrorKind::TakeWhile1)))); + } + + #[test] + fn take_while_m_n() { + named!(x, take_while_m_n!(2, 4, is_alphabetic)); + let a = b""; + let b = b"a"; + let c = b"abc"; + let d = b"abc123"; + let e = b"abcde"; + let f = b"123"; + + assert_eq!(x(&a[..]), Err(Err::Incomplete(Needed::Size(2)))); + assert_eq!(x(&b[..]), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!(x(&c[..]), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!(x(&d[..]), Ok((&b"123"[..], &c[..]))); + assert_eq!(x(&e[..]), Ok((&b"e"[..], &b"abcd"[..]))); + assert_eq!(x(&f[..]), Err(Err::Error(error_position!(&f[..], ErrorKind::TakeWhileMN)))); + } + + #[test] + fn take_till() { + + named!(f, take_till!(is_alphabetic)); + let a = b""; + let b = b"abcd"; + let c = b"123abcd"; + let d = b"123"; + + assert_eq!(f(&a[..]), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!(f(&b[..]), Ok((&b"abcd"[..], &b""[..]))); + assert_eq!(f(&c[..]), Ok((&b"abcd"[..], &b"123"[..]))); + assert_eq!(f(&d[..]), 
Err(Err::Incomplete(Needed::Size(1)))); + } + + #[test] + fn take_till1() { + + named!(f, take_till1!(is_alphabetic)); + let a = b""; + let b = b"abcd"; + let c = b"123abcd"; + let d = b"123"; + + assert_eq!(f(&a[..]), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!(f(&b[..]), Err(Err::Error(error_position!(&b[..], ErrorKind::TakeTill1)))); + assert_eq!(f(&c[..]), Ok((&b"abcd"[..], &b"123"[..]))); + assert_eq!(f(&d[..]), Err(Err::Incomplete(Needed::Size(1)))); + } + + #[test] + fn take_while_utf8() { + named!(f<&str,&str>, take_while!(|c:char| { c != '點' })); + + assert_eq!(f(""), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!(f("abcd"), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!(f("abcd點"), Ok(("點", "abcd"))); + assert_eq!(f("abcd點a"), Ok(("點a", "abcd"))); + + named!(g<&str,&str>, take_while!(|c:char| { c == '點' })); + + assert_eq!(g(""), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!(g("點abcd"), Ok(("abcd", "點"))); + assert_eq!(g("點點點a"), Ok(("a", "點點點"))); + } + + #[test] + fn take_till_utf8() { + named!(f<&str,&str>, take_till!(|c:char| { c == '點' })); + + assert_eq!(f(""), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!(f("abcd"), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!(f("abcd點"), Ok(("點", "abcd"))); + assert_eq!(f("abcd點a"), Ok(("點a", "abcd"))); + + named!(g<&str,&str>, take_till!(|c:char| { c != '點' })); + + assert_eq!(g(""), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!(g("點abcd"), Ok(("abcd", "點"))); + assert_eq!(g("點點點a"), Ok(("a", "點點點"))); + } + + #[test] + fn take_utf8() { + named!(f<&str,&str>, take!(3)); + + assert_eq!(f(""), Err(Err::Incomplete(Needed::Size(3)))); + assert_eq!(f("ab"), Err(Err::Incomplete(Needed::Size(3)))); + assert_eq!(f("點"), Err(Err::Incomplete(Needed::Size(3)))); + assert_eq!(f("ab點cd"), Ok(("cd", "ab點"))); + assert_eq!(f("a點bcd"), Ok(("cd", "a點b"))); + assert_eq!(f("a點b"), Ok(("", "a點b"))); + + named!(g<&str,&str>, take_while!(|c:char| { c == '點' })); + + assert_eq!(g(""), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!(g("點abcd"), Ok(("abcd", "點"))); + assert_eq!(g("點點點a"), Ok(("a", "點點點"))); + } + + #[test] + fn take_while_m_n_utf8() { + named!(parser<&str, &str>, take_while_m_n!(1, 1, |c| c == 'A' || c == '😃')); + assert_eq!(parser("A!"), Ok(("!", "A"))); + assert_eq!(parser("😃!"), Ok(("!", "😃"))); + } + + #[test] + fn take_while_m_n_utf8_full_match() { + named!(parser<&str, &str>, take_while_m_n!(1, 1, |c: char| c.is_alphabetic())); + assert_eq!(parser("øn"), Ok(("n", "ø"))); + } + + #[cfg(nightly)] + use test::Bencher; + + #[cfg(nightly)] + #[bench] + fn take_while_bench(b: &mut Bencher) { + + named!(f, take_while!(is_alphabetic)); + b.iter(|| f(&b"abcdefghijklABCDEejfrfrjgro12aa"[..])); + } + + #[test] + #[cfg(feature = "std")] + fn recognize_take_while() { + use crate::character::is_alphanumeric; + named!(x, take_while!(is_alphanumeric)); + named!(y, recognize!(x)); + assert_eq!(x(&b"ab."[..]), Ok((&b"."[..], &b"ab"[..]))); + println!("X: {:?}", x(&b"ab"[..])); + assert_eq!(y(&b"ab."[..]), Ok((&b"."[..], &b"ab"[..]))); + } + + #[test] + fn length_bytes() { + use crate::number::streaming::le_u8; + named!(x, length_data!(le_u8)); + assert_eq!(x(b"\x02..>>"), Ok((&b">>"[..], &b".."[..]))); + assert_eq!(x(b"\x02.."), Ok((&[][..], &b".."[..]))); + assert_eq!(x(b"\x02."), Err(Err::Incomplete(Needed::Size(2)))); + assert_eq!(x(b"\x02"), Err(Err::Incomplete(Needed::Size(2)))); + + named!(y, do_parse!(tag!("magic") >> b: length_data!(le_u8) >> (b))); + assert_eq!(y(b"magic\x02..>>"), 
Ok((&b">>"[..], &b".."[..]))); + assert_eq!(y(b"magic\x02.."), Ok((&[][..], &b".."[..]))); + assert_eq!(y(b"magic\x02."), Err(Err::Incomplete(Needed::Size(2)))); + assert_eq!(y(b"magic\x02"), Err(Err::Incomplete(Needed::Size(2)))); + } + + #[cfg(feature = "alloc")] + #[test] + fn case_insensitive() { + named!(test, tag_no_case!("ABcd")); + assert_eq!(test(&b"aBCdefgh"[..]), Ok((&b"efgh"[..], &b"aBCd"[..]))); + assert_eq!(test(&b"abcdefgh"[..]), Ok((&b"efgh"[..], &b"abcd"[..]))); + assert_eq!(test(&b"ABCDefgh"[..]), Ok((&b"efgh"[..], &b"ABCD"[..]))); + assert_eq!(test(&b"ab"[..]), Err(Err::Incomplete(Needed::Size(4)))); + assert_eq!(test(&b"Hello"[..]), Err(Err::Error(error_position!(&b"Hello"[..], ErrorKind::Tag)))); + assert_eq!(test(&b"Hel"[..]), Err(Err::Error(error_position!(&b"Hel"[..], ErrorKind::Tag)))); + + named!(test2<&str, &str>, tag_no_case!("ABcd")); + assert_eq!(test2("aBCdefgh"), Ok(("efgh", "aBCd"))); + assert_eq!(test2("abcdefgh"), Ok(("efgh", "abcd"))); + assert_eq!(test2("ABCDefgh"), Ok(("efgh", "ABCD"))); + assert_eq!(test2("ab"), Err(Err::Incomplete(Needed::Size(4)))); + assert_eq!(test2("Hello"), Err(Err::Error(error_position!(&"Hello"[..], ErrorKind::Tag)))); + assert_eq!(test2("Hel"), Err(Err::Error(error_position!(&"Hel"[..], ErrorKind::Tag)))); + } + + #[test] + fn tag_fixed_size_array() { + named!(test, tag!([0x42])); + named!(test2, tag!(&[0x42])); + let input = [0x42, 0x00]; + assert_eq!(test(&input), Ok((&b"\x00"[..], &b"\x42"[..]))); + assert_eq!(test2(&input), Ok((&b"\x00"[..], &b"\x42"[..]))); + } +} diff --git a/third_party/rust/nom/src/bytes/mod.rs b/third_party/rust/nom/src/bytes/mod.rs new file mode 100644 index 0000000000..13b20126f4 --- /dev/null +++ b/third_party/rust/nom/src/bytes/mod.rs @@ -0,0 +1,7 @@ +//! parsers recognizing bytes streams + +#[macro_use] +mod macros; +pub mod streaming; +pub mod complete; + diff --git a/third_party/rust/nom/src/bytes/streaming.rs b/third_party/rust/nom/src/bytes/streaming.rs new file mode 100644 index 0000000000..f6e6706888 --- /dev/null +++ b/third_party/rust/nom/src/bytes/streaming.rs @@ -0,0 +1,645 @@ +//! 
parsers recognizing bytes streams, streaming version + +use crate::error::ErrorKind; +use crate::error::ParseError; +use crate::internal::{Err, IResult, Needed}; +use crate::lib::std::ops::RangeFrom; +use crate::lib::std::result::Result::*; +use crate::traits::{Compare, CompareResult, FindSubstring, FindToken, InputIter, InputLength, InputTake, InputTakeAtPosition, Slice, ToUsize}; + +/// Recognizes a pattern +/// +/// The input data will be compared to the tag combinator's argument and will return the part of +/// the input that matches the argument +/// # Example +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::bytes::streaming::tag; +/// +/// fn parser(s: &str) -> IResult<&str, &str> { +/// tag("Hello")(s) +/// } +/// +/// assert_eq!(parser("Hello, World!"), Ok((", World!", "Hello"))); +/// assert_eq!(parser("Something"), Err(Err::Error(("Something", ErrorKind::Tag)))); +/// assert_eq!(parser(""), Err(Err::Incomplete(Needed::Size(5)))); +/// ``` +pub fn tag<'a, T: 'a, Input: 'a, Error: ParseError>(tag: T) -> impl Fn(Input) -> IResult +where + Input: InputTake + Compare, + T: InputLength + Clone, +{ + move |i: Input| { + let tag_len = tag.input_len(); + let t = tag.clone(); + + let res: IResult<_, _, Error> = match i.compare(t) { + CompareResult::Ok => Ok(i.take_split(tag_len)), + CompareResult::Incomplete => Err(Err::Incomplete(Needed::Size(tag_len))), + CompareResult::Error => { + let e: ErrorKind = ErrorKind::Tag; + Err(Err::Error(Error::from_error_kind(i, e))) + } + }; + res + } +} + +/// Recognizes a case insensitive pattern +/// +/// The input data will be compared to the tag combinator's argument and will return the part of +/// the input that matches the argument with no regard to case +/// # Example +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::bytes::streaming::tag_no_case; +/// +/// fn parser(s: &str) -> IResult<&str, &str> { +/// tag_no_case("hello")(s) +/// } +/// +/// assert_eq!(parser("Hello, World!"), Ok((", World!", "Hello"))); +/// assert_eq!(parser("hello, World!"), Ok((", World!", "hello"))); +/// assert_eq!(parser("HeLlO, World!"), Ok((", World!", "HeLlO"))); +/// assert_eq!(parser("Something"), Err(Err::Error(("Something", ErrorKind::Tag)))); +/// assert_eq!(parser(""), Err(Err::Incomplete(Needed::Size(5)))); +/// ``` +pub fn tag_no_case>(tag: T) -> impl Fn(Input) -> IResult +where + Input: InputTake + Compare, + T: InputLength + Clone, +{ + move |i: Input| { + let tag_len = tag.input_len(); + let t = tag.clone(); + + let res: IResult<_, _, Error> = match (i).compare_no_case(t) { + CompareResult::Ok => Ok(i.take_split(tag_len)), + CompareResult::Incomplete => Err(Err::Incomplete(Needed::Size(tag_len))), + CompareResult::Error => { + let e: ErrorKind = ErrorKind::Tag; + Err(Err::Error(Error::from_error_kind(i, e))) + } + }; + res + } +} + +/// Parse till certain characters are met +/// +/// The parser will return the longest slice till one of the characters of the combinator's argument are met. 
+///
+/// It doesn't consume the matched character,
+///
+/// It will return an `Err::Incomplete(Needed::Size(1))` if the pattern wasn't met
+/// # Example
+/// ```rust
+/// # #[macro_use] extern crate nom;
+/// # use nom::{Err, error::ErrorKind, Needed, IResult};
+/// use nom::bytes::streaming::is_not;
+///
+/// fn not_space(s: &str) -> IResult<&str, &str> {
+///   is_not(" \t\r\n")(s)
+/// }
+///
+/// assert_eq!(not_space("Hello, World!"), Ok((" World!", "Hello,")));
+/// assert_eq!(not_space("Sometimes\t"), Ok(("\t", "Sometimes")));
+/// assert_eq!(not_space("Nospace"), Err(Err::Incomplete(Needed::Size(1))));
+/// assert_eq!(not_space(""), Err(Err::Incomplete(Needed::Size(1))));
+/// ```
+pub fn is_not<T, Input, Error: ParseError<Input>>(arr: T) -> impl Fn(Input) -> IResult<Input, Input, Error>
+where
+  Input: InputTakeAtPosition,
+  T: InputLength + FindToken<<Input as InputTakeAtPosition>::Item>,
+{
+  move |i: Input| {
+    let e: ErrorKind = ErrorKind::IsNot;
+    i.split_at_position1(|c| arr.find_token(c), e)
+  }
+}
+
+/// Returns the longest input slice that matches the pattern
+///
+/// The parser will return the longest slice consisting of the characters provided in the
+/// combinator's argument
+///
+/// # Streaming specific
+/// *Streaming version* will return an `Err::Incomplete(Needed::Size(1))` if the pattern wasn't met
+/// or if the pattern reaches the end of the input
+/// # Example
+/// ```rust
+/// # #[macro_use] extern crate nom;
+/// # use nom::{Err, error::ErrorKind, Needed, IResult};
+/// use nom::bytes::streaming::is_a;
+///
+/// fn hex(s: &str) -> IResult<&str, &str> {
+///   is_a("1234567890ABCDEF")(s)
+/// }
+///
+/// assert_eq!(hex("123 and voila"), Ok((" and voila", "123")));
+/// assert_eq!(hex("DEADBEEF and others"), Ok((" and others", "DEADBEEF")));
+/// assert_eq!(hex("BADBABEsomething"), Ok(("something", "BADBABE")));
+/// assert_eq!(hex("D15EA5E"), Err(Err::Incomplete(Needed::Size(1))));
+/// assert_eq!(hex(""), Err(Err::Incomplete(Needed::Size(1))));
+/// ```
+pub fn is_a<T, Input, Error: ParseError<Input>>(arr: T) -> impl Fn(Input) -> IResult<Input, Input, Error>
+where
+  Input: InputTakeAtPosition,
+  T: InputLength + FindToken<<Input as InputTakeAtPosition>::Item>,
+{
+  move |i: Input| {
+    let e: ErrorKind = ErrorKind::IsA;
+    i.split_at_position1(|c| !arr.find_token(c), e)
+  }
+}
+
+/// Returns the longest input slice (if any) that matches the predicate
+///
+/// The parser will return the longest slice that matches the given predicate *(a function that
+/// takes the input and returns a bool)*
+///
+/// # Streaming Specific
+/// *Streaming version* will return an `Err::Incomplete(Needed::Size(1))` if the pattern reaches the end of the input
+/// # Example
+/// ```rust
+/// # #[macro_use] extern crate nom;
+/// # use nom::{Err, error::ErrorKind, Needed, IResult};
+/// use nom::bytes::streaming::take_while;
+/// use nom::character::is_alphabetic;
+///
+/// fn alpha(s: &[u8]) -> IResult<&[u8], &[u8]> {
+///   take_while(is_alphabetic)(s)
+/// }
+///
+/// assert_eq!(alpha(b"latin123"), Ok((&b"123"[..], &b"latin"[..])));
+/// assert_eq!(alpha(b"12345"), Ok((&b"12345"[..], &b""[..])));
+/// assert_eq!(alpha(b"latin"), Err(Err::Incomplete(Needed::Size(1))));
+/// assert_eq!(alpha(b""), Err(Err::Incomplete(Needed::Size(1))));
+/// ```
+pub fn take_while<F, Input, Error: ParseError<Input>>(cond: F) -> impl Fn(Input) -> IResult<Input, Input, Error>
+where
+  Input: InputTakeAtPosition,
+  F: Fn(<Input as InputTakeAtPosition>::Item) -> bool,
+{
+  move |i: Input| i.split_at_position(|c| !cond(c))
+}
+
+/// Returns the longest (at least 1) input slice that matches the predicate
+///
+/// The parser will return the longest slice that matches the given predicate *(a function that
+/// takes the input and returns a bool)*
+///
+/// It
will return an `Err(Err::Error((_, ErrorKind::TakeWhile1)))` if the pattern wasn't met +/// +/// # Streaming Specific +/// *Streaming version* will return a `Err::Incomplete(Needed::Size(1))` or if the pattern reaches the end of the input. +/// +/// # Example +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::bytes::streaming::take_while1; +/// use nom::character::is_alphabetic; +/// +/// fn alpha(s: &[u8]) -> IResult<&[u8], &[u8]> { +/// take_while1(is_alphabetic)(s) +/// } +/// +/// assert_eq!(alpha(b"latin123"), Ok((&b"123"[..], &b"latin"[..]))); +/// assert_eq!(alpha(b"latin"), Err(Err::Incomplete(Needed::Size(1)))); +/// assert_eq!(alpha(b"12345"), Err(Err::Error((&b"12345"[..], ErrorKind::TakeWhile1)))); +/// ``` +pub fn take_while1>(cond: F) -> impl Fn(Input) -> IResult +where + Input: InputTakeAtPosition, + F: Fn(::Item) -> bool, +{ + move |i: Input| { + let e: ErrorKind = ErrorKind::TakeWhile1; + i.split_at_position1(|c| !cond(c), e) + } +} + +/// Returns the longest (m <= len <= n) input slice that matches the predicate +/// +/// The parser will return the longest slice that matches the given predicate *(a function that +/// takes the input and returns a bool)* +/// +/// It will return an `Err::Error((_, ErrorKind::TakeWhileMN))` if the pattern wasn't met +/// # Streaming Specific +/// *Streaming version* will return a `Err::Incomplete(Needed::Size(1))` if the pattern reaches the end of the input or is too short. +/// +/// # Example +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::bytes::streaming::take_while_m_n; +/// use nom::character::is_alphabetic; +/// +/// fn short_alpha(s: &[u8]) -> IResult<&[u8], &[u8]> { +/// take_while_m_n(3, 6, is_alphabetic)(s) +/// } +/// +/// assert_eq!(short_alpha(b"latin123"), Ok((&b"123"[..], &b"latin"[..]))); +/// assert_eq!(short_alpha(b"lengthy"), Ok((&b"y"[..], &b"length"[..]))); +/// assert_eq!(short_alpha(b"latin"), Err(Err::Incomplete(Needed::Size(1)))); +/// assert_eq!(short_alpha(b"ed"), Err(Err::Incomplete(Needed::Size(1)))); +/// assert_eq!(short_alpha(b"12345"), Err(Err::Error((&b"12345"[..], ErrorKind::TakeWhileMN)))); +/// ``` +pub fn take_while_m_n>(m: usize, n: usize, cond: F) -> impl Fn(Input) -> IResult +where + Input: InputTake + InputIter + InputLength + Slice>, + F: Fn(::Item) -> bool, +{ + move |i: Input| { + let input = i; + + match input.position(|c| !cond(c)) { + Some(idx) => { + if idx >= m { + if idx <= n { + let res: IResult<_, _, Error> = if let Some(index) = input.slice_index(idx) { + Ok(input.take_split(index)) + } else { + Err(Err::Error(Error::from_error_kind(input, ErrorKind::TakeWhileMN))) + }; + res + } else { + let res: IResult<_, _, Error> = if let Some(index) = input.slice_index(n) { + Ok(input.take_split(index)) + } else { + Err(Err::Error(Error::from_error_kind(input, ErrorKind::TakeWhileMN))) + }; + res + } + } else { + let e = ErrorKind::TakeWhileMN; + Err(Err::Error(Error::from_error_kind(input, e))) + } + } + None => { + let len = input.input_len(); + if len >= n { + match input.slice_index(n) { + Some(index) => Ok(input.take_split(index)), + None => Err(Err::Error(Error::from_error_kind(input, ErrorKind::TakeWhileMN))) + } + } else { + let needed = if m > len { m - len } else { 1 }; + Err(Err::Incomplete(Needed::Size(needed))) + } + } + } + } +} + +/// Returns the longest input slice (if any) till a predicate is met +/// +/// The parser will return the 
longest slice till the given predicate *(a function that
+/// takes the input and returns a bool)*
+///
+/// # Streaming Specific
+/// *Streaming version* will return an `Err::Incomplete(Needed::Size(1))` if the match reaches the
+/// end of input or if there was no match
+///
+/// # Example
+/// ```rust
+/// # #[macro_use] extern crate nom;
+/// # use nom::{Err, error::ErrorKind, Needed, IResult};
+/// use nom::bytes::streaming::take_till;
+///
+/// fn till_colon(s: &str) -> IResult<&str, &str> {
+///   take_till(|c| c == ':')(s)
+/// }
+///
+/// assert_eq!(till_colon("latin:123"), Ok((":123", "latin")));
+/// assert_eq!(till_colon(":empty matched"), Ok((":empty matched", ""))); //allowed
+/// assert_eq!(till_colon("12345"), Err(Err::Incomplete(Needed::Size(1))));
+/// assert_eq!(till_colon(""), Err(Err::Incomplete(Needed::Size(1))));
+/// ```
+pub fn take_till<F, Input, Error: ParseError<Input>>(cond: F) -> impl Fn(Input) -> IResult<Input, Input, Error>
+where
+  Input: InputTakeAtPosition,
+  F: Fn(<Input as InputTakeAtPosition>::Item) -> bool,
+{
+  move |i: Input| i.split_at_position(|c| cond(c))
+}
+
+/// Returns the longest (at least 1) input slice till a predicate is met
+///
+/// The parser will return the longest slice till the given predicate *(a function that
+/// takes the input and returns a bool)*
+///
+/// # Streaming Specific
+/// *Streaming version* will return an `Err::Incomplete(Needed::Size(1))` if the match reaches the
+/// end of input or if there was no match
+/// # Example
+/// ```rust
+/// # #[macro_use] extern crate nom;
+/// # use nom::{Err, error::ErrorKind, Needed, IResult};
+/// use nom::bytes::streaming::take_till1;
+///
+/// fn till_colon(s: &str) -> IResult<&str, &str> {
+///   take_till1(|c| c == ':')(s)
+/// }
+///
+/// assert_eq!(till_colon("latin:123"), Ok((":123", "latin")));
+/// assert_eq!(till_colon(":empty matched"), Err(Err::Error((":empty matched", ErrorKind::TakeTill1))));
+/// assert_eq!(till_colon("12345"), Err(Err::Incomplete(Needed::Size(1))));
+/// assert_eq!(till_colon(""), Err(Err::Incomplete(Needed::Size(1))));
+/// ```
+pub fn take_till1<F, Input, Error: ParseError<Input>>(cond: F) -> impl Fn(Input) -> IResult<Input, Input, Error>
+where
+  Input: InputTakeAtPosition,
+  F: Fn(<Input as InputTakeAtPosition>::Item) -> bool,
+{
+  move |i: Input| {
+    let e: ErrorKind = ErrorKind::TakeTill1;
+    i.split_at_position1(|c| cond(c), e)
+  }
+}
+
+/// Returns an input slice containing the first N input elements (Input[..N])
+///
+/// # Streaming Specific
+/// *Streaming version* will return an `Err::Incomplete(Needed::Size(N))` where N is the
+/// argument if the input is less than the length provided
+/// # Example
+/// ```rust
+/// # #[macro_use] extern crate nom;
+/// # use nom::{Err, error::ErrorKind, Needed, IResult};
+/// use nom::bytes::streaming::take;
+///
+/// fn take6(s: &str) -> IResult<&str, &str> {
+///   take(6usize)(s)
+/// }
+///
+/// assert_eq!(take6("1234567"), Ok(("7", "123456")));
+/// assert_eq!(take6("things"), Ok(("", "things")));
+/// assert_eq!(take6("short"), Err(Err::Incomplete(Needed::Size(6)))); //N doesn't change
+/// assert_eq!(take6(""), Err(Err::Incomplete(Needed::Size(6))));
+/// ```
+pub fn take<C, Input, Error: ParseError<Input>>(count: C) -> impl Fn(Input) -> IResult<Input, Input, Error>
+where
+  Input: InputIter + InputTake,
+  C: ToUsize,
+{
+  let c = count.to_usize();
+  move |i: Input| match i.slice_index(c) {
+    None => Err(Err::Incomplete(Needed::Size(c))),
+    Some(index) => Ok(i.take_split(index)),
+  }
+}
+
+/// Returns the longest input slice till it matches the pattern.
+/// +/// It doesn't consume the pattern +/// +/// # Streaming Specific +/// *Streaming version* will return a `Err::Incomplete(Needed::Size(N))` if the input doesn't +/// contain the pattern or if the input is smaller than the pattern +/// # Example +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::bytes::streaming::take_until; +/// +/// fn until_eof(s: &str) -> IResult<&str, &str> { +/// take_until("eof")(s) +/// } +/// +/// assert_eq!(until_eof("hello, worldeof"), Ok(("eof", "hello, world"))); +/// assert_eq!(until_eof("hello, world"), Err(Err::Incomplete(Needed::Size(3)))); +/// assert_eq!(until_eof(""), Err(Err::Incomplete(Needed::Size(3)))); +/// ``` +pub fn take_until>(tag: T) -> impl Fn(Input) -> IResult +where + Input: InputTake + FindSubstring, + T: InputLength + Clone, +{ + move |i: Input| { + let len = tag.input_len(); + let t = tag.clone(); + + let res: IResult<_, _, Error> = match i.find_substring(t) { + None => Err(Err::Incomplete(Needed::Size(len))), + Some(index) => Ok(i.take_split(index)), + }; + res + } +} + +/// Matches a byte string with escaped characters. +/// +/// * The first argument matches the normal characters (it must not accept the control character), +/// * the second argument is the control character (like `\` in most languages), +/// * the third argument matches the escaped characters +/// +/// # Example +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// # use nom::character::complete::digit1; +/// use nom::bytes::streaming::escaped; +/// use nom::character::streaming::one_of; +/// +/// fn esc(s: &str) -> IResult<&str, &str> { +/// escaped(digit1, '\\', one_of("\"n\\"))(s) +/// } +/// +/// assert_eq!(esc("123;"), Ok((";", "123"))); +/// assert_eq!(esc("12\\\"34;"), Ok((";", "12\\\"34"))); +/// ``` +/// +pub fn escaped(normal: F, control_char: char, escapable: G) -> impl Fn(Input) -> IResult +where + Input: Clone + crate::traits::Offset + InputLength + InputTake + InputTakeAtPosition + Slice> + InputIter, + ::Item: crate::traits::AsChar, + F: Fn(Input) -> IResult, + G: Fn(Input) -> IResult, + Error: ParseError, +{ + use crate::traits::AsChar; + + move |input: Input| { + let mut i = input.clone(); + + while i.input_len() > 0 { + match normal(i.clone()) { + Ok((i2, _)) => { + if i2.input_len() == 0 { + return Err(Err::Incomplete(Needed::Unknown)); + } else { + i = i2; + } + } + Err(Err::Error(_)) => { + // unwrap() should be safe here since index < $i.input_len() + if i.iter_elements().next().unwrap().as_char() == control_char { + let next = control_char.len_utf8(); + if next >= i.input_len() { + return Err(Err::Incomplete(Needed::Size(1))); + } else { + match escapable(i.slice(next..)) { + Ok((i2, _)) => { + if i2.input_len() == 0 { + return Err(Err::Incomplete(Needed::Unknown)); + } else { + i = i2; + } + } + Err(e) => return Err(e), + } + } + } else { + let index = input.offset(&i); + return Ok(input.take_split(index)); + } + } + Err(e) => { + return Err(e); + } + } + } + + Err(Err::Incomplete(Needed::Unknown)) + } +} + +#[doc(hidden)] +pub fn escapedc(i: Input, normal: F, control_char: char, escapable: G) -> IResult +where + Input: Clone + crate::traits::Offset + InputLength + InputTake + InputTakeAtPosition + Slice> + InputIter, + ::Item: crate::traits::AsChar, + F: Fn(Input) -> IResult, + G: Fn(Input) -> IResult, + Error: ParseError, +{ + escaped(normal, control_char, escapable)(i) +} + +/// Matches a byte string with 
escaped characters. +/// +/// * The first argument matches the normal characters (it must not match the control character), +/// * the second argument is the control character (like `\` in most languages), +/// * the third argument matches the escaped characters and transforms them. +/// +/// As an example, the chain `abc\tdef` could be `abc def` (it also consumes the control character) +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// # use std::str::from_utf8; +/// use nom::bytes::streaming::escaped_transform; +/// use nom::character::streaming::alpha1; +/// +/// fn parser(input: &str) -> IResult<&str, String> { +/// escaped_transform( +/// alpha1, +/// '\\', +/// |i:&str| alt!(i, +/// tag!("\\") => { |_| "\\" } +/// | tag!("\"") => { |_| "\"" } +/// | tag!("n") => { |_| "\n" } +/// ) +/// )(input) +/// } +/// +/// assert_eq!(parser("ab\\\"cd\""), Ok(("\"", String::from("ab\"cd")))); +/// ``` +#[cfg(feature = "alloc")] +pub fn escaped_transform( + normal: F, + control_char: char, + transform: G, +) -> impl Fn(Input) -> IResult +where + Input: Clone + crate::traits::Offset + InputLength + InputTake + InputTakeAtPosition + Slice> + InputIter, + Input: crate::traits::ExtendInto, + O1: crate::traits::ExtendInto, + O2: crate::traits::ExtendInto, + Output: core::iter::Extend<::Item>, + Output: core::iter::Extend<::Item>, + Output: core::iter::Extend<::Item>, + ::Item: crate::traits::AsChar, + F: Fn(Input) -> IResult, + G: Fn(Input) -> IResult, + Error: ParseError, +{ + use crate::traits::AsChar; + + move |input: Input| { + let mut index = 0; + let mut res = input.new_builder(); + + let i = input.clone(); + + while index < i.input_len() { + let remainder = i.slice(index..); + match normal(remainder.clone()) { + Ok((i2, o)) => { + o.extend_into(&mut res); + if i2.input_len() == 0 { + return Err(Err::Incomplete(Needed::Unknown)); + } else { + index = input.offset(&i2); + } + } + Err(Err::Error(_)) => { + // unwrap() should be safe here since index < $i.input_len() + if remainder.iter_elements().next().unwrap().as_char() == control_char { + let next = index + control_char.len_utf8(); + let input_len = input.input_len(); + + if next >= input_len { + return Err(Err::Incomplete(Needed::Unknown)); + } else { + match transform(i.slice(next..)) { + Ok((i2, o)) => { + o.extend_into(&mut res); + if i2.input_len() == 0 { + return Err(Err::Incomplete(Needed::Unknown)); + } else { + index = input.offset(&i2); + } + } + Err(e) => return Err(e), + } + } + } else { + return Ok((remainder, res)); + } + } + Err(e) => return Err(e), + } + } + Err(Err::Incomplete(Needed::Unknown)) + } +} + +#[doc(hidden)] +#[cfg(feature = "alloc")] +pub fn escaped_transformc( + i: Input, + normal: F, + control_char: char, + transform: G, +) -> IResult +where + Input: Clone + crate::traits::Offset + InputLength + InputTake + InputTakeAtPosition + Slice> + InputIter, + Input: crate::traits::ExtendInto, + O1: crate::traits::ExtendInto, + O2: crate::traits::ExtendInto, + Output: core::iter::Extend<::Item>, + Output: core::iter::Extend<::Item>, + Output: core::iter::Extend<::Item>, + ::Item: crate::traits::AsChar, + F: Fn(Input) -> IResult, + G: Fn(Input) -> IResult, + Error: ParseError, +{ + escaped_transform(normal, control_char, transform)(i) + +} diff --git a/third_party/rust/nom/src/character.rs b/third_party/rust/nom/src/character.rs deleted file mode 100644 index 6ccd61eb51..0000000000 --- a/third_party/rust/nom/src/character.rs +++ /dev/null @@ -1,228 +0,0 @@ -/// 
Character level parsers - -use internal::{IResult, Needed}; -use traits::{AsChar, InputIter, InputLength, Slice}; -use lib::std::ops::RangeFrom; -use traits::{need_more, AtEof}; - -/// matches one of the provided characters -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!(simple, one_of!(&b"abc"[..])); -/// assert_eq!(simple(b"a123"), Ok((&b"123"[..], 'a'))); -/// -/// named!(a_or_b<&str, char>, one_of!("ab汉")); -/// assert_eq!(a_or_b("汉jiosfe"), Ok(("jiosfe", '汉'))); -/// # } -/// ``` -#[macro_export] -macro_rules! one_of ( - ($i:expr, $inp: expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::lib::std::option::Option::*; - use $crate::{Err,Needed}; - - use $crate::Slice; - use $crate::AsChar; - use $crate::FindToken; - use $crate::InputIter; - - match ($i).iter_elements().next().map(|c| { - (c, $inp.find_token(c)) - }) { - None => $crate::need_more($i, Needed::Size(1)), - Some((_, false)) => Err(Err::Error(error_position!($i, $crate::ErrorKind::OneOf::))), - //the unwrap should be safe here - Some((c, true)) => Ok(( $i.slice(c.len()..), $i.iter_elements().next().unwrap().as_char() )) - } - } - ); -); - -/// matches anything but the provided characters -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::{Err,ErrorKind}; -/// # fn main() { -/// named!(no_letter_a, none_of!(&b"abc"[..])); -/// assert_eq!(no_letter_a(b"123"), Ok((&b"23"[..], '1'))); -/// -/// named!(err_on_single_quote, none_of!(&b"'"[..])); -/// assert_eq!(err_on_single_quote(b"'jiosfe"), Err(Err::Error(error_position!(&b"'jiosfe"[..], ErrorKind::NoneOf)))); -/// # } -/// ``` -#[macro_export] -macro_rules! none_of ( - ($i:expr, $inp: expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::lib::std::option::Option::*; - use $crate::{Err,Needed}; - - use $crate::Slice; - use $crate::AsChar; - use $crate::FindToken; - use $crate::InputIter; - - match ($i).iter_elements().next().map(|c| { - (c, !$inp.find_token(c)) - }) { - None => $crate::need_more($i, Needed::Size(1)), - Some((_, false)) => Err(Err::Error(error_position!($i, $crate::ErrorKind::NoneOf::))), - //the unwrap should be safe here - Some((c, true)) => Ok(( $i.slice(c.len()..), $i.iter_elements().next().unwrap().as_char() )) - } - } - ); -); - -/// matches one character: `char!(char) => &[u8] -> IResult<&[u8], char> -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::{Err,ErrorKind}; -/// # fn main() { -/// named!(match_letter_a, char!('a')); -/// assert_eq!(match_letter_a(b"abc"), Ok((&b"bc"[..],'a'))); -/// -/// assert_eq!(match_letter_a(b"123cdef"), Err(Err::Error(error_position!(&b"123cdef"[..], ErrorKind::Char)))); -/// # } -/// ``` -#[macro_export] -macro_rules! 
char ( - ($i:expr, $c: expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::lib::std::option::Option::*; - use $crate::{Err,Needed}; - - use $crate::Slice; - use $crate::AsChar; - use $crate::InputIter; - - match ($i).iter_elements().next().map(|c| { - let b = c.as_char() == $c; - b - }) { - None => $crate::need_more($i, Needed::Size(1)), - Some(false) => { - let e: $crate::ErrorKind = $crate::ErrorKind::Char; - Err(Err::Error($crate::Context::Code($i, e))) - }, - //the unwrap should be safe here - Some(true) => Ok(( $i.slice($c.len()..), $i.iter_elements().next().unwrap().as_char() )) - } - } - ); -); - -named!(#[doc="Matches a newline character '\\n'"], pub newline, char!('\n')); - -named!(#[doc="Matches a tab character '\\t'"], pub tab, char!('\t')); - -/// matches one byte as a character. Note that the input type will -/// accept a `str`, but not a `&[u8]`, unlike many other nom parsers. -/// -/// # Example -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::anychar; -/// # fn main() { -/// assert_eq!(anychar("abc"), Ok(("bc",'a'))); -/// # } -/// ``` -pub fn anychar(input: T) -> IResult -where - T: InputIter + InputLength + Slice> + AtEof, - ::Item: AsChar, -{ - let mut it = input.iter_indices(); - match it.next() { - None => need_more(input, Needed::Size(1)), - Some((_, c)) => match it.next() { - None => Ok((input.slice(input.input_len()..), c.as_char())), - Some((idx, _)) => Ok((input.slice(idx..), c.as_char())), - }, - } -} - -#[cfg(test)] -mod tests { - use internal::Err; - use util::ErrorKind; - - #[test] - fn one_of() { - named!(f, one_of!("ab")); - - let a = &b"abcd"[..]; - assert_eq!(f(a), Ok((&b"bcd"[..], 'a'))); - - let b = &b"cde"[..]; - assert_eq!(f(b), Err(Err::Error(error_position!(b, ErrorKind::OneOf)))); - - named!(utf8(&str) -> char, - one_of!("+\u{FF0B}")); - - assert!(utf8("+").is_ok()); - assert!(utf8("\u{FF0B}").is_ok()); - } - - #[test] - fn none_of() { - named!(f, none_of!("ab")); - - let a = &b"abcd"[..]; - assert_eq!(f(a), Err(Err::Error(error_position!(a, ErrorKind::NoneOf)))); - - let b = &b"cde"[..]; - assert_eq!(f(b), Ok((&b"de"[..], 'c'))); - } - - #[test] - fn char() { - named!(f, char!('c')); - - let a = &b"abcd"[..]; - assert_eq!(f(a), Err(Err::Error(error_position!(a, ErrorKind::Char)))); - - let b = &b"cde"[..]; - assert_eq!(f(b), Ok((&b"de"[..], 'c'))); - } - - #[test] - fn char_str() { - named!(f<&str, char>, char!('c')); - - let a = &"abcd"[..]; - assert_eq!(f(a), Err(Err::Error(error_position!(a, ErrorKind::Char)))); - - let b = &"cde"[..]; - assert_eq!(f(b), Ok((&"de"[..], 'c'))); - } - - use types::CompleteStr; - #[test] - fn complete_char() { - named!(f, char!('c')); - - let a = CompleteStr("abcd"); - assert_eq!(f(a), Err(Err::Error(error_position!(a, ErrorKind::Char)))); - - let b = CompleteStr("cde"); - assert_eq!(f(b), Ok((CompleteStr("de"), 'c'))); - } - - #[test] - fn anychar_str() { - use super::anychar; - assert_eq!(anychar("Ә"), Ok(("", 'Ә'))); - } -} diff --git a/third_party/rust/nom/src/character/complete.rs b/third_party/rust/nom/src/character/complete.rs new file mode 100644 index 0000000000..beddc3fa43 --- /dev/null +++ b/third_party/rust/nom/src/character/complete.rs @@ -0,0 +1,1082 @@ +//! Character specific parsers and combinators, complete input version. +//! +//! Functions recognizing specific characters. 
+ +use crate::internal::{Err, IResult}; +use crate::error::ParseError; +use crate::lib::std::ops::{Range, RangeFrom, RangeTo}; +use crate::traits::{AsChar, FindToken, InputIter, InputLength, InputTakeAtPosition, Slice}; +use crate::traits::{Compare, CompareResult}; +use crate::error::ErrorKind; + +/// Recognizes one character. +/// +/// *complete version*: Will return an error if there's not enough input data. +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind}; +/// # use nom::character::complete::char; +/// # fn main() { +/// assert_eq!(char::<_, (&str, ErrorKind)>('a')("abc"), Ok(("bc", 'a'))); +/// assert_eq!(char::<_, (&str, ErrorKind)>('a')("bc"), Err(Err::Error(("bc", ErrorKind::Char)))); +/// assert_eq!(char::<_, (&str, ErrorKind)>('a')(""), Err(Err::Error(("", ErrorKind::Char)))); +/// # } +/// ``` +pub fn char>(c: char) -> impl Fn(I) -> IResult +where + I: Slice> + InputIter, + ::Item: AsChar, +{ + move |i: I| match (i).iter_elements().next().map(|t| { + let b = t.as_char() == c; + (&c, b) + }) { + Some((c, true)) => Ok((i.slice(c.len()..), c.as_char())), + _ => Err(Err::Error(Error::from_char(i, c))), + } +} + +/// Recognizes one of the provided characters. +/// +/// *complete version*: Will return an error if there's not enough input data. +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind}; +/// # use nom::character::complete::one_of; +/// # fn main() { +/// assert_eq!(one_of::<_, _, (&str, ErrorKind)>("abc")("b"), Ok(("", 'b'))); +/// assert_eq!(one_of::<_, _, (&str, ErrorKind)>("a")("bc"), Err(Err::Error(("bc", ErrorKind::OneOf)))); +/// assert_eq!(one_of::<_, _, (&str, ErrorKind)>("a")(""), Err(Err::Error(("", ErrorKind::OneOf)))); +/// # } +/// ``` +pub fn one_of>(list: T) -> impl Fn(I) -> IResult +where + I: Slice> + InputIter, + ::Item: AsChar + Copy, + T: FindToken<::Item>, +{ + move |i: I| match (i).iter_elements().next().map(|c| (c, list.find_token(c))) { + Some((c, true)) => Ok((i.slice(c.len()..), c.as_char())), + _ => Err(Err::Error(Error::from_error_kind(i, ErrorKind::OneOf))), + } +} + +/// Recognizes a character that is not in the provided characters. +/// +/// *complete version*: Will return an error if there's not enough input data. +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind}; +/// # use nom::character::complete::none_of; +/// # fn main() { +/// assert_eq!(none_of::<_, _, (&str, ErrorKind)>("abc")("z"), Ok(("", 'z'))); +/// assert_eq!(none_of::<_, _, (&str, ErrorKind)>("ab")("a"), Err(Err::Error(("a", ErrorKind::NoneOf)))); +/// assert_eq!(none_of::<_, _, (&str, ErrorKind)>("a")(""), Err(Err::Error(("", ErrorKind::NoneOf)))); +/// # } +/// ``` +pub fn none_of>(list: T) -> impl Fn(I) -> IResult +where + I: Slice> + InputIter, + ::Item: AsChar + Copy, + T: FindToken<::Item>, +{ + move |i: I| match (i).iter_elements().next().map(|c| (c, !list.find_token(c))) { + Some((c, true)) => Ok((i.slice(c.len()..), c.as_char())), + _ => Err(Err::Error(Error::from_error_kind(i, ErrorKind::NoneOf))), + } +} + +/// Recognizes the string "\r\n". +/// +/// *complete version*: Will return an error if there's not enough input data. 
+/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult}; +/// # use nom::character::complete::crlf; +/// # fn main() { +/// fn parser(input: &str) -> IResult<&str, &str> { +/// crlf(input) +/// } +/// +/// assert_eq!(parser("\r\nc"), Ok(("c", "\r\n"))); +/// assert_eq!(parser("ab\r\nc"), Err(Err::Error(("ab\r\nc", ErrorKind::CrLf)))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::CrLf)))); +/// # } +/// ``` +pub fn crlf>(input: T) -> IResult +where + T: Slice> + Slice> + Slice>, + T: InputIter, + T: Compare<&'static str>, +{ + match input.compare("\r\n") { + //FIXME: is this the right index? + CompareResult::Ok => Ok((input.slice(2..), input.slice(0..2))), + _ => { + let e: ErrorKind = ErrorKind::CrLf; + Err(Err::Error(E::from_error_kind(input, e))) + } + } +} + +//FIXME: there's still an incomplete +/// Recognizes a string of any char except '\r' or '\n'. +/// +/// *complete version*: Will return an error if there's not enough input data. +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::not_line_ending; +/// # fn main() { +/// fn parser(input: &str) -> IResult<&str, &str> { +/// not_line_ending(input) +/// } +/// +/// assert_eq!(parser("ab\r\nc"), Ok(("\r\nc", "ab"))); +/// assert_eq!(parser("abc"), Ok(("", "abc"))); +/// assert_eq!(parser(""), Ok(("", ""))); +/// # } +/// ``` +pub fn not_line_ending>(input: T) -> IResult +where + T: Slice> + Slice> + Slice>, + T: InputIter + InputLength, + T: Compare<&'static str>, + ::Item: AsChar, + ::Item: AsChar, +{ + match input.position(|item| { + let c = item.as_char(); + c == '\r' || c == '\n' + }) { + None => { + Ok((input.slice(input.input_len()..), input)) + } + Some(index) => { + let mut it = input.slice(index..).iter_elements(); + let nth = it.next().unwrap().as_char(); + if nth == '\r' { + let sliced = input.slice(index..); + let comp = sliced.compare("\r\n"); + match comp { + //FIXME: calculate the right index + CompareResult::Ok => Ok((input.slice(index..), input.slice(..index))), + _ => { + let e: ErrorKind = ErrorKind::Tag; + Err(Err::Error(E::from_error_kind(input, e))) + } + } + } else { + Ok((input.slice(index..), input.slice(..index))) + } + } + } +} + +/// Recognizes an end of line (both '\n' and '\r\n'). +/// +/// *complete version*: Will return an error if there's not enough input data. +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::line_ending; +/// # fn main() { +/// fn parser(input: &str) -> IResult<&str, &str> { +/// line_ending(input) +/// } +/// +/// assert_eq!(parser("\r\nc"), Ok(("c", "\r\n"))); +/// assert_eq!(parser("ab\r\nc"), Err(Err::Error(("ab\r\nc", ErrorKind::CrLf)))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::CrLf)))); +/// # } +/// ``` +pub fn line_ending>(input: T) -> IResult +where + T: Slice> + Slice> + Slice>, + T: InputIter + InputLength, + T: Compare<&'static str>, +{ + match input.compare("\n") { + CompareResult::Ok => Ok((input.slice(1..), input.slice(0..1))), + CompareResult::Incomplete => Err(Err::Error(E::from_error_kind(input, ErrorKind::CrLf))), + CompareResult::Error => { + match input.compare("\r\n") { + //FIXME: is this the right index? + CompareResult::Ok => Ok((input.slice(2..), input.slice(0..2))), + _ => Err(Err::Error(E::from_error_kind(input, ErrorKind::CrLf))), + } + } + } +} + +/// Matches a newline character '\n'. 
+/// +/// *complete version*: Will return an error if there's not enough input data. +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::newline; +/// # fn main() { +/// fn parser(input: &str) -> IResult<&str, char> { +/// newline(input) +/// } +/// +/// assert_eq!(parser("\nc"), Ok(("c", '\n'))); +/// assert_eq!(parser("\r\nc"), Err(Err::Error(("\r\nc", ErrorKind::Char)))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Char)))); +/// # } +/// ``` +pub fn newline>(input: I) -> IResult +where + I: Slice> + InputIter, + ::Item: AsChar, +{ + char('\n')(input) +} + +/// Matches a tab character '\t'. +/// +/// *complete version*: Will return an error if there's not enough input data. +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::tab; +/// # fn main() { +/// fn parser(input: &str) -> IResult<&str, char> { +/// tab(input) +/// } +/// +/// assert_eq!(parser("\tc"), Ok(("c", '\t'))); +/// assert_eq!(parser("\r\nc"), Err(Err::Error(("\r\nc", ErrorKind::Char)))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Char)))); +/// # } +/// ``` +pub fn tab>(input: I) -> IResult +where + I: Slice> + InputIter, + ::Item: AsChar, +{ + char('\t')(input) +} + +/// Matches one byte as a character. Note that the input type will +/// accept a `str`, but not a `&[u8]`, unlike many other nom parsers. +/// +/// *complete version*: Will return an error if there's not enough input data. +/// +/// # Example +/// +/// ``` +/// # use nom::{character::complete::anychar, Err, error::ErrorKind, IResult}; +/// # fn main() { +/// fn parser(input: &str) -> IResult<&str, char> { +/// anychar(input) +/// } +/// +/// assert_eq!(parser("abc"), Ok(("bc",'a'))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Eof)))); +/// # } +/// ``` +pub fn anychar>(input: T) -> IResult +where + T: InputIter + InputLength + Slice>, + ::Item: AsChar, +{ + let mut it = input.iter_indices(); + match it.next() { + None => Err(Err::Error(E::from_error_kind(input, ErrorKind::Eof))), + Some((_, c)) => match it.next() { + None => Ok((input.slice(input.input_len()..), c.as_char())), + Some((idx, _)) => Ok((input.slice(idx..), c.as_char())), + }, + } +} + +/// Recognizes zero or more lowercase and uppercase ASCII alphabetic characters: a-z, A-Z +/// +/// *complete version*: Will return the whole input if no terminating token is found (a non +/// alphabetic character). +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::alpha0; +/// # fn main() { +/// fn parser(input: &str) -> IResult<&str, &str> { +/// alpha0(input) +/// } +/// +/// assert_eq!(parser("ab1c"), Ok(("1c", "ab"))); +/// assert_eq!(parser("1c"), Ok(("1c", ""))); +/// assert_eq!(parser(""), Ok(("", ""))); +/// # } +/// ``` +pub fn alpha0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position_complete(|item| !item.is_alpha()) +} + +/// Recognizes one or more lowercase and uppercase ASCII alphabetic characters: a-z, A-Z +/// +/// *complete version*: Will return an error if there's not enough input data, +/// or the whole input if no terminating token is found (a non alphabetic character). 
+/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::alpha1; +/// # fn main() { +/// fn parser(input: &str) -> IResult<&str, &str> { +/// alpha1(input) +/// } +/// +/// assert_eq!(parser("aB1c"), Ok(("1c", "aB"))); +/// assert_eq!(parser("1c"), Err(Err::Error(("1c", ErrorKind::Alpha)))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Alpha)))); +/// # } +/// ``` +pub fn alpha1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position1_complete(|item| !item.is_alpha(), ErrorKind::Alpha) +} + +/// Recognizes zero or more ASCII numerical characters: 0-9 +/// +/// *complete version*: Will return an error if there's not enough input data, +/// or the whole input if no terminating token is found (a non digit character). +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::digit0; +/// # fn main() { +/// fn parser(input: &str) -> IResult<&str, &str> { +/// digit0(input) +/// } +/// +/// assert_eq!(parser("21c"), Ok(("c", "21"))); +/// assert_eq!(parser("21"), Ok(("", "21"))); +/// assert_eq!(parser("a21c"), Ok(("a21c", ""))); +/// assert_eq!(parser(""), Ok(("", ""))); +/// # } +/// ``` +pub fn digit0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position_complete(|item| !item.is_dec_digit()) +} + +/// Recognizes one or more ASCII numerical characters: 0-9 +/// +/// *complete version*: Will return an error if there's not enough input data, +/// or the whole input if no terminating token is found (a non digit character). +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::digit1; +/// # fn main() { +/// fn parser(input: &str) -> IResult<&str, &str> { +/// digit1(input) +/// } +/// +/// assert_eq!(parser("21c"), Ok(("c", "21"))); +/// assert_eq!(parser("c1"), Err(Err::Error(("c1", ErrorKind::Digit)))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Digit)))); +/// # } +/// ``` +pub fn digit1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position1_complete(|item| !item.is_dec_digit(), ErrorKind::Digit) +} + +/// Recognizes zero or more ASCII hexadecimal numerical characters: 0-9, A-F, a-f +/// +/// *complete version*: Will return the whole input if no terminating token is found (a non hexadecimal digit character). +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::hex_digit0; +/// # fn main() { +/// fn parser(input: &str) -> IResult<&str, &str> { +/// hex_digit0(input) +/// } +/// +/// assert_eq!(parser("21cZ"), Ok(("Z", "21c"))); +/// assert_eq!(parser("Z21c"), Ok(("Z21c", ""))); +/// assert_eq!(parser(""), Ok(("", ""))); +/// # } +/// ``` +pub fn hex_digit0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position_complete(|item| !item.is_hex_digit()) +} +/// Recognizes one or more ASCII hexadecimal numerical characters: 0-9, A-F, a-f +/// +/// *complete version*: Will return an error if there's not enough input data, +/// or the whole input if no terminating token is found (a non hexadecimal digit character). 
+/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::hex_digit1; +/// # fn main() { +/// fn parser(input: &str) -> IResult<&str, &str> { +/// hex_digit1(input) +/// } +/// +/// assert_eq!(parser("21cZ"), Ok(("Z", "21c"))); +/// assert_eq!(parser("H2"), Err(Err::Error(("H2", ErrorKind::HexDigit)))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::HexDigit)))); +/// # } +/// ``` +pub fn hex_digit1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position1_complete(|item| !item.is_hex_digit(), ErrorKind::HexDigit) +} + +/// Recognizes zero or more octal characters: 0-7 +/// +/// *complete version*: Will return the whole input if no terminating token is found (a non octal +/// digit character). +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::oct_digit0; +/// # fn main() { +/// fn parser(input: &str) -> IResult<&str, &str> { +/// oct_digit0(input) +/// } +/// +/// assert_eq!(parser("21cZ"), Ok(("cZ", "21"))); +/// assert_eq!(parser("Z21c"), Ok(("Z21c", ""))); +/// assert_eq!(parser(""), Ok(("", ""))); +/// # } +/// ``` +pub fn oct_digit0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position_complete(|item| !item.is_oct_digit()) +} + +/// Recognizes one or more octal characters: 0-7 +/// +/// *complete version*: Will return an error if there's not enough input data, +/// or the whole input if no terminating token is found (a non octal digit character). +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::oct_digit1; +/// # fn main() { +/// fn parser(input: &str) -> IResult<&str, &str> { +/// oct_digit1(input) +/// } +/// +/// assert_eq!(parser("21cZ"), Ok(("cZ", "21"))); +/// assert_eq!(parser("H2"), Err(Err::Error(("H2", ErrorKind::OctDigit)))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::OctDigit)))); +/// # } +/// ``` +pub fn oct_digit1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position1_complete(|item| !item.is_oct_digit(), ErrorKind::OctDigit) +} + +/// Recognizes zero or more ASCII numerical and alphabetic characters: 0-9, a-z, A-Z +/// +/// *complete version*: Will return the whole input if no terminating token is found (a non +/// alphanumerical character). +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::alphanumeric0; +/// # fn main() { +/// fn parser(input: &str) -> IResult<&str, &str> { +/// alphanumeric0(input) +/// } +/// +/// assert_eq!(parser("21cZ%1"), Ok(("%1", "21cZ"))); +/// assert_eq!(parser("&Z21c"), Ok(("&Z21c", ""))); +/// assert_eq!(parser(""), Ok(("", ""))); +/// # } +/// ``` +pub fn alphanumeric0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position_complete(|item| !item.is_alphanum()) +} + +/// Recognizes one or more ASCII numerical and alphabetic characters: 0-9, a-z, A-Z +/// +/// *complete version*: Will return an error if there's not enough input data, +/// or the whole input if no terminating token is found (a non alphanumerical character). 
+/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::alphanumeric1; +/// # fn main() { +/// fn parser(input: &str) -> IResult<&str, &str> { +/// alphanumeric1(input) +/// } +/// +/// assert_eq!(parser("21cZ%1"), Ok(("%1", "21cZ"))); +/// assert_eq!(parser("&H2"), Err(Err::Error(("&H2", ErrorKind::AlphaNumeric)))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::AlphaNumeric)))); +/// # } +/// ``` +pub fn alphanumeric1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position1_complete(|item| !item.is_alphanum(), ErrorKind::AlphaNumeric) +} + +/// Recognizes zero or more spaces and tabs. +/// +/// *complete version*: Will return the whole input if no terminating token is found (a non space +/// character). +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::space0; +/// # fn main() { +/// fn parser(input: &str) -> IResult<&str, &str> { +/// space0(input) +/// } +/// +/// assert_eq!(parser(" \t21c"), Ok(("21c", " \t"))); +/// assert_eq!(parser("Z21c"), Ok(("Z21c", ""))); +/// assert_eq!(parser(""), Ok(("", ""))); +/// # } +/// ``` +pub fn space0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar + Clone, +{ + input.split_at_position_complete(|item| { + let c = item.clone().as_char(); + !(c == ' ' || c == '\t') + }) +} + +/// Recognizes one or more spaces and tabs. +/// +/// *complete version*: Will return an error if there's not enough input data, +/// or the whole input if no terminating token is found (a non space character). +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::space1; +/// # fn main() { +/// fn parser(input: &str) -> IResult<&str, &str> { +/// space1(input) +/// } +/// +/// assert_eq!(parser(" \t21c"), Ok(("21c", " \t"))); +/// assert_eq!(parser("H2"), Err(Err::Error(("H2", ErrorKind::Space)))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Space)))); +/// # } +/// ``` +pub fn space1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar + Clone, +{ + input.split_at_position1_complete( + |item| { + let c = item.clone().as_char(); + !(c == ' ' || c == '\t') + }, + ErrorKind::Space, + ) +} + +/// Recognizes zero or more spaces, tabs, carriage returns and line feeds. +/// +/// *complete version*: will return the whole input if no terminating token is found (a non space +/// character). +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::multispace0; +/// # fn main() { +/// fn parser(input: &str) -> IResult<&str, &str> { +/// multispace0(input) +/// } +/// +/// assert_eq!(parser(" \t\n\r21c"), Ok(("21c", " \t\n\r"))); +/// assert_eq!(parser("Z21c"), Ok(("Z21c", ""))); +/// assert_eq!(parser(""), Ok(("", ""))); +/// # } +/// ``` +pub fn multispace0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar + Clone, +{ + input.split_at_position_complete(|item| { + let c = item.clone().as_char(); + !(c == ' ' || c == '\t' || c == '\r' || c == '\n') + }) +} + +/// Recognizes one or more spaces, tabs, carriage returns and line feeds. +/// +/// *complete version*: will return an error if there's not enough input data, +/// or the whole input if no terminating token is found (a non space character). 
+/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::complete::multispace1; +/// # fn main() { +/// fn parser(input: &str) -> IResult<&str, &str> { +/// multispace1(input) +/// } +/// +/// assert_eq!(parser(" \t\n\r21c"), Ok(("21c", " \t\n\r"))); +/// assert_eq!(parser("H2"), Err(Err::Error(("H2", ErrorKind::MultiSpace)))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::MultiSpace)))); +/// # } +/// ``` +pub fn multispace1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar + Clone, +{ + input.split_at_position1_complete( + |item| { + let c = item.clone().as_char(); + !(c == ' ' || c == '\t' || c == '\r' || c == '\n') + }, + ErrorKind::MultiSpace, + ) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::internal::Err; + + macro_rules! assert_parse( + ($left: expr, $right: expr) => { + let res: $crate::IResult<_, _, (_, ErrorKind)> = $left; + assert_eq!(res, $right); + }; + ); + + + #[test] + fn character() { + let empty: &[u8] = b""; + let a: &[u8] = b"abcd"; + let b: &[u8] = b"1234"; + let c: &[u8] = b"a123"; + let d: &[u8] = "azé12".as_bytes(); + let e: &[u8] = b" "; + let f: &[u8] = b" ;"; + //assert_eq!(alpha1::<_, (_, ErrorKind)>(a), Err(Err::Incomplete(Needed::Size(1)))); + assert_parse!(alpha1(a), Ok((empty, a))); + assert_eq!( + alpha1(b), + Err(Err::Error((b, ErrorKind::Alpha))) + ); + assert_eq!(alpha1::<_, (_, ErrorKind)>(c), Ok((&c[1..], &b"a"[..]))); + assert_eq!(alpha1::<_, (_, ErrorKind)>(d), Ok(("é12".as_bytes(), &b"az"[..]))); + assert_eq!( + digit1(a), + Err(Err::Error((a, ErrorKind::Digit))) + ); + assert_eq!(digit1::<_, (_, ErrorKind)>(b), Ok((empty, b))); + assert_eq!( + digit1(c), + Err(Err::Error((c, ErrorKind::Digit))) + ); + assert_eq!( + digit1(d), + Err(Err::Error((d, ErrorKind::Digit))) + ); + assert_eq!(hex_digit1::<_, (_, ErrorKind)>(a), Ok((empty, a))); + assert_eq!(hex_digit1::<_, (_, ErrorKind)>(b), Ok((empty, b))); + assert_eq!(hex_digit1::<_, (_, ErrorKind)>(c), Ok((empty, c))); + assert_eq!(hex_digit1::<_, (_, ErrorKind)>(d), Ok(("zé12".as_bytes(), &b"a"[..]))); + assert_eq!( + hex_digit1(e), + Err(Err::Error((e, ErrorKind::HexDigit))) + ); + assert_eq!( + oct_digit1(a), + Err(Err::Error((a, ErrorKind::OctDigit))) + ); + assert_eq!(oct_digit1::<_, (_, ErrorKind)>(b), Ok((empty, b))); + assert_eq!( + oct_digit1(c), + Err(Err::Error((c, ErrorKind::OctDigit))) + ); + assert_eq!( + oct_digit1(d), + Err(Err::Error((d, ErrorKind::OctDigit))) + ); + assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(a), Ok((empty, a))); + //assert_eq!(fix_error!(b,(), alphanumeric), Ok((empty, b))); + assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(c), Ok((empty, c))); + assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(d), Ok(("é12".as_bytes(), &b"az"[..]))); + assert_eq!(space1::<_, (_, ErrorKind)>(e), Ok((empty, e))); + assert_eq!(space1::<_, (_, ErrorKind)>(f), Ok((&b";"[..], &b" "[..]))); + } + + #[cfg(feature = "alloc")] + #[test] + fn character_s() { + let empty = ""; + let a = "abcd"; + let b = "1234"; + let c = "a123"; + let d = "azé12"; + let e = " "; + assert_eq!(alpha1::<_, (_, ErrorKind)>(a), Ok((empty, a))); + assert_eq!( + alpha1(b), + Err(Err::Error((b, ErrorKind::Alpha))) + ); + assert_eq!(alpha1::<_, (_, ErrorKind)>(c), Ok((&c[1..], &"a"[..]))); + assert_eq!(alpha1::<_, (_, ErrorKind)>(d), Ok(("é12", &"az"[..]))); + assert_eq!( + digit1(a), + Err(Err::Error((a, ErrorKind::Digit))) + ); + assert_eq!(digit1::<_, (_, ErrorKind)>(b), Ok((empty, b))); + assert_eq!( + 
digit1(c), + Err(Err::Error((c, ErrorKind::Digit))) + ); + assert_eq!( + digit1(d), + Err(Err::Error((d, ErrorKind::Digit))) + ); + assert_eq!(hex_digit1::<_, (_, ErrorKind)>(a), Ok((empty, a))); + assert_eq!(hex_digit1::<_, (_, ErrorKind)>(b), Ok((empty, b))); + assert_eq!(hex_digit1::<_, (_, ErrorKind)>(c), Ok((empty, c))); + assert_eq!(hex_digit1::<_, (_, ErrorKind)>(d), Ok(("zé12", &"a"[..]))); + assert_eq!( + hex_digit1(e), + Err(Err::Error((e, ErrorKind::HexDigit))) + ); + assert_eq!( + oct_digit1(a), + Err(Err::Error((a, ErrorKind::OctDigit))) + ); + assert_eq!(oct_digit1::<_, (_, ErrorKind)>(b), Ok((empty, b))); + assert_eq!( + oct_digit1(c), + Err(Err::Error((c, ErrorKind::OctDigit))) + ); + assert_eq!( + oct_digit1(d), + Err(Err::Error((d, ErrorKind::OctDigit))) + ); + assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(a), Ok((empty, a))); + //assert_eq!(fix_error!(b,(), alphanumeric), Ok((empty, b))); + assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(c), Ok((empty, c))); + assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(d), Ok(("é12", "az"))); + assert_eq!(space1::<_, (_, ErrorKind)>(e), Ok((empty, e))); + } + + use crate::traits::Offset; + #[test] + fn offset() { + let a = &b"abcd;"[..]; + let b = &b"1234;"[..]; + let c = &b"a123;"[..]; + let d = &b" \t;"[..]; + let e = &b" \t\r\n;"[..]; + let f = &b"123abcDEF;"[..]; + + match alpha1::<_, (_, ErrorKind)>(a) { + Ok((i, _)) => { + assert_eq!(a.offset(i) + i.len(), a.len()); + } + _ => panic!("wrong return type in offset test for alpha"), + } + match digit1::<_, (_, ErrorKind)>(b) { + Ok((i, _)) => { + assert_eq!(b.offset(i) + i.len(), b.len()); + } + _ => panic!("wrong return type in offset test for digit"), + } + match alphanumeric1::<_, (_, ErrorKind)>(c) { + Ok((i, _)) => { + assert_eq!(c.offset(i) + i.len(), c.len()); + } + _ => panic!("wrong return type in offset test for alphanumeric"), + } + match space1::<_, (_, ErrorKind)>(d) { + Ok((i, _)) => { + assert_eq!(d.offset(i) + i.len(), d.len()); + } + _ => panic!("wrong return type in offset test for space"), + } + match multispace1::<_, (_, ErrorKind)>(e) { + Ok((i, _)) => { + assert_eq!(e.offset(i) + i.len(), e.len()); + } + _ => panic!("wrong return type in offset test for multispace"), + } + match hex_digit1::<_, (_, ErrorKind)>(f) { + Ok((i, _)) => { + assert_eq!(f.offset(i) + i.len(), f.len()); + } + _ => panic!("wrong return type in offset test for hex_digit"), + } + match oct_digit1::<_, (_, ErrorKind)>(f) { + Ok((i, _)) => { + assert_eq!(f.offset(i) + i.len(), f.len()); + } + _ => panic!("wrong return type in offset test for oct_digit"), + } + } + + #[test] + fn is_not_line_ending_bytes() { + let a: &[u8] = b"ab12cd\nefgh"; + assert_eq!(not_line_ending::<_, (_, ErrorKind)>(a), Ok((&b"\nefgh"[..], &b"ab12cd"[..]))); + + let b: &[u8] = b"ab12cd\nefgh\nijkl"; + assert_eq!( + not_line_ending::<_, (_, ErrorKind)>(b), + Ok((&b"\nefgh\nijkl"[..], &b"ab12cd"[..])) + ); + + let c: &[u8] = b"ab12cd\r\nefgh\nijkl"; + assert_eq!( + not_line_ending::<_, (_, ErrorKind)>(c), + Ok((&b"\r\nefgh\nijkl"[..], &b"ab12cd"[..])) + ); + + let d: &[u8] = b"ab12cd"; + assert_eq!(not_line_ending::<_, (_, ErrorKind)>(d), Ok((&[][..], &d[..]))); + } + + #[test] + fn is_not_line_ending_str() { + + /* + let a: &str = "ab12cd\nefgh"; + assert_eq!(not_line_ending(a), Ok((&"\nefgh"[..], &"ab12cd"[..]))); + + let b: &str = "ab12cd\nefgh\nijkl"; + assert_eq!(not_line_ending(b), Ok((&"\nefgh\nijkl"[..], &"ab12cd"[..]))); + + let c: &str = "ab12cd\r\nefgh\nijkl"; + assert_eq!(not_line_ending(c), 
Ok((&"\r\nefgh\nijkl"[..], &"ab12cd"[..]))); + + let d = "βèƒôřè\nÂßÇáƒƭèř"; + assert_eq!(not_line_ending(d), Ok((&"\nÂßÇáƒƭèř"[..], &"βèƒôřè"[..]))); + + let e = "βèƒôřè\r\nÂßÇáƒƭèř"; + assert_eq!(not_line_ending(e), Ok((&"\r\nÂßÇáƒƭèř"[..], &"βèƒôřè"[..]))); + */ + + let f = "βèƒôřè\rÂßÇáƒƭèř"; + assert_eq!( + not_line_ending(f), + Err(Err::Error((f, ErrorKind::Tag))) + ); + + let g2: &str = "ab12cd"; + assert_eq!(not_line_ending::<_, (_, ErrorKind)>(g2), Ok(("", g2))); + } + + #[test] + fn hex_digit_test() { + let i = &b"0123456789abcdefABCDEF;"[..]; + assert_parse!(hex_digit1(i), Ok((&b";"[..], &i[..i.len() - 1]))); + + let i = &b"g"[..]; + assert_parse!( + hex_digit1(i), + Err(Err::Error(error_position!(i, ErrorKind::HexDigit))) + ); + + let i = &b"G"[..]; + assert_parse!( + hex_digit1(i), + Err(Err::Error(error_position!(i, ErrorKind::HexDigit))) + ); + + assert!(crate::character::is_hex_digit(b'0')); + assert!(crate::character::is_hex_digit(b'9')); + assert!(crate::character::is_hex_digit(b'a')); + assert!(crate::character::is_hex_digit(b'f')); + assert!(crate::character::is_hex_digit(b'A')); + assert!(crate::character::is_hex_digit(b'F')); + assert!(!crate::character::is_hex_digit(b'g')); + assert!(!crate::character::is_hex_digit(b'G')); + assert!(!crate::character::is_hex_digit(b'/')); + assert!(!crate::character::is_hex_digit(b':')); + assert!(!crate::character::is_hex_digit(b'@')); + assert!(!crate::character::is_hex_digit(b'\x60')); + } + + #[test] + fn oct_digit_test() { + let i = &b"01234567;"[..]; + assert_parse!(oct_digit1(i), Ok((&b";"[..], &i[..i.len() - 1]))); + + let i = &b"8"[..]; + assert_parse!( + oct_digit1(i), + Err(Err::Error(error_position!(i, ErrorKind::OctDigit))) + ); + + assert!(crate::character::is_oct_digit(b'0')); + assert!(crate::character::is_oct_digit(b'7')); + assert!(!crate::character::is_oct_digit(b'8')); + assert!(!crate::character::is_oct_digit(b'9')); + assert!(!crate::character::is_oct_digit(b'a')); + assert!(!crate::character::is_oct_digit(b'A')); + assert!(!crate::character::is_oct_digit(b'/')); + assert!(!crate::character::is_oct_digit(b':')); + assert!(!crate::character::is_oct_digit(b'@')); + assert!(!crate::character::is_oct_digit(b'\x60')); + } + + #[test] + fn full_line_windows() { + //let not_line_ending = |i:&[u8]| take_while(|c| c != b'\r' && c != b'\n')(i); + + named!( + take_full_line<(&[u8], &[u8])>, + tuple!(not_line_ending, line_ending) + ); + let input = b"abc\r\n"; + let output = take_full_line(input); + assert_eq!(output, Ok((&b""[..], (&b"abc"[..], &b"\r\n"[..])))); + } + + #[test] + fn full_line_unix() { + //let not_line_ending = |i:&[u8]| take_while(|c| c != b'\n')(i); + named!( + take_full_line<(&[u8], &[u8])>, + tuple!(not_line_ending, line_ending) + ); + let input = b"abc\n"; + let output = take_full_line(input); + assert_eq!(output, Ok((&b""[..], (&b"abc"[..], &b"\n"[..])))); + } + + #[test] + fn check_windows_lineending() { + let input = b"\r\n"; + let output = line_ending(&input[..]); + assert_parse!(output, Ok((&b""[..], &b"\r\n"[..]))); + } + + #[test] + fn check_unix_lineending() { + let input = b"\n"; + let output = line_ending(&input[..]); + assert_parse!(output, Ok((&b""[..], &b"\n"[..]))); + } + + #[test] + fn cr_lf() { + assert_parse!(crlf(&b"\r\na"[..]), Ok((&b"a"[..], &b"\r\n"[..]))); + assert_parse!(crlf(&b"\r"[..]), Err(Err::Error(error_position!(&b"\r"[..], ErrorKind::CrLf)))); + assert_parse!( + crlf(&b"\ra"[..]), + Err(Err::Error(error_position!(&b"\ra"[..], ErrorKind::CrLf))) + ); + + 
assert_parse!(crlf("\r\na"), Ok(("a", "\r\n"))); + assert_parse!(crlf("\r"), Err(Err::Error(error_position!(&"\r"[..], ErrorKind::CrLf)))); + assert_parse!( + crlf("\ra"), + Err(Err::Error(error_position!("\ra", ErrorKind::CrLf))) + ); + } + + #[test] + fn end_of_line() { + assert_parse!(line_ending(&b"\na"[..]), Ok((&b"a"[..], &b"\n"[..]))); + assert_parse!(line_ending(&b"\r\na"[..]), Ok((&b"a"[..], &b"\r\n"[..]))); + assert_parse!(line_ending(&b"\r"[..]), Err(Err::Error(error_position!(&b"\r"[..], ErrorKind::CrLf)))); + assert_parse!( + line_ending(&b"\ra"[..]), + Err(Err::Error(error_position!(&b"\ra"[..], ErrorKind::CrLf))) + ); + + assert_parse!(line_ending("\na"), Ok(("a", "\n"))); + assert_parse!(line_ending("\r\na"), Ok(("a", "\r\n"))); + assert_parse!(line_ending("\r"), Err(Err::Error(error_position!(&"\r"[..], ErrorKind::CrLf)))); + assert_parse!( + line_ending("\ra"), + Err(Err::Error(error_position!("\ra", ErrorKind::CrLf))) + ); + } +} diff --git a/third_party/rust/nom/src/character/macros.rs b/third_party/rust/nom/src/character/macros.rs new file mode 100644 index 0000000000..097ee409fe --- /dev/null +++ b/third_party/rust/nom/src/character/macros.rs @@ -0,0 +1,112 @@ +/// Character level parsers + +/// matches one of the provided characters +/// +/// # Example +/// ``` +/// # #[macro_use] extern crate nom; +/// # fn main() { +/// named!(simple, one_of!(&b"abc"[..])); +/// assert_eq!(simple(b"a123"), Ok((&b"123"[..], 'a'))); +/// +/// named!(a_or_b<&str, char>, one_of!("ab汉")); +/// assert_eq!(a_or_b("汉jiosfe"), Ok(("jiosfe", '汉'))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! one_of ( + ($i:expr, $inp: expr) => ( $crate::character::streaming::one_of($inp)($i) ); +); + +/// matches anything but the provided characters +/// +/// # Example +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::{Err,error::ErrorKind}; +/// # fn main() { +/// named!(no_letter_a, none_of!(&b"abc"[..])); +/// assert_eq!(no_letter_a(b"123"), Ok((&b"23"[..], '1'))); +/// +/// named!(err_on_single_quote, none_of!(&b"'"[..])); +/// assert_eq!(err_on_single_quote(b"'jiosfe"), Err(Err::Error(error_position!(&b"'jiosfe"[..], ErrorKind::NoneOf)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! none_of ( + ($i:expr, $inp: expr) => ( $crate::character::streaming::none_of($inp)($i) ); +); + +/// matches one character: `char!(char) => &[u8] -> IResult<&[u8], char> +/// +/// # Example +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::{Err,error::ErrorKind}; +/// # fn main() { +/// named!(match_letter_a, char!('a')); +/// assert_eq!(match_letter_a(b"abc"), Ok((&b"bc"[..],'a'))); +/// +/// assert_eq!(match_letter_a(b"123cdef"), Err(Err::Error(error_position!(&b"123cdef"[..], ErrorKind::Char)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! 
char ( + ($i:expr, $c: expr) => ( $crate::character::streaming::char($c)($i) ); +); + +#[cfg(test)] +mod tests { + use crate::internal::Err; + use crate::error::ErrorKind; + + #[test] + fn one_of() { + named!(f, one_of!("ab")); + + let a = &b"abcd"[..]; + assert_eq!(f(a), Ok((&b"bcd"[..], 'a'))); + + let b = &b"cde"[..]; + assert_eq!(f(b), Err(Err::Error(error_position!(b, ErrorKind::OneOf)))); + + named!(utf8(&str) -> char, + one_of!("+\u{FF0B}")); + + assert!(utf8("+").is_ok()); + assert!(utf8("\u{FF0B}").is_ok()); + } + + #[test] + fn none_of() { + named!(f, none_of!("ab")); + + let a = &b"abcd"[..]; + assert_eq!(f(a), Err(Err::Error(error_position!(a, ErrorKind::NoneOf)))); + + let b = &b"cde"[..]; + assert_eq!(f(b), Ok((&b"de"[..], 'c'))); + } + + #[test] + fn char() { + named!(f, char!('c')); + + let a = &b"abcd"[..]; + assert_eq!(f(a), Err(Err::Error(error_position!(a, ErrorKind::Char)))); + + let b = &b"cde"[..]; + assert_eq!(f(b), Ok((&b"de"[..], 'c'))); + } + + #[test] + fn char_str() { + named!(f<&str, char>, char!('c')); + + let a = &"abcd"[..]; + assert_eq!(f(a), Err(Err::Error(error_position!(a, ErrorKind::Char)))); + + let b = &"cde"[..]; + assert_eq!(f(b), Ok((&"de"[..], 'c'))); + } +} diff --git a/third_party/rust/nom/src/character/mod.rs b/third_party/rust/nom/src/character/mod.rs new file mode 100644 index 0000000000..3f0899aa8c --- /dev/null +++ b/third_party/rust/nom/src/character/mod.rs @@ -0,0 +1,101 @@ +//! character specific parsers and combinators +//! +//! functions recognizing specific characters + +#[macro_use] +mod macros; + +pub mod streaming; +pub mod complete; + +/// Tests if byte is ASCII alphabetic: A-Z, a-z +/// +/// # Example +/// +/// ``` +/// # use nom::character::is_alphabetic; +/// assert_eq!(is_alphabetic(b'9'), false); +/// assert_eq!(is_alphabetic(b'a'), true); +/// ``` +#[inline] +pub fn is_alphabetic(chr: u8) -> bool { + (chr >= 0x41 && chr <= 0x5A) || (chr >= 0x61 && chr <= 0x7A) +} + +/// Tests if byte is ASCII digit: 0-9 +/// +/// # Example +/// +/// ``` +/// # use nom::character::is_digit; +/// assert_eq!(is_digit(b'a'), false); +/// assert_eq!(is_digit(b'9'), true); +/// ``` +#[inline] +pub fn is_digit(chr: u8) -> bool { + chr >= 0x30 && chr <= 0x39 +} + +/// Tests if byte is ASCII hex digit: 0-9, A-F, a-f +/// +/// # Example +/// +/// ``` +/// # use nom::character::is_hex_digit; +/// assert_eq!(is_hex_digit(b'a'), true); +/// assert_eq!(is_hex_digit(b'9'), true); +/// assert_eq!(is_hex_digit(b'A'), true); +/// assert_eq!(is_hex_digit(b'x'), false); +/// ``` +#[inline] +pub fn is_hex_digit(chr: u8) -> bool { + (chr >= 0x30 && chr <= 0x39) || (chr >= 0x41 && chr <= 0x46) || (chr >= 0x61 && chr <= 0x66) +} + +/// Tests if byte is ASCII octal digit: 0-7 +/// +/// # Example +/// +/// ``` +/// # use nom::character::is_oct_digit; +/// assert_eq!(is_oct_digit(b'a'), false); +/// assert_eq!(is_oct_digit(b'9'), false); +/// assert_eq!(is_oct_digit(b'6'), true); +/// ``` +#[inline] +pub fn is_oct_digit(chr: u8) -> bool { + chr >= 0x30 && chr <= 0x37 +} + +/// Tests if byte is ASCII alphanumeric: A-Z, a-z, 0-9 +/// +/// # Example +/// +/// ``` +/// # use nom::character::is_alphanumeric; +/// assert_eq!(is_alphanumeric(b'-'), false); +/// assert_eq!(is_alphanumeric(b'a'), true); +/// assert_eq!(is_alphanumeric(b'9'), true); +/// assert_eq!(is_alphanumeric(b'A'), true); +/// ``` +#[inline] +pub fn is_alphanumeric(chr: u8) -> bool { + is_alphabetic(chr) || is_digit(chr) +} + +/// Tests if byte is ASCII space or tab +/// +/// # Example +/// +/// ``` +/// 
# use nom::character::is_space; +/// assert_eq!(is_space(b'\n'), false); +/// assert_eq!(is_space(b'\r'), false); +/// assert_eq!(is_space(b' '), true); +/// assert_eq!(is_space(b'\t'), true); +/// ``` +#[inline] +pub fn is_space(chr: u8) -> bool { + chr == b' ' || chr == b'\t' +} + diff --git a/third_party/rust/nom/src/character/streaming.rs b/third_party/rust/nom/src/character/streaming.rs new file mode 100644 index 0000000000..f62daceeb4 --- /dev/null +++ b/third_party/rust/nom/src/character/streaming.rs @@ -0,0 +1,1010 @@ +//! character specific parsers and combinators, streaming version +//! +//! functions recognizing specific characters + +use crate::internal::{Err, IResult, Needed}; +use crate::error::ParseError; +use crate::lib::std::ops::{Range, RangeFrom, RangeTo}; +use crate::traits::{AsChar, FindToken, InputIter, InputLength, InputTakeAtPosition, Slice}; +use crate::traits::{Compare, CompareResult}; + +use crate::error::ErrorKind; + +/// Recognizes one character. +/// +/// *streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::character::streaming::char; +/// # fn main() { +/// assert_eq!(char::<_, (_, ErrorKind)>('a')(&b"abc"[..]), Ok((&b"bc"[..], 'a'))); +/// assert_eq!(char::<_, (_, ErrorKind)>('a')(&b"bc"[..]), Err(Err::Error((&b"bc"[..], ErrorKind::Char)))); +/// assert_eq!(char::<_, (_, ErrorKind)>('a')(&b""[..]), Err(Err::Incomplete(Needed::Size(1)))); +/// # } +/// ``` +pub fn char>(c: char) -> impl Fn(I) -> IResult +where + I: Slice> + InputIter, + ::Item: AsChar, +{ + move |i: I| match (i).iter_elements().next().map(|t| { + let b = t.as_char() == c; + (&c, b) + }) { + None => Err(Err::Incomplete(Needed::Size(1))), + Some((_, false)) => { + Err(Err::Error(Error::from_char(i, c))) + } + Some((c, true)) => Ok((i.slice(c.len()..), c.as_char())), + } +} + +/// Recognizes one of the provided characters. +/// +/// *streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::character::streaming::one_of; +/// # fn main() { +/// assert_eq!(one_of::<_, _, (_, ErrorKind)>("abc")("b"), Ok(("", 'b'))); +/// assert_eq!(one_of::<_, _, (_, ErrorKind)>("a")("bc"), Err(Err::Error(("bc", ErrorKind::OneOf)))); +/// assert_eq!(one_of::<_, _, (_, ErrorKind)>("a")(""), Err(Err::Incomplete(Needed::Size(1)))); +/// # } +/// ``` +pub fn one_of>(list: T) -> impl Fn(I) -> IResult +where + I: Slice> + InputIter, + ::Item: AsChar + Copy, + T: FindToken<::Item>, +{ + move |i: I| match (i).iter_elements().next().map(|c| (c, list.find_token(c))) { + None => Err(Err::Incomplete(Needed::Size(1))), + Some((_, false)) => Err(Err::Error(Error::from_error_kind(i, ErrorKind::OneOf))), + Some((c, true)) => Ok((i.slice(c.len()..), c.as_char())), + } +} + +/// Recognizes a character that is not in the provided characters. +/// +/// *streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. 
+/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::character::streaming::none_of; +/// # fn main() { +/// assert_eq!(none_of::<_, _, (_, ErrorKind)>("abc")("z"), Ok(("", 'z'))); +/// assert_eq!(none_of::<_, _, (_, ErrorKind)>("ab")("a"), Err(Err::Error(("a", ErrorKind::NoneOf)))); +/// assert_eq!(none_of::<_, _, (_, ErrorKind)>("a")(""), Err(Err::Incomplete(Needed::Size(1)))); +/// # } +/// ``` +pub fn none_of>(list: T) -> impl Fn(I) -> IResult +where + I: Slice> + InputIter, + ::Item: AsChar + Copy, + T: FindToken<::Item>, +{ + move |i: I| match (i).iter_elements().next().map(|c| (c, !list.find_token(c))) { + None => Err(Err::Incomplete(Needed::Size(1))), + Some((_, false)) => Err(Err::Error(Error::from_error_kind(i, ErrorKind::NoneOf))), + Some((c, true)) => Ok((i.slice(c.len()..), c.as_char())), + } +} + +/// Recognizes the string "\r\n". +/// +/// *streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::crlf; +/// # fn main() { +/// assert_eq!(crlf::<_, (_, ErrorKind)>("\r\nc"), Ok(("c", "\r\n"))); +/// assert_eq!(crlf::<_, (_, ErrorKind)>("ab\r\nc"), Err(Err::Error(("ab\r\nc", ErrorKind::CrLf)))); +/// assert_eq!(crlf::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::Size(2)))); +/// # } +/// ``` +pub fn crlf>(input: T) -> IResult +where + T: Slice> + Slice> + Slice>, + T: InputIter, + T: Compare<&'static str>, +{ + match input.compare("\r\n") { + //FIXME: is this the right index? + CompareResult::Ok => Ok((input.slice(2..), input.slice(0..2))), + CompareResult::Incomplete => Err(Err::Incomplete(Needed::Size(2))), + CompareResult::Error => { + let e: ErrorKind = ErrorKind::CrLf; + Err(Err::Error(E::from_error_kind(input, e))) + } + } +} + +/// Recognizes a string of any char except '\r' or '\n'. +/// +/// *streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::not_line_ending; +/// # fn main() { +/// assert_eq!(not_line_ending::<_, (_, ErrorKind)>("ab\r\nc"), Ok(("\r\nc", "ab"))); +/// assert_eq!(not_line_ending::<_, (_, ErrorKind)>("abc"), Err(Err::Incomplete(Needed::Unknown))); +/// assert_eq!(not_line_ending::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::Unknown))); +/// # } +/// ``` +pub fn not_line_ending>(input: T) -> IResult +where + T: Slice> + Slice> + Slice>, + T: InputIter + InputLength, + T: Compare<&'static str>, + ::Item: AsChar, + ::Item: AsChar, +{ + match input.position(|item| { + let c = item.as_char(); + c == '\r' || c == '\n' + }) { + None => { + Err(Err::Incomplete(Needed::Unknown)) + } + Some(index) => { + let mut it = input.slice(index..).iter_elements(); + let nth = it.next().unwrap().as_char(); + if nth == '\r' { + let sliced = input.slice(index..); + let comp = sliced.compare("\r\n"); + match comp { + //FIXME: calculate the right index + CompareResult::Incomplete => Err(Err::Incomplete(Needed::Unknown)), + CompareResult::Error => { + let e: ErrorKind = ErrorKind::Tag; + Err(Err::Error(E::from_error_kind(input, e))) + } + CompareResult::Ok => Ok((input.slice(index..), input.slice(..index))), + } + } else { + Ok((input.slice(index..), input.slice(..index))) + } + } + } +} + +/// Recognizes an end of line (both '\n' and '\r\n'). 
+/// +/// *streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::line_ending; +/// # fn main() { +/// assert_eq!(line_ending::<_, (_, ErrorKind)>("\r\nc"), Ok(("c", "\r\n"))); +/// assert_eq!(line_ending::<_, (_, ErrorKind)>("ab\r\nc"), Err(Err::Error(("ab\r\nc", ErrorKind::CrLf)))); +/// assert_eq!(line_ending::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::Size(1)))); +/// # } +/// ``` +pub fn line_ending>(input: T) -> IResult +where + T: Slice> + Slice> + Slice>, + T: InputIter + InputLength, + T: Compare<&'static str>, +{ + match input.compare("\n") { + CompareResult::Ok => Ok((input.slice(1..), input.slice(0..1))), + CompareResult::Incomplete => Err(Err::Incomplete(Needed::Size(1))), + CompareResult::Error => { + match input.compare("\r\n") { + //FIXME: is this the right index? + CompareResult::Ok => Ok((input.slice(2..), input.slice(0..2))), + CompareResult::Incomplete => Err(Err::Incomplete(Needed::Size(2))), + CompareResult::Error => Err(Err::Error(E::from_error_kind(input, ErrorKind::CrLf))), + } + } + } +} + +/// Matches a newline character '\\n'. +/// +/// *streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::newline; +/// # fn main() { +/// assert_eq!(newline::<_, (_, ErrorKind)>("\nc"), Ok(("c", '\n'))); +/// assert_eq!(newline::<_, (_, ErrorKind)>("\r\nc"), Err(Err::Error(("\r\nc", ErrorKind::Char)))); +/// assert_eq!(newline::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::Size(1)))); +/// # } +/// ``` +pub fn newline>(input: I) -> IResult +where + I: Slice> + InputIter, + ::Item: AsChar, +{ + char('\n')(input) +} + +/// Matches a tab character '\t'. +/// +/// *streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::tab; +/// # fn main() { +/// assert_eq!(tab::<_, (_, ErrorKind)>("\tc"), Ok(("c", '\t'))); +/// assert_eq!(tab::<_, (_, ErrorKind)>("\r\nc"), Err(Err::Error(("\r\nc", ErrorKind::Char)))); +/// assert_eq!(tab::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::Size(1)))); +/// # } +/// ``` +pub fn tab>(input: I) -> IResult +where + I: Slice> + InputIter, + ::Item: AsChar, +{ + char('\t')(input) +} + +/// Matches one byte as a character. Note that the input type will +/// accept a `str`, but not a `&[u8]`, unlike many other nom parsers. +/// +/// *streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data. 
+/// +/// # Example +/// +/// ``` +/// # use nom::{character::streaming::anychar, Err, error::ErrorKind, IResult, Needed}; +/// # fn main() { +/// assert_eq!(anychar::<_, (_, ErrorKind)>("abc"), Ok(("bc",'a'))); +/// assert_eq!(anychar::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::Size(1)))); +/// # } +/// ``` +pub fn anychar>(input: T) -> IResult +where + T: InputIter + InputLength + Slice>, + ::Item: AsChar, +{ + let mut it = input.iter_indices(); + match it.next() { + None => Err(Err::Incomplete(Needed::Size(1))), + Some((_, c)) => match it.next() { + None => Ok((input.slice(input.input_len()..), c.as_char())), + Some((idx, _)) => Ok((input.slice(idx..), c.as_char())), + }, + } +} + +/// Recognizes zero or more lowercase and uppercase ASCII alphabetic characters: a-z, A-Z +/// +/// *streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non alphabetic character). +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::alpha0; +/// # fn main() { +/// assert_eq!(alpha0::<_, (_, ErrorKind)>("ab1c"), Ok(("1c", "ab"))); +/// assert_eq!(alpha0::<_, (_, ErrorKind)>("1c"), Ok(("1c", ""))); +/// assert_eq!(alpha0::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::Size(1)))); +/// # } +/// ``` +pub fn alpha0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position(|item| !item.is_alpha()) +} + +/// Recognizes one or more lowercase and uppercase ASCII alphabetic characters: a-z, A-Z +/// +/// *streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non alphabetic character). +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::alpha1; +/// # fn main() { +/// assert_eq!(alpha1::<_, (_, ErrorKind)>("aB1c"), Ok(("1c", "aB"))); +/// assert_eq!(alpha1::<_, (_, ErrorKind)>("1c"), Err(Err::Error(("1c", ErrorKind::Alpha)))); +/// assert_eq!(alpha1::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::Size(1)))); +/// # } +/// ``` +pub fn alpha1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position1(|item| !item.is_alpha(), ErrorKind::Alpha) +} + +/// Recognizes zero or more ASCII numerical characters: 0-9 +/// +/// *streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non digit character). +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::digit0; +/// # fn main() { +/// assert_eq!(digit0::<_, (_, ErrorKind)>("21c"), Ok(("c", "21"))); +/// assert_eq!(digit0::<_, (_, ErrorKind)>("a21c"), Ok(("a21c", ""))); +/// assert_eq!(digit0::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::Size(1)))); +/// # } +/// ``` +pub fn digit0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position(|item| !item.is_dec_digit()) +} + +/// Recognizes one or more ASCII numerical characters: 0-9 +/// +/// *streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non digit character). 
+/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::digit1; +/// # fn main() { +/// assert_eq!(digit1::<_, (_, ErrorKind)>("21c"), Ok(("c", "21"))); +/// assert_eq!(digit1::<_, (_, ErrorKind)>("c1"), Err(Err::Error(("c1", ErrorKind::Digit)))); +/// assert_eq!(digit1::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::Size(1)))); +/// # } +/// ``` +pub fn digit1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position1(|item| !item.is_dec_digit(), ErrorKind::Digit) +} + +/// Recognizes zero or more ASCII hexadecimal numerical characters: 0-9, A-F, a-f +/// +/// *streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non hexadecimal digit character). +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::hex_digit0; +/// # fn main() { +/// assert_eq!(hex_digit0::<_, (_, ErrorKind)>("21cZ"), Ok(("Z", "21c"))); +/// assert_eq!(hex_digit0::<_, (_, ErrorKind)>("Z21c"), Ok(("Z21c", ""))); +/// assert_eq!(hex_digit0::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::Size(1)))); +/// # } +/// ``` +pub fn hex_digit0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position(|item| !item.is_hex_digit()) +} + +/// Recognizes one or more ASCII hexadecimal numerical characters: 0-9, A-F, a-f +/// +/// *streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non hexadecimal digit character). +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::hex_digit1; +/// # fn main() { +/// assert_eq!(hex_digit1::<_, (_, ErrorKind)>("21cZ"), Ok(("Z", "21c"))); +/// assert_eq!(hex_digit1::<_, (_, ErrorKind)>("H2"), Err(Err::Error(("H2", ErrorKind::HexDigit)))); +/// assert_eq!(hex_digit1::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::Size(1)))); +/// # } +/// ``` +pub fn hex_digit1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position1(|item| !item.is_hex_digit(), ErrorKind::HexDigit) +} + +/// Recognizes zero or more octal characters: 0-7 +/// +/// *streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non octal digit character). +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::oct_digit0; +/// # fn main() { +/// assert_eq!(oct_digit0::<_, (_, ErrorKind)>("21cZ"), Ok(("cZ", "21"))); +/// assert_eq!(oct_digit0::<_, (_, ErrorKind)>("Z21c"), Ok(("Z21c", ""))); +/// assert_eq!(oct_digit0::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::Size(1)))); +/// # } +/// ``` +pub fn oct_digit0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position(|item| !item.is_oct_digit()) +} + +/// Recognizes one or more octal characters: 0-7 +/// +/// *streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non octal digit character). 
+/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::oct_digit1; +/// # fn main() { +/// assert_eq!(oct_digit1::<_, (_, ErrorKind)>("21cZ"), Ok(("cZ", "21"))); +/// assert_eq!(oct_digit1::<_, (_, ErrorKind)>("H2"), Err(Err::Error(("H2", ErrorKind::OctDigit)))); +/// assert_eq!(oct_digit1::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::Size(1)))); +/// # } +/// ``` +pub fn oct_digit1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position1(|item| !item.is_oct_digit(), ErrorKind::OctDigit) +} + +/// Recognizes zero or more ASCII numerical and alphabetic characters: 0-9, a-z, A-Z +/// +/// *streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non alphanumerical character). +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::alphanumeric0; +/// # fn main() { +/// assert_eq!(alphanumeric0::<_, (_, ErrorKind)>("21cZ%1"), Ok(("%1", "21cZ"))); +/// assert_eq!(alphanumeric0::<_, (_, ErrorKind)>("&Z21c"), Ok(("&Z21c", ""))); +/// assert_eq!(alphanumeric0::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::Size(1)))); +/// # } +/// ``` +pub fn alphanumeric0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position(|item| !item.is_alphanum()) +} + +/// Recognizes one or more ASCII numerical and alphabetic characters: 0-9, a-z, A-Z +/// +/// *streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non alphanumerical character). +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::alphanumeric1; +/// # fn main() { +/// assert_eq!(alphanumeric1::<_, (_, ErrorKind)>("21cZ%1"), Ok(("%1", "21cZ"))); +/// assert_eq!(alphanumeric1::<_, (_, ErrorKind)>("&H2"), Err(Err::Error(("&H2", ErrorKind::AlphaNumeric)))); +/// assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::Size(1)))); +/// # } +/// ``` +pub fn alphanumeric1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar, +{ + input.split_at_position1(|item| !item.is_alphanum(), ErrorKind::AlphaNumeric) +} + +/// Recognizes zero or more spaces and tabs. +/// +/// *streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non space character). +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::space0; +/// # fn main() { +/// assert_eq!(space0::<_, (_, ErrorKind)>(" \t21c"), Ok(("21c", " \t"))); +/// assert_eq!(space0::<_, (_, ErrorKind)>("Z21c"), Ok(("Z21c", ""))); +/// assert_eq!(space0::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::Size(1)))); +/// # } +/// ``` +pub fn space0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar + Clone, +{ + input.split_at_position(|item| { + let c = item.clone().as_char(); + !(c == ' ' || c == '\t') + }) +} +/// Recognizes one or more spaces and tabs. +/// +/// *streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non space character). 
+/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::space1; +/// # fn main() { +/// assert_eq!(space1::<_, (_, ErrorKind)>(" \t21c"), Ok(("21c", " \t"))); +/// assert_eq!(space1::<_, (_, ErrorKind)>("H2"), Err(Err::Error(("H2", ErrorKind::Space)))); +/// assert_eq!(space1::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::Size(1)))); +/// # } +/// ``` +pub fn space1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar + Clone, +{ + input.split_at_position1( + |item| { + let c = item.clone().as_char(); + !(c == ' ' || c == '\t') + }, + ErrorKind::Space, + ) +} + +/// Recognizes zero or more spaces, tabs, carriage returns and line feeds. +/// +/// *streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non space character). +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::multispace0; +/// # fn main() { +/// assert_eq!(multispace0::<_, (_, ErrorKind)>(" \t\n\r21c"), Ok(("21c", " \t\n\r"))); +/// assert_eq!(multispace0::<_, (_, ErrorKind)>("Z21c"), Ok(("Z21c", ""))); +/// assert_eq!(multispace0::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::Size(1)))); +/// # } +/// ``` +pub fn multispace0>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar + Clone, +{ + input.split_at_position(|item| { + let c = item.clone().as_char(); + !(c == ' ' || c == '\t' || c == '\r' || c == '\n') + }) +} + +/// Recognizes one or more spaces, tabs, carriage returns and line feeds. +/// +/// *streaming version*: Will return `Err(nom::Err::Incomplete(_))` if there's not enough input data, +/// or if no terminating token is found (a non space character). +/// +/// # Example +/// +/// ``` +/// # use nom::{Err, error::ErrorKind, IResult, Needed}; +/// # use nom::character::streaming::multispace1; +/// # fn main() { +/// assert_eq!(multispace1::<_, (_, ErrorKind)>(" \t\n\r21c"), Ok(("21c", " \t\n\r"))); +/// assert_eq!(multispace1::<_, (_, ErrorKind)>("H2"), Err(Err::Error(("H2", ErrorKind::MultiSpace)))); +/// assert_eq!(multispace1::<_, (_, ErrorKind)>(""), Err(Err::Incomplete(Needed::Size(1)))); +/// # } +/// ``` +pub fn multispace1>(input: T) -> IResult +where + T: InputTakeAtPosition, + ::Item: AsChar + Clone, +{ + input.split_at_position1( + |item| { + let c = item.clone().as_char(); + !(c == ' ' || c == '\t' || c == '\r' || c == '\n') + }, + ErrorKind::MultiSpace, + ) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::internal::{Err, Needed}; + use crate::error::ErrorKind; + + macro_rules! 
assert_parse( + ($left: expr, $right: expr) => { + let res: $crate::IResult<_, _, (_, ErrorKind)> = $left; + assert_eq!(res, $right); + }; + ); + + #[test] + fn anychar_str() { + use super::anychar; + assert_eq!(anychar::<_, (&str, ErrorKind)>("Ә"), Ok(("", 'Ә'))); + } + + #[test] + fn character() { + let a: &[u8] = b"abcd"; + let b: &[u8] = b"1234"; + let c: &[u8] = b"a123"; + let d: &[u8] = "azé12".as_bytes(); + let e: &[u8] = b" "; + let f: &[u8] = b" ;"; + //assert_eq!(alpha1::<_, (_, ErrorKind)>(a), Err(Err::Incomplete(Needed::Size(1)))); + assert_parse!(alpha1(a), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!( + alpha1(b), + Err(Err::Error((b, ErrorKind::Alpha))) + ); + assert_eq!(alpha1::<_, (_, ErrorKind)>(c), Ok((&c[1..], &b"a"[..]))); + assert_eq!(alpha1::<_, (_, ErrorKind)>(d), Ok(("é12".as_bytes(), &b"az"[..]))); + assert_eq!( + digit1(a), + Err(Err::Error((a, ErrorKind::Digit))) + ); + assert_eq!(digit1::<_, (_, ErrorKind)>(b), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!( + digit1(c), + Err(Err::Error((c, ErrorKind::Digit))) + ); + assert_eq!( + digit1(d), + Err(Err::Error((d, ErrorKind::Digit))) + ); + assert_eq!(hex_digit1::<_, (_, ErrorKind)>(a), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!(hex_digit1::<_, (_, ErrorKind)>(b), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!(hex_digit1::<_, (_, ErrorKind)>(c), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!(hex_digit1::<_, (_, ErrorKind)>(d), Ok(("zé12".as_bytes(), &b"a"[..]))); + assert_eq!( + hex_digit1(e), + Err(Err::Error((e, ErrorKind::HexDigit))) + ); + assert_eq!( + oct_digit1(a), + Err(Err::Error((a, ErrorKind::OctDigit))) + ); + assert_eq!(oct_digit1::<_, (_, ErrorKind)>(b), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!( + oct_digit1(c), + Err(Err::Error((c, ErrorKind::OctDigit))) + ); + assert_eq!( + oct_digit1(d), + Err(Err::Error((d, ErrorKind::OctDigit))) + ); + assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(a), Err(Err::Incomplete(Needed::Size(1)))); + //assert_eq!(fix_error!(b,(), alphanumeric1), Ok((empty, b))); + assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(c), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(d), Ok(("é12".as_bytes(), &b"az"[..]))); + assert_eq!(space1::<_, (_, ErrorKind)>(e), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!(space1::<_, (_, ErrorKind)>(f), Ok((&b";"[..], &b" "[..]))); + } + + #[cfg(feature = "alloc")] + #[test] + fn character_s() { + let a = "abcd"; + let b = "1234"; + let c = "a123"; + let d = "azé12"; + let e = " "; + assert_eq!(alpha1::<_, (_, ErrorKind)>(a), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!( + alpha1(b), + Err(Err::Error((b, ErrorKind::Alpha))) + ); + assert_eq!(alpha1::<_, (_, ErrorKind)>(c), Ok((&c[1..], &"a"[..]))); + assert_eq!(alpha1::<_, (_, ErrorKind)>(d), Ok(("é12", &"az"[..]))); + assert_eq!( + digit1(a), + Err(Err::Error((a, ErrorKind::Digit))) + ); + assert_eq!(digit1::<_, (_, ErrorKind)>(b), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!( + digit1(c), + Err(Err::Error((c, ErrorKind::Digit))) + ); + assert_eq!( + digit1(d), + Err(Err::Error((d, ErrorKind::Digit))) + ); + assert_eq!(hex_digit1::<_, (_, ErrorKind)>(a), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!(hex_digit1::<_, (_, ErrorKind)>(b), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!(hex_digit1::<_, (_, ErrorKind)>(c), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!(hex_digit1::<_, (_, ErrorKind)>(d), Ok(("zé12", &"a"[..]))); + assert_eq!( + hex_digit1(e), + 
Err(Err::Error((e, ErrorKind::HexDigit))) + ); + assert_eq!( + oct_digit1(a), + Err(Err::Error((a, ErrorKind::OctDigit))) + ); + assert_eq!(oct_digit1::<_, (_, ErrorKind)>(b), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!( + oct_digit1(c), + Err(Err::Error((c, ErrorKind::OctDigit))) + ); + assert_eq!( + oct_digit1(d), + Err(Err::Error((d, ErrorKind::OctDigit))) + ); + assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(a), Err(Err::Incomplete(Needed::Size(1)))); + //assert_eq!(fix_error!(b,(), alphanumeric1), Ok((empty, b))); + assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(c), Err(Err::Incomplete(Needed::Size(1)))); + assert_eq!(alphanumeric1::<_, (_, ErrorKind)>(d), Ok(("é12", "az"))); + assert_eq!(space1::<_, (_, ErrorKind)>(e), Err(Err::Incomplete(Needed::Size(1)))); + } + + use crate::traits::Offset; + #[test] + fn offset() { + let a = &b"abcd;"[..]; + let b = &b"1234;"[..]; + let c = &b"a123;"[..]; + let d = &b" \t;"[..]; + let e = &b" \t\r\n;"[..]; + let f = &b"123abcDEF;"[..]; + + match alpha1::<_, (_, ErrorKind)>(a) { + Ok((i, _)) => { + assert_eq!(a.offset(i) + i.len(), a.len()); + } + _ => panic!("wrong return type in offset test for alpha"), + } + match digit1::<_, (_, ErrorKind)>(b) { + Ok((i, _)) => { + assert_eq!(b.offset(i) + i.len(), b.len()); + } + _ => panic!("wrong return type in offset test for digit"), + } + match alphanumeric1::<_, (_, ErrorKind)>(c) { + Ok((i, _)) => { + assert_eq!(c.offset(i) + i.len(), c.len()); + } + _ => panic!("wrong return type in offset test for alphanumeric"), + } + match space1::<_, (_, ErrorKind)>(d) { + Ok((i, _)) => { + assert_eq!(d.offset(i) + i.len(), d.len()); + } + _ => panic!("wrong return type in offset test for space"), + } + match multispace1::<_, (_, ErrorKind)>(e) { + Ok((i, _)) => { + assert_eq!(e.offset(i) + i.len(), e.len()); + } + _ => panic!("wrong return type in offset test for multispace"), + } + match hex_digit1::<_, (_, ErrorKind)>(f) { + Ok((i, _)) => { + assert_eq!(f.offset(i) + i.len(), f.len()); + } + _ => panic!("wrong return type in offset test for hex_digit"), + } + match oct_digit1::<_, (_, ErrorKind)>(f) { + Ok((i, _)) => { + assert_eq!(f.offset(i) + i.len(), f.len()); + } + _ => panic!("wrong return type in offset test for oct_digit"), + } + } + + #[test] + fn is_not_line_ending_bytes() { + let a: &[u8] = b"ab12cd\nefgh"; + assert_eq!(not_line_ending::<_, (_, ErrorKind)>(a), Ok((&b"\nefgh"[..], &b"ab12cd"[..]))); + + let b: &[u8] = b"ab12cd\nefgh\nijkl"; + assert_eq!( + not_line_ending::<_, (_, ErrorKind)>(b), + Ok((&b"\nefgh\nijkl"[..], &b"ab12cd"[..])) + ); + + let c: &[u8] = b"ab12cd\r\nefgh\nijkl"; + assert_eq!( + not_line_ending::<_, (_, ErrorKind)>(c), + Ok((&b"\r\nefgh\nijkl"[..], &b"ab12cd"[..])) + ); + + let d: &[u8] = b"ab12cd"; + assert_eq!(not_line_ending::<_, (_, ErrorKind)>(d), Err(Err::Incomplete(Needed::Unknown))); + } + + #[test] + fn is_not_line_ending_str() { + /* + let a: &str = "ab12cd\nefgh"; + assert_eq!(not_line_ending(a), Ok((&"\nefgh"[..], &"ab12cd"[..]))); + + let b: &str = "ab12cd\nefgh\nijkl"; + assert_eq!(not_line_ending(b), Ok((&"\nefgh\nijkl"[..], &"ab12cd"[..]))); + + let c: &str = "ab12cd\r\nefgh\nijkl"; + assert_eq!(not_line_ending(c), Ok((&"\r\nefgh\nijkl"[..], &"ab12cd"[..]))); + + let d = "βèƒôřè\nÂßÇáƒƭèř"; + assert_eq!(not_line_ending(d), Ok((&"\nÂßÇáƒƭèř"[..], &"βèƒôřè"[..]))); + + let e = "βèƒôřè\r\nÂßÇáƒƭèř"; + assert_eq!(not_line_ending(e), Ok((&"\r\nÂßÇáƒƭèř"[..], &"βèƒôřè"[..]))); + */ + + let f = "βèƒôřè\rÂßÇáƒƭèř"; + assert_eq!( + not_line_ending(f), + 
Err(Err::Error((f, ErrorKind::Tag))) + ); + + let g2: &str = "ab12cd"; + assert_eq!(not_line_ending::<_, (_, ErrorKind)>(g2), Err(Err::Incomplete(Needed::Unknown))); + } + + #[test] + fn hex_digit_test() { + let i = &b"0123456789abcdefABCDEF;"[..]; + assert_parse!(hex_digit1(i), Ok((&b";"[..], &i[..i.len() - 1]))); + + let i = &b"g"[..]; + assert_parse!( + hex_digit1(i), + Err(Err::Error(error_position!(i, ErrorKind::HexDigit))) + ); + + let i = &b"G"[..]; + assert_parse!( + hex_digit1(i), + Err(Err::Error(error_position!(i, ErrorKind::HexDigit))) + ); + + assert!(crate::character::is_hex_digit(b'0')); + assert!(crate::character::is_hex_digit(b'9')); + assert!(crate::character::is_hex_digit(b'a')); + assert!(crate::character::is_hex_digit(b'f')); + assert!(crate::character::is_hex_digit(b'A')); + assert!(crate::character::is_hex_digit(b'F')); + assert!(!crate::character::is_hex_digit(b'g')); + assert!(!crate::character::is_hex_digit(b'G')); + assert!(!crate::character::is_hex_digit(b'/')); + assert!(!crate::character::is_hex_digit(b':')); + assert!(!crate::character::is_hex_digit(b'@')); + assert!(!crate::character::is_hex_digit(b'\x60')); + } + + #[test] + fn oct_digit_test() { + let i = &b"01234567;"[..]; + assert_parse!(oct_digit1(i), Ok((&b";"[..], &i[..i.len() - 1]))); + + let i = &b"8"[..]; + assert_parse!( + oct_digit1(i), + Err(Err::Error(error_position!(i, ErrorKind::OctDigit))) + ); + + assert!(crate::character::is_oct_digit(b'0')); + assert!(crate::character::is_oct_digit(b'7')); + assert!(!crate::character::is_oct_digit(b'8')); + assert!(!crate::character::is_oct_digit(b'9')); + assert!(!crate::character::is_oct_digit(b'a')); + assert!(!crate::character::is_oct_digit(b'A')); + assert!(!crate::character::is_oct_digit(b'/')); + assert!(!crate::character::is_oct_digit(b':')); + assert!(!crate::character::is_oct_digit(b'@')); + assert!(!crate::character::is_oct_digit(b'\x60')); + } + + #[test] + fn full_line_windows() { + named!( + take_full_line<(&[u8], &[u8])>, + tuple!(not_line_ending, line_ending) + ); + let input = b"abc\r\n"; + let output = take_full_line(input); + assert_eq!(output, Ok((&b""[..], (&b"abc"[..], &b"\r\n"[..])))); + } + + #[test] + fn full_line_unix() { + named!( + take_full_line<(&[u8], &[u8])>, + tuple!(not_line_ending, line_ending) + ); + let input = b"abc\n"; + let output = take_full_line(input); + assert_eq!(output, Ok((&b""[..], (&b"abc"[..], &b"\n"[..])))); + } + + #[test] + fn check_windows_lineending() { + let input = b"\r\n"; + let output = line_ending(&input[..]); + assert_parse!(output, Ok((&b""[..], &b"\r\n"[..]))); + } + + #[test] + fn check_unix_lineending() { + let input = b"\n"; + let output = line_ending(&input[..]); + assert_parse!(output, Ok((&b""[..], &b"\n"[..]))); + } + + #[test] + fn cr_lf() { + assert_parse!(crlf(&b"\r\na"[..]), Ok((&b"a"[..], &b"\r\n"[..]))); + assert_parse!(crlf(&b"\r"[..]), Err(Err::Incomplete(Needed::Size(2)))); + assert_parse!( + crlf(&b"\ra"[..]), + Err(Err::Error(error_position!(&b"\ra"[..], ErrorKind::CrLf))) + ); + + assert_parse!(crlf("\r\na"), Ok(("a", "\r\n"))); + assert_parse!(crlf("\r"), Err(Err::Incomplete(Needed::Size(2)))); + assert_parse!( + crlf("\ra"), + Err(Err::Error(error_position!("\ra", ErrorKind::CrLf))) + ); + } + + #[test] + fn end_of_line() { + assert_parse!(line_ending(&b"\na"[..]), Ok((&b"a"[..], &b"\n"[..]))); + assert_parse!(line_ending(&b"\r\na"[..]), Ok((&b"a"[..], &b"\r\n"[..]))); + assert_parse!(line_ending(&b"\r"[..]), Err(Err::Incomplete(Needed::Size(2)))); + assert_parse!( + 
line_ending(&b"\ra"[..]), + Err(Err::Error(error_position!(&b"\ra"[..], ErrorKind::CrLf))) + ); + + assert_parse!(line_ending("\na"), Ok(("a", "\n"))); + assert_parse!(line_ending("\r\na"), Ok(("a", "\r\n"))); + assert_parse!(line_ending("\r"), Err(Err::Incomplete(Needed::Size(2)))); + assert_parse!( + line_ending("\ra"), + Err(Err::Error(error_position!("\ra", ErrorKind::CrLf))) + ); + } +} diff --git a/third_party/rust/nom/src/combinator/macros.rs b/third_party/rust/nom/src/combinator/macros.rs new file mode 100644 index 0000000000..59df06f660 --- /dev/null +++ b/third_party/rust/nom/src/combinator/macros.rs @@ -0,0 +1,1201 @@ +//! Macro combinators +//! +//! Macros are used to make combination easier, +//! since they often do not depend on the type +//! of the data they manipulate or return. +//! +//! There is a trick to make them easier to assemble, +//! combinators are defined like this: +//! +//! ```ignore +//! macro_rules! tag ( +//! ($i:expr, $inp: expr) => ( +//! { +//! ... +//! } +//! ); +//! ); +//! ``` +//! +//! But when used in other combinators, are Used +//! like this: +//! +//! ```ignore +//! named!(my_function, tag!("abcd")); +//! ``` +//! +//! Internally, other combinators will rewrite +//! that call to pass the input as first argument: +//! +//! ```ignore +//! macro_rules! named ( +//! ($name:ident, $submac:ident!( $($args:tt)* )) => ( +//! fn $name<'a>( i: &'a [u8] ) -> IResult<'a,&[u8], &[u8]> { +//! $submac!(i, $($args)*) +//! } +//! ); +//! ); +//! ``` +//! +//! If you want to call a combinator directly, you can +//! do it like this: +//! +//! ```ignore +//! let res = { tag!(input, "abcd"); } +//! ``` +//! +//! Combinators must have a specific variant for +//! non-macro arguments. Example: passing a function +//! to take_while! instead of another combinator. +//! +//! ```ignore +//! macro_rules! take_while( +//! ($input:expr, $submac:ident!( $($args:tt)* )) => ( +//! { +//! ... +//! } +//! ); +//! +//! // wrap the function in a macro to pass it to the main implementation +//! ($input:expr, $f:expr) => ( +//! take_while!($input, call!($f)); +//! ); +//! ); +//! ``` +#[allow(unused_variables)] + +/// Makes a function from a parser combination +/// +/// The type can be set up if the compiler needs +/// more information +/// +/// Function-like declaration: +/// ``` +/// # use nom::{named, tag}; +/// named!(my_function( &[u8] ) -> &[u8], tag!("abcd")); +/// ``` +/// Alternative declaration. First type parameter is input, second is output: +/// ``` +/// # use nom::{named, tag}; +/// named!(my_function<&[u8], &[u8]>, tag!("abcd")); +/// ``` +/// This one will have `&[u8]` as input type, `&[u8]` as output type: +/// ``` +/// # use nom::{named, tag}; +/// named!(my_function, tag!("abcd")); +/// ``` +/// Will use `&[u8]` as output type: +/// ``` +/// # use nom::{named, tag}; +/// named!(my_function<&[u8]>, tag!("abcd")); +/// ``` +/// Prefix them with 'pub' to make the functions public: +/// ``` +/// # use nom::{named, tag}; +/// named!(pub my_function, tag!("abcd")); +/// ``` +/// Prefix them with 'pub(crate)' to make the functions public within the crate: +/// ``` +/// # use nom::{named, tag}; +/// named!(pub(crate) my_function, tag!("abcd")); +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! 
named ( + (#$($args:tt)*) => ( + named_attr!(#$($args)*); + ); + ($vis:vis $name:ident( $i:ty ) -> $o:ty, $submac:ident!( $($args:tt)* )) => ( + $vis fn $name( i: $i ) -> $crate::IResult<$i, $o, ($i, $crate::error::ErrorKind)> { + $submac!(i, $($args)*) + } + ); + ($vis:vis $name:ident<$i:ty,$o:ty,$e:ty>, $submac:ident!( $($args:tt)* )) => ( + $vis fn $name( i: $i ) -> $crate::IResult<$i, $o, $e> { + $submac!(i, $($args)*) + } + ); + ($vis:vis $name:ident<$i:ty,$o:ty>, $submac:ident!( $($args:tt)* )) => ( + $vis fn $name( i: $i ) -> $crate::IResult<$i, $o, ($i, $crate::error::ErrorKind)> { + $submac!(i, $($args)*) + } + ); + ($vis:vis $name:ident<$o:ty>, $submac:ident!( $($args:tt)* )) => ( + $vis fn $name( i: &[u8] ) -> $crate::IResult<&[u8], $o, (&[u8], $crate::error::ErrorKind)> { + $submac!(i, $($args)*) + } + ); + ($vis:vis $name:ident, $submac:ident!( $($args:tt)* )) => ( + $vis fn $name( i: &[u8] ) -> $crate::IResult<&[u8], &[u8], (&[u8], $crate::error::ErrorKind)> { + $submac!(i, $($args)*) + } + ); +); + +/// Makes a function from a parser combination with arguments. +/// +/// ```ignore +/// //takes [`&[u8]`] as input +/// named_args!(tagged(open_tag: &[u8], close_tag: &[u8])<&str>, +/// delimited!(tag!(open_tag), map_res!(take!(4), str::from_utf8), tag!(close_tag)) +/// ); + +/// //takes `&str` as input +/// named_args!(tagged(open_tag: &str, close_tag: &str)<&str, &str>, +/// delimited!(tag!(open_tag), take!(4), tag!(close_tag)) +/// ); +/// ``` +/// +/// Note: if using arguments that way gets hard to read, it is always +/// possible to write the equivalent parser definition manually, like +/// this: +/// +/// ```ignore +/// fn tagged(input: &[u8], open_tag: &[u8], close_tag: &[u8]) -> IResult<&[u8], &str> { +/// // the first combinator in the tree gets the input as argument. It is then +/// // passed from one combinator to the next through macro rewriting +/// delimited!(input, +/// tag!(open_tag), take!(4), tag!(close_tag) +/// ) +/// ); +/// ``` +/// +#[macro_export(local_inner_macros)] +macro_rules! named_args { + ($vis:vis $func_name:ident ( $( $arg:ident : $typ:ty ),* ) < $return_type:ty > , $submac:ident!( $($args:tt)* ) ) => { + $vis fn $func_name(input: &[u8], $( $arg : $typ ),*) -> $crate::IResult<&[u8], $return_type> { + $submac!(input, $($args)*) + } + }; + + ($vis:vis $func_name:ident < 'a > ( $( $arg:ident : $typ:ty ),* ) < $return_type:ty > , $submac:ident!( $($args:tt)* ) ) => { + $vis fn $func_name<'this_is_probably_unique_i_hope_please, 'a>( + input: &'this_is_probably_unique_i_hope_please [u8], $( $arg : $typ ),*) -> + $crate::IResult<&'this_is_probably_unique_i_hope_please [u8], $return_type> + { + $submac!(input, $($args)*) + } + }; + + ($vis:vis $func_name:ident ( $( $arg:ident : $typ:ty ),* ) < $input_type:ty, $return_type:ty > , $submac:ident!( $($args:tt)* ) ) => { + $vis fn $func_name(input: $input_type, $( $arg : $typ ),*) -> $crate::IResult<$input_type, $return_type> { + $submac!(input, $($args)*) + } + }; + + ($vis:vis $func_name:ident < 'a > ( $( $arg:ident : $typ:ty ),* ) < $input_type:ty, $return_type:ty > , $submac:ident!( $($args:tt)* ) ) => { + $vis fn $func_name<'a>( + input: $input_type, $( $arg : $typ ),*) + -> $crate::IResult<$input_type, $return_type> + { + $submac!(input, $($args)*) + } + }; +} + +/// Makes a function from a parser combination, with attributes +/// +/// The usage of this macro is almost identical to `named!`, except that +/// you also pass attributes to be attached to the generated function. 
+/// This is ideal for adding documentation to your parser. +/// +/// Create my_function as if you wrote it with the doc comment /// My Func: +/// ``` +/// # use nom::{named_attr, tag}; +/// named_attr!(#[doc = "My Func"], my_function( &[u8] ) -> &[u8], tag!("abcd")); +/// ``` +/// Also works for pub functions, and multiple lines: +/// ``` +/// # use nom::{named_attr, tag}; +/// named_attr!(#[doc = "My Func\nRecognise abcd"], pub my_function, tag!("abcd")); +/// ``` +/// Multiple attributes can be passed if required: +/// ``` +/// # use nom::{named_attr, tag}; +/// named_attr!(#[doc = "My Func"] #[inline(always)], pub my_function, tag!("abcd")); +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! named_attr ( + ($(#[$attr:meta])*, $vis:vis $name:ident( $i:ty ) -> $o:ty, $submac:ident!( $($args:tt)* )) => ( + $(#[$attr])* + $vis fn $name( i: $i ) -> $crate::IResult<$i,$o, ($i, $crate::error::ErrorKind)> { + $submac!(i, $($args)*) + } + ); + ($(#[$attr:meta])*, $vis:vis $name:ident<$i:ty,$o:ty,$e:ty>, $submac:ident!( $($args:tt)* )) => ( + $(#[$attr])* + $vis fn $name( i: $i ) -> $crate::IResult<$i, $o, $e> { + $submac!(i, $($args)*) + } + ); + ($(#[$attr:meta])*, $vis:vis $name:ident<$i:ty,$o:ty>, $submac:ident!( $($args:tt)* )) => ( + $(#[$attr])* + $vis fn $name( i: $i ) -> $crate::IResult<$i, $o, ($i, $crate::error::ErrorKind)> { + $submac!(i, $($args)*) + } + ); + ($(#[$attr:meta])*, $vis:vis $name:ident<$o:ty>, $submac:ident!( $($args:tt)* )) => ( + $(#[$attr])* + $vis fn $name( i: &[u8] ) -> $crate::IResult<&[u8], $o, (&[u8], $crate::error::ErrorKind)> { + $submac!(i, $($args)*) + } + ); + ($(#[$attr:meta])*, $vis:vis $name:ident, $submac:ident!( $($args:tt)* )) => ( + $(#[$attr])* + $vis fn $name<'a>( i: &'a [u8] ) -> $crate::IResult<&[u8], &[u8], (&[u8], $crate::error::ErrorKind)> { + $submac!(i, $($args)*) + } + ); +); + +/// Used to wrap common expressions and function as macros +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::IResult; +/// # fn main() { +/// fn take_wrapper(input: &[u8], i: u8) -> IResult<&[u8], &[u8]> { take!(input, i * 10) } +/// +/// // will make a parser taking 20 bytes +/// named!(parser, call!(take_wrapper, 2)); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! call ( + ($i:expr, $fun:expr) => ( $fun( $i ) ); + ($i:expr, $fun:expr, $($args:expr),* ) => ( $fun( $i, $($args),* ) ); +); + +//FIXME: error rewrite +/// Prevents backtracking if the child parser fails +/// +/// This parser will do an early return instead of sending +/// its result to the parent parser. +/// +/// If another `return_error!` combinator is present in the parent +/// chain, the error will be wrapped and another early +/// return will be made. +/// +/// This makes it easy to build report on which parser failed, +/// where it failed in the input, and the chain of parsers +/// that led it there. +/// +/// Additionally, the error chain contains number identifiers +/// that can be matched to provide useful error messages. 
+/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::Err; +/// # use nom::error::ErrorKind; +/// # fn main() { +/// named!(err_test<&[u8], &[u8]>, alt!( +/// tag!("abcd") | +/// preceded!(tag!("efgh"), return_error!(ErrorKind::Eof, +/// do_parse!( +/// tag!("ijkl") >> +/// res: return_error!(ErrorKind::Tag, tag!("mnop")) >> +/// (res) +/// ) +/// ) +/// ) +/// )); +/// let a = &b"efghblah"[..]; +/// let b = &b"efghijklblah"[..]; +/// let c = &b"efghijklmnop"[..]; +/// +/// let blah = &b"blah"[..]; +/// +/// let res_a = err_test(a); +/// let res_b = err_test(b); +/// let res_c = err_test(c); +/// assert_eq!(res_a, Err(Err::Failure(error_node_position!(blah, ErrorKind::Eof, error_position!(blah, ErrorKind::Tag))))); +/// assert_eq!(res_b, Err(Err::Failure(error_node_position!(&b"ijklblah"[..], ErrorKind::Eof, +/// error_node_position!(blah, ErrorKind::Tag, error_position!(blah, ErrorKind::Tag)))) +/// )); +/// # } +/// ``` +/// +#[macro_export(local_inner_macros)] +macro_rules! return_error ( + ($i:expr, $code:expr, $submac:ident!( $($args:tt)* )) => ( + { + use $crate::lib::std::result::Result::*; + use $crate::Err; + + let i_ = $i.clone(); + let cl = || { + $submac!(i_, $($args)*) + }; + + match cl() { + Err(Err::Incomplete(x)) => Err(Err::Incomplete(x)), + Ok((i, o)) => Ok((i, o)), + Err(Err::Error(e)) | Err(Err::Failure(e)) => { + return Err(Err::Failure($crate::error::append_error($i, $code, e))) + } + } + } + ); + ($i:expr, $code:expr, $f:expr) => ( + return_error!($i, $code, call!($f)); + ); + ($i:expr, $submac:ident!( $($args:tt)* )) => ( + { + use $crate::lib::std::result::Result::*; + use $crate::Err; + + let i_ = $i.clone(); + let cl = || { + $submac!(i_, $($args)*) + }; + + match cl() { + Err(Err::Incomplete(x)) => Err(Err::Incomplete(x)), + Ok((i, o)) => Ok((i, o)), + Err(Err::Error(e)) | Err(Err::Failure(e)) => { + return Err(Err::Failure(e)) + } + } + } + ); + ($i:expr, $f:expr) => ( + return_error!($i, call!($f)); + ); +); + +//FIXME: error rewrite +/// Add an error if the child parser fails +/// +/// While `return_error!` does an early return and avoids backtracking, +/// add_return_error! backtracks normally. It just provides more context +/// for an error +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # use std::collections; +/// # use nom::Err; +/// # use nom::error::ErrorKind; +/// # fn main() { +/// named!(err_test, add_return_error!(ErrorKind::Tag, tag!("abcd"))); +/// +/// let a = &b"efghblah"[..]; +/// let res_a = err_test(a); +/// assert_eq!(res_a, Err(Err::Error(error_node_position!(a, ErrorKind::Tag, error_position!(a, ErrorKind::Tag))))); +/// # } +/// ``` +/// +#[macro_export(local_inner_macros)] +macro_rules! 
add_return_error ( + ($i:expr, $code:expr, $submac:ident!( $($args:tt)* )) => ( + { + use $crate::lib::std::result::Result::*; + use $crate::{Err,error::ErrorKind}; + + match $submac!($i, $($args)*) { + Ok((i, o)) => Ok((i, o)), + Err(Err::Error(e)) => { + Err(Err::Error(error_node_position!($i, $code, e))) + }, + Err(Err::Failure(e)) => { + Err(Err::Failure(error_node_position!($i, $code, e))) + }, + Err(e) => Err(e), + } + } + ); + ($i:expr, $code:expr, $f:expr) => ( + add_return_error!($i, $code, call!($f)); + ); +); + +/// replaces a `Incomplete` returned by the child parser +/// with an `Error` +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # use std::collections; +/// # use nom::Err; +/// # use nom::error::ErrorKind; +/// # fn main() { +/// named!(take_5, complete!(take!(5))); +/// +/// let a = &b"abcd"[..]; +/// let res_a = take_5(a); +/// assert_eq!(res_a, Err(Err::Error(error_position!(a, ErrorKind::Complete)))); +/// # } +/// ``` +/// +#[macro_export(local_inner_macros)] +macro_rules! complete ( + ($i:expr, $submac:ident!( $($args:tt)* )) => ( + $crate::combinator::completec($i, move |i| { $submac!(i, $($args)*) }) + ); + ($i:expr, $f:expr) => ( + complete!($i, call!($f)); + ); +); + +/// A bit like `std::try!`, this macro will return the remaining input and +/// parsed value if the child parser returned `Ok`, and will do an early +/// return for the `Err` side. +/// +/// this can provide more flexibility than `do_parse!` if needed +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::Err; +/// # use nom::error::ErrorKind; +/// # use nom::IResult; +/// +/// fn take_add(input:&[u8], size: u8) -> IResult<&[u8], &[u8]> { +/// let (i1, length) = try_parse!(input, map_opt!(nom::number::streaming::be_u8, |sz| size.checked_add(sz))); +/// let (i2, data) = try_parse!(i1, take!(length)); +/// return Ok((i2, data)); +/// } +/// # fn main() { +/// let arr1 = [1, 2, 3, 4, 5]; +/// let r1 = take_add(&arr1[..], 1); +/// assert_eq!(r1, Ok((&[4,5][..], &[2,3][..]))); +/// +/// let arr2 = [0xFE, 2, 3, 4, 5]; +/// // size is overflowing +/// let r1 = take_add(&arr2[..], 42); +/// assert_eq!(r1, Err(Err::Error(error_position!(&[254, 2,3,4,5][..], ErrorKind::MapOpt)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! try_parse ( + ($i:expr, $submac:ident!( $($args:tt)* )) => ({ + use $crate::lib::std::result::Result::*; + + match $submac!($i, $($args)*) { + Ok((i,o)) => (i,o), + Err(e) => return Err(e), + } + }); + ($i:expr, $f:expr) => ( + try_parse!($i, call!($f)) + ); +); + +/// `map!(I -> IResult, O -> P) => I -> IResult` +/// +/// maps a function on the result of a parser +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::character::complete::digit1; +/// # fn main() { +/// +/// named!(parse<&str, usize>, map!(digit1, |s| s.len())); +/// +/// // the parser will count how many characters were returned by digit1 +/// assert_eq!(parse("123456"), Ok(("", 6))); +/// +/// // this will fail if digit1 fails +/// assert_eq!(parse("abc"), Err(Err::Error(error_position!("abc", ErrorKind::Digit)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! 
map( + // Internal parser, do not use directly + (__impl $i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( + $crate::combinator::mapc($i, move |i| {$submac!(i, $($args)*)}, $g) + ); + ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( + map!(__impl $i, $submac!($($args)*), $g); + ); + ($i:expr, $f:expr, $g:expr) => ( + map!(__impl $i, call!($f), $g); + ); +); + +/// `map_res!(I -> IResult, O -> Result
<P, E>
) => I -> IResult` +/// maps a function returning a Result on the output of a parser +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::character::complete::digit1; +/// # fn main() { +/// +/// named!(parse<&str, u8>, map_res!(digit1, |s: &str| s.parse::())); +/// +/// // the parser will convert the result of digit1 to a number +/// assert_eq!(parse("123"), Ok(("", 123))); +/// +/// // this will fail if digit1 fails +/// assert_eq!(parse("abc"), Err(Err::Error(error_position!("abc", ErrorKind::Digit)))); +/// +/// // this will fail if the mapped function fails (a `u8` is too small to hold `123456`) +/// assert_eq!(parse("123456"), Err(Err::Error(error_position!("123456", ErrorKind::MapRes)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! map_res ( + // Internal parser, do not use directly + (__impl $i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( + $crate::combinator::map_resc($i, move |i| {$submac!(i, $($args)*)}, move |i| {$submac2!(i, $($args2)*)}) + ); + ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( + map_res!(__impl $i, $submac!($($args)*), call!($g)); + ); + ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( + map_res!(__impl $i, $submac!($($args)*), $submac2!($($args2)*)); + ); + ($i:expr, $f:expr, $g:expr) => ( + map_res!(__impl $i, call!($f), call!($g)); + ); + ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( + map_res!(__impl $i, call!($f), $submac!($($args)*)); + ); +); + +/// `map_opt!(I -> IResult, O -> Option
) => I -> IResult` +/// maps a function returning an Option on the output of a parser +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::character::complete::digit1; +/// # fn main() { +/// +/// named!(parser<&str, u8>, map_opt!(digit1, |s: &str| s.parse::().ok())); +/// +/// // the parser will convert the result of digit1 to a number +/// assert_eq!(parser("123"), Ok(("", 123))); +/// +/// // this will fail if digit1 fails +/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Digit)))); +/// +/// // this will fail if the mapped function fails (a `u8` is too small to hold `123456`) +/// assert_eq!(parser("123456"), Err(Err::Error(("123456", ErrorKind::MapOpt)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! map_opt ( + // Internal parser, do not use directly + (__impl $i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( + $crate::combinator::map_optc($i, move |i| {$submac!(i, $($args)*)}, move |i| {$submac2!(i, $($args2)*)}) + ); + ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( + map_opt!(__impl $i, $submac!($($args)*), call!($g)); + ); + ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( + map_opt!(__impl $i, $submac!($($args)*), $submac2!($($args2)*)); + ); + ($i:expr, $f:expr, $g:expr) => ( + map_opt!(__impl $i, call!($f), call!($g)); + ); + ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( + map_opt!(__impl $i, call!($f), $submac!($($args)*)); + ); +); + +/// `parse_to!(O) => I -> IResult` +/// uses the `parse` method from `std::str::FromStr` to convert the current +/// input to the specified type +/// +/// this will completely consume the input +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::character::complete::digit1; +/// # fn main() { +/// +/// named!(parser<&str, u8>, parse_to!(u8)); +/// +/// assert_eq!(parser("123"), Ok(("", 123))); +/// +/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::ParseTo)))); +/// +/// // this will fail if the mapped function fails (a `u8` is too small to hold `123456`) +/// assert_eq!(parser("123456"), Err(Err::Error(("123456", ErrorKind::ParseTo)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! parse_to ( + ($i:expr, $t:ty ) => ( + { + use $crate::lib::std::result::Result::*; + use $crate::lib::std::option::Option; + use $crate::lib::std::option::Option::*; + use $crate::{Err,error::ErrorKind}; + + use $crate::ParseTo; + use $crate::Slice; + use $crate::InputLength; + + let res: Option<$t> = ($i).parse_to(); + match res { + Some(output) => Ok(($i.slice($i.input_len()..), output)), + None => Err(Err::Error($crate::error::make_error($i, ErrorKind::ParseTo))) + } + } + ); +); + +/// `verify!(I -> IResult, O -> bool) => I -> IResult` +/// returns the result of the child parser if it satisfies a verification function +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # fn main() { +/// named!(check, verify!(nom::number::streaming::be_u32, |val: &u32| *val < 3)); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! 
verify ( + ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( + $crate::combinator::verifyc($i, |i| $submac!(i, $($args)*), $g) + ); + ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( + $crate::combinator::verifyc($i, |i| $submac!(i, $($args)*), |&o| $submac2!(o, $($args2)*)) + ); + ($i:expr, $f:expr, $g:expr) => ( + $crate::combinator::verify($f, $g)($i) + ); + ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( + $crate::combinator::verify($f, |&o| $submac!(o, $($args)*))($i) + ); +); + +/// `value!(T, R -> IResult ) => R -> IResult` +/// +/// or `value!(T) => R -> IResult` +/// +/// If the child parser was successful, return the value. +/// If no child parser is provided, always return the value +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # fn main() { +/// named!(x, value!(42, delimited!(tag!("")))); +/// named!(y, delimited!(tag!(""))); +/// let r = x(&b" aaa"[..]); +/// assert_eq!(r, Ok((&b" aaa"[..], 42))); +/// +/// let r2 = y(&b" aaa"[..]); +/// assert_eq!(r2, Ok((&b" aaa"[..], 42))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! value ( + ($i:expr, $res:expr, $submac:ident!( $($args:tt)* )) => ( + $crate::combinator::valuec($i, $res, |i| $submac!(i, $($args)*)) + ); + ($i:expr, $res:expr, $f:expr) => ( + $crate::combinator::valuec($i, $res, $f) + ); + ($i:expr, $res:expr) => ( + Ok(($i, $res)) + ); +); + +/// `opt!(I -> IResult) => I -> IResult>` +/// make the underlying parser optional +/// +/// returns an Option of the returned type. This parser returns `Some(result)` if the child parser +/// succeeds,`None` if it fails, and `Incomplete` if it did not have enough data to decide +/// +/// *Warning*: if you are using `opt` for some kind of optional ending token (like an end of line), +/// you should combine it with `complete` to make sure it works. +/// +/// As an example, `opt!(tag!("\r\n"))` will return `Incomplete` if it receives an empty input, +/// because `tag` does not have enough input to decide. +/// On the contrary, `opt!(complete!(tag!("\r\n")))` would return `None` as produced value, +/// since `complete!` transforms an `Incomplete` in an `Error`. +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # fn main() { +/// named!( o<&[u8], Option<&[u8]> >, opt!( tag!( "abcd" ) ) ); +/// +/// let a = b"abcdef"; +/// let b = b"bcdefg"; +/// assert_eq!(o(&a[..]), Ok((&b"ef"[..], Some(&b"abcd"[..])))); +/// assert_eq!(o(&b[..]), Ok((&b"bcdefg"[..], None))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! opt( + ($i:expr, $submac:ident!( $($args:tt)* )) => ( + { + $crate::combinator::optc($i, |i| $submac!(i, $($args)*)) + } + ); + ($i:expr, $f:expr) => ( + $crate::combinator::opt($f)($i) + ); +); + +/// `opt_res!(I -> IResult) => I -> IResult>` +/// make the underlying parser optional +/// +/// returns a Result, with Err containing the parsing error +/// +/// ```ignore +/// # #[macro_use] extern crate nom; +/// # use nom::ErrorKind; +/// # fn main() { +/// named!( o<&[u8], Result<&[u8], nom::Err<&[u8]> > >, opt_res!( tag!( "abcd" ) ) ); +/// +/// let a = b"abcdef"; +/// let b = b"bcdefg"; +/// assert_eq!(o(&a[..]), Ok((&b"ef"[..], Ok(&b"abcd"[..]))); +/// assert_eq!(o(&b[..]), Ok((&b"bcdefg"[..], Err(error_position!(&b[..], ErrorKind::Tag)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! 
opt_res ( + ($i:expr, $submac:ident!( $($args:tt)* )) => ( + { + use $crate::lib::std::result::Result::*; + use $crate::Err; + + let i_ = $i.clone(); + match $submac!(i_, $($args)*) { + Ok((i,o)) => Ok((i, Ok(o))), + Err(Err::Error(e)) => Ok(($i, Err(Err::Error(e)))), + // in case of failure, we return a real error + Err(e) => Err(e) + } + } + ); + ($i:expr, $f:expr) => ( + opt_res!($i, call!($f)); + ); +); + +/// `cond!(bool, I -> IResult) => I -> IResult>` +/// Conditional combinator +/// +/// Wraps another parser and calls it if the +/// condition is met. This combinator returns +/// an Option of the return type of the child +/// parser. +/// +/// This is especially useful if a parser depends +/// on the value returned by a preceding parser in +/// a `do_parse!`. +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::IResult; +/// # fn main() { +/// fn f_true(i: &[u8]) -> IResult<&[u8], Option<&[u8]>> { +/// cond!(i, true, tag!("abcd")) +/// } +/// +/// fn f_false(i: &[u8]) -> IResult<&[u8], Option<&[u8]>> { +/// cond!(i, false, tag!("abcd")) +/// } +/// +/// let a = b"abcdef"; +/// assert_eq!(f_true(&a[..]), Ok((&b"ef"[..], Some(&b"abcd"[..])))); +/// +/// assert_eq!(f_false(&a[..]), Ok((&b"abcdef"[..], None))); +/// # } +/// ``` +/// +#[macro_export(local_inner_macros)] +macro_rules! cond( + ($i:expr, $cond:expr, $submac:ident!( $($args:tt)* )) => ( + $crate::combinator::condc($i, $cond, |i| $submac!(i, $($args)*) ) + ); + ($i:expr, $cond:expr, $f:expr) => ( + $crate::combinator::cond($cond, $f)($i) + ); +); + +/// `peek!(I -> IResult) => I -> IResult` +/// returns a result without consuming the input +/// +/// the embedded parser may return Err(Err::Incomplete +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # fn main() { +/// named!(ptag, peek!( tag!( "abcd" ) ) ); +/// +/// let r = ptag(&b"abcdefgh"[..]); +/// assert_eq!(r, Ok((&b"abcdefgh"[..], &b"abcd"[..]))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! peek( + ($i:expr, $submac:ident!( $($args:tt)* )) => ( + $crate::combinator::peekc($i, |i| $submac!(i, $($args)*)) + ); + ($i:expr, $f:expr) => ( + $crate::combinator::peek($f)($i) + ); +); + +/// `not!(I -> IResult) => I -> IResult` +/// returns a result only if the embedded parser returns Error or Err(Err::Incomplete) +/// does not consume the input +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::Err; +/// # use nom::error::ErrorKind; +/// # fn main() { +/// named!(not_e, do_parse!( +/// res: tag!("abc") >> +/// not!(char!('e')) >> +/// (res) +/// )); +/// +/// let r = not_e(&b"abcd"[..]); +/// assert_eq!(r, Ok((&b"d"[..], &b"abc"[..]))); +/// +/// let r2 = not_e(&b"abce"[..]); +/// assert_eq!(r2, Err(Err::Error(error_position!(&b"e"[..], ErrorKind::Not)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! not( + ($i:expr, $submac:ident!( $($args:tt)* )) => ( + $crate::combinator::notc($i, |i| $submac!(i, $($args)*)) + ); + ($i:expr, $f:expr) => ( + $crate::combinator::not($f)($i) + ); +); + +/// `tap!(name: I -> IResult => { block }) => I -> IResult` +/// allows access to the parser's result without affecting it +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # use std::str; +/// # fn main() { +/// named!(ptag, tap!(res: tag!( "abcd" ) => { println!("recognized {}", str::from_utf8(res).unwrap()) } ) ); +/// +/// let r = ptag(&b"abcdefgh"[..]); +/// assert_eq!(r, Ok((&b"efgh"[..], &b"abcd"[..]))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! 
tap ( + ($i:expr, $name:ident : $submac:ident!( $($args:tt)* ) => $e:expr) => ( + { + use $crate::lib::std::result::Result::*; + use $crate::{Err,Needed,IResult}; + + match $submac!($i, $($args)*) { + Ok((i,o)) => { + let $name = o; + $e; + Ok((i, $name)) + }, + Err(e) => Err(Err::convert(e)), + } + } + ); + ($i:expr, $name: ident: $f:expr => $e:expr) => ( + tap!($i, $name: call!($f) => $e); + ); +); + +/// `eof!()` returns its input if it is at the end of input data +/// +/// When we're at the end of the data, this combinator +/// will succeed +/// +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # use std::str; +/// # use nom::{Err, error::ErrorKind}; +/// # fn main() { +/// named!(parser, eof!()); +/// +/// assert_eq!(parser(&b"abc"[..]), Err(Err::Error((&b"abc"[..], ErrorKind::Eof)))); +/// assert_eq!(parser(&b""[..]), Ok((&b""[..], &b""[..]))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! eof ( + ($i:expr,) => ( + { + use $crate::lib::std::result::Result::*; + use $crate::{Err,error::ErrorKind}; + + use $crate::InputLength; + if ($i).input_len() == 0 { + Ok(($i, $i)) + } else { + Err(Err::Error(error_position!($i, ErrorKind::Eof))) + } + } + ); +); + +/// `exact!()` will fail if the child parser does not consume the whole data +/// +/// TODO: example +#[macro_export(local_inner_macros)] +macro_rules! exact ( + ($i:expr, $submac:ident!( $($args:tt)* )) => ({ + terminated!($i, $submac!( $($args)*), eof!()) + }); + ($i:expr, $f:expr) => ( + exact!($i, call!($f)); + ); +); + +/// `recognize!(I -> IResult ) => I -> IResult` +/// if the child parser was successful, return the consumed input as produced value +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # fn main() { +/// named!(x, recognize!(delimited!(tag!("")))); +/// let r = x(&b" aaa"[..]); +/// assert_eq!(r, Ok((&b" aaa"[..], &b""[..]))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! recognize ( + ($i:expr, $submac:ident!( $($args:tt)* )) => ( + $crate::combinator::recognizec($i, |i| $submac!(i, $($args)*)) + ); + ($i:expr, $f:expr) => ( + $crate::combinator::recognize($f)($i) + ); +); + +#[cfg(test)] +mod tests { + use crate::internal::{Err, IResult, Needed}; + use crate::error::ParseError; + use crate::error::ErrorKind; + #[cfg(feature = "alloc")] + use crate::lib::std::boxed::Box; + + // reproduce the tag and take macros, because of module import order + macro_rules! tag ( + ($i:expr, $tag: expr) => ({ + use $crate::lib::std::result::Result::*; + use $crate::{Err,Needed,IResult,error::ErrorKind}; + use $crate::{Compare,CompareResult,InputLength,Slice}; + + let res: IResult<_,_> = match ($i).compare($tag) { + CompareResult::Ok => { + let blen = $tag.input_len(); + Ok(($i.slice(blen..), $i.slice(..blen))) + }, + CompareResult::Incomplete => { + Err(Err::Incomplete(Needed::Size($tag.input_len()))) + }, + CompareResult::Error => { + let e:ErrorKind = ErrorKind::Tag; + Err(Err::Error($crate::error::make_error($i, e))) + } + }; + res + }); + ); + + macro_rules! 
take( + ($i:expr, $count:expr) => ( + { + let cnt = $count as usize; + let res:IResult<&[u8],&[u8]> = if $i.len() < cnt { + Err($crate::Err::Incomplete($crate::Needed::Size(cnt))) + } else { + Ok((&$i[cnt..],&$i[0..cnt])) + }; + res + } + ); + ); + + mod pub_named_mod { + named!(pub tst, tag!("abcd")); + } + + #[test] + fn pub_named_test() { + let a = &b"abcd"[..]; + let res = pub_named_mod::tst(a); + assert_eq!(res, Ok((&b""[..], a))); + } + + mod pub_crate_named_mod { + named!(pub(crate) tst, tag!("abcd")); + } + + #[test] + fn pub_crate_named_test() { + let a = &b"abcd"[..]; + let res = pub_crate_named_mod::tst(a); + assert_eq!(res, Ok((&b""[..], a))); + } + + #[test] + fn apply_test() { + fn sum2(a: u8, b: u8) -> u8 { + a + b + } + fn sum3(a: u8, b: u8, c: u8) -> u8 { + a + b + c + } + let a = call!(1, sum2, 2); + let b = call!(1, sum3, 2, 3); + + assert_eq!(a, 3); + assert_eq!(b, 6); + } + + #[test] + fn opt() { + named!(opt_abcd<&[u8],Option<&[u8]> >, opt!(tag!("abcd"))); + + let a = &b"abcdef"[..]; + let b = &b"bcdefg"[..]; + let c = &b"ab"[..]; + assert_eq!(opt_abcd(a), Ok((&b"ef"[..], Some(&b"abcd"[..])))); + assert_eq!(opt_abcd(b), Ok((&b"bcdefg"[..], None))); + assert_eq!(opt_abcd(c), Err(Err::Incomplete(Needed::Size(4)))); + } + + #[test] + fn opt_res() { + named!(opt_res_abcd<&[u8], Result<&[u8], Err<(&[u8], ErrorKind)>> >, opt_res!(tag!("abcd"))); + + let a = &b"abcdef"[..]; + let b = &b"bcdefg"[..]; + let c = &b"ab"[..]; + assert_eq!(opt_res_abcd(a), Ok((&b"ef"[..], Ok(&b"abcd"[..])))); + assert_eq!( + opt_res_abcd(b), + Ok(( + &b"bcdefg"[..], + Err(Err::Error(error_position!(b, ErrorKind::Tag))) + )) + ); + assert_eq!(opt_res_abcd(c), Err(Err::Incomplete(Needed::Size(4)))); + } + + use crate::lib::std::convert::From; + #[derive(Debug, PartialEq)] + pub struct CustomError(&'static str); + impl From<(I, ErrorKind)> for CustomError { + fn from(_: (I, ErrorKind)) -> Self { + CustomError("test") + } + } + + impl ParseError for CustomError { + fn from_error_kind(_: I, _: ErrorKind) -> Self { + CustomError("from_error_kind") + } + + fn append(_: I, _: ErrorKind, _: CustomError) -> Self { + CustomError("append") + } + } + + + #[test] + #[cfg(feature = "alloc")] + fn cond() { + fn f_true(i: &[u8]) -> IResult<&[u8], Option<&[u8]>, CustomError> { + fix_error!(i, CustomError, cond!(true, tag!("abcd"))) + } + + fn f_false(i: &[u8]) -> IResult<&[u8], Option<&[u8]>, CustomError> { + fix_error!(i, CustomError, cond!(false, tag!("abcd"))) + } + + assert_eq!(f_true(&b"abcdef"[..]), Ok((&b"ef"[..], Some(&b"abcd"[..])))); + assert_eq!(f_true(&b"ab"[..]), Err(Err::Incomplete(Needed::Size(4)))); + assert_eq!(f_true(&b"xxx"[..]), Err(Err::Error(CustomError("test")))); + + assert_eq!(f_false(&b"abcdef"[..]), Ok((&b"abcdef"[..], None))); + assert_eq!(f_false(&b"ab"[..]), Ok((&b"ab"[..], None))); + assert_eq!(f_false(&b"xxx"[..]), Ok((&b"xxx"[..], None))); + } + + #[test] + #[cfg(feature = "alloc")] + fn cond_wrapping() { + // Test that cond!() will wrap a given identifier in the call!() macro. 
+ named!(tag_abcd, tag!("abcd")); + fn f_true(i: &[u8]) -> IResult<&[u8], Option<&[u8]>, CustomError> { + fix_error!(i, CustomError, cond!(true, tag_abcd)) + } + + fn f_false(i: &[u8]) -> IResult<&[u8], Option<&[u8]>, CustomError> { + fix_error!(i, CustomError, cond!(false, tag_abcd)) + } + + assert_eq!(f_true(&b"abcdef"[..]), Ok((&b"ef"[..], Some(&b"abcd"[..])))); + assert_eq!(f_true(&b"ab"[..]), Err(Err::Incomplete(Needed::Size(4)))); + assert_eq!(f_true(&b"xxx"[..]), Err(Err::Error(CustomError("test")))); + + assert_eq!(f_false(&b"abcdef"[..]), Ok((&b"abcdef"[..], None))); + assert_eq!(f_false(&b"ab"[..]), Ok((&b"ab"[..], None))); + assert_eq!(f_false(&b"xxx"[..]), Ok((&b"xxx"[..], None))); + } + + #[test] + fn peek() { + named!(peek_tag<&[u8],&[u8]>, peek!(tag!("abcd"))); + + assert_eq!(peek_tag(&b"abcdef"[..]), Ok((&b"abcdef"[..], &b"abcd"[..]))); + assert_eq!(peek_tag(&b"ab"[..]), Err(Err::Incomplete(Needed::Size(4)))); + assert_eq!( + peek_tag(&b"xxx"[..]), + Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) + ); + } + + #[test] + fn not() { + named!(not_aaa<()>, not!(tag!("aaa"))); + assert_eq!( + not_aaa(&b"aaa"[..]), + Err(Err::Error(error_position!(&b"aaa"[..], ErrorKind::Not))) + ); + assert_eq!(not_aaa(&b"aa"[..]), Err(Err::Incomplete(Needed::Size(3)))); + assert_eq!(not_aaa(&b"abcd"[..]), Ok((&b"abcd"[..], ()))); + } + + #[test] + fn verify() { + named!(test, verify!(take!(5), |slice: &[u8]| slice[0] == b'a')); + assert_eq!(test(&b"bcd"[..]), Err(Err::Incomplete(Needed::Size(5)))); + assert_eq!( + test(&b"bcdefg"[..]), + Err(Err::Error(error_position!( + &b"bcdefg"[..], + ErrorKind::Verify + ))) + ); + assert_eq!(test(&b"abcdefg"[..]), Ok((&b"fg"[..], &b"abcde"[..]))); + } + + #[test] + fn parse_to() { + let res: IResult<_, _, (&str, ErrorKind)> = parse_to!("ab", usize); + + assert_eq!( + res, + Err(Err::Error(error_position!( + "ab", + ErrorKind::ParseTo + ))) + ); + + let res: IResult<_, _, (&str, ErrorKind)> = parse_to!("42", usize); + + assert_eq!(res, Ok(("", 42))); + //assert_eq!(ErrorKind::convert(ErrorKind::ParseTo), ErrorKind::ParseTo::); + } + +} diff --git a/third_party/rust/nom/src/combinator/mod.rs b/third_party/rust/nom/src/combinator/mod.rs new file mode 100644 index 0000000000..933bed834b --- /dev/null +++ b/third_party/rust/nom/src/combinator/mod.rs @@ -0,0 +1,859 @@ +//! 
general purpose combinators + +#![allow(unused_imports)] + +#[cfg(feature = "alloc")] +use crate::lib::std::boxed::Box; + +#[cfg(feature = "std")] +use crate::lib::std::fmt::Debug; +use crate::internal::*; +use crate::error::ParseError; +use crate::traits::{AsChar, InputIter, InputLength, InputTakeAtPosition, ParseTo}; +use crate::lib::std::ops::{Range, RangeFrom, RangeTo}; +use crate::lib::std::borrow::Borrow; +use crate::traits::{Compare, CompareResult, Offset, Slice}; +use crate::error::ErrorKind; +use crate::lib::std::mem::transmute; + +#[macro_use] +mod macros; + +/// Return the remaining input +/// +/// ```rust +/// # use nom::error::ErrorKind; +/// use nom::combinator::rest; +/// assert_eq!(rest::<_,(_, ErrorKind)>("abc"), Ok(("", "abc"))); +/// assert_eq!(rest::<_,(_, ErrorKind)>(""), Ok(("", ""))); +/// ``` +#[inline] +pub fn rest>(input: T) -> IResult +where + T: Slice> + Slice> + Slice>, + T: InputLength, +{ + Ok((input.slice(input.input_len()..), input)) +} + +/// Return the length of the remaining input +/// +/// ```rust +/// # use nom::error::ErrorKind; +/// use nom::combinator::rest_len; +/// assert_eq!(rest_len::<_,(_, ErrorKind)>("abc"), Ok(("abc", 3))); +/// assert_eq!(rest_len::<_,(_, ErrorKind)>(""), Ok(("", 0))); +/// ``` +#[inline] +pub fn rest_len>(input: T) -> IResult +where + T: Slice> + Slice> + Slice>, + T: InputLength, +{ + let len = input.input_len(); + Ok((input, len)) +} + +/// maps a function on the result of a parser +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::character::complete::digit1; +/// use nom::combinator::map; +/// # fn main() { +/// +/// let parse = map(digit1, |s: &str| s.len()); +/// +/// // the parser will count how many characters were returned by digit1 +/// assert_eq!(parse("123456"), Ok(("", 6))); +/// +/// // this will fail if digit1 fails +/// assert_eq!(parse("abc"), Err(Err::Error(("abc", ErrorKind::Digit)))); +/// # } +/// ``` +pub fn map, F, G>(first: F, second: G) -> impl Fn(I) -> IResult +where + F: Fn(I) -> IResult, + G: Fn(O1) -> O2, +{ + move |input: I| { + let (input, o1) = first(input)?; + Ok((input, second(o1))) + } +} + +#[doc(hidden)] +pub fn mapc, F, G>(input: I, first: F, second: G) -> IResult +where + F: Fn(I) -> IResult, + G: Fn(O1) -> O2, +{ + map(first, second)(input) +} + +/// applies a function returning a Result over the result of a parser +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::character::complete::digit1; +/// use nom::combinator::map_res; +/// # fn main() { +/// +/// let parse = map_res(digit1, |s: &str| s.parse::()); +/// +/// // the parser will convert the result of digit1 to a number +/// assert_eq!(parse("123"), Ok(("", 123))); +/// +/// // this will fail if digit1 fails +/// assert_eq!(parse("abc"), Err(Err::Error(("abc", ErrorKind::Digit)))); +/// +/// // this will fail if the mapped function fails (a `u8` is too small to hold `123456`) +/// assert_eq!(parse("123456"), Err(Err::Error(("123456", ErrorKind::MapRes)))); +/// # } +/// ``` +pub fn map_res, E2, F, G>(first: F, second: G) -> impl Fn(I) -> IResult +where + F: Fn(I) -> IResult, + G: Fn(O1) -> Result, +{ + move |input: I| { + let i = input.clone(); + let (input, o1) = first(input)?; + match second(o1) { + Ok(o2) => Ok((input, o2)), + Err(_) => Err(Err::Error(E::from_error_kind(i, ErrorKind::MapRes))), + } + } +} + +#[doc(hidden)] +pub fn map_resc, E2, F, G>(input: I, first: F, second: G) -> IResult 
+where + F: Fn(I) -> IResult, + G: Fn(O1) -> Result, +{ + map_res(first, second)(input) +} + +/// applies a function returning an Option over the result of a parser +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::character::complete::digit1; +/// use nom::combinator::map_opt; +/// # fn main() { +/// +/// let parse = map_opt(digit1, |s: &str| s.parse::().ok()); +/// +/// // the parser will convert the result of digit1 to a number +/// assert_eq!(parse("123"), Ok(("", 123))); +/// +/// // this will fail if digit1 fails +/// assert_eq!(parse("abc"), Err(Err::Error(("abc", ErrorKind::Digit)))); +/// +/// // this will fail if the mapped function fails (a `u8` is too small to hold `123456`) +/// assert_eq!(parse("123456"), Err(Err::Error(("123456", ErrorKind::MapOpt)))); +/// # } +/// ``` +pub fn map_opt, F, G>(first: F, second: G) -> impl Fn(I) -> IResult +where + F: Fn(I) -> IResult, + G: Fn(O1) -> Option, +{ + move |input: I| { + let i = input.clone(); + let (input, o1) = first(input)?; + match second(o1) { + Some(o2) => Ok((input, o2)), + None => Err(Err::Error(E::from_error_kind(i, ErrorKind::MapOpt))), + } + } +} + +#[doc(hidden)] +pub fn map_optc, F, G>(input: I, first: F, second: G) -> IResult +where + F: Fn(I) -> IResult, + G: Fn(O1) -> Option, +{ + map_opt(first, second)(input) +} + +/// applies a parser over the result of another one +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::character::complete::digit1; +/// use nom::bytes::complete::take; +/// use nom::combinator::map_parser; +/// # fn main() { +/// +/// let parse = map_parser(take(5u8), digit1); +/// +/// assert_eq!(parse("12345"), Ok(("", "12345"))); +/// assert_eq!(parse("123ab"), Ok(("", "123"))); +/// assert_eq!(parse("123"), Err(Err::Error(("123", ErrorKind::Eof)))); +/// # } +/// ``` +pub fn map_parser, F, G>(first: F, second: G) -> impl Fn(I) -> IResult +where + F: Fn(I) -> IResult, + G: Fn(O1) -> IResult, + O1: InputLength, +{ + move |input: I| { + let (input, o1) = first(input)?; + let (_, o2) = second(o1)?; + Ok((input, o2)) + } +} + +#[doc(hidden)] +pub fn map_parserc, F, G>(input: I, first: F, second: G) -> IResult +where + F: Fn(I) -> IResult, + G: Fn(O1) -> IResult, + O1: InputLength, +{ + map_parser(first, second)(input) +} + +/// creates a new parser from the output of the first parser, then apply that parser over the rest of the input +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::bytes::complete::take; +/// use nom::number::complete::be_u8; +/// use nom::combinator::flat_map; +/// # fn main() { +/// +/// let parse = flat_map(be_u8, take); +/// +/// assert_eq!(parse(&[2, 0, 1, 2][..]), Ok((&[2][..], &[0, 1][..]))); +/// assert_eq!(parse(&[4, 0, 1, 2][..]), Err(Err::Error((&[0, 1, 2][..], ErrorKind::Eof)))); +/// # } +/// ``` +pub fn flat_map, F, G, H>(first: F, second: G) -> impl Fn(I) -> IResult +where + F: Fn(I) -> IResult, + G: Fn(O1) -> H, + H: Fn(I) -> IResult +{ + move |input: I| { + let (input, o1) = first(input)?; + second(o1)(input) + } +} + +/// optional parser: will return None if not successful +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::combinator::opt; +/// use nom::character::complete::alpha1; +/// # fn main() { +/// +/// fn parser(i: &str) -> IResult<&str, Option<&str>> { +/// opt(alpha1)(i) +/// } +/// +/// 
assert_eq!(parser("abcd;"), Ok((";", Some("abcd")))); +/// assert_eq!(parser("123;"), Ok(("123;", None))); +/// # } +/// ``` +pub fn opt, F>(f: F) -> impl Fn(I) -> IResult, E> +where + F: Fn(I) -> IResult, +{ + move |input: I| { + let i = input.clone(); + match f(input) { + Ok((i, o)) => Ok((i, Some(o))), + Err(Err::Error(_)) => Ok((i, None)), + Err(e) => Err(e), + } + } +} + +#[doc(hidden)] +pub fn optc, F>(input: I, f: F) -> IResult, E> +where + F: Fn(I) -> IResult, +{ + opt(f)(input) +} + +/// calls the parser if the condition is met +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::combinator::cond; +/// use nom::character::complete::alpha1; +/// # fn main() { +/// +/// fn parser(b: bool, i: &str) -> IResult<&str, Option<&str>> { +/// cond(b, alpha1)(i) +/// } +/// +/// assert_eq!(parser(true, "abcd;"), Ok((";", Some("abcd")))); +/// assert_eq!(parser(false, "abcd;"), Ok(("abcd;", None))); +/// assert_eq!(parser(true, "123;"), Err(Err::Error(("123;", ErrorKind::Alpha)))); +/// assert_eq!(parser(false, "123;"), Ok(("123;", None))); +/// # } +/// ``` +pub fn cond, F>(b: bool, f: F) -> impl Fn(I) -> IResult, E> +where + F: Fn(I) -> IResult, +{ + move |input: I| { + if b { + match f(input) { + Ok((i, o)) => Ok((i, Some(o))), + Err(e) => Err(e), + } + } else { + Ok((input, None)) + } + } +} + +#[doc(hidden)] +pub fn condc, F>(input: I, b: bool, f: F) -> IResult, E> +where + F: Fn(I) -> IResult, +{ + cond(b, f)(input) +} + +/// tries to apply its parser without consuming the input +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::combinator::peek; +/// use nom::character::complete::alpha1; +/// # fn main() { +/// +/// let parser = peek(alpha1); +/// +/// assert_eq!(parser("abcd;"), Ok(("abcd;", "abcd"))); +/// assert_eq!(parser("123;"), Err(Err::Error(("123;", ErrorKind::Alpha)))); +/// # } +/// ``` +pub fn peek, F>(f: F) -> impl Fn(I) -> IResult +where + F: Fn(I) -> IResult, +{ + move |input: I| { + let i = input.clone(); + match f(input) { + Ok((_, o)) => Ok((i, o)), + Err(e) => Err(e), + } + } +} + +#[doc(hidden)] +pub fn peekc, F>(input: I, f: F) -> IResult +where + F: Fn(I) -> IResult, +{ + peek(f)(input) +} + +/// transforms Incomplete into Error +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::bytes::streaming::take; +/// use nom::combinator::complete; +/// # fn main() { +/// +/// let parser = complete(take(5u8)); +/// +/// assert_eq!(parser("abcdefg"), Ok(("fg", "abcde"))); +/// assert_eq!(parser("abcd"), Err(Err::Error(("abcd", ErrorKind::Complete)))); +/// # } +/// ``` +pub fn complete, F>(f: F) -> impl Fn(I) -> IResult +where + F: Fn(I) -> IResult, +{ + move |input: I| { + let i = input.clone(); + match f(input) { + Err(Err::Incomplete(_)) => { + Err(Err::Error(E::from_error_kind(i, ErrorKind::Complete))) + }, + rest => rest + } + } +} + +#[doc(hidden)] +pub fn completec, F>(input: I, f: F) -> IResult +where + F: Fn(I) -> IResult, +{ + complete(f)(input) +} + +/// succeeds if all the input has been consumed by its child parser +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::combinator::all_consuming; +/// use nom::character::complete::alpha1; +/// # fn main() { +/// +/// let parser = all_consuming(alpha1); +/// +/// assert_eq!(parser("abcd"), Ok(("", "abcd"))); +/// 
assert_eq!(parser("abcd;"),Err(Err::Error((";", ErrorKind::Eof)))); +/// assert_eq!(parser("123abcd;"),Err(Err::Error(("123abcd;", ErrorKind::Alpha)))); +/// # } +/// ``` +pub fn all_consuming, F>(f: F) -> impl Fn(I) -> IResult +where + I: InputLength, + F: Fn(I) -> IResult, +{ + move |input: I| { + let (input, res) = f(input)?; + if input.input_len() == 0 { + Ok((input, res)) + } else { + Err(Err::Error(E::from_error_kind(input, ErrorKind::Eof))) + } + } +} + +/// returns the result of the child parser if it satisfies a verification function +/// +/// the verification function takes as argument a reference to the output of the +/// parser +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::combinator::verify; +/// use nom::character::complete::alpha1; +/// # fn main() { +/// +/// let parser = verify(alpha1, |s: &str| s.len() == 4); +/// +/// assert_eq!(parser("abcd"), Ok(("", "abcd"))); +/// assert_eq!(parser("abcde"), Err(Err::Error(("abcde", ErrorKind::Verify)))); +/// assert_eq!(parser("123abcd;"),Err(Err::Error(("123abcd;", ErrorKind::Alpha)))); +/// # } +/// ``` +pub fn verify, F, G>(first: F, second: G) -> impl Fn(I) -> IResult +where + F: Fn(I) -> IResult, + G: Fn(&O2) -> bool, + O1: Borrow, + O2: ?Sized, +{ + move |input: I| { + let i = input.clone(); + let (input, o) = first(input)?; + + if second(o.borrow()) { + Ok((input, o)) + } else { + Err(Err::Error(E::from_error_kind(i, ErrorKind::Verify))) + } + } +} + +#[doc(hidden)] +pub fn verifyc, F, G>(input: I, first: F, second: G) -> IResult +where + F: Fn(I) -> IResult, + G: Fn(&O2) -> bool, + O1: Borrow, + O2: ?Sized, +{ + verify(first, second)(input) +} + +/// returns the provided value if the child parser succeeds +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::combinator::value; +/// use nom::character::complete::alpha1; +/// # fn main() { +/// +/// let parser = value(1234, alpha1); +/// +/// assert_eq!(parser("abcd"), Ok(("", 1234))); +/// assert_eq!(parser("123abcd;"), Err(Err::Error(("123abcd;", ErrorKind::Alpha)))); +/// # } +/// ``` +pub fn value, F>(val: O1, parser: F) -> impl Fn(I) -> IResult +where + F: Fn(I) -> IResult, +{ + move |input: I| { + parser(input).map(|(i, _)| (i, val.clone())) + } +} + +#[doc(hidden)] +pub fn valuec, F>(input: I, val: O1, parser: F) -> IResult +where + F: Fn(I) -> IResult, +{ + value(val, parser)(input) +} + +/// succeeds if the child parser returns an error +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::combinator::not; +/// use nom::character::complete::alpha1; +/// # fn main() { +/// +/// let parser = not(alpha1); +/// +/// assert_eq!(parser("123"), Ok(("123", ()))); +/// assert_eq!(parser("abcd"), Err(Err::Error(("abcd", ErrorKind::Not)))); +/// # } +/// ``` +pub fn not, F>(parser: F) -> impl Fn(I) -> IResult +where + F: Fn(I) -> IResult, +{ + move |input: I| { + let i = input.clone(); + match parser(input) { + Ok(_) => Err(Err::Error(E::from_error_kind(i, ErrorKind::Not))), + Err(Err::Error(_)) => Ok((i, ())), + Err(e) => Err(e), + } + } +} + +#[doc(hidden)] +pub fn notc, F>(input: I, parser: F) -> IResult +where + F: Fn(I) -> IResult, +{ + not(parser)(input) +} + +/// if the child parser was successful, return the consumed input as produced value +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use 
nom::combinator::recognize; +/// use nom::character::complete::{char, alpha1}; +/// use nom::sequence::separated_pair; +/// # fn main() { +/// +/// let parser = recognize(separated_pair(alpha1, char(','), alpha1)); +/// +/// assert_eq!(parser("abcd,efgh"), Ok(("", "abcd,efgh"))); +/// assert_eq!(parser("abcd;"),Err(Err::Error((";", ErrorKind::Char)))); +/// # } +/// ``` +pub fn recognize>, O, E: ParseError, F>(parser: F) -> impl Fn(I) -> IResult +where + F: Fn(I) -> IResult, +{ + move |input: I| { + let i = input.clone(); + match parser(i) { + Ok((i, _)) => { + let index = input.offset(&i); + Ok((i, input.slice(..index))) + }, + Err(e) => Err(e), + } + } +} + +#[doc(hidden)] +pub fn recognizec>, O, E: ParseError, F>(input: I, parser: F) -> IResult +where + F: Fn(I) -> IResult, +{ + recognize(parser)(input) +} + +/// transforms an error to failure +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err,error::ErrorKind, IResult}; +/// use nom::combinator::cut; +/// use nom::character::complete::alpha1; +/// # fn main() { +/// +/// let parser = cut(alpha1); +/// +/// assert_eq!(parser("abcd;"), Ok((";", "abcd"))); +/// assert_eq!(parser("123;"), Err(Err::Failure(("123;", ErrorKind::Alpha)))); +/// # } +/// ``` +pub fn cut>, O, E: ParseError, F>(parser: F) -> impl Fn(I) -> IResult +where + F: Fn(I) -> IResult, +{ + move |input: I| { + let i = input.clone(); + match parser(i) { + Err(Err::Error(e)) => Err(Err::Failure(e)), + rest => rest, + } + } +} + +#[doc(hidden)] +pub fn cutc>, O, E: ParseError, F>(input: I, parser: F) -> IResult +where + F: Fn(I) -> IResult, +{ + cut(parser)(input) +} + +/// creates an iterator from input data and a parser +/// +/// call the iterator's [finish] method to get the remaining input if successful, +/// or the error value if we encountered an error +/// +/// ```rust +/// use nom::{combinator::iterator, IResult, bytes::complete::tag, character::complete::alpha1, sequence::terminated}; +/// use std::collections::HashMap; +/// +/// let data = "abc|defg|hijkl|mnopqr|123"; +/// let mut it = iterator(data, terminated(alpha1, tag("|"))); +/// +/// let parsed = it.map(|v| (v, v.len())).collect::>(); +/// let res: IResult<_,_> = it.finish(); +/// +/// assert_eq!(parsed, [("abc", 3usize), ("defg", 4), ("hijkl", 5), ("mnopqr", 6)].iter().cloned().collect()); +/// assert_eq!(res, Ok(("123", ()))); +/// ``` +pub fn iterator(input: Input, f: F) -> ParserIterator +where + F: Fn(Input) -> IResult, + Error: ParseError { + + ParserIterator { + iterator: f, + input, + state: State::Running, + } +} + +/// main structure associated to the [iterator] function +pub struct ParserIterator { + iterator: F, + input: I, + state: State, +} + +impl ParserIterator { + /// returns the remaining input if parsing was successful, or the error if we encountered an error + pub fn finish(self) -> IResult { + match &self.state { + State::Running | State::Done => Ok((self.input.clone(), ())), + State::Failure(e) => Err(Err::Failure(e.clone())), + State::Incomplete(i) => Err(Err::Incomplete(i.clone())), + } + } +} + +impl<'a, Input ,Output ,Error, F> core::iter::Iterator for &'a mut ParserIterator + where + F: Fn(Input) -> IResult, + Input: Clone +{ + type Item = Output; + + fn next(&mut self) -> Option { + if let State::Running = self.state { + let input = self.input.clone(); + + match (self.iterator)(input) { + Ok((i, o)) => { + self.input = i; + Some(o) + }, + Err(Err::Error(_)) => { + self.state = State::Done; + None + }, + Err(Err::Failure(e)) => { + self.state = 
State::Failure(e); + None + }, + Err(Err::Incomplete(i)) => { + self.state = State::Incomplete(i); + None + }, + } + } else { + None + } + } +} + +enum State { + Running, + Done, + Failure(E), + Incomplete(Needed), +} + + +#[cfg(test)] +mod tests { + use super::*; + use crate::internal::{Err, IResult, Needed}; + use crate::error::ParseError; + use crate::bytes::complete::take; + use crate::number::complete::be_u8; + + macro_rules! assert_parse( + ($left: expr, $right: expr) => { + let res: $crate::IResult<_, _, (_, ErrorKind)> = $left; + assert_eq!(res, $right); + }; + ); + + /*#[test] + fn t1() { + let v1:Vec = vec![1,2,3]; + let v2:Vec = vec![4,5,6]; + let d = Ok((&v1[..], &v2[..])); + let res = d.flat_map(print); + assert_eq!(res, Ok((&v2[..], ()))); + }*/ + + + /* + #[test] + fn end_of_input() { + let not_over = &b"Hello, world!"[..]; + let is_over = &b""[..]; + named!(eof_test, eof!()); + + let res_not_over = eof_test(not_over); + assert_eq!(res_not_over, Err(Err::Error(error_position!(not_over, ErrorKind::Eof)))); + + let res_over = eof_test(is_over); + assert_eq!(res_over, Ok((is_over, is_over))); + } + */ + + #[test] + fn rest_on_slices() { + let input: &[u8] = &b"Hello, world!"[..]; + let empty: &[u8] = &b""[..]; + assert_parse!(rest(input), Ok((empty, input))); + } + + #[test] + fn rest_on_strs() { + let input: &str = "Hello, world!"; + let empty: &str = ""; + assert_parse!(rest(input), Ok((empty, input))); + } + + #[test] + fn rest_len_on_slices() { + let input: &[u8] = &b"Hello, world!"[..]; + assert_parse!(rest_len(input), Ok((input, input.len()))); + } + + use crate::lib::std::convert::From; + impl From for CustomError { + fn from(_: u32) -> Self { + CustomError + } + } + + impl ParseError for CustomError { + fn from_error_kind(_: I, _: ErrorKind) -> Self { + CustomError + } + + fn append(_: I, _: ErrorKind, _: CustomError) -> Self { + CustomError + } + } + + struct CustomError; + #[allow(dead_code)] + fn custom_error(input: &[u8]) -> IResult<&[u8], &[u8], CustomError> { + //fix_error!(input, CustomError, alphanumeric) + crate::character::streaming::alphanumeric1(input) + } + + #[test] + fn test_flat_map() { + let input: &[u8] = &[3, 100, 101, 102, 103, 104][..]; + assert_parse!(flat_map(be_u8, take)(input), Ok((&[103, 104][..], &[100, 101, 102][..]))); + } + + #[test] + fn test_map_opt() { + let input: &[u8] = &[50][..]; + assert_parse!(map_opt(be_u8, |u| if u < 20 {Some(u)} else {None})(input), Err(Err::Error((&[50][..], ErrorKind::MapOpt)))); + assert_parse!(map_opt(be_u8, |u| if u > 20 {Some(u)} else {None})(input), Ok((&[][..], 50))); + } + + #[test] + fn test_map_parser() { + let input: &[u8] = &[100, 101, 102, 103, 104][..]; + assert_parse!(map_parser(take(4usize), take(2usize))(input), Ok((&[104][..], &[100, 101][..]))); + } + + #[test] + fn test_all_consuming() { + let input: &[u8] = &[100, 101, 102][..]; + assert_parse!(all_consuming(take(2usize))(input), Err(Err::Error((&[102][..], ErrorKind::Eof)))); + assert_parse!(all_consuming(take(3usize))(input), Ok((&[][..], &[100, 101, 102][..]))); + } + + #[test] + #[allow(unused)] + fn test_verify_ref() { + use crate::bytes::complete::take; + + let parser1 = verify(take(3u8), |s: &[u8]| s == &b"abc"[..]); + + assert_eq!(parser1(&b"abcd"[..]), Ok((&b"d"[..], &b"abc"[..]))); + assert_eq!(parser1(&b"defg"[..]), Err(Err::Error((&b"defg"[..], ErrorKind::Verify)))); + + fn parser2(i: &[u8]) -> IResult<&[u8], u32> { + verify(crate::number::streaming::be_u32, |val: &u32| *val < 3)(i) + } + } + + #[test] + #[cfg(feature = 
"alloc")] + fn test_verify_alloc() { + use crate::bytes::complete::take; + let parser1 = verify(map(take(3u8), |s: &[u8]| s.to_vec()), |s: &[u8]| s == &b"abc"[..]); + + assert_eq!(parser1(&b"abcd"[..]), Ok((&b"d"[..], (&b"abc").to_vec()))); + assert_eq!(parser1(&b"defg"[..]), Err(Err::Error((&b"defg"[..], ErrorKind::Verify)))); + } +} diff --git a/third_party/rust/nom/src/error.rs b/third_party/rust/nom/src/error.rs new file mode 100644 index 0000000000..150516121f --- /dev/null +++ b/third_party/rust/nom/src/error.rs @@ -0,0 +1,776 @@ +//! Error management +//! +//! Parsers are generic over their error type, requiring that it implements +//! the `error::ParseError` trait. + +/// this trait must be implemented by the error type of a nom parser +/// +/// There are already implementations of it for `(Input, ErrorKind)` +/// and `VerboseError`. +/// +/// It provides methods to create an error from some combinators, +/// and combine existing errors in combinators like `alt` +pub trait ParseError: Sized { + /// creates an error from the input position and an [ErrorKind] + fn from_error_kind(input: I, kind: ErrorKind) -> Self; + + /// combines an existing error with a new one created from the input + /// position and an [ErrorKind]. This is useful when backtracking + /// through a parse tree, accumulating error context on the way + fn append(input: I, kind: ErrorKind, other: Self) -> Self; + + /// creates an error from an input position and an expected character + fn from_char(input: I, _: char) -> Self { + Self::from_error_kind(input, ErrorKind::Char) + } + + /// combines two existing error. This function is used to compare errors + /// generated in various branches of [alt] + fn or(self, other: Self) -> Self { + other + } + + /// create a new error from an input position, a static string and an existing error. + /// This is used mainly in the [context] combinator, to add user friendly information + /// to errors when backtracking through a parse tree + fn add_context(_input: I, _ctx: &'static str, other: Self) -> Self { + other + } +} + +impl ParseError for (I, ErrorKind) { + fn from_error_kind(input: I, kind: ErrorKind) -> Self { + (input, kind) + } + + fn append(_: I, _: ErrorKind, other: Self) -> Self { + other + } +} + +impl ParseError for () { + fn from_error_kind(_: I, _: ErrorKind) -> Self {} + + fn append(_: I, _: ErrorKind, _: Self) -> Self {} +} + +/// creates an error from the input position and an [ErrorKind] +pub fn make_error>(input: I, kind: ErrorKind) -> E { + E::from_error_kind(input, kind) +} + +/// combines an existing error with a new one created from the input +/// position and an [ErrorKind]. This is useful when backtracking +/// through a parse tree, accumulating error context on the way +pub fn append_error>(input: I, kind: ErrorKind, other: E) -> E { + E::append(input, kind, other) +} + +/// this error type accumulates errors and their position when backtracking +/// through a parse tree. 
With some post processing (cf `examples/json.rs`), +/// it can be used to display user friendly error messages +#[cfg(feature = "alloc")] +#[derive(Clone, Debug, PartialEq)] +pub struct VerboseError { + /// list of errors accumulated by `VerboseError`, containing the affected + /// part of input data, and some context + pub errors: crate::lib::std::vec::Vec<(I, VerboseErrorKind)>, +} + +#[cfg(feature = "alloc")] +#[derive(Clone, Debug, PartialEq)] +/// error context for `VerboseError` +pub enum VerboseErrorKind { + /// static string added by the `context` function + Context(&'static str), + /// indicates which character was expected by the `char` function + Char(char), + /// error kind given by various nom parsers + Nom(ErrorKind), +} + +#[cfg(feature = "alloc")] +impl ParseError for VerboseError { + fn from_error_kind(input: I, kind: ErrorKind) -> Self { + VerboseError { + errors: vec![(input, VerboseErrorKind::Nom(kind))], + } + } + + fn append(input: I, kind: ErrorKind, mut other: Self) -> Self { + other.errors.push((input, VerboseErrorKind::Nom(kind))); + other + } + + fn from_char(input: I, c: char) -> Self { + VerboseError { + errors: vec![(input, VerboseErrorKind::Char(c))], + } + } + + fn add_context(input: I, ctx: &'static str, mut other: Self) -> Self { + other.errors.push((input, VerboseErrorKind::Context(ctx))); + other + } +} + +use crate::internal::{Err, IResult}; + +/// create a new error from an input position, a static string and an existing error. +/// This is used mainly in the [context] combinator, to add user friendly information +/// to errors when backtracking through a parse tree +pub fn context, F, O>(context: &'static str, f: F) -> impl Fn(I) -> IResult +where + F: Fn(I) -> IResult, +{ + move |i: I| match f(i.clone()) { + Ok(o) => Ok(o), + Err(Err::Incomplete(i)) => Err(Err::Incomplete(i)), + Err(Err::Error(e)) => Err(Err::Error(E::add_context(i, context, e))), + Err(Err::Failure(e)) => Err(Err::Failure(E::add_context(i, context, e))), + } +} + +/// transforms a `VerboseError` into a trace with input position information +#[cfg(feature = "alloc")] +pub fn convert_error(input: &str, e: VerboseError<&str>) -> crate::lib::std::string::String { + use crate::lib::std::fmt::Write; + use crate::traits::Offset; + + let mut result = crate::lib::std::string::String::new(); + + for (i, (substring, kind)) in e.errors.iter().enumerate() { + let offset = input.offset(substring); + + if input.is_empty() { + match kind { + VerboseErrorKind::Char(c) => write!(&mut result, "{}: expected '{}', got empty input\n\n", i, c), + VerboseErrorKind::Context(s) => write!(&mut result, "{}: in {}, got empty input\n\n", i, s), + VerboseErrorKind::Nom(e) => write!(&mut result, "{}: in {:?}, got empty input\n\n", i, e), + } + } else { + let prefix = &input.as_bytes()[..offset]; + + // Count the number of newlines in the first `offset` bytes of input + let line_number = prefix.iter().filter(|&&b| b == b'\n').count() + 1; + + // Find the line that includes the subslice: + // Find the *last* newline before the substring starts + let line_begin = prefix.iter().rev().position(|&b| b == b'\n').map(|pos| offset - pos).unwrap_or(0); + + // Find the full line after that newline + let line = input[line_begin..].lines().next().unwrap_or(&input[line_begin..]).trim_end(); + + // The (1-indexed) column number is the offset of our substring into that line + let column_number = line.offset(substring) + 1; + + match kind { + VerboseErrorKind::Char(c) => if let Some(actual) = substring.chars().next() { + write!( + 
&mut result, + "{i}: at line {line_number}:\n\ + {line}\n\ + {caret:>column$}\n\ + expected '{expected}', found {actual}\n\n", + i = i, + line_number = line_number, + line = line, + caret = '^', + column = column_number, + expected = c, + actual = actual, + ) + } else { + write!( + &mut result, + "{i}: at line {line_number}:\n\ + {line}\n\ + {caret:>column$}\n\ + expected '{expected}', got end of input\n\n", + i = i, + line_number = line_number, + line = line, + caret = '^', + column = column_number, + expected = c, + ) + }, + VerboseErrorKind::Context(s) => write!( + &mut result, + "{i}: at line {line_number}, in {context}:\n\ + {line}\n\ + {caret:>column$}\n\n", + i = i, + line_number = line_number, + context = s, + line = line, + caret = '^', + column = column_number, + ), + VerboseErrorKind::Nom(e) => write!( + &mut result, + "{i}: at line {line_number}, in {nom_err:?}:\n\ + {line}\n\ + {caret:>column$}\n\n", + i = i, + line_number = line_number, + nom_err = e, + line = line, + caret = '^', + column = column_number, + ), + } + } + // Because `write!` to a `String` is infallible, this `unwrap` is fine. + .unwrap(); + } + + result +} + +/// indicates which parser returned an error +#[cfg_attr(rustfmt, rustfmt_skip)] +#[derive(Debug,PartialEq,Eq,Hash,Clone,Copy)] +#[allow(deprecated,missing_docs)] +pub enum ErrorKind { + Tag, + MapRes, + MapOpt, + Alt, + IsNot, + IsA, + SeparatedList, + SeparatedNonEmptyList, + Many0, + Many1, + ManyTill, + Count, + TakeUntil, + LengthValue, + TagClosure, + Alpha, + Digit, + HexDigit, + OctDigit, + AlphaNumeric, + Space, + MultiSpace, + LengthValueFn, + Eof, + Switch, + TagBits, + OneOf, + NoneOf, + Char, + CrLf, + RegexpMatch, + RegexpMatches, + RegexpFind, + RegexpCapture, + RegexpCaptures, + TakeWhile1, + Complete, + Fix, + Escaped, + EscapedTransform, + NonEmpty, + ManyMN, + Not, + Permutation, + Verify, + TakeTill1, + TakeWhileMN, + ParseTo, + TooLarge, + Many0Count, + Many1Count, + Float, +} + +#[cfg_attr(rustfmt, rustfmt_skip)] +#[allow(deprecated)] +/// converts an ErrorKind to a number +pub fn error_to_u32(e: &ErrorKind) -> u32 { + match *e { + ErrorKind::Tag => 1, + ErrorKind::MapRes => 2, + ErrorKind::MapOpt => 3, + ErrorKind::Alt => 4, + ErrorKind::IsNot => 5, + ErrorKind::IsA => 6, + ErrorKind::SeparatedList => 7, + ErrorKind::SeparatedNonEmptyList => 8, + ErrorKind::Many1 => 9, + ErrorKind::Count => 10, + ErrorKind::TakeUntil => 12, + ErrorKind::LengthValue => 15, + ErrorKind::TagClosure => 16, + ErrorKind::Alpha => 17, + ErrorKind::Digit => 18, + ErrorKind::AlphaNumeric => 19, + ErrorKind::Space => 20, + ErrorKind::MultiSpace => 21, + ErrorKind::LengthValueFn => 22, + ErrorKind::Eof => 23, + ErrorKind::Switch => 27, + ErrorKind::TagBits => 28, + ErrorKind::OneOf => 29, + ErrorKind::NoneOf => 30, + ErrorKind::Char => 40, + ErrorKind::CrLf => 41, + ErrorKind::RegexpMatch => 42, + ErrorKind::RegexpMatches => 43, + ErrorKind::RegexpFind => 44, + ErrorKind::RegexpCapture => 45, + ErrorKind::RegexpCaptures => 46, + ErrorKind::TakeWhile1 => 47, + ErrorKind::Complete => 48, + ErrorKind::Fix => 49, + ErrorKind::Escaped => 50, + ErrorKind::EscapedTransform => 51, + ErrorKind::NonEmpty => 56, + ErrorKind::ManyMN => 57, + ErrorKind::HexDigit => 59, + ErrorKind::OctDigit => 61, + ErrorKind::Many0 => 62, + ErrorKind::Not => 63, + ErrorKind::Permutation => 64, + ErrorKind::ManyTill => 65, + ErrorKind::Verify => 66, + ErrorKind::TakeTill1 => 67, + ErrorKind::TakeWhileMN => 69, + ErrorKind::ParseTo => 70, + ErrorKind::TooLarge => 71, + ErrorKind::Many0Count 
=> 72, + ErrorKind::Many1Count => 73, + ErrorKind::Float => 74, + } +} + +impl ErrorKind { + #[cfg_attr(rustfmt, rustfmt_skip)] + #[allow(deprecated)] + /// converts an ErrorKind to a text description + pub fn description(&self) -> &str { + match *self { + ErrorKind::Tag => "Tag", + ErrorKind::MapRes => "Map on Result", + ErrorKind::MapOpt => "Map on Option", + ErrorKind::Alt => "Alternative", + ErrorKind::IsNot => "IsNot", + ErrorKind::IsA => "IsA", + ErrorKind::SeparatedList => "Separated list", + ErrorKind::SeparatedNonEmptyList => "Separated non empty list", + ErrorKind::Many0 => "Many0", + ErrorKind::Many1 => "Many1", + ErrorKind::Count => "Count", + ErrorKind::TakeUntil => "Take until", + ErrorKind::LengthValue => "Length followed by value", + ErrorKind::TagClosure => "Tag closure", + ErrorKind::Alpha => "Alphabetic", + ErrorKind::Digit => "Digit", + ErrorKind::AlphaNumeric => "AlphaNumeric", + ErrorKind::Space => "Space", + ErrorKind::MultiSpace => "Multiple spaces", + ErrorKind::LengthValueFn => "LengthValueFn", + ErrorKind::Eof => "End of file", + ErrorKind::Switch => "Switch", + ErrorKind::TagBits => "Tag on bitstream", + ErrorKind::OneOf => "OneOf", + ErrorKind::NoneOf => "NoneOf", + ErrorKind::Char => "Char", + ErrorKind::CrLf => "CrLf", + ErrorKind::RegexpMatch => "RegexpMatch", + ErrorKind::RegexpMatches => "RegexpMatches", + ErrorKind::RegexpFind => "RegexpFind", + ErrorKind::RegexpCapture => "RegexpCapture", + ErrorKind::RegexpCaptures => "RegexpCaptures", + ErrorKind::TakeWhile1 => "TakeWhile1", + ErrorKind::Complete => "Complete", + ErrorKind::Fix => "Fix", + ErrorKind::Escaped => "Escaped", + ErrorKind::EscapedTransform => "EscapedTransform", + ErrorKind::NonEmpty => "NonEmpty", + ErrorKind::ManyMN => "Many(m, n)", + ErrorKind::HexDigit => "Hexadecimal Digit", + ErrorKind::OctDigit => "Octal digit", + ErrorKind::Not => "Negation", + ErrorKind::Permutation => "Permutation", + ErrorKind::ManyTill => "ManyTill", + ErrorKind::Verify => "predicate verification", + ErrorKind::TakeTill1 => "TakeTill1", + ErrorKind::TakeWhileMN => "TakeWhileMN", + ErrorKind::ParseTo => "Parse string to the specified type", + ErrorKind::TooLarge => "Needed data size is too large", + ErrorKind::Many0Count => "Count occurrence of >=0 patterns", + ErrorKind::Many1Count => "Count occurrence of >=1 patterns", + ErrorKind::Float => "Float", + } + } +} + +/// creates a parse error from a `nom::ErrorKind` +/// and the position in the input +#[allow(unused_variables)] +#[macro_export(local_inner_macros)] +macro_rules! error_position( + ($input:expr, $code:expr) => ({ + $crate::error::make_error($input, $code) + }); +); + +/// creates a parse error from a `nom::ErrorKind`, +/// the position in the input and the next error in +/// the parsing tree. +#[allow(unused_variables)] +#[macro_export(local_inner_macros)] +macro_rules! 
error_node_position( + ($input:expr, $code:expr, $next:expr) => ({ + $crate::error::append_error($input, $code, $next) + }); +); + +/* + +#[cfg(feature = "std")] +use $crate::lib::std::any::Any; +#[cfg(feature = "std")] +use $crate::lib::std::{error,fmt}; +#[cfg(feature = "std")] +impl error::Error for Err { + fn description(&self) -> &str { + self.description() + } +} + +#[cfg(feature = "std")] +impl fmt::Display for Err { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.description()) + } +} +*/ + +//FIXME: error rewrite +/// translate parser result from IResult to IResult with a custom type +/// +/// ``` +/// # //FIXME +/// # #[macro_use] extern crate nom; +/// # use nom::IResult; +/// # use std::convert::From; +/// # use nom::Err; +/// # use nom::error::ErrorKind; +/// # fn main() { +/// # /* +/// # // will add a Custom(42) error to the error chain +/// # named!(err_test, add_return_error!(ErrorKind::Custom(42u32), tag!("abcd"))); +/// # +/// # #[derive(Debug,Clone,PartialEq)] +/// # pub struct ErrorStr(String); +/// # +/// # // Convert to IResult<&[u8], &[u8], ErrorStr> +/// # impl From for ErrorStr { +/// # fn from(i: u32) -> Self { +/// # ErrorStr(format!("custom error code: {}", i)) +/// # } +/// # } +/// # +/// # named!(parser<&[u8], &[u8], ErrorStr>, +/// # fix_error!(ErrorStr, err_test) +/// # ); +/// # +/// # let a = &b"efghblah"[..]; +/// # assert_eq!(parser(a), Err(Err::Error(Context::Code(a, ErrorKind::Custom(ErrorStr("custom error code: 42".to_string())))))); +/// # */ +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! fix_error ( + ($i:expr, $t:ty, $submac:ident!( $($args:tt)* )) => ( + { + use $crate::lib::std::result::Result::*; + use $crate::Err; + + match $submac!($i, $($args)*) { + Ok((i,o)) => Ok((i,o)), + Err(e) => { + let e2 = match e { + Err::Error(err) => { + Err::Error(err.into()) + }, + Err::Failure(err) => { + Err::Failure(err.into()) + }, + Err::Incomplete(e) => Err::Incomplete(e), + }; + Err(e2) + } + } + } + ); + ($i:expr, $t:ty, $f:expr) => ( + fix_error!($i, $t, call!($f)); + ); +); + +/// `flat_map!(R -> IResult, S -> IResult) => R -> IResult` +/// +/// combines a parser R -> IResult and +/// a parser S -> IResult to return another +/// parser R -> IResult +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind}; +/// use nom::number::complete::recognize_float; +/// +/// named!(parser<&str, f64>, flat_map!(recognize_float, parse_to!(f64))); +/// +/// assert_eq!(parser("123.45;"), Ok((";", 123.45))); +/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Char)))); +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! 
flat_map( + ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( + flat_map!(__impl $i, $submac!($($args)*), $submac2!($($args2)*)); + ); + ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( + flat_map!(__impl $i, $submac!($($args)*), call!($g)); + ); + ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( + flat_map!(__impl $i, call!($f), $submac!($($args)*)); + ); + ($i:expr, $f:expr, $g:expr) => ( + flat_map!(__impl $i, call!($f), call!($g)); + ); + (__impl $i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( + $crate::combinator::map_parserc($i, move |i| {$submac!(i, $($args)*)}, move |i| {$submac2!(i, $($args2)*)}) + ); +); + +#[cfg(test)] +#[cfg(feature = "alloc")] +mod tests { + use super::*; + use crate::character::complete::char; + + #[test] + fn convert_error_panic() { + let input = ""; + + let result: IResult<_, _, VerboseError<&str>> = char('x')(input); + } +} + +/* +#[cfg(feature = "alloc")] +use lib::std::{vec::Vec, collections::HashMap}; + +#[cfg(feature = "std")] +use lib::std::hash::Hash; + +#[cfg(feature = "std")] +pub fn add_error_pattern<'a, I: Clone + Hash + Eq, O, E: Clone + Hash + Eq>( + h: &mut HashMap, &'a str>, + e: VerboseError, + message: &'a str, +) -> bool { + h.insert(e, message); + true +} + +pub fn slice_to_offsets(input: &[u8], s: &[u8]) -> (usize, usize) { + let start = input.as_ptr(); + let off1 = s.as_ptr() as usize - start as usize; + let off2 = off1 + s.len(); + (off1, off2) +} + +#[cfg(feature = "std")] +pub fn prepare_errors(input: &[u8], e: VerboseError<&[u8]>) -> Option> { + let mut v: Vec<(ErrorKind, usize, usize)> = Vec::new(); + + for (p, kind) in e.errors.drain(..) { + let (o1, o2) = slice_to_offsets(input, p); + v.push((kind, o1, o2)); + } + + v.reverse(); + Some(v) +} + +#[cfg(feature = "std")] +pub fn print_error(input: &[u8], res: VerboseError<&[u8]>) { + if let Some(v) = prepare_errors(input, res) { + let colors = generate_colors(&v); + println!("parser codes: {}", print_codes(&colors, &HashMap::new())); + println!("{}", print_offsets(input, 0, &v)); + } else { + println!("not an error"); + } +} + +#[cfg(feature = "std")] +pub fn generate_colors(v: &[(ErrorKind, usize, usize)]) -> HashMap { + let mut h: HashMap = HashMap::new(); + let mut color = 0; + + for &(ref c, _, _) in v.iter() { + h.insert(error_to_u32(c), color + 31); + color = color + 1 % 7; + } + + h +} + +pub fn code_from_offset(v: &[(ErrorKind, usize, usize)], offset: usize) -> Option { + let mut acc: Option<(u32, usize, usize)> = None; + for &(ref ek, s, e) in v.iter() { + let c = error_to_u32(ek); + if s <= offset && offset <= e { + if let Some((_, start, end)) = acc { + if start <= s && e <= end { + acc = Some((c, s, e)); + } + } else { + acc = Some((c, s, e)); + } + } + } + if let Some((code, _, _)) = acc { + return Some(code); + } else { + return None; + } +} + +#[cfg(feature = "alloc")] +pub fn reset_color(v: &mut Vec) { + v.push(0x1B); + v.push(b'['); + v.push(0); + v.push(b'm'); +} + +#[cfg(feature = "alloc")] +pub fn write_color(v: &mut Vec, color: u8) { + v.push(0x1B); + v.push(b'['); + v.push(1); + v.push(b';'); + let s = color.to_string(); + let bytes = s.as_bytes(); + v.extend(bytes.iter().cloned()); + v.push(b'm'); +} + +#[cfg(feature = "std")] +#[cfg_attr(feature = "cargo-clippy", allow(implicit_hasher))] +pub fn print_codes(colors: &HashMap, names: &HashMap) -> String { + let mut v = Vec::new(); + for (code, &color) in colors { + if let Some(&s) = names.get(code) { + let bytes = s.as_bytes(); + 
write_color(&mut v, color); + v.extend(bytes.iter().cloned()); + } else { + let s = code.to_string(); + let bytes = s.as_bytes(); + write_color(&mut v, color); + v.extend(bytes.iter().cloned()); + } + reset_color(&mut v); + v.push(b' '); + } + reset_color(&mut v); + + String::from_utf8_lossy(&v[..]).into_owned() +} + +#[cfg(feature = "std")] +pub fn print_offsets(input: &[u8], from: usize, offsets: &[(ErrorKind, usize, usize)]) -> String { + let mut v = Vec::with_capacity(input.len() * 3); + let mut i = from; + let chunk_size = 8; + let mut current_code: Option = None; + let mut current_code2: Option = None; + + let colors = generate_colors(&offsets); + + for chunk in input.chunks(chunk_size) { + let s = format!("{:08x}", i); + for &ch in s.as_bytes().iter() { + v.push(ch); + } + v.push(b'\t'); + + let mut k = i; + let mut l = i; + for &byte in chunk { + if let Some(code) = code_from_offset(&offsets, k) { + if let Some(current) = current_code { + if current != code { + reset_color(&mut v); + current_code = Some(code); + if let Some(&color) = colors.get(&code) { + write_color(&mut v, color); + } + } + } else { + current_code = Some(code); + if let Some(&color) = colors.get(&code) { + write_color(&mut v, color); + } + } + } + v.push(CHARS[(byte >> 4) as usize]); + v.push(CHARS[(byte & 0xf) as usize]); + v.push(b' '); + k = k + 1; + } + + reset_color(&mut v); + + if chunk_size > chunk.len() { + for _ in 0..(chunk_size - chunk.len()) { + v.push(b' '); + v.push(b' '); + v.push(b' '); + } + } + v.push(b'\t'); + + for &byte in chunk { + if let Some(code) = code_from_offset(&offsets, l) { + if let Some(current) = current_code2 { + if current != code { + reset_color(&mut v); + current_code2 = Some(code); + if let Some(&color) = colors.get(&code) { + write_color(&mut v, color); + } + } + } else { + current_code2 = Some(code); + if let Some(&color) = colors.get(&code) { + write_color(&mut v, color); + } + } + } + if (byte >= 32 && byte <= 126) || byte >= 128 { + v.push(byte); + } else { + v.push(b'.'); + } + l = l + 1; + } + reset_color(&mut v); + + v.push(b'\n'); + i = i + chunk_size; + } + + String::from_utf8_lossy(&v[..]).into_owned() +} +*/ diff --git a/third_party/rust/nom/src/internal.rs b/third_party/rust/nom/src/internal.rs index 4cd5ffee90..401f98a906 100644 --- a/third_party/rust/nom/src/internal.rs +++ b/third_party/rust/nom/src/internal.rs @@ -1,21 +1,16 @@ //! Basic types to build the parsers use self::Needed::*; - -#[cfg(feature = "verbose-errors")] -use verbose_errors::Context; - -#[cfg(not(feature = "verbose-errors"))] -use simple_errors::Context; +use crate::error::ErrorKind; /// Holds the result of parsing functions /// /// It depends on I, the input type, O, the output type, and E, the error type (by default u32) /// -/// The `Ok` side is an enum containing the remainder of the input (the part of the data that +/// The `Ok` side is a pair containing the remainder of the input (the part of the data that /// was not parsed) and the produced value. The `Err` side contains an instance of `nom::Err`. /// -pub type IResult = Result<(I, O), Err>; +pub type IResult = Result<(I, O), Err>; /// Contains information on needed data if a parser returned `Incomplete` #[derive(Debug, PartialEq, Eq, Clone, Copy)] @@ -27,13 +22,14 @@ pub enum Needed { } impl Needed { + /// indicates if we know how many bytes we need pub fn is_known(&self) -> bool { *self != Unknown } - /// Maps a `Needed` to `Needed` by appling a function to a contained `Size` value. 
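The new `IResult` alias above keeps the `(remaining_input, output)` pair on the `Ok` side and a `nom::Err` value on the `Err` side. A minimal consumption sketch, assuming the vendored nom 5 crate as a dependency (the parser name and input are illustrative only, not part of this patch):

```rust
use nom::{bytes::complete::tag, IResult};

// Hypothetical example parser: recognizes a literal "abcd" prefix.
fn prefix(input: &str) -> IResult<&str, &str> {
    tag("abcd")(input)
}

fn main() {
    match prefix("abcdef") {
        // Ok carries (remaining input, parsed value)
        Ok((rest, matched)) => println!("matched {:?}, rest {:?}", matched, rest),
        // Err carries nom::Err (Incomplete, Error or Failure)
        Err(e) => println!("parse error: {:?}", e),
    }
}
```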
+ /// Maps a `Needed` to `Needed` by applying a function to a contained `Size` value. #[inline] - pub fn map usize>(self, f: F) -> Needed { + pub fn map usize>(self, f: F) -> Needed { match self { Unknown => Unknown, Size(n) => Size(f(n)), @@ -55,476 +51,135 @@ impl Needed { /// to decide on the next parser to apply, and that parser fails, you know there's no need /// to try other parsers, you were already in the right branch, so the data is invalid /// -/// Depending on a compilation flag, the content of the `Context` enum -/// can change. In the default case, it will only have one variant: -/// `Context::Code(I, ErrorKind)` (with `I` and `E` configurable). -/// It contains an error code and the input position that triggered it. -/// -/// If you activate the `verbose-errors` compilation flags, it will add another -/// variant to the enum: `Context::List(Vec<(I, ErrorKind)>)`. -/// This variant aggregates positions and error codes as the code backtracks -/// through the nested parsers. -/// The verbose errors feature allows for very flexible error management: -/// you can know precisely which parser got to which part of the input. -/// The main drawback is that it is a lot slower than default error -/// management. #[derive(Debug, Clone, PartialEq)] -pub enum Err { +pub enum Err { /// There was not enough data Incomplete(Needed), /// The parser had an error (recoverable) - Error(Context), + Error(E), /// The parser had an unrecoverable error: we got to the right /// branch and we know other branches won't work, so backtrack /// as fast as possible - Failure(Context), -} - -#[cfg(feature = "std")] -use std::fmt; - -#[cfg(feature = "std")] -impl fmt::Display for Err -where - I: fmt::Debug, - E: fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -#[cfg(feature = "std")] -use std::error::Error; - -#[cfg(feature = "std")] -impl Error for Err -where - I: fmt::Debug, - E: fmt::Debug, -{ - fn description(&self) -> &str { - match self { - &Err::Incomplete(..) 
=> "there was not enough data", - &Err::Error(Context::Code(_, ref error_kind)) | &Err::Failure(Context::Code(_, ref error_kind)) => error_kind.description(), - #[cfg(feature = "verbose-errors")] - &Err::Error(Context::List(..)) | &Err::Failure(Context::List(..)) => "list of errors", - } - } - - fn cause(&self) -> Option<&Error> { - None - } + Failure(E), } -use util::Convert; - -impl> Convert> for Err { - fn convert(e: Err) -> Self { - match e { - Err::Incomplete(n) => Err::Incomplete(n), - Err::Failure(c) => Err::Failure(Context::convert(c)), - Err::Error(c) => Err::Error(Context::convert(c)), - } - } -} - -impl Err { - pub fn into_error_kind(self) -> ::util::ErrorKind { - match self { - Err::Incomplete(_) => ::util::ErrorKind::Complete, - Err::Failure(c) => c.into_error_kind(), - Err::Error(c) => c.into_error_kind(), - } - } - +impl Err { + /// tests if the result is Incomplete pub fn is_incomplete(&self) -> bool { - match *self { - Err::Incomplete(_) => true, - _ => false, - } - } -} - -/* -#[cfg(feature = "verbose-errors")] -/// This is the same as IResult, but without Done -/// -/// This is used as the Error type when converting to std::result::Result -#[derive(Debug,PartialEq,Eq,Clone)] -pub enum IError { - Error(Err), - Incomplete(Needed) -} - -#[cfg(not(feature = "verbose-errors"))] -/// This is the same as IResult, but without Done -/// -/// This is used as the Error type when converting to std::result::Result -#[derive(Debug,PartialEq,Eq,Clone)] -pub enum IError { - Error(Err), - Incomplete(Needed) -} - -impl IResult { - pub fn is_done(&self) -> bool { - match *self { - Done(_,_) => true, - _ => false - } - } - - pub fn is_err(&self) -> bool { - match *self { - Error(_) => true, - _ => false - } - } - - pub fn is_incomplete(&self) -> bool { - match *self { - Incomplete(_) => true, - _ => false - } - } - - pub fn or(self, other: IResult) -> IResult { - if self.is_done() { - self + if let Err::Incomplete(_) = self { + true } else { - other + false } } - /// Maps a `IResult` to `IResult` by appling a function - /// to a contained `Done` value, leaving `Error` and `Incomplete` value - /// untouched. - #[inline] - pub fn map N>(self, f: F) -> IResult { + /// Applies the given function to the inner error + pub fn map(self, f: F) -> Err + where F: FnOnce(E) -> E2 + { match self { - Done(i, o) => Done(i, f(o)), - Error(e) => Error(e), - Incomplete(n) => Incomplete(n), - } - } - - /// Maps a `IResult` to `IResult` by appling a function - /// to a contained `Incomplete` value, leaving `Done` and `Error` value - /// untouched. - #[inline] - pub fn map_inc(self, f: F) -> IResult - where F: FnOnce(Needed) -> Needed { - match self { - Error(e) => Error(e), - Incomplete(n) => Incomplete(f(n)), - Done(i, o) => Done(i, o), - } - } - - /// Unwrap the contained `Done(I, O)` value, or panic if the `IResult` is not - /// `Done`. - pub fn unwrap(self) -> (I, O) { - match self { - Done(i, o) => (i, o), - Incomplete(_) => panic!("unwrap() called on an IResult that is Incomplete"), - Error(_) => panic!("unwrap() called on an IResult that is Error") + Err::Incomplete(n) => Err::Incomplete(n), + Err::Failure(t) => Err::Failure(f(t)), + Err::Error(t) => Err::Error(f(t)), } } - /// Unwrap the contained `Done(I, O)` value or a default if the `IResult` is not - /// `Done`. 
- pub fn unwrap_or(self, default: (I, O)) -> (I, O) { - match self { - Done(i, o) => (i, o), - Incomplete(_) => default, - Error(_) => default - } + /// automatically converts between errors if the underlying type supports it + pub fn convert(e: Err) -> Self + where E: From + { + e.map(Into::into) } +} - /// Unwrap the contained `Incomplete(n)` value, or panic if the `IResult` is not - /// `Incomplete`. - pub fn unwrap_inc(self) -> Needed { +impl Err<(T, ErrorKind)> { + /// maps `Err<(T, ErrorKind)>` to `Err<(U, ErrorKind)>` with the given F: T -> U + pub fn map_input(self, f: F) -> Err<(U, ErrorKind)> + where F: FnOnce(T) -> U { match self { - Incomplete(n) => n, - Done(_, _) => panic!("unwrap_inc() called on an IResult that is Done"), - Error(_) => panic!("unwrap_inc() called on an IResult that is Error") + Err::Incomplete(n) => Err::Incomplete(n), + Err::Failure((input, k)) => Err::Failure((f(input), k)), + Err::Error((input, k)) => Err::Error((f(input), k)), } } } -pub trait GetInput { - fn remaining_input(&self) -> Option; -} - -pub trait GetOutput { - fn output(&self) -> Option; -} - -impl<'a,I,O,E> GetInput<&'a[I]> for IResult<&'a[I],O,E> { - fn remaining_input(&self) -> Option<&'a[I]> { - match *self { - Done(ref i,_) => Some(*i), - _ => None - } +#[cfg(feature = "std")] +impl Err<(&[u8], ErrorKind)> { + /// Obtaining ownership + pub fn to_owned(self) -> Err<(Vec, ErrorKind)> { + self.map_input(ToOwned::to_owned) } } -impl GetInput<()> for IResult<(),O,E> { - fn remaining_input(&self) -> Option<()> { - match *self { - Done((),_) => Some(()), - _ => None - } +#[cfg(feature = "std")] +impl Err<(&str, ErrorKind)> { + /// automatically converts between errors if the underlying type supports it + pub fn to_owned(self) -> Err<(String, ErrorKind)> { + self.map_input(ToOwned::to_owned) } } -impl<'a,O,E> GetInput<&'a str> for IResult<&'a str,O,E> { - fn remaining_input(&self) -> Option<&'a str> { - match *self { - Done(ref i,_) => Some(*i), - _ => None - } - } -} +impl Eq for Err {} -impl<'a,I,O,E> GetOutput<&'a[O]> for IResult { - fn output(&self) -> Option<&'a[O]> { - match *self { - Done(_, ref o) => Some(*o), - _ => None - } - } -} +#[cfg(feature = "std")] +use std::fmt; -impl GetOutput<()> for IResult { - fn output(&self) -> Option<()> { - match *self { - Done(_,()) => Some(()), - _ => None +#[cfg(feature = "std")] +impl fmt::Display for Err +where + E: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Err::Incomplete(Needed::Size(u)) => write!(f, "Parsing requires {} bytes/chars", u), + Err::Incomplete(Needed::Unknown) => write!(f, "Parsing requires more data"), + Err::Failure(c) => write!(f, "Parsing Failure: {:?}", c), + Err::Error(c) => write!(f, "Parsing Error: {:?}", c), } } } -impl<'a,I,E> GetOutput<&'a str> for IResult { - fn output(&self) -> Option<&'a str> { - match *self { - Done(_,ref o) => Some(*o), - _ => None - } - } -}*/ - -#[cfg(feature = "verbose-errors")] -/// creates a parse error from a `nom::ErrorKind` -/// and the position in the input -/// if "verbose-errors" is not activated, -/// it default to only the error code -#[macro_export] -macro_rules! error_position( - ($input: expr, $code:expr) => ({ - $crate::Context::Code($input, $code) - }); -); - -#[cfg(not(feature = "verbose-errors"))] -/// creates a parse error from a `nom::ErrorKind` -/// and the position in the input -/// if "verbose-errors" is not activated, -/// it default to only the error code -#[allow(unused_variables)] -#[macro_export] -macro_rules! 
error_position( - ($input:expr, $code:expr) => ({ - $crate::Context::Code($input, $code) - }); -); - -#[cfg(feature = "verbose-errors")] -/// creates a parse error from a `nom::ErrorKind`, -/// the position in the input and the next error in -/// the parsing tree. -/// if "verbose-errors" is not activated, -/// it default to only the error code -#[macro_export] -macro_rules! error_node_position( - ($input:expr, $code:expr, $next:expr) => { - { - let mut error_vec = match $next { - $crate::Context::Code(i, e) => { - let mut v = $crate::lib::std::vec::Vec::new(); - v.push((i, e)); - v - }, - $crate::Context::List(v) => { - v - }, - }; +#[cfg(feature = "std")] +use std::error::Error; - error_vec.push(($input, $code)); - $crate::Context::List(error_vec) - } +#[cfg(feature = "std")] +impl Error for Err +where + E: fmt::Debug, +{ + fn source(&self) -> Option<&(dyn Error + 'static)> { + None // no underlying error } -); - -#[cfg(not(feature = "verbose-errors"))] -/// creates a parse error from a `nom::ErrorKind`, -/// the position in the input and the next error in -/// the parsing tree. -/// if "verbose-errors" is not activated, -/// it default to only the error code -#[allow(unused_variables)] -#[macro_export] -macro_rules! error_node_position( - ($input:expr, $code:expr, $next:expr) => ({ - fn unify_types(_: &T, _: &T) {} - let res = $crate::Context::Code($input, $code); - unify_types(&res, &$next); - res - }); -); +} #[cfg(test)] mod tests { + use super::*; + use crate::error::ErrorKind; - /* - const REST: [u8; 0] = []; - const DONE: IResult<&'static [u8], u32> = Ok((&REST, 5)); - const ERROR: IResult<&'static [u8], u32> = Err(Err::Error(Context::Code(&b""[..], ErrorKind::Tag))); - const INCOMPLETE: IResult<&'static [u8], u32> = Err(Err::Incomplete(Needed::Unknown)); - */ + #[doc(hidden)] + #[macro_export] + macro_rules! 
assert_size ( + ($t:ty, $sz:expr) => ( + assert_eq!(crate::lib::std::mem::size_of::<$t>(), $sz); + ); + ); - /* #[test] - fn iresult_or() { - assert_eq!(DONE.or(ERROR), DONE); - assert_eq!(ERROR.or(DONE), DONE); - assert_eq!(INCOMPLETE.or(ERROR), ERROR); - } - - #[test] - fn needed_map() { - let unknown = Needed::Unknown; - let size = Needed::Size(5); - - assert_eq!(size.map(|x| x * 2), Needed::Size(10)); - assert_eq!(unknown.map(|x| x * 2), Needed::Unknown); - } - - #[test] - fn iresult_map() { - assert_eq!(DONE.map(|x| x * 2), IResult::Done(&b""[..], 10)); - assert_eq!(ERROR.map(|x| x * 2), IResult::Error(error_code!(ErrorKind::Tag))); - assert_eq!(INCOMPLETE.map(|x| x * 2), IResult::Incomplete(Needed::Unknown)); - } - - #[test] - fn iresult_map_inc() { - let inc_unknown: IResult<&[u8], u32> = IResult::Incomplete(Needed::Unknown); - let inc_size: IResult<&[u8], u32> = IResult::Incomplete(Needed::Size(5)); - - assert_eq!(DONE.map_inc(|n| if let Needed::Size(i) = n {Needed::Size(i+1)} else {n}), IResult::Done(&b""[..], 5)); - assert_eq!(ERROR.map_inc(|n| if let Needed::Size(i) = n {Needed::Size(i+1)} else {n}), IResult::Error(error_code!(ErrorKind::Tag))); - assert_eq!(inc_unknown.map_inc(|n| if let Needed::Size(i) = n {Needed::Size(i+1)} else {n}), IResult::Incomplete(Needed::Unknown)); - assert_eq!(inc_size.map_inc(|n| if let Needed::Size(i) = n {Needed::Size(i+1)} else {n}), IResult::Incomplete(Needed::Size(6))); - } - - #[test] - #[cfg(feature = "std")] - fn iresult_map_err() { - #[derive(Clone, Copy, Debug, PartialEq, Eq)] - struct Error(u32); - - let error_kind = error_code!(ErrorKind::Custom(Error(5))); - - assert_eq!(DONE.map_err(|_| error_kind.clone()), IResult::Done(&b""[..], 5)); - assert_eq!(ERROR.map_err(|x| {println!("err: {:?}", x); error_kind.clone()}), IResult::Error(error_kind.clone())); - assert_eq!(INCOMPLETE.map_err(|x| {println!("err: {:?}", x); error_kind.clone()}), IResult::Incomplete(Needed::Unknown)); + #[cfg(target_pointer_width = "64")] + fn size_test() { + assert_size!(IResult<&[u8], &[u8], (&[u8], u32)>, 40); + assert_size!(IResult<&str, &str, u32>, 40); + assert_size!(Needed, 16); + assert_size!(Err, 24); + assert_size!(ErrorKind, 1); } #[test] - fn iresult_unwrap_on_done() { - assert_eq!(DONE.unwrap(), (&b""[..], 5)); + fn err_map_test() { + let e = Err::Error(1); + assert_eq!(e.map(|v| v + 1), Err::Error(2)); } - #[test] - #[should_panic] - fn iresult_unwrap_on_err() { - ERROR.unwrap(); - } - - #[test] - #[should_panic] - fn iresult_unwrap_on_inc() { - INCOMPLETE.unwrap(); - } - - #[test] - fn iresult_unwrap_or_on_done() { - assert_eq!(DONE.unwrap_or((&b""[..], 2)), (&b""[..], 5)); - } - - #[test] - fn iresult_unwrap_or_on_err() { - assert_eq!(ERROR.unwrap_or((&b""[..], 2)), (&b""[..], 2)); - } - - #[test] - fn iresult_unwrap_or_on_inc() { - assert_eq!(INCOMPLETE.unwrap_or((&b""[..], 2)), (&b""[..], 2)); - } - - #[test] - #[should_panic] - fn iresult_unwrap_err_on_done() { - DONE.unwrap_err(); - } - - #[test] - fn iresult_unwrap_err_on_err() { - assert_eq!(ERROR.unwrap_err(), error_code!(ErrorKind::Tag)); - } - - #[test] - #[should_panic] - fn iresult_unwrap_err_on_inc() { - INCOMPLETE.unwrap_err(); - } - - #[test] - #[should_panic] - fn iresult_unwrap_inc_on_done() { - DONE.unwrap_inc(); - } - - #[test] - #[should_panic] - fn iresult_unwrap_inc_on_err() { - ERROR.unwrap_inc(); - } - - #[test] - fn iresult_unwrap_inc_on_inc() { - assert_eq!(INCOMPLETE.unwrap_inc(), Needed::Unknown); - } - - #[test] - fn iresult_to_result() { - assert_eq!(DONE.to_result(), 
Ok(5)); - assert_eq!(ERROR.to_result(), Err(error_code!(ErrorKind::Tag))); - } - - #[test] - #[should_panic] - fn iresult_to_result_on_incomplete() { - INCOMPLETE.to_result().unwrap(); - } - - #[test] - fn iresult_to_full_result() { - assert_eq!(DONE.to_full_result(), Ok(5)); - assert_eq!(INCOMPLETE.to_full_result(), Err(IError::Incomplete(Needed::Unknown))); - assert_eq!(ERROR.to_full_result(), Err(IError::Error(error_code!(ErrorKind::Tag)))); - } - */ } diff --git a/third_party/rust/nom/src/lib.rs b/third_party/rust/nom/src/lib.rs index 3b9c566174..c37410d7d1 100644 --- a/third_party/rust/nom/src/lib.rs +++ b/third_party/rust/nom/src/lib.rs @@ -6,9 +6,14 @@ //! ## Example //! //! ```rust -//! #[macro_use] //! extern crate nom; //! +//! use nom::{ +//! IResult, +//! bytes::complete::{tag, take_while_m_n}, +//! combinator::map_res, +//! sequence::tuple}; +//! //! #[derive(Debug,PartialEq)] //! pub struct Color { //! pub red: u8, @@ -24,19 +29,19 @@ //! c.is_digit(16) //! } //! -//! named!(hex_primary<&str, u8>, -//! map_res!(take_while_m_n!(2, 2, is_hex_digit), from_hex) -//! ); +//! fn hex_primary(input: &str) -> IResult<&str, u8> { +//! map_res( +//! take_while_m_n(2, 2, is_hex_digit), +//! from_hex +//! )(input) +//! } //! -//! named!(hex_color<&str, Color>, -//! do_parse!( -//! tag!("#") >> -//! red: hex_primary >> -//! green: hex_primary >> -//! blue: hex_primary >> -//! (Color { red, green, blue }) -//! ) -//! ); +//! fn hex_color(input: &str) -> IResult<&str, Color> { +//! let (input, _) = tag("#")(input)?; +//! let (input, (red, green, blue)) = tuple((hex_primary, hex_primary, hex_primary))(input)?; +//! +//! Ok((input, Color { red, green, blue })) +//! } //! //! fn main() { //! assert_eq!(hex_color("#2F14DF"), Ok(("", Color { @@ -50,18 +55,15 @@ //! The code is available on [Github](https://github.com/Geal/nom) //! //! There are a few [guides](https://github.com/Geal/nom/tree/master/doc) with more details -//! about [the design of nom](https://github.com/Geal/nom/blob/master/doc/how_nom_macros_work.md), +//! about [the design of nom macros](https://github.com/Geal/nom/blob/master/doc/how_nom_macros_work.md), //! [how to write parsers](https://github.com/Geal/nom/blob/master/doc/making_a_new_parser_from_scratch.md), //! or the [error management system](https://github.com/Geal/nom/blob/master/doc/error_management.md). //! //! **Looking for a specific combinator? Read the //! ["choose a combinator" guide](https://github.com/Geal/nom/blob/master/doc/choosing_a_combinator.md)** //! -//! If you are upgrading to nom 2.0, please read the -//! [migration document](https://github.com/Geal/nom/blob/master/doc/upgrading_to_nom_2.md). -//! -//! If you are upgrading to nom 4.0, please read the -//! [migration document](https://github.com/Geal/nom/blob/master/doc/upgrading_to_nom_4.md). +//! If you are upgrading to nom 5.0, please read the +//! [migration document](https://github.com/Geal/nom/blob/master/doc/upgrading_to_nom_5.md). //! //! See also the [FAQ](https://github.com/Geal/nom/blob/master/doc/FAQ.md). //! @@ -70,14 +72,14 @@ //! Parser combinators are an approach to parsers that is very different from //! software like [lex](https://en.wikipedia.org/wiki/Lex_(software)) and //! [yacc](https://en.wikipedia.org/wiki/Yacc). Instead of writing the grammar -//! in a separate file and generating the corresponding code, you use very small -//! functions with very specific purpose, like "take 5 bytes", or "recognize the -//! 
word 'HTTP'", and assemble then in meaningful patterns like "recognize +//! in a separate syntax and generating the corresponding code, you use very small +//! functions with very specific purposes, like "take 5 bytes", or "recognize the +//! word 'HTTP'", and assemble them in meaningful patterns like "recognize //! 'HTTP', then a space, then a version". //! The resulting code is small, and looks like the grammar you would have //! written with other parser approaches. //! -//! This has a few advantages: +//! This gives us a few advantages: //! //! - the parsers are small and easy to write //! - the parsers components are easy to reuse (if they're general enough, please add them to nom!) @@ -88,39 +90,55 @@ //! Here is an example of one such parser, to recognize text between parentheses: //! //! ```rust -//! #[macro_use] -//! extern crate nom; -//! -//! # fn main() { -//! named!(parens, delimited!(char!('('), is_not!(")"), char!(')'))); -//! # } +//! use nom::{ +//! IResult, +//! sequence::delimited, +//! // see the "streaming/complete" paragraph lower for an explanation of these submodules +//! character::complete::char, +//! bytes::complete::is_not +//! }; +//! +//! fn parens(input: &str) -> IResult<&str, &str> { +//! delimited(char('('), is_not(")"), char(')'))(input) +//! } //! ``` //! -//! It defines a function named `parens`, which will recognize a sequence of the character `(`, the longest byte array not containing `)`, then the character `)`, and will return the byte array in the middle. +//! It defines a function named `parens` which will recognize a sequence of the +//! character `(`, the longest byte array not containing `)`, then the character +//! `)`, and will return the byte array in the middle. //! -//! Here is another parser, written without using nom's macros this time: +//! Here is another parser, written without using nom's combinators this time: //! //! ```rust //! #[macro_use] //! extern crate nom; //! -//! use nom::{IResult,Err,Needed}; +//! use nom::{IResult, Err, Needed}; //! //! # fn main() { -//! fn take4(i:&[u8]) -> IResult<&[u8], &[u8]>{ +//! fn take4(i: &[u8]) -> IResult<&[u8], &[u8]>{ //! if i.len() < 4 { //! Err(Err::Incomplete(Needed::Size(4))) //! } else { -//! Ok((&i[4..],&i[0..4])) +//! Ok((&i[4..], &i[0..4])) //! } //! } //! # } //! ``` //! //! This function takes a byte array as input, and tries to consume 4 bytes. -//! Writing all the parsers manually, like this, is dangerous, despite Rust's safety features. There -//! are still a lot of mistakes one can make. That's why nom provides a list of macros to help in -//! developing parsers. +//! Writing all the parsers manually, like this, is dangerous, despite Rust's +//! safety features. There are still a lot of mistakes one can make. That's why +//! nom provides a list of function and macros to help in developing parsers. +//! +//! With functions, you would write it like this: +//! +//! ```rust +//! use nom::{IResult, bytes::streaming::take}; +//! fn take4(input: &str) -> IResult<&str, &str> { +//! take(4u8)(input) +//! } +//! ``` //! //! With macros, you would write it like this: //! @@ -133,6 +151,13 @@ //! # } //! ``` //! +//! nom has used macros for combinators from versions 1 to 4, and from version +//! 5, it proposes new combinators as functions, but still allows the macros style +//! (macros have been rewritten to use the functions under the hood). +//! For new parsers, we recommend using the functions instead of macros, since +//! rustc messages will be much easier to understand. +//! +//! //! 
A parser in nom is a function which, for an input type `I`, an output type `O` //! and an optional error type `E`, will have the following signature: //! @@ -149,123 +174,101 @@ //! `IResult` is an alias for the `Result` type: //! //! ```rust -//! use nom::{Needed, Context}; +//! use nom::{Needed, error::ErrorKind}; //! -//! type IResult = Result<(I, O), Err>; +//! type IResult = Result<(I, O), Err>; //! -//! enum Err { +//! enum Err { //! Incomplete(Needed), -//! Error(Context), -//! Failure(Context), +//! Error(E), +//! Failure(E), //! } //! ``` //! //! It can have the following values: //! //! - a correct result `Ok((I,O))` with the first element being the remaining of the input (not parsed yet), and the second the output value; -//! - an error `Err(Err::Error(c))` with `c` an enum that contains an error code with its position in the input, and optionally a chain of accumulated errors; +//! - an error `Err(Err::Error(c))` with `c` an error that can be built from the input position and a parser specific error //! - an error `Err(Err::Incomplete(Needed))` indicating that more input is necessary. `Needed` can indicate how much data is needed //! - an error `Err(Err::Failure(c))`. It works like the `Error` case, except it indicates an unrecoverable error: we cannot backtrack and test another parser //! -//! Please refer to the [documentation][doc] for an exhaustive list of parsers. See also the -//! ["choose a combinator" guide](https://github.com/Geal/nom/blob/master/doc/choosing_a_combinator.md)**. +//! Please refer to the ["choose a combinator" guide](https://github.com/Geal/nom/blob/master/doc/choosing_a_combinator.md) for an exhaustive list of parsers. +//! See also the rest of the documentation [here](https://github.com/Geal/nom/blob/master/doc). +//! . //! -//! ## Making new parsers with macros +//! ## Making new parsers with function combinators //! -//! Macros are the main way to make new parsers by combining other ones. Those macros accept other macros or function names as arguments. You then need to make a function out of that combinator with **`named!`**, or a closure with **`closure!`**. Here is how you would do, with the **`tag!`** and **`take!`** combinators: +//! nom is based on functions that generate parsers, with a signature like +//! this: `(arguments) -> impl Fn(Input) -> IResult`. +//! The arguments of a combinator can be direct values (like `take` which uses +//! a number of bytes or character as argument) or even other parsers (like +//! `delimited` which takes as argument 3 parsers, and returns the result of +//! the second one if all are successful). //! -//! ```rust -//! # #[macro_use] extern crate nom; -//! # fn main() { -//! named!(abcd_parser, tag!("abcd")); // will consume bytes if the input begins with "abcd" -//! -//! named!(take_10, take!(10)); // will consume and return 10 bytes of input -//! # } -//! ``` -//! -//! The **`named!`** macro can take three different syntaxes: -//! -//! ```rust,ignore -//! named!(my_function( &[u8] ) -> &[u8], tag!("abcd")); -//! -//! named!(my_function<&[u8], &[u8]>, tag!("abcd")); -//! -//! named!(my_function, tag!("abcd")); // when you know the parser takes &[u8] as input, and returns &[u8] as output -//! ``` -//! -//! **IMPORTANT NOTE**: Rust's macros can be very sensitive to the syntax, so you may encounter an error compiling parsers like this one: +//! Here are some examples: //! //! ```rust -//! # #[macro_use] extern crate nom; -//! # #[cfg(feature = "alloc")] -//! # fn main() { -//! 
named!(my_function<&[u8], Vec<&[u8]>>, many0!(tag!("abcd"))); -//! # } -//! -//! # #[cfg(not(feature = "alloc"))] -//! # fn main() {} -//! ``` -//! -//! You will get the following error: `error: expected an item keyword`. This -//! happens because `>>` is seen as an operator, so the macro parser does not -//! recognize what we want. There is a way to avoid it, by inserting a space: +//! use nom::IResult; +//! use nom::bytes::complete::{tag, take}; +//! fn abcd_parser(i: &str) -> IResult<&str, &str> { +//! tag("abcd")(i) // will consume bytes if the input begins with "abcd" +//! } //! -//! ```rust -//! # #[macro_use] extern crate nom; -//! # #[cfg(feature = "alloc")] -//! # fn main() { -//! named!(my_function<&[u8], Vec<&[u8]> >, many0!(tag!("abcd"))); -//! # } -//! # #[cfg(not(feature = "alloc"))] -//! # fn main() {} +//! fn take_10(i: &[u8]) -> IResult<&[u8], &[u8]> { +//! take(10u8)(i) // will consume and return 10 bytes of input +//! } //! ``` //! -//! This will compile correctly. I am very sorry for this inconvenience. -//! //! ## Combining parsers //! -//! There are more high level patterns, like the **`alt!`** combinator, which provides a choice between multiple parsers. If one branch fails, it tries the next, and returns the result of the first parser that succeeds: +//! There are higher level patterns, like the **`alt`** combinator, which +//! provides a choice between multiple parsers. If one branch fails, it tries +//! the next, and returns the result of the first parser that succeeds: //! //! ```rust -//! # #[macro_use] extern crate nom; -//! # fn main() { -//! named!(alt_tags, alt!(tag!("abcd") | tag!("efgh"))); +//! use nom::IResult; +//! use nom::branch::alt; +//! use nom::bytes::complete::tag; //! -//! assert_eq!(alt_tags(b"abcdxxx"), Ok((&b"xxx"[..], &b"abcd"[..]))); -//! assert_eq!(alt_tags(b"efghxxx"), Ok((&b"xxx"[..], &b"efgh"[..]))); -//! assert_eq!(alt_tags(b"ijklxxx"), Err(nom::Err::Error(error_position!(&b"ijklxxx"[..], nom::ErrorKind::Alt)))); -//! # } -//! ``` +//! let alt_tags = alt((tag("abcd"), tag("efgh"))); //! -//! The pipe `|` character is used as separator. +//! assert_eq!(alt_tags(&b"abcdxxx"[..]), Ok((&b"xxx"[..], &b"abcd"[..]))); +//! assert_eq!(alt_tags(&b"efghxxx"[..]), Ok((&b"xxx"[..], &b"efgh"[..]))); +//! assert_eq!(alt_tags(&b"ijklxxx"[..]), Err(nom::Err::Error((&b"ijklxxx"[..], nom::error::ErrorKind::Tag)))); +//! ``` //! -//! The **`opt!`** combinator makes a parser optional. If the child parser returns an error, **`opt!`** will succeed and return None: +//! The **`opt`** combinator makes a parser optional. If the child parser returns +//! an error, **`opt`** will still succeed and return None: //! //! ```rust -//! # #[macro_use] extern crate nom; -//! # fn main() { -//! named!( abcd_opt< &[u8], Option<&[u8]> >, opt!( tag!("abcd") ) ); +//! use nom::{IResult, combinator::opt, bytes::complete::tag}; +//! fn abcd_opt(i: &[u8]) -> IResult<&[u8], Option<&[u8]>> { +//! opt(tag("abcd"))(i) +//! } //! -//! assert_eq!(abcd_opt(b"abcdxxx"), Ok((&b"xxx"[..], Some(&b"abcd"[..])))); -//! assert_eq!(abcd_opt(b"efghxxx"), Ok((&b"efghxxx"[..], None))); -//! # } +//! assert_eq!(abcd_opt(&b"abcdxxx"[..]), Ok((&b"xxx"[..], Some(&b"abcd"[..])))); +//! assert_eq!(abcd_opt(&b"efghxxx"[..]), Ok((&b"efghxxx"[..], None))); //! ``` //! -//! **`many0!`** applies a parser 0 or more times, and returns a vector of the aggregated results: +//! **`many0`** applies a parser 0 or more times, and returns a vector of the aggregated results: //! //! ```rust //! 
# #[macro_use] extern crate nom; //! # #[cfg(feature = "alloc")] //! # fn main() { +//! use nom::{IResult, multi::many0, bytes::complete::tag}; //! use std::str; //! -//! named!(multi< Vec<&str> >, many0!( map_res!(tag!( "abcd" ), str::from_utf8) ) ); -//! let a = b"abcdef"; -//! let b = b"abcdabcdef"; -//! let c = b"azerty"; -//! assert_eq!(multi(a), Ok((&b"ef"[..], vec!["abcd"]))); -//! assert_eq!(multi(b), Ok((&b"ef"[..], vec!["abcd", "abcd"]))); -//! assert_eq!(multi(c), Ok((&b"azerty"[..], Vec::new()))); +//! fn multi(i: &str) -> IResult<&str, Vec<&str>> { +//! many0(tag("abcd"))(i) +//! } +//! +//! let a = "abcdef"; +//! let b = "abcdabcdef"; +//! let c = "azerty"; +//! assert_eq!(multi(a), Ok(("ef", vec!["abcd"]))); +//! assert_eq!(multi(b), Ok(("ef", vec!["abcd", "abcd"]))); +//! assert_eq!(multi(c), Ok(("azerty", Vec::new()))); //! # } //! # #[cfg(not(feature = "alloc"))] //! # fn main() {} @@ -273,26 +276,24 @@ //! //! Here are some basic combining macros available: //! -//! - **`opt!`**: will make the parser optional (if it returns the `O` type, the new parser returns `Option`) -//! - **`many0!`**: will apply the parser 0 or more times (if it returns the `O` type, the new parser returns `Vec`) -//! - **`many1!`**: will apply the parser 1 or more times +//! - **`opt`**: will make the parser optional (if it returns the `O` type, the new parser returns `Option`) +//! - **`many0`**: will apply the parser 0 or more times (if it returns the `O` type, the new parser returns `Vec`) +//! - **`many1`**: will apply the parser 1 or more times //! -//! There are more complex (and more useful) parsers like `do_parse!` and `tuple!`, which are used to apply a series of parsers then assemble their results. +//! There are more complex (and more useful) parsers like `tuple!`, which is +//! used to apply a series of parsers then assemble their results. //! -//! Example with `tuple!`: +//! Example with `tuple`: //! //! ```rust //! # #[macro_use] extern crate nom; //! # fn main() { -//! use nom::{ErrorKind, Needed,be_u16}; -//! -//! named!(tpl<&[u8], (u16, &[u8], &[u8]) >, -//! tuple!( -//! be_u16 , -//! take!(3), -//! tag!("fg") -//! ) -//! ); +//! use nom::{error::ErrorKind, Needed, +//! number::streaming::be_u16, +//! bytes::streaming::{tag, take}, +//! sequence::tuple}; +//! +//! let tpl = tuple((be_u16, take(3u8), tag("fg"))); //! //! assert_eq!( //! tpl(&b"abcdefgh"[..]), @@ -303,16 +304,17 @@ //! ); //! assert_eq!(tpl(&b"abcde"[..]), Err(nom::Err::Incomplete(Needed::Size(2)))); //! let input = &b"abcdejk"[..]; -//! assert_eq!(tpl(input), Err(nom::Err::Error(error_position!(&input[5..], ErrorKind::Tag)))); +//! assert_eq!(tpl(input), Err(nom::Err::Error((&input[5..], ErrorKind::Tag)))); //! # } //! ``` //! -//! Example with `do_parse!`: +//! But you can also use a sequence of combinators written in imperative style, +//! thanks to the `?` operator: //! //! ```rust //! # #[macro_use] extern crate nom; //! # fn main() { -//! use nom::IResult; +//! use nom::{IResult, bytes::complete::tag}; //! //! #[derive(Debug, PartialEq)] //! struct A { @@ -323,37 +325,86 @@ //! fn ret_int1(i:&[u8]) -> IResult<&[u8], u8> { Ok((i,1)) } //! fn ret_int2(i:&[u8]) -> IResult<&[u8], u8> { Ok((i,2)) } //! -//! named!(f<&[u8],A>, -//! do_parse!( // the parser takes a byte array as input, and returns an A struct -//! tag!("abcd") >> // begins with "abcd" -//! opt!(tag!("abcd")) >> // this is an optional parser -//! aa: ret_int1 >> // the return value of ret_int1, if it does not fail, will be stored in aa -//! 
tag!("efgh") >> -//! bb: ret_int2 >> -//! tag!("efgh") >> -//! -//! (A{a: aa, b: bb}) // the final tuple will be able to use the variable defined previously -//! ) -//! ); +//! fn f(i: &[u8]) -> IResult<&[u8], A> { +//! // if successful, the parser returns `Ok((remaining_input, output_value))` that we can destructure +//! let (i, _) = tag("abcd")(i)?; +//! let (i, a) = ret_int1(i)?; +//! let (i, _) = tag("efgh")(i)?; +//! let (i, b) = ret_int2(i)?; //! -//! let r = f(b"abcdabcdefghefghX"); -//! assert_eq!(r, Ok((&b"X"[..], A{a: 1, b: 2}))); +//! Ok((i, A { a, b })) +//! } //! -//! let r2 = f(b"abcdefghefghX"); -//! assert_eq!(r2, Ok((&b"X"[..], A{a: 1, b: 2}))); +//! let r = f(b"abcdefghX"); +//! assert_eq!(r, Ok((&b"X"[..], A{a: 1, b: 2}))); //! # } //! ``` //! -//! The double right arrow `>>` is used as separator between every parser in the sequence, and the last closure can see the variables storing the result of parsers. Unless the specified return type is already a tuple, the final line should be that type wrapped in a tuple. +//! ## Streaming / Complete +//! +//! Some of nom's modules have `streaming` or `complete` submodules. They hold +//! different variants of the same combinators. //! -//! More examples of [`do_parse!`](macro.do_parse.html) and [`tuple!`](macro.tuple.html) usage can be found in the [INI file parser example](tests/ini.rs). +//! A streaming parser assumes that we might not have all of the input data. +//! This can happen with some network protocol or large file parsers, where the +//! input buffer can be full and need to be resized or refilled. +//! +//! A complete parser assumes that we already have all of the input data. +//! This will be the common case with small files that can be read entirely to +//! memory. +//! +//! Here is how it works in practice: +//! +//! ```rust +//! use nom::{IResult, Err, Needed, error::ErrorKind, bytes, character}; +//! +//! fn take_streaming(i: &[u8]) -> IResult<&[u8], &[u8]> { +//! bytes::streaming::take(4u8)(i) +//! } //! +//! fn take_complete(i: &[u8]) -> IResult<&[u8], &[u8]> { +//! bytes::complete::take(4u8)(i) +//! } +//! +//! // both parsers will take 4 bytes as expected +//! assert_eq!(take_streaming(&b"abcde"[..]), Ok((&b"e"[..], &b"abcd"[..]))); +//! assert_eq!(take_complete(&b"abcde"[..]), Ok((&b"e"[..], &b"abcd"[..]))); +//! +//! // if the input is smaller than 4 bytes, the streaming parser +//! // will return `Incomplete` to indicate that we need more data +//! assert_eq!(take_streaming(&b"abc"[..]), Err(Err::Incomplete(Needed::Size(4)))); +//! +//! // but the complete parser will return an error +//! assert_eq!(take_complete(&b"abc"[..]), Err(Err::Error((&b"abc"[..], ErrorKind::Eof)))); +//! +//! // the alpha0 function recognizes 0 or more alphabetic characters +//! fn alpha0_streaming(i: &str) -> IResult<&str, &str> { +//! character::streaming::alpha0(i) +//! } +//! +//! fn alpha0_complete(i: &str) -> IResult<&str, &str> { +//! character::complete::alpha0(i) +//! } +//! +//! // if there's a clear limit to the recognized characters, both parsers work the same way +//! assert_eq!(alpha0_streaming("abcd;"), Ok((";", "abcd"))); +//! assert_eq!(alpha0_complete("abcd;"), Ok((";", "abcd"))); +//! +//! // but when there's no limit, the streaming version returns `Incomplete`, because it cannot +//! // know if more input data should be recognized. The whole input could be "abcd;", or +//! // "abcde;" +//! assert_eq!(alpha0_streaming("abcd"), Err(Err::Incomplete(Needed::Size(1)))); +//! +//! 
// while the complete version knows that all of the data is there +//! assert_eq!(alpha0_complete("abcd"), Ok(("", "abcd"))); +//! ``` //! **Going further:** read the [guides](https://github.com/Geal/nom/tree/master/doc)! #![cfg_attr(all(not(feature = "std"), feature = "alloc"), feature(alloc))] #![cfg_attr(not(feature = "std"), no_std)] -//#![warn(missing_docs)] #![cfg_attr(feature = "cargo-clippy", allow(doc_markdown))] #![cfg_attr(nightly, feature(test))] +#![deny(missing_docs)] +#![warn(missing_doc_code_examples)] #[cfg(all(not(feature = "std"), feature = "alloc"))] #[macro_use] @@ -363,9 +414,17 @@ extern crate alloc; extern crate lazy_static; extern crate memchr; #[cfg(feature = "regexp")] -extern crate regex; +pub extern crate regex; +#[cfg(feature = "lexical")] +extern crate lexical_core; #[cfg(nightly)] extern crate test; +#[cfg(test)] +extern crate doc_comment; + +//FIXME: reactivate doctest once https://github.com/rust-lang/rust/issues/62210 is done +//#[cfg(doctest)] +//doc_comment::doctest!("../README.md"); /// Lib module to re-export everything needed from `std` or `core`/`alloc`. This is how `serde` does /// it, albeit there it is not public. @@ -373,48 +432,40 @@ pub mod lib { /// `std` facade allowing `std`/`core` to be interchangeable. Reexports `alloc` crate optionally, /// as well as `core` or `std` #[cfg(not(feature = "std"))] + /// internal std exports for no_std compatibility pub mod std { #[cfg(feature = "alloc")] #[cfg_attr(feature = "alloc", macro_use)] pub use alloc::{boxed, string, vec}; - pub use core::{cmp, convert, fmt, iter, mem, ops, option, result, slice, str}; + pub use core::{cmp, convert, fmt, iter, mem, ops, option, result, slice, str, borrow}; + + /// internal reproduction of std prelude pub mod prelude { pub use core::prelude as v1; } } #[cfg(feature = "std")] + /// internal std exports for no_std compatibility pub mod std { - pub use std::{boxed, cmp, collections, convert, fmt, hash, iter, mem, ops, option, result, slice, str, string, vec}; + pub use std::{alloc, boxed, cmp, collections, convert, fmt, hash, iter, mem, ops, option, result, slice, str, string, vec, borrow}; + + /// internal reproduction of std prelude pub mod prelude { pub use std::prelude as v1; } } + + #[cfg(feature = "regexp")] + pub use regex; } pub use self::traits::*; pub use self::util::*; - -#[cfg(feature = "verbose-errors")] -pub use self::verbose_errors::*; - -#[cfg(not(feature = "verbose-errors"))] -pub use self::simple_errors::*; - -pub use self::branch::*; pub use self::internal::*; -pub use self::macros::*; pub use self::methods::*; -pub use self::multi::*; -pub use self::sequence::*; - pub use self::bits::*; -pub use self::bytes::*; - -pub use self::character::*; -pub use self::nom::*; - pub use self::whitespace::*; #[cfg(feature = "regexp")] @@ -424,37 +475,30 @@ pub use self::str::*; #[macro_use] mod util; -#[cfg(feature = "verbose-errors")] -#[macro_use] -pub mod verbose_errors; - -#[cfg(not(feature = "verbose-errors"))] #[macro_use] -pub mod simple_errors; +pub mod error; #[macro_use] mod internal; mod traits; #[macro_use] -mod macros; +pub mod combinator; #[macro_use] -mod branch; +pub mod branch; #[macro_use] -mod sequence; +pub mod sequence; #[macro_use] -mod multi; +pub mod multi; #[macro_use] pub mod methods; #[macro_use] -mod bytes; +pub mod bytes; #[macro_use] pub mod bits; #[macro_use] -mod character; -#[macro_use] -mod nom; +pub mod character; #[macro_use] pub mod whitespace; @@ -465,4 +509,5 @@ mod regexp; mod str; -pub mod types; +#[macro_use] +pub mod 
number; diff --git a/third_party/rust/nom/src/macros.rs b/third_party/rust/nom/src/macros.rs deleted file mode 100644 index a1af6fefc0..0000000000 --- a/third_party/rust/nom/src/macros.rs +++ /dev/null @@ -1,1738 +0,0 @@ -//! Macro combinators -//! -//! Macros are used to make combination easier, -//! since they often do not depend on the type -//! of the data they manipulate or return. -//! -//! There is a trick to make them easier to assemble, -//! combinators are defined like this: -//! -//! ```ignore -//! macro_rules! tag ( -//! ($i:expr, $inp: expr) => ( -//! { -//! ... -//! } -//! ); -//! ); -//! ``` -//! -//! But when used in other combinators, are Used -//! like this: -//! -//! ```ignore -//! named!(my_function, tag!("abcd")); -//! ``` -//! -//! Internally, other combinators will rewrite -//! that call to pass the input as first argument: -//! -//! ```ignore -//! macro_rules! named ( -//! ($name:ident, $submac:ident!( $($args:tt)* )) => ( -//! fn $name<'a>( i: &'a [u8] ) -> IResult<'a,&[u8], &[u8]> { -//! $submac!(i, $($args)*) -//! } -//! ); -//! ); -//! ``` -//! -//! If you want to call a combinator directly, you can -//! do it like this: -//! -//! ```ignore -//! let res = { tag!(input, "abcd"); } -//! ``` -//! -//! Combinators must have a specific variant for -//! non-macro arguments. Example: passing a function -//! to take_while! instead of another combinator. -//! -//! ```ignore -//! macro_rules! take_while( -//! ($input:expr, $submac:ident!( $($args:tt)* )) => ( -//! { -//! ... -//! } -//! ); -//! -//! // wrap the function in a macro to pass it to the main implementation -//! ($input:expr, $f:expr) => ( -//! take_while!($input, call!($f)); -//! ); -//! ); -//! ``` -#[allow(unused_variables)] - -/// Wraps a parser in a closure -#[macro_export] -macro_rules! closure ( - ($ty:ty, $submac:ident!( $($args:tt)* )) => ( - |i: $ty| { $submac!(i, $($args)*) } - ); - ($submac:ident!( $($args:tt)* )) => ( - |i| { $submac!(i, $($args)*) } - ); -); - -/// Makes a function from a parser combination -/// -/// The type can be set up if the compiler needs -/// more information -/// -/// ```ignore -/// named!(my_function( &[u8] ) -> &[u8], tag!("abcd")); -/// // first type parameter is input, second is output -/// named!(my_function<&[u8], &[u8]>, tag!("abcd")); -/// // will have &[u8] as input type, &[u8] as output type -/// named!(my_function, tag!("abcd")); -/// // will use &[u8] as input type (use this if the compiler -/// // complains about lifetime issues -/// named!(my_function<&[u8]>, tag!("abcd")); -/// // prefix them with 'pub' to make the functions public -/// named!(pub my_function, tag!("abcd")); -/// // prefix them with 'pub(crate)' to make the functions public within the crate -/// named!(pub(crate) my_function, tag!("abcd")); -/// ``` -#[macro_export] -macro_rules! 
named ( - (#$($args:tt)*) => ( - named_attr!(#$($args)*); - ); - ($name:ident( $i:ty ) -> $o:ty, $submac:ident!( $($args:tt)* )) => ( - fn $name( i: $i ) -> $crate::IResult<$i,$o,u32> { - $submac!(i, $($args)*) - } - ); - ($name:ident<$i:ty,$o:ty,$e:ty>, $submac:ident!( $($args:tt)* )) => ( - fn $name( i: $i ) -> $crate::IResult<$i, $o, $e> { - $submac!(i, $($args)*) - } - ); - ($name:ident<$i:ty,$o:ty>, $submac:ident!( $($args:tt)* )) => ( - fn $name( i: $i ) -> $crate::IResult<$i, $o, u32> { - $submac!(i, $($args)*) - } - ); - ($name:ident<$o:ty>, $submac:ident!( $($args:tt)* )) => ( - fn $name( i: &[u8] ) -> $crate::IResult<&[u8], $o, u32> { - $submac!(i, $($args)*) - } - ); - ($name:ident, $submac:ident!( $($args:tt)* )) => ( - fn $name( i: &[u8] ) -> $crate::IResult<&[u8], &[u8], u32> { - $submac!(i, $($args)*) - } - ); - (pub $name:ident( $i:ty ) -> $o:ty, $submac:ident!( $($args:tt)* )) => ( - pub fn $name( i: $i ) -> $crate::IResult<$i,$o, u32> { - $submac!(i, $($args)*) - } - ); - (pub $name:ident<$i:ty,$o:ty,$e:ty>, $submac:ident!( $($args:tt)* )) => ( - pub fn $name( i: $i ) -> $crate::IResult<$i, $o, $e> { - $submac!(i, $($args)*) - } - ); - (pub $name:ident<$i:ty,$o:ty>, $submac:ident!( $($args:tt)* )) => ( - pub fn $name( i: $i ) -> $crate::IResult<$i, $o, u32> { - $submac!(i, $($args)*) - } - ); - (pub $name:ident<$o:ty>, $submac:ident!( $($args:tt)* )) => ( - pub fn $name( i: &[u8] ) -> $crate::IResult<&[u8], $o, u32> { - $submac!(i, $($args)*) - } - ); - (pub $name:ident, $submac:ident!( $($args:tt)* )) => ( - pub fn $name( i: &[u8] ) -> $crate::IResult<&[u8], &[u8], u32> { - $submac!(i, $($args)*) - } - ); - (pub(crate) $name:ident( $i:ty ) -> $o:ty, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - pub(crate) fn $name( i: $i ) -> $crate::IResult<$i,$o, u32> { - $submac!(i, $($args)*) - } - ); - (pub(crate) $name:ident<$i:ty,$o:ty,$e:ty>, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - pub(crate) fn $name( i: $i ) -> $crate::IResult<$i, $o, $e> { - $submac!(i, $($args)*) - } - ); - (pub(crate) $name:ident<$i:ty,$o:ty>, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - pub(crate) fn $name( i: $i ) -> $crate::IResult<$i, $o, u32> { - $submac!(i, $($args)*) - } - ); - (pub(crate) $name:ident<$o:ty>, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - pub(crate) fn $name( i: &[u8] ) -> $crate::IResult<&[u8], $o, u32> { - $submac!(i, $($args)*) - } - ); - (pub(crate) $name:ident, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - pub(crate) fn $name<'a>( i: &'a [u8] ) -> $crate::IResult<&[u8], &[u8], u32> { - $submac!(i, $($args)*) - } - ); -); - -/// Makes a function from a parser combination with arguments. -/// -/// ```ignore -/// //takes `&[u8]` as input -/// named_args!(tagged(open_tag: &[u8], close_tag: &[u8])<&str>, -/// delimited!(tag!(open_tag), map_res!(take!(4), str::from_utf8), tag!(close_tag)) -/// ); - -/// //takes `&str` as input -/// named_args!(tagged(open_tag: &str, close_tag: &str)<&str, &str>, -/// delimited!(tag!(open_tag), take!(4), tag!(close_tag)) -/// ); -/// ``` -/// -/// Note: if using arguments that way gets hard to read, it is always -/// possible to write the equivalent parser definition manually, like -/// this: -/// -/// ```ignore -/// fn tagged(input: &[u8], open_tag: &[u8], close_tag: &[u8]) -> IResult<&[u8], &str> { -/// // the first combinator in the tree gets the input as argument. 
It is then -/// // passed from one combinator to the next through macro rewriting -/// delimited!(input, -/// tag!(open_tag), take!(4), tag!(close_tag) -/// ) -/// ); -/// ``` -/// -#[macro_export] -macro_rules! named_args { - (pub $func_name:ident ( $( $arg:ident : $typ:ty ),* ) < $return_type:ty > , $submac:ident!( $($args:tt)* ) ) => { - pub fn $func_name(input: &[u8], $( $arg : $typ ),*) -> $crate::IResult<&[u8], $return_type> { - $submac!(input, $($args)*) - } - }; - (pub $func_name:ident < 'a > ( $( $arg:ident : $typ:ty ),* ) < $return_type:ty > , $submac:ident!( $($args:tt)* ) ) => { - pub fn $func_name<'this_is_probably_unique_i_hope_please, 'a>( - input: &'this_is_probably_unique_i_hope_please [u8], $( $arg : $typ ),*) -> - $crate::IResult<&'this_is_probably_unique_i_hope_please [u8], $return_type> - { - $submac!(input, $($args)*) - } - }; - (pub(crate) $func_name:ident ( $( $arg:ident : $typ:ty ),* ) < $return_type:ty > , $submac:ident!( $($args:tt)* ) ) => { - pub(crate) fn $func_name(input: &[u8], $( $arg : $typ ),*) -> $crate::IResult<&[u8], $return_type> { - $submac!(input, $($args)*) - } - }; - (pub(crate) $func_name:ident < 'a > ( $( $arg:ident : $typ:ty ),* ) < $return_type:ty > , $submac:ident!( $($args:tt)* ) ) => { - pub(crate) fn $func_name<'this_is_probably_unique_i_hope_please, 'a>(input: &'this_is_probably_unique_i_hope_please [u8], $( $arg : $typ ),*) -> $crate::IResult<&'this_is_probably_unique_i_hope_please [u8], $return_type> { - $submac!(input, $($args)*) - } - }; - ($func_name:ident ( $( $arg:ident : $typ:ty ),* ) < $return_type:ty > , $submac:ident!( $($args:tt)* ) ) => { - fn $func_name(input: &[u8], $( $arg : $typ ),*) -> $crate::IResult<&[u8], $return_type> { - $submac!(input, $($args)*) - } - }; - ($func_name:ident < 'a > ( $( $arg:ident : $typ:ty ),* ) < $return_type:ty > , $submac:ident!( $($args:tt)* ) ) => { - fn $func_name<'this_is_probably_unique_i_hope_please, 'a>( - input: &'this_is_probably_unique_i_hope_please [u8], $( $arg : $typ ),*) - -> $crate::IResult<&'this_is_probably_unique_i_hope_please [u8], $return_type> - { - $submac!(input, $($args)*) - } - }; - (pub $func_name:ident ( $( $arg:ident : $typ:ty ),* ) < $input_type:ty, $return_type:ty > , $submac:ident!( $($args:tt)* ) ) => { - pub fn $func_name(input: $input_type, $( $arg : $typ ),*) -> $crate::IResult<$input_type, $return_type> { - $submac!(input, $($args)*) - } - }; - ($func_name:ident ( $( $arg:ident : $typ:ty ),* ) < $input_type:ty, $return_type:ty > , $submac:ident!( $($args:tt)* ) ) => { - fn $func_name(input: $input_type, $( $arg : $typ ),*) -> $crate::IResult<$input_type, $return_type> { - $submac!(input, $($args)*) - } - }; - (pub $func_name:ident < 'a > ( $( $arg:ident : $typ:ty ),* ) < $input_type:ty, $return_type:ty > , $submac:ident!( $($args:tt)* ) ) => { - pub fn $func_name<'a>( - input: $input_type, $( $arg : $typ ),*) - -> $crate::IResult<$input_type, $return_type> - { - $submac!(input, $($args)*) - } - }; - ($func_name:ident < 'a > ( $( $arg:ident : $typ:ty ),* ) < $input_type:ty, $return_type:ty > , $submac:ident!( $($args:tt)* ) ) => { - fn $func_name<'a>( - input: $input_type, $( $arg : $typ ),*) - -> $crate::IResult<$input_type, $return_type> - { - $submac!(input, $($args)*) - } - }; -} - -/// Makes a function from a parser combination, with attributes -/// -/// The usage of this macro is almost identical to `named!`, except that -/// you also pass attributes to be attached to the generated function. -/// This is ideal for adding documentation to your parser. 
-/// -/// ```ignore -/// // Create my_function as if you wrote it with the doc comment /// My Func -/// named_attr!(#[doc = "My Func"], my_function( &[u8] ) -> &[u8], tag!("abcd")); -/// // Also works for pub functions, and multiple lines -/// named!(#[doc = "My Func\nRecognise abcd"], pub my_function, tag!("abcd")); -/// // Multiple attributes can be passed if required -/// named!(#[doc = "My Func"] #[inline(always)], pub my_function, tag!("abcd")); -/// ``` -#[macro_export] -macro_rules! named_attr ( - ($(#[$attr:meta])*, $name:ident( $i:ty ) -> $o:ty, $submac:ident!( $($args:tt)* )) => ( - $(#[$attr])* - fn $name( i: $i ) -> $crate::IResult<$i,$o,u32> { - $submac!(i, $($args)*) - } - ); - ($(#[$attr:meta])*, $name:ident<$i:ty,$o:ty,$e:ty>, $submac:ident!( $($args:tt)* )) => ( - $(#[$attr])* - fn $name( i: $i ) -> $crate::IResult<$i, $o, $e> { - $submac!(i, $($args)*) - } - ); - ($(#[$attr:meta])*, $name:ident<$i:ty,$o:ty>, $submac:ident!( $($args:tt)* )) => ( - $(#[$attr])* - fn $name( i: $i ) -> $crate::IResult<$i, $o, u32> { - $submac!(i, $($args)*) - } - ); - ($(#[$attr:meta])*, $name:ident<$o:ty>, $submac:ident!( $($args:tt)* )) => ( - $(#[$attr])* - fn $name<'a>( i: &'a[u8] ) -> $crate::IResult<&'a [u8], $o, u32> { - $submac!(i, $($args)*) - } - ); - ($(#[$attr:meta])*, $name:ident, $submac:ident!( $($args:tt)* )) => ( - $(#[$attr])* - fn $name( i: &[u8] ) -> $crate::IResult<&[u8], &[u8], u32> { - $submac!(i, $($args)*) - } - ); - ($(#[$attr:meta])*, pub $name:ident( $i:ty ) -> $o:ty, $submac:ident!( $($args:tt)* )) => ( - $(#[$attr])* - pub fn $name( i: $i ) -> $crate::IResult<$i,$o, u32> { - $submac!(i, $($args)*) - } - ); - ($(#[$attr:meta])*, pub $name:ident<$i:ty,$o:ty,$e:ty>, $submac:ident!( $($args:tt)* )) => ( - $(#[$attr])* - pub fn $name( i: $i ) -> $crate::IResult<$i, $o, $e> { - $submac!(i, $($args)*) - } - ); - ($(#[$attr:meta])*, pub $name:ident<$i:ty,$o:ty>, $submac:ident!( $($args:tt)* )) => ( - $(#[$attr])* - pub fn $name( i: $i ) -> $crate::IResult<$i, $o, u32> { - $submac!(i, $($args)*) - } - ); - ($(#[$attr:meta])*, pub $name:ident<$o:ty>, $submac:ident!( $($args:tt)* )) => ( - $(#[$attr])* - pub fn $name( i: &[u8] ) -> $crate::IResult<&[u8], $o, u32> { - $submac!(i, $($args)*) - } - ); - ($(#[$attr:meta])*, pub $name:ident, $submac:ident!( $($args:tt)* )) => ( - $(#[$attr])* - pub fn $name<'a>( i: &'a [u8] ) -> $crate::IResult<&[u8], &[u8], u32> { - $submac!(i, $($args)*) - } - ); - ($(#[$attr:meta])*, pub(crate) $name:ident( $i:ty ) -> $o:ty, $submac:ident!( $($args:tt)* )) => ( - $(#[$attr])* - pub(crate) fn $name( i: $i ) -> $crate::IResult<$i,$o, u32> { - $submac!(i, $($args)*) - } - ); - ($(#[$attr:meta])*, pub(crate) $name:ident<$i:ty,$o:ty,$e:ty>, $submac:ident!( $($args:tt)* )) => ( - $(#[$attr])* - pub(crate) fn $name( i: $i ) -> $crate::IResult<$i, $o, $e> { - $submac!(i, $($args)*) - } - ); - ($(#[$attr:meta])*, pub(crate) $name:ident<$i:ty,$o:ty>, $submac:ident!( $($args:tt)* )) => ( - $(#[$attr])* - pub(crate) fn $name( i: $i ) -> $crate::IResult<$i, $o, u32> { - $submac!(i, $($args)*) - } - ); - ($(#[$attr:meta])*, pub(crate) $name:ident<$o:ty>, $submac:ident!( $($args:tt)* )) => ( - $(#[$attr])* - pub(crate) fn $name( i: &[u8] ) -> $crate::IResult<&[u8], $o, u32> { - $submac!(i, $($args)*) - } - ); - ($(#[$attr:meta])*, pub(crate) $name:ident, $submac:ident!( $($args:tt)* )) => ( - $(#[$attr])* - pub(crate) fn $name<'a>( i: &'a [u8] ) -> $crate::IResult<&[u8], &[u8], u32> { - $submac!(i, $($args)*) - } - ); -); - -/// Used to wrap common 
expressions and function as macros -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::IResult; -/// # fn main() { -/// fn take_wrapper(input: &[u8], i: u8) -> IResult<&[u8],&[u8]> { take!(input, i * 10) } -/// -/// // will make a parser taking 20 bytes -/// named!(parser, call!(take_wrapper, 2)); -/// # } -/// ``` -#[macro_export] -macro_rules! call ( - ($i:expr, $fun:expr) => ( $fun( $i ) ); - ($i:expr, $fun:expr, $($args:expr),* ) => ( $fun( $i, $($args),* ) ); -); - -/// emulate function currying: `apply!(my_function, arg1, arg2, ...)` becomes `my_function(input, arg1, arg2, ...)` -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::IResult; -/// # fn main() { -/// fn take_wrapper(input: &[u8], i: u8) -> IResult<&[u8],&[u8]> { take!(input, i * 10) } -/// -/// // will make a parser taking 20 bytes -/// named!(parser, apply!(take_wrapper, 2)); -/// # } -/// ``` -#[macro_export] -macro_rules! apply ( - ($i:expr, $fun:expr, $($args:expr),* ) => ( $fun( $i, $($args),* ) ); -); - -/// Prevents backtracking if the child parser fails -/// -/// This parser will do an early return instead of sending -/// its result to the parent parser. -/// -/// If another `return_error!` combinator is present in the parent -/// chain, the error will be wrapped and another early -/// return will be made. -/// -/// This makes it easy to build report on which parser failed, -/// where it failed in the input, and the chain of parsers -/// that led it there. -/// -/// Additionally, the error chain contains number identifiers -/// that can be matched to provide useful error messages. -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::Err; -/// # use nom::ErrorKind; -/// # fn main() { -/// named!(err_test<&[u8],&[u8],u32>, alt!( -/// tag!("abcd") | -/// preceded!(tag!("efgh"), return_error!(ErrorKind::Custom(42u32), -/// do_parse!( -/// tag!("ijkl") >> -/// res: return_error!(ErrorKind::Custom(128), tag!("mnop")) >> -/// (res) -/// ) -/// ) -/// ) -/// )); -/// let a = &b"efghblah"[..]; -/// let b = &b"efghijklblah"[..]; -/// let c = &b"efghijklmnop"[..]; -/// -/// let blah = &b"blah"[..]; -/// -/// let res_a = err_test(a); -/// let res_b = err_test(b); -/// let res_c = err_test(c); -/// assert_eq!(res_a, Err(Err::Failure(error_node_position!(blah, ErrorKind::Custom(42), error_position!(blah, ErrorKind::Tag))))); -/// assert_eq!(res_b, Err(Err::Failure(error_node_position!(&b"ijklblah"[..], ErrorKind::Custom(42), -/// error_node_position!(blah, ErrorKind::Custom(128), error_position!(blah, ErrorKind::Tag)))) -/// )); -/// # } -/// ``` -/// -#[macro_export] -macro_rules! 
return_error ( - ($i:expr, $code:expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Context,Err}; - - let i_ = $i.clone(); - let cl = || { - $submac!(i_, $($args)*) - }; - - fn unify_types(_: &Context, _: &Context) {} - - match cl() { - Err(Err::Incomplete(x)) => Err(Err::Incomplete(x)), - Ok((i, o)) => Ok((i, o)), - Err(Err::Error(e)) | Err(Err::Failure(e)) => { - unify_types(&e, &Context::Code($i, $code)); - return Err(Err::Failure(error_node_position!($i, $code, e))) - } - } - } - ); - ($i:expr, $code:expr, $f:expr) => ( - return_error!($i, $code, call!($f)); - ); - ($i:expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Context,Err}; - - let i_ = $i.clone(); - let cl = || { - $submac!(i_, $($args)*) - }; - - match cl() { - Err(Err::Incomplete(x)) => Err(Err::Incomplete(x)), - Ok((i, o)) => Ok((i, o)), - Err(Err::Error(e)) | Err(Err::Failure(e)) => { - return Err(Err::Failure(e)) - } - } - } - ); - ($i:expr, $f:expr) => ( - return_error!($i, call!($f)); - ); -); - -/// Add an error if the child parser fails -/// -/// While error! does an early return and avoids backtracking, -/// add_return_error! backtracks normally. It just provides more context -/// for an error -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # use std::collections; -/// # use nom::Err; -/// # use nom::ErrorKind; -/// # fn main() { -/// named!(err_test, add_return_error!(ErrorKind::Custom(42u32), tag!("abcd"))); -/// -/// let a = &b"efghblah"[..]; -/// let res_a = err_test(a); -/// assert_eq!(res_a, Err(Err::Error(error_node_position!(a, ErrorKind::Custom(42), error_position!(a, ErrorKind::Tag))))); -/// # } -/// ``` -/// -#[macro_export] -macro_rules! add_return_error ( - ($i:expr, $code:expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind}; - - match $submac!($i, $($args)*) { - Ok((i, o)) => Ok((i, o)), - Err(Err::Error(e)) => { - Err(Err::Error(error_node_position!($i, $code, e))) - }, - Err(Err::Failure(e)) => { - Err(Err::Failure(error_node_position!($i, $code, e))) - }, - Err(e) => Err(e), - } - } - ); - ($i:expr, $code:expr, $f:expr) => ( - add_return_error!($i, $code, call!($f)); - ); -); - -/// replaces a `Incomplete` returned by the child parser -/// with an `Error` -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # use std::collections; -/// # use nom::Err; -/// # use nom::ErrorKind; -/// # fn main() { -/// named!(take_5, complete!(take!(5))); -/// -/// let a = &b"abcd"[..]; -/// let res_a = take_5(a); -/// assert_eq!(res_a, Err(Err::Error(error_position!(a, ErrorKind::Complete)))); -/// # } -/// ``` -/// -#[macro_export] -macro_rules! complete ( - ($i:expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind}; - - let i_ = $i.clone(); - match $submac!(i_, $($args)*) { - Err(Err::Incomplete(_)) => { - Err(Err::Error(error_position!($i, ErrorKind::Complete::))) - }, - rest => rest - } - } - ); - ($i:expr, $f:expr) => ( - complete!($i, call!($f)); - ); -); - -/// A bit like `std::try!`, this macro will return the remaining input and -/// parsed value if the child parser returned `Ok`, and will do an early -/// return for the `Err` side. 
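To make the `std::try!` analogy concrete: a successful step hands back `(rest, value)`, while any error is returned from the enclosing function immediately. A hedged, hand-written equivalent of a single `try_parse!` step (not the macro's literal expansion; `take!(2)` is just an arbitrary child parser), assuming nom 4.x's macro API:

```rust
#[macro_use]
extern crate nom;
use nom::IResult;

// Roughly what one `try_parse!(input, take!(2))` step does, spelled out by hand.
fn first_two(input: &[u8]) -> IResult<&[u8], &[u8]> {
    let (rest, out) = match take!(input, 2) {
        Ok(t) => t,              // keep parsing with `rest`
        Err(e) => return Err(e), // the early return on the `Err` side
    };
    Ok((rest, out))
}

fn main() {
    assert_eq!(first_two(&b"abcd"[..]), Ok((&b"cd"[..], &b"ab"[..])));
}
```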
-/// -/// this can provide more flexibility than `do_parse!` if needed -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::Err; -/// # use nom::ErrorKind; -/// # use nom::IResult; -/// -/// fn take_add(input:&[u8], size: u8) -> IResult<&[u8],&[u8]> { -/// let (i1, sz) = try_parse!(input, nom::be_u8); -/// let (i2, length) = try_parse!(i1, expr_opt!(size.checked_add(sz))); -/// let (i3, data) = try_parse!(i2, take!(length)); -/// return Ok((i3, data)); -/// } -/// # fn main() { -/// let arr1 = [1, 2, 3, 4, 5]; -/// let r1 = take_add(&arr1[..], 1); -/// assert_eq!(r1, Ok((&[4,5][..], &[2,3][..]))); -/// -/// let arr2 = [0xFE, 2, 3, 4, 5]; -/// // size is overflowing -/// let r1 = take_add(&arr2[..], 42); -/// assert_eq!(r1, Err(Err::Error(error_position!(&[2,3,4,5][..], ErrorKind::ExprOpt)))); -/// # } -/// ``` -#[macro_export] -macro_rules! try_parse ( - ($i:expr, $submac:ident!( $($args:tt)* )) => ({ - use $crate::lib::std::result::Result::*; - - match $submac!($i, $($args)*) { - Ok((i,o)) => (i,o), - Err(e) => return Err(e), - } - }); - ($i:expr, $f:expr) => ( - try_parse!($i, call!($f)) - ); -); - -/// `map!(I -> IResult, O -> P) => I -> IResult` -/// maps a function on the result of a parser -#[macro_export] -macro_rules! map( - // Internal parser, do not use directly - (__impl $i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( - { - pub fn _unify R>(f: F, t: T) -> R { - f(t) - } - - ($submac!($i, $($args)*)).map(|(i,o)| { - (i, _unify($g, o)) - }) - } - ); - ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( - map!(__impl $i, $submac!($($args)*), $g); - ); - ($i:expr, $f:expr, $g:expr) => ( - map!(__impl $i, call!($f), $g); - ); -); - -/// `map_res!(I -> IResult, O -> Result
<P>
) => I -> IResult` -/// maps a function returning a Result on the output of a parser -#[macro_export] -macro_rules! map_res ( - // Internal parser, do not use directly - (__impl $i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::Err; - - let i_ = $i.clone(); - match $submac!(i_, $($args)*) { - Ok((i,o)) => { - match $submac2!(o, $($args2)*) { - Ok(output) => Ok((i, output)), - Err(_) => { - let e = $crate::ErrorKind::MapRes; - Err(Err::Error(error_position!($i, e))) - }, - } - }, - Err(e) => Err(e), - } - } - ); - ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( - map_res!(__impl $i, $submac!($($args)*), call!($g)); - ); - ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( - map_res!(__impl $i, $submac!($($args)*), $submac2!($($args2)*)); - ); - ($i:expr, $f:expr, $g:expr) => ( - map_res!(__impl $i, call!($f), call!($g)); - ); - ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( - map_res!(__impl $i, call!($f), $submac!($($args)*)); - ); -); - -/// `map_res_err!(I -> IResult, O -> Result
<P, E>
) => I -> IResult` -/// maps a function returning a Result on the output of a parser, preserving the returned error -#[macro_export] -macro_rules! map_res_err ( - // Internal parser, do not use directly - (__impl $i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Context, Convert, Err}; - - let i_ = $i.clone(); - match $submac!(i_, $($args)*) { - Ok((i,o)) => { - match $submac2!(o, $($args2)*) { - Ok(output) => Ok((i, output)), - Err(e) => { - let e = Context::convert(Context::Code($i, $crate::ErrorKind::Custom(e))); - Err(Err::Error(error_node_position!($i, $crate::ErrorKind::MapRes, e))) - }, - } - }, - Err(e) => Err(e), - } - } - ); - ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( - map_res_err!(__impl $i, $submac!($($args)*), call!($g)); - ); - ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( - map_res_err!(__impl $i, $submac!($($args)*), $submac2!($($args2)*)); - ); - ($i:expr, $f:expr, $g:expr) => ( - map_res_err!(__impl $i, call!($f), call!($g)); - ); - ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( - map_res_err!(__impl $i, call!($f), $submac!($($args)*)); - ); -); - -/// `map_opt!(I -> IResult, O -> Option
<P>
) => I -> IResult` -/// maps a function returning an Option on the output of a parser -#[macro_export] -macro_rules! map_opt ( - // Internal parser, do not use directly - (__impl $i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::lib::std::option::Option::*; - use $crate::{Err,ErrorKind}; - - let i_ = $i.clone(); - match $submac!(i_, $($args)*) { - Ok((i, o)) => match $submac2!(o, $($args2)*) { - Some(output) => Ok((i, output)), - None => { - let e = ErrorKind::MapOpt; - Err(Err::Error(error_position!($i, e))) - } - }, - Err(e) => Err(e) - } - } - ); - ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( - map_opt!(__impl $i, $submac!($($args)*), call!($g)); - ); - ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( - map_opt!(__impl $i, $submac!($($args)*), $submac2!($($args2)*)); - ); - ($i:expr, $f:expr, $g:expr) => ( - map_opt!(__impl $i, call!($f), call!($g)); - ); - ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( - map_opt!(__impl $i, call!($f), $submac!($($args)*)); - ); -); - -/// `parse_to!(O) => I -> IResult` -/// uses the `parse` method from `std::str::FromStr` to convert the current -/// input to the specified type -/// -/// this will completely consume the input -#[macro_export] -macro_rules! parse_to ( - ($i:expr, $t:ty ) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::lib::std::option::Option; - use $crate::lib::std::option::Option::*; - use $crate::{Err,ErrorKind,Context}; - - use $crate::ParseTo; - use $crate::Slice; - use $crate::InputLength; - - let res: Option<$t> = ($i).parse_to(); - match res { - Some(output) => Ok(($i.slice($i.input_len()..), output)), - None => Err(Err::Error(Context::Code($i, ErrorKind::ParseTo::))) - } - } - ); -); - -/// `verify!(I -> IResult, O -> bool) => I -> IResult` -/// returns the result of the child parser if it satisfies a verification function -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!(check, verify!(nom::be_u32, |val:u32| val >= 0 && val < 3)); -/// # } -/// ``` -#[macro_export] -macro_rules! verify ( - // Internal parser, do not use directly - (__impl $i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind}; - - let i_ = $i.clone(); - match $submac!(i_, $($args)*) { - Err(e) => Err(e), - Ok((i, o)) => if $submac2!(o, $($args2)*) { - Ok((i, o)) - } else { - Err(Err::Error(error_position!($i, ErrorKind::Verify))) - } - } - } - ); - ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( - verify!(__impl $i, $submac!($($args)*), call!($g)); - ); - ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( - verify!(__impl $i, $submac!($($args)*), $submac2!($($args2)*)); - ); - ($i:expr, $f:expr, $g:expr) => ( - verify!(__impl $i, call!($f), call!($g)); - ); - ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( - verify!(__impl $i, call!($f), $submac!($($args)*)); - ); -); - -/// `value!(T, R -> IResult ) => R -> IResult` -/// -/// or `value!(T) => R -> IResult` -/// -/// If the child parser was successful, return the value. 
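Several combinators in this stretch (`map_res!`, `map_res_err!`, `map_opt!`, `parse_to!`) are documented without a usage example. A minimal, hedged sketch of the common `map_res!` pattern, assuming nom 4.x's macro API and the root-level `digit` parser:

```rust
#[macro_use]
extern crate nom;
use nom::digit;
use std::str::{self, FromStr};

// Recognize ASCII digits, then run two fallible conversions on the match:
// &[u8] -> &str via `str::from_utf8`, then &str -> u32 via `FromStr::from_str`.
named!(number<&[u8], u32>, map_res!(
    map_res!(digit, str::from_utf8),
    FromStr::from_str
));

fn main() {
    assert_eq!(number(&b"123;"[..]), Ok((&b";"[..], 123)));
}
```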
-/// If no child parser is provided, always return the value -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!(x, value!(42, delimited!(tag!("")))); -/// named!(y, delimited!(tag!(""))); -/// let r = x(&b" aaa"[..]); -/// assert_eq!(r, Ok((&b" aaa"[..], 42))); -/// -/// let r2 = y(&b" aaa"[..]); -/// assert_eq!(r2, Ok((&b" aaa"[..], 42))); -/// # } -/// ``` -#[macro_export] -macro_rules! value ( - ($i:expr, $res:expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - - match $submac!($i, $($args)*) { - Ok((i,_)) => { - Ok((i, $res)) - }, - Err(e) => Err(e), - } - } - ); - ($i:expr, $res:expr, $f:expr) => ( - value!($i, $res, call!($f)) - ); - ($i:expr, $res:expr) => ( - { - let res: $crate::IResult<_,_> = Ok(($i, $res)); - res - } - ); -); - -/// `expr_res!(Result) => I -> IResult` -/// evaluate an expression that returns a Result and returns a Ok((I,T)) if Ok -/// -/// See expr_opt for an example -#[macro_export] -macro_rules! expr_res ( - ($i:expr, $e:expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind}; - - match $e { - Ok(output) => Ok(($i, output)), - Err(_) => Err(Err::Error(error_position!($i, ErrorKind::ExprRes::))) - } - } - ); -); - -/// `expr_opt!(Option) => I -> IResult` -/// evaluate an expression that returns a Option and returns a Ok((I,T)) if Some -/// -/// Useful when doing computations in a chain -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::Err; -/// # use nom::IResult; -/// # use nom::{be_u8,ErrorKind}; -/// -/// fn take_add(input:&[u8], size: u8) -> IResult<&[u8],&[u8]> { -/// do_parse!(input, -/// sz: be_u8 >> -/// length: expr_opt!(size.checked_add(sz)) >> // checking for integer overflow (returns an Option) -/// data: take!(length) >> -/// (data) -/// ) -/// } -/// # fn main() { -/// let arr1 = [1, 2, 3, 4, 5]; -/// let r1 = take_add(&arr1[..], 1); -/// assert_eq!(r1, Ok((&[4,5][..], &[2,3][..]))); -/// -/// let arr2 = [0xFE, 2, 3, 4, 5]; -/// // size is overflowing -/// let r1 = take_add(&arr2[..], 42); -/// assert_eq!(r1, Err(Err::Error(error_position!(&[2,3,4,5][..], ErrorKind::ExprOpt)))); -/// # } -/// ``` -#[macro_export] -macro_rules! expr_opt ( - ($i:expr, $e:expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind}; - - match $e { - $crate::lib::std::option::Option::Some(output) => Ok(($i, output)), - $crate::lib::std::option::Option::None => Err(Err::Error(error_position!($i, ErrorKind::ExprOpt::))) - } - } - ); -); - -/// `opt!(I -> IResult) => I -> IResult>` -/// make the underlying parser optional -/// -/// returns an Option of the returned type. This parser returns `Some(result)` if the child parser -/// succeeds,`None` if it fails, and `Incomplete` if it did not have enough data to decide -/// -/// *Warning*: if you are using `opt` for some kind of optional ending token (like an end of line), -/// you should combine it with `complete` to make sure it works. -/// -/// As an example, `opt!(tag!("\r\n"))` will return `Incomplete` if it receives an empty input, -/// because `tag` does not have enough input to decide. -/// On the contrary, `opt!(complete!(tag!("\r\n")))` would return `None` as produced value, -/// since `complete!` transforms an `Incomplete` in an `Error`. 
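A short sketch of the point just made about `opt!` and `Incomplete`, assuming nom 4.x's streaming behavior for plain `&[u8]` input (hypothetical parser names):

```rust
#[macro_use]
extern crate nom;
use nom::{Err, Needed};

// `opt!` alone propagates `Incomplete` from `tag!`, because the tag cannot
// decide on empty input; wrapping the child in `complete!` yields `None` instead.
named!(maybe_eol<&[u8], Option<&[u8]> >, opt!(tag!("\r\n")));
named!(maybe_eol_complete<&[u8], Option<&[u8]> >, opt!(complete!(tag!("\r\n"))));

fn main() {
    let empty = &b""[..];
    assert_eq!(maybe_eol(empty), Err(Err::Incomplete(Needed::Size(2))));
    assert_eq!(maybe_eol_complete(empty), Ok((empty, None)));
}
```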
-/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!( o<&[u8], Option<&[u8]> >, opt!( tag!( "abcd" ) ) ); -/// -/// let a = b"abcdef"; -/// let b = b"bcdefg"; -/// assert_eq!(o(&a[..]), Ok((&b"ef"[..], Some(&b"abcd"[..])))); -/// assert_eq!(o(&b[..]), Ok((&b"bcdefg"[..], None))); -/// # } -/// ``` -#[macro_export] -macro_rules! opt( - ($i:expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::lib::std::option::Option::*; - use $crate::Err; - - let i_ = $i.clone(); - match $submac!(i_, $($args)*) { - Ok((i,o)) => Ok((i, Some(o))), - Err(Err::Error(_)) => Ok(($i, None)), - Err(e) => Err(e), - } - } - ); - ($i:expr, $f:expr) => ( - opt!($i, call!($f)); - ); -); - -/// `opt_res!(I -> IResult) => I -> IResult>` -/// make the underlying parser optional -/// -/// returns a Result, with Err containing the parsing error -/// -/// ```ignore -/// # #[macro_use] extern crate nom; -/// # #[cfg(feature = "verbose-errors")] -/// # use nom::Err::Position; -/// # use nom::ErrorKind; -/// # fn main() { -/// named!( o<&[u8], Result<&[u8], nom::Err<&[u8]> > >, opt_res!( tag!( "abcd" ) ) ); -/// -/// let a = b"abcdef"; -/// let b = b"bcdefg"; -/// assert_eq!(o(&a[..]), Ok((&b"ef"[..], Ok(&b"abcd"[..]))); -/// assert_eq!(o(&b[..]), Ok((&b"bcdefg"[..], Err(error_position!(&b[..], ErrorKind::Tag)))); -/// # } -/// ``` -#[macro_export] -macro_rules! opt_res ( - ($i:expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::Err; - - let i_ = $i.clone(); - match $submac!(i_, $($args)*) { - Ok((i,o)) => Ok((i, Ok(o))), - Err(Err::Error(e)) => Ok(($i, Err(Err::Error(e)))), - // in case of failure, we return a real error - Err(e) => Err(e) - } - } - ); - ($i:expr, $f:expr) => ( - opt_res!($i, call!($f)); - ); -); - -/// `cond_with_error!(bool, I -> IResult) => I -> IResult>` -/// Conditional combinator -/// -/// Wraps another parser and calls it if the -/// condition is met. This combinator returns -/// an Option of the return type of the child -/// parser. -/// -/// This is especially useful if a parser depends -/// on the value returned by a preceding parser in -/// a `do_parse!`. -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::IResult; -/// # fn main() { -/// let b = true; -/// let f: Box IResult<&[u8],Option<&[u8]>>> = Box::new(closure!(&'static[u8], -/// cond!( b, tag!("abcd") )) -/// ); -/// -/// let a = b"abcdef"; -/// assert_eq!(f(&a[..]), Ok((&b"ef"[..], Some(&b"abcd"[..])))); -/// -/// let b2 = false; -/// let f2:Box IResult<&[u8],Option<&[u8]>>> = Box::new(closure!(&'static[u8], -/// cond!( b2, tag!("abcd") )) -/// ); -/// assert_eq!(f2(&a[..]), Ok((&b"abcdef"[..], None))); -/// # } -/// ``` -/// -#[macro_export] -macro_rules! cond_with_error( - ($i:expr, $cond:expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - - if $cond { - match $submac!($i, $($args)*) { - Ok((i,o)) => Ok((i, $crate::lib::std::option::Option::Some(o))), - Err(e) => Err(e), - } - } else { - let res: $crate::lib::std::result::Result<_,_> = Ok(($i, $crate::lib::std::option::Option::None)); - res - } - } - ); - ($i:expr, $cond:expr, $f:expr) => ( - cond_with_error!($i, $cond, call!($f)); - ); -); - -/// `cond!(bool, I -> IResult) => I -> IResult>` -/// Conditional combinator -/// -/// Wraps another parser and calls it if the -/// condition is met. This combinator returns -/// an Option of the return type of the child -/// parser. 
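As the description below notes, `cond!` is most useful when the condition comes from a value parsed earlier in a `do_parse!` chain. A hedged sketch with hypothetical field names, assuming nom 4.x's macro API:

```rust
#[macro_use]
extern crate nom;
use nom::be_u8;

// A flag byte parsed first decides, via `cond!`, whether an optional payload follows.
named!(flagged<&[u8], Option<&[u8]> >, do_parse!(
    flag:    be_u8                          >>
    payload: cond!(flag == 1, tag!("abcd")) >>
    (payload)
));

fn main() {
    assert_eq!(flagged(&b"\x01abcdrest"[..]), Ok((&b"rest"[..], Some(&b"abcd"[..]))));
    assert_eq!(flagged(&b"\x00rest"[..]), Ok((&b"rest"[..], None)));
}
```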
-/// -/// This is especially useful if a parser depends -/// on the value returned by a preceding parser in -/// a `do_parse!`. -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::IResult; -/// # fn main() { -/// let b = true; -/// let f: Box IResult<&[u8],Option<&[u8]>>> = Box::new(closure!(&'static[u8], -/// cond!( b, tag!("abcd") )) -/// ); -/// -/// let a = b"abcdef"; -/// assert_eq!(f(&a[..]), Ok((&b"ef"[..], Some(&b"abcd"[..])))); -/// -/// let b2 = false; -/// let f2:Box IResult<&[u8],Option<&[u8]>>> = Box::new(closure!(&'static[u8], -/// cond!( b2, tag!("abcd") )) -/// ); -/// assert_eq!(f2(&a[..]), Ok((&b"abcdef"[..], None))); -/// # } -/// ``` -/// -#[macro_export] -macro_rules! cond( - ($i:expr, $cond:expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::lib::std::option::Option::*; - use $crate::Err; - - if $cond { - let i_ = $i.clone(); - match $submac!(i_, $($args)*) { - Ok((i,o)) => Ok((i, Some(o))), - Err(Err::Error(_)) => { - Ok(($i, None)) - }, - Err(e) => Err(e), - } - } else { - Ok(($i, None)) - } - } - ); - ($i:expr, $cond:expr, $f:expr) => ( - cond!($i, $cond, call!($f)); - ); -); - -/// `cond_reduce!(bool, I -> IResult) => I -> IResult` -/// Conditional combinator with error -/// -/// Wraps another parser and calls it if the -/// condition is met. This combinator returns -/// an error if the condition is false -/// -/// This is especially useful if a parser depends -/// on the value returned by a preceding parser in -/// a `do_parse!`. -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::{Err,ErrorKind,IResult}; -/// # fn main() { -/// let b = true; -/// let f = closure!(&'static[u8], -/// cond_reduce!( b, tag!("abcd") ) -/// ); -/// -/// let a = b"abcdef"; -/// assert_eq!(f(&a[..]), Ok((&b"ef"[..], &b"abcd"[..]))); -/// -/// let b2 = false; -/// let f2 = closure!(&'static[u8], -/// cond_reduce!( b2, tag!("abcd") ) -/// ); -/// assert_eq!(f2(&a[..]), Err(Err::Error(error_position!(&a[..], ErrorKind::CondReduce)))); -/// # } -/// ``` -/// -#[macro_export] -macro_rules! cond_reduce( - ($i:expr, $cond:expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Convert,Err,ErrorKind,IResult}; - let default_err = Err(Err::convert(Err::Error(error_position!($i, ErrorKind::CondReduce::)))); - - if $cond { - let sub_res = $submac!($i, $($args)*); - fn unify_types(_: &IResult, _: &IResult) {} - unify_types(&sub_res, &default_err); - - match sub_res { - Ok((i,o)) => Ok((i, o)), - Err(e) => Err(e), - } - } else { - default_err - } - } - ); - ($i:expr, $cond:expr, $f:expr) => ( - cond_reduce!($i, $cond, call!($f)); - ); -); - -/// `peek!(I -> IResult) => I -> IResult` -/// returns a result without consuming the input -/// -/// the embedded parser may return Err(Err::Incomplete -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!(ptag, peek!( tag!( "abcd" ) ) ); -/// -/// let r = ptag(&b"abcdefgh"[..]); -/// assert_eq!(r, Ok((&b"abcdefgh"[..], &b"abcd"[..]))); -/// # } -/// ``` -#[macro_export] -macro_rules! 
peek( - ($i:expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Convert,Err}; - - let i_ = $i.clone(); - match $submac!(i_, $($args)*) { - Ok((_,o)) => Ok(($i, o)), - Err(e) => Err(Err::convert(e)), - } - } - ); - ($i:expr, $f:expr) => ( - peek!($i, call!($f)); - ); -); - -/// `not!(I -> IResult) => I -> IResult` -/// returns a result only if the embedded parser returns Error or Err(Err::Incomplete) -/// does not consume the input -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::Err; -/// # use nom::ErrorKind; -/// # fn main() { -/// named!(not_e, do_parse!( -/// res: tag!("abc") >> -/// not!(char!('e')) >> -/// (res) -/// )); -/// -/// let r = not_e(&b"abcd"[..]); -/// assert_eq!(r, Ok((&b"d"[..], &b"abc"[..]))); -/// -/// let r2 = not_e(&b"abce"[..]); -/// assert_eq!(r2, Err(Err::Error(error_position!(&b"e"[..], ErrorKind::Not)))); -/// # } -/// ``` -#[macro_export] -macro_rules! not( - ($i:expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Context,ErrorKind,Err,IResult}; - - let i_ = $i.clone(); - - //we need this to avoid type inference errors - fn unify_types(_: &IResult, _: &IResult) {} - - match $submac!(i_, $($args)*) { - Err(Err::Failure(e)) => Err(Err::Failure(e)), - Err(Err::Incomplete(i)) => Err(Err::Incomplete(i)), - Err(_) => Ok(($i, ())), - Ok(_) => { - let c = Context::Code($i, ErrorKind::Not); - let err = Err(Err::Error(c)); - let default = Ok(($i, ())); - - unify_types(&err, &default); - err - }, - } - } - ); - ($i:expr, $f:expr) => ( - not!($i, call!($f)); - ); -); - -/// `tap!(name: I -> IResult => { block }) => I -> IResult` -/// allows access to the parser's result without affecting it -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # use std::str; -/// # fn main() { -/// named!(ptag, tap!(res: tag!( "abcd" ) => { println!("recognized {}", str::from_utf8(res).unwrap()) } ) ); -/// -/// let r = ptag(&b"abcdefgh"[..]); -/// assert_eq!(r, Ok((&b"efgh"[..], &b"abcd"[..]))); -/// # } -/// ``` -#[macro_export] -macro_rules! tap ( - ($i:expr, $name:ident : $submac:ident!( $($args:tt)* ) => $e:expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Convert,Err,Needed,IResult}; - - match $submac!($i, $($args)*) { - Ok((i,o)) => { - let $name = o; - $e; - Ok((i, $name)) - }, - Err(e) => Err(Err::convert(e)), - } - } - ); - ($i:expr, $name: ident: $f:expr => $e:expr) => ( - tap!($i, $name: call!($f) => $e); - ); -); - -/// `eof!()` returns its input if it is at the end of input data -/// -/// This combinator works with the `AtEof` trait that input types must implement. -/// If an input type's `at_eof` method returns true, it means there will be no -/// more refills (like what happens when buffering big files). -/// -/// When we're at the end of the data and `at_eof` returns true, this combinator -/// will succeed -/// -/// TODO: example -#[macro_export] -macro_rules! eof ( - ($i:expr,) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{AtEof,Err,ErrorKind}; - - use $crate::InputLength; - if ($i).at_eof() && ($i).input_len() == 0 { - Ok(($i, $i)) - } else { - //FIXME what do we do with need_more? - Err(Err::Error(error_position!($i, ErrorKind::Eof::))) - } - } - ); -); - -/// `exact!()` will fail if the child parser does not consume the whole data -/// -/// This combinator works with the `AtEof` trait that input types must implement. 
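Both `eof!` above and `exact!`, whose description continues below, are marked "TODO: example". A minimal, hedged sketch using `CompleteByteSlice`, whose `at_eof` always returns true:

```rust
#[macro_use]
extern crate nom;
use nom::types::CompleteByteSlice;

// `exact!` succeeds only if the child parser consumes the whole input;
// the inner `eof!` check relies on `CompleteByteSlice::at_eof()` being true.
named!(whole_abcd<CompleteByteSlice, CompleteByteSlice>, exact!(tag!("abcd")));

fn main() {
    assert!(whole_abcd(CompleteByteSlice(b"abcd")).is_ok());
    // trailing input makes the inner `eof!`, and therefore `exact!`, fail
    assert!(whole_abcd(CompleteByteSlice(b"abcdef")).is_err());
}
```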
-/// If an input type's `at_eof` method returns true, it means there will be no -/// more refills (like what happens when buffering big files). -/// -/// TODO: example -#[macro_export] -macro_rules! exact ( - ($i:expr, $submac:ident!( $($args:tt)* )) => ({ - terminated!($i, $submac!( $($args)*), eof!()) - }); - ($i:expr, $f:expr) => ( - exact!($i, call!($f)); - ); -); - -/// `recognize!(I -> IResult ) => I -> IResult` -/// if the child parser was successful, return the consumed input as produced value -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!(x, recognize!(delimited!(tag!("")))); -/// let r = x(&b" aaa"[..]); -/// assert_eq!(r, Ok((&b" aaa"[..], &b""[..]))); -/// # } -/// ``` -#[macro_export] -macro_rules! recognize ( - ($i:expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - - use $crate::Offset; - use $crate::Slice; - let i_ = $i.clone(); - match $submac!(i_, $($args)*) { - Ok((i,_)) => { - let index = (&$i).offset(&i); - Ok((i, ($i).slice(..index))) - }, - Err(e) => Err(e) - } - } - ); - ($i:expr, $f:expr) => ( - recognize!($i, call!($f)) - ); -); - -#[cfg(test)] -mod tests { - use internal::{Err, IResult, Needed}; - use util::ErrorKind; - #[cfg(feature = "alloc")] - use lib::std::boxed::Box; - - // reproduce the tag and take macros, because of module import order - macro_rules! tag ( - ($i:expr, $tag: expr) => ({ - use $crate::lib::std::result::Result::*; - use $crate::{Err,Needed,IResult,ErrorKind}; - use $crate::{Compare,CompareResult,InputLength,Slice,need_more}; - - let res: IResult<_,_> = match ($i).compare($tag) { - CompareResult::Ok => { - let blen = $tag.input_len(); - Ok(($i.slice(blen..), $i.slice(..blen))) - }, - CompareResult::Incomplete => { - need_more($i, Needed::Size($tag.input_len())) - }, - CompareResult::Error => { - let e:ErrorKind = ErrorKind::Tag; - Err(Err::Error($crate::Context::Code($i, e))) - } - }; - res - }); - ); - - macro_rules! 
take( - ($i:expr, $count:expr) => ( - { - let cnt = $count as usize; - let res:IResult<&[u8],&[u8]> = if $i.len() < cnt { - $crate::need_more($i, $crate::Needed::Size(cnt)) - } else { - Ok((&$i[cnt..],&$i[0..cnt])) - }; - res - } - ); - ); - - mod pub_named_mod { - named!(pub tst, tag!("abcd")); - } - - #[test] - fn pub_named_test() { - let a = &b"abcd"[..]; - let res = pub_named_mod::tst(a); - assert_eq!(res, Ok((&b""[..], a))); - } - - mod pub_crate_named_mod { - named!(pub(crate) tst, tag!("abcd")); - } - - #[test] - fn pub_crate_named_test() { - let a = &b"abcd"[..]; - let res = pub_crate_named_mod::tst(a); - assert_eq!(res, Ok((&b""[..], a))); - } - - #[test] - fn apply_test() { - fn sum2(a: u8, b: u8) -> u8 { - a + b - } - fn sum3(a: u8, b: u8, c: u8) -> u8 { - a + b + c - } - let a = apply!(1, sum2, 2); - let b = apply!(1, sum3, 2, 3); - - assert_eq!(a, 3); - assert_eq!(b, 6); - } - - #[test] - fn opt() { - named!(opt_abcd<&[u8],Option<&[u8]> >, opt!(tag!("abcd"))); - - let a = &b"abcdef"[..]; - let b = &b"bcdefg"[..]; - let c = &b"ab"[..]; - assert_eq!(opt_abcd(a), Ok((&b"ef"[..], Some(&b"abcd"[..])))); - assert_eq!(opt_abcd(b), Ok((&b"bcdefg"[..], None))); - assert_eq!(opt_abcd(c), Err(Err::Incomplete(Needed::Size(4)))); - } - - #[cfg(feature = "verbose-errors")] - #[test] - fn opt_res() { - named!(opt_res_abcd<&[u8], Result<&[u8], Err<&[u8]> > >, opt_res!(tag!("abcd"))); - - let a = &b"abcdef"[..]; - let b = &b"bcdefg"[..]; - let c = &b"ab"[..]; - assert_eq!(opt_res_abcd(a), Ok((&b"ef"[..], Ok(&b"abcd"[..])))); - assert_eq!( - opt_res_abcd(b), - Ok(( - &b"bcdefg"[..], - Err(Err::Error(error_position!(b, ErrorKind::Tag))) - )) - ); - assert_eq!(opt_res_abcd(c), Err(Err::Incomplete(Needed::Size(4)))); - } - - #[cfg(not(feature = "verbose-errors"))] - #[test] - fn opt_res() { - named!(opt_res_abcd<&[u8], Result<&[u8], Err<&[u8], u32>> >, opt_res!(tag!("abcd"))); - - let a = &b"abcdef"[..]; - let b = &b"bcdefg"[..]; - let c = &b"ab"[..]; - assert_eq!(opt_res_abcd(a), Ok((&b"ef"[..], Ok(&b"abcd"[..])))); - assert_eq!( - opt_res_abcd(b), - Ok(( - &b"bcdefg"[..], - Err(Err::Error(error_position!(b, ErrorKind::Tag))) - )) - ); - assert_eq!(opt_res_abcd(c), Err(Err::Incomplete(Needed::Size(4)))); - } - - use lib::std::convert::From; - #[derive(Debug, PartialEq)] - pub struct CustomError(&'static str); - impl From for CustomError { - fn from(_: u32) -> Self { - CustomError("test") - } - } - - #[test] - #[cfg(feature = "alloc")] - fn cond() { - let f_true: Box IResult<&[u8], Option<&[u8]>, CustomError>> = Box::new(closure!( - &'static [u8], - fix_error!(CustomError, cond!(true, tag!("abcd"))) - )); - let f_false: Box IResult<&[u8], Option<&[u8]>, CustomError>> = Box::new(closure!( - &'static [u8], - fix_error!(CustomError, cond!(false, tag!("abcd"))) - )); - //let f_false = closure!(&'static [u8], cond!( false, tag!("abcd") ) ); - - assert_eq!(f_true(&b"abcdef"[..]), Ok((&b"ef"[..], Some(&b"abcd"[..])))); - assert_eq!(f_true(&b"ab"[..]), Err(Err::Incomplete(Needed::Size(4)))); - assert_eq!(f_true(&b"xxx"[..]), Ok((&b"xxx"[..], None))); - - assert_eq!(f_false(&b"abcdef"[..]), Ok((&b"abcdef"[..], None))); - assert_eq!(f_false(&b"ab"[..]), Ok((&b"ab"[..], None))); - assert_eq!(f_false(&b"xxx"[..]), Ok((&b"xxx"[..], None))); - } - - #[test] - #[cfg(feature = "alloc")] - fn cond_wrapping() { - // Test that cond!() will wrap a given identifier in the call!() macro. 
- named!(tag_abcd, tag!("abcd")); - let f_true: Box IResult<&[u8], Option<&[u8]>, CustomError>> = Box::new(closure!( - &'static [u8], - fix_error!(CustomError, cond!(true, tag_abcd)) - )); - let f_false: Box IResult<&[u8], Option<&[u8]>, CustomError>> = Box::new(closure!( - &'static [u8], - fix_error!(CustomError, cond!(false, tag_abcd)) - )); - //let f_false = closure!(&'static [u8], cond!( b2, tag!("abcd") ) ); - - assert_eq!(f_true(&b"abcdef"[..]), Ok((&b"ef"[..], Some(&b"abcd"[..])))); - assert_eq!(f_true(&b"ab"[..]), Err(Err::Incomplete(Needed::Size(4)))); - assert_eq!(f_true(&b"xxx"[..]), Ok((&b"xxx"[..], None))); - - assert_eq!(f_false(&b"abcdef"[..]), Ok((&b"abcdef"[..], None))); - assert_eq!(f_false(&b"ab"[..]), Ok((&b"ab"[..], None))); - assert_eq!(f_false(&b"xxx"[..]), Ok((&b"xxx"[..], None))); - } - - #[test] - fn peek() { - named!(peek_tag<&[u8],&[u8]>, peek!(tag!("abcd"))); - - assert_eq!(peek_tag(&b"abcdef"[..]), Ok((&b"abcdef"[..], &b"abcd"[..]))); - assert_eq!(peek_tag(&b"ab"[..]), Err(Err::Incomplete(Needed::Size(4)))); - assert_eq!( - peek_tag(&b"xxx"[..]), - Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) - ); - } - - #[test] - fn not() { - use types::CompleteStr; - - named!(not_aaa<()>, not!(tag!("aaa"))); - assert_eq!( - not_aaa(&b"aaa"[..]), - Err(Err::Error(error_position!(&b"aaa"[..], ErrorKind::Not))) - ); - assert_eq!(not_aaa(&b"aa"[..]), Err(Err::Incomplete(Needed::Size(3)))); - assert_eq!(not_aaa(&b"abcd"[..]), Ok((&b"abcd"[..], ()))); - - named!(not_aaa_complete, not!(tag!("aaa"))); - assert_eq!( - not_aaa_complete(CompleteStr("aaa")), - Err(Err::Error(error_position!( - CompleteStr("aaa"), - ErrorKind::Not - ))) - ); - assert_eq!( - not_aaa_complete(CompleteStr("aa")), - Ok((CompleteStr("aa"), ())) - ); - assert_eq!( - not_aaa_complete(CompleteStr("abcd")), - Ok((CompleteStr("abcd"), ())) - ); - } - - #[test] - fn verify() { - named!(test, verify!(take!(5), |slice: &[u8]| slice[0] == b'a')); - assert_eq!(test(&b"bcd"[..]), Err(Err::Incomplete(Needed::Size(5)))); - assert_eq!( - test(&b"bcdefg"[..]), - Err(Err::Error(error_position!( - &b"bcdefg"[..], - ErrorKind::Verify - ))) - ); - assert_eq!(test(&b"abcdefg"[..]), Ok((&b"fg"[..], &b"abcde"[..]))); - } - - #[test] - fn parse_to() { - use util::Convert; - - assert_eq!( - parse_to!("ab", usize), - Err(Err::Error(error_position!( - "ab", - ErrorKind::ParseTo - ))) - ); - assert_eq!(parse_to!("42", usize), Ok(("", 42))); - assert_eq!(ErrorKind::::convert(ErrorKind::ParseTo::), ErrorKind::ParseTo::); - } - - #[test] - fn map_res_err() { - use Context; - use be_u8; - - #[derive(Debug, Eq, PartialEq)] - enum ParseError { - InvalidValue(u8), - } - - impl From for ParseError { - fn from(_: u32) -> Self { - unreachable!() - } - } - - #[derive(Debug, Eq, PartialEq)] - enum ValidValue { - One, - Two, - } - - fn validate(value: u8) -> Result { - match value { - b'1' => Ok(ValidValue::One), - b'2' => Ok(ValidValue::Two), - _ => Err(ParseError::InvalidValue(value)) - } - } - - named!(test<&[u8], ValidValue, ParseError>, - map_res_err!(fix_error!(ParseError, be_u8), validate) - ); - - assert_eq!(test(&b"1"[..]), Ok((&b""[..], ValidValue::One))); - assert_eq!(test(&b"2"[..]), Ok((&b""[..], ValidValue::Two))); - - #[cfg(feature = "verbose-errors")] - { - assert_eq!( - test(&b"3"[..]), - Err( - Err::Error( - Context::List( - vec![ - (&b"3"[..], ErrorKind::Custom(ParseError::InvalidValue(b'3'))), - (&b"3"[..], ErrorKind::MapRes) - ] - ) - ) - ) - ); - } - - #[cfg(not(feature = "verbose-errors"))] - { - 
assert_eq!(test(&b"3"[..]), Err(Err::Error(Context::Code(&b"3"[..], ErrorKind::MapRes)))); - } - } -} diff --git a/third_party/rust/nom/src/methods.rs b/third_party/rust/nom/src/methods.rs index 7af12b7733..520f1e8bfd 100644 --- a/third_party/rust/nom/src/methods.rs +++ b/third_party/rust/nom/src/methods.rs @@ -1,624 +1,20 @@ -//! Method macro combinators -//! -//! These macros make parsers as methods of structs -//! and that can take methods of structs to call -//! as parsers. -//! -//! There is a trick to make them easier to assemble, -//! combinators are defined like this: -//! -//! ```ignore -//! macro_rules! tag ( -//! ($i:expr, $inp: expr) => ( -//! { -//! ... -//! } -//! ); -//! ); -//! ``` -//! -//! But when used as methods in other combinators, are used -//! like this: -//! -//! ```ignore -//! method!(my_function >, self, tag!("abcd")); -//! ``` -//! -//! Internally, other combinators will rewrite -//! that call to pass the input as second argument: -//! -//! ```ignore -//! macro_rules! method ( -//! ($name:ident<$a:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( -//! fn $name( $self_: $a, i: &[u8] ) -> IResult<&[u8], &[u8]> { -//! $submac!(i, $($args)*) -//! } -//! ); -//! ); -//! ``` -//! -//! The `method!` macro is similar to the `named!` macro in the macros module. -//! While `named!` will create a parser function, `method!` will create a parser -//! method on the struct it is defined in. -//! -//! Compared to the `named!` macro there are a few differences in how they are -//! invoked. A `method!` invocation always has to have the type of `self` -//! declared and it can't be a reference due to Rust's borrow lifetime -//! restrictions: -//! -//! ```ignore -//! // -`self`'s type- -//! method!(method_name< Parser<'a> >, ...); -//! ``` -//! `self`'s type always comes first. -//! The next difference is you have to input the self struct. Due to Rust's -//! macro hygiene the macro can't declare it on it's own. -//! -//! ```ignore -//! // -self- -//! method!(method_name, &'a str, &'a str>, self, ...); -//! ``` -//! When making a parsing struct with parsing methods, due to the static borrow -//! checker,calling any parsing methods on self (or any other parsing struct) -//! will cause self to be moved for the rest of the method.To get around this -//! restriction all self is moved into the called method and then the called -//! method will return self to the caller. -//! -//! To call a method on self you need to use the `call_m!` macro. For example: -//! -//! ```ignore -//! struct<'a> Parser<'a> { -//! parsed: &'a str, -//! } -//! impl<'a> Parser<'a> { -//! // Constructor omitted for brevity -//! method!(take4, &'a str, &'a str>, self, take!(4)); -//! method!(caller, &'a str, &'a str>, self, call_m!(self.take4)); -//! } -//! ``` -//! More complicated combinations still mostly look the same as their `named!` -//! counterparts: -//! -//! ```ignore -//! method!(pub simple_chain<&mut Parser<'a>, &'a str, &'a str>, self, -//! do_parse!( -//! call_m!(self.tag_abc) >> -//! call_m!(self.tag_def) >> -//! call_m!(self.tag_ghi) >> -//! last: map!(call_m!(self.simple_peek), |parsed| sb.parsed = parsed) >> -//! (last) -//! ) -//! ); -//! ``` -//! The three additions to method definitions to remember are: -//! 1. Specify `self`'s type -//! 2. Pass `self` to the macro -//! 4. Call parser methods using the `call_m!` macro. +//! 
method combinators -/// Makes a method from a parser combination -/// -/// The must be set up because the compiler needs -/// the information -/// -/// ```ignore -/// method!(my_function >( &[u8] ) -> &[u8], tag!("abcd")); -/// // first type parameter is `self`'s type, second is input, third is output -/// method!(my_function, &[u8], &[u8]>, tag!("abcd")); -/// //prefix them with 'pub' to make the methods public -/// method!(pub my_function,&[u8], &[u8]>, tag!("abcd")); -/// ``` +/// do not use: method combinators moved to the nom-methods crate #[macro_export] macro_rules! method ( - // Non-public immutable self - ($name:ident<$a:ty>( $i:ty ) -> $o:ty, $self_:ident, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - fn $name( $self_: $a, i: $i ) -> ($a, $crate::IResult<$i,$o,u32>) { - let result = $submac!(i, $($args)*); - ($self_, result) - } - ); - ($name:ident<$a:ty,$i:ty,$o:ty,$e:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - fn $name( $self_: $a, i: $i ) -> ($a, $crate::IResult<$i, $o, $e>) { - let result = $submac!(i, $($args)*); - ($self_, result) - } - ); - ($name:ident<$a:ty,$i:ty,$o:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - fn $name( $self_: $a, i: $i ) -> ($a, $crate::IResult<$i,$o,u32>) { - let result = $submac!(i, $($args)*); - ($self_, result) - } - ); - ($name:ident<$a:ty,$o:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - fn $name( $self_: $a, i: &[u8] ) -> ($a, $crate::IResult<&[u8], $o, u32>) { - let result = $submac!(i, $($args)*); - ($self_, result) - } - ); - ($name:ident<$a:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - fn $name( $self_: $a, i: &[u8] ) -> ($a, $crate::IResult<&[u8], &[u8], u32>) { - let result = $submac!(i, $($args)*); - ($self_, result) - } - ); - // Public immutable self - (pub $name:ident<$a:ty>( $i:ty ) -> $o:ty, $self_:ident, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - pub fn $name( $self_: $a, i: $i ) -> ($a, $crate::IResult<$i,$o,u32>) { - let result = $submac!(i, $($args)*); - ($self_, result) - } - ); - (pub $name:ident<$a:ty,$i:ty,$o:ty,$e:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - pub fn $name( $self_: $a, i: $i ) -> ($a, $crate::IResult<$i, $o, $e>) { - let result = $submac!(i, $($args)*); - ($self_, result) - } - ); - (pub $name:ident<$a:ty,$i:ty,$o:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - pub fn $name( $self_: $a,i: $i ) -> ($a, $crate::IResult<$i,$o,u32>) { - let result = $submac!(i, $($args)*); - ($self_, result) - } - ); - (pub $name:ident<$a:ty,$o:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - pub fn $name( $self_: $a, i: &[u8] ) -> ($a, $crate::IResult<&[u8], $o, u32>) { - let result = $submac!(i, $($args)*); - ($self_, result) - } - ); - (pub $name:ident<$a:ty>, $self_:ident, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - pub fn $name( $self_: $a, i: &[u8] ) -> ($a, $crate::IResult<&[u8], &[u8], u32>) { - let result = $submac!(i, $($args)*); - ($self_, result) - } - ); - // Non-public mutable self - ($name:ident<$a:ty>( $i:ty ) -> $o:ty, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - fn $name( mut $self_: $a, i: $i ) -> ($a, $crate::IResult<$i,$o,u32>) { - let result = $submac!(i, $($args)*); - ($self_, result) - } - ); - 
($name:ident<$a:ty,$i:ty,$o:ty,$e:ty>, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - fn $name( mut $self_: $a, i: $i ) -> ($a, $crate::IResult<$i, $o, $e>) { - let result = $submac!(i, $($args)*); - ($self_, result) - } - ); - ($name:ident<$a:ty,$i:ty,$o:ty>, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - fn $name( mut $self_: $a, i: $i ) -> ($a, $crate::IResult<$i,$o,u32>) { - let result = $submac!(i, $($args)*); - ($self_, result) - } - ); - ($name:ident<$a:ty,$o:ty>, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - fn $name( mut $self_: $a, i: &[u8] ) -> ($a, $crate::IResult<&[u8], $o, u32>) { - let result = $submac!(i, $($args)*); - ($self_, result) - } - ); - ($name:ident<$a:ty>, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - fn $name( mut $self_: $a, i: &[u8] ) -> ($a, $crate::IResult<&[u8], &[u8], u32>) { - let result = $submac!(i, $($args)*); - ($self_, result) - } - ); - // Public mutable self - (pub $name:ident<$a:ty>( $i:ty ) -> $o:ty, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - pub fn $name( mut $self_: $a, i: $i ) -> ($a, $crate::IResult<$i,$o,u32>) { - let result = $submac!(i, $($args)*); - ($self_, result) - } - ); - (pub $name:ident<$a:ty,$i:ty,$o:ty,$e:ty>, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - pub fn $name( mut $self_: $a, i: $i ) -> ($a, $crate::IResult<$i, $o, $e>) { - let result = $submac!(i, $($args)*); - ($self_, result) - } - ); - (pub $name:ident<$a:ty,$i:ty,$o:ty>, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - pub fn $name( mut $self_: $a,i: $i ) -> ($a, $crate::IResult<$i,$o,u32>) { - let result = $submac!(i, $($args)*); - ($self_, result) - } - ); - (pub $name:ident<$a:ty,$o:ty>, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - pub fn $name( mut $self_: $a, i: &[u8] ) -> ($a, $crate::IResult<&[u8], $o, u32>) { - let result = $submac!(i, $($args)*); - ($self_, result) - } - ); - (pub $name:ident<$a:ty>, mut $self_:ident, $submac:ident!( $($args:tt)* )) => ( - #[allow(unused_variables)] - pub fn $name( mut $self_: $a, i: &[u8] ) -> ($a, $crate::IResult<&[u8], &[u8], u32>) { - let result = $submac!(i, $($args)*); - ($self_, result) - } - ); + ($($args:tt)*) => (compile_error!("method combinators moved to the nom-methods crate");); ); -/// Used to called methods then move self back into self +/// do not use: method combinators moved to the nom-methods crate #[macro_export] macro_rules! call_m ( - ($i:expr, $self_:ident.$method:ident) => ( - { - let (tmp, res) = $self_.$method($i); - $self_ = tmp; - res - } - ); - ($i:expr, $self_:ident.$method:ident, $($args:expr),* ) => ( - { - let (tmp, res) = $self_.$method($i, $($args),*); - $self_ = tmp; - res - } - ); + ($($args:tt)*) => (compile_error!("method combinators moved to the nom-methods crate");); ); -/// emulate function currying for method calls on structs -/// `apply_m!(self.my_function, arg1, arg2, ...)` becomes `self.my_function(input, arg1, arg2, ...)` -/// -/// Supports up to 6 arguments +/// do not use: method combinators moved to the nom-methods crate #[macro_export] macro_rules! 
apply_m ( - ($i:expr, $self_:ident.$method:ident, $($args:expr),* ) => ( { let (tmp, res) = $self_.$method( $i, $($args),* ); $self_ = tmp; res } ); + ($($args:tt)*) => (compile_error!("method combinators moved to the nom-methods crate");); ); -#[cfg(test)] -#[allow(deprecated)] -mod tests { - // reproduce the tag_s and take_s macros, because of module import order - macro_rules! tag_s ( - ($i:expr, $tag: expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind,Needed,IResult, need_more}; - - let res: IResult<_,_> = if $tag.len() > $i.len() { - need_more($i, Needed::Size($tag.len())) - //} else if &$i[0..$tag.len()] == $tag { - } else if ($i).starts_with($tag) { - Ok((&$i[$tag.len()..], &$i[0..$tag.len()])) - } else { - Err(Err::Error(error_position!($i, ErrorKind::TagStr))) - }; - res - } - ); - ); - - macro_rules! take_s ( - ($i:expr, $count:expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Needed,IResult,need_more}; - - let cnt = $count as usize; - let res: IResult<_,_> = if $i.chars().count() < cnt { - need_more($i, Needed::Size(cnt)) - } else { - let mut offset = $i.len(); - let mut count = 0; - for (o, _) in $i.char_indices() { - if count == cnt { - offset = o; - break; - } - count += 1; - } - Ok((&$i[offset..], &$i[..offset])) - }; - res - } - ); - ); - - struct Parser<'a> { - bcd: &'a str, - } - - impl<'a> Parser<'a> { - pub fn new() -> Parser<'a> { - Parser { bcd: "" } - } - - method!( - tag_abc, &'a str, &'a str>, - self, - tag_s!("áβç") - ); - method!(tag_bcd >(&'a str) -> &'a str, self, tag_s!("βçδ")); - method!(pub tag_hij >(&'a str) -> &'a str, self, tag_s!("λïJ")); - method!(pub tag_ijk, &'a str, &'a str>, self, tag_s!("ïJƙ")); - method!(take3, &'a str, &'a str>, self, take_s!(3)); - method!(pub simple_call, &'a str, &'a str>, mut self, - call_m!(self.tag_abc) - ); - method!(pub simple_peek, &'a str, &'a str>, mut self, - peek!(call_m!(self.take3)) - ); - method!(pub simple_chain, &'a str, &'a str>, mut self, - do_parse!( - map!(call_m!(self.tag_bcd), |bcd| self.bcd = bcd) >> - last: call_m!(self.simple_peek) >> - (last) - ) - ); - fn tag_stuff(mut self: Parser<'a>, input: &'a str, something: &'a str) -> (Parser<'a>, ::IResult<&'a str, &'a str>) { - self.bcd = something; - let (tmp, res) = self.tag_abc(input); - self = tmp; - (self, res) - } - method!(use_apply, &'a str, &'a str>, mut self, apply_m!(self.tag_stuff, "βçδ")); - } - - #[test] - fn test_method_call_abc() { - let p = Parser::new(); - let input: &str = "áβçδèƒϱλïJƙ"; - let consumed: &str = "áβç"; - let leftover: &str = "δèƒϱλïJƙ"; - let (_, res) = p.tag_abc(input); - match res { - Ok((extra, output)) => { - assert!( - extra == leftover, - "`Parser.tag_abc` consumed leftover input. leftover: {}", - extra - ); - assert!( - output == consumed, - "`Parser.tag_abc` doesnt return the string it consumed \ - on success. Expected `{}`, got `{}`.", - consumed, - output - ); - } - other => panic!( - "`Parser.tag_abc` didn't succeed when it should have. \ - Got `{:?}`.", - other - ), - } - } - - #[test] - fn test_method_call_bcd() { - let p = Parser::new(); - let input: &str = "βçδèƒϱλïJƙ"; - let consumed: &str = "βçδ"; - let leftover: &str = "èƒϱλïJƙ"; - let (_, res) = p.tag_bcd(input); - match res { - Ok((extra, output)) => { - assert!( - extra == leftover, - "`Parser.tag_bcd` consumed leftover input. leftover: {}", - extra - ); - assert!( - output == consumed, - "`Parser.tag_bcd` doesn't return the string it consumed \ - on success. 
Expected `{}`, got `{}`.", - consumed, - output - ); - } - other => panic!( - "`Parser.tag_bcd` didn't succeed when it should have. \ - Got `{:?}`.", - other - ), - } - } - - #[test] - fn test_method_call_hij() { - let p = Parser::new(); - let input: &str = "λïJƙℓ₥ñôƥ9řƨ"; - let consumed: &str = "λïJ"; - let leftover: &str = "ƙℓ₥ñôƥ9řƨ"; - let (_, res) = p.tag_hij(input); - match res { - Ok((extra, output)) => { - assert!( - extra == leftover, - "`Parser.tag_hij` consumed leftover input. leftover: {}", - extra - ); - assert!( - output == consumed, - "`Parser.tag_hij` doesn't return the string it consumed \ - on success. Expected `{}`, got `{}`.", - consumed, - output - ); - } - other => panic!( - "`Parser.tag_hij` didn't succeed when it should have. \ - Got `{:?}`.", - other - ), - } - } - - #[test] - fn test_method_call_ijk() { - let p = Parser::new(); - let input: &str = "ïJƙℓ₥ñôƥ9řƨ"; - let consumed: &str = "ïJƙ"; - let leftover: &str = "ℓ₥ñôƥ9řƨ"; - let (_, res) = p.tag_ijk(input); - match res { - Ok((extra, output)) => { - assert!( - extra == leftover, - "`Parser.tag_ijk` consumed leftover input. leftover: {}", - extra - ); - assert!( - output == consumed, - "`Parser.tag_ijk` doesn't return the string it consumed \ - on success. Expected `{}`, got `{}`.", - consumed, - output - ); - } - other => panic!( - "`Parser.tag_ijk` didn't succeed when it should have. \ - Got `{:?}`.", - other - ), - } - } - #[test] - fn test_method_simple_call() { - let p = Parser::new(); - let input: &str = "áβçδèƒϱλïJƙ"; - let consumed: &str = "áβç"; - let leftover: &str = "δèƒϱλïJƙ"; - let (_, res) = p.simple_call(input); - match res { - Ok((extra, output)) => { - assert!( - extra == leftover, - "`Parser.simple_call` consumed leftover input. leftover: {}", - extra - ); - assert!( - output == consumed, - "`Parser.simple_call` doesn't return the string it consumed \ - on success. Expected `{}`, got `{}`.", - consumed, - output - ); - } - other => panic!( - "`Parser.simple_call` didn't succeed when it should have. \ - Got `{:?}`.", - other - ), - } - } - - #[test] - fn test_apply_m() { - let mut p = Parser::new(); - let input: &str = "áβçδèƒϱλïJƙ"; - let consumed: &str = "áβç"; - let leftover: &str = "δèƒϱλïJƙ"; - let (tmp, res) = p.use_apply(input); - p = tmp; - match res { - Ok((extra, output)) => { - assert!( - extra == leftover, - "`Parser.use_apply` consumed leftover input. leftover: {}", - extra - ); - assert!( - output == consumed, - "`Parser.use_apply` doesn't return the string it was supposed to \ - on success. Expected `{}`, got `{}`.", - leftover, - output - ); - assert!( - p.bcd == "βçδ", - "Parser.use_apply didn't modify the parser field correctly: {}", - p.bcd - ); - } - other => panic!( - "`Parser.use_apply` didn't succeed when it should have. \ - Got `{:?}`.", - other - ), - } - } - - #[test] - fn test_method_call_peek() { - let p = Parser::new(); - let input: &str = "ж¥ƺáβçδèƒϱλïJƙ"; - let consumed: &str = "ж¥ƺ"; - let (_, res) = p.simple_peek(input); - match res { - Ok((extra, output)) => { - assert!( - extra == input, - "`Parser.simple_peek` consumed leftover input. leftover: {}", - extra - ); - assert!( - output == consumed, - "`Parser.simple_peek` doesn't return the string it consumed \ - on success. Expected `{}`, got `{}`.", - consumed, - output - ); - } - other => panic!( - "`Parser.simple_peek` didn't succeed when it should have. 
\ - Got `{:?}`.", - other - ), - } - } - - #[test] - fn test_method_call_chain() { - let mut p = Parser::new(); - let input: &str = "βçδδèƒϱλïJƙℓ"; - let leftover: &str = "δèƒϱλïJƙℓ"; - let output: &str = "δèƒ"; - let (tmp, res) = p.simple_chain(input); - p = tmp; - match res { - Ok((extra, out)) => { - assert!( - extra == leftover, - "`Parser.simple_chain` consumed leftover input. leftover: {}", - extra - ); - assert!( - out == output, - "`Parser.simple_chain` doesn't return the string it was supposed to \ - on success. Expected `{}`, got `{}`.", - output, - out - ); - assert!( - p.bcd == "βçδ", - "Parser.simple_chain didn't modify the parser field correctly: {}", - p.bcd - ); - } - other => panic!( - "`Parser.simple_chain` didn't succeed when it should have. \ - Got `{:?}`.", - other - ), - } - } -} diff --git a/third_party/rust/nom/src/multi.rs b/third_party/rust/nom/src/multi.rs deleted file mode 100644 index b7e2591121..0000000000 --- a/third_party/rust/nom/src/multi.rs +++ /dev/null @@ -1,1681 +0,0 @@ -//! Parsers for applying parsers multiple times - -/// `separated_list!(I -> IResult, I -> IResult) => I -> IResult>` -/// separated_list(sep, X) returns Vec will return Incomplete if there may be more elements -#[cfg(feature = "alloc")] -#[macro_export] -macro_rules! separated_list( - ($i:expr, $sep:ident!( $($args:tt)* ), $submac:ident!( $($args2:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::Err; - - use $crate::InputLength; - - //FIXME: use crate vec - let mut res = $crate::lib::std::vec::Vec::new(); - let mut input = $i.clone(); - - // get the first element - let input_ = input.clone(); - match $submac!(input_, $($args2)*) { - Err(Err::Error(_)) => Ok((input, res)), - Err(e) => Err(e), - Ok((i,o)) => { - if i.input_len() == input.input_len() { - Err(Err::Error(error_position!(input, $crate::ErrorKind::SeparatedList))) - } else { - res.push(o); - input = i; - - let ret; - - loop { - // get the separator first - let input_ = input.clone(); - match $sep!(input_, $($args)*) { - Err(Err::Error(_)) => { - ret = Ok((input, res)); - break; - } - Err(e) => { - ret = Err(e); - break; - }, - Ok((i2,_)) => { - let i2_len = i2.input_len(); - if i2_len == input.input_len() { - ret = Ok((input, res)); - break; - } - - // get the element next - match $submac!(i2, $($args2)*) { - Err(Err::Error(_)) => { - ret = Ok((input, res)); - break; - }, - Err(e) => { - ret = Err(e); - break; - }, - Ok((i3,o3)) => { - if i3.input_len() == i2_len { - ret = Ok((input, res)); - break; - } - res.push(o3); - input = i3; - } - } - } - } - } - - ret - } - }, - } - } - ); - ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( - separated_list!($i, $submac!($($args)*), call!($g)); - ); - ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( - separated_list!($i, call!($f), $submac!($($args)*)); - ); - ($i:expr, $f:expr, $g:expr) => ( - separated_list!($i, call!($f), call!($g)); - ); -); - -/// `separated_nonempty_list!(I -> IResult, I -> IResult) => I -> IResult>` -/// separated_nonempty_list(sep, X) returns Vec will return Incomplete if there may be more elements -#[macro_export] -macro_rules! 
separated_nonempty_list( - ($i:expr, $sep:ident!( $($args:tt)* ), $submac:ident!( $($args2:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind}; - use $crate::InputLength; - - let mut res = $crate::lib::std::vec::Vec::new(); - let mut input = $i.clone(); - - // get the first element - let input_ = input.clone(); - match $submac!(input_, $($args2)*) { - Err(e) => Err(e), - Ok((i,o)) => { - if i.input_len() == input.input_len() { - let e = ErrorKind::SeparatedNonEmptyList; - Err(Err::Error(error_position!(input, e))) - } else { - res.push(o); - input = i; - - let ret; - - loop { - // get the separator first - let input_ = input.clone(); - match $sep!(input_, $($args)*) { - Err(Err::Error(_)) => { - ret = Ok((input, res)); - break; - } - Err(e) => { - ret = Err(e); - break; - }, - Ok((i2,_)) => { - let i2_len = i2.input_len(); - if i2_len == input.input_len() { - ret = Ok((input, res)); - break; - } - - // get the element next - match $submac!(i2, $($args2)*) { - Err(Err::Error(_)) => { - ret = Ok((input, res)); - break; - }, - Err(e) => { - ret = Err(e); - break; - }, - Ok((i3,o3)) => { - if i3.input_len() == i2_len { - ret = Ok((input, res)); - break; - } - res.push(o3); - input = i3; - } - } - } - } - } - - ret - } - }, - } - } - ); - ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( - separated_nonempty_list!($i, $submac!($($args)*), call!($g)); - ); - ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( - separated_nonempty_list!($i, call!($f), $submac!($($args)*)); - ); - ($i:expr, $f:expr, $g:expr) => ( - separated_nonempty_list!($i, call!($f), call!($g)); - ); -); - -/// `separated_list_complete!(I -> IResult, I -> IResult) => I -> IResult>` -/// This is equivalent to the `separated_list!` combinator, except that it will return `Error` -/// when either the separator or element subparser returns `Incomplete`. -#[macro_export] -macro_rules! separated_list_complete { - ($i:expr, $sep:ident!( $($args:tt)* ), $submac:ident!( $($args2:tt)* )) => ({ - separated_list!($i, complete!($sep!($($args)*)), complete!($submac!($($args2)*))) - }); - - ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( - separated_list_complete!($i, $submac!($($args)*), call!($g)); - ); - ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( - separated_list_complete!($i, call!($f), $submac!($($args)*)); - ); - ($i:expr, $f:expr, $g:expr) => ( - separated_list_complete!($i, call!($f), call!($g)); - ); -} - -/// `separated_nonempty_list_complete!(I -> IResult, I -> IResult) => I -> IResult>` -/// This is equivalent to the `separated_nonempty_list!` combinator, except that it will return -/// `Error` when either the separator or element subparser returns `Incomplete`. -#[macro_export] -macro_rules! separated_nonempty_list_complete { - ($i:expr, $sep:ident!( $($args:tt)* ), $submac:ident!( $($args2:tt)* )) => ({ - separated_nonempty_list!($i, complete!($sep!($($args)*)), complete!($submac!($($args2)*))) - }); - - ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( - separated_nonempty_list_complete!($i, $submac!($($args)*), call!($g)); - ); - ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( - separated_nonempty_list_complete!($i, call!($f), $submac!($($args)*)); - ); - ($i:expr, $f:expr, $g:expr) => ( - separated_nonempty_list_complete!($i, call!($f), call!($g)); - ); -} - -/// `many0!(I -> IResult) => I -> IResult>` -/// Applies the parser 0 or more times and returns the list of results in a Vec. -/// -/// The embedded parser may return Incomplete. 
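None of the `separated_list!` variants above carries a usage example. A minimal, hedged sketch with hypothetical element and separator tags, assuming nom 4.x's macro API:

```rust
#[macro_use]
extern crate nom;

// Zero or more "abc" elements separated by commas; parsing stops cleanly
// at the first input that is neither a separator nor an element.
named!(abc_list<&[u8], Vec<&[u8]> >, separated_list!(tag!(","), tag!("abc")));

fn main() {
    assert_eq!(
        abc_list(&b"abc,abc;"[..]),
        Ok((&b";"[..], vec![&b"abc"[..], &b"abc"[..]]))
    );
}
```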
-/// -/// `many0` will only return `Error` if the embedded parser does not consume any input -/// (to avoid infinite loops). -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!(multi<&[u8], Vec<&[u8]> >, many0!( tag!( "abcd" ) ) ); -/// -/// let a = b"abcdabcdefgh"; -/// let b = b"azerty"; -/// -/// let res = vec![&b"abcd"[..], &b"abcd"[..]]; -/// assert_eq!(multi(&a[..]),Ok((&b"efgh"[..], res))); -/// assert_eq!(multi(&b[..]),Ok((&b"azerty"[..], Vec::new()))); -/// # } -/// ``` -/// -#[cfg(feature = "alloc")] -#[macro_export] -macro_rules! many0( - ($i:expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,AtEof}; - - let ret; - let mut res = $crate::lib::std::vec::Vec::new(); - let mut input = $i.clone(); - - loop { - let input_ = input.clone(); - match $submac!(input_, $($args)*) { - Ok((i, o)) => { - // loop trip must always consume (otherwise infinite loops) - if i == input { - - if i.at_eof() { - ret = Ok((input, res)); - } else { - ret = Err(Err::Error(error_position!(input, $crate::ErrorKind::Many0))); - } - break; - } - res.push(o); - - input = i; - }, - Err(Err::Error(_)) => { - ret = Ok((input, res)); - break; - }, - Err(e) => { - ret = Err(e); - break; - }, - } - } - - ret - } - ); - ($i:expr, $f:expr) => ( - many0!($i, call!($f)); - ); -); - -/// `many1!(I -> IResult) => I -> IResult>` -/// Applies the parser 1 or more times and returns the list of results in a Vec -/// -/// the embedded parser may return Incomplete -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::Err; -/// # use nom::ErrorKind; -/// # use nom::types::CompleteByteSlice; -/// # fn main() { -/// named!(multi<&[u8], Vec<&[u8]> >, many1!( tag!( "abcd" ) ) ); -/// -/// let a = b"abcdabcdefgh"; -/// let b = b"azerty"; -/// -/// let res = vec![&b"abcd"[..], &b"abcd"[..]]; -/// assert_eq!(multi(&a[..]), Ok((&b"efgh"[..], res))); -/// assert_eq!(multi(&b[..]), Err(Err::Error(error_position!(&b[..], ErrorKind::Many1)))); -/// -/// named!(multi_complete >, many1!( tag!( "abcd" ) ) ); -/// let c = CompleteByteSlice(b"abcdabcd"); -/// -/// let res = vec![CompleteByteSlice(b"abcd"), CompleteByteSlice(b"abcd")]; -/// assert_eq!(multi_complete(c), Ok((CompleteByteSlice(b""), res))); -/// # } -/// ``` -#[cfg(feature = "alloc")] -#[macro_export] -macro_rules! 
many1( - ($i:expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::Err; - - use $crate::InputLength; - let i_ = $i.clone(); - match $submac!(i_, $($args)*) { - Err(Err::Error(_)) => Err(Err::Error( - error_position!(i_, $crate::ErrorKind::Many1) - )), - Err(Err::Failure(_)) => Err(Err::Failure( - error_position!(i_, $crate::ErrorKind::Many1) - )), - Err(i) => Err(i), - Ok((i1,o1)) => { - let mut res = $crate::lib::std::vec::Vec::with_capacity(4); - res.push(o1); - let mut input = i1; - let mut error = $crate::lib::std::option::Option::None; - loop { - let input_ = input.clone(); - match $submac!(input_, $($args)*) { - Err(Err::Error(_)) => { - break; - }, - Err(e) => { - error = $crate::lib::std::option::Option::Some(e); - break; - }, - Ok((i, o)) => { - if i.input_len() == input.input_len() { - break; - } - res.push(o); - input = i; - } - } - } - - match error { - $crate::lib::std::option::Option::Some(e) => Err(e), - $crate::lib::std::option::Option::None => Ok((input, res)) - } - } - } - } - ); - ($i:expr, $f:expr) => ( - many1!($i, call!($f)); - ); -); - -/// `many_till!(I -> IResult, I -> IResult) => I -> IResult, P)>` -/// Applies the first parser until the second applies. Returns a tuple containing the list -/// of results from the first in a Vec and the result of the second. -/// -/// The first embedded parser may return Incomplete -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::Err; -/// # use nom::ErrorKind; -/// # fn main() { -/// named!(multi<&[u8], (Vec<&[u8]>, &[u8]) >, many_till!( tag!( "abcd" ), tag!( "efgh" ) ) ); -/// -/// let a = b"abcdabcdefghabcd"; -/// let b = b"efghabcd"; -/// let c = b"azerty"; -/// -/// let res_a = (vec![&b"abcd"[..], &b"abcd"[..]], &b"efgh"[..]); -/// let res_b: (Vec<&[u8]>, &[u8]) = (Vec::new(), &b"efgh"[..]); -/// assert_eq!(multi(&a[..]),Ok((&b"abcd"[..], res_a))); -/// assert_eq!(multi(&b[..]),Ok((&b"abcd"[..], res_b))); -/// assert_eq!(multi(&c[..]), Err(Err::Error(error_node_position!(&c[..], ErrorKind::ManyTill, -/// error_position!(&c[..], ErrorKind::Tag))))); -/// # } -/// ``` -#[cfg(feature = "alloc")] -#[macro_export] -macro_rules! 
many_till( - (__impl $i:expr, $submac1:ident!( $($args1:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind}; - - let ret; - let mut res = $crate::lib::std::vec::Vec::new(); - let mut input = $i.clone(); - - loop { - match $submac2!(input, $($args2)*) { - Ok((i, o)) => { - ret = Ok((i, (res, o))); - break; - }, - Err(e1) => { - match $submac1!(input, $($args1)*) { - Err(Err::Error(err)) => { - fn unify_types(_: &T, _: &T) {} - let e = Err::Error(error_node_position!(input, ErrorKind::ManyTill, err)); - unify_types(&e1, &e); - - ret = Err(e); - break; - }, - Err(e) => { - ret = Err(e); - break; - }, - Ok((i, o)) => { - // loop trip must always consume (otherwise infinite loops) - if i == input { - ret = Err(Err::Error(error_position!(input, $crate::ErrorKind::ManyTill))); - break; - } - - res.push(o); - input = i; - }, - } - }, - } - } - - ret - } - ); - ($i:expr, $submac1:ident!( $($args1:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( - many_till!(__impl $i, $submac1!($($args1)*), $submac2!($($args2)*)); - ); - ($i:expr, $submac1:ident!( $($args1:tt)* ), $g:expr) => ( - many_till!(__impl $i, $submac1!($($args1)*), call!($g)); - ); - ($i:expr, $f:expr, $submac2:ident!( $($args2:tt)* )) => ( - many_till!(__impl $i, call!($f), $submac2!($($args2)*)); - ); - ($i:expr, $f:expr, $g: expr) => ( - many_till!(__impl $i, call!($f), call!($g)); - ); -); - -/// `many_m_n!(usize, usize, I -> IResult) => I -> IResult>` -/// Applies the parser between m and n times (n included) and returns the list of -/// results in a Vec -/// -/// the embedded parser may return Incomplete -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::Err; -/// # use nom::ErrorKind; -/// # fn main() { -/// named!(multi<&[u8], Vec<&[u8]> >, many_m_n!(2, 4, tag!( "abcd" ) ) ); -/// -/// let a = b"abcdefgh"; -/// let b = b"abcdabcdefgh"; -/// let c = b"abcdabcdabcdabcdabcdefgh"; -/// -/// assert_eq!(multi(&a[..]), Err(Err::Error(error_position!(&a[..], ErrorKind::ManyMN)))); -/// let res = vec![&b"abcd"[..], &b"abcd"[..]]; -/// assert_eq!(multi(&b[..]),Ok((&b"efgh"[..], res))); -/// let res2 = vec![&b"abcd"[..], &b"abcd"[..], &b"abcd"[..], &b"abcd"[..]]; -/// assert_eq!(multi(&c[..]),Ok((&b"abcdefgh"[..], res2))); -/// # } -/// ``` -#[cfg(feature = "alloc")] -#[macro_export] -macro_rules! 
many_m_n( - ($i:expr, $m:expr, $n: expr, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Context,Err,Needed}; - - use $crate::InputLength; - let mut res = $crate::lib::std::vec::Vec::with_capacity($m); - let mut input = $i.clone(); - let mut count: usize = 0; - let mut err = false; - let mut incomplete: $crate::lib::std::option::Option = $crate::lib::std::option::Option::None; - let mut failure: $crate::lib::std::option::Option> = $crate::lib::std::option::Option::None; - loop { - if count == $n { break } - let i_ = input.clone(); - match $submac!(i_, $($args)*) { - Ok((i, o)) => { - // do not allow parsers that do not consume input (causes infinite loops) - if i.input_len() == input.input_len() { - break; - } - res.push(o); - input = i; - count += 1; - } - Err(Err::Error(_)) => { - err = true; - break; - }, - Err(Err::Incomplete(i)) => { - incomplete = $crate::lib::std::option::Option::Some(i); - break; - }, - Err(Err::Failure(e)) => { - failure = $crate::lib::std::option::Option::Some(e); - break; - }, - } - } - - if count < $m { - if err { - Err(Err::Error(error_position!($i, $crate::ErrorKind::ManyMN))) - } else { - match failure { - $crate::lib::std::option::Option::Some(i) => Err(Err::Failure(i)), - $crate::lib::std::option::Option::None => match incomplete { - $crate::lib::std::option::Option::Some(i) => $crate::need_more($i, i), - $crate::lib::std::option::Option::None => $crate::need_more($i, Needed::Unknown) - } - } - } - } else { - match failure { - $crate::lib::std::option::Option::Some(i) => Err(Err::Failure(i)), - $crate::lib::std::option::Option::None => match incomplete { - $crate::lib::std::option::Option::Some(i) => $crate::need_more($i, i), - $crate::lib::std::option::Option::None => Ok((input, res)) - } - } - } - } - ); - ($i:expr, $m:expr, $n: expr, $f:expr) => ( - many_m_n!($i, $m, $n, call!($f)); - ); -); - -/// `count!(I -> IResult, nb) => I -> IResult>` -/// Applies the child parser a specified number of times -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::Err; -/// # use nom::ErrorKind; -/// # fn main() { -/// named!(counter< Vec<&[u8]> >, count!( tag!( "abcd" ), 2 ) ); -/// -/// let a = b"abcdabcdabcdef"; -/// let b = b"abcdefgh"; -/// let res = vec![&b"abcd"[..], &b"abcd"[..]]; -/// -/// assert_eq!(counter(&a[..]),Ok((&b"abcdef"[..], res))); -/// assert_eq!(counter(&b[..]), Err(Err::Error(error_position!(&b[..], ErrorKind::Count)))); -/// # } -/// ``` -/// -#[cfg(feature = "alloc")] -#[macro_export] -macro_rules! 
count( - ($i:expr, $submac:ident!( $($args:tt)* ), $count: expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::Err; - - let ret; - let mut input = $i.clone(); - let mut res = $crate::lib::std::vec::Vec::new(); - - loop { - if res.len() == $count { - ret = Ok((input, res)); - break; - } - - let input_ = input.clone(); - match $submac!(input_, $($args)*) { - Ok((i,o)) => { - res.push(o); - input = i; - }, - Err(Err::Error(e)) => { - fn unify_types(_: &T, _: &T) {} - let e2 = error_position!($i, $crate::ErrorKind::Count); - unify_types(&e, &e2); - - ret = Err(Err::Error(e2)); - break; - }, - Err(e) => { - ret = Err(e); - break; - }, - } - } - - ret - } - ); - ($i:expr, $f:expr, $count: expr) => ( - count!($i, call!($f), $count); - ); -); - -/// `count_fixed!(O, I -> IResult, nb) => I -> IResult` -/// Applies the child parser a fixed number of times and returns a fixed size array -/// The type must be specified and it must be `Copy` -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::Err; -/// # use nom::ErrorKind; -/// # fn main() { -/// named!(counter< [&[u8]; 2] >, count_fixed!( &[u8], tag!( "abcd" ), 2 ) ); -/// // can omit the type specifier if returning slices -/// // named!(counter< [&[u8]; 2] >, count_fixed!( tag!( "abcd" ), 2 ) ); -/// -/// let a = b"abcdabcdabcdef"; -/// let b = b"abcdefgh"; -/// let res = [&b"abcd"[..], &b"abcd"[..]]; -/// -/// assert_eq!(counter(&a[..]),Ok((&b"abcdef"[..], res))); -/// assert_eq!(counter(&b[..]), Err(Err::Error(error_position!(&b[..], ErrorKind::Count)))); -/// # } -/// ``` -/// -#[macro_export] -macro_rules! count_fixed ( - ($i:expr, $typ:ty, $submac:ident!( $($args:tt)* ), $count: expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::Err; - - let ret; - let mut input = $i.clone(); - // `$typ` must be Copy, and thus having no destructor, this is panic safe - let mut res: [$typ; $count] = unsafe{[$crate::lib::std::mem::uninitialized(); $count as usize]}; - let mut cnt: usize = 0; - - loop { - if cnt == $count { - ret = Ok((input, res)); break; - } - - match $submac!(input, $($args)*) { - Ok((i,o)) => { - res[cnt] = o; - cnt += 1; - input = i; - }, - Err(Err::Error(e)) => { - fn unify_types(_: &T, _: &T) {} - let e2 = error_position!($i, $crate::ErrorKind::Count); - unify_types(&e, &e2); - ret = Err(Err::Error(e2)); - break; - }, - Err(e) => { - ret = Err(e); - break; - }, - } - } - - ret - } -); - ($i:expr, $typ: ty, $f:expr, $count: expr) => ( - count_fixed!($i, $typ, call!($f), $count); - ); -); - -/// `length_count!(I -> IResult, I -> IResult) => I -> IResult>` -/// gets a number from the first parser, then applies the second parser that many times -#[macro_export] -macro_rules! 
length_count( - ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,Convert}; - - match $submac!($i, $($args)*) { - Err(e) => Err(Err::convert(e)), - Ok((i, o)) => { - match count!(i, $submac2!($($args2)*), o as usize) { - Err(e) => Err(Err::convert(e)), - Ok((i2, o2)) => Ok((i2, o2)) - } - } - } - } - ); - - ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( - length_count!($i, $submac!($($args)*), call!($g)); - ); - - ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( - length_count!($i, call!($f), $submac!($($args)*)); - ); - - ($i:expr, $f:expr, $g:expr) => ( - length_count!($i, call!($f), call!($g)); - ); -); - -/// `length_data!(I -> IResult) => O` -/// -/// `length_data` gets a number from the first parser, than takes a subslice of the input -/// of that size, and returns that subslice -#[macro_export] -macro_rules! length_data( - ($i:expr, $submac:ident!( $($args:tt)* )) => ({ - use $crate::lib::std::result::Result::*; - use $crate::{Convert,Err}; - - match $submac!($i, $($args)*) { - Err(e) => Err(e), - Ok((i, o)) => { - match take!(i, o as usize) { - Err(e) => Err(Err::convert(e)), - Ok((i2, o2)) => Ok((i2, o2)) - } - } - } - }); - - ($i:expr, $f:expr) => ( - length_data!($i, call!($f)); - ); -); - -/// `length_value!(I -> IResult, I -> IResult) => I -> IResult` -/// -/// Gets a number from the first parser, takes a subslice of the input of that size, -/// then applies the second parser on that subslice. If the second parser returns -/// `Incomplete`, `length_value` will return an error -#[macro_export] -macro_rules! length_value( - ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,Convert}; - - match $submac!($i, $($args)*) { - Err(e) => Err(e), - Ok((i, o)) => { - match take!(i, o as usize) { - Err(e) => Err(Err::convert(e)), - Ok((i2, o2)) => { - match complete!(o2, $submac2!($($args2)*)) { - Err(e) => Err(Err::convert(e)), - Ok((_, o3)) => Ok((i2, o3)) - } - } - } - } - } - } - ); - - ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( - length_value!($i, $submac!($($args)*), call!($g)); - ); - - ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( - length_value!($i, call!($f), $submac!($($args)*)); - ); - - ($i:expr, $f:expr, $g:expr) => ( - length_value!($i, call!($f), call!($g)); - ); -); - -/// `fold_many0!(I -> IResult, R, Fn(R, O) -> R) => I -> IResult` -/// Applies the parser 0 or more times and folds the list of return values -/// -/// the embedded parser may return Incomplete -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!(multi<&[u8], Vec<&[u8]> >, -/// fold_many0!( tag!( "abcd" ), Vec::new(), |mut acc: Vec<_>, item| { -/// acc.push(item); -/// acc -/// })); -/// -/// let a = b"abcdabcdefgh"; -/// let b = b"azerty"; -/// -/// let res = vec![&b"abcd"[..], &b"abcd"[..]]; -/// assert_eq!(multi(&a[..]),Ok((&b"efgh"[..], res))); -/// assert_eq!(multi(&b[..]),Ok((&b"azerty"[..], Vec::new()))); -/// # } -/// ``` -/// 0 or more -#[macro_export] -macro_rules! 
fold_many0( - ($i:expr, $submac:ident!( $($args:tt)* ), $init:expr, $f:expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,AtEof}; - - let ret; - let f = $f; - let mut res = $init; - let mut input = $i.clone(); - - loop { - match $submac!(input, $($args)*) { - Ok((i, o)) => { - // loop trip must always consume (otherwise infinite loops) - if i == input { - if i.at_eof() { - ret = Ok((input, res)); - } else { - ret = Err(Err::Error(error_position!(input, $crate::ErrorKind::Many0))); - } - break; - } - - res = f(res, o); - input = i; - }, - Err(Err::Error(_)) => { - ret = Ok((input, res)); - break; - }, - Err(e) => { - ret = Err(e); - break; - }, - } - } - - ret - } - ); - ($i:expr, $f:expr, $init:expr, $fold_f:expr) => ( - fold_many0!($i, call!($f), $init, $fold_f); - ); -); - -/// `fold_many1!(I -> IResult, R, Fn(R, O) -> R) => I -> IResult` -/// Applies the parser 1 or more times and folds the list of return values -/// -/// the embedded parser may return Incomplete -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::Err; -/// # use nom::ErrorKind; -/// # fn main() { -/// named!(multi<&[u8], Vec<&[u8]> >, -/// fold_many1!( tag!( "abcd" ), Vec::new(), |mut acc: Vec<_>, item| { -/// acc.push(item); -/// acc -/// })); -/// -/// let a = b"abcdabcdefgh"; -/// let b = b"azerty"; -/// -/// let res = vec![&b"abcd"[..], &b"abcd"[..]]; -/// assert_eq!(multi(&a[..]),Ok((&b"efgh"[..], res))); -/// assert_eq!(multi(&b[..]), Err(Err::Error(error_position!(&b[..], ErrorKind::Many1)))); -/// # } -/// ``` -#[macro_export] -macro_rules! fold_many1( - ($i:expr, $submac:ident!( $($args:tt)* ), $init:expr, $f:expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,Needed,InputLength,Context,AtEof}; - - match $submac!($i, $($args)*) { - Err(Err::Error(_)) => Err(Err::Error( - error_position!($i, $crate::ErrorKind::Many1) - )), - Err(Err::Failure(_)) => Err(Err::Failure( - error_position!($i, $crate::ErrorKind::Many1) - )), - Err(Err::Incomplete(i)) => Err(Err::Incomplete(i)), - Ok((i1,o1)) => { - let f = $f; - let mut acc = f($init, o1); - let mut input = i1; - let mut incomplete: $crate::lib::std::option::Option = - $crate::lib::std::option::Option::None; - let mut failure: $crate::lib::std::option::Option> = - $crate::lib::std::option::Option::None; - loop { - match $submac!(input, $($args)*) { - Err(Err::Error(_)) => { - break; - }, - Err(Err::Incomplete(i)) => { - incomplete = $crate::lib::std::option::Option::Some(i); - break; - }, - Err(Err::Failure(e)) => { - failure = $crate::lib::std::option::Option::Some(e); - break; - }, - Ok((i, o)) => { - if i.input_len() == input.input_len() { - if !i.at_eof() { - failure = $crate::lib::std::option::Option::Some(error_position!(i, $crate::ErrorKind::Many1)); - } - break; - } - acc = f(acc, o); - input = i; - } - } - } - - match failure { - $crate::lib::std::option::Option::Some(e) => Err(Err::Failure(e)), - $crate::lib::std::option::Option::None => match incomplete { - $crate::lib::std::option::Option::Some(i) => $crate::need_more($i, i), - $crate::lib::std::option::Option::None => Ok((input, acc)) - } - } - } - } - } - ); - ($i:expr, $f:expr, $init:expr, $fold_f:expr) => ( - fold_many1!($i, call!($f), $init, $fold_f); - ); -); - -/// `fold_many_m_n!(usize, usize, I -> IResult, R, Fn(R, O) -> R) => I -> IResult` -/// Applies the parser between m and n times (n included) and folds the list of return value -/// -/// the embedded parser may return Incomplete -/// -/// ``` -/// # #[macro_use] extern crate 
nom; -/// # use nom::Err; -/// # use nom::ErrorKind; -/// # fn main() { -/// named!(multi<&[u8], Vec<&[u8]> >, -/// fold_many_m_n!(2, 4, tag!( "abcd" ), Vec::new(), |mut acc: Vec<_>, item| { -/// acc.push(item); -/// acc -/// })); -/// -/// let a = b"abcdefgh"; -/// let b = b"abcdabcdefgh"; -/// let c = b"abcdabcdabcdabcdabcdefgh"; -/// -/// assert_eq!(multi(&a[..]), Err(Err::Error(error_position!(&a[..], ErrorKind::ManyMN)))); -/// let res = vec![&b"abcd"[..], &b"abcd"[..]]; -/// assert_eq!(multi(&b[..]),Ok((&b"efgh"[..], res))); -/// let res2 = vec![&b"abcd"[..], &b"abcd"[..], &b"abcd"[..], &b"abcd"[..]]; -/// assert_eq!(multi(&c[..]),Ok((&b"abcdefgh"[..], res2))); -/// # } -/// ``` -#[macro_export] -macro_rules! fold_many_m_n( - ($i:expr, $m:expr, $n: expr, $submac:ident!( $($args:tt)* ), $init:expr, $f:expr) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,Needed}; - - use $crate::InputLength; - let mut acc = $init; - let f = $f; - let mut input = $i.clone(); - let mut count: usize = 0; - let mut err = false; - let mut incomplete: $crate::lib::std::option::Option = $crate::lib::std::option::Option::None; - loop { - if count == $n { break } - match $submac!(input, $($args)*) { - Ok((i, o)) => { - // do not allow parsers that do not consume input (causes infinite loops) - if i.input_len() == input.input_len() { - break; - } - acc = f(acc, o); - input = i; - count += 1; - } - //FIXME: handle failure properly - Err(Err::Error(_)) | Err(Err::Failure(_)) => { - err = true; - break; - }, - Err(Err::Incomplete(i)) => { - incomplete = $crate::lib::std::option::Option::Some(i); - break; - }, - } - } - - if count < $m { - if err { - Err(Err::Error(error_position!($i, $crate::ErrorKind::ManyMN))) - } else { - match incomplete { - $crate::lib::std::option::Option::Some(i) => Err(Err::Incomplete(i)), - $crate::lib::std::option::Option::None => Err(Err::Incomplete(Needed::Unknown)) - } - } - } else { - match incomplete { - $crate::lib::std::option::Option::Some(i) => Err(Err::Incomplete(i)), - $crate::lib::std::option::Option::None => Ok((input, acc)) - } - } - } - ); - ($i:expr, $m:expr, $n: expr, $f:expr, $init:expr, $fold_f:expr) => ( - fold_many_m_n!($i, $m, $n, call!($f), $init, $fold_f); - ); -); - -#[cfg(test)] -mod tests { - use internal::{Err, IResult, Needed}; - use nom::{digit, be_u16, be_u8, le_u16}; - use lib::std::str::{self, FromStr}; - #[cfg(feature = "alloc")] - use lib::std::vec::Vec; - use util::ErrorKind; - - // reproduce the tag and take macros, because of module import order - macro_rules! tag ( - ($i:expr, $inp: expr) => ( - { - #[inline(always)] - fn as_bytes(b: &T) -> &[u8] { - b.as_bytes() - } - - let expected = $inp; - let bytes = as_bytes(&expected); - - tag_bytes!($i,bytes) - } - ); - ); - - macro_rules! tag_bytes ( - ($i:expr, $bytes: expr) => ( - { - use $crate::lib::std::cmp::min; - let len = $i.len(); - let blen = $bytes.len(); - let m = min(len, blen); - let reduced = &$i[..m]; - let b = &$bytes[..m]; - - let res: IResult<_,_,u32> = if reduced != b { - Err($crate::Err::Error($crate::Context::Code($i, $crate::ErrorKind::Tag::))) - } else if m < blen { - Err($crate::Err::Incomplete(Needed::Size(blen))) - } else { - Ok((&$i[blen..], reduced)) - }; - res - } - ); - ); - - macro_rules! 
take ( - ($i:expr, $count:expr) => ( - { - let cnt = $count as usize; - let res:IResult<&[u8],&[u8],u32> = if $i.len() < cnt { - Err($crate::Err::Incomplete(Needed::Size(cnt))) - } else { - Ok((&$i[cnt..],&$i[0..cnt])) - }; - res - } - ) - ); - - #[test] - #[cfg(feature = "alloc")] - fn separated_list() { - named!(multi<&[u8],Vec<&[u8]> >, separated_list!(tag!(","), tag!("abcd"))); - named!(multi_empty<&[u8],Vec<&[u8]> >, separated_list!(tag!(","), tag!(""))); - named!(multi_longsep<&[u8],Vec<&[u8]> >, separated_list!(tag!(".."), tag!("abcd"))); - - let a = &b"abcdef"[..]; - let b = &b"abcd,abcdef"[..]; - let c = &b"azerty"[..]; - let d = &b",,abc"[..]; - let e = &b"abcd,abcd,ef"[..]; - let f = &b"abc"[..]; - let g = &b"abcd."[..]; - let h = &b"abcd,abc"[..]; - - let res1 = vec![&b"abcd"[..]]; - assert_eq!(multi(a), Ok((&b"ef"[..], res1))); - let res2 = vec![&b"abcd"[..], &b"abcd"[..]]; - assert_eq!(multi(b), Ok((&b"ef"[..], res2))); - assert_eq!(multi(c), Ok((&b"azerty"[..], Vec::new()))); - assert_eq!( - multi_empty(d), - Err(Err::Error(error_position!(d, ErrorKind::SeparatedList))) - ); - //let res3 = vec![&b""[..], &b""[..], &b""[..]]; - //assert_eq!(multi_empty(d),Ok((&b"abc"[..], res3))); - let res4 = vec![&b"abcd"[..], &b"abcd"[..]]; - assert_eq!(multi(e), Ok((&b",ef"[..], res4))); - - assert_eq!(multi(f), Err(Err::Incomplete(Needed::Size(4)))); - assert_eq!(multi_longsep(g), Err(Err::Incomplete(Needed::Size(2)))); - assert_eq!(multi(h), Err(Err::Incomplete(Needed::Size(4)))); - } - - #[test] - #[cfg(feature = "alloc")] - fn separated_list_complete() { - use nom::alpha; - - named!(multi<&[u8],Vec<&[u8]> >, separated_list_complete!(tag!(","), alpha)); - let a = &b"abcdef;"[..]; - let b = &b"abcd,abcdef;"[..]; - let c = &b"abcd,abcd,ef;"[..]; - let d = &b"abc."[..]; - let e = &b"abcd,ef."[..]; - let f = &b"123"[..]; - - assert_eq!(multi(a), Ok((&b";"[..], vec![&a[..a.len() - 1]]))); - assert_eq!( - multi(b), - Ok((&b";"[..], vec![&b"abcd"[..], &b"abcdef"[..]])) - ); - assert_eq!( - multi(c), - Ok((&b";"[..], vec![&b"abcd"[..], &b"abcd"[..], &b"ef"[..]])) - ); - assert_eq!(multi(d), Ok((&b"."[..], vec![&b"abc"[..]]))); - assert_eq!(multi(e), Ok((&b"."[..], vec![&b"abcd"[..], &b"ef"[..]]))); - assert_eq!(multi(f), Ok((&b"123"[..], Vec::new()))); - } - - #[test] - #[cfg(feature = "alloc")] - fn separated_nonempty_list() { - named!(multi<&[u8],Vec<&[u8]> >, separated_nonempty_list!(tag!(","), tag!("abcd"))); - named!(multi_longsep<&[u8],Vec<&[u8]> >, separated_nonempty_list!(tag!(".."), tag!("abcd"))); - - let a = &b"abcdef"[..]; - let b = &b"abcd,abcdef"[..]; - let c = &b"azerty"[..]; - let d = &b"abcd,abcd,ef"[..]; - - let f = &b"abc"[..]; - let g = &b"abcd."[..]; - let h = &b"abcd,abc"[..]; - - let res1 = vec![&b"abcd"[..]]; - assert_eq!(multi(a), Ok((&b"ef"[..], res1))); - let res2 = vec![&b"abcd"[..], &b"abcd"[..]]; - assert_eq!(multi(b), Ok((&b"ef"[..], res2))); - assert_eq!( - multi(c), - Err(Err::Error(error_position!(c, ErrorKind::Tag))) - ); - let res3 = vec![&b"abcd"[..], &b"abcd"[..]]; - assert_eq!(multi(d), Ok((&b",ef"[..], res3))); - - assert_eq!(multi(f), Err(Err::Incomplete(Needed::Size(4)))); - assert_eq!(multi_longsep(g), Err(Err::Incomplete(Needed::Size(2)))); - assert_eq!(multi(h), Err(Err::Incomplete(Needed::Size(4)))); - } - - #[test] - #[cfg(feature = "alloc")] - fn separated_nonempty_list_complete() { - use nom::alpha; - - named!(multi<&[u8],Vec<&[u8]> >, separated_nonempty_list_complete!(tag!(","), alpha)); - let a = &b"abcdef;"[..]; - let b = 
&b"abcd,abcdef;"[..]; - let c = &b"abcd,abcd,ef;"[..]; - let d = &b"abc."[..]; - let e = &b"abcd,ef."[..]; - let f = &b"123"[..]; - - assert_eq!(multi(a), Ok((&b";"[..], vec![&a[..a.len() - 1]]))); - assert_eq!( - multi(b), - Ok((&b";"[..], vec![&b"abcd"[..], &b"abcdef"[..]])) - ); - assert_eq!( - multi(c), - Ok((&b";"[..], vec![&b"abcd"[..], &b"abcd"[..], &b"ef"[..]])) - ); - assert_eq!(multi(d), Ok((&b"."[..], vec![&b"abc"[..]]))); - assert_eq!(multi(e), Ok((&b"."[..], vec![&b"abcd"[..], &b"ef"[..]]))); - assert_eq!( - multi(f), - Err(Err::Error(error_position!(&b"123"[..], ErrorKind::Alpha))) - ); - } - - #[test] - #[cfg(feature = "alloc")] - fn many0() { - named!(tag_abcd, tag!("abcd")); - named!(tag_empty, tag!("")); - named!( multi<&[u8],Vec<&[u8]> >, many0!(tag_abcd) ); - named!( multi_empty<&[u8],Vec<&[u8]> >, many0!(tag_empty) ); - - assert_eq!(multi(&b"abcdef"[..]), Ok((&b"ef"[..], vec![&b"abcd"[..]]))); - assert_eq!( - multi(&b"abcdabcdefgh"[..]), - Ok((&b"efgh"[..], vec![&b"abcd"[..], &b"abcd"[..]])) - ); - assert_eq!(multi(&b"azerty"[..]), Ok((&b"azerty"[..], Vec::new()))); - assert_eq!(multi(&b"abcdab"[..]), Err(Err::Incomplete(Needed::Size(4)))); - assert_eq!(multi(&b"abcd"[..]), Err(Err::Incomplete(Needed::Size(4)))); - assert_eq!(multi(&b""[..]), Err(Err::Incomplete(Needed::Size(4)))); - assert_eq!( - multi_empty(&b"abcdef"[..]), - Err(Err::Error(error_position!( - &b"abcdef"[..], - ErrorKind::Many0 - ))) - ); - } - - #[cfg(nightly)] - use test::Bencher; - - #[cfg(nightly)] - #[bench] - fn many0_bench(b: &mut Bencher) { - named!(multi<&[u8],Vec<&[u8]> >, many0!(tag!("abcd"))); - b.iter(|| multi(&b"abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"[..])); - } - - #[test] - #[cfg(feature = "alloc")] - fn many1() { - named!(multi<&[u8],Vec<&[u8]> >, many1!(tag!("abcd"))); - - let a = &b"abcdef"[..]; - let b = &b"abcdabcdefgh"[..]; - let c = &b"azerty"[..]; - let d = &b"abcdab"[..]; - - let res1 = vec![&b"abcd"[..]]; - assert_eq!(multi(a), Ok((&b"ef"[..], res1))); - let res2 = vec![&b"abcd"[..], &b"abcd"[..]]; - assert_eq!(multi(b), Ok((&b"efgh"[..], res2))); - assert_eq!( - multi(c), - Err(Err::Error(error_position!(c, ErrorKind::Many1))) - ); - assert_eq!(multi(d), Err(Err::Incomplete(Needed::Size(4)))); - } - - #[test] - #[cfg(feature = "alloc")] - fn many_till() { - named!(multi<&[u8], (Vec<&[u8]>, &[u8]) >, many_till!( tag!( "abcd" ), tag!( "efgh" ) ) ); - - let a = b"abcdabcdefghabcd"; - let b = b"efghabcd"; - let c = b"azerty"; - - let res_a = (vec![&b"abcd"[..], &b"abcd"[..]], &b"efgh"[..]); - let res_b: (Vec<&[u8]>, &[u8]) = (Vec::new(), &b"efgh"[..]); - assert_eq!(multi(&a[..]), Ok((&b"abcd"[..], res_a))); - assert_eq!(multi(&b[..]), Ok((&b"abcd"[..], res_b))); - assert_eq!( - multi(&c[..]), - Err(Err::Error(error_node_position!( - &c[..], - ErrorKind::ManyTill, - error_position!(&c[..], ErrorKind::Tag) - ))) - ); - } - - #[test] - #[cfg(feature = "std")] - fn infinite_many() { - fn tst(input: &[u8]) -> IResult<&[u8], &[u8]> { - println!("input: {:?}", input); - Err(Err::Error(error_position!(input, ErrorKind::Custom(0u32)))) - } - - // should not go into an infinite loop - named!(multi0<&[u8],Vec<&[u8]> >, many0!(tst)); - let a = &b"abcdef"[..]; - assert_eq!(multi0(a), Ok((a, Vec::new()))); - - named!(multi1<&[u8],Vec<&[u8]> >, many1!(tst)); - let a = &b"abcdef"[..]; - assert_eq!( - multi1(a), - Err(Err::Error(error_position!(a, ErrorKind::Many1))) - ); - } - - #[test] - #[cfg(feature = "alloc")] - fn many_m_n() { - named!(multi<&[u8],Vec<&[u8]> >, 
many_m_n!(2, 4, tag!("Abcd"))); - - let a = &b"Abcdef"[..]; - let b = &b"AbcdAbcdefgh"[..]; - let c = &b"AbcdAbcdAbcdAbcdefgh"[..]; - let d = &b"AbcdAbcdAbcdAbcdAbcdefgh"[..]; - let e = &b"AbcdAb"[..]; - - assert_eq!( - multi(a), - Err(Err::Error(error_position!(a, ErrorKind::ManyMN))) - ); - let res1 = vec![&b"Abcd"[..], &b"Abcd"[..]]; - assert_eq!(multi(b), Ok((&b"efgh"[..], res1))); - let res2 = vec![&b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..]]; - assert_eq!(multi(c), Ok((&b"efgh"[..], res2))); - let res3 = vec![&b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..]]; - assert_eq!(multi(d), Ok((&b"Abcdefgh"[..], res3))); - assert_eq!(multi(e), Err(Err::Incomplete(Needed::Size(4)))); - } - - #[test] - #[cfg(feature = "alloc")] - fn count() { - const TIMES: usize = 2; - named!(tag_abc, tag!("abc")); - named!( cnt_2<&[u8], Vec<&[u8]> >, count!(tag_abc, TIMES ) ); - - assert_eq!( - cnt_2(&b"abcabcabcdef"[..]), - Ok((&b"abcdef"[..], vec![&b"abc"[..], &b"abc"[..]])) - ); - assert_eq!(cnt_2(&b"ab"[..]), Err(Err::Incomplete(Needed::Size(3)))); - assert_eq!(cnt_2(&b"abcab"[..]), Err(Err::Incomplete(Needed::Size(3)))); - assert_eq!( - cnt_2(&b"xxx"[..]), - Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Count))) - ); - assert_eq!( - cnt_2(&b"xxxabcabcdef"[..]), - Err(Err::Error(error_position!( - &b"xxxabcabcdef"[..], - ErrorKind::Count - ))) - ); - assert_eq!( - cnt_2(&b"abcxxxabcdef"[..]), - Err(Err::Error(error_position!( - &b"abcxxxabcdef"[..], - ErrorKind::Count - ))) - ); - } - - #[test] - #[cfg(feature = "alloc")] - fn count_zero() { - const TIMES: usize = 0; - named!(tag_abc, tag!("abc")); - named!( counter_2<&[u8], Vec<&[u8]> >, count!(tag_abc, TIMES ) ); - - let done = &b"abcabcabcdef"[..]; - let parsed_done = Vec::new(); - let rest = done; - let incomplete_1 = &b"ab"[..]; - let parsed_incompl_1 = Vec::new(); - let incomplete_2 = &b"abcab"[..]; - let parsed_incompl_2 = Vec::new(); - let error = &b"xxx"[..]; - let error_remain = &b"xxx"[..]; - let parsed_err = Vec::new(); - let error_1 = &b"xxxabcabcdef"[..]; - let parsed_err_1 = Vec::new(); - let error_1_remain = &b"xxxabcabcdef"[..]; - let error_2 = &b"abcxxxabcdef"[..]; - let parsed_err_2 = Vec::new(); - let error_2_remain = &b"abcxxxabcdef"[..]; - - assert_eq!(counter_2(done), Ok((rest, parsed_done))); - assert_eq!( - counter_2(incomplete_1), - Ok((incomplete_1, parsed_incompl_1)) - ); - assert_eq!( - counter_2(incomplete_2), - Ok((incomplete_2, parsed_incompl_2)) - ); - assert_eq!(counter_2(error), Ok((error_remain, parsed_err))); - assert_eq!(counter_2(error_1), Ok((error_1_remain, parsed_err_1))); - assert_eq!(counter_2(error_2), Ok((error_2_remain, parsed_err_2))); - } - - #[test] - fn count_fixed() { - const TIMES: usize = 2; - named!(tag_abc, tag!("abc")); - named!( cnt_2<&[u8], [&[u8]; TIMES] >, count_fixed!(&[u8], tag_abc, TIMES ) ); - - assert_eq!( - cnt_2(&b"abcabcabcdef"[..]), - Ok((&b"abcdef"[..], [&b"abc"[..], &b"abc"[..]])) - ); - assert_eq!(cnt_2(&b"ab"[..]), Err(Err::Incomplete(Needed::Size(3)))); - assert_eq!(cnt_2(&b"abcab"[..]), Err(Err::Incomplete(Needed::Size(3)))); - assert_eq!( - cnt_2(&b"xxx"[..]), - Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Count))) - ); - assert_eq!( - cnt_2(&b"xxxabcabcdef"[..]), - Err(Err::Error(error_position!( - &b"xxxabcabcdef"[..], - ErrorKind::Count - ))) - ); - assert_eq!( - cnt_2(&b"abcxxxabcdef"[..]), - Err(Err::Error(error_position!( - &b"abcxxxabcdef"[..], - ErrorKind::Count - ))) - ); - } - - #[allow(dead_code)] - pub fn compile_count_fixed(input: 
&[u8]) -> IResult<&[u8], ()> { - do_parse!( - input, - tag!("abcd") >> count_fixed!(u16, le_u16, 4) >> eof!() >> () - ) - } - - #[derive(Debug, Clone, PartialEq)] - pub struct NilError; - - impl From for NilError { - fn from(_: u32) -> Self { - NilError - } - } - - #[allow(unused_variables)] - #[test] - fn count_fixed_no_type() { - const TIMES: usize = 2; - named!(tag_abc, tag!("abc")); - named!( counter_2<&[u8], [&[u8]; TIMES], NilError >, count_fixed!(&[u8], fix_error!(NilError, tag_abc), TIMES ) ); - - let done = &b"abcabcabcdef"[..]; - let parsed_main = [&b"abc"[..], &b"abc"[..]]; - let rest = &b"abcdef"[..]; - let incomplete_1 = &b"ab"[..]; - let incomplete_2 = &b"abcab"[..]; - let error = &b"xxx"[..]; - let error_1 = &b"xxxabcabcdef"[..]; - let error_1_remain = &b"xxxabcabcdef"[..]; - let error_2 = &b"abcxxxabcdef"[..]; - let error_2_remain = &b"abcxxxabcdef"[..]; - - assert_eq!(counter_2(done), Ok((rest, parsed_main))); - assert_eq!( - counter_2(incomplete_1), - Err(Err::Incomplete(Needed::Size(3))) - ); - assert_eq!( - counter_2(incomplete_2), - Err(Err::Incomplete(Needed::Size(3))) - ); - assert_eq!( - counter_2(error), - Err(Err::Error(error_position!(error, ErrorKind::Count))) - ); - assert_eq!( - counter_2(error_1), - Err(Err::Error(error_position!( - error_1_remain, - ErrorKind::Count - ))) - ); - assert_eq!( - counter_2(error_2), - Err(Err::Error(error_position!( - error_2_remain, - ErrorKind::Count - ))) - ); - } - - named!(pub number, map_res!( - map_res!( - digit, - str::from_utf8 - ), - FromStr::from_str - )); - - #[test] - #[cfg(feature = "alloc")] - fn length_count() { - named!(tag_abc, tag!(&b"abc"[..])); - named!( cnt<&[u8], Vec<&[u8]> >, length_count!(number, tag_abc) ); - - assert_eq!( - cnt(&b"2abcabcabcdef"[..]), - Ok((&b"abcdef"[..], vec![&b"abc"[..], &b"abc"[..]])) - ); - assert_eq!(cnt(&b"2ab"[..]), Err(Err::Incomplete(Needed::Size(3)))); - assert_eq!(cnt(&b"3abcab"[..]), Err(Err::Incomplete(Needed::Size(3)))); - assert_eq!( - cnt(&b"xxx"[..]), - Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Digit))) - ); - assert_eq!( - cnt(&b"2abcxxx"[..]), - Err(Err::Error(error_position!( - &b"abcxxx"[..], - ErrorKind::Count - ))) - ); - } - - #[test] - fn length_data() { - named!( take<&[u8], &[u8]>, length_data!(number) ); - - assert_eq!( - take(&b"6abcabcabcdef"[..]), - Ok((&b"abcdef"[..], &b"abcabc"[..])) - ); - assert_eq!(take(&b"3ab"[..]), Err(Err::Incomplete(Needed::Size(3)))); - assert_eq!( - take(&b"xxx"[..]), - Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Digit))) - ); - assert_eq!(take(&b"2abcxxx"[..]), Ok((&b"cxxx"[..], &b"ab"[..]))); - } - - #[test] - fn length_value_test() { - named!(length_value_1<&[u8], u16 >, length_value!(be_u8, be_u16)); - named!(length_value_2<&[u8], (u8, u8) >, length_value!(be_u8, tuple!(be_u8, be_u8))); - - let i1 = [0, 5, 6]; - assert_eq!( - length_value_1(&i1), - Err(Err::Error(error_position!(&b""[..], ErrorKind::Complete))) - ); - assert_eq!( - length_value_2(&i1), - Err(Err::Error(error_position!(&b""[..], ErrorKind::Complete))) - ); - - let i2 = [1, 5, 6, 3]; - assert_eq!( - length_value_1(&i2), - Err(Err::Error(error_position!(&i2[1..2], ErrorKind::Complete))) - ); - assert_eq!( - length_value_2(&i2), - Err(Err::Error(error_position!(&i2[1..2], ErrorKind::Complete))) - ); - - let i3 = [2, 5, 6, 3, 4, 5, 7]; - assert_eq!(length_value_1(&i3), Ok((&i3[3..], 1286))); - assert_eq!(length_value_2(&i3), Ok((&i3[3..], (5, 6)))); - - let i4 = [3, 5, 6, 3, 4, 5]; - assert_eq!(length_value_1(&i4), Ok((&i4[4..], 1286))); 
- assert_eq!(length_value_2(&i4), Ok((&i4[4..], (5, 6)))); - } - - #[test] - #[cfg(feature = "alloc")] - fn fold_many0() { - fn fold_into_vec(mut acc: Vec, item: T) -> Vec { - acc.push(item); - acc - }; - named!(tag_abcd, tag!("abcd")); - named!(tag_empty, tag!("")); - named!( multi<&[u8],Vec<&[u8]> >, fold_many0!(tag_abcd, Vec::new(), fold_into_vec) ); - named!( multi_empty<&[u8],Vec<&[u8]> >, fold_many0!(tag_empty, Vec::new(), fold_into_vec) ); - - assert_eq!(multi(&b"abcdef"[..]), Ok((&b"ef"[..], vec![&b"abcd"[..]]))); - assert_eq!( - multi(&b"abcdabcdefgh"[..]), - Ok((&b"efgh"[..], vec![&b"abcd"[..], &b"abcd"[..]])) - ); - assert_eq!(multi(&b"azerty"[..]), Ok((&b"azerty"[..], Vec::new()))); - assert_eq!(multi(&b"abcdab"[..]), Err(Err::Incomplete(Needed::Size(4)))); - assert_eq!(multi(&b"abcd"[..]), Err(Err::Incomplete(Needed::Size(4)))); - assert_eq!(multi(&b""[..]), Err(Err::Incomplete(Needed::Size(4)))); - assert_eq!( - multi_empty(&b"abcdef"[..]), - Err(Err::Error(error_position!( - &b"abcdef"[..], - ErrorKind::Many0 - ))) - ); - } - - #[test] - #[cfg(feature = "alloc")] - fn fold_many1() { - fn fold_into_vec(mut acc: Vec, item: T) -> Vec { - acc.push(item); - acc - }; - named!(multi<&[u8],Vec<&[u8]> >, fold_many1!(tag!("abcd"), Vec::new(), fold_into_vec)); - - let a = &b"abcdef"[..]; - let b = &b"abcdabcdefgh"[..]; - let c = &b"azerty"[..]; - let d = &b"abcdab"[..]; - - let res1 = vec![&b"abcd"[..]]; - assert_eq!(multi(a), Ok((&b"ef"[..], res1))); - let res2 = vec![&b"abcd"[..], &b"abcd"[..]]; - assert_eq!(multi(b), Ok((&b"efgh"[..], res2))); - assert_eq!( - multi(c), - Err(Err::Error(error_position!(c, ErrorKind::Many1))) - ); - assert_eq!(multi(d), Err(Err::Incomplete(Needed::Size(4)))); - } - - #[test] - #[cfg(feature = "alloc")] - fn fold_many_m_n() { - fn fold_into_vec(mut acc: Vec, item: T) -> Vec { - acc.push(item); - acc - }; - named!(multi<&[u8],Vec<&[u8]> >, fold_many_m_n!(2, 4, tag!("Abcd"), Vec::new(), fold_into_vec)); - - let a = &b"Abcdef"[..]; - let b = &b"AbcdAbcdefgh"[..]; - let c = &b"AbcdAbcdAbcdAbcdefgh"[..]; - let d = &b"AbcdAbcdAbcdAbcdAbcdefgh"[..]; - let e = &b"AbcdAb"[..]; - - assert_eq!( - multi(a), - Err(Err::Error(error_position!(a, ErrorKind::ManyMN))) - ); - let res1 = vec![&b"Abcd"[..], &b"Abcd"[..]]; - assert_eq!(multi(b), Ok((&b"efgh"[..], res1))); - let res2 = vec![&b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..]]; - assert_eq!(multi(c), Ok((&b"efgh"[..], res2))); - let res3 = vec![&b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..]]; - assert_eq!(multi(d), Ok((&b"Abcdefgh"[..], res3))); - assert_eq!(multi(e), Err(Err::Incomplete(Needed::Size(4)))); - } - -} diff --git a/third_party/rust/nom/src/multi/macros.rs b/third_party/rust/nom/src/multi/macros.rs new file mode 100644 index 0000000000..10b66b8424 --- /dev/null +++ b/third_party/rust/nom/src/multi/macros.rs @@ -0,0 +1,988 @@ +//! 
Parsers for applying parsers multiple times + +/// `separated_list!(I -> IResult, I -> IResult) => I -> IResult>` +/// separated_list(sep, X) returns a Vec +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::multi::separated_list; +/// use nom::bytes::complete::tag; +/// +/// # fn main() { +/// named!(parser<&str, Vec<&str>>, separated_list!(tag("|"), tag("abc"))); +/// +/// assert_eq!(parser("abc|abc|abc"), Ok(("", vec!["abc", "abc", "abc"]))); +/// assert_eq!(parser("abc123abc"), Ok(("123abc", vec!["abc"]))); +/// assert_eq!(parser("abc|def"), Ok(("|def", vec!["abc"]))); +/// assert_eq!(parser(""), Ok(("", vec![]))); +/// assert_eq!(parser("def|abc"), Ok(("def|abc", vec![]))); +/// # } +/// ``` +#[cfg(feature = "alloc")] +#[macro_export(local_inner_macros)] +macro_rules! separated_list( + ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( + separated_list!($i, |i| $submac!(i, $($args)*), |i| $submac2!(i, $($args2)*)) + ); + + ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( + separated_list!($i, |i| $submac!(i, $($args)*), $g); + ); + + ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( + separated_list!($i, $f, |i| $submac!(i, $($args)*)); + ); + + ($i:expr, $f:expr, $g:expr) => ( + $crate::multi::separated_listc($i, $f, $g) + ); +); + +/// `separated_nonempty_list!(I -> IResult, I -> IResult) => I -> IResult>` +/// separated_nonempty_list(sep, X) returns a Vec +/// +/// it will return an error if there is no element in the list +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::multi::separated_nonempty_list; +/// use nom::bytes::complete::tag; +/// +/// # fn main() { +/// named!(parser<&str, Vec<&str>>, separated_nonempty_list!(tag("|"), tag("abc"))); +/// +/// assert_eq!(parser("abc|abc|abc"), Ok(("", vec!["abc", "abc", "abc"]))); +/// assert_eq!(parser("abc123abc"), Ok(("123abc", vec!["abc"]))); +/// assert_eq!(parser("abc|def"), Ok(("|def", vec!["abc"]))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Tag)))); +/// assert_eq!(parser("def|abc"), Err(Err::Error(("def|abc", ErrorKind::Tag)))); +/// # } +/// ``` +#[cfg(feature = "alloc")] +#[macro_export(local_inner_macros)] +macro_rules! separated_nonempty_list( + ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( + separated_nonempty_list!($i, |i| $submac!(i, $($args)*), |i| $submac2!(i, $($args2)*)) + ); + + ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( + separated_nonempty_list!($i, |i| $submac!(i, $($args)*), $g); + ); + + ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( + separated_nonempty_list!($i, $f, |i| $submac!(i, $($args)*)); + ); + + ($i:expr, $f:expr, $g:expr) => ( + $crate::multi::separated_nonempty_listc($i, $f, $g) + ); +); + +/// `many0!(I -> IResult) => I -> IResult>` +/// Applies the parser 0 or more times and returns the list of results in a Vec. +/// +/// The embedded parser may return Incomplete. +/// +/// `many0` will only return `Error` if the embedded parser does not consume any input +/// (to avoid infinite loops). 
+/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # fn main() { +/// named!(multi<&[u8], Vec<&[u8]> >, many0!( tag!( "abcd" ) ) ); +/// +/// let a = b"abcdabcdefgh"; +/// let b = b"azerty"; +/// +/// let res = vec![&b"abcd"[..], &b"abcd"[..]]; +/// assert_eq!(multi(&a[..]),Ok((&b"efgh"[..], res))); +/// assert_eq!(multi(&b[..]),Ok((&b"azerty"[..], Vec::new()))); +/// # } +/// ``` +/// +#[cfg(feature = "alloc")] +#[macro_export(local_inner_macros)] +macro_rules! many0( + ($i:expr, $submac:ident!( $($args:tt)* )) => ( + many0!($i, |i| $submac!(i, $($args)*)) + ); + ($i:expr, $f:expr) => ( + $crate::multi::many0c($i, $f) + ); +); + +/// `many1!(I -> IResult) => I -> IResult>` +/// Applies the parser 1 or more times and returns the list of results in a Vec +/// +/// the embedded parser may return Incomplete +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::Err; +/// # use nom::error::ErrorKind; +/// # fn main() { +/// named!(multi<&[u8], Vec<&[u8]> >, many1!( tag!( "abcd" ) ) ); +/// +/// let a = b"abcdabcdefgh"; +/// let b = b"azerty"; +/// +/// let res = vec![&b"abcd"[..], &b"abcd"[..]]; +/// assert_eq!(multi(&a[..]), Ok((&b"efgh"[..], res))); +/// assert_eq!(multi(&b[..]), Err(Err::Error(error_position!(&b[..], ErrorKind::Tag)))); +/// # } +/// ``` +#[cfg(feature = "alloc")] +#[macro_export(local_inner_macros)] +macro_rules! many1( + ($i:expr, $submac:ident!( $($args:tt)* )) => ( + many1!($i, |i| $submac!(i, $($args)*)) + ); + ($i:expr, $f:expr) => ( + $crate::multi::many1c($i, $f) + ); +); + +/// `many_till!(I -> IResult, I -> IResult) => I -> IResult, P)>` +/// Applies the first parser until the second applies. Returns a tuple containing the list +/// of results from the first in a Vec and the result of the second. +/// +/// The first embedded parser may return Incomplete +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::Err; +/// # use nom::error::ErrorKind; +/// # fn main() { +/// named!(multi<&[u8], (Vec<&[u8]>, &[u8]) >, many_till!( tag!( "abcd" ), tag!( "efgh" ) ) ); +/// +/// let a = b"abcdabcdefghabcd"; +/// let b = b"efghabcd"; +/// let c = b"azerty"; +/// +/// let res_a = (vec![&b"abcd"[..], &b"abcd"[..]], &b"efgh"[..]); +/// let res_b: (Vec<&[u8]>, &[u8]) = (Vec::new(), &b"efgh"[..]); +/// assert_eq!(multi(&a[..]),Ok((&b"abcd"[..], res_a))); +/// assert_eq!(multi(&b[..]),Ok((&b"abcd"[..], res_b))); +/// assert_eq!(multi(&c[..]), Err(Err::Error(error_node_position!(&c[..], ErrorKind::ManyTill, +/// error_position!(&c[..], ErrorKind::Tag))))); +/// # } +/// ``` +#[cfg(feature = "alloc")] +#[macro_export(local_inner_macros)] +macro_rules! 
many_till( + ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( + many_till!($i, |i| $submac!(i, $($args)*), |i| $submac2!(i, $($args2)*)) + ); + + ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( + many_till!($i, |i| $submac!(i, $($args)*), $g); + ); + + ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( + many_till!($i, $f, |i| $submac!(i, $($args)*)); + ); + + ($i:expr, $f:expr, $g:expr) => ( + $crate::multi::many_tillc($i, $f, $g) + ); +); + +/// `many_m_n!(usize, usize, I -> IResult) => I -> IResult>` +/// Applies the parser between m and n times (n included) and returns the list of +/// results in a Vec +/// +/// the embedded parser may return Incomplete +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::Err; +/// # use nom::error::ErrorKind; +/// # fn main() { +/// named!(multi<&[u8], Vec<&[u8]> >, many_m_n!(2, 4, tag!( "abcd" ) ) ); +/// +/// let a = b"abcdefgh"; +/// let b = b"abcdabcdefgh"; +/// let c = b"abcdabcdabcdabcdabcdefgh"; +/// +/// assert_eq!(multi(&a[..]), Err(Err::Error(error_position!(&b"efgh"[..], ErrorKind::Tag)))); +/// let res = vec![&b"abcd"[..], &b"abcd"[..]]; +/// assert_eq!(multi(&b[..]),Ok((&b"efgh"[..], res))); +/// let res2 = vec![&b"abcd"[..], &b"abcd"[..], &b"abcd"[..], &b"abcd"[..]]; +/// assert_eq!(multi(&c[..]),Ok((&b"abcdefgh"[..], res2))); +/// # } +/// ``` +#[cfg(feature = "alloc")] +#[macro_export(local_inner_macros)] +macro_rules! many_m_n( + ($i:expr, $m:expr, $n: expr, $submac:ident!( $($args:tt)* )) => ( + many_m_n!($i, $m, $n, |i| $submac!(i, $($args)*)) + ); + ($i:expr, $m:expr, $n: expr, $f:expr) => ( + $crate::multi::many_m_nc($i, $m, $n, $f) + ); +); + +/// `many0_count!(I -> IResult) => I -> IResult` +/// Applies the parser 0 or more times and returns the number of times the parser was applied. +/// +/// `many0_count` will only return `Error` if the embedded parser does not consume any input +/// (to avoid infinite loops). +/// +/// ``` +/// #[macro_use] extern crate nom; +/// use nom::character::streaming::digit1; +/// +/// named!(number<&[u8], usize>, many0_count!(pair!(digit1, tag!(",")))); +/// +/// fn main() { +/// assert_eq!(number(&b"123,45,abc"[..]), Ok((&b"abc"[..], 2))); +/// } +/// ``` +/// +#[macro_export] +macro_rules! many0_count { + ($i:expr, $submac:ident!( $($args:tt)* )) => ( + $crate::multi::many0_countc($i, |i| $submac!(i, $($args)*)) + ); + + ($i:expr, $f:expr) => ( + $crate::multi::many0_countc($i, $f) + ); +} + +/// `many1_count!(I -> IResult) => I -> IResult` +/// Applies the parser 1 or more times and returns the number of times the parser was applied. +/// +/// ``` +/// #[macro_use] extern crate nom; +/// use nom::character::streaming::digit1; +/// +/// named!(number<&[u8], usize>, many1_count!(pair!(digit1, tag!(",")))); +/// +/// fn main() { +/// assert_eq!(number(&b"123,45,abc"[..]), Ok((&b"abc"[..], 2))); +/// } +/// ``` +/// +#[macro_export] +macro_rules! 
many1_count { + ($i:expr, $submac:ident!( $($args:tt)* )) => ( + $crate::multi::many1_countc($i, |i| $submac!(i, $($args)*)) + ); + + ($i:expr, $f:expr) => ( + $crate::multi::many1_countc($i, $f) + ); +} + +/// `count!(I -> IResult, nb) => I -> IResult>` +/// Applies the child parser a specified number of times +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::Err; +/// # use nom::error::ErrorKind; +/// # fn main() { +/// named!(counter< Vec<&[u8]> >, count!( tag!( "abcd" ), 2 ) ); +/// +/// let a = b"abcdabcdabcdef"; +/// let b = b"abcdefgh"; +/// let res = vec![&b"abcd"[..], &b"abcd"[..]]; +/// +/// assert_eq!(counter(&a[..]),Ok((&b"abcdef"[..], res))); +/// assert_eq!(counter(&b[..]), Err(Err::Error(error_position!(&b"efgh"[..], ErrorKind::Tag)))); +/// # } +/// ``` +/// +#[cfg(feature = "alloc")] +#[macro_export(local_inner_macros)] +macro_rules! count( + ($i:expr, $submac:ident!( $($args:tt)* ), $count: expr) => ( + count!($i, |i| $submac!(i, $($args)*), $count) + ); + ($i:expr, $f:expr, $count: expr) => ( + $crate::multi::count($f, $count)($i) + ); +); + +/// `length_count!(I -> IResult, I -> IResult) => I -> IResult>` +/// gets a number from the first parser, then applies the second parser that many times +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, Needed}; +/// # use nom::error::ErrorKind; +/// use nom::number::complete::be_u8; +/// # fn main() { +/// named!(parser>, length_count!(be_u8, tag!("abc"))); +/// +/// assert_eq!(parser(&b"\x02abcabcabc"[..]), Ok(((&b"abc"[..], vec![&b"abc"[..], &b"abc"[..]])))); +/// assert_eq!(parser(&b"\x04abcabcabc"[..]), Err(Err::Incomplete(Needed::Size(3)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +#[cfg(feature = "alloc")] +macro_rules! length_count( + ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( + { + use $crate::lib::std::result::Result::*; + use $crate::Err; + + match $submac!($i, $($args)*) { + Err(e) => Err(Err::convert(e)), + Ok((i, o)) => { + match count!(i, $submac2!($($args2)*), o as usize) { + Err(e) => Err(Err::convert(e)), + Ok((i2, o2)) => Ok((i2, o2)) + } + } + } + } + ); + + ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( + length_count!($i, $submac!($($args)*), call!($g)); + ); + + ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( + length_count!($i, call!($f), $submac!($($args)*)); + ); + + ($i:expr, $f:expr, $g:expr) => ( + length_count!($i, call!($f), call!($g)); + ); +); + +/// `length_data!(I -> IResult) => O` +/// +/// `length_data` gets a number from the first parser, then takes a subslice of the input +/// of that size and returns that subslice +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, Needed}; +/// # use nom::error::ErrorKind; +/// use nom::number::complete::be_u8; +/// # fn main() { +/// named!(parser, length_data!(be_u8)); +/// +/// assert_eq!(parser(&b"\x06abcabcabc"[..]), Ok((&b"abc"[..], &b"abcabc"[..]))); +/// assert_eq!(parser(&b"\x06abc"[..]), Err(Err::Incomplete(Needed::Size(6)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! length_data( + ($i:expr, $submac:ident!( $($args:tt)* )) => ({ + $crate::multi::length_data(|i| $submac!(i, $($args)*))($i) + }); + + ($i:expr, $f:expr) => ( + $crate::multi::length_data($f)($i) + ); +); + +/// `length_value!(I -> IResult, I -> IResult) => I -> IResult` +/// +/// Gets a number from the first parser, takes a subslice of the input of that size, +/// then applies the second parser on that subslice. 
If the second parser returns +/// `Incomplete`, `length_value` will return an error +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, Needed}; +/// # use nom::error::ErrorKind; +/// use nom::number::complete::be_u8; +/// use nom::character::complete::alpha0; +/// use nom::bytes::complete::tag; +/// # fn main() { +/// named!(parser, length_value!(be_u8, alpha0)); +/// +/// assert_eq!(parser(&b"\x06abcabcabc"[..]), Ok((&b"abc"[..], &b"abcabc"[..]))); +/// assert_eq!(parser(&b"\x06abc"[..]), Err(Err::Incomplete(Needed::Size(6)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! length_value( + ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( + length_value!($i, |i| $submac!(i, $($args)*), |i| $submac2!(i, $($args2)*)) + ); + + ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( + length_value!($i, |i| $submac!(i, $($args)*), $g); + ); + + ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( + length_value!($i, $f, |i| $submac!(i, $($args)*)); + ); + + ($i:expr, $f:expr, $g:expr) => ( + $crate::multi::length_valuec($i, $f, $g); + ); +); + +/// `fold_many0!(I -> IResult, R, Fn(R, O) -> R) => I -> IResult` +/// Applies the parser 0 or more times and folds the list of return values +/// +/// the embedded parser may return Incomplete +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # fn main() { +/// named!(multi<&[u8], Vec<&[u8]> >, +/// fold_many0!( tag!( "abcd" ), Vec::new(), |mut acc: Vec<_>, item| { +/// acc.push(item); +/// acc +/// })); +/// +/// let a = b"abcdabcdefgh"; +/// let b = b"azerty"; +/// +/// let res = vec![&b"abcd"[..], &b"abcd"[..]]; +/// assert_eq!(multi(&a[..]),Ok((&b"efgh"[..], res))); +/// assert_eq!(multi(&b[..]),Ok((&b"azerty"[..], Vec::new()))); +/// # } +/// ``` +/// 0 or more +#[macro_export(local_inner_macros)] +macro_rules! fold_many0( + ($i:expr, $submac:ident!( $($args:tt)* ), $init:expr, $fold_f:expr) => ( + fold_many0!($i, |i| $submac!(i, $($args)*), $init, $fold_f) + ); + ($i:expr, $f:expr, $init:expr, $fold_f:expr) => ( + $crate::multi::fold_many0($f, $init, $fold_f)($i) + ); +); + +/// `fold_many1!(I -> IResult, R, Fn(R, O) -> R) => I -> IResult` +/// Applies the parser 1 or more times and folds the list of return values +/// +/// the embedded parser may return Incomplete +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::Err; +/// # use nom::error::ErrorKind; +/// # fn main() { +/// named!(multi<&[u8], Vec<&[u8]> >, +/// fold_many1!( tag!( "abcd" ), Vec::new(), |mut acc: Vec<_>, item| { +/// acc.push(item); +/// acc +/// })); +/// +/// let a = b"abcdabcdefgh"; +/// let b = b"azerty"; +/// +/// let res = vec![&b"abcd"[..], &b"abcd"[..]]; +/// assert_eq!(multi(&a[..]),Ok((&b"efgh"[..], res))); +/// assert_eq!(multi(&b[..]), Err(Err::Error(error_position!(&b[..], ErrorKind::Many1)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! 
fold_many1( + ($i:expr, $submac:ident!( $($args:tt)* ), $init:expr, $fold_f:expr) => ( + fold_many1!($i, |i| $submac!(i, $($args)*), $init, $fold_f) + ); + ($i:expr, $f:expr, $init:expr, $fold_f:expr) => ( + $crate::multi::fold_many1c($i, $f, $init, $fold_f) + ); + ($i:expr, $f:expr, $init:expr, $fold_f:expr) => ( + fold_many1!($i, call!($f), $init, $fold_f); + ); +); + +/// `fold_many_m_n!(usize, usize, I -> IResult, R, Fn(R, O) -> R) => I -> IResult` +/// Applies the parser between m and n times (n included) and folds the list of return value +/// +/// the embedded parser may return Incomplete +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::Err; +/// # use nom::error::ErrorKind; +/// # fn main() { +/// named!(multi<&[u8], Vec<&[u8]> >, +/// fold_many_m_n!(2, 4, tag!( "abcd" ), Vec::new(), |mut acc: Vec<_>, item| { +/// acc.push(item); +/// acc +/// })); +/// +/// let a = b"abcdefgh"; +/// let b = b"abcdabcdefgh"; +/// let c = b"abcdabcdabcdabcdabcdefgh"; +/// +/// assert_eq!(multi(&a[..]), Err(Err::Error(error_position!(&a[..], ErrorKind::ManyMN)))); +/// let res = vec![&b"abcd"[..], &b"abcd"[..]]; +/// assert_eq!(multi(&b[..]),Ok((&b"efgh"[..], res))); +/// let res2 = vec![&b"abcd"[..], &b"abcd"[..], &b"abcd"[..], &b"abcd"[..]]; +/// assert_eq!(multi(&c[..]),Ok((&b"abcdefgh"[..], res2))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! fold_many_m_n( + ($i:expr, $m:expr, $n:expr, $submac:ident!( $($args:tt)* ), $init:expr, $fold_f:expr) => ( + fold_many_m_n!($i, $m, $n, |i| $submac!(i, $($args)*), $init, $fold_f) + ); + ($i:expr, $m:expr, $n:expr, $f:expr, $init:expr, $fold_f:expr) => ( + $crate::multi::fold_many_m_nc($i, $m, $n, $f, $init, $fold_f) + ); +); + +#[cfg(test)] +mod tests { + use crate::internal::{Err, IResult, Needed}; + use crate::error::ParseError; + use crate::lib::std::str::{self, FromStr}; + #[cfg(feature = "alloc")] + use crate::lib::std::vec::Vec; + use crate::character::streaming::digit1 as digit; + use crate::number::streaming::{be_u16, be_u8}; + use crate::error::ErrorKind; + + // reproduce the tag and take macros, because of module import order + macro_rules! tag ( + ($i:expr, $inp: expr) => ( + { + #[inline(always)] + fn as_bytes(b: &T) -> &[u8] { + b.as_bytes() + } + + let expected = $inp; + let bytes = as_bytes(&expected); + + tag_bytes!($i,bytes) + } + ); + ); + + macro_rules! 
tag_bytes ( + ($i:expr, $bytes: expr) => ( + { + use $crate::lib::std::cmp::min; + let len = $i.len(); + let blen = $bytes.len(); + let m = min(len, blen); + let reduced = &$i[..m]; + let b = &$bytes[..m]; + + let res: IResult<_,_,_> = if reduced != b { + Err($crate::Err::Error($crate::error::make_error($i, $crate::error::ErrorKind::Tag))) + } else if m < blen { + Err($crate::Err::Incomplete(Needed::Size(blen))) + } else { + Ok((&$i[blen..], reduced)) + }; + res + } + ); + ); + + #[test] + #[cfg(feature = "alloc")] + fn separated_list() { + named!(multi<&[u8],Vec<&[u8]> >, separated_list!(tag!(","), tag!("abcd"))); + named!(multi_empty<&[u8],Vec<&[u8]> >, separated_list!(tag!(","), tag!(""))); + named!(multi_longsep<&[u8],Vec<&[u8]> >, separated_list!(tag!(".."), tag!("abcd"))); + + let a = &b"abcdef"[..]; + let b = &b"abcd,abcdef"[..]; + let c = &b"azerty"[..]; + let d = &b",,abc"[..]; + let e = &b"abcd,abcd,ef"[..]; + let f = &b"abc"[..]; + let g = &b"abcd."[..]; + let h = &b"abcd,abc"[..]; + + let res1 = vec![&b"abcd"[..]]; + assert_eq!(multi(a), Ok((&b"ef"[..], res1))); + let res2 = vec![&b"abcd"[..], &b"abcd"[..]]; + assert_eq!(multi(b), Ok((&b"ef"[..], res2))); + assert_eq!(multi(c), Ok((&b"azerty"[..], Vec::new()))); + assert_eq!(multi_empty(d), Err(Err::Error(error_position!(d, ErrorKind::SeparatedList)))); + //let res3 = vec![&b""[..], &b""[..], &b""[..]]; + //assert_eq!(multi_empty(d),Ok((&b"abc"[..], res3))); + let res4 = vec![&b"abcd"[..], &b"abcd"[..]]; + assert_eq!(multi(e), Ok((&b",ef"[..], res4))); + + assert_eq!(multi(f), Err(Err::Incomplete(Needed::Size(4)))); + assert_eq!(multi_longsep(g), Err(Err::Incomplete(Needed::Size(2)))); + assert_eq!(multi(h), Err(Err::Incomplete(Needed::Size(4)))); + } + + #[test] + #[cfg(feature = "alloc")] + fn separated_nonempty_list() { + named!(multi<&[u8],Vec<&[u8]> >, separated_nonempty_list!(tag!(","), tag!("abcd"))); + named!(multi_longsep<&[u8],Vec<&[u8]> >, separated_nonempty_list!(tag!(".."), tag!("abcd"))); + + let a = &b"abcdef"[..]; + let b = &b"abcd,abcdef"[..]; + let c = &b"azerty"[..]; + let d = &b"abcd,abcd,ef"[..]; + + let f = &b"abc"[..]; + let g = &b"abcd."[..]; + let h = &b"abcd,abc"[..]; + + let res1 = vec![&b"abcd"[..]]; + assert_eq!(multi(a), Ok((&b"ef"[..], res1))); + let res2 = vec![&b"abcd"[..], &b"abcd"[..]]; + assert_eq!(multi(b), Ok((&b"ef"[..], res2))); + assert_eq!(multi(c), Err(Err::Error(error_position!(c, ErrorKind::Tag)))); + let res3 = vec![&b"abcd"[..], &b"abcd"[..]]; + assert_eq!(multi(d), Ok((&b",ef"[..], res3))); + + assert_eq!(multi(f), Err(Err::Incomplete(Needed::Size(4)))); + assert_eq!(multi_longsep(g), Err(Err::Incomplete(Needed::Size(2)))); + assert_eq!(multi(h), Err(Err::Incomplete(Needed::Size(4)))); + } + + #[test] + #[cfg(feature = "alloc")] + fn many0() { + named!(tag_abcd, tag!("abcd")); + named!(tag_empty, tag!("")); + named!( multi<&[u8],Vec<&[u8]> >, many0!(tag_abcd) ); + named!( multi_empty<&[u8],Vec<&[u8]> >, many0!(tag_empty) ); + + assert_eq!(multi(&b"abcdef"[..]), Ok((&b"ef"[..], vec![&b"abcd"[..]]))); + assert_eq!(multi(&b"abcdabcdefgh"[..]), Ok((&b"efgh"[..], vec![&b"abcd"[..], &b"abcd"[..]]))); + assert_eq!(multi(&b"azerty"[..]), Ok((&b"azerty"[..], Vec::new()))); + assert_eq!(multi(&b"abcdab"[..]), Err(Err::Incomplete(Needed::Size(4)))); + assert_eq!(multi(&b"abcd"[..]), Err(Err::Incomplete(Needed::Size(4)))); + assert_eq!(multi(&b""[..]), Err(Err::Incomplete(Needed::Size(4)))); + assert_eq!( + multi_empty(&b"abcdef"[..]), + Err(Err::Error(error_position!(&b"abcdef"[..], 
ErrorKind::Many0))) + ); + } + + #[cfg(nightly)] + use test::Bencher; + + #[cfg(nightly)] + #[bench] + fn many0_bench(b: &mut Bencher) { + named!(multi<&[u8],Vec<&[u8]> >, many0!(tag!("abcd"))); + b.iter(|| multi(&b"abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd"[..])); + } + + #[test] + #[cfg(feature = "alloc")] + fn many1() { + named!(multi<&[u8],Vec<&[u8]> >, many1!(tag!("abcd"))); + + let a = &b"abcdef"[..]; + let b = &b"abcdabcdefgh"[..]; + let c = &b"azerty"[..]; + let d = &b"abcdab"[..]; + + let res1 = vec![&b"abcd"[..]]; + assert_eq!(multi(a), Ok((&b"ef"[..], res1))); + let res2 = vec![&b"abcd"[..], &b"abcd"[..]]; + assert_eq!(multi(b), Ok((&b"efgh"[..], res2))); + assert_eq!(multi(c), Err(Err::Error(error_position!(c, ErrorKind::Tag)))); + assert_eq!(multi(d), Err(Err::Incomplete(Needed::Size(4)))); + } + + #[test] + #[cfg(feature = "alloc")] + fn many_till() { + named!(multi<&[u8], (Vec<&[u8]>, &[u8]) >, many_till!( tag!( "abcd" ), tag!( "efgh" ) ) ); + + let a = b"abcdabcdefghabcd"; + let b = b"efghabcd"; + let c = b"azerty"; + + let res_a = (vec![&b"abcd"[..], &b"abcd"[..]], &b"efgh"[..]); + let res_b: (Vec<&[u8]>, &[u8]) = (Vec::new(), &b"efgh"[..]); + assert_eq!(multi(&a[..]), Ok((&b"abcd"[..], res_a))); + assert_eq!(multi(&b[..]), Ok((&b"abcd"[..], res_b))); + assert_eq!( + multi(&c[..]), + Err(Err::Error(error_node_position!( + &c[..], + ErrorKind::ManyTill, + error_position!(&c[..], ErrorKind::Tag) + ))) + ); + } + + #[test] + #[cfg(feature = "std")] + fn infinite_many() { + fn tst(input: &[u8]) -> IResult<&[u8], &[u8]> { + println!("input: {:?}", input); + Err(Err::Error(error_position!(input, ErrorKind::Tag))) + } + + // should not go into an infinite loop + named!(multi0<&[u8],Vec<&[u8]> >, many0!(tst)); + let a = &b"abcdef"[..]; + assert_eq!(multi0(a), Ok((a, Vec::new()))); + + named!(multi1<&[u8],Vec<&[u8]> >, many1!(tst)); + let a = &b"abcdef"[..]; + assert_eq!(multi1(a), Err(Err::Error(error_position!(a, ErrorKind::Tag)))); + } + + #[test] + #[cfg(feature = "alloc")] + fn many_m_n() { + named!(multi<&[u8],Vec<&[u8]> >, many_m_n!(2, 4, tag!("Abcd"))); + + let a = &b"Abcdef"[..]; + let b = &b"AbcdAbcdefgh"[..]; + let c = &b"AbcdAbcdAbcdAbcdefgh"[..]; + let d = &b"AbcdAbcdAbcdAbcdAbcdefgh"[..]; + let e = &b"AbcdAb"[..]; + + assert_eq!(multi(a), Err(Err::Error(error_position!(&b"ef"[..], ErrorKind::Tag)))); + let res1 = vec![&b"Abcd"[..], &b"Abcd"[..]]; + assert_eq!(multi(b), Ok((&b"efgh"[..], res1))); + let res2 = vec![&b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..]]; + assert_eq!(multi(c), Ok((&b"efgh"[..], res2))); + let res3 = vec![&b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..]]; + assert_eq!(multi(d), Ok((&b"Abcdefgh"[..], res3))); + assert_eq!(multi(e), Err(Err::Incomplete(Needed::Size(4)))); + } + + #[test] + #[cfg(feature = "alloc")] + fn count() { + const TIMES: usize = 2; + named!(tag_abc, tag!("abc")); + named!( cnt_2<&[u8], Vec<&[u8]> >, count!(tag_abc, TIMES ) ); + + assert_eq!(cnt_2(&b"abcabcabcdef"[..]), Ok((&b"abcdef"[..], vec![&b"abc"[..], &b"abc"[..]]))); + assert_eq!(cnt_2(&b"ab"[..]), Err(Err::Incomplete(Needed::Size(3)))); + assert_eq!(cnt_2(&b"abcab"[..]), Err(Err::Incomplete(Needed::Size(3)))); + assert_eq!(cnt_2(&b"xxx"[..]), Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag)))); + assert_eq!( + cnt_2(&b"xxxabcabcdef"[..]), + Err(Err::Error(error_position!(&b"xxxabcabcdef"[..], ErrorKind::Tag))) + ); + assert_eq!( + cnt_2(&b"abcxxxabcdef"[..]), + Err(Err::Error(error_position!(&b"xxxabcdef"[..], ErrorKind::Tag))) 
+ ); + } + + #[test] + #[cfg(feature = "alloc")] + fn count_zero() { + const TIMES: usize = 0; + named!(tag_abc, tag!("abc")); + named!( counter_2<&[u8], Vec<&[u8]> >, count!(tag_abc, TIMES ) ); + + let done = &b"abcabcabcdef"[..]; + let parsed_done = Vec::new(); + let rest = done; + let incomplete_1 = &b"ab"[..]; + let parsed_incompl_1 = Vec::new(); + let incomplete_2 = &b"abcab"[..]; + let parsed_incompl_2 = Vec::new(); + let error = &b"xxx"[..]; + let error_remain = &b"xxx"[..]; + let parsed_err = Vec::new(); + let error_1 = &b"xxxabcabcdef"[..]; + let parsed_err_1 = Vec::new(); + let error_1_remain = &b"xxxabcabcdef"[..]; + let error_2 = &b"abcxxxabcdef"[..]; + let parsed_err_2 = Vec::new(); + let error_2_remain = &b"abcxxxabcdef"[..]; + + assert_eq!(counter_2(done), Ok((rest, parsed_done))); + assert_eq!(counter_2(incomplete_1), Ok((incomplete_1, parsed_incompl_1))); + assert_eq!(counter_2(incomplete_2), Ok((incomplete_2, parsed_incompl_2))); + assert_eq!(counter_2(error), Ok((error_remain, parsed_err))); + assert_eq!(counter_2(error_1), Ok((error_1_remain, parsed_err_1))); + assert_eq!(counter_2(error_2), Ok((error_2_remain, parsed_err_2))); + } + + #[derive(Debug, Clone, PartialEq)] + pub struct NilError; + + impl From<(I,ErrorKind)> for NilError { + fn from(_: (I, ErrorKind)) -> Self { + NilError + } + } + + impl ParseError for NilError { + fn from_error_kind(_: I, _: ErrorKind) -> NilError { + NilError + } + fn append(_: I, _: ErrorKind, _: NilError) -> NilError { + NilError + } + } + + named!(pub number, map_res!( + map_res!( + digit, + str::from_utf8 + ), + FromStr::from_str + )); + + #[test] + #[cfg(feature = "alloc")] + fn length_count() { + named!(tag_abc, tag!(&b"abc"[..])); + named!( cnt<&[u8], Vec<&[u8]> >, length_count!(number, tag_abc) ); + + assert_eq!(cnt(&b"2abcabcabcdef"[..]), Ok((&b"abcdef"[..], vec![&b"abc"[..], &b"abc"[..]]))); + assert_eq!(cnt(&b"2ab"[..]), Err(Err::Incomplete(Needed::Size(3)))); + assert_eq!(cnt(&b"3abcab"[..]), Err(Err::Incomplete(Needed::Size(3)))); + assert_eq!(cnt(&b"xxx"[..]), Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Digit)))); + assert_eq!( + cnt(&b"2abcxxx"[..]), + Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) + ); + } + + #[test] + fn length_data() { + named!( take<&[u8], &[u8]>, length_data!(number) ); + + assert_eq!(take(&b"6abcabcabcdef"[..]), Ok((&b"abcdef"[..], &b"abcabc"[..]))); + assert_eq!(take(&b"3ab"[..]), Err(Err::Incomplete(Needed::Size(3)))); + assert_eq!(take(&b"xxx"[..]), Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Digit)))); + assert_eq!(take(&b"2abcxxx"[..]), Ok((&b"cxxx"[..], &b"ab"[..]))); + } + + #[test] + fn length_value_test() { + named!(length_value_1<&[u8], u16 >, length_value!(be_u8, be_u16)); + named!(length_value_2<&[u8], (u8, u8) >, length_value!(be_u8, tuple!(be_u8, be_u8))); + + let i1 = [0, 5, 6]; + assert_eq!(length_value_1(&i1), Err(Err::Error(error_position!(&b""[..], ErrorKind::Complete)))); + assert_eq!(length_value_2(&i1), Err(Err::Error(error_position!(&b""[..], ErrorKind::Complete)))); + + let i2 = [1, 5, 6, 3]; + assert_eq!( + length_value_1(&i2), + Err(Err::Error(error_position!(&i2[1..2], ErrorKind::Complete))) + ); + assert_eq!( + length_value_2(&i2), + Err(Err::Error(error_position!(&i2[1..2], ErrorKind::Complete))) + ); + + let i3 = [2, 5, 6, 3, 4, 5, 7]; + assert_eq!(length_value_1(&i3), Ok((&i3[3..], 1286))); + assert_eq!(length_value_2(&i3), Ok((&i3[3..], (5, 6)))); + + let i4 = [3, 5, 6, 3, 4, 5]; + assert_eq!(length_value_1(&i4), Ok((&i4[4..], 
1286))); + assert_eq!(length_value_2(&i4), Ok((&i4[4..], (5, 6)))); + } + + #[test] + #[cfg(feature = "alloc")] + fn fold_many0() { + fn fold_into_vec(mut acc: Vec, item: T) -> Vec { + acc.push(item); + acc + }; + named!(tag_abcd, tag!("abcd")); + named!(tag_empty, tag!("")); + named!( multi<&[u8],Vec<&[u8]> >, fold_many0!(tag_abcd, Vec::new(), fold_into_vec) ); + named!( multi_empty<&[u8],Vec<&[u8]> >, fold_many0!(tag_empty, Vec::new(), fold_into_vec) ); + + assert_eq!(multi(&b"abcdef"[..]), Ok((&b"ef"[..], vec![&b"abcd"[..]]))); + assert_eq!(multi(&b"abcdabcdefgh"[..]), Ok((&b"efgh"[..], vec![&b"abcd"[..], &b"abcd"[..]]))); + assert_eq!(multi(&b"azerty"[..]), Ok((&b"azerty"[..], Vec::new()))); + assert_eq!(multi(&b"abcdab"[..]), Err(Err::Incomplete(Needed::Size(4)))); + assert_eq!(multi(&b"abcd"[..]), Err(Err::Incomplete(Needed::Size(4)))); + assert_eq!(multi(&b""[..]), Err(Err::Incomplete(Needed::Size(4)))); + assert_eq!( + multi_empty(&b"abcdef"[..]), + Err(Err::Error(error_position!(&b"abcdef"[..], ErrorKind::Many0))) + ); + } + + #[test] + #[cfg(feature = "alloc")] + fn fold_many1() { + fn fold_into_vec(mut acc: Vec, item: T) -> Vec { + acc.push(item); + acc + }; + named!(multi<&[u8],Vec<&[u8]> >, fold_many1!(tag!("abcd"), Vec::new(), fold_into_vec)); + + let a = &b"abcdef"[..]; + let b = &b"abcdabcdefgh"[..]; + let c = &b"azerty"[..]; + let d = &b"abcdab"[..]; + + let res1 = vec![&b"abcd"[..]]; + assert_eq!(multi(a), Ok((&b"ef"[..], res1))); + let res2 = vec![&b"abcd"[..], &b"abcd"[..]]; + assert_eq!(multi(b), Ok((&b"efgh"[..], res2))); + assert_eq!(multi(c), Err(Err::Error(error_position!(c, ErrorKind::Many1)))); + assert_eq!(multi(d), Err(Err::Incomplete(Needed::Size(4)))); + } + + #[test] + #[cfg(feature = "alloc")] + fn fold_many_m_n() { + fn fold_into_vec(mut acc: Vec, item: T) -> Vec { + acc.push(item); + acc + }; + named!(multi<&[u8],Vec<&[u8]> >, fold_many_m_n!(2, 4, tag!("Abcd"), Vec::new(), fold_into_vec)); + + let a = &b"Abcdef"[..]; + let b = &b"AbcdAbcdefgh"[..]; + let c = &b"AbcdAbcdAbcdAbcdefgh"[..]; + let d = &b"AbcdAbcdAbcdAbcdAbcdefgh"[..]; + let e = &b"AbcdAb"[..]; + + assert_eq!(multi(a), Err(Err::Error(error_position!(a, ErrorKind::ManyMN)))); + let res1 = vec![&b"Abcd"[..], &b"Abcd"[..]]; + assert_eq!(multi(b), Ok((&b"efgh"[..], res1))); + let res2 = vec![&b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..]]; + assert_eq!(multi(c), Ok((&b"efgh"[..], res2))); + let res3 = vec![&b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..], &b"Abcd"[..]]; + assert_eq!(multi(d), Ok((&b"Abcdefgh"[..], res3))); + assert_eq!(multi(e), Err(Err::Incomplete(Needed::Size(4)))); + } + + #[test] + fn many0_count() { + named!( + count0_nums(&[u8]) -> usize, + many0_count!(pair!(digit, tag!(","))) + ); + + assert_eq!(count0_nums(&b"123,junk"[..]), Ok((&b"junk"[..], 1))); + + assert_eq!(count0_nums(&b"123,45,junk"[..]), Ok((&b"junk"[..], 2))); + + assert_eq!(count0_nums(&b"1,2,3,4,5,6,7,8,9,0,junk"[..]), Ok((&b"junk"[..], 10))); + + assert_eq!(count0_nums(&b"hello"[..]), Ok((&b"hello"[..], 0))); + } + + #[test] + fn many1_count() { + named!( + count1_nums(&[u8]) -> usize, + many1_count!(pair!(digit, tag!(","))) + ); + + assert_eq!(count1_nums(&b"123,45,junk"[..]), Ok((&b"junk"[..], 2))); + + assert_eq!(count1_nums(&b"1,2,3,4,5,6,7,8,9,0,junk"[..]), Ok((&b"junk"[..], 10))); + + assert_eq!( + count1_nums(&b"hello"[..]), + Err(Err::Error(error_position!(&b"hello"[..], ErrorKind::Many1Count))) + ); + } + +} diff --git a/third_party/rust/nom/src/multi/mod.rs b/third_party/rust/nom/src/multi/mod.rs 
new file mode 100644 index 0000000000..e8f26dd771 --- /dev/null +++ b/third_party/rust/nom/src/multi/mod.rs @@ -0,0 +1,997 @@ +//! combinators applying their child parser multiple times + +#[macro_use] +mod macros; + +use crate::internal::{Err, IResult, Needed}; +use crate::error::ParseError; +use crate::traits::{InputLength, InputTake, ToUsize}; +#[cfg(feature = "alloc")] +use crate::lib::std::vec::Vec; +use crate::error::ErrorKind; + +/// Repeats the embedded parser until it fails +/// and returns the results in a `Vec`. +/// +/// # Arguments +/// * `f` The parser to apply. +/// +/// *Note*: if the parser passed to `many0` accepts empty inputs +/// (like `alpha0` or `digit0`), `many0` will return an error, +/// to prevent going into an infinite loop +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::multi::many0; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { +/// many0(tag("abc"))(s) +/// } +/// +/// assert_eq!(parser("abcabc"), Ok(("", vec!["abc", "abc"]))); +/// assert_eq!(parser("abc123"), Ok(("123", vec!["abc"]))); +/// assert_eq!(parser("123123"), Ok(("123123", vec![]))); +/// assert_eq!(parser(""), Ok(("", vec![]))); +/// ``` +#[cfg(feature = "alloc")] +pub fn many0(f: F) -> impl Fn(I) -> IResult, E> +where + I: Clone + PartialEq, + F: Fn(I) -> IResult, + E: ParseError, +{ + move |i: I| { + let mut acc = crate::lib::std::vec::Vec::with_capacity(4); + let mut i = i.clone(); + loop { + match f(i.clone()) { + Err(Err::Error(_)) => return Ok((i, acc)), + Err(e) => return Err(e), + Ok((i1, o)) => { + if i1 == i { + return Err(Err::Error(E::from_error_kind(i, ErrorKind::Many0))); + } + + i = i1; + acc.push(o); + } + } + } + } +} +// this implementation is used for type inference issues in macros +#[doc(hidden)] +#[cfg(feature = "alloc")] +pub fn many0c(input: I, f: F) -> IResult, E> +where + I: Clone + PartialEq, + F: Fn(I) -> IResult, + E: ParseError, +{ + many0(f)(input) +} + +/// Runs the embedded parser until it fails and +/// returns the results in a `Vec`. Fails if +/// the embedded parser does not produce at least +/// one result. +/// +/// # Arguments +/// * `f` The parser to apply. 
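Beyond the doctest further down, `many1` is handy whenever a grammar requires at least one repetition. A minimal sketch, assuming `is_not`, `char`, and `terminated` from `nom::bytes::complete`, `nom::character::complete`, and `nom::sequence` (the `clauses` helper name is illustrative only):

```rust
use nom::{
    IResult,
    bytes::complete::is_not,
    character::complete::char,
    multi::many1,
    sequence::terminated,
};

// One or more ';'-terminated clauses, e.g. "a;b;c;" -> ["a", "b", "c"].
// An input containing no complete clause makes `many1` return an error.
fn clauses(s: &str) -> IResult<&str, Vec<&str>> {
    many1(terminated(is_not(";"), char(';')))(s)
}
```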
+/// +/// *Note*: if the parser passed to `many1` accepts empty inputs +/// (like `alpha0` or `digit0`), `many1` will return an error, +/// to prevent going into an infinite loop +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::multi::many1; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { +/// many1(tag("abc"))(s) +/// } +/// +/// assert_eq!(parser("abcabc"), Ok(("", vec!["abc", "abc"]))); +/// assert_eq!(parser("abc123"), Ok(("123", vec!["abc"]))); +/// assert_eq!(parser("123123"), Err(Err::Error(("123123", ErrorKind::Tag)))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Tag)))); +/// ``` +#[cfg(feature = "alloc")] +pub fn many1(f: F) -> impl Fn(I) -> IResult, E> +where + I: Clone + PartialEq, + F: Fn(I) -> IResult, + E: ParseError, +{ + move |i: I| { + let mut i = i.clone(); + match f(i.clone()) { + Err(Err::Error(err)) => return Err(Err::Error(E::append(i, ErrorKind::Many1, err))), + Err(e) => return Err(e), + Ok((i1, o)) => { + let mut acc = crate::lib::std::vec::Vec::with_capacity(4); + acc.push(o); + i = i1; + + loop { + match f(i.clone()) { + Err(Err::Error(_)) => return Ok((i, acc)), + Err(e) => return Err(e), + Ok((i1, o)) => { + if i1 == i { + return Err(Err::Error(E::from_error_kind(i, ErrorKind::Many1))); + } + + i = i1; + acc.push(o); + } + } + } + } + } + } +} + +// this implementation is used for type inference issues in macros +#[doc(hidden)] +#[cfg(feature = "alloc")] +pub fn many1c(input: I, f: F) -> IResult, E> +where + I: Clone + Copy + PartialEq, + F: Fn(I) -> IResult, + E: ParseError, +{ + many1(f)(input) +} + +/// Applies the parser `f` until the parser `g` produces +/// a result. Returns a pair consisting of the results of +/// `f` in a `Vec` and the result of `g`. 
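For instance, `many_till` can emulate a take-until style combinator by pairing it with `anychar`. A small sketch (the `until_end` helper and its terminator are illustrative, assuming `anychar` from `nom::character::complete`):

```rust
use nom::{IResult, bytes::complete::tag, character::complete::anychar, multi::many_till};

// Collect characters one by one until the terminator "end" is found.
// "abcend..." -> (['a', 'b', 'c'], "end"), leaving the rest of the input untouched.
fn until_end(s: &str) -> IResult<&str, (Vec<char>, &str)> {
    many_till(anychar, tag("end"))(s)
}
```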
+/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::multi::many_till; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, (Vec<&str>, &str)> { +/// many_till(tag("abc"), tag("end"))(s) +/// }; +/// +/// assert_eq!(parser("abcabcend"), Ok(("", (vec!["abc", "abc"], "end")))); +/// assert_eq!(parser("abc123end"), Err(Err::Error(("123end", ErrorKind::Tag)))); +/// assert_eq!(parser("123123end"), Err(Err::Error(("123123end", ErrorKind::Tag)))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Tag)))); +/// assert_eq!(parser("abcendefg"), Ok(("efg", (vec!["abc"], "end")))); +/// ``` +#[cfg(feature = "alloc")] +pub fn many_till(f: F, g: G) -> impl Fn(I) -> IResult, P), E> +where + I: Clone + PartialEq, + F: Fn(I) -> IResult, + G: Fn(I) -> IResult, + E: ParseError, +{ + move |i: I| { + let mut res = crate::lib::std::vec::Vec::new(); + let mut i = i.clone(); + loop { + match g(i.clone()) { + Ok((i1, o)) => return Ok((i1, (res, o))), + Err(Err::Error(_)) => { + match f(i.clone()) { + Err(Err::Error(err)) => + return Err(Err::Error(E::append(i, ErrorKind::ManyTill, err))), + Err(e) => return Err(e), + Ok((i1, o)) => { + // loop trip must always consume (otherwise infinite loops) + if i1 == i { + return Err(Err::Error(E::from_error_kind(i1, ErrorKind::ManyTill))); + } + + res.push(o); + i = i1; + } + } + }, + Err(e) => return Err(e), + } + } + } +} + +// this implementation is used for type inference issues in macros +#[doc(hidden)] +#[cfg(feature = "alloc")] +pub fn many_tillc(i: I, f: F, g: G) -> IResult, P), E> +where + I: Clone + PartialEq, + F: Fn(I) -> IResult, + G: Fn(I) -> IResult, + E: ParseError, +{ + many_till(f, g)(i) +} + +/// Alternates between two parsers to produce +/// a list of elements. +/// # Arguments +/// * `sep` Parses the separator between list elements. +/// * `f` Parses the elements of the list. 
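A typical use is a possibly empty, delimiter separated list. A minimal sketch, assuming `digit1` from `nom::character::complete` and `tag` from `nom::bytes::complete` (the `int_list` name is illustrative):

```rust
use nom::{IResult, bytes::complete::tag, character::complete::digit1, multi::separated_list};

// A comma separated list of integers, e.g. "1,22,333".
// An input that starts with no integer simply yields an empty Vec.
fn int_list(s: &str) -> IResult<&str, Vec<&str>> {
    separated_list(tag(","), digit1)(s)
}
```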
+/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::multi::separated_list; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { +/// separated_list(tag("|"), tag("abc"))(s) +/// } +/// +/// assert_eq!(parser("abc|abc|abc"), Ok(("", vec!["abc", "abc", "abc"]))); +/// assert_eq!(parser("abc123abc"), Ok(("123abc", vec!["abc"]))); +/// assert_eq!(parser("abc|def"), Ok(("|def", vec!["abc"]))); +/// assert_eq!(parser(""), Ok(("", vec![]))); +/// assert_eq!(parser("def|abc"), Ok(("def|abc", vec![]))); +/// ``` +#[cfg(feature = "alloc")] +pub fn separated_list(sep: G, f: F) -> impl Fn(I) -> IResult, E> +where + I: Clone + PartialEq, + F: Fn(I) -> IResult, + G: Fn(I) -> IResult, + E: ParseError, +{ + move |i: I| { + let mut res = Vec::new(); + let mut i = i.clone(); + + match f(i.clone()) { + Err(Err::Error(_)) => return Ok((i, res)), + Err(e) => return Err(e), + Ok((i1, o)) => { + if i1 == i { + return Err(Err::Error(E::from_error_kind(i1, ErrorKind::SeparatedList))); + } + + res.push(o); + i = i1; + } + } + + loop { + match sep(i.clone()) { + Err(Err::Error(_)) => return Ok((i, res)), + Err(e) => return Err(e), + Ok((i1, _)) => { + if i1 == i { + return Err(Err::Error(E::from_error_kind(i1, ErrorKind::SeparatedList))); + } + + match f(i1.clone()) { + Err(Err::Error(_)) => return Ok((i, res)), + Err(e) => return Err(e), + Ok((i2, o)) => { + if i2 == i { + return Err(Err::Error(E::from_error_kind(i2, ErrorKind::SeparatedList))); + } + + res.push(o); + i = i2; + } + } + } + } + } + } +} + +// this implementation is used for type inference issues in macros +#[doc(hidden)] +#[cfg(feature = "alloc")] +pub fn separated_listc(i: I, sep: G, f: F) -> IResult, E> +where + I: Clone + PartialEq, + F: Fn(I) -> IResult, + G: Fn(I) -> IResult, + E: ParseError, +{ + separated_list(sep, f)(i) +} + +/// Alternates between two parsers to produce +/// a list of elements. Fails if the element +/// parser does not produce at least one element. +/// # Arguments +/// * `sep` Parses the separator between list elements. +/// * `f` Parses the elements of the list. 
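Compared to `separated_list`, this variant suits grammars where an empty list is invalid. A rough sketch using an illustrative `dotted_path` helper and `alpha1` from `nom::character::complete`:

```rust
use nom::{
    IResult,
    bytes::complete::tag,
    character::complete::alpha1,
    multi::separated_nonempty_list,
};

// A dotted path such as "foo.bar.baz"; at least one segment is required,
// so an empty input is rejected instead of producing an empty Vec.
fn dotted_path(s: &str) -> IResult<&str, Vec<&str>> {
    separated_nonempty_list(tag("."), alpha1)(s)
}
```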
+/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::multi::separated_nonempty_list; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { +/// separated_nonempty_list(tag("|"), tag("abc"))(s) +/// } +/// +/// assert_eq!(parser("abc|abc|abc"), Ok(("", vec!["abc", "abc", "abc"]))); +/// assert_eq!(parser("abc123abc"), Ok(("123abc", vec!["abc"]))); +/// assert_eq!(parser("abc|def"), Ok(("|def", vec!["abc"]))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Tag)))); +/// assert_eq!(parser("def|abc"), Err(Err::Error(("def|abc", ErrorKind::Tag)))); +/// ``` +#[cfg(feature = "alloc")] +pub fn separated_nonempty_list(sep: G, f: F) -> impl Fn(I) -> IResult, E> +where + I: Clone + PartialEq, + F: Fn(I) -> IResult, + G: Fn(I) -> IResult, + E: ParseError, +{ + move |i: I| { + let mut res = Vec::new(); + let mut i = i.clone(); + + // Parse the first element + match f(i.clone()) { + Err(e)=> return Err(e), + Ok((i1, o)) => { + if i1 == i { + return Err(Err::Error(E::from_error_kind(i1, ErrorKind::SeparatedList))); + } + + res.push(o); + i = i1; + } + } + + loop { + match sep(i.clone()) { + Err(Err::Error(_)) => return Ok((i, res)), + Err(e) => return Err(e), + Ok((i1, _)) => { + if i1 == i { + return Err(Err::Error(E::from_error_kind(i1, ErrorKind::SeparatedList))); + } + + match f(i1.clone()) { + Err(Err::Error(_)) => return Ok((i, res)), + Err(e) => return Err(e), + Ok((i2, o)) => { + if i2 == i { + return Err(Err::Error(E::from_error_kind(i2, ErrorKind::SeparatedList))); + } + + res.push(o); + i = i2; + } + } + } + } + } + } +} + +// this implementation is used for type inference issues in macros +#[doc(hidden)] +#[cfg(feature = "alloc")] +pub fn separated_nonempty_listc(i: I, sep: G, f: F) -> IResult, E> +where + I: Clone + PartialEq, + F: Fn(I) -> IResult, + G: Fn(I) -> IResult, + E: ParseError, +{ + separated_nonempty_list(sep, f)(i) +} + +/// Repeats the embedded parser `n` times or until it fails +/// and returns the results in a `Vec`. Fails if the +/// embedded parser does not succeed at least `m` times. +/// # Arguments +/// * `m` The minimum number of iterations. +/// * `n` The maximum number of iterations. +/// * `f` The parser to apply. 
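Bounded repetition is useful for fixed or limited width fields. A rough sketch, assuming `one_of` from `nom::character::complete` (the `short_octal` helper is illustrative):

```rust
use nom::{IResult, character::complete::one_of, multi::many_m_n};

// Between one and three octal digits, e.g. the mode part of "chmod 755".
fn short_octal(s: &str) -> IResult<&str, Vec<char>> {
    many_m_n(1, 3, one_of("01234567"))(s)
}
```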
+/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::multi::many_m_n; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { +/// many_m_n(0, 2, tag("abc"))(s) +/// } +/// +/// assert_eq!(parser("abcabc"), Ok(("", vec!["abc", "abc"]))); +/// assert_eq!(parser("abc123"), Ok(("123", vec!["abc"]))); +/// assert_eq!(parser("123123"), Ok(("123123", vec![]))); +/// assert_eq!(parser(""), Ok(("", vec![]))); +/// assert_eq!(parser("abcabcabc"), Ok(("abc", vec!["abc", "abc"]))); +/// ``` +#[cfg(feature = "alloc")] +pub fn many_m_n(m: usize, n: usize, f: F) -> impl Fn(I) -> IResult, E> +where + I: Clone + PartialEq, + F: Fn(I) -> IResult, + E: ParseError, +{ + move |i: I| { + let mut res = crate::lib::std::vec::Vec::with_capacity(m); + let mut input = i.clone(); + let mut count: usize = 0; + + if n == 0 { + return Ok((i, vec!())) + } + + loop { + let _i = input.clone(); + match f(_i) { + Ok((i, o)) => { + // do not allow parsers that do not consume input (causes infinite loops) + if i == input { + return Err(Err::Error(E::from_error_kind(input, ErrorKind::ManyMN))); + } + + res.push(o); + input = i; + count += 1; + + if count == n { + return Ok((input, res)); + } + } + Err(Err::Error(e)) => { + if count < m { + return Err(Err::Error(E::append(input, ErrorKind::ManyMN, e))); + } else { + return Ok((input, res)); + } + } + Err(e) => { + return Err(e); + } + } + } + } +} + +// this implementation is used for type inference issues in macros +#[doc(hidden)] +#[cfg(feature = "alloc")] +pub fn many_m_nc(i: I, m: usize, n: usize, f: F) -> IResult, E> +where + I: Clone + PartialEq, + F: Fn(I) -> IResult, + E: ParseError, +{ + many_m_n(m, n, f)(i) +} + +/// Repeats the embedded parser until it fails +/// and returns the number of successful iterations. +/// # Arguments +/// * `f` The parser to apply. +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::multi::many0_count; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, usize> { +/// many0_count(tag("abc"))(s) +/// } +/// +/// assert_eq!(parser("abcabc"), Ok(("", 2))); +/// assert_eq!(parser("abc123"), Ok(("123", 1))); +/// assert_eq!(parser("123123"), Ok(("123123", 0))); +/// assert_eq!(parser(""), Ok(("", 0))); +/// ``` +pub fn many0_count(f: F) -> impl Fn(I) -> IResult +where + I: Clone + PartialEq, + F: Fn(I) -> IResult, + E: ParseError, +{ + move |i: I| { + let mut input = i.clone(); + let mut count = 0; + + loop { + let input_ = input.clone(); + match f(input_) { + Ok((i, _)) => { + // loop trip must always consume (otherwise infinite loops) + if i == input { + return Err(Err::Error(E::from_error_kind(input, ErrorKind::Many0Count))); + } + + input = i; + count += 1; + } + + Err(Err::Error(_)) => return Ok((input, count)), + + Err(e) => return Err(e), + } + } + } +} + +#[doc(hidden)] +pub fn many0_countc(i: I, f: F) -> IResult +where + I: Clone + PartialEq, + F: Fn(I) -> IResult, + E: ParseError, +{ + many0_count(f)(i) +} + +/// Repeats the embedded parser until it fails +/// and returns the number of successful iterations. +/// Fails if the embedded parser does not succeed +/// at least once. +/// # Arguments +/// * `f` The parser to apply. 
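Because only the count is returned, no allocation is needed. One might, for example, count leading `#` characters to derive a heading level; sketch only, with `char` taken from `nom::character::complete` and `heading_level` as an illustrative name:

```rust
use nom::{IResult, character::complete::char, multi::many1_count};

// Count leading '#' characters, e.g. "### title" -> 3 with " title" remaining.
// Zero '#' characters is an error, since at least one match is required.
fn heading_level(s: &str) -> IResult<&str, usize> {
    many1_count(char('#'))(s)
}
```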
+/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::multi::many1_count; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, usize> { +/// many1_count(tag("abc"))(s) +/// } +/// +/// assert_eq!(parser("abcabc"), Ok(("", 2))); +/// assert_eq!(parser("abc123"), Ok(("123", 1))); +/// assert_eq!(parser("123123"), Err(Err::Error(("123123", ErrorKind::Many1Count)))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Many1Count)))); +/// ``` +pub fn many1_count(f: F) -> impl Fn(I) -> IResult +where + I: Clone + PartialEq, + F: Fn(I) -> IResult, + E: ParseError, +{ + move |i: I| { + let i_ = i.clone(); + match f(i_) { + Err(Err::Error(_)) => Err(Err::Error(E::from_error_kind(i, ErrorKind::Many1Count))), + Err(i) => Err(i), + Ok((i1, _)) => { + let mut count = 1; + let mut input = i1; + + loop { + let input_ = input.clone(); + match f(input_) { + Err(Err::Error(_)) => return Ok((input, count)), + Err(e) => return Err(e), + Ok((i, _)) => { + if i == input { + return Err(Err::Error(E::from_error_kind(i, ErrorKind::Many1Count))); + } + + count += 1; + input = i; + } + } + } + } + } + } +} + +#[doc(hidden)] +pub fn many1_countc(i: I, f: F) -> IResult +where + I: Clone + PartialEq, + F: Fn(I) -> IResult, + E: ParseError, +{ + many1_count(f)(i) +} + +/// Runs the embedded parser a specified number +/// of times. Returns the results in a `Vec`. +/// # Arguments +/// * `f` The parser to apply. +/// * `count` How often to apply the parser. +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::multi::count; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { +/// count(tag("abc"), 2)(s) +/// } +/// +/// assert_eq!(parser("abcabc"), Ok(("", vec!["abc", "abc"]))); +/// assert_eq!(parser("abc123"), Err(Err::Error(("123", ErrorKind::Tag)))); +/// assert_eq!(parser("123123"), Err(Err::Error(("123123", ErrorKind::Tag)))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Tag)))); +/// assert_eq!(parser("abcabcabc"), Ok(("abc", vec!["abc", "abc"]))); +/// ``` +#[cfg(feature = "alloc")] +pub fn count(f: F, count: usize) -> impl Fn(I) -> IResult, E> +where + I: Clone + PartialEq, + F: Fn(I) -> IResult, + E: ParseError, +{ + move |i: I | { + let mut input = i.clone(); + let mut res = crate::lib::std::vec::Vec::new(); + + for _ in 0..count { + let input_ = input.clone(); + match f(input_) { + Ok((i, o)) => { + res.push(o); + input = i; + } + Err(Err::Error(e)) => { + return Err(Err::Error(E::append(i, ErrorKind::Count, e))); + } + Err(e) => { + return Err(e); + } + } + } + + Ok((input, res)) + } +} + +/// Applies a parser until it fails and accumulates +/// the results using a given function and initial value. +/// # Arguments +/// * `f` The parser to apply. +/// * `init` The initial value. +/// * `g` The function that combines a result of `f` with +/// the current accumulator. 
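Folding avoids building an intermediate `Vec` when only an aggregate is needed. A minimal sketch that sums a comma separated run of integers, assuming `map_res` and `opt` from `nom::combinator`, `terminated` from `nom::sequence`, `digit1` from `nom::character::complete`, and `tag` from `nom::bytes::complete` (the `sum_ints` helper is illustrative):

```rust
use nom::{
    IResult,
    bytes::complete::tag,
    character::complete::digit1,
    combinator::{map_res, opt},
    multi::fold_many0,
    sequence::terminated,
};

// Sum a comma separated run of integers without collecting them, e.g. "1,2,3" -> 6.
fn sum_ints(s: &str) -> IResult<&str, u64> {
    fold_many0(
        terminated(map_res(digit1, |d: &str| d.parse::<u64>()), opt(tag(","))),
        0u64,
        |acc, n| acc + n,
    )(s)
}
```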
+/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::multi::fold_many0; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { +/// fold_many0( +/// tag("abc"), +/// Vec::new(), +/// |mut acc: Vec<_>, item| { +/// acc.push(item); +/// acc +/// } +/// )(s) +/// } +/// +/// assert_eq!(parser("abcabc"), Ok(("", vec!["abc", "abc"]))); +/// assert_eq!(parser("abc123"), Ok(("123", vec!["abc"]))); +/// assert_eq!(parser("123123"), Ok(("123123", vec![]))); +/// assert_eq!(parser(""), Ok(("", vec![]))); +/// ``` +pub fn fold_many0(f: F, init: R, g: G) -> impl Fn(I) -> IResult +where + I: Clone + PartialEq, + F: Fn(I) -> IResult, + G: Fn(R, O) -> R, + E: ParseError, + R: Clone, +{ + move |i: I| { + let mut res = init.clone(); + let mut input = i.clone(); + + loop { + let i_ = input.clone(); + match f(i_) { + Ok((i, o)) => { + // loop trip must always consume (otherwise infinite loops) + if i == input { + return Err(Err::Error(E::from_error_kind(input, ErrorKind::Many0))); + } + + res = g(res, o); + input = i; + } + Err(Err::Error(_)) => { + return Ok((input, res)); + } + Err(e) => { + return Err(e); + } + } + } + } +} + +#[doc(hidden)] +pub fn fold_many0c(i: I, f: F, init: R, g: G) -> IResult +where + I: Clone + PartialEq, + F: Fn(I) -> IResult, + G: Fn(R, O) -> R, + E: ParseError, + R: Clone, +{ + fold_many0(f, init, g)(i) +} + +/// Applies a parser until it fails and accumulates +/// the results using a given function and initial value. +/// Fails if the embedded parser does not succeed at least +/// once. +/// # Arguments +/// * `f` The parser to apply. +/// * `init` The initial value. +/// * `g` The function that combines a result of `f` with +/// the current accumulator. 
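The same folding idea with at-least-once semantics. A rough sketch that keeps a running maximum instead of a `Vec`, assuming `map_res`, `terminated`, `digit1`, and `multispace0` from the corresponding nom modules (`max_int` is an illustrative name):

```rust
use nom::{
    IResult,
    character::complete::{digit1, multispace0},
    combinator::map_res,
    multi::fold_many1,
    sequence::terminated,
};

// The maximum of one or more whitespace separated integers, e.g. "3 17 5" -> 17.
// An input with no leading integer at all is an error.
fn max_int(s: &str) -> IResult<&str, u32> {
    fold_many1(
        terminated(map_res(digit1, |d: &str| d.parse::<u32>()), multispace0),
        0u32,
        |acc, n| acc.max(n),
    )(s)
}
```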
+/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::multi::fold_many1; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { +/// fold_many1( +/// tag("abc"), +/// Vec::new(), +/// |mut acc: Vec<_>, item| { +/// acc.push(item); +/// acc +/// } +/// )(s) +/// } +/// +/// assert_eq!(parser("abcabc"), Ok(("", vec!["abc", "abc"]))); +/// assert_eq!(parser("abc123"), Ok(("123", vec!["abc"]))); +/// assert_eq!(parser("123123"), Err(Err::Error(("123123", ErrorKind::Many1)))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Many1)))); +/// ``` +pub fn fold_many1(f: F, init: R, g: G) -> impl Fn(I) -> IResult +where + I: Clone + PartialEq, + F: Fn(I) -> IResult, + G: Fn(R, O) -> R, + E: ParseError, + R: Clone, +{ + move |i: I| { + let _i = i.clone(); + let init = init.clone(); + match f(_i) { + Err(Err::Error(_)) => Err(Err::Error(E::from_error_kind(i, ErrorKind::Many1))), + Err(e) => return Err(e), + Ok((i1, o1)) => { + let mut acc = g(init, o1); + let mut input = i1; + + loop { + let _input = input.clone(); + match f(_input) { + Err(Err::Error(_)) => { + break; + } + Err(e) => return Err(e), + Ok((i, o)) => { + if i == input { + return Err(Err::Failure(E::from_error_kind(i, ErrorKind::Many1))); + } + + acc = g(acc, o); + input = i; + } + } + } + + Ok((input, acc)) + } + } + } +} + +#[doc(hidden)] +pub fn fold_many1c(i: I, f: F, init: R, g: G) -> IResult +where + I: Clone + PartialEq, + F: Fn(I) -> IResult, + G: Fn(R, O) -> R, + E: ParseError, + R: Clone, +{ + fold_many1(f, init, g)(i) +} + +/// Applies a parser `n` times or until it fails and accumulates +/// the results using a given function and initial value. +/// Fails if the embedded parser does not succeed at least `m` +/// times. +/// # Arguments +/// * `m` The minimum number of iterations. +/// * `n` The maximum number of iterations. +/// * `f` The parser to apply. +/// * `init` The initial value. +/// * `g` The function that combines a result of `f` with +/// the current accumulator. 
+/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// use nom::multi::fold_many_m_n; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &str) -> IResult<&str, Vec<&str>> { +/// fold_many_m_n( +/// 0, +/// 2, +/// tag("abc"), +/// Vec::new(), +/// |mut acc: Vec<_>, item| { +/// acc.push(item); +/// acc +/// } +/// )(s) +/// } +/// +/// assert_eq!(parser("abcabc"), Ok(("", vec!["abc", "abc"]))); +/// assert_eq!(parser("abc123"), Ok(("123", vec!["abc"]))); +/// assert_eq!(parser("123123"), Ok(("123123", vec![]))); +/// assert_eq!(parser(""), Ok(("", vec![]))); +/// assert_eq!(parser("abcabcabc"), Ok(("abc", vec!["abc", "abc"]))); +/// ``` +pub fn fold_many_m_n(m: usize, n: usize, f: F, init: R, g: G) -> impl Fn(I) ->IResult +where + I: Clone + PartialEq, + F: Fn(I) -> IResult, + G: Fn(R, O) -> R, + E: ParseError, + R: Clone, +{ + move |i: I| { + let mut acc = init.clone(); + let mut input = i.clone(); + for count in 0..n { + let _input = input.clone(); + match f(_input) { + Ok((i, o)) => { + // do not allow parsers that do not consume input (causes infinite loops) + if i == input { + return Err(Err::Error(E::from_error_kind(i, ErrorKind::ManyMN))); + } + + acc = g(acc, o); + input = i; + } + //FInputXMError: handle failure properly + Err(Err::Error(_)) => if count < m { + return Err(Err::Error(E::from_error_kind(i, ErrorKind::ManyMN))); + } else { + break; + } + Err(e) => return Err(e), + } + } + + Ok((input, acc)) + } +} + +#[doc(hidden)] +pub fn fold_many_m_nc(i: I, m: usize, n: usize, f: F, init: R, g: G) -> IResult +where + I: Clone + PartialEq, + F: Fn(I) -> IResult, + G: Fn(R, O) -> R, + E: ParseError, + R: Clone, +{ + fold_many_m_n(m, n, f, init, g)(i) +} + +/// Gets a number from the parser and returns a +/// subslice of the input of that size. +/// If the parser returns Incomplete, +/// length_data will return an error. +/// # Arguments +/// * `f` The parser to apply. +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_u16; +/// use nom::multi::length_data; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &[u8]) -> IResult<&[u8], &[u8]> { +/// length_data(be_u16)(s) +/// } +/// +/// assert_eq!(parser(b"\x00\x03abcefg"), Ok((&b"efg"[..], &b"abc"[..]))); +/// assert_eq!(parser(b"\x00\x03"), Err(Err::Incomplete(Size(3)))); +/// ``` +pub fn length_data(f: F) -> impl Fn(I) -> IResult +where + I: Clone + InputLength + InputTake, + N: Copy + ToUsize, + F: Fn(I) -> IResult, + E: ParseError, +{ + move |i: I| { + let (i, length) = f(i)?; + + let length: usize = length.to_usize(); + + if i.input_len() < length { + Err(Err::Incomplete(Needed::Size(length))) + } else { + Ok(i.take_split(length)) + } + } +} + +/// Gets a number from the first parser, +/// takes a subslice of the input of that size, +/// then applies the second parser on that subslice. +/// If the second parser returns Incomplete, +/// length_value will return an error. +/// # Arguments +/// * `f` The parser to apply. 
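This is the usual building block for length prefixed, TLV style layouts. A minimal sketch, assuming `be_u8` and `be_u16` from `nom::number::complete` and `pair` from `nom::sequence` (the `two_u16` helper is illustrative):

```rust
use nom::{
    IResult,
    multi::length_value,
    number::complete::{be_u8, be_u16},
    sequence::pair,
};

// One length byte, then two big endian u16 values parsed out of that many bytes.
// For [4, 0, 1, 0, 2, ...] the length is 4 and the result is (1, 2).
fn two_u16(i: &[u8]) -> IResult<&[u8], (u16, u16)> {
    length_value(be_u8, pair(be_u16, be_u16))(i)
}
```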
+/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, error::ErrorKind, Needed, IResult}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_u16; +/// use nom::multi::length_value; +/// use nom::bytes::complete::tag; +/// +/// fn parser(s: &[u8]) -> IResult<&[u8], &[u8]> { +/// length_value(be_u16, tag("abc"))(s) +/// } +/// +/// assert_eq!(parser(b"\x00\x03abcefg"), Ok((&b"efg"[..], &b"abc"[..]))); +/// assert_eq!(parser(b"\x00\x03123123"), Err(Err::Error((&b"123"[..], ErrorKind::Tag)))); +/// assert_eq!(parser(b"\x00\x03"), Err(Err::Incomplete(Size(3)))); +/// ``` +pub fn length_value(f: F, g: G) -> impl Fn(I) -> IResult +where + I: Clone + InputLength + InputTake, + N: Copy + ToUsize, + F: Fn(I) -> IResult, + G: Fn(I) -> IResult, + E: ParseError, +{ + move |i: I| { + let (i, length) = f(i)?; + + let length: usize = length.to_usize(); + + if i.input_len() < length { + Err(Err::Incomplete(Needed::Size(length))) + } else { + let (rest, i) = i.take_split(length); + match g(i.clone()) { + Err(Err::Incomplete(_)) => + Err(Err::Error(E::from_error_kind(i, ErrorKind::Complete))), + Err(e) => Err(e), + Ok((_, o)) => Ok((rest,o)), + } + } + } +} + +#[doc(hidden)] +pub fn length_valuec(i: I, f: F, g: G) -> IResult +where + I: Clone + InputLength + InputTake, + N: Copy + ToUsize, + F: Fn(I) -> IResult, + G: Fn(I) -> IResult, + E: ParseError, +{ + length_value(f, g)(i) +} diff --git a/third_party/rust/nom/src/nom.rs b/third_party/rust/nom/src/nom.rs deleted file mode 100644 index 341bbbef0e..0000000000 --- a/third_party/rust/nom/src/nom.rs +++ /dev/null @@ -1,1765 +0,0 @@ -//! Useful parser combinators -//! -//! A number of useful parser combinators have already been implemented. -//! Some of them use macros, other are implemented through functions. -//! Hopefully, the syntax will converge to onely one way in the future, -//! but the macros system makes no promises. -//! -#![allow(unused_imports)] - -#[cfg(feature = "alloc")] -use lib::std::boxed::Box; - -#[cfg(feature = "std")] -use lib::std::fmt::Debug; -use internal::*; -use traits::{AsChar, InputIter, InputLength, InputTakeAtPosition}; -use traits::{need_more, need_more_err, AtEof, ParseTo}; -use lib::std::ops::{Range, RangeFrom, RangeTo}; -use traits::{Compare, CompareResult, Offset, Slice}; -use util::ErrorKind; -use lib::std::mem::transmute; - -#[cfg(feature = "alloc")] -#[inline] -pub fn tag_cl<'a, 'b>(rec: &'a [u8]) -> Box IResult<&'b [u8], &'b [u8]> + 'a> { - Box::new(move |i: &'b [u8]| -> IResult<&'b [u8], &'b [u8]> { - if i.len() >= rec.len() && &i[0..rec.len()] == rec { - Ok((&i[rec.len()..], &i[0..rec.len()])) - } else { - let e: ErrorKind = ErrorKind::TagClosure; - Err(Err::Error(error_position!(i, e))) - } - }) -} - -#[cfg(feature = "std")] -#[inline] -pub fn print(input: T) -> IResult { - println!("{:?}", input); - Ok((input, ())) -} - -#[inline] -pub fn begin(input: &[u8]) -> IResult<(), &[u8]> { - Ok(((), input)) -} - -pub fn crlf(input: T) -> IResult -where - T: Slice> + Slice> + Slice>, - T: InputIter + AtEof, - T: Compare<&'static str>, -{ - match input.compare("\r\n") { - //FIXME: is this the right index? 
- CompareResult::Ok => Ok((input.slice(2..), input.slice(0..2))), - CompareResult::Incomplete => need_more_err(input, Needed::Size(2), ErrorKind::CrLf), - CompareResult::Error => { - let e: ErrorKind = ErrorKind::CrLf; - Err(Err::Error(error_position!(input, e))) - } - } -} - -// FIXME: when rust-lang/rust#17436 is fixed, macros will be able to export -// public methods -pub fn not_line_ending(input: T) -> IResult -where - T: Slice> + Slice> + Slice>, - T: InputIter + InputLength + AtEof, - T: Compare<&'static str>, - ::Item: AsChar, - ::RawItem: AsChar, -{ - match input.position(|item| { - let c = item.as_char(); - c == '\r' || c == '\n' - }) { - None => { - if input.at_eof() { - Ok((input.slice(input.input_len()..), input)) - } else { - Err(Err::Incomplete(Needed::Unknown)) - } - } - Some(index) => { - let mut it = input.slice(index..).iter_elements(); - let nth = it.next().unwrap().as_char(); - if nth == '\r' { - let sliced = input.slice(index..); - let comp = sliced.compare("\r\n"); - match comp { - //FIXME: calculate the right index - CompareResult::Incomplete => need_more_err(input, Needed::Unknown, ErrorKind::Tag), - CompareResult::Error => { - let e: ErrorKind = ErrorKind::Tag; - Err(Err::Error(error_position!(input, e))) - } - CompareResult::Ok => Ok((input.slice(index..), input.slice(..index))), - } - } else { - Ok((input.slice(index..), input.slice(..index))) - } - } - } -} - -/// Recognizes an end of line (both '\n' and '\r\n') -pub fn line_ending(input: T) -> IResult -where - T: Slice> + Slice> + Slice>, - T: InputIter + InputLength + AtEof, - T: Compare<&'static str>, -{ - match input.compare("\n") { - CompareResult::Ok => Ok((input.slice(1..), input.slice(0..1))), - CompareResult::Incomplete => need_more_err(input, Needed::Size(1), ErrorKind::CrLf::), - CompareResult::Error => { - match input.compare("\r\n") { - //FIXME: is this the right index? 
- CompareResult::Ok => Ok((input.slice(2..), input.slice(0..2))), - CompareResult::Incomplete => need_more_err(input, Needed::Size(2), ErrorKind::CrLf::), - CompareResult::Error => Err(Err::Error(error_position!(input, ErrorKind::CrLf::))), - } - } - } -} - -pub fn eol(input: T) -> IResult -where - T: Slice> + Slice> + Slice>, - T: InputIter + InputLength + AtEof, - T: Compare<&'static str>, -{ - line_ending(input) -} - -/// Tests if byte is ASCII alphabetic: A-Z, a-z -#[inline] -pub fn is_alphabetic(chr: u8) -> bool { - (chr >= 0x41 && chr <= 0x5A) || (chr >= 0x61 && chr <= 0x7A) -} - -/// Tests if byte is ASCII digit: 0-9 -#[inline] -pub fn is_digit(chr: u8) -> bool { - chr >= 0x30 && chr <= 0x39 -} - -/// Tests if byte is ASCII hex digit: 0-9, A-F, a-f -#[inline] -pub fn is_hex_digit(chr: u8) -> bool { - (chr >= 0x30 && chr <= 0x39) || (chr >= 0x41 && chr <= 0x46) || (chr >= 0x61 && chr <= 0x66) -} - -/// Tests if byte is ASCII octal digit: 0-7 -#[inline] -pub fn is_oct_digit(chr: u8) -> bool { - chr >= 0x30 && chr <= 0x37 -} - -/// Tests if byte is ASCII alphanumeric: A-Z, a-z, 0-9 -#[inline] -pub fn is_alphanumeric(chr: u8) -> bool { - is_alphabetic(chr) || is_digit(chr) -} - -/// Tests if byte is ASCII space or tab -#[inline] -pub fn is_space(chr: u8) -> bool { - chr == b' ' || chr == b'\t' -} - -// FIXME: when rust-lang/rust#17436 is fixed, macros will be able to export -//pub filter!(alpha is_alphabetic) -//pub filter!(digit is_digit) -//pub filter!(hex_digit is_hex_digit) -//pub filter!(oct_digit is_oct_digit) -//pub filter!(alphanumeric is_alphanumeric) - -/// Recognizes one or more lowercase and uppercase alphabetic characters. -/// For ASCII strings: a-zA-Z -/// For UTF8 strings, any alphabetic code point (ie, not only the ASCII ones) -pub fn alpha(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - alpha1(input) -} - -/// Recognizes zero or more lowercase and uppercase alphabetic characters. 
-/// For ASCII strings: a-zA-Z -/// For UTF8 strings, any alphabetic code point (ie, not only the ASCII ones) -pub fn alpha0(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position(|item| !item.is_alpha()) -} - -/// Recognizes one or more lowercase and uppercase alphabetic characters -/// For ASCII strings: a-zA-Z -/// For UTF8 strings, any alphabetic code point (ie, not only the ASCII ones) -pub fn alpha1(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position1(|item| !item.is_alpha(), ErrorKind::Alpha) -} - -/// Recognizes one or more numerical characters: 0-9 -pub fn digit(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - digit1(input) -} - -/// Recognizes zero or more numerical characters: 0-9 -pub fn digit0(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position(|item| !item.is_dec_digit()) -} - -/// Recognizes one or more numerical characters: 0-9 -pub fn digit1(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position1(|item| !item.is_dec_digit(), ErrorKind::Digit) -} - -/// Recognizes one or more hexadecimal numerical characters: 0-9, A-F, a-f -pub fn hex_digit(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - hex_digit1(input) -} - -/// Recognizes zero or more hexadecimal numerical characters: 0-9, A-F, a-f -pub fn hex_digit0(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position(|item| !item.is_hex_digit()) -} -/// Recognizes one or more hexadecimal numerical characters: 0-9, A-F, a-f -pub fn hex_digit1(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position1(|item| !item.is_hex_digit(), ErrorKind::HexDigit) -} - -/// Recognizes one or more octal characters: 0-7 -pub fn oct_digit(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - oct_digit1(input) -} - -/// Recognizes zero or more octal characters: 0-7 -pub fn oct_digit0(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position(|item| !item.is_oct_digit()) -} - -/// Recognizes one or more octal characters: 0-7 -pub fn oct_digit1(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position1(|item| !item.is_oct_digit(), ErrorKind::OctDigit) -} - -/// Recognizes one or more numerical and alphabetic characters -/// For ASCII strings: 0-9a-zA-Z -/// For UTF8 strings, 0-9 and any alphabetic code point (ie, not only the ASCII ones) -pub fn alphanumeric(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - alphanumeric1(input) -} - -/// Recognizes zero or more numerical and alphabetic characters. -/// For ASCII strings: 0-9a-zA-Z -/// For UTF8 strings, 0-9 and any alphabetic code point (ie, not only the ASCII ones) -pub fn alphanumeric0(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position(|item| !item.is_alphanum()) -} -/// Recognizes one or more numerical and alphabetic characters. 
-/// For ASCII strings: 0-9a-zA-Z -/// For UTF8 strings, 0-9 and any alphabetic code point (ie, not only the ASCII ones) -pub fn alphanumeric1(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar, -{ - input.split_at_position1(|item| !item.is_alphanum(), ErrorKind::AlphaNumeric) -} - -/// Recognizes one or more spaces and tabs -pub fn space(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar + Clone, -{ - space1(input) -} - -/// Recognizes zero or more spaces and tabs -pub fn space0(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar + Clone, -{ - input.split_at_position(|item| { - let c = item.clone().as_char(); - !(c == ' ' || c == '\t') - }) -} -/// Recognizes one or more spaces and tabs -pub fn space1(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar + Clone, -{ - input.split_at_position1( - |item| { - let c = item.clone().as_char(); - !(c == ' ' || c == '\t') - }, - ErrorKind::Space, - ) -} - -/// Recognizes one or more spaces, tabs, carriage returns and line feeds -pub fn multispace(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar + Clone, -{ - multispace1(input) -} - -/// Recognizes zero or more spaces, tabs, carriage returns and line feeds -pub fn multispace0(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar + Clone, -{ - input.split_at_position(|item| { - let c = item.clone().as_char(); - !(c == ' ' || c == '\t' || c == '\r' || c == '\n') - }) -} -/// Recognizes one or more spaces, tabs, carriage returns and line feeds -pub fn multispace1(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar + Clone, -{ - input.split_at_position1( - |item| { - let c = item.clone().as_char(); - !(c == ' ' || c == '\t' || c == '\r' || c == '\n') - }, - ErrorKind::MultiSpace, - ) -} - -pub fn sized_buffer(input: &[u8]) -> IResult<&[u8], &[u8]> { - if input.is_empty() { - return need_more(input, Needed::Unknown); - } - - let len = input[0] as usize; - - if input.len() >= len + 1 { - Ok((&input[len + 1..], &input[1..len + 1])) - } else { - need_more(input, Needed::Size(1 + len)) - } -} - -/// Recognizes an unsigned 1 byte integer (equivalent to take!(1) -#[inline] -pub fn be_u8(i: &[u8]) -> IResult<&[u8], u8> { - if i.len() < 1 { - need_more(i, Needed::Size(1)) - } else { - Ok((&i[1..], i[0])) - } -} - -/// Recognizes big endian unsigned 2 bytes integer -#[inline] -pub fn be_u16(i: &[u8]) -> IResult<&[u8], u16> { - if i.len() < 2 { - need_more(i, Needed::Size(2)) - } else { - let res = ((i[0] as u16) << 8) + i[1] as u16; - Ok((&i[2..], res)) - } -} - -/// Recognizes big endian unsigned 3 byte integer -#[inline] -pub fn be_u24(i: &[u8]) -> IResult<&[u8], u32> { - if i.len() < 3 { - need_more(i, Needed::Size(3)) - } else { - let res = ((i[0] as u32) << 16) + ((i[1] as u32) << 8) + (i[2] as u32); - Ok((&i[3..], res)) - } -} - -/// Recognizes big endian unsigned 4 bytes integer -#[inline] -pub fn be_u32(i: &[u8]) -> IResult<&[u8], u32> { - if i.len() < 4 { - need_more(i, Needed::Size(4)) - } else { - let res = ((i[0] as u32) << 24) + ((i[1] as u32) << 16) + ((i[2] as u32) << 8) + i[3] as u32; - Ok((&i[4..], res)) - } -} - -/// Recognizes big endian unsigned 8 bytes integer -#[inline] -pub fn be_u64(i: &[u8]) -> IResult<&[u8], u64, u32> { - if i.len() < 8 { - need_more(i, Needed::Size(8)) - } else { - let res = ((i[0] as u64) << 56) + ((i[1] as u64) << 48) + ((i[2] as u64) << 40) + ((i[3] as u64) << 32) + ((i[4] as u64) << 24) - + ((i[5] as u64) << 16) + ((i[6] as u64) << 
8) + i[7] as u64; - Ok((&i[8..], res)) - } -} - -/// Recognizes big endian unsigned 16 bytes integer -#[inline] -pub fn be_u128(i: &[u8]) -> IResult<&[u8], u128, u32> { - if i.len() < 16 { - need_more(i, Needed::Size(16)) - } else { - let res = ((i[0] as u128) << 120) - + ((i[1] as u128) << 112) - + ((i[2] as u128) << 104) - + ((i[3] as u128) << 96) - + ((i[4] as u128) << 88) - + ((i[5] as u128) << 80) - + ((i[6] as u128) << 72) - + ((i[7] as u128) << 64) - + ((i[8] as u128) << 56) - + ((i[9] as u128) << 48) - + ((i[10] as u128) << 40) - + ((i[11] as u128) << 32) - + ((i[12] as u128) << 24) - + ((i[13] as u128) << 16) - + ((i[14] as u128) << 8) - + i[15] as u128; - Ok((&i[16..], res)) - } -} - -/// Recognizes a signed 1 byte integer (equivalent to take!(1) -#[inline] -pub fn be_i8(i: &[u8]) -> IResult<&[u8], i8> { - map!(i, be_u8, |x| x as i8) -} - -/// Recognizes big endian signed 2 bytes integer -#[inline] -pub fn be_i16(i: &[u8]) -> IResult<&[u8], i16> { - map!(i, be_u16, |x| x as i16) -} - -/// Recognizes big endian signed 3 bytes integer -#[inline] -pub fn be_i24(i: &[u8]) -> IResult<&[u8], i32> { - // Same as the unsigned version but we need to sign-extend manually here - map!(i, be_u24, |x| if x & 0x80_00_00 != 0 { - (x | 0xff_00_00_00) as i32 - } else { - x as i32 - }) -} - -/// Recognizes big endian signed 4 bytes integer -#[inline] -pub fn be_i32(i: &[u8]) -> IResult<&[u8], i32> { - map!(i, be_u32, |x| x as i32) -} - -/// Recognizes big endian signed 8 bytes integer -#[inline] -pub fn be_i64(i: &[u8]) -> IResult<&[u8], i64> { - map!(i, be_u64, |x| x as i64) -} - -/// Recognizes big endian signed 16 bytes integer -#[inline] -pub fn be_i128(i: &[u8]) -> IResult<&[u8], i128> { - map!(i, be_u128, |x| x as i128) -} - -/// Recognizes an unsigned 1 byte integer (equivalent to take!(1) -#[inline] -pub fn le_u8(i: &[u8]) -> IResult<&[u8], u8> { - if i.len() < 1 { - need_more(i, Needed::Size(1)) - } else { - Ok((&i[1..], i[0])) - } -} - -/// Recognizes little endian unsigned 2 bytes integer -#[inline] -pub fn le_u16(i: &[u8]) -> IResult<&[u8], u16> { - if i.len() < 2 { - need_more(i, Needed::Size(2)) - } else { - let res = ((i[1] as u16) << 8) + i[0] as u16; - Ok((&i[2..], res)) - } -} - -/// Recognizes little endian unsigned 3 byte integer -#[inline] -pub fn le_u24(i: &[u8]) -> IResult<&[u8], u32> { - if i.len() < 3 { - need_more(i, Needed::Size(3)) - } else { - let res = (i[0] as u32) + ((i[1] as u32) << 8) + ((i[2] as u32) << 16); - Ok((&i[3..], res)) - } -} - -/// Recognizes little endian unsigned 4 bytes integer -#[inline] -pub fn le_u32(i: &[u8]) -> IResult<&[u8], u32> { - if i.len() < 4 { - need_more(i, Needed::Size(4)) - } else { - let res = ((i[3] as u32) << 24) + ((i[2] as u32) << 16) + ((i[1] as u32) << 8) + i[0] as u32; - Ok((&i[4..], res)) - } -} - -/// Recognizes little endian unsigned 8 bytes integer -#[inline] -pub fn le_u64(i: &[u8]) -> IResult<&[u8], u64> { - if i.len() < 8 { - need_more(i, Needed::Size(8)) - } else { - let res = ((i[7] as u64) << 56) + ((i[6] as u64) << 48) + ((i[5] as u64) << 40) + ((i[4] as u64) << 32) + ((i[3] as u64) << 24) - + ((i[2] as u64) << 16) + ((i[1] as u64) << 8) + i[0] as u64; - Ok((&i[8..], res)) - } -} - -/// Recognizes little endian unsigned 16 bytes integer -#[inline] -pub fn le_u128(i: &[u8]) -> IResult<&[u8], u128, u32> { - if i.len() < 16 { - need_more(i, Needed::Size(16)) - } else { - let res = ((i[15] as u128) << 120) - + ((i[14] as u128) << 112) - + ((i[13] as u128) << 104) - + ((i[12] as u128) << 96) - + ((i[11] as u128) << 88) - 
+ ((i[10] as u128) << 80) - + ((i[9] as u128) << 72) - + ((i[8] as u128) << 64) - + ((i[7] as u128) << 56) - + ((i[6] as u128) << 48) - + ((i[5] as u128) << 40) - + ((i[4] as u128) << 32) - + ((i[3] as u128) << 24) - + ((i[2] as u128) << 16) - + ((i[1] as u128) << 8) - + i[0] as u128; - Ok((&i[16..], res)) - } -} - -/// Recognizes a signed 1 byte integer (equivalent to take!(1) -#[inline] -pub fn le_i8(i: &[u8]) -> IResult<&[u8], i8> { - map!(i, le_u8, |x| x as i8) -} - -/// Recognizes little endian signed 2 bytes integer -#[inline] -pub fn le_i16(i: &[u8]) -> IResult<&[u8], i16> { - map!(i, le_u16, |x| x as i16) -} - -/// Recognizes little endian signed 3 bytes integer -#[inline] -pub fn le_i24(i: &[u8]) -> IResult<&[u8], i32> { - // Same as the unsigned version but we need to sign-extend manually here - map!(i, le_u24, |x| if x & 0x80_00_00 != 0 { - (x | 0xff_00_00_00) as i32 - } else { - x as i32 - }) -} - -/// Recognizes little endian signed 4 bytes integer -#[inline] -pub fn le_i32(i: &[u8]) -> IResult<&[u8], i32> { - map!(i, le_u32, |x| x as i32) -} - -/// Recognizes little endian signed 8 bytes integer -#[inline] -pub fn le_i64(i: &[u8]) -> IResult<&[u8], i64> { - map!(i, le_u64, |x| x as i64) -} - -/// Recognizes little endian signed 16 bytes integer -#[inline] -pub fn le_i128(i: &[u8]) -> IResult<&[u8], i128> { - map!(i, le_u128, |x| x as i128) -} - -/// Configurable endianness -#[derive(Debug, PartialEq, Eq, Clone, Copy)] -pub enum Endianness { - Big, - Little, -} - -/// if the parameter is nom::Endianness::Big, parse a big endian u16 integer, -/// otherwise a little endian u16 integer -#[macro_export] -macro_rules! u16 ( ($i:expr, $e:expr) => ( {if $crate::Endianness::Big == $e { $crate::be_u16($i) } else { $crate::le_u16($i) } } );); -/// if the parameter is nom::Endianness::Big, parse a big endian u32 integer, -/// otherwise a little endian u32 integer -#[macro_export] -macro_rules! u32 ( ($i:expr, $e:expr) => ( {if $crate::Endianness::Big == $e { $crate::be_u32($i) } else { $crate::le_u32($i) } } );); -/// if the parameter is nom::Endianness::Big, parse a big endian u64 integer, -/// otherwise a little endian u64 integer -#[macro_export] -macro_rules! u64 ( ($i:expr, $e:expr) => ( {if $crate::Endianness::Big == $e { $crate::be_u64($i) } else { $crate::le_u64($i) } } );); -/// if the parameter is nom::Endianness::Big, parse a big endian u128 integer, -/// otherwise a little endian u128 integer -#[macro_export] -macro_rules! u128 ( ($i:expr, $e:expr) => ( {if $crate::Endianness::Big == $e { $crate::be_u128($i) } else { $crate::le_u128($i) } } );); - -/// if the parameter is nom::Endianness::Big, parse a big endian i16 integer, -/// otherwise a little endian i16 integer -#[macro_export] -macro_rules! i16 ( ($i:expr, $e:expr) => ( {if $crate::Endianness::Big == $e { $crate::be_i16($i) } else { $crate::le_i16($i) } } );); -/// if the parameter is nom::Endianness::Big, parse a big endian i32 integer, -/// otherwise a little endian i32 integer -#[macro_export] -macro_rules! i32 ( ($i:expr, $e:expr) => ( {if $crate::Endianness::Big == $e { $crate::be_i32($i) } else { $crate::le_i32($i) } } );); -/// if the parameter is nom::Endianness::Big, parse a big endian i64 integer, -/// otherwise a little endian i64 integer -#[macro_export] -macro_rules! 
i64 ( ($i:expr, $e:expr) => ( {if $crate::Endianness::Big == $e { $crate::be_i64($i) } else { $crate::le_i64($i) } } );); -/// if the parameter is nom::Endianness::Big, parse a big endian i64 integer, -/// otherwise a little endian i64 integer -#[macro_export] -macro_rules! i128 ( ($i:expr, $e:expr) => ( {if $crate::Endianness::Big == $e { $crate::be_i128($i) } else { $crate::le_i128($i) } } );); - -/// Recognizes big endian 4 bytes floating point number -#[inline] -pub fn be_f32(input: &[u8]) -> IResult<&[u8], f32> { - match be_u32(input) { - Err(e) => Err(e), - Ok((i, o)) => unsafe { Ok((i, transmute::(o))) }, - } -} - -/// Recognizes big endian 8 bytes floating point number -#[inline] -pub fn be_f64(input: &[u8]) -> IResult<&[u8], f64> { - match be_u64(input) { - Err(e) => Err(e), - Ok((i, o)) => unsafe { Ok((i, transmute::(o))) }, - } -} - -/// Recognizes little endian 4 bytes floating point number -#[inline] -pub fn le_f32(input: &[u8]) -> IResult<&[u8], f32> { - match le_u32(input) { - Err(e) => Err(e), - Ok((i, o)) => unsafe { Ok((i, transmute::(o))) }, - } -} - -/// Recognizes little endian 8 bytes floating point number -#[inline] -pub fn le_f64(input: &[u8]) -> IResult<&[u8], f64> { - match le_u64(input) { - Err(e) => Err(e), - Ok((i, o)) => unsafe { Ok((i, transmute::(o))) }, - } -} - -/// Recognizes a hex-encoded integer -#[inline] -pub fn hex_u32(input: &[u8]) -> IResult<&[u8], u32> { - match is_a!(input, &b"0123456789abcdefABCDEF"[..]) { - Err(e) => Err(e), - Ok((i, o)) => { - // Do not parse more than 8 characters for a u32 - let (parsed, remaining) = if o.len() <= 8 { - (o, i) - } else { - (&input[..8], &input[8..]) - }; - - let res = parsed - .iter() - .rev() - .enumerate() - .map(|(k, &v)| { - let digit = v as char; - digit.to_digit(16).unwrap_or(0) << (k * 4) - }) - .sum(); - - Ok((remaining, res)) - } - } -} - -/// Recognizes non empty buffers -#[inline] -pub fn non_empty(input: T) -> IResult -where - T: Slice> + Slice> + Slice>, - T: InputLength + AtEof, -{ - if input.input_len() == 0 { - return need_more_err(input, Needed::Unknown, ErrorKind::NonEmpty::); - } else { - Ok((input.slice(input.input_len()..), input)) - } -} - -/// Return the remaining input. -#[inline] -pub fn rest(input: T) -> IResult -where - T: Slice> + Slice> + Slice>, - T: InputLength, -{ - Ok((input.slice(input.input_len()..), input)) -} - -/// Return the length of the remaining input. -#[inline] -pub fn rest_len(input: T) -> IResult -where - T: Slice> + Slice> + Slice>, - T: InputLength, -{ - let len = input.input_len(); - Ok((input, len)) -} - -/// Return the remaining input, for strings. 
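The `hex_u32` parser above caps the match at eight hex characters and then folds the digits from the least-significant nibble upwards. A std-only sketch of that accumulation step (the `hex_value` helper is an illustrative name, not part of nom):

```rust
/// Std-only sketch of the digit accumulation used by `hex_u32`: the matched
/// hex digits are walked in reverse, each contributing one nibble shifted
/// into place four bits at a time.
fn hex_value(parsed: &[u8]) -> u32 {
    parsed
        .iter()
        .rev()
        .enumerate()
        .map(|(k, &v)| (v as char).to_digit(16).unwrap_or(0) << (k * 4))
        .sum()
}

fn main() {
    // Mirrors the values asserted in the `hex_u32_tests` later in this file.
    assert_eq!(hex_value(b"ff"), 255);
    assert_eq!(hex_value(b"1be2"), 7_138);
    assert_eq!(hex_value(b"c5a31be2"), 3_315_801_058);
}
```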
-#[inline] -pub fn rest_s(input: &str) -> IResult<&str, &str> { - Ok((&input[input.len()..], input)) -} - -#[allow(unused_imports)] -#[cfg_attr(rustfmt, rustfmt_skip)] -pub fn recognize_float(input: T) -> IResult -where - T: Slice> + Slice> + Slice>, - T: Clone + Offset, - T: InputIter + AtEof, - ::Item: AsChar, - T: InputTakeAtPosition, - ::Item: AsChar -{ - recognize!(input, - tuple!( - opt!(alt!(char!('+') | char!('-'))), - alt!( - value!((), tuple!(digit, opt!(pair!(char!('.'), opt!(digit))))) - | value!((), tuple!(char!('.'), digit)) - ), - opt!(tuple!( - alt!(char!('e') | char!('E')), - opt!(alt!(char!('+') | char!('-'))), - digit - ) - ) - ) - ) -} - -/// Recognizes floating point number in a byte string and returns a f32 -#[cfg(feature = "alloc")] -//pub fn float(input: &[u8]) -> IResult<&[u8], f32> { -pub fn float(input: T) -> IResult -where - T: Slice> + Slice> + Slice>, - T: Clone + Offset, - T: InputIter + InputLength + ParseTo + AtEof, - ::Item: AsChar, - T: InputTakeAtPosition, - ::Item: AsChar -{ - flat_map!(input, recognize_float, parse_to!(f32)) -} - -/// Recognizes floating point number in a string and returns a f32 -#[cfg(feature = "alloc")] -#[deprecated(since = "4.1.0", note = "Please use `float` instead")] -pub fn float_s(input: T) -> IResult -where - T: Slice> + Slice> + Slice>, - T: Clone + Offset, - T: InputIter + InputLength + ParseTo + AtEof, - ::Item: AsChar, - T: InputTakeAtPosition, - ::Item: AsChar -{ - flat_map!(input, call!(recognize_float), parse_to!(f32)) -} - -/// Recognizes floating point number in a byte string and returns a f64 -#[cfg(feature = "alloc")] -pub fn double(input: T) -> IResult -where - T: Slice> + Slice> + Slice>, - T: Clone + Offset, - T: InputIter + InputLength + ParseTo + AtEof, - ::Item: AsChar, - T: InputTakeAtPosition, - ::Item: AsChar -{ - flat_map!(input, call!(recognize_float), parse_to!(f64)) -} - -/// Recognizes floating point number in a string and returns a f64 -#[cfg(feature = "alloc")] -#[deprecated(since = "4.1.0", note = "Please use `double` instead")] -pub fn double_s(input: T) -> IResult -where - T: Slice> + Slice> + Slice>, - T: Clone + Offset, - T: InputIter + InputLength + ParseTo + AtEof, - ::Item: AsChar, - T: InputTakeAtPosition, - ::Item: AsChar -{ - flat_map!(input, call!(recognize_float), parse_to!(f64)) -} - -#[cfg(test)] -mod tests { - use super::*; - use internal::{Err, IResult, Needed}; - use types::{CompleteByteSlice, CompleteStr}; - - #[test] - #[cfg(feature = "alloc")] - fn tag_closure() { - let x = tag_cl(&b"abcd"[..]); - let r = x(&b"abcdabcdefgh"[..]); - assert_eq!(r, Ok((&b"abcdefgh"[..], &b"abcd"[..]))); - - let r2 = x(&b"abcefgh"[..]); - assert_eq!( - r2, - Err(Err::Error(error_position!( - &b"abcefgh"[..], - ErrorKind::TagClosure - ),)) - ); - } - - #[test] - fn character() { - let empty: &[u8] = b""; - let a: &[u8] = b"abcd"; - let b: &[u8] = b"1234"; - let c: &[u8] = b"a123"; - let d: &[u8] = "azé12".as_bytes(); - let e: &[u8] = b" "; - let f: &[u8] = b" ;"; - assert_eq!(alpha(a), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!( - alpha(CompleteByteSlice(a)), - Ok((CompleteByteSlice(empty), CompleteByteSlice(a))) - ); - assert_eq!( - alpha(b), - Err(Err::Error(error_position!(b, ErrorKind::Alpha))) - ); - assert_eq!(alpha(c), Ok((&c[1..], &b"a"[..]))); - assert_eq!(alpha(d), Ok(("é12".as_bytes(), &b"az"[..]))); - assert_eq!( - digit(a), - Err(Err::Error(error_position!(a, ErrorKind::Digit))) - ); - assert_eq!(digit(b), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!( - 
digit(CompleteByteSlice(b)), - Ok((CompleteByteSlice(empty), CompleteByteSlice(b))) - ); - assert_eq!( - digit(c), - Err(Err::Error(error_position!(c, ErrorKind::Digit))) - ); - assert_eq!( - digit(d), - Err(Err::Error(error_position!(d, ErrorKind::Digit))) - ); - assert_eq!(hex_digit(a), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!( - hex_digit(CompleteByteSlice(a)), - Ok((CompleteByteSlice(empty), CompleteByteSlice(a))) - ); - assert_eq!(hex_digit(b), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!( - hex_digit(CompleteByteSlice(b)), - Ok((CompleteByteSlice(empty), CompleteByteSlice(b))) - ); - assert_eq!(hex_digit(c), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!( - hex_digit(CompleteByteSlice(c)), - Ok((CompleteByteSlice(empty), CompleteByteSlice(c))) - ); - assert_eq!(hex_digit(d), Ok(("zé12".as_bytes(), &b"a"[..]))); - assert_eq!( - hex_digit(e), - Err(Err::Error(error_position!(e, ErrorKind::HexDigit))) - ); - assert_eq!( - oct_digit(a), - Err(Err::Error(error_position!(a, ErrorKind::OctDigit))) - ); - assert_eq!(oct_digit(b), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!( - oct_digit(CompleteByteSlice(b)), - Ok((CompleteByteSlice(empty), CompleteByteSlice(b))) - ); - assert_eq!( - oct_digit(c), - Err(Err::Error(error_position!(c, ErrorKind::OctDigit))) - ); - assert_eq!( - oct_digit(d), - Err(Err::Error(error_position!(d, ErrorKind::OctDigit))) - ); - assert_eq!(alphanumeric(a), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!( - alphanumeric(CompleteByteSlice(a)), - Ok((CompleteByteSlice(empty), CompleteByteSlice(a))) - ); - //assert_eq!(fix_error!(b,(), alphanumeric), Ok((empty, b))); - assert_eq!(alphanumeric(c), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!( - alphanumeric(CompleteByteSlice(c)), - Ok((CompleteByteSlice(empty), CompleteByteSlice(c))) - ); - assert_eq!(alphanumeric(d), Ok(("é12".as_bytes(), &b"az"[..]))); - assert_eq!(space(e), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!( - space(CompleteByteSlice(e)), - Ok((CompleteByteSlice(empty), CompleteByteSlice(b" "))) - ); - assert_eq!(space(f), Ok((&b";"[..], &b" "[..]))); - assert_eq!( - space(CompleteByteSlice(f)), - Ok((CompleteByteSlice(b";"), CompleteByteSlice(b" "))) - ); - } - - #[cfg(feature = "alloc")] - #[test] - fn character_s() { - let empty = ""; - let a = "abcd"; - let b = "1234"; - let c = "a123"; - let d = "azé12"; - let e = " "; - assert_eq!(alpha(a), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!( - alpha(CompleteStr(a)), - Ok((CompleteStr(empty), CompleteStr(a))) - ); - assert_eq!( - alpha(b), - Err(Err::Error(error_position!(b, ErrorKind::Alpha))) - ); - assert_eq!(alpha(c), Ok((&c[1..], &"a"[..]))); - assert_eq!(alpha(d), Ok(("12", &"azé"[..]))); - assert_eq!( - digit(a), - Err(Err::Error(error_position!(a, ErrorKind::Digit))) - ); - assert_eq!(digit(b), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!( - digit(CompleteStr(b)), - Ok((CompleteStr(empty), CompleteStr(b))) - ); - assert_eq!( - digit(c), - Err(Err::Error(error_position!(c, ErrorKind::Digit))) - ); - assert_eq!( - digit(d), - Err(Err::Error(error_position!(d, ErrorKind::Digit))) - ); - assert_eq!(hex_digit(a), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!( - hex_digit(CompleteStr(a)), - Ok((CompleteStr(empty), CompleteStr(a))) - ); - assert_eq!(hex_digit(b), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!( - hex_digit(CompleteStr(b)), - Ok((CompleteStr(empty), CompleteStr(b))) - ); - assert_eq!(hex_digit(c), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!( - 
hex_digit(CompleteStr(c)), - Ok((CompleteStr(empty), CompleteStr(c))) - ); - assert_eq!(hex_digit(d), Ok(("zé12", &"a"[..]))); - assert_eq!( - hex_digit(e), - Err(Err::Error(error_position!(e, ErrorKind::HexDigit))) - ); - assert_eq!( - oct_digit(a), - Err(Err::Error(error_position!(a, ErrorKind::OctDigit))) - ); - assert_eq!(oct_digit(b), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!( - oct_digit(CompleteStr(b)), - Ok((CompleteStr(empty), CompleteStr(b))) - ); - assert_eq!( - oct_digit(c), - Err(Err::Error(error_position!(c, ErrorKind::OctDigit))) - ); - assert_eq!( - oct_digit(d), - Err(Err::Error(error_position!(d, ErrorKind::OctDigit))) - ); - assert_eq!(alphanumeric(a), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!( - alphanumeric(CompleteStr(a)), - Ok((CompleteStr(empty), CompleteStr(a))) - ); - //assert_eq!(fix_error!(b,(), alphanumeric), Ok((empty, b))); - assert_eq!(alphanumeric(c), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!( - alphanumeric(CompleteStr(c)), - Ok((CompleteStr(empty), CompleteStr(c))) - ); - assert_eq!(alphanumeric(d), Err(Err::Incomplete(Needed::Size(1)))); - assert_eq!( - alphanumeric(CompleteStr(d)), - Ok((CompleteStr(""), CompleteStr("azé12"))) - ); - assert_eq!(space(e), Err(Err::Incomplete(Needed::Size(1)))); - } - - use traits::Offset; - #[test] - fn offset() { - let a = &b"abcd;"[..]; - let b = &b"1234;"[..]; - let c = &b"a123;"[..]; - let d = &b" \t;"[..]; - let e = &b" \t\r\n;"[..]; - let f = &b"123abcDEF;"[..]; - - match alpha(a) { - Ok((i, _)) => { - assert_eq!(a.offset(i) + i.len(), a.len()); - } - _ => panic!("wrong return type in offset test for alpha"), - } - match digit(b) { - Ok((i, _)) => { - assert_eq!(b.offset(i) + i.len(), b.len()); - } - _ => panic!("wrong return type in offset test for digit"), - } - match alphanumeric(c) { - Ok((i, _)) => { - assert_eq!(c.offset(i) + i.len(), c.len()); - } - _ => panic!("wrong return type in offset test for alphanumeric"), - } - match space(d) { - Ok((i, _)) => { - assert_eq!(d.offset(i) + i.len(), d.len()); - } - _ => panic!("wrong return type in offset test for space"), - } - match multispace(e) { - Ok((i, _)) => { - assert_eq!(e.offset(i) + i.len(), e.len()); - } - _ => panic!("wrong return type in offset test for multispace"), - } - match hex_digit(f) { - Ok((i, _)) => { - assert_eq!(f.offset(i) + i.len(), f.len()); - } - _ => panic!("wrong return type in offset test for hex_digit"), - } - match oct_digit(f) { - Ok((i, _)) => { - assert_eq!(f.offset(i) + i.len(), f.len()); - } - _ => panic!("wrong return type in offset test for oct_digit"), - } - } - - #[test] - fn is_not_line_ending_bytes() { - let a: &[u8] = b"ab12cd\nefgh"; - assert_eq!(not_line_ending(a), Ok((&b"\nefgh"[..], &b"ab12cd"[..]))); - - let b: &[u8] = b"ab12cd\nefgh\nijkl"; - assert_eq!( - not_line_ending(b), - Ok((&b"\nefgh\nijkl"[..], &b"ab12cd"[..])) - ); - - let c: &[u8] = b"ab12cd\r\nefgh\nijkl"; - assert_eq!( - not_line_ending(c), - Ok((&b"\r\nefgh\nijkl"[..], &b"ab12cd"[..])) - ); - - let d = CompleteByteSlice(b"ab12cd"); - assert_eq!(not_line_ending(d), Ok((CompleteByteSlice(b""), d))); - - let d: &[u8] = b"ab12cd"; - assert_eq!(not_line_ending(d), Err(Err::Incomplete(Needed::Unknown))); - } - - #[test] - fn is_not_line_ending_str() { - /* - let a: &str = "ab12cd\nefgh"; - assert_eq!(not_line_ending(a), Ok((&"\nefgh"[..], &"ab12cd"[..]))); - - let b: &str = "ab12cd\nefgh\nijkl"; - assert_eq!(not_line_ending(b), Ok((&"\nefgh\nijkl"[..], &"ab12cd"[..]))); - - let c: &str = "ab12cd\r\nefgh\nijkl"; - 
assert_eq!(not_line_ending(c), Ok((&"\r\nefgh\nijkl"[..], &"ab12cd"[..]))); - - let d = "βèƒôřè\nÂßÇáƒƭèř"; - assert_eq!(not_line_ending(d), Ok((&"\nÂßÇáƒƭèř"[..], &"βèƒôřè"[..]))); - - let e = "βèƒôřè\r\nÂßÇáƒƭèř"; - assert_eq!(not_line_ending(e), Ok((&"\r\nÂßÇáƒƭèř"[..], &"βèƒôřè"[..]))); - */ - - let f = "βèƒôřè\rÂßÇáƒƭèř"; - assert_eq!( - not_line_ending(f), - Err(Err::Error(error_position!(f, ErrorKind::Tag))) - ); - - let g = CompleteStr("ab12cd"); - assert_eq!(not_line_ending(g), Ok((CompleteStr(""), g))); - - let g2: &str = "ab12cd"; - assert_eq!(not_line_ending(g2), Err(Err::Incomplete(Needed::Unknown))); - } - - #[test] - #[cfg(feature = "alloc")] - fn buffer_with_size() { - use lib::std::vec::Vec; - let i: Vec = vec![7, 8]; - let o: Vec = vec![4, 5, 6]; - //let arr:[u8; 6usize] = [3, 4, 5, 6, 7, 8]; - let arr: [u8; 6usize] = [3, 4, 5, 6, 7, 8]; - let res = sized_buffer(&arr[..]); - assert_eq!(res, Ok((&i[..], &o[..]))) - } - - /*#[test] - fn t1() { - let v1:Vec = vec![1,2,3]; - let v2:Vec = vec![4,5,6]; - let d = Ok((&v1[..], &v2[..])); - let res = d.flat_map(print); - assert_eq!(res, Ok((&v2[..], ()))); - }*/ - - #[test] - fn i8_tests() { - assert_eq!(be_i8(&[0x00]), Ok((&b""[..], 0))); - assert_eq!(be_i8(&[0x7f]), Ok((&b""[..], 127))); - assert_eq!(be_i8(&[0xff]), Ok((&b""[..], -1))); - assert_eq!(be_i8(&[0x80]), Ok((&b""[..], -128))); - } - - #[test] - fn i16_tests() { - assert_eq!(be_i16(&[0x00, 0x00]), Ok((&b""[..], 0))); - assert_eq!(be_i16(&[0x7f, 0xff]), Ok((&b""[..], 32_767_i16))); - assert_eq!(be_i16(&[0xff, 0xff]), Ok((&b""[..], -1))); - assert_eq!(be_i16(&[0x80, 0x00]), Ok((&b""[..], -32_768_i16))); - } - - #[test] - fn u24_tests() { - assert_eq!(be_u24(&[0x00, 0x00, 0x00]), Ok((&b""[..], 0))); - assert_eq!(be_u24(&[0x00, 0xFF, 0xFF]), Ok((&b""[..], 65_535_u32))); - assert_eq!(be_u24(&[0x12, 0x34, 0x56]), Ok((&b""[..], 1_193_046_u32))); - } - - #[test] - fn i24_tests() { - assert_eq!(be_i24(&[0xFF, 0xFF, 0xFF]), Ok((&b""[..], -1_i32))); - assert_eq!(be_i24(&[0xFF, 0x00, 0x00]), Ok((&b""[..], -65_536_i32))); - assert_eq!(be_i24(&[0xED, 0xCB, 0xAA]), Ok((&b""[..], -1_193_046_i32))); - } - - #[test] - fn i32_tests() { - assert_eq!(be_i32(&[0x00, 0x00, 0x00, 0x00]), Ok((&b""[..], 0))); - assert_eq!( - be_i32(&[0x7f, 0xff, 0xff, 0xff]), - Ok((&b""[..], 2_147_483_647_i32)) - ); - assert_eq!(be_i32(&[0xff, 0xff, 0xff, 0xff]), Ok((&b""[..], -1))); - assert_eq!( - be_i32(&[0x80, 0x00, 0x00, 0x00]), - Ok((&b""[..], -2_147_483_648_i32)) - ); - } - - #[test] - fn i64_tests() { - assert_eq!( - be_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), - Ok((&b""[..], 0)) - ); - assert_eq!( - be_i64(&[0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]), - Ok((&b""[..], 9_223_372_036_854_775_807_i64)) - ); - assert_eq!( - be_i64(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]), - Ok((&b""[..], -1)) - ); - assert_eq!( - be_i64(&[0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), - Ok((&b""[..], -9_223_372_036_854_775_808_i64)) - ); - } - - #[test] - fn i128_tests() { - assert_eq!( - be_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), - Ok((&b""[..], 0)) - ); - assert_eq!( - be_i128(&[0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]), - Ok((&b""[..], 170_141_183_460_469_231_731_687_303_715_884_105_727_i128)) - ); - assert_eq!( - be_i128(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]), - Ok((&b""[..], -1)) - ); - assert_eq!( - 
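The `i24_tests` above exercise the manual sign extension done by `be_i24`/`le_i24`: when bit 23 of the 24-bit value is set, the top byte is filled with ones before the cast to `i32`. A std-only sketch of that widening (`sign_extend_24` is an illustrative name, not a nom function):

```rust
/// Std-only sketch of the sign extension in `be_i24` / `le_i24`: a 24-bit
/// value with bit 23 set gets its top byte filled with ones before being
/// reinterpreted as an i32.
fn sign_extend_24(x: u32) -> i32 {
    if x & 0x80_00_00 != 0 {
        (x | 0xff_00_00_00) as i32
    } else {
        x as i32
    }
}

fn main() {
    // Mirrors the `i24_tests` values: 0xFFFFFF is -1 and 0xEDCBAA is -1_193_046.
    assert_eq!(sign_extend_24(0xFF_FF_FF), -1);
    assert_eq!(sign_extend_24(0xED_CB_AA), -1_193_046);
    assert_eq!(sign_extend_24(0x12_34_56), 1_193_046);
}
```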
be_i128(&[0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), - Ok((&b""[..], -170_141_183_460_469_231_731_687_303_715_884_105_728_i128)) - ); - } - - #[test] - fn le_i8_tests() { - assert_eq!(le_i8(&[0x00]), Ok((&b""[..], 0))); - assert_eq!(le_i8(&[0x7f]), Ok((&b""[..], 127))); - assert_eq!(le_i8(&[0xff]), Ok((&b""[..], -1))); - assert_eq!(le_i8(&[0x80]), Ok((&b""[..], -128))); - } - - #[test] - fn le_i16_tests() { - assert_eq!(le_i16(&[0x00, 0x00]), Ok((&b""[..], 0))); - assert_eq!(le_i16(&[0xff, 0x7f]), Ok((&b""[..], 32_767_i16))); - assert_eq!(le_i16(&[0xff, 0xff]), Ok((&b""[..], -1))); - assert_eq!(le_i16(&[0x00, 0x80]), Ok((&b""[..], -32_768_i16))); - } - - #[test] - fn le_u24_tests() { - assert_eq!(le_u24(&[0x00, 0x00, 0x00]), Ok((&b""[..], 0))); - assert_eq!(le_u24(&[0xFF, 0xFF, 0x00]), Ok((&b""[..], 65_535_u32))); - assert_eq!(le_u24(&[0x56, 0x34, 0x12]), Ok((&b""[..], 1_193_046_u32))); - } - - #[test] - fn le_i24_tests() { - assert_eq!(le_i24(&[0xFF, 0xFF, 0xFF]), Ok((&b""[..], -1_i32))); - assert_eq!(le_i24(&[0x00, 0x00, 0xFF]), Ok((&b""[..], -65_536_i32))); - assert_eq!(le_i24(&[0xAA, 0xCB, 0xED]), Ok((&b""[..], -1_193_046_i32))); - } - - #[test] - fn le_i32_tests() { - assert_eq!(le_i32(&[0x00, 0x00, 0x00, 0x00]), Ok((&b""[..], 0))); - assert_eq!( - le_i32(&[0xff, 0xff, 0xff, 0x7f]), - Ok((&b""[..], 2_147_483_647_i32)) - ); - assert_eq!(le_i32(&[0xff, 0xff, 0xff, 0xff]), Ok((&b""[..], -1))); - assert_eq!( - le_i32(&[0x00, 0x00, 0x00, 0x80]), - Ok((&b""[..], -2_147_483_648_i32)) - ); - } - - #[test] - fn le_i64_tests() { - assert_eq!( - le_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), - Ok((&b""[..], 0)) - ); - assert_eq!( - le_i64(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f]), - Ok((&b""[..], 9_223_372_036_854_775_807_i64)) - ); - assert_eq!( - le_i64(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]), - Ok((&b""[..], -1)) - ); - assert_eq!( - le_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80]), - Ok((&b""[..], -9_223_372_036_854_775_808_i64)) - ); - } - - #[test] - fn le_i128_tests() { - assert_eq!( - le_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), - Ok((&b""[..], 0)) - ); - assert_eq!( - le_i128(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f]), - Ok((&b""[..], 170_141_183_460_469_231_731_687_303_715_884_105_727_i128)) - ); - assert_eq!( - le_i128(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]), - Ok((&b""[..], -1)) - ); - assert_eq!( - le_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80]), - Ok((&b""[..], -170_141_183_460_469_231_731_687_303_715_884_105_728_i128)) - ); - } - - #[test] - fn be_f32_tests() { - assert_eq!(be_f32(&[0x00, 0x00, 0x00, 0x00]), Ok((&b""[..], 0_f32))); - assert_eq!( - be_f32(&[0x4d, 0x31, 0x1f, 0xd8]), - Ok((&b""[..], 185_728_392_f32)) - ); - } - - #[test] - fn be_f64_tests() { - assert_eq!( - be_f64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), - Ok((&b""[..], 0_f64)) - ); - assert_eq!( - be_f64(&[0x41, 0xa6, 0x23, 0xfb, 0x10, 0x00, 0x00, 0x00]), - Ok((&b""[..], 185_728_392_f64)) - ); - } - - #[test] - fn le_f32_tests() { - assert_eq!(le_f32(&[0x00, 0x00, 0x00, 0x00]), Ok((&b""[..], 0_f32))); - assert_eq!( - le_f32(&[0xd8, 0x1f, 0x31, 0x4d]), - Ok((&b""[..], 185_728_392_f32)) - ); - } - - #[test] - fn le_f64_tests() { - assert_eq!( - le_f64(&[0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00]), - Ok((&b""[..], 0_f64)) - ); - assert_eq!( - le_f64(&[0x00, 0x00, 0x00, 0x10, 0xfb, 0x23, 0xa6, 0x41]), - Ok((&b""[..], 185_728_392_f64)) - ); - } - - #[test] - fn hex_u32_tests() { - assert_eq!( - hex_u32(&b";"[..]), - Err(Err::Error(error_position!(&b";"[..], ErrorKind::IsA))) - ); - assert_eq!(hex_u32(&b"ff;"[..]), Ok((&b";"[..], 255))); - assert_eq!(hex_u32(&b"1be2;"[..]), Ok((&b";"[..], 7138))); - assert_eq!(hex_u32(&b"c5a31be2;"[..]), Ok((&b";"[..], 3_315_801_058))); - assert_eq!(hex_u32(&b"C5A31be2;"[..]), Ok((&b";"[..], 3_315_801_058))); - assert_eq!(hex_u32(&b"00c5a31be2;"[..]), Ok((&b"e2;"[..], 12_952_347))); - assert_eq!( - hex_u32(&b"c5a31be201;"[..]), - Ok((&b"01;"[..], 3_315_801_058)) - ); - assert_eq!(hex_u32(&b"ffffffff;"[..]), Ok((&b";"[..], 4_294_967_295))); - assert_eq!(hex_u32(&b"0x1be2;"[..]), Ok((&b"x1be2;"[..], 0))); - } - - /* - #[test] - fn end_of_input() { - let not_over = &b"Hello, world!"[..]; - let is_over = &b""[..]; - named!(eof_test, eof!()); - - let res_not_over = eof_test(not_over); - assert_eq!(res_not_over, Err(Err::Error(error_position!(not_over, ErrorKind::Eof)))); - - let res_over = eof_test(is_over); - assert_eq!(res_over, Ok((is_over, is_over))); - } - */ - - #[test] - fn rest_on_slices() { - let input: &[u8] = &b"Hello, world!"[..]; - let empty: &[u8] = &b""[..]; - assert_eq!(rest(input), Ok((empty, input))); - } - - #[test] - fn rest_on_strs() { - let input: &str = "Hello, world!"; - let empty: &str = ""; - assert_eq!(rest(input), Ok((empty, input))); - } - - #[test] - fn rest_len_on_slices() { - let input: &[u8] = &b"Hello, world!"[..]; - assert_eq!(rest_len(input), Ok((input, input.len()))); - } - - #[test] - fn configurable_endianness() { - named!(be_tst16, u16!(Endianness::Big)); - named!(le_tst16, u16!(Endianness::Little)); - assert_eq!(be_tst16(&[0x80, 0x00]), Ok((&b""[..], 32_768_u16))); - assert_eq!(le_tst16(&[0x80, 0x00]), Ok((&b""[..], 128_u16))); - - named!(be_tst32, u32!(Endianness::Big)); - named!(le_tst32, u32!(Endianness::Little)); - assert_eq!( - be_tst32(&[0x12, 0x00, 0x60, 0x00]), - Ok((&b""[..], 302_014_464_u32)) - ); - assert_eq!( - le_tst32(&[0x12, 0x00, 0x60, 0x00]), - Ok((&b""[..], 6_291_474_u32)) - ); - - named!(be_tst64, u64!(Endianness::Big)); - named!(le_tst64, u64!(Endianness::Little)); - assert_eq!( - be_tst64(&[0x12, 0x00, 0x60, 0x00, 0x12, 0x00, 0x80, 0x00]), - Ok((&b""[..], 1_297_142_246_100_992_000_u64)) - ); - assert_eq!( - le_tst64(&[0x12, 0x00, 0x60, 0x00, 0x12, 0x00, 0x80, 0x00]), - Ok((&b""[..], 36_028_874_334_666_770_u64)) - ); - - named!(be_tsti16, i16!(Endianness::Big)); - named!(le_tsti16, i16!(Endianness::Little)); - assert_eq!(be_tsti16(&[0x00, 0x80]), Ok((&b""[..], 128_i16))); - assert_eq!(le_tsti16(&[0x00, 0x80]), Ok((&b""[..], -32_768_i16))); - - named!(be_tsti32, i32!(Endianness::Big)); - named!(le_tsti32, i32!(Endianness::Little)); - assert_eq!( - be_tsti32(&[0x00, 0x12, 0x60, 0x00]), - Ok((&b""[..], 1_204_224_i32)) - ); - assert_eq!( - le_tsti32(&[0x00, 0x12, 0x60, 0x00]), - Ok((&b""[..], 6_296_064_i32)) - ); - - named!(be_tsti64, i64!(Endianness::Big)); - named!(le_tsti64, i64!(Endianness::Little)); - assert_eq!( - be_tsti64(&[0x00, 0xFF, 0x60, 0x00, 0x12, 0x00, 0x80, 0x00]), - Ok((&b""[..], 71_881_672_479_506_432_i64)) - ); - assert_eq!( - le_tsti64(&[0x00, 0xFF, 0x60, 0x00, 0x12, 0x00, 0x80, 0x00]), - Ok((&b""[..], 36_028_874_334_732_032_i64)) - ); - } - - #[test] - #[cfg(feature = "std")] - fn manual_configurable_endianness_test() { - let x = 1; - let int_parse: Box 
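The `u16!`/`u32!` macros in `configurable_endianness` only select between the `be_`/`le_` variants; the byte-order difference itself is ordinary positional arithmetic, as the standard `from_be_bytes`/`from_le_bytes` constructors show. A std-only sketch using the same byte sequences as the test:

```rust
fn main() {
    // Same bytes, different byte order, mirroring `configurable_endianness`:
    // big endian reads [0x80, 0x00] as 0x8000, little endian as 0x0080.
    assert_eq!(u16::from_be_bytes([0x80, 0x00]), 32_768);
    assert_eq!(u16::from_le_bytes([0x80, 0x00]), 128);

    assert_eq!(u32::from_be_bytes([0x12, 0x00, 0x60, 0x00]), 302_014_464);
    assert_eq!(u32::from_le_bytes([0x12, 0x00, 0x60, 0x00]), 6_291_474);
}
```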
IResult<&[u8], u16>> = if x == 2 { - Box::new(be_u16) - } else { - Box::new(le_u16) - }; - println!("{:?}", int_parse(&b"3"[..])); - assert_eq!(int_parse(&[0x80, 0x00]), Ok((&b""[..], 128_u16))); - } - - use lib::std::convert::From; - impl From for CustomError { - fn from(_: u32) -> Self { - CustomError - } - } - - struct CustomError; - #[allow(dead_code)] - fn custom_error(input: &[u8]) -> IResult<&[u8], &[u8], CustomError> { - fix_error!(input, CustomError, alphanumeric) - } - - #[test] - fn hex_digit_test() { - let i = &b"0123456789abcdefABCDEF;"[..]; - assert_eq!(hex_digit(i), Ok((&b";"[..], &i[..i.len() - 1]))); - - let i = &b"g"[..]; - assert_eq!( - hex_digit(i), - Err(Err::Error(error_position!(i, ErrorKind::HexDigit))) - ); - - let i = &b"G"[..]; - assert_eq!( - hex_digit(i), - Err(Err::Error(error_position!(i, ErrorKind::HexDigit))) - ); - - assert!(is_hex_digit(b'0')); - assert!(is_hex_digit(b'9')); - assert!(is_hex_digit(b'a')); - assert!(is_hex_digit(b'f')); - assert!(is_hex_digit(b'A')); - assert!(is_hex_digit(b'F')); - assert!(!is_hex_digit(b'g')); - assert!(!is_hex_digit(b'G')); - assert!(!is_hex_digit(b'/')); - assert!(!is_hex_digit(b':')); - assert!(!is_hex_digit(b'@')); - assert!(!is_hex_digit(b'\x60')); - } - - #[test] - fn oct_digit_test() { - let i = &b"01234567;"[..]; - assert_eq!(oct_digit(i), Ok((&b";"[..], &i[..i.len() - 1]))); - - let i = &b"8"[..]; - assert_eq!( - oct_digit(i), - Err(Err::Error(error_position!(i, ErrorKind::OctDigit))) - ); - - assert!(is_oct_digit(b'0')); - assert!(is_oct_digit(b'7')); - assert!(!is_oct_digit(b'8')); - assert!(!is_oct_digit(b'9')); - assert!(!is_oct_digit(b'a')); - assert!(!is_oct_digit(b'A')); - assert!(!is_oct_digit(b'/')); - assert!(!is_oct_digit(b':')); - assert!(!is_oct_digit(b'@')); - assert!(!is_oct_digit(b'\x60')); - } - - #[test] - fn full_line_windows() { - named!( - take_full_line<(&[u8], &[u8])>, - tuple!(not_line_ending, line_ending) - ); - let input = b"abc\r\n"; - let output = take_full_line(input); - assert_eq!(output, Ok((&b""[..], (&b"abc"[..], &b"\r\n"[..])))); - } - - #[test] - fn full_line_unix() { - named!( - take_full_line<(&[u8], &[u8])>, - tuple!(not_line_ending, line_ending) - ); - let input = b"abc\n"; - let output = take_full_line(input); - assert_eq!(output, Ok((&b""[..], (&b"abc"[..], &b"\n"[..])))); - } - - #[test] - fn check_windows_lineending() { - let input = b"\r\n"; - let output = line_ending(&input[..]); - assert_eq!(output, Ok((&b""[..], &b"\r\n"[..]))); - } - - #[test] - fn check_unix_lineending() { - let input = b"\n"; - let output = line_ending(&input[..]); - assert_eq!(output, Ok((&b""[..], &b"\n"[..]))); - } - - #[test] - fn cr_lf() { - assert_eq!(crlf(&b"\r\na"[..]), Ok((&b"a"[..], &b"\r\n"[..]))); - assert_eq!(crlf(&b"\r"[..]), Err(Err::Incomplete(Needed::Size(2)))); - assert_eq!( - crlf(&b"\ra"[..]), - Err(Err::Error(error_position!(&b"\ra"[..], ErrorKind::CrLf))) - ); - - assert_eq!(crlf("\r\na"), Ok(("a", "\r\n"))); - assert_eq!(crlf("\r"), Err(Err::Incomplete(Needed::Size(2)))); - assert_eq!( - crlf("\ra"), - Err(Err::Error(error_position!("\ra", ErrorKind::CrLf))) - ); - } - - #[test] - fn end_of_line() { - assert_eq!(eol(&b"\na"[..]), Ok((&b"a"[..], &b"\n"[..]))); - assert_eq!(eol(&b"\r\na"[..]), Ok((&b"a"[..], &b"\r\n"[..]))); - assert_eq!(eol(&b"\r"[..]), Err(Err::Incomplete(Needed::Size(2)))); - assert_eq!( - eol(&b"\ra"[..]), - Err(Err::Error(error_position!(&b"\ra"[..], ErrorKind::CrLf))) - ); - - assert_eq!(eol("\na"), Ok(("a", "\n"))); - assert_eq!(eol("\r\na"), Ok(("a", 
"\r\n"))); - assert_eq!(eol("\r"), Err(Err::Incomplete(Needed::Size(2)))); - assert_eq!( - eol("\ra"), - Err(Err::Error(error_position!("\ra", ErrorKind::CrLf))) - ); - } - - #[test] - #[cfg(feature = "std")] - fn float_test() { - let mut test_cases = vec![ - "+3.14", - "3.14", - "-3.14", - "0", - "0.0", - "1.", - ".789", - "-.5", - "1e7", - "-1E-7", - ".3e-2", - "1.e4", - "1.2e4", - "-1.234E-12", - "-1.234e-12", - ]; - - for test in test_cases.drain(..) { - let expected32 = str::parse::(test).unwrap(); - let expected64 = str::parse::(test).unwrap(); - - println!("now parsing: {} -> {}", test, expected32); - - assert_eq!( - recognize_float(CompleteStr(test)), - Ok((CompleteStr(""), CompleteStr(test))) - ); - let larger = format!("{};", test); - assert_eq!(recognize_float(&larger[..]), Ok((";", test))); - - assert_eq!(float(larger.as_bytes()), Ok((&b";"[..], expected32))); - assert_eq!(float(&larger[..]), Ok((";", expected32))); - assert_eq!(float(CompleteByteSlice(test.as_bytes())), Ok((CompleteByteSlice(&b""[..]), expected32))); - assert_eq!(float(CompleteStr(test)), Ok((CompleteStr(""), expected32))); - - assert_eq!(double(larger.as_bytes()), Ok((&b";"[..], expected64))); - assert_eq!(double(&larger[..]), Ok((";", expected64))); - assert_eq!(double(CompleteByteSlice(test.as_bytes())), Ok((CompleteByteSlice(&b""[..]), expected64))); - assert_eq!(double(CompleteStr(test)), Ok((CompleteStr(""), expected64))); - - //deprecated functions - assert_eq!(float_s(&larger[..]), Ok((";", expected32))); - assert_eq!(double_s(&larger[..]), Ok((";", expected64))); - } - - let remaining_exponent = "-1.234E-"; - assert_eq!( - recognize_float(remaining_exponent), - Err(Err::Incomplete(Needed::Size(1))) - ); - } - - #[allow(dead_code)] - pub fn end_of_line_completestr(input: CompleteStr) -> IResult { - alt!(input, eof!() | eol) - } -} diff --git a/third_party/rust/nom/src/number/complete.rs b/third_party/rust/nom/src/number/complete.rs new file mode 100644 index 0000000000..b8d423271c --- /dev/null +++ b/third_party/rust/nom/src/number/complete.rs @@ -0,0 +1,1198 @@ +//! 
parsers recognizing numbers, complete input version + +use crate::internal::*; +use crate::error::ParseError; +use crate::traits::{AsChar, InputIter, InputLength, InputTakeAtPosition}; +use crate::lib::std::ops::{RangeFrom, RangeTo}; +use crate::traits::{Offset, Slice}; +use crate::error::{ErrorKind, make_error}; +use crate::character::complete::{char, digit1}; +use crate::combinator::{opt, cut, map, recognize}; +use crate::branch::alt; +use crate::sequence::{tuple, pair}; + +/// Recognizes an unsigned 1 byte integer +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_u8; +/// +/// let parser = |s| { +/// be_u8(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x03abcefg"), Ok((&b"\x03abcefg"[..], 0x00))); +/// assert_eq!(parser(b""), Err(Err::Error((&[][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_u8<'a, E: ParseError<&'a[u8]>>(i: &'a[u8]) -> IResult<&'a[u8], u8, E> { + if i.len() < 1 { + Err(Err::Error(make_error(i, ErrorKind::Eof))) + } else { + Ok((&i[1..], i[0])) + } +} + +/// Recognizes a big endian unsigned 2 bytes integer +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_u16; +/// +/// let parser = |s| { +/// be_u16(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x03abcefg"), Ok((&b"abcefg"[..], 0x0003))); +/// assert_eq!(parser(b"\x01"), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_u16<'a, E: ParseError<&'a[u8]>>(i: &'a[u8]) -> IResult<&'a[u8], u16, E> { + if i.len() < 2 { + Err(Err::Error(make_error(i, ErrorKind::Eof))) + } else { + let res = ((i[0] as u16) << 8) + i[1] as u16; + Ok((&i[2..], res)) + } +} + +/// Recognizes a big endian unsigned 3 byte integer +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_u24; +/// +/// let parser = |s| { +/// be_u24(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x03\x05abcefg"), Ok((&b"abcefg"[..], 0x000305))); +/// assert_eq!(parser(b"\x01"), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_u24<'a, E: ParseError<&'a[u8]>>(i: &'a[u8]) -> IResult<&'a[u8], u32, E> { + if i.len() < 3 { + Err(Err::Error(make_error(i, ErrorKind::Eof))) + } else { + let res = ((i[0] as u32) << 16) + ((i[1] as u32) << 8) + (i[2] as u32); + Ok((&i[3..], res)) + } +} + +/// Recognizes a big endian unsigned 4 bytes integer +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_u32; +/// +/// let parser = |s| { +/// be_u32(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x03\x05\x07abcefg"), Ok((&b"abcefg"[..], 0x00030507))); +/// assert_eq!(parser(b"\x01"), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_u32<'a, E: ParseError<&'a[u8]>>(i: &'a[u8]) -> IResult<&'a[u8], u32, E> { + if i.len() < 4 { + Err(Err::Error(make_error(i, ErrorKind::Eof))) + } else { + let res = ((i[0] as u32) << 24) + ((i[1] as u32) << 16) + ((i[2] as u32) << 8) + i[3] as u32; + Ok((&i[4..], res)) + } +} + +/// Recognizes a big endian unsigned 8 bytes integer +/// +/// *complete version*: 
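Unlike the streaming parsers removed above, which report `Err::Incomplete` with the number of bytes still needed, these complete-input variants fail outright with an `Eof` error when the slice is too short. A small sketch of that behaviour, assuming the crate is consumed as `nom` with its default `(input, ErrorKind)` error type:

```rust
use nom::number::complete::be_u16;
use nom::{error::ErrorKind, Err, IResult};

fn main() {
    // Enough input: two bytes are consumed and the rest is returned.
    let ok: IResult<&[u8], u16> = be_u16(&b"\x00\x03abc"[..]);
    assert_eq!(ok, Ok((&b"abc"[..], 0x0003)));

    // Not enough input: the complete version errors with ErrorKind::Eof
    // instead of returning Err::Incomplete like the streaming version.
    let short: IResult<&[u8], u16> = be_u16(&b"\x01"[..]);
    assert_eq!(short, Err(Err::Error((&b"\x01"[..], ErrorKind::Eof))));
}
```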
returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_u64; +/// +/// let parser = |s| { +/// be_u64(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"), Ok((&b"abcefg"[..], 0x0001020304050607))); +/// assert_eq!(parser(b"\x01"), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_u64<'a, E: ParseError<&'a[u8]>>(i: &'a[u8]) -> IResult<&'a[u8], u64, E> { + if i.len() < 8 { + Err(Err::Error(make_error(i, ErrorKind::Eof))) + } else { + let res = ((i[0] as u64) << 56) + ((i[1] as u64) << 48) + ((i[2] as u64) << 40) + ((i[3] as u64) << 32) + ((i[4] as u64) << 24) + + ((i[5] as u64) << 16) + ((i[6] as u64) << 8) + i[7] as u64; + Ok((&i[8..], res)) + } +} + +/// Recognizes a big endian unsigned 16 bytes integer +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_u128; +/// +/// let parser = |s| { +/// be_u128(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"), Ok((&b"abcefg"[..], 0x00010203040506070001020304050607))); +/// assert_eq!(parser(b"\x01"), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +#[cfg(stable_i128)] +pub fn be_u128<'a, E: ParseError<&'a[u8]>>(i: &'a[u8]) -> IResult<&'a[u8], u128, E> { + if i.len() < 16 { + Err(Err::Error(make_error(i, ErrorKind::Eof))) + } else { + let res = ((i[0] as u128) << 120) + + ((i[1] as u128) << 112) + + ((i[2] as u128) << 104) + + ((i[3] as u128) << 96) + + ((i[4] as u128) << 88) + + ((i[5] as u128) << 80) + + ((i[6] as u128) << 72) + + ((i[7] as u128) << 64) + + ((i[8] as u128) << 56) + + ((i[9] as u128) << 48) + + ((i[10] as u128) << 40) + + ((i[11] as u128) << 32) + + ((i[12] as u128) << 24) + + ((i[13] as u128) << 16) + + ((i[14] as u128) << 8) + + i[15] as u128; + Ok((&i[16..], res)) + } +} + +/// Recognizes a signed 1 byte integer +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_i8; +/// +/// let parser = |s| { +/// be_i8(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x03abcefg"), Ok((&b"\x03abcefg"[..], 0x00))); +/// assert_eq!(parser(b""), Err(Err::Error((&[][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_i8<'a, E: ParseError<&'a[u8]>>(i: &'a[u8]) -> IResult<&'a[u8], i8, E> { + map!(i, be_u8, |x| x as i8) +} + +/// Recognizes a big endian signed 2 bytes integer +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_i16; +/// +/// let parser = |s| { +/// be_i16(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x03abcefg"), Ok((&b"abcefg"[..], 0x0003))); +/// assert_eq!(parser(b"\x01"), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_i16<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], i16, E> { + map!(i, be_u16, |x| x as i16) +} + +/// Recognizes a big endian signed 3 bytes integer +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use 
nom::number::complete::be_i24; +/// +/// let parser = |s| { +/// be_i24(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x03\x05abcefg"), Ok((&b"abcefg"[..], 0x000305))); +/// assert_eq!(parser(b"\x01"), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_i24<'a, E: ParseError<&'a[u8]>>(i: &'a[u8]) -> IResult<&'a[u8], i32, E> { + // Same as the unsigned version but we need to sign-extend manually here + map!(i, be_u24, |x| if x & 0x80_00_00 != 0 { + (x | 0xff_00_00_00) as i32 + } else { + x as i32 + }) +} + +/// Recognizes a big endian signed 4 bytes integer +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_i32; +/// +/// let parser = |s| { +/// be_i32(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x03\x05\x07abcefg"), Ok((&b"abcefg"[..], 0x00030507))); +/// assert_eq!(parser(b"\x01"), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_i32<'a, E: ParseError<&'a[u8]>>(i: &'a[u8]) -> IResult<&'a[u8], i32, E> { + map!(i, be_u32, |x| x as i32) +} + +/// Recognizes a big endian signed 8 bytes integer +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_i64; +/// +/// let parser = |s| { +/// be_i64(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"), Ok((&b"abcefg"[..], 0x0001020304050607))); +/// assert_eq!(parser(b"\x01"), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_i64<'a, E: ParseError<&'a[u8]>>(i: &'a[u8]) -> IResult<&'a[u8], i64, E> { + map!(i, be_u64, |x| x as i64) +} + +/// Recognizes a big endian signed 16 bytes integer +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_i128; +/// +/// let parser = |s| { +/// be_i128(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"), Ok((&b"abcefg"[..], 0x00010203040506070001020304050607))); +/// assert_eq!(parser(b"\x01"), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +#[cfg(stable_i128)] +pub fn be_i128<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], i128, E> { + map!(i, be_u128, |x| x as i128) +} + +/// Recognizes an unsigned 1 byte integer +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_u8; +/// +/// let parser = |s| { +/// le_u8(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x03abcefg"), Ok((&b"\x03abcefg"[..], 0x00))); +/// assert_eq!(parser(b""), Err(Err::Error((&[][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_u8<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], u8, E> { + if i.len() < 1 { + Err(Err::Error(make_error(i, ErrorKind::Eof))) + } else { + Ok((&i[1..], i[0])) + } +} + +/// Recognizes a little endian unsigned 2 bytes integer +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_u16; +/// +/// let parser = |s| { +/// le_u16(s) +/// 
}; +/// +/// assert_eq!(parser(b"\x00\x03abcefg"), Ok((&b"abcefg"[..], 0x0300))); +/// assert_eq!(parser(b"\x01"), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_u16<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], u16, E> { + if i.len() < 2 { + Err(Err::Error(make_error(i, ErrorKind::Eof))) + } else { + let res = ((i[1] as u16) << 8) + i[0] as u16; + Ok((&i[2..], res)) + } +} + +/// Recognizes a little endian unsigned 3 byte integer +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_u24; +/// +/// let parser = |s| { +/// le_u24(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x03\x05abcefg"), Ok((&b"abcefg"[..], 0x050300))); +/// assert_eq!(parser(b"\x01"), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_u24<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], u32, E> { + if i.len() < 3 { + Err(Err::Error(make_error(i, ErrorKind::Eof))) + } else { + let res = (i[0] as u32) + ((i[1] as u32) << 8) + ((i[2] as u32) << 16); + Ok((&i[3..], res)) + } +} + +/// Recognizes a little endian unsigned 4 bytes integer +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_u32; +/// +/// let parser = |s| { +/// le_u32(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x03\x05\x07abcefg"), Ok((&b"abcefg"[..], 0x07050300))); +/// assert_eq!(parser(b"\x01"), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_u32<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], u32, E> { + if i.len() < 4 { + Err(Err::Error(make_error(i, ErrorKind::Eof))) + } else { + let res = ((i[3] as u32) << 24) + ((i[2] as u32) << 16) + ((i[1] as u32) << 8) + i[0] as u32; + Ok((&i[4..], res)) + } +} + +/// Recognizes a little endian unsigned 8 bytes integer +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_u64; +/// +/// let parser = |s| { +/// le_u64(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"), Ok((&b"abcefg"[..], 0x0706050403020100))); +/// assert_eq!(parser(b"\x01"), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_u64<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], u64, E> { + if i.len() < 8 { + Err(Err::Error(make_error(i, ErrorKind::Eof))) + } else { + let res = ((i[7] as u64) << 56) + ((i[6] as u64) << 48) + ((i[5] as u64) << 40) + ((i[4] as u64) << 32) + ((i[3] as u64) << 24) + + ((i[2] as u64) << 16) + ((i[1] as u64) << 8) + i[0] as u64; + Ok((&i[8..], res)) + } +} + +/// Recognizes a little endian unsigned 16 bytes integer +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_u128; +/// +/// let parser = |s| { +/// le_u128(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"), Ok((&b"abcefg"[..], 0x07060504030201000706050403020100))); +/// assert_eq!(parser(b"\x01"), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +#[cfg(stable_i128)] +pub 
fn le_u128<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], u128, E> { + if i.len() < 16 { + Err(Err::Error(make_error(i, ErrorKind::Eof))) + } else { + let res = ((i[15] as u128) << 120) + + ((i[14] as u128) << 112) + + ((i[13] as u128) << 104) + + ((i[12] as u128) << 96) + + ((i[11] as u128) << 88) + + ((i[10] as u128) << 80) + + ((i[9] as u128) << 72) + + ((i[8] as u128) << 64) + + ((i[7] as u128) << 56) + + ((i[6] as u128) << 48) + + ((i[5] as u128) << 40) + + ((i[4] as u128) << 32) + + ((i[3] as u128) << 24) + + ((i[2] as u128) << 16) + + ((i[1] as u128) << 8) + + i[0] as u128; + Ok((&i[16..], res)) + } +} + +/// Recognizes a signed 1 byte integer +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_i8; +/// +/// let parser = |s| { +/// le_i8(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x03abcefg"), Ok((&b"\x03abcefg"[..], 0x00))); +/// assert_eq!(parser(b""), Err(Err::Error((&[][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_i8<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], i8, E> { + map!(i, le_u8, |x| x as i8) +} + +/// Recognizes a little endian signed 2 bytes integer +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_i16; +/// +/// let parser = |s| { +/// le_i16(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x03abcefg"), Ok((&b"abcefg"[..], 0x0300))); +/// assert_eq!(parser(b"\x01"), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_i16<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], i16, E> { + map!(i, le_u16, |x| x as i16) +} + +/// Recognizes a little endian signed 3 bytes integer +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_i24; +/// +/// let parser = |s| { +/// le_i24(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x03\x05abcefg"), Ok((&b"abcefg"[..], 0x050300))); +/// assert_eq!(parser(b"\x01"), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_i24<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], i32, E> { + // Same as the unsigned version but we need to sign-extend manually here + map!(i, le_u24, |x| if x & 0x80_00_00 != 0 { + (x | 0xff_00_00_00) as i32 + } else { + x as i32 + }) +} + +/// Recognizes a little endian signed 4 bytes integer +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_i32; +/// +/// let parser = |s| { +/// le_i32(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x03\x05\x07abcefg"), Ok((&b"abcefg"[..], 0x07050300))); +/// assert_eq!(parser(b"\x01"), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_i32<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], i32, E> { + map!(i, le_u32, |x| x as i32) +} + +/// Recognizes a little endian signed 8 bytes integer +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_i64; +/// +/// let parser 
= |s| { +/// le_i64(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x01\x02\x03\x04\x05\x06\x07abcefg"), Ok((&b"abcefg"[..], 0x0706050403020100))); +/// assert_eq!(parser(b"\x01"), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_i64<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], i64, E> { + map!(i, le_u64, |x| x as i64) +} + +/// Recognizes a little endian signed 16 bytes integer +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_i128; +/// +/// let parser = |s| { +/// le_i128(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07abcefg"), Ok((&b"abcefg"[..], 0x07060504030201000706050403020100))); +/// assert_eq!(parser(b"\x01"), Err(Err::Error((&[0x01][..], ErrorKind::Eof)))); +/// ``` +#[inline] +#[cfg(stable_i128)] +pub fn le_i128<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], i128, E> { + map!(i, le_u128, |x| x as i128) +} + +/// Recognizes a big endian 4 bytes floating point number +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_f32; +/// +/// let parser = |s| { +/// be_f32(s) +/// }; +/// +/// assert_eq!(parser(&[0x41, 0x48, 0x00, 0x00][..]), Ok((&b""[..], 12.5))); +/// assert_eq!(parser(b"abc"), Err(Err::Error((&b"abc"[..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_f32<'a, E: ParseError<&'a [u8]>>(input: &'a[u8]) -> IResult<&'a[u8], f32, E> { + match be_u32(input) { + Err(e) => Err(e), + Ok((i, o)) => Ok((i, f32::from_bits(o))), + } +} + +/// Recognizes a big endian 8 bytes floating point number +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::be_f64; +/// +/// let parser = |s| { +/// be_f64(s) +/// }; +/// +/// assert_eq!(parser(&[0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 12.5))); +/// assert_eq!(parser(b"abc"), Err(Err::Error((&b"abc"[..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn be_f64<'a, E: ParseError<&'a [u8]>>(input: &'a[u8]) -> IResult<&'a[u8], f64, E> { + match be_u64(input) { + Err(e) => Err(e), + Ok((i, o)) => Ok((i, f64::from_bits(o))), + } +} + +/// Recognizes a little endian 4 bytes floating point number +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_f32; +/// +/// let parser = |s| { +/// le_f32(s) +/// }; +/// +/// assert_eq!(parser(&[0x00, 0x00, 0x48, 0x41][..]), Ok((&b""[..], 12.5))); +/// assert_eq!(parser(b"abc"), Err(Err::Error((&b"abc"[..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_f32<'a, E: ParseError<&'a [u8]>>(input: &'a[u8]) -> IResult<&'a[u8], f32, E> { + match le_u32(input) { + Err(e) => Err(e), + Ok((i, o)) => Ok((i, f32::from_bits(o))), + } +} + +/// Recognizes a little endian 8 bytes floating point number +/// +/// *complete version*: returns an error if there is not enough input data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::le_f64; +/// +/// let parser = |s| { +/// le_f64(s) +/// }; +/// 
+/// assert_eq!(parser(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x40][..]), Ok((&b""[..], 12.5))); +/// assert_eq!(parser(b"abc"), Err(Err::Error((&b"abc"[..], ErrorKind::Eof)))); +/// ``` +#[inline] +pub fn le_f64<'a, E: ParseError<&'a [u8]>>(input: &'a[u8]) -> IResult<&'a[u8], f64, E> { + match le_u64(input) { + Err(e) => Err(e), + Ok((i, o)) => Ok((i, f64::from_bits(o))), + } +} + +/// Recognizes a hex-encoded integer +/// +/// *complete version*: will parse until the end of input if it has less than 8 bytes +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::hex_u32; +/// +/// let parser = |s| { +/// hex_u32(s) +/// }; +/// +/// assert_eq!(parser(b"01AE"), Ok((&b""[..], 0x01AE))); +/// assert_eq!(parser(b"abc"), Ok((&b""[..], 0x0ABC))); +/// assert_eq!(parser(b"ggg"), Err(Err::Error((&b"ggg"[..], ErrorKind::IsA)))); +/// ``` +#[inline] +pub fn hex_u32<'a, E: ParseError<&'a [u8]>>(input: &'a[u8]) -> IResult<&'a[u8], u32, E> { + let (i, o) = crate::bytes::complete::is_a(&b"0123456789abcdefABCDEF"[..])(input)?; + // Do not parse more than 8 characters for a u32 + let (parsed, remaining) = if o.len() <= 8 { + (o, i) + } else { + (&input[..8], &input[8..]) + }; + + let res = parsed + .iter() + .rev() + .enumerate() + .map(|(k, &v)| { + let digit = v as char; + digit.to_digit(16).unwrap_or(0) << (k * 4) + }) + .sum(); + + Ok((remaining, res)) +} + +/// Recognizes floating point number in a byte string and returns the corresponding slice +/// +/// *complete version*: can parse until the end of input +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::recognize_float; +/// +/// let parser = |s| { +/// recognize_float(s) +/// }; +/// +/// assert_eq!(parser("11e-1"), Ok(("", "11e-1"))); +/// assert_eq!(parser("123E-02"), Ok(("", "123E-02"))); +/// assert_eq!(parser("123K-01"), Ok(("K-01", "123"))); +/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Char)))); +/// ``` +#[allow(unused_imports)] +#[cfg_attr(rustfmt, rustfmt_skip)] +pub fn recognize_float>(input: T) -> IResult +where + T: Slice> + Slice>, + T: Clone + Offset, + T: InputIter, + ::Item: AsChar, + T: InputTakeAtPosition, + ::Item: AsChar, +{ + recognize( + tuple(( + opt(alt((char('+'), char('-')))), + alt(( + map(tuple((digit1, opt(pair(char('.'), opt(digit1))))), |_| ()), + map(tuple((char('.'), digit1)), |_| ()) + )), + opt(tuple(( + alt((char('e'), char('E'))), + opt(alt((char('+'), char('-')))), + cut(digit1) + ))) + )) + )(input) +} + +/// Recognizes floating point number in a byte string and returns a f32 +/// +/// *complete version*: can parse until the end of input +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::float; +/// +/// let parser = |s| { +/// float(s) +/// }; +/// +/// assert_eq!(parser("11e-1"), Ok(("", 1.1))); +/// assert_eq!(parser("123E-02"), Ok(("", 1.23))); +/// assert_eq!(parser("123K-01"), Ok(("K-01", 123.0))); +/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Char)))); +/// ``` +#[cfg(not(feature = "lexical"))] +pub fn float>(input: T) -> IResult +where + T: Slice> + Slice>, + T: Clone + Offset, + T: InputIter + InputLength + crate::traits::ParseTo, + ::Item: AsChar, + T: InputTakeAtPosition, + ::Item: AsChar +{ + match recognize_float(input) { + Err(e) => Err(e), + Ok((i, s)) => match s.parse_to() { + Some(n) => Ok((i, n)), + None => 
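The float parsers in this module no longer `transmute`; the already-parsed integer is reinterpreted with the safe `f32::from_bits`/`f64::from_bits`. A std-only sketch using the same byte patterns as the doc examples above:

```rust
fn main() {
    // 0x41480000 is the IEEE-754 single-precision encoding of 12.5, the
    // same bytes used in the `be_f32` / `le_f32` examples above.
    assert_eq!(f32::from_bits(u32::from_be_bytes([0x41, 0x48, 0x00, 0x00])), 12.5);
    assert_eq!(f32::from_bits(u32::from_le_bytes([0x00, 0x00, 0x48, 0x41])), 12.5);

    // Same idea for f64: 0x4029000000000000 encodes 12.5.
    let be = [0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00];
    assert_eq!(f64::from_bits(u64::from_be_bytes(be)), 12.5);
}
```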
Err(Err::Error(E::from_error_kind(i, ErrorKind::Float))) + } + } +} + +/// Recognizes floating point number in a byte string and returns a f32 +/// +/// *complete version*: can parse until the end of input +/// +/// this function uses the lexical-core crate for float parsing by default, you +/// can deactivate it by removing the "lexical" feature +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::float; +/// +/// let parser = |s| { +/// float(s) +/// }; +/// +/// assert_eq!(parser("1.1"), Ok(("", 1.1))); +/// assert_eq!(parser("123E-02"), Ok(("", 1.23))); +/// assert_eq!(parser("123K-01"), Ok(("K-01", 123.0))); +/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Float)))); +/// ``` +#[cfg(feature = "lexical")] +pub fn float>(input: T) -> IResult +where + T: crate::traits::AsBytes + InputLength + Slice>, +{ + match ::lexical_core::parse_partial(input.as_bytes()) { + Ok((value, processed)) => Ok((input.slice(processed..), value)), + Err(_) => Err(Err::Error(E::from_error_kind(input, ErrorKind::Float))) + } +} + +/// Recognizes floating point number in a byte string and returns a f64 +/// +/// *complete version*: can parse until the end of input +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::double; +/// +/// let parser = |s| { +/// double(s) +/// }; +/// +/// assert_eq!(parser("11e-1"), Ok(("", 1.1))); +/// assert_eq!(parser("123E-02"), Ok(("", 1.23))); +/// assert_eq!(parser("123K-01"), Ok(("K-01", 123.0))); +/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Char)))); +/// ``` +#[cfg(not(feature = "lexical"))] +pub fn double>(input: T) -> IResult +where + T: Slice> + Slice>, + T: Clone + Offset, + T: InputIter + InputLength + crate::traits::ParseTo, + ::Item: AsChar, + T: InputTakeAtPosition, + ::Item: AsChar +{ + match recognize_float(input) { + Err(e) => Err(e), + Ok((i, s)) => match s.parse_to() { + Some(n) => Ok((i, n)), + None => Err(Err::Error(E::from_error_kind(i, ErrorKind::Float))) + } + } +} + +/// Recognizes floating point number in a byte string and returns a f64 +/// +/// *complete version*: can parse until the end of input +/// +/// this function uses the lexical-core crate for float parsing by default, you +/// can deactivate it by removing the "lexical" feature +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::complete::double; +/// +/// let parser = |s| { +/// double(s) +/// }; +/// +/// assert_eq!(parser("1.1"), Ok(("", 1.1))); +/// assert_eq!(parser("123E-02"), Ok(("", 1.23))); +/// assert_eq!(parser("123K-01"), Ok(("K-01", 123.0))); +/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Float)))); +/// ``` +#[cfg(feature = "lexical")] +pub fn double>(input: T) -> IResult +where + T: crate::traits::AsBytes + InputLength + Slice>, +{ + match ::lexical_core::parse_partial(input.as_bytes()) { + Ok((value, processed)) => Ok((input.slice(processed..), value)), + Err(_) => Err(Err::Error(E::from_error_kind(input, ErrorKind::Float))) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::internal::Err; + use crate::error::ErrorKind; + + macro_rules! 
assert_parse( + ($left: expr, $right: expr) => { + let res: $crate::IResult<_, _, (_, ErrorKind)> = $left; + assert_eq!(res, $right); + }; + ); + + #[test] + fn i8_tests() { + assert_parse!(be_i8(&[0x00]), Ok((&b""[..], 0))); + assert_parse!(be_i8(&[0x7f]), Ok((&b""[..], 127))); + assert_parse!(be_i8(&[0xff]), Ok((&b""[..], -1))); + assert_parse!(be_i8(&[0x80]), Ok((&b""[..], -128))); + } + + #[test] + fn i16_tests() { + assert_parse!(be_i16(&[0x00, 0x00]), Ok((&b""[..], 0))); + assert_parse!(be_i16(&[0x7f, 0xff]), Ok((&b""[..], 32_767_i16))); + assert_parse!(be_i16(&[0xff, 0xff]), Ok((&b""[..], -1))); + assert_parse!(be_i16(&[0x80, 0x00]), Ok((&b""[..], -32_768_i16))); + } + + #[test] + fn u24_tests() { + assert_parse!(be_u24(&[0x00, 0x00, 0x00]), Ok((&b""[..], 0))); + assert_parse!(be_u24(&[0x00, 0xFF, 0xFF]), Ok((&b""[..], 65_535_u32))); + assert_parse!(be_u24(&[0x12, 0x34, 0x56]), Ok((&b""[..], 1_193_046_u32))); + } + + #[test] + fn i24_tests() { + assert_parse!(be_i24(&[0xFF, 0xFF, 0xFF]), Ok((&b""[..], -1_i32))); + assert_parse!(be_i24(&[0xFF, 0x00, 0x00]), Ok((&b""[..], -65_536_i32))); + assert_parse!(be_i24(&[0xED, 0xCB, 0xAA]), Ok((&b""[..], -1_193_046_i32))); + } + + #[test] + fn i32_tests() { + assert_parse!(be_i32(&[0x00, 0x00, 0x00, 0x00]), Ok((&b""[..], 0))); + assert_parse!( + be_i32(&[0x7f, 0xff, 0xff, 0xff]), + Ok((&b""[..], 2_147_483_647_i32)) + ); + assert_parse!(be_i32(&[0xff, 0xff, 0xff, 0xff]), Ok((&b""[..], -1))); + assert_parse!( + be_i32(&[0x80, 0x00, 0x00, 0x00]), + Ok((&b""[..], -2_147_483_648_i32)) + ); + } + + #[test] + fn i64_tests() { + assert_parse!( + be_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), + Ok((&b""[..], 0)) + ); + assert_parse!( + be_i64(&[0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]), + Ok((&b""[..], 9_223_372_036_854_775_807_i64)) + ); + assert_parse!( + be_i64(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]), + Ok((&b""[..], -1)) + ); + assert_parse!( + be_i64(&[0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), + Ok((&b""[..], -9_223_372_036_854_775_808_i64)) + ); + } + + #[test] + #[cfg(stable_i128)] + fn i128_tests() { + assert_parse!( + be_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), + Ok((&b""[..], 0)) + ); + assert_parse!( + be_i128(&[0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]), + Ok((&b""[..], 170_141_183_460_469_231_731_687_303_715_884_105_727_i128)) + ); + assert_parse!( + be_i128(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]), + Ok((&b""[..], -1)) + ); + assert_parse!( + be_i128(&[0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), + Ok((&b""[..], -170_141_183_460_469_231_731_687_303_715_884_105_728_i128)) + ); + } + + #[test] + fn le_i8_tests() { + assert_parse!(le_i8(&[0x00]), Ok((&b""[..], 0))); + assert_parse!(le_i8(&[0x7f]), Ok((&b""[..], 127))); + assert_parse!(le_i8(&[0xff]), Ok((&b""[..], -1))); + assert_parse!(le_i8(&[0x80]), Ok((&b""[..], -128))); + } + + #[test] + fn le_i16_tests() { + assert_parse!(le_i16(&[0x00, 0x00]), Ok((&b""[..], 0))); + assert_parse!(le_i16(&[0xff, 0x7f]), Ok((&b""[..], 32_767_i16))); + assert_parse!(le_i16(&[0xff, 0xff]), Ok((&b""[..], -1))); + assert_parse!(le_i16(&[0x00, 0x80]), Ok((&b""[..], -32_768_i16))); + } + + #[test] + fn le_u24_tests() { + assert_parse!(le_u24(&[0x00, 0x00, 0x00]), Ok((&b""[..], 0))); + assert_parse!(le_u24(&[0xFF, 0xFF, 0x00]), Ok((&b""[..], 
65_535_u32))); + assert_parse!(le_u24(&[0x56, 0x34, 0x12]), Ok((&b""[..], 1_193_046_u32))); + } + + #[test] + fn le_i24_tests() { + assert_parse!(le_i24(&[0xFF, 0xFF, 0xFF]), Ok((&b""[..], -1_i32))); + assert_parse!(le_i24(&[0x00, 0x00, 0xFF]), Ok((&b""[..], -65_536_i32))); + assert_parse!(le_i24(&[0xAA, 0xCB, 0xED]), Ok((&b""[..], -1_193_046_i32))); + } + + #[test] + fn le_i32_tests() { + assert_parse!(le_i32(&[0x00, 0x00, 0x00, 0x00]), Ok((&b""[..], 0))); + assert_parse!( + le_i32(&[0xff, 0xff, 0xff, 0x7f]), + Ok((&b""[..], 2_147_483_647_i32)) + ); + assert_parse!(le_i32(&[0xff, 0xff, 0xff, 0xff]), Ok((&b""[..], -1))); + assert_parse!( + le_i32(&[0x00, 0x00, 0x00, 0x80]), + Ok((&b""[..], -2_147_483_648_i32)) + ); + } + + #[test] + fn le_i64_tests() { + assert_parse!( + le_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), + Ok((&b""[..], 0)) + ); + assert_parse!( + le_i64(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f]), + Ok((&b""[..], 9_223_372_036_854_775_807_i64)) + ); + assert_parse!( + le_i64(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]), + Ok((&b""[..], -1)) + ); + assert_parse!( + le_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80]), + Ok((&b""[..], -9_223_372_036_854_775_808_i64)) + ); + } + + #[test] + #[cfg(stable_i128)] + fn le_i128_tests() { + assert_parse!( + le_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), + Ok((&b""[..], 0)) + ); + assert_parse!( + le_i128(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f]), + Ok((&b""[..], 170_141_183_460_469_231_731_687_303_715_884_105_727_i128)) + ); + assert_parse!( + le_i128(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]), + Ok((&b""[..], -1)) + ); + assert_parse!( + le_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80]), + Ok((&b""[..], -170_141_183_460_469_231_731_687_303_715_884_105_728_i128)) + ); + } + + #[test] + fn be_f32_tests() { + assert_parse!(be_f32(&[0x00, 0x00, 0x00, 0x00]), Ok((&b""[..], 0_f32))); + assert_parse!( + be_f32(&[0x4d, 0x31, 0x1f, 0xd8]), + Ok((&b""[..], 185_728_392_f32)) + ); + } + + #[test] + fn be_f64_tests() { + assert_parse!( + be_f64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), + Ok((&b""[..], 0_f64)) + ); + assert_parse!( + be_f64(&[0x41, 0xa6, 0x23, 0xfb, 0x10, 0x00, 0x00, 0x00]), + Ok((&b""[..], 185_728_392_f64)) + ); + } + + #[test] + fn le_f32_tests() { + assert_parse!(le_f32(&[0x00, 0x00, 0x00, 0x00]), Ok((&b""[..], 0_f32))); + assert_parse!( + le_f32(&[0xd8, 0x1f, 0x31, 0x4d]), + Ok((&b""[..], 185_728_392_f32)) + ); + } + + #[test] + fn le_f64_tests() { + assert_parse!( + le_f64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), + Ok((&b""[..], 0_f64)) + ); + assert_parse!( + le_f64(&[0x00, 0x00, 0x00, 0x10, 0xfb, 0x23, 0xa6, 0x41]), + Ok((&b""[..], 185_728_392_f64)) + ); + } + + #[test] + fn hex_u32_tests() { + assert_parse!( + hex_u32(&b";"[..]), + Err(Err::Error(error_position!(&b";"[..], ErrorKind::IsA))) + ); + assert_parse!(hex_u32(&b"ff;"[..]), Ok((&b";"[..], 255))); + assert_parse!(hex_u32(&b"1be2;"[..]), Ok((&b";"[..], 7138))); + assert_parse!(hex_u32(&b"c5a31be2;"[..]), Ok((&b";"[..], 3_315_801_058))); + assert_parse!(hex_u32(&b"C5A31be2;"[..]), Ok((&b";"[..], 3_315_801_058))); + assert_parse!(hex_u32(&b"00c5a31be2;"[..]), Ok((&b"e2;"[..], 12_952_347))); + assert_parse!( + hex_u32(&b"c5a31be201;"[..]), + Ok((&b"01;"[..], 3_315_801_058)) + ); + 
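+    // inputs longer than 8 hex digits are capped: only the first 8 characters are
+    // converted and the rest is returned as remaining input, as the two cases above show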
assert_parse!(hex_u32(&b"ffffffff;"[..]), Ok((&b";"[..], 4_294_967_295))); + assert_parse!(hex_u32(&b"0x1be2;"[..]), Ok((&b"x1be2;"[..], 0))); + assert_parse!(hex_u32(&b"12af"[..]), Ok((&b""[..], 0x12af))); + } + + #[test] + #[cfg(feature = "std")] + fn float_test() { + let mut test_cases = vec![ + "+3.14", + "3.14", + "-3.14", + "0", + "0.0", + "1.", + ".789", + "-.5", + "1e7", + "-1E-7", + ".3e-2", + "1.e4", + "1.2e4", + "12.34", + "-1.234E-12", + "-1.234e-12", + ]; + + for test in test_cases.drain(..) { + let expected32 = str::parse::(test).unwrap(); + let expected64 = str::parse::(test).unwrap(); + + println!("now parsing: {} -> {}", test, expected32); + + let larger = format!("{}", test); + assert_parse!(recognize_float(&larger[..]), Ok(("", test))); + + assert_parse!(float(larger.as_bytes()), Ok((&b""[..], expected32))); + assert_parse!(float(&larger[..]), Ok(("", expected32))); + + assert_parse!(double(larger.as_bytes()), Ok((&b""[..], expected64))); + assert_parse!(double(&larger[..]), Ok(("", expected64))); + } + + let remaining_exponent = "-1.234E-"; + assert_parse!( + recognize_float(remaining_exponent), + Err(Err::Failure(("", ErrorKind::Digit))) + ); + } + +} diff --git a/third_party/rust/nom/src/number/macros.rs b/third_party/rust/nom/src/number/macros.rs new file mode 100644 index 0000000000..cb8c2a9470 --- /dev/null +++ b/third_party/rust/nom/src/number/macros.rs @@ -0,0 +1,265 @@ +//! parsers recognizing numbers + +/// if the parameter is nom::Endianness::Big, parse a big endian u16 integer, +/// otherwise a little endian u16 integer +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, Needed}; +/// use nom::number::Endianness; +/// +/// # fn main() { +/// named!(be, u16!(Endianness::Big)); +/// +/// assert_eq!(be(b"\x00\x01abcd"), Ok((&b"abcd"[..], 0x0001))); +/// assert_eq!(be(b"\x01"), Err(Err::Incomplete(Needed::Size(2)))); +/// +/// named!(le, u16!(Endianness::Little)); +/// +/// assert_eq!(le(b"\x00\x01abcd"), Ok((&b"abcd"[..], 0x0100))); +/// assert_eq!(le(b"\x01"), Err(Err::Incomplete(Needed::Size(2)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! u16 ( ($i:expr, $e:expr) => ( {if $crate::number::Endianness::Big == $e { $crate::number::streaming::be_u16($i) } else { $crate::number::streaming::le_u16($i) } } );); + +/// if the parameter is nom::Endianness::Big, parse a big endian u32 integer, +/// otherwise a little endian u32 integer +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, Needed}; +/// use nom::number::Endianness; +/// +/// # fn main() { +/// named!(be, u32!(Endianness::Big)); +/// +/// assert_eq!(be(b"\x00\x01\x02\x03abcd"), Ok((&b"abcd"[..], 0x00010203))); +/// assert_eq!(be(b"\x01"), Err(Err::Incomplete(Needed::Size(4)))); +/// +/// named!(le, u32!(Endianness::Little)); +/// +/// assert_eq!(le(b"\x00\x01\x02\x03abcd"), Ok((&b"abcd"[..], 0x03020100))); +/// assert_eq!(le(b"\x01"), Err(Err::Incomplete(Needed::Size(4)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! 
u32 ( ($i:expr, $e:expr) => ( {if $crate::number::Endianness::Big == $e { $crate::number::streaming::be_u32($i) } else { $crate::number::streaming::le_u32($i) } } );); + +/// if the parameter is nom::Endianness::Big, parse a big endian u64 integer, +/// otherwise a little endian u64 integer +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, Needed}; +/// use nom::number::Endianness; +/// +/// # fn main() { +/// named!(be, u64!(Endianness::Big)); +/// +/// assert_eq!(be(b"\x00\x01\x02\x03\x04\x05\x06\x07abcd"), Ok((&b"abcd"[..], 0x0001020304050607))); +/// assert_eq!(be(b"\x01"), Err(Err::Incomplete(Needed::Size(8)))); +/// +/// named!(le, u64!(Endianness::Little)); +/// +/// assert_eq!(le(b"\x00\x01\x02\x03\x04\x05\x06\x07abcd"), Ok((&b"abcd"[..], 0x0706050403020100))); +/// assert_eq!(le(b"\x01"), Err(Err::Incomplete(Needed::Size(8)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! u64 ( ($i:expr, $e:expr) => ( {if $crate::number::Endianness::Big == $e { $crate::number::streaming::be_u64($i) } else { $crate::number::streaming::le_u64($i) } } );); + +/// if the parameter is nom::Endianness::Big, parse a big endian u128 integer, +/// otherwise a little endian u128 integer +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, Needed}; +/// use nom::number::Endianness; +/// +/// # fn main() { +/// named!(be, u128!(Endianness::Big)); +/// +/// assert_eq!(be(b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15abcd"), Ok((&b"abcd"[..], 0x00010203040506070809101112131415))); +/// assert_eq!(be(b"\x01"), Err(Err::Incomplete(Needed::Size(16)))); +/// +/// named!(le, u128!(Endianness::Little)); +/// +/// assert_eq!(le(b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15abcd"), Ok((&b"abcd"[..], 0x15141312111009080706050403020100))); +/// assert_eq!(le(b"\x01"), Err(Err::Incomplete(Needed::Size(16)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +#[cfg(stable_i128)] +macro_rules! u128 ( ($i:expr, $e:expr) => ( {if $crate::number::Endianness::Big == $e { $crate::number::streaming::be_u128($i) } else { $crate::number::streaming::le_u128($i) } } );); + +/// if the parameter is nom::Endianness::Big, parse a big endian i16 integer, +/// otherwise a little endian i16 integer +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, Needed}; +/// use nom::number::Endianness; +/// +/// # fn main() { +/// named!(be, i16!(Endianness::Big)); +/// +/// assert_eq!(be(b"\x00\x01abcd"), Ok((&b"abcd"[..], 0x0001))); +/// assert_eq!(be(b"\x01"), Err(Err::Incomplete(Needed::Size(2)))); +/// +/// named!(le, i16!(Endianness::Little)); +/// +/// assert_eq!(le(b"\x00\x01abcd"), Ok((&b"abcd"[..], 0x0100))); +/// assert_eq!(le(b"\x01"), Err(Err::Incomplete(Needed::Size(2)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! 
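+// the signed selectors below follow the same pattern: they pick the matching
+// be_i*/le_i* parser from `number::streaming` based on the `Endianness` argument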
i16 ( ($i:expr, $e:expr) => ( {if $crate::number::Endianness::Big == $e { $crate::number::streaming::be_i16($i) } else { $crate::number::streaming::le_i16($i) } } );); + +/// if the parameter is nom::Endianness::Big, parse a big endian i32 integer, +/// otherwise a little endian i32 integer +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, Needed}; +/// use nom::number::Endianness; +/// +/// # fn main() { +/// named!(be, i32!(Endianness::Big)); +/// +/// assert_eq!(be(b"\x00\x01\x02\x03abcd"), Ok((&b"abcd"[..], 0x00010203))); +/// assert_eq!(be(b"\x01"), Err(Err::Incomplete(Needed::Size(4)))); +/// +/// named!(le, i32!(Endianness::Little)); +/// +/// assert_eq!(le(b"\x00\x01\x02\x03abcd"), Ok((&b"abcd"[..], 0x03020100))); +/// assert_eq!(le(b"\x01"), Err(Err::Incomplete(Needed::Size(4)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! i32 ( ($i:expr, $e:expr) => ( {if $crate::number::Endianness::Big == $e { $crate::number::streaming::be_i32($i) } else { $crate::number::streaming::le_i32($i) } } );); + +/// if the parameter is nom::Endianness::Big, parse a big endian i64 integer, +/// otherwise a little endian i64 integer +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, Needed}; +/// use nom::number::Endianness; +/// +/// # fn main() { +/// named!(be, i64!(Endianness::Big)); +/// +/// assert_eq!(be(b"\x00\x01\x02\x03\x04\x05\x06\x07abcd"), Ok((&b"abcd"[..], 0x0001020304050607))); +/// assert_eq!(be(b"\x01"), Err(Err::Incomplete(Needed::Size(8)))); +/// +/// named!(le, i64!(Endianness::Little)); +/// +/// assert_eq!(le(b"\x00\x01\x02\x03\x04\x05\x06\x07abcd"), Ok((&b"abcd"[..], 0x0706050403020100))); +/// assert_eq!(le(b"\x01"), Err(Err::Incomplete(Needed::Size(8)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +macro_rules! i64 ( ($i:expr, $e:expr) => ( {if $crate::number::Endianness::Big == $e { $crate::number::streaming::be_i64($i) } else { $crate::number::streaming::le_i64($i) } } );); + +/// if the parameter is nom::Endianness::Big, parse a big endian i64 integer, +/// otherwise a little endian i64 integer +/// +/// ```rust +/// # #[macro_use] extern crate nom; +/// # use nom::{Err, Needed}; +/// use nom::number::Endianness; +/// +/// # fn main() { +/// named!(be, i128!(Endianness::Big)); +/// +/// assert_eq!(be(b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15abcd"), Ok((&b"abcd"[..], 0x00010203040506070809101112131415))); +/// assert_eq!(be(b"\x01"), Err(Err::Incomplete(Needed::Size(16)))); +/// +/// named!(le, i128!(Endianness::Little)); +/// +/// assert_eq!(le(b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15abcd"), Ok((&b"abcd"[..], 0x15141312111009080706050403020100))); +/// assert_eq!(le(b"\x01"), Err(Err::Incomplete(Needed::Size(16)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] +#[cfg(stable_i128)] +macro_rules! 
i128 ( ($i:expr, $e:expr) => ( {if $crate::number::Endianness::Big == $e { $crate::number::streaming::be_i128($i) } else { $crate::number::streaming::le_i128($i) } } );); + +#[cfg(test)] +mod tests { + use crate::number::Endianness; + + #[test] + fn configurable_endianness() { + named!(be_tst16, u16!(Endianness::Big)); + named!(le_tst16, u16!(Endianness::Little)); + assert_eq!(be_tst16(&[0x80, 0x00]), Ok((&b""[..], 32_768_u16))); + assert_eq!(le_tst16(&[0x80, 0x00]), Ok((&b""[..], 128_u16))); + + named!(be_tst32, u32!(Endianness::Big)); + named!(le_tst32, u32!(Endianness::Little)); + assert_eq!( + be_tst32(&[0x12, 0x00, 0x60, 0x00]), + Ok((&b""[..], 302_014_464_u32)) + ); + assert_eq!( + le_tst32(&[0x12, 0x00, 0x60, 0x00]), + Ok((&b""[..], 6_291_474_u32)) + ); + + named!(be_tst64, u64!(Endianness::Big)); + named!(le_tst64, u64!(Endianness::Little)); + assert_eq!( + be_tst64(&[0x12, 0x00, 0x60, 0x00, 0x12, 0x00, 0x80, 0x00]), + Ok((&b""[..], 1_297_142_246_100_992_000_u64)) + ); + assert_eq!( + le_tst64(&[0x12, 0x00, 0x60, 0x00, 0x12, 0x00, 0x80, 0x00]), + Ok((&b""[..], 36_028_874_334_666_770_u64)) + ); + + named!(be_tsti16, i16!(Endianness::Big)); + named!(le_tsti16, i16!(Endianness::Little)); + assert_eq!(be_tsti16(&[0x00, 0x80]), Ok((&b""[..], 128_i16))); + assert_eq!(le_tsti16(&[0x00, 0x80]), Ok((&b""[..], -32_768_i16))); + + named!(be_tsti32, i32!(Endianness::Big)); + named!(le_tsti32, i32!(Endianness::Little)); + assert_eq!( + be_tsti32(&[0x00, 0x12, 0x60, 0x00]), + Ok((&b""[..], 1_204_224_i32)) + ); + assert_eq!( + le_tsti32(&[0x00, 0x12, 0x60, 0x00]), + Ok((&b""[..], 6_296_064_i32)) + ); + + named!(be_tsti64, i64!(Endianness::Big)); + named!(le_tsti64, i64!(Endianness::Little)); + assert_eq!( + be_tsti64(&[0x00, 0xFF, 0x60, 0x00, 0x12, 0x00, 0x80, 0x00]), + Ok((&b""[..], 71_881_672_479_506_432_i64)) + ); + assert_eq!( + le_tsti64(&[0x00, 0xFF, 0x60, 0x00, 0x12, 0x00, 0x80, 0x00]), + Ok((&b""[..], 36_028_874_334_732_032_i64)) + ); + } + + //FIXME + /* + #[test] + #[cfg(feature = "std")] + fn manual_configurable_endianness_test() { + let x = 1; + let int_parse: Box IResult<&[u8], u16, (&[u8], ErrorKind)>> = if x == 2 { + Box::new(be_u16) + } else { + Box::new(le_u16) + }; + println!("{:?}", int_parse(&b"3"[..])); + assert_eq!(int_parse(&[0x80, 0x00]), Ok((&b""[..], 128_u16))); + } + */ +} diff --git a/third_party/rust/nom/src/number/mod.rs b/third_party/rust/nom/src/number/mod.rs new file mode 100644 index 0000000000..9be4c38a45 --- /dev/null +++ b/third_party/rust/nom/src/number/mod.rs @@ -0,0 +1,17 @@ +//! parsers recognizing numbers + +#[macro_use] +mod macros; + +pub mod streaming; +pub mod complete; + +/// Configurable endianness +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum Endianness { + /// big endian + Big, + /// little endian + Little, +} + diff --git a/third_party/rust/nom/src/number/streaming.rs b/third_party/rust/nom/src/number/streaming.rs new file mode 100644 index 0000000000..15a89a4976 --- /dev/null +++ b/third_party/rust/nom/src/number/streaming.rs @@ -0,0 +1,1207 @@ +//! 
parsers recognizing numbers, streaming version + +use crate::internal::*; +use crate::error::{ErrorKind, ParseError}; +use crate::traits::{AsChar, InputIter, InputLength, InputTakeAtPosition}; +use crate::lib::std::ops::{RangeFrom, RangeTo}; +use crate::traits::{Offset, Slice}; +use crate::character::streaming::{char, digit1}; +use crate::sequence::{pair, tuple}; +use crate::combinator::{cut, map, opt, recognize}; +use crate::branch::alt; + +/// Recognizes an unsigned 1 byte integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::be_u8; +/// +/// let parser = be_u8::<(_, ErrorKind)>; +/// +/// assert_eq!(parser(b"\x00\x01abcd"), Ok((&b"\x01abcd"[..], 0x00))); +/// assert_eq!(parser(b""), Err(Err::Incomplete(Needed::Size(1)))); +/// ``` +#[inline] +pub fn be_u8<'a, E: ParseError<&'a[u8]>>(i: &'a[u8]) -> IResult<&'a[u8], u8, E> { + if i.len() < 1 { + Err(Err::Incomplete(Needed::Size(1))) + } else { + Ok((&i[1..], i[0])) + } +} + +/// Recognizes a big endian unsigned 2 bytes integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::be_u16; +/// +/// let parser = |s| { +/// be_u16::<(_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x01abcd"), Ok((&b"abcd"[..], 0x0001))); +/// assert_eq!(parser(b"\x01"), Err(Err::Incomplete(Needed::Size(2)))); +/// ``` +#[inline] +pub fn be_u16<'a, E: ParseError<&'a[u8]>>(i: &'a[u8]) -> IResult<&'a[u8], u16, E> { + if i.len() < 2 { + Err(Err::Incomplete(Needed::Size(2))) + } else { + let res = ((i[0] as u16) << 8) + i[1] as u16; + Ok((&i[2..], res)) + } +} + +/// Recognizes a big endian unsigned 3 byte integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::be_u24; +/// +/// let parser = |s| { +/// be_u24::<(_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x01\x02abcd"), Ok((&b"abcd"[..], 0x000102))); +/// assert_eq!(parser(b"\x01"), Err(Err::Incomplete(Needed::Size(3)))); +/// ``` +#[inline] +pub fn be_u24<'a, E: ParseError<&'a[u8]>>(i: &'a[u8]) -> IResult<&'a[u8], u32, E> { + if i.len() < 3 { + Err(Err::Incomplete(Needed::Size(3))) + } else { + let res = ((i[0] as u32) << 16) + ((i[1] as u32) << 8) + (i[2] as u32); + Ok((&i[3..], res)) + } +} + +/// Recognizes a big endian unsigned 4 bytes integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::be_u32; +/// +/// let parser = |s| { +/// be_u32::<(_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x01\x02\x03abcd"), Ok((&b"abcd"[..], 0x00010203))); +/// assert_eq!(parser(b"\x01"), Err(Err::Incomplete(Needed::Size(4)))); +/// ``` +#[inline] +pub fn be_u32<'a, E: ParseError<&'a[u8]>>(i: &'a[u8]) -> IResult<&'a[u8], u32, E> { + if i.len() < 4 { + Err(Err::Incomplete(Needed::Size(4))) + } else { + let res = ((i[0] as u32) << 24) + ((i[1] as u32) << 16) + ((i[2] as u32) << 8) + i[3] as u32; + Ok((&i[4..], res)) + } +} + +/// Recognizes a big endian unsigned 8 bytes 
integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::be_u64; +/// +/// let parser = |s| { +/// be_u64::<(_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x01\x02\x03\x04\x05\x06\x07abcd"), Ok((&b"abcd"[..], 0x0001020304050607))); +/// assert_eq!(parser(b"\x01"), Err(Err::Incomplete(Needed::Size(8)))); +/// ``` +#[inline] +pub fn be_u64<'a, E: ParseError<&'a[u8]>>(i: &'a[u8]) -> IResult<&'a[u8], u64, E> { + if i.len() < 8 { + Err(Err::Incomplete(Needed::Size(8))) + } else { + let res = ((i[0] as u64) << 56) + ((i[1] as u64) << 48) + ((i[2] as u64) << 40) + ((i[3] as u64) << 32) + ((i[4] as u64) << 24) + + ((i[5] as u64) << 16) + ((i[6] as u64) << 8) + i[7] as u64; + Ok((&i[8..], res)) + } +} + +/// Recognizes a big endian unsigned 16 bytes integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::be_u128; +/// +/// let parser = |s| { +/// be_u128::<(_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15abcd"), Ok((&b"abcd"[..], 0x00010203040506070809101112131415))); +/// assert_eq!(parser(b"\x01"), Err(Err::Incomplete(Needed::Size(16)))); +/// ``` +#[inline] +#[cfg(stable_i128)] +pub fn be_u128<'a, E: ParseError<&'a[u8]>>(i: &'a[u8]) -> IResult<&'a[u8], u128, E> { + if i.len() < 16 { + Err(Err::Incomplete(Needed::Size(16))) + } else { + let res = ((i[0] as u128) << 120) + + ((i[1] as u128) << 112) + + ((i[2] as u128) << 104) + + ((i[3] as u128) << 96) + + ((i[4] as u128) << 88) + + ((i[5] as u128) << 80) + + ((i[6] as u128) << 72) + + ((i[7] as u128) << 64) + + ((i[8] as u128) << 56) + + ((i[9] as u128) << 48) + + ((i[10] as u128) << 40) + + ((i[11] as u128) << 32) + + ((i[12] as u128) << 24) + + ((i[13] as u128) << 16) + + ((i[14] as u128) << 8) + + i[15] as u128; + Ok((&i[16..], res)) + } +} + +/// Recognizes a signed 1 byte integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::be_i8; +/// +/// let parser = be_i8::<(_, ErrorKind)>; +/// +/// assert_eq!(parser(b"\x00\x01abcd"), Ok((&b"\x01abcd"[..], 0x00))); +/// assert_eq!(parser(b""), Err(Err::Incomplete(Needed::Size(1)))); +/// ``` +#[inline] +pub fn be_i8<'a, E: ParseError<&'a[u8]>>(i: &'a[u8]) -> IResult<&'a[u8], i8, E> { + map!(i, be_u8, |x| x as i8) +} + +/// Recognizes a big endian signed 2 bytes integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::be_i16; +/// +/// let parser = be_i16::<(_, ErrorKind)>; +/// +/// assert_eq!(parser(b"\x00\x01abcd"), Ok((&b"abcd"[..], 0x0001))); +/// assert_eq!(parser(b""), Err(Err::Incomplete(Needed::Size(2)))); +/// ``` +#[inline] +pub fn be_i16<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], i16, E> { + map!(i, be_u16, |x| x as i16) +} + +/// Recognizes a big endian signed 3 bytes integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// 
```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::be_i24; +/// +/// let parser = be_i24::<(_, ErrorKind)>; +/// +/// assert_eq!(parser(b"\x00\x01\x02abcd"), Ok((&b"abcd"[..], 0x000102))); +/// assert_eq!(parser(b""), Err(Err::Incomplete(Needed::Size(3)))); +/// ``` +#[inline] +pub fn be_i24<'a, E: ParseError<&'a[u8]>>(i: &'a[u8]) -> IResult<&'a[u8], i32, E> { + // Same as the unsigned version but we need to sign-extend manually here + map!(i, be_u24, |x| if x & 0x80_00_00 != 0 { + (x | 0xff_00_00_00) as i32 + } else { + x as i32 + }) +} + +/// Recognizes a big endian signed 4 bytes integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::be_i32; +/// +/// let parser = be_i32::<(_, ErrorKind)>; +/// +/// assert_eq!(parser(b"\x00\x01\x02\x03abcd"), Ok((&b"abcd"[..], 0x00010203))); +/// assert_eq!(parser(b""), Err(Err::Incomplete(Needed::Size(4)))); +/// ``` +#[inline] +pub fn be_i32<'a, E: ParseError<&'a[u8]>>(i: &'a[u8]) -> IResult<&'a[u8], i32, E> { + map!(i, be_u32, |x| x as i32) +} + +/// Recognizes a big endian signed 8 bytes integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::be_i64; +/// +/// let parser = be_i64::<(_, ErrorKind)>; +/// +/// assert_eq!(parser(b"\x00\x01\x02\x03\x04\x05\x06\x07abcd"), Ok((&b"abcd"[..], 0x0001020304050607))); +/// assert_eq!(parser(b"\x01"), Err(Err::Incomplete(Needed::Size(8)))); +/// ``` +#[inline] +pub fn be_i64<'a, E: ParseError<&'a[u8]>>(i: &'a[u8]) -> IResult<&'a[u8], i64, E> { + map!(i, be_u64, |x| x as i64) +} + +/// Recognizes a big endian signed 16 bytes integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::be_i128; +/// +/// let parser = be_i128::<(_, ErrorKind)>; +/// +/// assert_eq!(parser(b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15abcd"), Ok((&b"abcd"[..], 0x00010203040506070809101112131415))); +/// assert_eq!(parser(b"\x01"), Err(Err::Incomplete(Needed::Size(16)))); +/// ``` +#[inline] +#[cfg(stable_i128)] +pub fn be_i128<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], i128, E> { + map!(i, be_u128, |x| x as i128) +} + +/// Recognizes an unsigned 1 byte integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::le_u8; +/// +/// let parser = le_u8::<(_, ErrorKind)>; +/// +/// assert_eq!(parser(b"\x00\x01abcd"), Ok((&b"\x01abcd"[..], 0x00))); +/// assert_eq!(parser(b""), Err(Err::Incomplete(Needed::Size(1)))); +/// ``` +#[inline] +pub fn le_u8<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], u8, E> { + if i.len() < 1 { + Err(Err::Incomplete(Needed::Size(1))) + } else { + Ok((&i[1..], i[0])) + } +} + +/// Recognizes a little endian unsigned 2 bytes integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, 
Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::le_u16; +/// +/// let parser = |s| { +/// le_u16::<(_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x01abcd"), Ok((&b"abcd"[..], 0x0100))); +/// assert_eq!(parser(b"\x01"), Err(Err::Incomplete(Needed::Size(2)))); +/// ``` +#[inline] +pub fn le_u16<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], u16, E> { + if i.len() < 2 { + Err(Err::Incomplete(Needed::Size(2))) + } else { + let res = ((i[1] as u16) << 8) + i[0] as u16; + Ok((&i[2..], res)) + } +} + +/// Recognizes a little endian unsigned 3 bytes integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::le_u24; +/// +/// let parser = |s| { +/// le_u24::<(_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x01\x02abcd"), Ok((&b"abcd"[..], 0x020100))); +/// assert_eq!(parser(b"\x01"), Err(Err::Incomplete(Needed::Size(3)))); +/// ``` +#[inline] +pub fn le_u24<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], u32, E> { + if i.len() < 3 { + Err(Err::Incomplete(Needed::Size(3))) + } else { + let res = (i[0] as u32) + ((i[1] as u32) << 8) + ((i[2] as u32) << 16); + Ok((&i[3..], res)) + } +} + +/// Recognizes a little endian unsigned 4 bytes integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::le_u32; +/// +/// let parser = |s| { +/// le_u32::<(_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x01\x02\x03abcd"), Ok((&b"abcd"[..], 0x03020100))); +/// assert_eq!(parser(b"\x01"), Err(Err::Incomplete(Needed::Size(4)))); +/// ``` +#[inline] +pub fn le_u32<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], u32, E> { + if i.len() < 4 { + Err(Err::Incomplete(Needed::Size(4))) + } else { + let res = ((i[3] as u32) << 24) + ((i[2] as u32) << 16) + ((i[1] as u32) << 8) + i[0] as u32; + Ok((&i[4..], res)) + } +} + +/// Recognizes a little endian unsigned 8 bytes integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::le_u64; +/// +/// let parser = |s| { +/// le_u64::<(_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x01\x02\x03\x04\x05\x06\x07abcd"), Ok((&b"abcd"[..], 0x0706050403020100))); +/// assert_eq!(parser(b"\x01"), Err(Err::Incomplete(Needed::Size(8)))); +/// ``` +#[inline] +pub fn le_u64<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], u64, E> { + if i.len() < 8 { + Err(Err::Incomplete(Needed::Size(8))) + } else { + let res = ((i[7] as u64) << 56) + ((i[6] as u64) << 48) + ((i[5] as u64) << 40) + ((i[4] as u64) << 32) + ((i[3] as u64) << 24) + + ((i[2] as u64) << 16) + ((i[1] as u64) << 8) + i[0] as u64; + Ok((&i[8..], res)) + } +} + +/// Recognizes a little endian unsigned 16 bytes integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::le_u128; +/// +/// let parser = |s| { +/// le_u128::<(_, ErrorKind)>(s) +/// }; +/// +/// 
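+/// // little endian: the least significant byte comes first, so the 16 input
+/// // bytes are reassembled in reverse order into the value below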
assert_eq!(parser(b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15abcd"), Ok((&b"abcd"[..], 0x15141312111009080706050403020100))); +/// assert_eq!(parser(b"\x01"), Err(Err::Incomplete(Needed::Size(16)))); +/// ``` +#[inline] +#[cfg(stable_i128)] +pub fn le_u128<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], u128, E> { + if i.len() < 16 { + Err(Err::Incomplete(Needed::Size(16))) + } else { + let res = ((i[15] as u128) << 120) + + ((i[14] as u128) << 112) + + ((i[13] as u128) << 104) + + ((i[12] as u128) << 96) + + ((i[11] as u128) << 88) + + ((i[10] as u128) << 80) + + ((i[9] as u128) << 72) + + ((i[8] as u128) << 64) + + ((i[7] as u128) << 56) + + ((i[6] as u128) << 48) + + ((i[5] as u128) << 40) + + ((i[4] as u128) << 32) + + ((i[3] as u128) << 24) + + ((i[2] as u128) << 16) + + ((i[1] as u128) << 8) + + i[0] as u128; + Ok((&i[16..], res)) + } +} + +/// Recognizes a signed 1 byte integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::le_i8; +/// +/// let parser = le_i8::<(_, ErrorKind)>; +/// +/// assert_eq!(parser(b"\x00\x01abcd"), Ok((&b"\x01abcd"[..], 0x00))); +/// assert_eq!(parser(b""), Err(Err::Incomplete(Needed::Size(1)))); +/// ``` +#[inline] +pub fn le_i8<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], i8, E> { + map!(i, le_u8, |x| x as i8) +} + +/// Recognizes a little endian signed 2 bytes integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::le_i16; +/// +/// let parser = |s| { +/// le_i16::<(_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x01abcd"), Ok((&b"abcd"[..], 0x0100))); +/// assert_eq!(parser(b"\x01"), Err(Err::Incomplete(Needed::Size(2)))); +/// ``` +#[inline] +pub fn le_i16<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], i16, E> { + map!(i, le_u16, |x| x as i16) +} + +/// Recognizes a little endian signed 3 bytes integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::le_i24; +/// +/// let parser = |s| { +/// le_i24::<(_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x01\x02abcd"), Ok((&b"abcd"[..], 0x020100))); +/// assert_eq!(parser(b"\x01"), Err(Err::Incomplete(Needed::Size(3)))); +/// ``` +#[inline] +pub fn le_i24<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], i32, E> { + // Same as the unsigned version but we need to sign-extend manually here + map!(i, le_u24, |x| if x & 0x80_00_00 != 0 { + (x | 0xff_00_00_00) as i32 + } else { + x as i32 + }) +} + +/// Recognizes a little endian signed 4 bytes integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::le_i32; +/// +/// let parser = |s| { +/// le_i32::<(_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x01\x02\x03abcd"), Ok((&b"abcd"[..], 0x03020100))); +/// assert_eq!(parser(b"\x01"), Err(Err::Incomplete(Needed::Size(4)))); +/// ``` +#[inline] +pub fn le_i32<'a, E: ParseError<&'a 
[u8]>>(i: &'a[u8]) -> IResult<&'a[u8], i32, E> { + map!(i, le_u32, |x| x as i32) +} + +/// Recognizes a little endian signed 8 bytes integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::le_i64; +/// +/// let parser = |s| { +/// le_i64::<(_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x01\x02\x03\x04\x05\x06\x07abcd"), Ok((&b"abcd"[..], 0x0706050403020100))); +/// assert_eq!(parser(b"\x01"), Err(Err::Incomplete(Needed::Size(8)))); +/// ``` +#[inline] +pub fn le_i64<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], i64, E> { + map!(i, le_u64, |x| x as i64) +} + +/// Recognizes a little endian signed 16 bytes integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::le_i128; +/// +/// let parser = |s| { +/// le_i128::<(_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15abcd"), Ok((&b"abcd"[..], 0x15141312111009080706050403020100))); +/// assert_eq!(parser(b"\x01"), Err(Err::Incomplete(Needed::Size(16)))); +/// ``` +#[inline] +#[cfg(stable_i128)] +pub fn le_i128<'a, E: ParseError<&'a [u8]>>(i: &'a[u8]) -> IResult<&'a[u8], i128, E> { + map!(i, le_u128, |x| x as i128) +} + +/// Recognizes a big endian 4 bytes floating point number +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::be_f32; +/// +/// let parser = |s| { +/// be_f32::<(_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&[0x40, 0x29, 0x00, 0x00][..]), Ok((&b""[..], 2.640625))); +/// assert_eq!(parser(&[0x01]), Err(Err::Incomplete(Needed::Size(4)))); +/// ``` +#[inline] +pub fn be_f32<'a, E: ParseError<&'a [u8]>>(input: &'a[u8]) -> IResult<&'a[u8], f32, E> { + match be_u32(input) { + Err(e) => Err(e), + Ok((i, o)) => Ok((i, f32::from_bits(o))), + } +} + +/// Recognizes a big endian 8 bytes floating point number +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::be_f64; +/// +/// let parser = |s| { +/// be_f64::<(_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&[0x40, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00][..]), Ok((&b""[..], 12.5))); +/// assert_eq!(parser(&[0x01]), Err(Err::Incomplete(Needed::Size(8)))); +/// ``` +#[inline] +pub fn be_f64<'a, E: ParseError<&'a [u8]>>(input: &'a[u8]) -> IResult<&'a[u8], f64, E> { + match be_u64(input) { + Err(e) => Err(e), + Ok((i, o)) => Ok((i, f64::from_bits(o))), + } +} + +/// Recognizes a little endian 4 bytes floating point number +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::le_f32; +/// +/// let parser = |s| { +/// le_f32::<(_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&[0x00, 0x00, 0x48, 0x41][..]), Ok((&b""[..], 12.5))); +/// assert_eq!(parser(&[0x01]), Err(Err::Incomplete(Needed::Size(4)))); 
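+/// // for example, 1.0f32 has the bit pattern 0x3F80_0000, stored little-endian
+/// // as the bytes 00 00 80 3f; any extra bytes are left as remaining input
+/// assert_eq!(parser(&[0x00, 0x00, 0x80, 0x3f, 0xff][..]), Ok((&b"\xff"[..], 1.0)));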
+/// ``` +#[inline] +pub fn le_f32<'a, E: ParseError<&'a [u8]>>(input: &'a[u8]) -> IResult<&'a[u8], f32, E> { + match le_u32(input) { + Err(e) => Err(e), + Ok((i, o)) => Ok((i, f32::from_bits(o))), + } +} + +/// Recognizes a little endian 8 bytes floating point number +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::le_f64; +/// +/// let parser = |s| { +/// le_f64::<(_, ErrorKind)>(s) +/// }; +/// +/// assert_eq!(parser(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x41][..]), Ok((&b""[..], 3145728.0))); +/// assert_eq!(parser(&[0x01]), Err(Err::Incomplete(Needed::Size(8)))); +/// ``` +#[inline] +pub fn le_f64<'a, E: ParseError<&'a [u8]>>(input: &'a[u8]) -> IResult<&'a[u8], f64, E> { + match le_u64(input) { + Err(e) => Err(e), + Ok((i, o)) => Ok((i, f64::from_bits(o))), + } +} + +/// Recognizes a hex-encoded integer +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if there is not enough data +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::hex_u32; +/// +/// let parser = |s| { +/// hex_u32(s) +/// }; +/// +/// assert_eq!(parser(b"01AE;"), Ok((&b";"[..], 0x01AE))); +/// assert_eq!(parser(b"abc"), Err(Err::Incomplete(Needed::Size(1)))); +/// assert_eq!(parser(b"ggg"), Err(Err::Error((&b"ggg"[..], ErrorKind::IsA)))); +/// ``` +#[inline] +pub fn hex_u32<'a, E: ParseError<&'a [u8]>>(input: &'a[u8]) -> IResult<&'a[u8], u32, E> { + let (i, o) = crate::bytes::streaming::is_a(&b"0123456789abcdefABCDEF"[..])(input)?; + + // Do not parse more than 8 characters for a u32 + let (parsed, remaining) = if o.len() <= 8 { + (o, i) + } else { + (&input[..8], &input[8..]) + }; + + let res = parsed + .iter() + .rev() + .enumerate() + .map(|(k, &v)| { + let digit = v as char; + digit.to_digit(16).unwrap_or(0) << (k * 4) + }) + .sum(); + + Ok((remaining, res)) +} + +/// Recognizes a floating point number in text format and returns the corresponding part of the input +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if it reaches the end of input +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::recognize_float; +/// +/// let parser = |s| { +/// recognize_float(s) +/// }; +/// +/// assert_eq!(parser("11e-1;"), Ok((";", "11e-1"))); +/// assert_eq!(parser("123E-02;"), Ok((";", "123E-02"))); +/// assert_eq!(parser("123K-01"), Ok(("K-01", "123"))); +/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Char)))); +/// ``` +#[allow(unused_imports)] +#[cfg_attr(rustfmt, rustfmt_skip)] +pub fn recognize_float>(input: T) -> IResult +where + T: Slice> + Slice>, + T: Clone + Offset, + T: InputIter, + ::Item: AsChar, + T: InputTakeAtPosition, + ::Item: AsChar +{ + recognize( + tuple(( + opt(alt((char('+'), char('-')))), + alt(( + map(tuple((digit1, opt(pair(char('.'), opt(digit1))))), |_| ()), + map(tuple((char('.'), digit1)), |_| ()) + )), + opt(tuple(( + alt((char('e'), char('E'))), + opt(alt((char('+'), char('-')))), + cut(digit1) + ))) + )) + )(input) +} + +/// Recognizes floating point number in a byte string and returns a f32 +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if it reaches the end of input +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use 
nom::number::streaming::float; +/// +/// let parser = |s| { +/// float(s) +/// }; +/// +/// assert_eq!(parser("11e-1;"), Ok((";", 1.1))); +/// assert_eq!(parser("123E-02;"), Ok((";", 1.23))); +/// assert_eq!(parser("123K-01"), Ok(("K-01", 123.0))); +/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Char)))); +/// ``` +#[cfg(not(feature = "lexical"))] +pub fn float>(input: T) -> IResult +where + T: Slice> + Slice>, + T: Clone + Offset, + T: InputIter + InputLength + crate::traits::ParseTo, + ::Item: AsChar, + T: InputTakeAtPosition, + ::Item: AsChar +{ + match recognize_float(input) { + Err(e) => Err(e), + Ok((i, s)) => match s.parse_to() { + Some(n) => Ok((i, n)), + None => Err(Err::Error(E::from_error_kind(i, ErrorKind::Float))) + } + } +} + +/// Recognizes floating point number in a byte string and returns a f32 +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if it reaches the end of input +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::float; +/// +/// let parser = |s| { +/// float(s) +/// }; +/// +/// assert_eq!(parser("11e-1;"), Ok((";", 1.1))); +/// assert_eq!(parser("123E-02;"), Ok((";", 1.23))); +/// assert_eq!(parser("123K-01"), Ok(("K-01", 123.0))); +/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Float)))); +/// ``` +/// +/// this function uses the lexical-core crate for float parsing by default, you +/// can deactivate it by removing the "lexical" feature +#[cfg(feature = "lexical")] +pub fn float>(input: T) -> IResult +where + T: crate::traits::AsBytes + InputLength + Slice>, +{ + match ::lexical_core::parse_partial(input.as_bytes()) { + Ok((value, processed)) => { + if processed == input.input_len() { + Err(Err::Incomplete(Needed::Unknown)) + } else { + Ok((input.slice(processed..), value)) + } + }, + Err(_) => Err(Err::Error(E::from_error_kind(input, ErrorKind::Float))) + } +} + +/// Recognizes floating point number in a byte string and returns a f64 +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if it reaches the end of input +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::double; +/// +/// let parser = |s| { +/// double(s) +/// }; +/// +/// assert_eq!(parser("11e-1;"), Ok((";", 1.1))); +/// assert_eq!(parser("123E-02;"), Ok((";", 1.23))); +/// assert_eq!(parser("123K-01"), Ok(("K-01", 123.0))); +/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Char)))); +/// ``` +#[cfg(not(feature = "lexical"))] +pub fn double>(input: T) -> IResult +where + T: Slice> + Slice>, + T: Clone + Offset, + T: InputIter + InputLength + crate::traits::ParseTo, + ::Item: AsChar, + T: InputTakeAtPosition, + ::Item: AsChar +{ + match recognize_float(input) { + Err(e) => Err(e), + Ok((i, s)) => match s.parse_to() { + Some(n) => Ok((i, n)), + None => Err(Err::Error(E::from_error_kind(i, ErrorKind::Float))) + } + } +} + +/// Recognizes floating point number in a byte string and returns a f64 +/// +/// *streaming version*: will return `Err(nom::Err::Incomplete(_))` if it reaches the end of input +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::number::streaming::double; +/// +/// let parser = |s| { +/// double(s) +/// }; +/// +/// assert_eq!(parser("11e-1;"), Ok((";", 1.1))); +/// assert_eq!(parser("123E-02;"), Ok((";", 1.23))); +/// assert_eq!(parser("123K-01"), Ok(("K-01", 123.0))); +/// 
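+/// // the trailing ';' ends the number, so the parser returns Ok instead of
+/// // reporting Incomplete as it would if the digits ran to the end of input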
assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Float)))); +/// ``` +/// +/// this function uses the lexical-core crate for float parsing by default, you +/// can deactivate it by removing the "lexical" feature +#[cfg(feature = "lexical")] +pub fn double>(input: T) -> IResult +where + T: crate::traits::AsBytes + InputLength + Slice>, +{ + match ::lexical_core::parse_partial(input.as_bytes()) { + Ok((value, processed)) => { + if processed == input.input_len() { + Err(Err::Incomplete(Needed::Unknown)) + } else { + Ok((input.slice(processed..), value)) + } + }, + Err(_) => Err(Err::Error(E::from_error_kind(input, ErrorKind::Float))) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::internal::{Err, Needed}; + use crate::error::ErrorKind; + + macro_rules! assert_parse( + ($left: expr, $right: expr) => { + let res: $crate::IResult<_, _, (_, ErrorKind)> = $left; + assert_eq!(res, $right); + }; + ); + + #[test] + fn i8_tests() { + assert_parse!(be_i8(&[0x00]), Ok((&b""[..], 0))); + assert_parse!(be_i8(&[0x7f]), Ok((&b""[..], 127))); + assert_parse!(be_i8(&[0xff]), Ok((&b""[..], -1))); + assert_parse!(be_i8(&[0x80]), Ok((&b""[..], -128))); + } + + #[test] + fn i16_tests() { + assert_parse!(be_i16(&[0x00, 0x00]), Ok((&b""[..], 0))); + assert_parse!(be_i16(&[0x7f, 0xff]), Ok((&b""[..], 32_767_i16))); + assert_parse!(be_i16(&[0xff, 0xff]), Ok((&b""[..], -1))); + assert_parse!(be_i16(&[0x80, 0x00]), Ok((&b""[..], -32_768_i16))); + } + + #[test] + fn u24_tests() { + assert_parse!(be_u24(&[0x00, 0x00, 0x00]), Ok((&b""[..], 0))); + assert_parse!(be_u24(&[0x00, 0xFF, 0xFF]), Ok((&b""[..], 65_535_u32))); + assert_parse!(be_u24(&[0x12, 0x34, 0x56]), Ok((&b""[..], 1_193_046_u32))); + } + + #[test] + fn i24_tests() { + assert_parse!(be_i24(&[0xFF, 0xFF, 0xFF]), Ok((&b""[..], -1_i32))); + assert_parse!(be_i24(&[0xFF, 0x00, 0x00]), Ok((&b""[..], -65_536_i32))); + assert_parse!(be_i24(&[0xED, 0xCB, 0xAA]), Ok((&b""[..], -1_193_046_i32))); + } + + #[test] + fn i32_tests() { + assert_parse!(be_i32(&[0x00, 0x00, 0x00, 0x00]), Ok((&b""[..], 0))); + assert_parse!( + be_i32(&[0x7f, 0xff, 0xff, 0xff]), + Ok((&b""[..], 2_147_483_647_i32)) + ); + assert_parse!(be_i32(&[0xff, 0xff, 0xff, 0xff]), Ok((&b""[..], -1))); + assert_parse!( + be_i32(&[0x80, 0x00, 0x00, 0x00]), + Ok((&b""[..], -2_147_483_648_i32)) + ); + } + + #[test] + fn i64_tests() { + assert_parse!( + be_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), + Ok((&b""[..], 0)) + ); + assert_parse!( + be_i64(&[0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]), + Ok((&b""[..], 9_223_372_036_854_775_807_i64)) + ); + assert_parse!( + be_i64(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]), + Ok((&b""[..], -1)) + ); + assert_parse!( + be_i64(&[0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), + Ok((&b""[..], -9_223_372_036_854_775_808_i64)) + ); + } + + #[test] + #[cfg(stable_i128)] + fn i128_tests() { + assert_parse!( + be_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), + Ok((&b""[..], 0)) + ); + assert_parse!( + be_i128(&[0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]), + Ok((&b""[..], 170_141_183_460_469_231_731_687_303_715_884_105_727_i128)) + ); + assert_parse!( + be_i128(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]), + Ok((&b""[..], -1)) + ); + assert_parse!( + be_i128(&[0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), + 
Ok((&b""[..], -170_141_183_460_469_231_731_687_303_715_884_105_728_i128)) + ); + } + + #[test] + fn le_i8_tests() { + assert_parse!(le_i8(&[0x00]), Ok((&b""[..], 0))); + assert_parse!(le_i8(&[0x7f]), Ok((&b""[..], 127))); + assert_parse!(le_i8(&[0xff]), Ok((&b""[..], -1))); + assert_parse!(le_i8(&[0x80]), Ok((&b""[..], -128))); + } + + #[test] + fn le_i16_tests() { + assert_parse!(le_i16(&[0x00, 0x00]), Ok((&b""[..], 0))); + assert_parse!(le_i16(&[0xff, 0x7f]), Ok((&b""[..], 32_767_i16))); + assert_parse!(le_i16(&[0xff, 0xff]), Ok((&b""[..], -1))); + assert_parse!(le_i16(&[0x00, 0x80]), Ok((&b""[..], -32_768_i16))); + } + + #[test] + fn le_u24_tests() { + assert_parse!(le_u24(&[0x00, 0x00, 0x00]), Ok((&b""[..], 0))); + assert_parse!(le_u24(&[0xFF, 0xFF, 0x00]), Ok((&b""[..], 65_535_u32))); + assert_parse!(le_u24(&[0x56, 0x34, 0x12]), Ok((&b""[..], 1_193_046_u32))); + } + + #[test] + fn le_i24_tests() { + assert_parse!(le_i24(&[0xFF, 0xFF, 0xFF]), Ok((&b""[..], -1_i32))); + assert_parse!(le_i24(&[0x00, 0x00, 0xFF]), Ok((&b""[..], -65_536_i32))); + assert_parse!(le_i24(&[0xAA, 0xCB, 0xED]), Ok((&b""[..], -1_193_046_i32))); + } + + #[test] + fn le_i32_tests() { + assert_parse!(le_i32(&[0x00, 0x00, 0x00, 0x00]), Ok((&b""[..], 0))); + assert_parse!( + le_i32(&[0xff, 0xff, 0xff, 0x7f]), + Ok((&b""[..], 2_147_483_647_i32)) + ); + assert_parse!(le_i32(&[0xff, 0xff, 0xff, 0xff]), Ok((&b""[..], -1))); + assert_parse!( + le_i32(&[0x00, 0x00, 0x00, 0x80]), + Ok((&b""[..], -2_147_483_648_i32)) + ); + } + + #[test] + fn le_i64_tests() { + assert_parse!( + le_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), + Ok((&b""[..], 0)) + ); + assert_parse!( + le_i64(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f]), + Ok((&b""[..], 9_223_372_036_854_775_807_i64)) + ); + assert_parse!( + le_i64(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]), + Ok((&b""[..], -1)) + ); + assert_parse!( + le_i64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80]), + Ok((&b""[..], -9_223_372_036_854_775_808_i64)) + ); + } + + #[test] + #[cfg(stable_i128)] + fn le_i128_tests() { + assert_parse!( + le_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), + Ok((&b""[..], 0)) + ); + assert_parse!( + le_i128(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f]), + Ok((&b""[..], 170_141_183_460_469_231_731_687_303_715_884_105_727_i128)) + ); + assert_parse!( + le_i128(&[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]), + Ok((&b""[..], -1)) + ); + assert_parse!( + le_i128(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80]), + Ok((&b""[..], -170_141_183_460_469_231_731_687_303_715_884_105_728_i128)) + ); + } + + #[test] + fn be_f32_tests() { + assert_parse!(be_f32(&[0x00, 0x00, 0x00, 0x00]), Ok((&b""[..], 0_f32))); + assert_parse!( + be_f32(&[0x4d, 0x31, 0x1f, 0xd8]), + Ok((&b""[..], 185_728_392_f32)) + ); + } + + #[test] + fn be_f64_tests() { + assert_parse!( + be_f64(&[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), + Ok((&b""[..], 0_f64)) + ); + assert_parse!( + be_f64(&[0x41, 0xa6, 0x23, 0xfb, 0x10, 0x00, 0x00, 0x00]), + Ok((&b""[..], 185_728_392_f64)) + ); + } + + #[test] + fn le_f32_tests() { + assert_parse!(le_f32(&[0x00, 0x00, 0x00, 0x00]), Ok((&b""[..], 0_f32))); + assert_parse!( + le_f32(&[0xd8, 0x1f, 0x31, 0x4d]), + Ok((&b""[..], 185_728_392_f32)) + ); + } + + #[test] + fn le_f64_tests() { + assert_parse!( + le_f64(&[0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00]), + Ok((&b""[..], 0_f64)) + ); + assert_parse!( + le_f64(&[0x00, 0x00, 0x00, 0x10, 0xfb, 0x23, 0xa6, 0x41]), + Ok((&b""[..], 185_728_392_f64)) + ); + } + + #[test] + fn hex_u32_tests() { + assert_parse!( + hex_u32(&b";"[..]), + Err(Err::Error(error_position!(&b";"[..], ErrorKind::IsA))) + ); + assert_parse!(hex_u32(&b"ff;"[..]), Ok((&b";"[..], 255))); + assert_parse!(hex_u32(&b"1be2;"[..]), Ok((&b";"[..], 7138))); + assert_parse!(hex_u32(&b"c5a31be2;"[..]), Ok((&b";"[..], 3_315_801_058))); + assert_parse!(hex_u32(&b"C5A31be2;"[..]), Ok((&b";"[..], 3_315_801_058))); + assert_parse!(hex_u32(&b"00c5a31be2;"[..]), Ok((&b"e2;"[..], 12_952_347))); + assert_parse!( + hex_u32(&b"c5a31be201;"[..]), + Ok((&b"01;"[..], 3_315_801_058)) + ); + assert_parse!(hex_u32(&b"ffffffff;"[..]), Ok((&b";"[..], 4_294_967_295))); + assert_parse!(hex_u32(&b"0x1be2;"[..]), Ok((&b"x1be2;"[..], 0))); + assert_parse!(hex_u32(&b"12af"[..]), Err(Err::Incomplete(Needed::Size(1)))); + } + + #[test] + #[cfg(feature = "std")] + fn float_test() { + let mut test_cases = vec![ + "+3.14", + "3.14", + "-3.14", + "0", + "0.0", + "1.", + ".789", + "-.5", + "1e7", + "-1E-7", + ".3e-2", + "1.e4", + "1.2e4", + "12.34", + "-1.234E-12", + "-1.234e-12", + ]; + + for test in test_cases.drain(..) { + let expected32 = str::parse::(test).unwrap(); + let expected64 = str::parse::(test).unwrap(); + + println!("now parsing: {} -> {}", test, expected32); + + let larger = format!("{};", test); + assert_parse!(recognize_float(&larger[..]), Ok((";", test))); + + assert_parse!(float(larger.as_bytes()), Ok((&b";"[..], expected32))); + assert_parse!(float(&larger[..]), Ok((";", expected32))); + + assert_parse!(double(larger.as_bytes()), Ok((&b";"[..], expected64))); + assert_parse!(double(&larger[..]), Ok((";", expected64))); + } + + let remaining_exponent = "-1.234E-"; + assert_parse!( + recognize_float(remaining_exponent), + Err(Err::Incomplete(Needed::Size(1))) + ); + } + +} diff --git a/third_party/rust/nom/src/regexp.rs b/third_party/rust/nom/src/regexp.rs index 6f6b47ea90..fd165e1bbf 100644 --- a/third_party/rust/nom/src/regexp.rs +++ b/third_party/rust/nom/src/regexp.rs @@ -3,7 +3,7 @@ macro_rules! regex ( ($re: ident, $s:expr) => ( lazy_static! { - static ref $re: ::regex::Regex = ::regex::Regex::new($s).unwrap(); + static ref $re: $crate::lib::regex::Regex = $crate::lib::regex::Regex::new($s).unwrap(); } ); ); @@ -13,7 +13,7 @@ macro_rules! regex ( macro_rules! regex_bytes ( ($re: ident, $s:expr) => ( lazy_static! { - static ref $re: ::regex::bytes::Regex = ::regex::bytes::Regex::new($s).unwrap(); + static ref $re: $crate::lib::regex::bytes::Regex = $crate::lib::regex::bytes::Regex::new($s).unwrap(); } ); ); @@ -22,20 +22,20 @@ macro_rules! regex_bytes ( /// Returns the whole input if a match is found /// /// requires the `regexp` feature -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! re_match ( ($i:expr, $re:expr) => ( { use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind,IResult}; + use $crate::{Err,error::ErrorKind,IResult}; use $crate::InputLength; use $crate::Slice; - let re = ::regex::Regex::new($re).unwrap(); + let re = $crate::lib::regex::Regex::new($re).unwrap(); if re.is_match(&$i) { Ok(($i.slice($i.input_len()..), $i)) } else { - let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpMatch::))); + let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpMatch))); res } } @@ -47,12 +47,12 @@ macro_rules! 
re_match ( /// Returns the whole input if a match is found. Regular expression calculated at compile time /// /// requires the `regexp_macros` feature -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! re_match_static ( ($i:expr, $re:expr) => ( { use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind,IResult}; + use $crate::{Err,error::ErrorKind,IResult}; use $crate::InputLength; use $crate::Slice; @@ -60,7 +60,7 @@ macro_rules! re_match_static ( if RE.is_match(&$i) { Ok(($i.slice($i.input_len()..), $i)) } else { - let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpMatch::))); + let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpMatch))); res } } @@ -71,20 +71,20 @@ macro_rules! re_match_static ( /// Returns the whole input if a match is found /// /// requires the `regexp` feature -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! re_bytes_match ( ($i:expr, $re:expr) => ( { use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind,IResult}; + use $crate::{Err,error::ErrorKind,IResult}; use $crate::InputLength; use $crate::Slice; - let re = ::regex::bytes::Regex::new($re).unwrap(); + let re = $crate::lib::regex::bytes::Regex::new($re).unwrap(); if re.is_match(&$i) { Ok(($i.slice($i.input_len()..), $i)) } else { - let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpMatch::))); + let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpMatch))); res } } @@ -96,12 +96,12 @@ macro_rules! re_bytes_match ( /// Returns the whole input if a match is found. Regular expression calculated at compile time /// /// requires the `regexp_macros` feature -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! re_bytes_match_static ( ($i:expr, $re:expr) => ( { use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind,IResult}; + use $crate::{Err,error::ErrorKind,IResult}; use $crate::InputLength; use $crate::Slice; @@ -109,7 +109,7 @@ macro_rules! re_bytes_match_static ( if RE.is_match(&$i) { Ok(($i.slice($i.input_len()..), $i)) } else { - let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpMatch::))); + let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpMatch))); res } } @@ -120,19 +120,19 @@ macro_rules! re_bytes_match_static ( /// Returns the first match /// /// requires the `regexp` feature -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! re_find ( ($i:expr, $re:expr) => ( { use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind,IResult}; + use $crate::{Err,error::ErrorKind,IResult}; use $crate::Slice; - let re = ::regex::Regex::new($re).unwrap(); + let re = $crate::lib::regex::Regex::new($re).unwrap(); if let Some(m) = re.find(&$i) { Ok(($i.slice(m.end()..), $i.slice(m.start()..m.end()))) } else { - let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpFind::))); + let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpFind))); res } } @@ -144,19 +144,19 @@ macro_rules! re_find ( /// Returns the first match. Regular expression calculated at compile time /// /// requires the `regexp_macros` feature -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! 
re_find_static ( ($i:expr, $re:expr) => ( { use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind,IResult}; + use $crate::{Err,error::ErrorKind,IResult}; use $crate::Slice; regex!(RE, $re); if let Some(m) = RE.find(&$i) { Ok(($i.slice(m.end()..), $i.slice(m.start()..m.end()))) } else { - let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpFind::))); + let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpFind))); res } } @@ -168,19 +168,19 @@ macro_rules! re_find_static ( /// Returns the first match /// /// requires the `regexp` feature -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! re_bytes_find ( ($i:expr, $re:expr) => ( { use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind,IResult}; + use $crate::{Err,error::ErrorKind,IResult}; use $crate::Slice; - let re = ::regex::bytes::Regex::new($re).unwrap(); + let re = $crate::lib::regex::bytes::Regex::new($re).unwrap(); if let Some(m) = re.find(&$i) { Ok(($i.slice(m.end()..), $i.slice(m.start()..m.end()))) } else { - let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpFind::))); + let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpFind))); res } } @@ -192,19 +192,19 @@ macro_rules! re_bytes_find ( /// Returns the first match. Regular expression calculated at compile time /// /// requires the `regexp_macros` feature -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! re_bytes_find_static ( ($i:expr, $re:expr) => ( { use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind,IResult}; + use $crate::{Err,error::ErrorKind,IResult}; use $crate::Slice; regex_bytes!(RE, $re); if let Some(m) = RE.find(&$i) { Ok(($i.slice(m.end()..), $i.slice(m.start()..m.end()))) } else { - let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpFind::))); + let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpFind))); res } } @@ -216,15 +216,15 @@ macro_rules! re_bytes_find_static ( /// Returns all the matched parts /// /// requires the `regexp` feature -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! re_matches ( ($i:expr, $re:expr) => ( { use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind,IResult}; + use $crate::{Err,error::ErrorKind,IResult}; use $crate::Slice; - let re = ::regex::Regex::new($re).unwrap(); + let re = $crate::lib::regex::Regex::new($re).unwrap(); let v: Vec<_> = re.find_iter(&$i).map(|m| $i.slice(m.start()..m.end())).collect(); if v.len() != 0 { let offset = { @@ -233,7 +233,7 @@ macro_rules! re_matches ( }; Ok(($i.slice(offset..), v)) } else { - let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpMatches::))); + let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpMatches))); res } } @@ -245,12 +245,12 @@ macro_rules! re_matches ( /// Returns all the matched parts. Regular expression calculated at compile time /// /// requires the `regexp_macros` feature -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! re_matches_static ( ($i:expr, $re:expr) => ( { use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind,IResult}; + use $crate::{Err,error::ErrorKind,IResult}; use $crate::Slice; regex!(RE, $re); @@ -262,7 +262,7 @@ macro_rules! 
re_matches_static ( }; Ok(($i.slice(offset..), v)) } else { - let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpMatches::))); + let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpMatches))); res } } @@ -273,15 +273,15 @@ macro_rules! re_matches_static ( /// Returns all the matched parts /// /// requires the `regexp` feature -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! re_bytes_matches ( ($i:expr, $re:expr) => ( { use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind,IResult}; + use $crate::{Err,error::ErrorKind,IResult}; use $crate::Slice; - let re = ::regex::bytes::Regex::new($re).unwrap(); + let re = $crate::lib::regex::bytes::Regex::new($re).unwrap(); let v: Vec<_> = re.find_iter(&$i).map(|m| $i.slice(m.start()..m.end())).collect(); if v.len() != 0 { let offset = { @@ -290,7 +290,7 @@ macro_rules! re_bytes_matches ( }; Ok(($i.slice(offset..), v)) } else { - let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpMatches::))); + let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpMatches))); res } } @@ -302,12 +302,12 @@ macro_rules! re_bytes_matches ( /// Returns all the matched parts. Regular expression calculated at compile time /// /// requires the `regexp_macros` feature -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! re_bytes_matches_static ( ($i:expr, $re:expr) => ( { use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind,IResult}; + use $crate::{Err,error::ErrorKind,IResult}; use $crate::Slice; regex_bytes!(RE, $re); @@ -319,7 +319,7 @@ macro_rules! re_bytes_matches_static ( }; Ok(($i.slice(offset..), v)) } else { - let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpMatches::))); + let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpMatches))); res } } @@ -330,15 +330,15 @@ macro_rules! re_bytes_matches_static ( /// Returns the first capture group /// /// requires the `regexp` feature -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! re_capture ( ($i:expr, $re:expr) => ( { use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind,IResult}; + use $crate::{Err,error::ErrorKind,IResult}; use $crate::Slice; - let re = ::regex::Regex::new($re).unwrap(); + let re = $crate::lib::regex::Regex::new($re).unwrap(); if let Some(c) = re.captures(&$i) { let v:Vec<_> = c.iter().filter(|el| el.is_some()).map(|el| el.unwrap()).map(|m| $i.slice(m.start()..m.end())).collect(); let offset = { @@ -347,7 +347,7 @@ macro_rules! re_capture ( }; Ok(($i.slice(offset..), v)) } else { - let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpCapture::))); + let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpCapture))); res } } @@ -359,12 +359,12 @@ macro_rules! re_capture ( /// Returns the first capture group. Regular expression calculated at compile time /// /// requires the `regexp_macros` feature -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! re_capture_static ( ($i:expr, $re:expr) => ( { use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind,IResult}; + use $crate::{Err,error::ErrorKind,IResult}; use $crate::Slice; regex!(RE, $re); @@ -376,7 +376,7 @@ macro_rules! 
re_capture_static ( }; Ok(($i.slice(offset..), v)) } else { - let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpCapture::))); + let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpCapture))); res } } @@ -387,15 +387,15 @@ macro_rules! re_capture_static ( /// Returns the first capture group /// /// requires the `regexp` feature -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! re_bytes_capture ( ($i:expr, $re:expr) => ( { use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind,IResult}; + use $crate::{Err,error::ErrorKind,IResult}; use $crate::Slice; - let re = ::regex::bytes::Regex::new($re).unwrap(); + let re = $crate::lib::regex::bytes::Regex::new($re).unwrap(); if let Some(c) = re.captures(&$i) { let v:Vec<_> = c.iter().filter(|el| el.is_some()).map(|el| el.unwrap()).map(|m| $i.slice(m.start()..m.end())).collect(); let offset = { @@ -404,7 +404,7 @@ macro_rules! re_bytes_capture ( }; Ok(($i.slice(offset..), v)) } else { - let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpCapture::))); + let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpCapture))); res } } @@ -416,12 +416,12 @@ macro_rules! re_bytes_capture ( /// Returns the first capture group. Regular expression calculated at compile time /// /// requires the `regexp_macros` feature -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! re_bytes_capture_static ( ($i:expr, $re:expr) => ( { use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind,IResult}; + use $crate::{Err,error::ErrorKind,IResult}; use $crate::Slice; regex_bytes!(RE, $re); @@ -433,7 +433,7 @@ macro_rules! re_bytes_capture_static ( }; Ok(($i.slice(offset..), v)) } else { - let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpCapture::))); + let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpCapture))); res } } @@ -444,15 +444,15 @@ macro_rules! re_bytes_capture_static ( /// Returns all the capture groups /// /// requires the `regexp` feature -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! re_captures ( ($i:expr, $re:expr) => ( { use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind,IResult}; + use $crate::{Err,error::ErrorKind,IResult}; use $crate::Slice; - let re = ::regex::Regex::new($re).unwrap(); + let re = $crate::lib::regex::Regex::new($re).unwrap(); let v:Vec> = re.captures_iter(&$i) .map(|c| c.iter().filter(|el| el.is_some()).map(|el| el.unwrap()) .map(|m| $i.slice(m.start()..m.end())).collect()).collect(); @@ -463,7 +463,7 @@ macro_rules! re_captures ( }; Ok(($i.slice(offset..), v)) } else { - let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpCapture::))); + let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpCapture))); res } } @@ -475,12 +475,12 @@ macro_rules! re_captures ( /// Returns all the capture groups. Regular expression calculated at compile time /// /// requires the `regexp_macros` feature -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! re_captures_static ( ($i:expr, $re:expr) => ( { use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind,IResult}; + use $crate::{Err,error::ErrorKind,IResult}; use $crate::Slice; regex!(RE, $re); @@ -493,7 +493,7 @@ macro_rules! 
re_captures_static ( }; Ok(($i.slice(offset..), v)) } else { - let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpCapture::))); + let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpCapture))); res } } @@ -504,15 +504,15 @@ macro_rules! re_captures_static ( /// Returns all the capture groups /// /// requires the `regexp` feature -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! re_bytes_captures ( ($i:expr, $re:expr) => ( { use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind,IResult}; + use $crate::{Err,error::ErrorKind,IResult}; use $crate::Slice; - let re = ::regex::bytes::Regex::new($re).unwrap(); + let re = $crate::lib::regex::bytes::Regex::new($re).unwrap(); let v:Vec> = re.captures_iter(&$i) .map(|c| c.iter().filter(|el| el.is_some()).map(|el| el.unwrap()).map(|m| $i.slice(m.start()..m.end())).collect()).collect(); if v.len() != 0 { @@ -522,7 +522,7 @@ macro_rules! re_bytes_captures ( }; Ok(($i.slice(offset..), v)) } else { - let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpCapture::))); + let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpCapture))); res } } @@ -534,12 +534,12 @@ macro_rules! re_bytes_captures ( /// Returns all the capture groups. Regular expression calculated at compile time /// /// requires the `regexp_macros` feature -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! re_bytes_captures_static ( ($i:expr, $re:expr) => ( { use $crate::lib::std::result::Result::*; - use $crate::{Err,ErrorKind,IResult}; + use $crate::{Err,error::ErrorKind,IResult}; use $crate::Slice; regex_bytes!(RE, $re); @@ -552,7 +552,7 @@ macro_rules! re_bytes_captures_static ( }; Ok(($i.slice(offset..), v)) } else { - let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpCapture::))); + let res: IResult<_,_> = Err(Err::Error(error_position!($i, ErrorKind::RegexpCapture))); res } } @@ -561,9 +561,9 @@ macro_rules! 
re_bytes_captures_static ( #[cfg(test)] mod tests { #[cfg(feature = "alloc")] - use lib::std::vec::Vec; - use util::ErrorKind; - use internal::Err; + use crate::lib::std::vec::Vec; + use crate::error::ErrorKind; + use crate::internal::Err; #[test] fn re_match() { @@ -573,7 +573,7 @@ mod tests { rm("blah"), Err(Err::Error(error_position!( &"blah"[..], - ErrorKind::RegexpMatch:: + ErrorKind::RegexpMatch ),)) ); assert_eq!(rm("2015-09-07blah"), Ok(("", "2015-09-07blah"))); @@ -588,7 +588,7 @@ mod tests { rm("blah"), Err(Err::Error(error_position!( &"blah"[..], - ErrorKind::RegexpMatch:: + ErrorKind::RegexpMatch ),)) ); assert_eq!(rm("2015-09-07blah"), Ok(("", "2015-09-07blah"))); @@ -602,7 +602,7 @@ mod tests { rm("blah"), Err(Err::Error(error_position!( &"blah"[..], - ErrorKind::RegexpFind:: + ErrorKind::RegexpFind ),)) ); assert_eq!(rm("2015-09-07blah"), Ok(("blah", "2015-09-07"))); @@ -617,7 +617,7 @@ mod tests { rm("blah"), Err(Err::Error(error_position!( &"blah"[..], - ErrorKind::RegexpFind:: + ErrorKind::RegexpFind ),)) ); assert_eq!(rm("2015-09-07blah"), Ok(("blah", "2015-09-07"))); @@ -632,7 +632,7 @@ mod tests { rm("blah"), Err(Err::Error(error_position!( &"blah"[..], - ErrorKind::RegexpMatches:: + ErrorKind::RegexpMatches ))) ); assert_eq!( @@ -651,7 +651,7 @@ mod tests { rm("blah"), Err(Err::Error(error_position!( &"blah"[..], - ErrorKind::RegexpMatches:: + ErrorKind::RegexpMatches ))) ); assert_eq!( @@ -672,7 +672,7 @@ mod tests { rm("blah"), Err(Err::Error(error_position!( &"blah"[..], - ErrorKind::RegexpCapture:: + ErrorKind::RegexpCapture ))) ); assert_eq!( @@ -697,7 +697,7 @@ mod tests { rm("blah"), Err(Err::Error(error_position!( &"blah"[..], - ErrorKind::RegexpCapture:: + ErrorKind::RegexpCapture ))) ); assert_eq!( @@ -724,7 +724,7 @@ mod tests { rm("blah"), Err(Err::Error(error_position!( &"blah"[..], - ErrorKind::RegexpCapture:: + ErrorKind::RegexpCapture ))) ); assert_eq!( @@ -755,7 +755,7 @@ mod tests { rm("blah"), Err(Err::Error(error_position!( &"blah"[..], - ErrorKind::RegexpCapture:: + ErrorKind::RegexpCapture ))) ); assert_eq!( @@ -778,7 +778,7 @@ mod tests { rm(&b"blah"[..]), Err(Err::Error(error_position!( &b"blah"[..], - ErrorKind::RegexpMatch:: + ErrorKind::RegexpMatch ))) ); assert_eq!( @@ -796,7 +796,7 @@ mod tests { rm(&b"blah"[..]), Err(Err::Error(error_position!( &b"blah"[..], - ErrorKind::RegexpMatch:: + ErrorKind::RegexpMatch ))) ); assert_eq!( @@ -813,7 +813,7 @@ mod tests { rm(&b"blah"[..]), Err(Err::Error(error_position!( &b"blah"[..], - ErrorKind::RegexpFind:: + ErrorKind::RegexpFind ))) ); assert_eq!( @@ -831,7 +831,7 @@ mod tests { rm(&b"blah"[..]), Err(Err::Error(error_position!( &b"blah"[..], - ErrorKind::RegexpFind:: + ErrorKind::RegexpFind ))) ); assert_eq!( @@ -852,7 +852,7 @@ mod tests { rm(&b"blah"[..]), Err(Err::Error(error_position!( &b"blah"[..], - ErrorKind::RegexpMatches:: + ErrorKind::RegexpMatches ))) ); assert_eq!( @@ -877,7 +877,7 @@ mod tests { rm(&b"blah"[..]), Err(Err::Error(error_position!( &b"blah"[..], - ErrorKind::RegexpMatches:: + ErrorKind::RegexpMatches ))) ); assert_eq!( @@ -911,7 +911,7 @@ mod tests { rm(&b"blah"[..]), Err(Err::Error(error_position!( &b"blah"[..], - ErrorKind::RegexpCapture:: + ErrorKind::RegexpCapture ))) ); assert_eq!( @@ -956,7 +956,7 @@ mod tests { rm(&b"blah"[..]), Err(Err::Error(error_position!( &b"blah"[..], - ErrorKind::RegexpCapture:: + ErrorKind::RegexpCapture ))) ); assert_eq!( @@ -1002,7 +1002,7 @@ mod tests { rm(&b"blah"[..]), Err(Err::Error(error_position!( &b"blah"[..], - 
ErrorKind::RegexpCapture:: + ErrorKind::RegexpCapture ))) ); assert_eq!( @@ -1059,7 +1059,7 @@ mod tests { rm(&b"blah"[..]), Err(Err::Error(error_position!( &b"blah"[..], - ErrorKind::RegexpCapture:: + ErrorKind::RegexpCapture ))) ); assert_eq!( diff --git a/third_party/rust/nom/src/sequence.rs b/third_party/rust/nom/src/sequence/macros.rs similarity index 65% rename from third_party/rust/nom/src/sequence.rs rename to third_party/rust/nom/src/sequence/macros.rs index df44f3b499..a5a3351499 100644 --- a/third_party/rust/nom/src/sequence.rs +++ b/third_party/rust/nom/src/sequence/macros.rs @@ -8,8 +8,8 @@ /// /// ``` /// # #[macro_use] extern crate nom; -/// # use nom::ErrorKind; -/// # use nom::be_u16; +/// # use nom::error::ErrorKind; +/// # use nom::number::streaming::be_u16; /// // the return type depends of the children parsers /// named!(parser<&[u8], (u16, &[u8], &[u8]) >, /// tuple!( @@ -29,7 +29,7 @@ /// ); /// # } /// ``` -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! tuple ( ($i:expr, $($rest:tt)*) => ( { @@ -40,7 +40,7 @@ macro_rules! tuple ( /// Internal parser, do not use directly #[doc(hidden)] -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! tuple_parser ( ($i:expr, ($($parsed:tt),*), $e:path, $($rest:tt)*) => ( tuple_parser!($i, ($($parsed),*), call!($e), $($rest)*); @@ -88,108 +88,140 @@ macro_rules! tuple_parser ( ); /// `pair!(I -> IResult, I -> IResult) => I -> IResult` -/// pair(X,Y), returns (x,y) +/// pair returns a tuple of the results of its two child parsers of both succeed /// -#[macro_export] +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::Err; +/// # use nom::error::ErrorKind; +/// # use nom::character::complete::{alpha1, digit1}; +/// named!(parser<&str, (&str, &str)>, pair!(alpha1, digit1)); +/// +/// # fn main() { +/// assert_eq!(parser("abc123"), Ok(("", ("abc", "123")))); +/// assert_eq!(parser("123abc"), Err(Err::Error(("123abc", ErrorKind::Alpha)))); +/// assert_eq!(parser("abc;123"), Err(Err::Error((";123", ErrorKind::Digit)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] macro_rules! 
pair( ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( - { - tuple!($i, $submac!($($args)*), $submac2!($($args2)*)) - } + pair!($i, |i| $submac!(i, $($args)*), |i| $submac2!(i, $($args2)*)) ); ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( - pair!($i, $submac!($($args)*), call!($g)); + pair!($i, |i| $submac!(i, $($args)*), $g); ); ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( - pair!($i, call!($f), $submac!($($args)*)); + pair!($i, $f, |i| $submac!(i, $($args)*)); ); ($i:expr, $f:expr, $g:expr) => ( - pair!($i, call!($f), call!($g)); + $crate::sequence::pairc($i, $f, $g) ); ); /// `separated_pair!(I -> IResult, I -> IResult, I -> IResult) => I -> IResult` -/// separated_pair(X,sep,Y) returns (x,y) -#[macro_export] +/// separated_pair(X,sep,Y) returns a tuple of its first and third child parsers +/// if all 3 succeed +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::Err; +/// # use nom::error::ErrorKind; +/// # use nom::character::complete::{alpha1, digit1}; +/// named!(parser<&str, (&str, &str)>, separated_pair!(alpha1, char!(','), digit1)); +/// +/// # fn main() { +/// assert_eq!(parser("abc,123"), Ok(("", ("abc", "123")))); +/// assert_eq!(parser("123,abc"), Err(Err::Error(("123,abc", ErrorKind::Alpha)))); +/// assert_eq!(parser("abc;123"), Err(Err::Error((";123", ErrorKind::Char)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] macro_rules! separated_pair( - ($i:expr, $submac:ident!( $($args:tt)* ), $($rest:tt)+) => ( - { - use $crate::lib::std::result::Result::*; - - match tuple_parser!($i, (), $submac!($($args)*), $($rest)*) { - Err(e) => Err(e), - Ok((i1, (o1, _, o2))) => { - Ok((i1, (o1, o2))) - } - } - } + ($i:expr, $submac:ident!( $($args:tt)* ), $($rest:tt)*) => ( + separated_pair!($i, |i| $submac!(i, $($args)*), $($rest)*) ); - - ($i:expr, $f:expr, $($rest:tt)+) => ( - separated_pair!($i, call!($f), $($rest)*); + ($i:expr, $f:expr, $submac:ident!( $($args:tt)* ), $($rest:tt)*) => ( + separated_pair!($i, $f, |i| $submac!(i, $($args)*), $($rest)*) + ); + ($i:expr, $f:expr, $g:expr, $submac:ident!( $($args:tt)* )) => ( + separated_pair!($i, $f, $g, |i| $submac!(i, $($args)*)) + ); + ($i:expr, $f:expr, $g:expr, $h:expr) => ( + $crate::sequence::separated_pairc($i, $f, $g, $h) ); ); /// `preceded!(I -> IResult, I -> IResult) => I -> IResult` -/// preceded(opening, X) returns X -#[macro_export] +/// preceded returns the result of its second parser if both succeed +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::Err; +/// # use nom::error::ErrorKind; +/// # use nom::character::complete::{alpha1}; +/// named!(parser<&str, &str>, preceded!(char!('-'), alpha1)); +/// +/// # fn main() { +/// assert_eq!(parser("-abc"), Ok(("", "abc"))); +/// assert_eq!(parser("abc"), Err(Err::Error(("abc", ErrorKind::Char)))); +/// assert_eq!(parser("-123"), Err(Err::Error(("123", ErrorKind::Alpha)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] macro_rules! 
preceded( ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - - match tuple!($i, $submac!($($args)*), $submac2!($($args2)*)) { - Err(e) => Err(e), - Ok((remaining, (_,o))) => { - Ok((remaining, o)) - } - } - } + preceded!($i, |i| $submac!(i, $($args)*), |i| $submac2!(i, $($args2)*)) ); ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( - preceded!($i, $submac!($($args)*), call!($g)); + preceded!($i, |i| $submac!(i, $($args)*), $g); ); ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( - preceded!($i, call!($f), $submac!($($args)*)); + preceded!($i, $f, |i| $submac!(i, $($args)*)); ); ($i:expr, $f:expr, $g:expr) => ( - preceded!($i, call!($f), call!($g)); + $crate::sequence::precededc($i, $f, $g) ); ); /// `terminated!(I -> IResult, I -> IResult) => I -> IResult` -/// terminated(X, closing) returns X -#[macro_export] +/// terminated returns the result of its first parser if both succeed +/// +/// ``` +/// # #[macro_use] extern crate nom; +/// # use nom::Err; +/// # use nom::error::ErrorKind; +/// # use nom::character::complete::{alpha1}; +/// named!(parser<&str, &str>, terminated!(alpha1, char!(';'))); +/// +/// # fn main() { +/// assert_eq!(parser("abc;"), Ok(("", "abc"))); +/// assert_eq!(parser("abc,"), Err(Err::Error((",", ErrorKind::Char)))); +/// assert_eq!(parser("123;"), Err(Err::Error(("123;", ErrorKind::Alpha)))); +/// # } +/// ``` +#[macro_export(local_inner_macros)] macro_rules! terminated( ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - - match tuple!($i, $submac!($($args)*), $submac2!($($args2)*)) { - Err(e) => Err(e), - Ok((remaining, (o,_))) => { - Ok((remaining, o)) - } - } - } + terminated!($i, |i| $submac!(i, $($args)*), |i| $submac2!(i, $($args2)*)) ); ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( - terminated!($i, $submac!($($args)*), call!($g)); + terminated!($i, |i| $submac!(i, $($args)*), $g); ); ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( - terminated!($i, call!($f), $submac!($($args)*)); + terminated!($i, $f, |i| $submac!(i, $($args)*)); ); ($i:expr, $f:expr, $g:expr) => ( - terminated!($i, call!($f), call!($g)); + $crate::sequence::terminatedc($i, $f, $g) ); ); @@ -198,36 +230,33 @@ macro_rules! terminated( /// /// ``` /// # #[macro_use] extern crate nom; -/// named!(bracketed, +/// # use nom::character::complete::{alpha1}; +/// named!(parens, /// delimited!( /// tag!("("), -/// take_until!(")"), +/// alpha1, /// tag!(")") /// ) /// ); /// /// # fn main() { /// let input = &b"(test)"[..]; -/// assert_eq!(bracketed(input), Ok((&b""[..], &b"test"[..]))); +/// assert_eq!(parens(input), Ok((&b""[..], &b"test"[..]))); /// # } /// ``` -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! 
delimited( - ($i:expr, $submac:ident!( $($args:tt)* ), $($rest:tt)+) => ( - { - use $crate::lib::std::result::Result::*; - - match tuple_parser!($i, (), $submac!($($args)*), $($rest)*) { - Err(e) => Err(e), - Ok((i1, (_, o, _))) => { - Ok((i1, o)) - } - } - } + ($i:expr, $submac:ident!( $($args:tt)* ), $($rest:tt)*) => ( + delimited!($i, |i| $submac!(i, $($args)*), $($rest)*) ); - - ($i:expr, $f:expr, $($rest:tt)+) => ( - delimited!($i, call!($f), $($rest)*); + ($i:expr, $f:expr, $submac:ident!( $($args:tt)* ), $($rest:tt)*) => ( + delimited!($i, $f, |i| $submac!(i, $($args)*), $($rest)*) + ); + ($i:expr, $f:expr, $g:expr, $submac:ident!( $($args:tt)* )) => ( + delimited!($i, $f, $g, |i| $submac!(i, $($args)*)) + ); + ($i:expr, $f:expr, $g:expr, $h:expr) => ( + $crate::sequence::delimitedc($i, $f, $g, $h) ); ); @@ -244,7 +273,7 @@ macro_rules! delimited( /// ``` /// # #[macro_use] extern crate nom; /// # use nom::{Err,Needed}; -/// use nom::be_u8; +/// use nom::number::streaming::be_u8; /// /// // this parser implements a common pattern in binary formats, /// // the TAG-LENGTH-VALUE, where you first recognize a specific @@ -287,7 +316,7 @@ macro_rules! delimited( /// /// ``` /// # #[macro_use] extern crate nom; -/// use nom::be_u8; +/// use nom::number::streaming::be_u8; /// named!(tag_length_value<(u8, &[u8])>, /// do_parse!( /// tag!( &[ 42u8 ][..] ) >> @@ -301,7 +330,7 @@ macro_rules! delimited( /// # } /// ``` /// -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! do_parse ( (__impl $i:expr, ( $($rest:expr),* )) => ( $crate::lib::std::result::Result::Ok(($i, ( $($rest),* ))) @@ -312,7 +341,7 @@ macro_rules! do_parse ( ); (__impl $i:expr, $submac:ident!( $($args:tt)* ) ) => ( - compile_error!("do_parse is missing the return value. A do_parse call must end + nom_compile_error!("do_parse is missing the return value. A do_parse call must end with a return value between parenthesis, as follows: do_parse!( @@ -324,10 +353,10 @@ macro_rules! do_parse ( ); (__impl $i:expr, $field:ident : $submac:ident!( $($args:tt)* ) ~ $($rest:tt)* ) => ( - compile_error!("do_parse uses >> as separator, not ~"); + nom_compile_error!("do_parse uses >> as separator, not ~"); ); (__impl $i:expr, $submac:ident!( $($args:tt)* ) ~ $($rest:tt)* ) => ( - compile_error!("do_parse uses >> as separator, not ~"); + nom_compile_error!("do_parse uses >> as separator, not ~"); ); (__impl $i:expr, $field:ident : $e:ident ~ $($rest:tt)*) => ( do_parse!(__impl $i, $field: call!($e) ~ $($rest)*); @@ -422,7 +451,7 @@ macro_rules! do_parse ( } ); ($submac:ident!( $($args:tt)* ) >> $($rest:tt)* ) => ( - compile_error!("if you are using do_parse outside of a named! macro, you must + nom_compile_error!("if you are using do_parse outside of a named! macro, you must pass the input data as first argument, like this: let res = do_parse!(input, @@ -436,14 +465,17 @@ macro_rules! do_parse ( ); ); +#[doc(hidden)] +#[macro_export] +macro_rules! nom_compile_error ( + (( $($args:tt)* )) => ( compile_error!($($args)*) ); +); + #[cfg(test)] mod tests { - use internal::{Err, IResult, Needed}; - use util::ErrorKind; - use nom::be_u16; - #[cfg(feature = "alloc")] - #[cfg(feature = "verbose-errors")] - use lib::std::vec::Vec; + use crate::internal::{Err, IResult, Needed}; + use crate::number::streaming::be_u16; + use crate::error::ErrorKind; // reproduce the tag and take macros, because of module import order macro_rules! tag ( @@ -465,7 +497,6 @@ mod tests { macro_rules! 
tag_bytes ( ($i:expr, $bytes: expr) => ( { - use $crate::need_more; use $crate::lib::std::cmp::min; let len = $i.len(); @@ -474,10 +505,10 @@ mod tests { let reduced = &$i[..m]; let b = &$bytes[..m]; - let res: IResult<_,_,u32> = if reduced != b { - Err($crate::Err::Error(error_position!($i, $crate::ErrorKind::Tag::))) + let res: IResult<_,_,_> = if reduced != b { + Err($crate::Err::Error(error_position!($i, $crate::error::ErrorKind::Tag))) } else if m < blen { - need_more($i, Needed::Size(blen)) + Err($crate::Err::Incomplete(Needed::Size(blen))) } else { Ok((&$i[blen..], reduced)) }; @@ -489,10 +520,9 @@ mod tests { macro_rules! take ( ($i:expr, $count:expr) => ( { - use $crate::need_more; let cnt = $count as usize; - let res:IResult<&[u8],&[u8],u32> = if $i.len() < cnt { - need_more($i, Needed::Size(cnt)) + let res:IResult<&[u8],&[u8],_> = if $i.len() < cnt { + Err($crate::Err::Incomplete(Needed::Size(cnt))) } else { Ok((&$i[cnt..],&$i[0..cnt])) }; @@ -513,24 +543,19 @@ mod tests { b: Option, } - #[cfg(all(feature = "std", feature = "verbose-errors"))] + /*FIXME: convert code examples to new error management use util::{add_error_pattern, error_to_list, print_error}; - #[cfg(feature = "verbose-errors")] - use verbose_errors::Context; #[cfg(feature = "std")] - #[cfg(feature = "verbose-errors")] #[cfg_attr(rustfmt, rustfmt_skip)] fn error_to_string(e: &Context) -> &'static str { let v: Vec<(P, ErrorKind)> = error_to_list(e); // do it this way if you can use slice patterns - /* - match &v[..] { - [ErrorKind::Custom(42), ErrorKind::Tag] => "missing `ijkl` tag", - [ErrorKind::Custom(42), ErrorKind::Custom(128), ErrorKind::Tag] => "missing `mnop` tag after `ijkl`", - _ => "unrecognized error" - } - */ + //match &v[..] { + // [ErrorKind::Custom(42), ErrorKind::Tag] => "missing `ijkl` tag", + // [ErrorKind::Custom(42), ErrorKind::Custom(128), ErrorKind::Tag] => "missing `mnop` tag after `ijkl`", + // _ => "unrecognized error" + //} let collected: Vec> = v.iter().map(|&(_, ref e)| e.clone()).collect(); if &collected[..] 
== [ErrorKind::Custom(42), ErrorKind::Tag] { @@ -543,103 +568,19 @@ mod tests { } // do it this way if you can use box patterns - /*use $crate::lib::std::str; - fn error_to_string(e:Err) -> String - match e { - NodePosition(ErrorKind::Custom(42), i1, box Position(ErrorKind::Tag, i2)) => { - format!("missing `ijkl` tag, found '{}' instead", str::from_utf8(i2).unwrap()) - }, - NodePosition(ErrorKind::Custom(42), i1, box NodePosition(ErrorKind::Custom(128), i2, box Position(ErrorKind::Tag, i3))) => { - format!("missing `mnop` tag after `ijkl`, found '{}' instead", str::from_utf8(i3).unwrap()) - }, - _ => "unrecognized error".to_string() - } - }*/ - - #[cfg(feature = "verbose-errors")] - #[cfg(feature = "std")] - use std::collections; - - #[cfg_attr(rustfmt, rustfmt_skip)] - #[cfg(feature = "std")] - #[cfg(feature = "verbose-errors")] - #[test] - fn err() { - named!(err_test, alt!( - tag!("abcd") | - preceded!( - tag!("efgh"), - return_error!( - ErrorKind::Custom(42), - do_parse!( - tag!("ijkl") >> - res: return_error!(ErrorKind::Custom(128), tag!("mnop")) >> - (res) - ) - ) - ) - )); - let a = &b"efghblah"[..]; - let b = &b"efghijklblah"[..]; - let c = &b"efghijklmnop"[..]; - - let blah = &b"blah"[..]; - - let res_a = err_test(a); - let res_b = err_test(b); - let res_c = err_test(c); - assert_eq!(res_a, - Err(Err::Failure(error_node_position!(blah, - ErrorKind::Custom(42), - error_position!(blah, ErrorKind::Tag))))); - assert_eq!(res_b, - Err(Err::Failure(error_node_position!(&b"ijklblah"[..], ErrorKind::Custom(42), - error_node_position!(blah, ErrorKind::Custom(128), error_position!(blah, ErrorKind::Tag)))))); - assert_eq!(res_c, Ok((&b""[..], &b"mnop"[..]))); - - // Merr-like error matching - let mut err_map = collections::HashMap::new(); - assert!(add_error_pattern(&mut err_map, - err_test(&b"efghpouet"[..]), - "missing `ijkl` tag")); - assert!(add_error_pattern(&mut err_map, - err_test(&b"efghijklpouet"[..]), - "missing `mnop` tag after `ijkl`")); - - let res_a2 = res_a.clone(); - match res_a { - Err(Err::Error(e)) | - Err(Err::Failure(e)) => { - let collected: Vec> = - error_to_list(&e).iter().map(|&(_, ref e)| e.clone()).collect(); - assert_eq!(collected, [ErrorKind::Custom(42), ErrorKind::Tag]); - assert_eq!(error_to_string(&e), "missing `ijkl` tag"); - //FIXME: why? - //assert_eq!(err_map.get(&error_to_list(&e)), Some(&"missing `ijkl` tag")); - assert_eq!(err_map.get(&error_to_list(&e)), None); - } - _ => panic!(), - }; - - let res_b2 = res_b.clone(); - match res_b { - Err(Err::Error(e)) | - Err(Err::Failure(e)) => { - let collected: Vec> = - error_to_list(&e).iter().map(|&(_, ref e)| e.clone()).collect(); - assert_eq!(collected, - [ErrorKind::Custom(42), ErrorKind::Custom(128), ErrorKind::Tag]); - assert_eq!(error_to_string(&e), "missing `mnop` tag after `ijkl`"); - //FIXME: why? 
- //assert_eq!(err_map.get(&error_to_list(&e)), Some(&"missing `mnop` tag after `ijkl`")); - assert_eq!(err_map.get(&error_to_list(&e)), None); - } - _ => panic!(), - }; - - print_error(a, res_a2); - print_error(b, res_b2); - } + //use $crate::lib::std::str; + //fn error_to_string(e:Err) -> String + // match e { + // NodePosition(ErrorKind::Custom(42), i1, box Position(ErrorKind::Tag, i2)) => { + // format!("missing `ijkl` tag, found '{}' instead", str::from_utf8(i2).unwrap()) + // }, + // NodePosition(ErrorKind::Custom(42), i1, box NodePosition(ErrorKind::Custom(128), i2, box Position(ErrorKind::Tag, i3))) => { + // format!("missing `mnop` tag after `ijkl`, found '{}' instead", str::from_utf8(i3).unwrap()) + // }, + // _ => "unrecognized error".to_string() + // } + //} + */ #[cfg_attr(rustfmt, rustfmt_skip)] #[allow(unused_variables)] @@ -649,10 +590,12 @@ mod tests { preceded!( tag!("efgh"), add_return_error!( - ErrorKind::Custom(42u32), + //ErrorKind::Custom(42u32), + ErrorKind::Char, do_parse!( tag!("ijkl") >> - res: add_return_error!(ErrorKind::Custom(128u32), tag!("mnop")) >> + //res: add_return_error!(ErrorKind::Custom(128u32), tag!("mnop")) >> + res: add_return_error!(ErrorKind::Eof, tag!("mnop")) >> (res) ) ) @@ -669,10 +612,13 @@ mod tests { let res_c = err_test(c); assert_eq!(res_a, Err(Err::Error(error_node_position!(blah, - ErrorKind::Custom(42u32), + //ErrorKind::Custom(42u32), + ErrorKind::Eof, error_position!(blah, ErrorKind::Tag))))); - assert_eq!(res_b, Err(Err::Error(error_node_position!(&b"ijklblah"[..], ErrorKind::Custom(42u32), - error_node_position!(blah, ErrorKind::Custom(128u32), error_position!(blah, ErrorKind::Tag)))))); + //assert_eq!(res_b, Err(Err::Error(error_node_position!(&b"ijklblah"[..], ErrorKind::Custom(42u32), + // error_node_position!(blah, ErrorKind::Custom(128u32), error_position!(blah, ErrorKind::Tag)))))); + assert_eq!(res_b, Err(Err::Error(error_node_position!(&b"ijklblah"[..], ErrorKind::Eof, + error_node_position!(blah, ErrorKind::Eof, error_position!(blah, ErrorKind::Tag)))))); assert_eq!(res_c, Ok((&b""[..], &b"mnop"[..]))); } @@ -699,18 +645,9 @@ mod tests { named!(tag_def, tag!("def")); named!( pair_abc_def<&[u8],(&[u8], &[u8])>, pair!(tag_abc, tag_def) ); - assert_eq!( - pair_abc_def(&b"abcdefghijkl"[..]), - Ok((&b"ghijkl"[..], (&b"abc"[..], &b"def"[..]))) - ); - assert_eq!( - pair_abc_def(&b"ab"[..]), - Err(Err::Incomplete(Needed::Size(3))) - ); - assert_eq!( - pair_abc_def(&b"abcd"[..]), - Err(Err::Incomplete(Needed::Size(3))) - ); + assert_eq!(pair_abc_def(&b"abcdefghijkl"[..]), Ok((&b"ghijkl"[..], (&b"abc"[..], &b"def"[..])))); + assert_eq!(pair_abc_def(&b"ab"[..]), Err(Err::Incomplete(Needed::Size(3)))); + assert_eq!(pair_abc_def(&b"abcd"[..]), Err(Err::Incomplete(Needed::Size(3)))); assert_eq!( pair_abc_def(&b"xxx"[..]), Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) @@ -736,14 +673,8 @@ mod tests { sep_pair_abc_def(&b"abc,defghijkl"[..]), Ok((&b"ghijkl"[..], (&b"abc"[..], &b"def"[..]))) ); - assert_eq!( - sep_pair_abc_def(&b"ab"[..]), - Err(Err::Incomplete(Needed::Size(3))) - ); - assert_eq!( - sep_pair_abc_def(&b"abc,d"[..]), - Err(Err::Incomplete(Needed::Size(3))) - ); + assert_eq!(sep_pair_abc_def(&b"ab"[..]), Err(Err::Incomplete(Needed::Size(3)))); + assert_eq!(sep_pair_abc_def(&b"abc,d"[..]), Err(Err::Incomplete(Needed::Size(3)))); assert_eq!( sep_pair_abc_def(&b"xxx"[..]), Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) @@ -764,18 +695,9 @@ mod tests { named!(tag_efgh, tag!("efgh")); named!( 
preceded_abcd_efgh<&[u8], &[u8]>, preceded!(tag_abcd, tag_efgh) ); - assert_eq!( - preceded_abcd_efgh(&b"abcdefghijkl"[..]), - Ok((&b"ijkl"[..], &b"efgh"[..])) - ); - assert_eq!( - preceded_abcd_efgh(&b"ab"[..]), - Err(Err::Incomplete(Needed::Size(4))) - ); - assert_eq!( - preceded_abcd_efgh(&b"abcde"[..]), - Err(Err::Incomplete(Needed::Size(4))) - ); + assert_eq!(preceded_abcd_efgh(&b"abcdefghijkl"[..]), Ok((&b"ijkl"[..], &b"efgh"[..]))); + assert_eq!(preceded_abcd_efgh(&b"ab"[..]), Err(Err::Incomplete(Needed::Size(4)))); + assert_eq!(preceded_abcd_efgh(&b"abcde"[..]), Err(Err::Incomplete(Needed::Size(4)))); assert_eq!( preceded_abcd_efgh(&b"xxx"[..]), Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) @@ -796,18 +718,9 @@ mod tests { named!(tag_efgh, tag!("efgh")); named!( terminated_abcd_efgh<&[u8], &[u8]>, terminated!(tag_abcd, tag_efgh) ); - assert_eq!( - terminated_abcd_efgh(&b"abcdefghijkl"[..]), - Ok((&b"ijkl"[..], &b"abcd"[..])) - ); - assert_eq!( - terminated_abcd_efgh(&b"ab"[..]), - Err(Err::Incomplete(Needed::Size(4))) - ); - assert_eq!( - terminated_abcd_efgh(&b"abcde"[..]), - Err(Err::Incomplete(Needed::Size(4))) - ); + assert_eq!(terminated_abcd_efgh(&b"abcdefghijkl"[..]), Ok((&b"ijkl"[..], &b"abcd"[..]))); + assert_eq!(terminated_abcd_efgh(&b"ab"[..]), Err(Err::Incomplete(Needed::Size(4)))); + assert_eq!(terminated_abcd_efgh(&b"abcde"[..]), Err(Err::Incomplete(Needed::Size(4)))); assert_eq!( terminated_abcd_efgh(&b"xxx"[..]), Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) @@ -829,32 +742,17 @@ mod tests { named!(tag_ghi, tag!("ghi")); named!( delimited_abc_def_ghi<&[u8], &[u8]>, delimited!(tag_abc, tag_def, tag_ghi) ); - assert_eq!( - delimited_abc_def_ghi(&b"abcdefghijkl"[..]), - Ok((&b"jkl"[..], &b"def"[..])) - ); - assert_eq!( - delimited_abc_def_ghi(&b"ab"[..]), - Err(Err::Incomplete(Needed::Size(3))) - ); - assert_eq!( - delimited_abc_def_ghi(&b"abcde"[..]), - Err(Err::Incomplete(Needed::Size(3))) - ); - assert_eq!( - delimited_abc_def_ghi(&b"abcdefgh"[..]), - Err(Err::Incomplete(Needed::Size(3))) - ); + assert_eq!(delimited_abc_def_ghi(&b"abcdefghijkl"[..]), Ok((&b"jkl"[..], &b"def"[..]))); + assert_eq!(delimited_abc_def_ghi(&b"ab"[..]), Err(Err::Incomplete(Needed::Size(3)))); + assert_eq!(delimited_abc_def_ghi(&b"abcde"[..]), Err(Err::Incomplete(Needed::Size(3)))); + assert_eq!(delimited_abc_def_ghi(&b"abcdefgh"[..]), Err(Err::Incomplete(Needed::Size(3)))); assert_eq!( delimited_abc_def_ghi(&b"xxx"[..]), Err(Err::Error(error_position!(&b"xxx"[..], ErrorKind::Tag))) ); assert_eq!( delimited_abc_def_ghi(&b"xxxdefghi"[..]), - Err(Err::Error(error_position!( - &b"xxxdefghi"[..], - ErrorKind::Tag - ),)) + Err(Err::Error(error_position!(&b"xxxdefghi"[..], ErrorKind::Tag),)) ); assert_eq!( delimited_abc_def_ghi(&b"abcxxxghi"[..]), @@ -871,15 +769,9 @@ mod tests { named!(tuple_3<&[u8], (u16, &[u8], &[u8]) >, tuple!( be_u16 , take!(3), tag!("fg") ) ); - assert_eq!( - tuple_3(&b"abcdefgh"[..]), - Ok((&b"h"[..], (0x6162u16, &b"cde"[..], &b"fg"[..]))) - ); + assert_eq!(tuple_3(&b"abcdefgh"[..]), Ok((&b"h"[..], (0x6162u16, &b"cde"[..], &b"fg"[..])))); assert_eq!(tuple_3(&b"abcd"[..]), Err(Err::Incomplete(Needed::Size(3)))); - assert_eq!( - tuple_3(&b"abcde"[..]), - Err(Err::Incomplete(Needed::Size(2))) - ); + assert_eq!(tuple_3(&b"abcde"[..]), Err(Err::Incomplete(Needed::Size(2)))); assert_eq!( tuple_3(&b"abcdejk"[..]), Err(Err::Error(error_position!(&b"jk"[..], ErrorKind::Tag))) @@ -915,25 +807,16 @@ mod tests { //trace_macros!(false); - assert_eq!( - 
do_parser(&b"abcdabcdefghefghX"[..]), - Ok((&b"X"[..], (1, 2))) - ); + assert_eq!(do_parser(&b"abcdabcdefghefghX"[..]), Ok((&b"X"[..], (1, 2)))); assert_eq!(do_parser(&b"abcdefghefghX"[..]), Ok((&b"X"[..], (1, 2)))); - assert_eq!( - do_parser(&b"abcdab"[..]), - Err(Err::Incomplete(Needed::Size(4))) - ); - assert_eq!( - do_parser(&b"abcdefghef"[..]), - Err(Err::Incomplete(Needed::Size(4))) - ); + assert_eq!(do_parser(&b"abcdab"[..]), Err(Err::Incomplete(Needed::Size(4)))); + assert_eq!(do_parser(&b"abcdefghef"[..]), Err(Err::Incomplete(Needed::Size(4)))); } #[cfg_attr(rustfmt, rustfmt_skip)] #[test] fn do_parse_dependency() { - use nom::be_u8; + use crate::number::streaming::be_u8; named!(length_value, do_parse!( diff --git a/third_party/rust/nom/src/sequence/mod.rs b/third_party/rust/nom/src/sequence/mod.rs new file mode 100644 index 0000000000..a0eafb97bd --- /dev/null +++ b/third_party/rust/nom/src/sequence/mod.rs @@ -0,0 +1,313 @@ +//! combinators applying parsers in sequence + +#[macro_use] +mod macros; + +use crate::internal::IResult; +use crate::error::ParseError; + +/// Gets an object from the first parser, +/// then gets another object from the second parser. +/// +/// # Arguments +/// * `first` The first parser to apply. +/// * `second` The second parser to apply. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::sequence::pair; +/// use nom::bytes::complete::tag; +/// +/// let parser = pair(tag("abc"), tag("efg")); +/// +/// assert_eq!(parser("abcefg"), Ok(("", ("abc", "efg")))); +/// assert_eq!(parser("abcefghij"), Ok(("hij", ("abc", "efg")))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Tag)))); +/// assert_eq!(parser("123"), Err(Err::Error(("123", ErrorKind::Tag)))); +/// ``` +pub fn pair, F, G>(first: F, second: G) -> impl Fn(I) -> IResult +where + F: Fn(I) -> IResult, + G: Fn(I) -> IResult, +{ + move |input: I| { + let (input, o1) = first(input)?; + second(input).map(|(i, o2)| (i, (o1, o2))) + } +} + +// this implementation is used for type inference issues in macros +#[doc(hidden)] +pub fn pairc, F, G>(input: I, first: F, second: G) -> IResult +where + F: Fn(I) -> IResult, + G: Fn(I) -> IResult, +{ + pair(first, second)(input) +} + +/// Matches an object from the first parser and discards it, +/// then gets an object from the second parser. +/// +/// # Arguments +/// * `first` The opening parser. +/// * `second` The second parser to get object. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::sequence::preceded; +/// use nom::bytes::complete::tag; +/// +/// let parser = preceded(tag("abc"), tag("efg")); +/// +/// assert_eq!(parser("abcefg"), Ok(("", "efg"))); +/// assert_eq!(parser("abcefghij"), Ok(("hij", "efg"))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Tag)))); +/// assert_eq!(parser("123"), Err(Err::Error(("123", ErrorKind::Tag)))); +/// ``` +pub fn preceded, F, G>(first: F, second: G) -> impl Fn(I) -> IResult +where + F: Fn(I) -> IResult, + G: Fn(I) -> IResult, +{ + move |input: I| { + let (input, _) = first(input)?; + second(input) + } +} + +// this implementation is used for type inference issues in macros +#[doc(hidden)] +pub fn precededc, F, G>(input: I, first: F, second: G) -> IResult +where + F: Fn(I) -> IResult, + G: Fn(I) -> IResult, +{ + preceded(first, second)(input) +} + +/// Gets an object from the first parser, +/// then matches an object from the second parser and discards it. 
+/// +/// # Arguments +/// * `first` The first parser to apply. +/// * `second` The second parser to match an object. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::sequence::terminated; +/// use nom::bytes::complete::tag; +/// +/// let parser = terminated(tag("abc"), tag("efg")); +/// +/// assert_eq!(parser("abcefg"), Ok(("", "abc"))); +/// assert_eq!(parser("abcefghij"), Ok(("hij", "abc"))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Tag)))); +/// assert_eq!(parser("123"), Err(Err::Error(("123", ErrorKind::Tag)))); +/// ``` +pub fn terminated, F, G>(first: F, second: G) -> impl Fn(I) -> IResult +where + F: Fn(I) -> IResult, + G: Fn(I) -> IResult, +{ + move |input: I| { + let (input, o1) = first(input)?; + second(input).map(|(i, _)| (i, o1)) + } +} + +// this implementation is used for type inference issues in macros +#[doc(hidden)] +pub fn terminatedc, F, G>(input: I, first: F, second: G) -> IResult +where + F: Fn(I) -> IResult, + G: Fn(I) -> IResult, +{ + terminated(first, second)(input) +} + +/// Gets an object from the first parser, +/// then matches an object from the sep_parser and discards it, +/// then gets another object from the second parser. +/// +/// # Arguments +/// * `first` The first parser to apply. +/// * `sep` The separator parser to apply. +/// * `second` The second parser to apply. +/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::sequence::separated_pair; +/// use nom::bytes::complete::tag; +/// +/// let parser = separated_pair(tag("abc"), tag("|"), tag("efg")); +/// +/// assert_eq!(parser("abc|efg"), Ok(("", ("abc", "efg")))); +/// assert_eq!(parser("abc|efghij"), Ok(("hij", ("abc", "efg")))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Tag)))); +/// assert_eq!(parser("123"), Err(Err::Error(("123", ErrorKind::Tag)))); +/// ``` +pub fn separated_pair, F, G, H>(first: F, sep: G, second: H) -> impl Fn(I) -> IResult +where + F: Fn(I) -> IResult, + G: Fn(I) -> IResult, + H: Fn(I) -> IResult, +{ + move |input: I| { + let (input, o1) = first(input)?; + let (input, _) = sep(input)?; + second(input).map(|(i, o2)| (i, (o1, o2))) + } +} + +// this implementation is used for type inference issues in macros +#[doc(hidden)] +pub fn separated_pairc, F, G, H>(input: I, first: F, sep: G, second: H) -> IResult +where + F: Fn(I) -> IResult, + G: Fn(I) -> IResult, + H: Fn(I) -> IResult, +{ + separated_pair(first, sep, second)(input) +} + +/// Matches an object from the first parser, +/// then gets an object from the sep_parser, +/// then matches another object from the second parser. +/// +/// # Arguments +/// * `first` The first parser to apply. +/// * `sep` The separator parser to apply. +/// * `second` The second parser to apply. 
+/// ```rust +/// # use nom::{Err, error::ErrorKind, Needed}; +/// # use nom::Needed::Size; +/// use nom::sequence::delimited; +/// use nom::bytes::complete::tag; +/// +/// let parser = delimited(tag("abc"), tag("|"), tag("efg")); +/// +/// assert_eq!(parser("abc|efg"), Ok(("", "|"))); +/// assert_eq!(parser("abc|efghij"), Ok(("hij", "|"))); +/// assert_eq!(parser(""), Err(Err::Error(("", ErrorKind::Tag)))); +/// assert_eq!(parser("123"), Err(Err::Error(("123", ErrorKind::Tag)))); +/// ``` +pub fn delimited, F, G, H>(first: F, sep: G, second: H) -> impl Fn(I) -> IResult +where + F: Fn(I) -> IResult, + G: Fn(I) -> IResult, + H: Fn(I) -> IResult, +{ + move |input: I| { + let (input, _) = first(input)?; + let (input, o2) = sep(input)?; + second(input).map(|(i, _)| (i, o2)) + } +} + +// this implementation is used for type inference issues in macros +#[doc(hidden)] +pub fn delimitedc, F, G, H>(input: I, first: F, sep: G, second: H) -> IResult +where + F: Fn(I) -> IResult, + G: Fn(I) -> IResult, + H: Fn(I) -> IResult, +{ + delimited(first, sep, second)(input) +} + +/// helper trait for the tuple combinator +/// +/// this trait is implemented for tuples of parsers of up to 21 elements +pub trait Tuple { + /// parses the input and returns a tuple of results of each parser + fn parse(&self, input: I) -> IResult; +} + +impl, F: Fn(Input) -> IResult > Tuple for (F,) { + fn parse(&self, input: Input) -> IResult { + self.0(input).map(|(i,o)| (i, (o,))) + } +} + +macro_rules! tuple_trait( + ($name1:ident $ty1:ident, $name2: ident $ty2:ident, $($name:ident $ty:ident),*) => ( + tuple_trait!(__impl $name1 $ty1, $name2 $ty2; $($name $ty),*); + ); + (__impl $($name:ident $ty: ident),+; $name1:ident $ty1:ident, $($name2:ident $ty2:ident),*) => ( + tuple_trait_impl!($($name $ty),+); + tuple_trait!(__impl $($name $ty),+ , $name1 $ty1; $($name2 $ty2),*); + ); + (__impl $($name:ident $ty: ident),+; $name1:ident $ty1:ident) => ( + tuple_trait_impl!($($name $ty),+); + tuple_trait_impl!($($name $ty),+, $name1 $ty1); + ); +); + +macro_rules! tuple_trait_impl( + ($($name:ident $ty: ident),+) => ( + impl< + Input: Clone, $($ty),+ , Error: ParseError, + $($name: Fn(Input) -> IResult),+ + > Tuple for ( $($name),+ ) { + + fn parse(&self, input: Input) -> IResult { + tuple_trait_inner!(0, self, input, (), $($name)+) + + } + } + ); +); + +macro_rules! 
tuple_trait_inner( + ($it:tt, $self:expr, $input:expr, (), $head:ident $($id:ident)+) => ({ + let (i, o) = $self.$it($input.clone())?; + + succ!($it, tuple_trait_inner!($self, i, ( o ), $($id)+)) + }); + ($it:tt, $self:expr, $input:expr, ($($parsed:tt)*), $head:ident $($id:ident)+) => ({ + let (i, o) = $self.$it($input.clone())?; + + succ!($it, tuple_trait_inner!($self, i, ($($parsed)* , o), $($id)+)) + }); + ($it:tt, $self:expr, $input:expr, ($($parsed:tt)*), $head:ident) => ({ + let (i, o) = $self.$it($input.clone())?; + + Ok((i, ($($parsed)* , o))) + }); +); + +tuple_trait!(FnA A, FnB B, FnC C, FnD D, FnE E, FnF F, FnG G, FnH H, FnI I, FnJ J, FnK K, FnL L, + FnM M, FnN N, FnO O, FnP P, FnQ Q, FnR R, FnS S, FnT T, FnU U); + +/// applies a tuple of parsers one by one and returns their results as a tuple +/// +/// ```rust +/// # use nom::{Err, error::ErrorKind}; +/// use nom::sequence::tuple; +/// use nom::character::complete::{alpha1, digit1}; +/// let parser = tuple((alpha1, digit1, alpha1)); +/// +/// assert_eq!(parser("abc123def"), Ok(("", ("abc", "123", "def")))); +/// assert_eq!(parser("123def"), Err(Err::Error(("123def", ErrorKind::Alpha)))); +/// ``` +pub fn tuple, List: Tuple>(l: List) -> impl Fn(I) -> IResult { + move |i: I| { + l.parse(i) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn single_element_tuples() { + use crate::character::complete::{alpha1, digit1}; + use crate::{Err, error::ErrorKind}; + + let parser = tuple((alpha1,)); + assert_eq!(parser("abc123def"), Ok(("123def", ("abc",)))); + assert_eq!(parser("123def"), Err(Err::Error(("123def", ErrorKind::Alpha)))); + } +} diff --git a/third_party/rust/nom/src/simple_errors.rs b/third_party/rust/nom/src/simple_errors.rs deleted file mode 100644 index c221d55b35..0000000000 --- a/third_party/rust/nom/src/simple_errors.rs +++ /dev/null @@ -1,202 +0,0 @@ -//! Error management -//! -//! Depending on a compilation flag, the content of the `Context` enum -//! can change. In the default case, it will only have one variant: -//! `Context::Code(I, ErrorKind)` (with `I` and `E` configurable). -//! It contains an error code and the input position that triggered it. -//! -//! If you activate the `verbose-errors` compilation flags, it will add another -//! variant to the enum: `Context::List(Vec<(I, ErrorKind)>)`. -//! This variant aggregates positions and error codes as the code backtracks -//! through the nested parsers. -//! The verbose errors feature allows for very flexible error management: -//! you can know precisely which parser got to which part of the input. -//! The main drawback is that it is a lot slower than default error -//! management. -use util::{Convert, ErrorKind}; -use lib::std::convert::From; - -#[derive(Debug, Clone, PartialEq)] -pub enum Context { - Code(I, ErrorKind), -} - -impl> Convert> for Context { - fn convert(c: Context) -> Self { - let Context::Code(i, e) = c; - - Context::Code(i, ErrorKind::convert(e)) - } -} - -impl Context { - /// Convert Err into ErrorKind. - /// - /// This allows application code to use ErrorKind and stay independent from the verbose-errors features activation. - pub fn into_error_kind(self) -> ErrorKind { - let Context::Code(_, e) = self; - ErrorKind::convert(e) - } -} - -/* -impl IResult { - /// Maps a `IResult` to `IResult` by appling a function - /// to a contained `Error` value, leaving `Done` and `Incomplete` value - /// untouched. 
- #[inline] - pub fn map_err(self, f: F) -> IResult - where F: FnOnce(Err) -> Err { - match self { - Error(e) => Error(f(e)), - Incomplete(n) => Incomplete(n), - Done(i, o) => Done(i, o), - } - } - - /// Unwrap the contained `Error(E)` value, or panic if the `IResult` is not - /// `Error`. - pub fn unwrap_err(self) -> Err { - match self { - Error(e) => e, - Done(_, _) => panic!("unwrap_err() called on an IResult that is Done"), - Incomplete(_) => panic!("unwrap_err() called on an IResult that is Incomplete"), - } - } - - /// Convert the IResult to a std::result::Result - pub fn to_full_result(self) -> Result> { - match self { - Done(_, o) => Ok(o), - Incomplete(n) => Err(IError::Incomplete(n)), - Error(e) => Err(IError::Error(e)) - } - } - - /// Convert the IResult to a std::result::Result, or panic if the `IResult` is `Incomplete` - pub fn to_result(self) -> Result> { - match self { - Done(_, o) => Ok(o), - Error(e) => Err(e), - Incomplete(_) => panic!("to_result() called on an IResult that is Incomplete") - } - } -} - -#[cfg(feature = "std")] -use $crate::lib::std::any::Any; -#[cfg(feature = "std")] -use $crate::lib::std::{error,fmt}; -#[cfg(feature = "std")] -impl error::Error for Err { - fn description(&self) -> &str { - self.description() - } -} - -#[cfg(feature = "std")] -impl fmt::Display for Err { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.description()) - } -} -*/ - -/// translate parser result from IResult to IResult with a custom type -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::IResult; -/// # use std::convert::From; -/// # use nom::Context; -/// # use nom::Err; -/// # use nom::ErrorKind; -/// # fn main() { -/// // will add a Custom(42) error to the error chain -/// named!(err_test, add_return_error!(ErrorKind::Custom(42u32), tag!("abcd"))); -/// -/// #[derive(Debug,Clone,PartialEq)] -/// pub struct ErrorStr(String); -/// -/// // Convert to IResult<&[u8], &[u8], ErrorStr> -/// impl From for ErrorStr { -/// fn from(i: u32) -> Self { -/// ErrorStr(format!("custom error code: {}", i)) -/// } -/// } -/// -/// named!(parser<&[u8], &[u8], ErrorStr>, -/// fix_error!(ErrorStr, err_test) -/// ); -/// -/// let a = &b"efghblah"[..]; -/// assert_eq!(parser(a), Err(Err::Error(Context::Code(a, ErrorKind::Custom(ErrorStr("custom error code: 42".to_string())))))); -/// # } -/// ``` -#[macro_export] -macro_rules! fix_error ( - ($i:expr, $t:ty, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::Err; - use $crate::{Convert,Context,ErrorKind}; - - match $submac!($i, $($args)*) { - Ok((i,o)) => Ok((i,o)), - Err(e) => { - let e2 = match e { - Err::Error(err) => { - let Context::Code(i, code) = err; - let code2: ErrorKind<$t> = ErrorKind::convert(code); - Err::Error(Context::Code(i, code2)) - }, - Err::Failure(err) => { - let Context::Code(i, code) = err; - let code2: ErrorKind<$t> = ErrorKind::convert(code); - Err::Failure(Context::Code(i, code2)) - }, - Err::Incomplete(e) => Err::Incomplete(e), - }; - Err(e2) - } - } - } - ); - ($i:expr, $t:ty, $f:expr) => ( - fix_error!($i, $t, call!($f)); - ); -); - -/// `flat_map!(R -> IResult, S -> IResult) => R -> IResult` -/// -/// combines a parser R -> IResult and -/// a parser S -> IResult to return another -/// parser R -> IResult -#[macro_export] -macro_rules! 
flat_map( - ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( - flat_map!(__impl $i, $submac!($($args)*), $submac2!($($args2)*)); - ); - ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( - flat_map!(__impl $i, $submac!($($args)*), call!($g)); - ); - ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( - flat_map!(__impl $i, call!($f), $submac!($($args)*)); - ); - ($i:expr, $f:expr, $g:expr) => ( - flat_map!(__impl $i, call!($f), call!($g)); - ); - (__impl $i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Convert,Err}; - - ($submac!($i, $($args)*)).and_then(|(i,o)| { - match $submac2!(o, $($args2)*) { - Err(e) => Err(Err::convert(e)), - Ok((_, o2)) => Ok((i, o2)) - } - }) - } - ); -); diff --git a/third_party/rust/nom/src/str.rs b/third_party/rust/nom/src/str.rs index 0903b9b4ca..71fb1a4aa1 100644 --- a/third_party/rust/nom/src/str.rs +++ b/third_party/rust/nom/src/str.rs @@ -1,280 +1,31 @@ -//! Parsers and helper functions operating on strings, especially useful when writing parsers for -//! text-based formats. - -/// `tag_s!(&str) => &str -> IResult<&str, &str>` -/// declares a string as a suite to recognize -/// -/// consumes the recognized characters -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::IResult; -/// # fn main() { -/// fn test(input: &str) -> IResult<&str, &str> { -/// tag_s!(input, "abcd") -/// } -/// let r = test("abcdefgh"); -/// assert_eq!(r,Ok(("efgh", "abcd"))); -/// # } -/// ``` -#[macro_export] -#[deprecated(since = "4.0.0", note = "Please use `tag` instead")] -macro_rules! tag_s ( - ($i:expr, $tag: expr) => ( - { - tag!($i, $tag) - } - ); -); - -/// `tag_no_case_s!(&str) => &str -> IResult<&str, &str>` -/// declares a case-insensitive string as a suite to recognize -/// -/// consumes the recognized characters -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::IResult; -/// # use nom::InputLength; -/// #[cfg(feature = "alloc")] -/// # fn main() { -/// fn test(input: &str) -> IResult<&str, &str> { -/// tag_no_case_s!(input, "ABcd") -/// } -/// let r = test("aBCdefgh"); -/// assert_eq!(r,Ok(("efgh", "aBCd"))); -/// # } -/// # #[cfg(not(feature = "alloc"))] -/// # fn main() {} -/// ``` -#[macro_export] -#[deprecated(since = "4.0.0", note = "Please use `tag_no_case` instead")] -macro_rules! tag_no_case_s ( - ($i:expr, $tag: expr) => ( - { - tag_no_case!($i, $tag) - } - ); -); - -/// `take_s!(nb) => &str -> IResult<&str, &str>` -/// generates a parser consuming the specified number of characters -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// // Desmond parser -/// named!(take5<&str,&str>, take_s!( 5 ) ); -/// -/// let a = "abcdefgh"; -/// -/// assert_eq!(take5(a),Ok(("fgh", "abcde"))); -/// -/// let b = "12345"; -/// -/// assert_eq!(take5(b),Ok(("", "12345"))); -/// # } -/// ``` -#[macro_export] -#[deprecated(since = "4.0.0", note = "Please use `take` instead")] -macro_rules! 
take_s ( - ($i:expr, $count:expr) => ( - { - let input = $i; - let cnt = $count as usize; - take!(input, cnt) - } - ); -); - -/// `is_not_s!(&str) => &str -> IResult<&str, &str>` -/// returns the longest list of characters that do not appear in the provided array -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!( not_space<&str,&str>, is_not_s!( " \t\r\n" ) ); -/// -/// let r = not_space("abcdefgh\nijkl"); -/// assert_eq!(r,Ok(("\nijkl", "abcdefgh"))); -/// # } -/// ``` -#[macro_export] -#[deprecated(since = "4.0.0", note = "Please use `is_not` instead")] -macro_rules! is_not_s ( - ($input:expr, $arr:expr) => ( - { - is_not!($input, $arr) - } - ); -); - -/// `is_a_s!(&str) => &str -> IResult<&str, &str>` -/// returns the longest list of characters that appear in the provided array -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// named!(abcd<&str, &str>, is_a_s!( "abcd" )); -/// -/// let r1 = abcd("aaaaefgh"); -/// assert_eq!(r1,Ok(("efgh", "aaaa"))); -/// -/// let r2 = abcd("dcbaefgh"); -/// assert_eq!(r2,Ok(("efgh", "dcba"))); -/// # } -/// ``` -#[macro_export] -#[deprecated(since = "4.0.0", note = "Please use `is_a` instead")] -macro_rules! is_a_s ( - ($input:expr, $arr:expr) => ( - { - is_a!($input, $arr) - } - ); -); - -/// `take_while_s!(char -> bool) => &str -> IResult<&str, &str>` -/// returns the longest list of characters until the provided function fails. -/// -/// The argument is either a function `char -> bool` or a macro returning a `bool -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # fn main() { -/// fn alphabetic(chr: char) -> bool { (chr >= 0x41 as char && chr <= 0x5A as char) || (chr >= 0x61 as char && chr <= 0x7A as char) } -/// named!( alpha<&str,&str>, take_while_s!( alphabetic ) ); -/// -/// let r = alpha("abcd\nefgh"); -/// assert_eq!(r,Ok(("\nefgh", "abcd"))); -/// # } -/// ``` -#[macro_export] -#[deprecated(since = "4.0.0", note = "Please use `take_while` instead")] -macro_rules! take_while_s ( - ($input:expr, $submac:ident!( $($args:tt)* )) => ( - { - take_while!($input, $submac!($($args)*)) - } - ); - ($input:expr, $f:expr) => ( - take_while_s!($input, call!($f)); - ); -); - -/// `take_while1_s!(char -> bool) => &str -> IResult<&str, &str>` -/// returns the longest (non empty) list of characters until the provided function fails. -/// -/// The argument is either a function `char -> bool` or a macro returning a `bool` -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::is_alphanumeric; -/// # fn main() { -/// fn alphabetic(chr: char) -> bool { (chr >= 0x41 as char && chr <= 0x5A as char) || (chr >= 0x61 as char && chr <= 0x7A as char) } -/// named!( alpha<&str,&str>, take_while1_s!( alphabetic ) ); -/// -/// let r = alpha("abcd\nefgh"); -/// assert_eq!(r, Ok(("\nefgh", "abcd"))); -/// # } -/// ``` -#[macro_export] -#[deprecated(since = "4.0.0", note = "Please use `take_while1` instead")] -macro_rules! take_while1_s ( - ($input:expr, $submac:ident!( $($args:tt)* )) => ( - take_while1!($input, $submac!($($args)*)) - ); - ($input:expr, $f:expr) => ( - take_while1_s!($input, call!($f)); - ); -); - -/// `take_till_s!(char -> bool) => &str -> IResult<&str, &str>` -/// returns the longest list of characters until the provided function succeeds -/// -/// The argument is either a function `char -> bool` or a macro returning a `bool -#[macro_export] -#[deprecated(since = "4.0.0", note = "Please use `take_till` instead")] -macro_rules! 
take_till_s ( - ($input:expr, $submac:ident!( $($args:tt)* )) => ( - { - take_till!($input, $submac!($($args)*)) - } - ); - ($input:expr, $f:expr) => ( - take_till_s!($input, call!($f)); - ); -); - -/// `take_till1_s!(char -> bool) => &str -> IResult<&str, &str>` -/// returns the longest non empty list of characters until the provided function succeeds -/// -/// The argument is either a function `char -> bool` or a macro returning a `bool -#[macro_export] -#[deprecated(since = "4.0.0", note = "Please use `take_till1` instead")] -macro_rules! take_till1_s ( - ($input:expr, $submac:ident!( $($args:tt)* )) => ( - { - take_till1!($input, $submac!($($args)*)) - } - ); - ($input:expr, $f:expr) => ( - take_till1_s!($input, call!($f)); - ); -); - -/// `take_until_and_consume_s!(&str) => &str -> IResult<&str, &str>` -/// generates a parser consuming all chars until the specified string is found and consumes it -#[macro_export] -#[deprecated(since = "4.0.0", note = "Please use `take_until_and_consume` instead")] -macro_rules! take_until_and_consume_s ( - ($input:expr, $substr:expr) => ( - { - take_until_and_consume!($input, $substr) - } - ); -); - -/// `take_until_s!(&str) => &str -> IResult<&str, &str>` -/// generates a parser consuming all chars until the specified string is found and leaves it in the remaining input -#[macro_export] -#[deprecated(since = "4.0.0", note = "Please use `take_until` instead")] -macro_rules! take_until_s ( - ($input:expr, $substr:expr) => ( - { - take_until!($input, $substr) - } - ); -); - #[cfg(test)] mod test { - use {Err, ErrorKind, IResult}; + use crate::{Err, error::ErrorKind, IResult}; #[test] - fn tag_str_succeed() { + fn tagtr_succeed() { const INPUT: &str = "Hello World!"; const TAG: &str = "Hello"; fn test(input: &str) -> IResult<&str, &str> { - tag_s!(input, TAG) + tag!(input, TAG) } match test(INPUT) { Ok((extra, output)) => { assert!( extra == " World!", - "Parser `tag_s` consumed leftover input." + "Parser `tag` consumed leftover input." ); assert!( output == TAG, - "Parser `tag_s` doesn't return the tag it matched on success. \ + "Parser `tag` doesn't return the tag it matched on success. \ Expected `{}`, got `{}`.", TAG, output ); } other => panic!( - "Parser `tag_s` didn't succeed when it should have. \ + "Parser `tag` didn't succeed when it should have. \ Got `{:?}`.", other ), @@ -282,15 +33,16 @@ mod test { } #[test] - fn tag_str_incomplete() { + fn tagtr_incomplete() { const INPUT: &str = "Hello"; const TAG: &str = "Hello World!"; - match tag_s!(INPUT, TAG) { + let res: IResult<_,_,(_, ErrorKind)> = tag!(INPUT, TAG); + match res { Err(Err::Incomplete(_)) => (), other => { panic!( - "Parser `tag_s` didn't require more input when it should have. \ + "Parser `tag` didn't require more input when it should have. \ Got `{:?}`.", other ); @@ -299,15 +51,16 @@ mod test { } #[test] - fn tag_str_error() { + fn tagtr_error() { const INPUT: &str = "Hello World!"; const TAG: &str = "Random"; // TAG must be closer than INPUT. - match tag_s!(INPUT, TAG) { + let res: IResult<_,_,(_, ErrorKind)> = tag!(INPUT, TAG); + match res { Err(Err::Error(_)) => (), other => { panic!( - "Parser `tag_s` didn't fail when it should have. Got `{:?}`.`", + "Parser `tag` didn't fail when it should have. 
Got `{:?}`.`", other ); } @@ -320,7 +73,8 @@ mod test { const CONSUMED: &str = "βèƒôřèÂßÇ"; const LEFTOVER: &str = "áƒƭèř"; - match take_s!(INPUT, 9) { + let res: IResult<_,_,(_, ErrorKind)> = take!(INPUT, 9); + match res { Ok((extra, output)) => { assert!( extra == LEFTOVER, @@ -343,30 +97,31 @@ mod test { } #[test] - fn take_until_s_succeed() { + fn take_until_succeed() { const INPUT: &str = "βèƒôřèÂßÇ∂áƒƭèř"; const FIND: &str = "ÂßÇ∂"; const CONSUMED: &str = "βèƒôřè"; const LEFTOVER: &str = "ÂßÇ∂áƒƭèř"; - match take_until_s!(INPUT, FIND) { + let res: IResult<_,_,(_, ErrorKind)> = take_until!(INPUT, FIND); + match res { Ok((extra, output)) => { assert!( extra == LEFTOVER, - "Parser `take_until_s`\ + "Parser `take_until`\ consumed leftover input. Leftover `{}`.", extra ); assert!( output == CONSUMED, - "Parser `take_until_s`\ + "Parser `take_until`\ doens't return the string it consumed on success. Expected `{}`, got `{}`.", CONSUMED, output ); } other => panic!( - "Parser `take_until_s` didn't succeed when it should have. \ + "Parser `take_until` didn't succeed when it should have. \ Got `{:?}`.", other ), @@ -377,24 +132,26 @@ mod test { fn take_s_incomplete() { const INPUT: &str = "βèƒôřèÂßÇá"; - match take_s!(INPUT, 13) { + let res: IResult<_,_,(_, ErrorKind)> = take!(INPUT, 13); + match res { Err(Err::Incomplete(_)) => (), other => panic!( - "Parser `take_s` didn't require more input when it should have. \ + "Parser `take` didn't require more input when it should have. \ Got `{:?}`.", other ), } } - use internal::Needed; + use crate::internal::Needed; - pub fn is_alphabetic(c: char) -> bool { + fn is_alphabetic(c: char) -> bool { (c as u8 >= 0x41 && c as u8 <= 0x5A) || (c as u8 >= 0x61 && c as u8 <= 0x7A) } + #[test] - fn take_while_s() { - named!(f<&str,&str>, take_while_s!(is_alphabetic)); + fn take_while() { + named!(f<&str,&str>, take_while!(is_alphabetic)); let a = ""; let b = "abcd"; let c = "abcd123"; @@ -407,8 +164,8 @@ mod test { } #[test] - fn take_while1_s() { - named!(f<&str,&str>, take_while1_s!(is_alphabetic)); + fn take_while1() { + named!(f<&str,&str>, take_while1!(is_alphabetic)); let a = ""; let b = "abcd"; let c = "abcd123"; @@ -432,24 +189,24 @@ mod test { c == 'á' } fn test(input: &str) -> IResult<&str, &str> { - take_till_s!(input, till_s) + take_till!(input, till_s) } match test(INPUT) { Ok((extra, output)) => { assert!( extra == LEFTOVER, - "Parser `take_till_s` consumed leftover input." + "Parser `take_till` consumed leftover input." ); assert!( output == CONSUMED, - "Parser `take_till_s` doesn't return the string it consumed on success. \ + "Parser `take_till` doesn't return the string it consumed on success. \ Expected `{}`, got `{}`.", CONSUMED, output ); } other => panic!( - "Parser `take_till_s` didn't succeed when it should have. \ + "Parser `take_till` didn't succeed when it should have. \ Got `{:?}`.", other ), @@ -457,7 +214,7 @@ mod test { } #[test] - fn take_while_s_succeed_none() { + fn take_while_succeed_none() { const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; const CONSUMED: &str = ""; const LEFTOVER: &str = "βèƒôřèÂßÇáƒƭèř"; @@ -465,24 +222,24 @@ mod test { c == '9' } fn test(input: &str) -> IResult<&str, &str> { - take_while_s!(input, while_s) + take_while!(input, while_s) } match test(INPUT) { Ok((extra, output)) => { assert!( extra == LEFTOVER, - "Parser `take_while_s` consumed leftover input." + "Parser `take_while` consumed leftover input." ); assert!( output == CONSUMED, - "Parser `take_while_s` doesn't return the string it consumed on success. 
\ + "Parser `take_while` doesn't return the string it consumed on success. \ Expected `{}`, got `{}`.", CONSUMED, output ); } other => panic!( - "Parser `take_while_s` didn't succeed when it should have. \ + "Parser `take_while` didn't succeed when it should have. \ Got `{:?}`.", other ), @@ -490,30 +247,30 @@ mod test { } #[test] - fn is_not_s_succeed() { + fn is_not_succeed() { const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; const AVOID: &str = "£úçƙ¥á"; const CONSUMED: &str = "βèƒôřèÂßÇ"; const LEFTOVER: &str = "áƒƭèř"; fn test(input: &str) -> IResult<&str, &str> { - is_not_s!(input, AVOID) + is_not!(input, AVOID) } match test(INPUT) { Ok((extra, output)) => { assert!( extra == LEFTOVER, - "Parser `is_not_s` consumed leftover input. Leftover `{}`.", + "Parser `is_not` consumed leftover input. Leftover `{}`.", extra ); assert!( output == CONSUMED, - "Parser `is_not_s` doens't return the string it consumed on success. Expected `{}`, got `{}`.", + "Parser `is_not` doens't return the string it consumed on success. Expected `{}`, got `{}`.", CONSUMED, output ); } other => panic!( - "Parser `is_not_s` didn't succeed when it should have. \ + "Parser `is_not` didn't succeed when it should have. \ Got `{:?}`.", other ), @@ -521,38 +278,7 @@ mod test { } #[test] - fn take_until_and_consume_s_succeed() { - const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; - const FIND: &str = "ÂßÇ"; - const OUTPUT: &str = "βèƒôřè"; - const LEFTOVER: &str = "áƒƭèř"; - - match take_until_and_consume_s!(INPUT, FIND) { - Ok((extra, output)) => { - assert!( - extra == LEFTOVER, - "Parser `take_until_and_consume_s`\ - consumed leftover input. Leftover `{}`.", - extra - ); - assert!( - output == OUTPUT, - "Parser `take_until_and_consume_s`\ - doens't return the string it selected on success. Expected `{}`, got `{}`.", - OUTPUT, - output - ); - } - other => panic!( - "Parser `take_until_and_consume_s` didn't succeed when it should have. \ - Got `{:?}`.", - other - ), - }; - } - - #[test] - fn take_while_s_succeed_some() { + fn take_while_succeed_some() { const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; const CONSUMED: &str = "βèƒôřèÂßÇ"; const LEFTOVER: &str = "áƒƭèř"; @@ -560,24 +286,24 @@ mod test { c == 'β' || c == 'è' || c == 'ƒ' || c == 'ô' || c == 'ř' || c == 'è' || c == 'Â' || c == 'ß' || c == 'Ç' } fn test(input: &str) -> IResult<&str, &str> { - take_while_s!(input, while_s) + take_while!(input, while_s) } match test(INPUT) { Ok((extra, output)) => { assert!( extra == LEFTOVER, - "Parser `take_while_s` consumed leftover input." + "Parser `take_while` consumed leftover input." ); assert!( output == CONSUMED, - "Parser `take_while_s` doesn't return the string it consumed on success. \ + "Parser `take_while` doesn't return the string it consumed on success. \ Expected `{}`, got `{}`.", CONSUMED, output ); } other => panic!( - "Parser `take_while_s` didn't succeed when it should have. \ + "Parser `take_while` didn't succeed when it should have. \ Got `{:?}`.", other ), @@ -585,23 +311,23 @@ mod test { } #[test] - fn is_not_s_fail() { + fn is_not_fail() { const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; const AVOID: &str = "βúçƙ¥"; fn test(input: &str) -> IResult<&str, &str> { - is_not_s!(input, AVOID) + is_not!(input, AVOID) } match test(INPUT) { Err(Err::Error(_)) => (), other => panic!( - "Parser `is_not_s` didn't fail when it should have. Got `{:?}`.", + "Parser `is_not` didn't fail when it should have. 
Got `{:?}`.", other ), }; } #[test] - fn take_while1_s_succeed() { + fn take_while1_succeed() { const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; const CONSUMED: &str = "βèƒôřèÂßÇ"; const LEFTOVER: &str = "áƒƭèř"; @@ -609,39 +335,24 @@ mod test { c == 'β' || c == 'è' || c == 'ƒ' || c == 'ô' || c == 'ř' || c == 'è' || c == 'Â' || c == 'ß' || c == 'Ç' } fn test(input: &str) -> IResult<&str, &str> { - take_while1_s!(input, while1_s) + take_while1!(input, while1_s) } match test(INPUT) { Ok((extra, output)) => { assert!( extra == LEFTOVER, - "Parser `take_while1_s` consumed leftover input." + "Parser `take_while1` consumed leftover input." ); assert!( output == CONSUMED, - "Parser `take_while1_s` doesn't return the string it consumed on success. \ + "Parser `take_while1` doesn't return the string it consumed on success. \ Expected `{}`, got `{}`.", CONSUMED, output ); } other => panic!( - "Parser `take_while1_s` didn't succeed when it should have. \ - Got `{:?}`.", - other - ), - }; - } - - #[test] - fn take_until_and_consume_s_incomplete() { - const INPUT: &str = "βèƒôřè"; - const FIND: &str = "βèƒôřèÂßÇ"; - - match take_until_and_consume_s!(INPUT, FIND) { - Err(Err::Incomplete(_)) => (), - other => panic!( - "Parser `take_until_and_consume_s` didn't require more input when it should have. \ + "Parser `take_while1` didn't succeed when it should have. \ Got `{:?}`.", other ), @@ -649,14 +360,15 @@ mod test { } #[test] - fn take_until_s_incomplete() { + fn take_until_incomplete() { const INPUT: &str = "βèƒôřè"; const FIND: &str = "βèƒôřèÂßÇ"; - match take_until_s!(INPUT, FIND) { + let res: IResult<_,_,(_, ErrorKind)> = take_until!(INPUT, FIND); + match res { Err(Err::Incomplete(_)) => (), other => panic!( - "Parser `take_until_s` didn't require more input when it should have. \ + "Parser `take_until` didn't require more input when it should have. \ Got `{:?}`.", other ), @@ -664,30 +376,30 @@ mod test { } #[test] - fn is_a_s_succeed() { + fn is_a_succeed() { const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; const MATCH: &str = "βèƒôřèÂßÇ"; const CONSUMED: &str = "βèƒôřèÂßÇ"; const LEFTOVER: &str = "áƒƭèř"; fn test(input: &str) -> IResult<&str, &str> { - is_a_s!(input, MATCH) + is_a!(input, MATCH) } match test(INPUT) { Ok((extra, output)) => { assert!( extra == LEFTOVER, - "Parser `is_a_s` consumed leftover input. Leftover `{}`.", + "Parser `is_a` consumed leftover input. Leftover `{}`.", extra ); assert!( output == CONSUMED, - "Parser `is_a_s` doens't return the string it consumed on success. Expected `{}`, got `{}`.", + "Parser `is_a` doens't return the string it consumed on success. Expected `{}`, got `{}`.", CONSUMED, output ); } other => panic!( - "Parser `is_a_s` didn't succeed when it should have. \ + "Parser `is_a` didn't succeed when it should have. \ Got `{:?}`.", other ), @@ -695,18 +407,18 @@ mod test { } #[test] - fn take_while1_s_fail() { + fn take_while1_fail() { const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; fn while1_s(c: char) -> bool { c == '9' } fn test(input: &str) -> IResult<&str, &str> { - take_while1_s!(input, while1_s) + take_while1!(input, while1_s) } match test(INPUT) { Err(Err::Error(_)) => (), other => panic!( - "Parser `take_while1_s` didn't fail when it should have. \ + "Parser `take_while1` didn't fail when it should have. 
\ Got `{:?}`.", other ), @@ -714,45 +426,31 @@ mod test { } #[test] - fn is_a_s_fail() { + fn is_a_fail() { const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; const MATCH: &str = "Ûñℓúçƙ¥"; fn test(input: &str) -> IResult<&str, &str> { - is_a_s!(input, MATCH) + is_a!(input, MATCH) } match test(INPUT) { Err(Err::Error(_)) => (), other => panic!( - "Parser `is_a_s` didn't fail when it should have. Got `{:?}`.", - other - ), - }; - } - - #[test] - fn take_until_and_consume_s_error() { - const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; - const FIND: &str = "Ráñδô₥"; - - match take_until_and_consume_s!(INPUT, FIND) { - Err(Err::Incomplete(_)) => (), - other => panic!( - "Parser `take_until_and_consume_s` didn't fail when it should have. \ - Got `{:?}`.", + "Parser `is_a` didn't fail when it should have. Got `{:?}`.", other ), }; } #[test] - fn take_until_s_error() { + fn take_until_error() { const INPUT: &str = "βèƒôřèÂßÇáƒƭèř"; const FIND: &str = "Ráñδô₥"; - match take_until_s!(INPUT, FIND) { + let res: IResult<_,_,(_, ErrorKind)> = take_until!(INPUT, FIND); + match res { Err(Err::Incomplete(_)) => (), other => panic!( - "Parser `take_until_and_consume_s` didn't fail when it should have. \ + "Parser `take_until` didn't fail when it should have. \ Got `{:?}`.", other ), @@ -761,11 +459,11 @@ mod test { #[test] #[cfg(feature = "alloc")] - fn recognize_is_a_s() { + fn recognize_is_a() { let a = "aabbab"; let b = "ababcd"; - named!(f <&str,&str>, recognize!(many1!(complete!(alt!( tag_s!("a") | tag_s!("b") ))))); + named!(f <&str,&str>, recognize!(many1!(complete!(alt!( tag!("a") | tag!("b") ))))); assert_eq!(f(&a[..]), Ok((&a[6..], &a[..]))); assert_eq!(f(&b[..]), Ok((&b[4..], &b[..4]))); @@ -774,7 +472,7 @@ mod test { #[test] fn utf8_indexing() { named!(dot(&str) -> &str, - tag_s!(".") + tag!(".") ); let _ = dot("點"); diff --git a/third_party/rust/nom/src/traits.rs b/third_party/rust/nom/src/traits.rs index 2e4cf7c46e..afd16ba251 100644 --- a/third_party/rust/nom/src/traits.rs +++ b/third_party/rust/nom/src/traits.rs @@ -1,34 +1,26 @@ //! Traits input types have to implement to work with nom combinators //! 
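As a quick illustration of what these traits buy (a sketch only, assuming nom 5's released API rather than anything in this patch): because `&str` and `&[u8]` both implement the input traits defined in this module, the same `tag` combinator works unchanged over either input type.

```rust
use nom::bytes::complete::tag;
use nom::IResult;

// &str input: resolved through the str impls of InputTake, Compare, ...
fn abcd_str(i: &str) -> IResult<&str, &str> {
    tag("abcd")(i)
}

// &[u8] input: the same combinator, resolved through the byte-slice impls
fn abcd_bytes(i: &[u8]) -> IResult<&[u8], &[u8]> {
    tag("abcd")(i)
}

fn main() {
    assert_eq!(abcd_str("abcdef"), Ok(("ef", "abcd")));
    assert_eq!(abcd_bytes(&b"abcdef"[..]), Ok((&b"ef"[..], &b"abcd"[..])));
}
```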
-use internal::{Err, IResult, Needed}; -use lib::std::ops::{Range, RangeFrom, RangeFull, RangeTo}; -use lib::std::iter::Enumerate; -use lib::std::slice::Iter; -use lib::std::iter::Map; - -use lib::std::str::Chars; -use lib::std::str::CharIndices; -use lib::std::str::FromStr; -use lib::std::str::from_utf8; -#[cfg(feature = "alloc")] -use lib::std::string::String; -#[cfg(feature = "alloc")] -use lib::std::vec::Vec; - +use crate::internal::{Err, IResult, Needed}; +use crate::error::{ParseError, ErrorKind}; +use crate::lib::std::ops::{Range, RangeFrom, RangeFull, RangeTo}; +use crate::lib::std::iter::Enumerate; +use crate::lib::std::slice::Iter; +use crate::lib::std::iter::Map; +use crate::lib::std::str::Chars; +use crate::lib::std::str::CharIndices; +use crate::lib::std::str::FromStr; +use crate::lib::std::str::from_utf8; use memchr; -#[cfg(feature = "verbose-errors")] -use verbose_errors::Context; -#[cfg(not(feature = "verbose-errors"))] -use simple_errors::Context; - -use util::ErrorKind; +#[cfg(feature = "alloc")] +use crate::lib::std::string::String; +#[cfg(feature = "alloc")] +use crate::lib::std::vec::Vec; /// abstract method to calculate the input length pub trait InputLength { /// calculates the input length, as indicated by its name, /// and the name of the trait itself - #[inline] fn input_len(&self) -> usize; } @@ -97,8 +89,9 @@ impl<'a> Offset for &'a str { } } -/// casts the input type to a byte slice +/// Helper trait for types that can be viewed as a byte slice pub trait AsBytes { + /// casts the input type to a byte slice fn as_bytes(&self) -> &[u8]; } @@ -160,31 +153,24 @@ as_bytes_array_impls! { /// transforms common types to a char for basic token parsing pub trait AsChar { /// makes a char from self - #[inline] fn as_char(self) -> char; /// tests that self is an alphabetic character /// /// warning: for `&str` it recognizes alphabetic /// characters outside of the 52 ASCII letters - #[inline] fn is_alpha(self) -> bool; /// tests that self is an alphabetic character /// or a decimal digit - #[inline] fn is_alphanum(self) -> bool; /// tests that self is a decimal digit - #[inline] fn is_dec_digit(self) -> bool; /// tests that self is an hex digit - #[inline] fn is_hex_digit(self) -> bool; /// tests that self is an octal digit - #[inline] fn is_oct_digit(self) -> bool; /// gets the len in bytes for self - #[inline] fn len(self) -> usize; } @@ -254,17 +240,9 @@ impl AsChar for char { fn as_char(self) -> char { self } - #[cfg(feature = "alloc")] - #[inline] - fn is_alpha(self) -> bool { - self.is_alphabetic() - } - #[cfg(not(feature = "alloc"))] #[inline] fn is_alpha(self) -> bool { - unimplemented!( - "error[E0658]: use of unstable library feature 'core_char_ext': the stable interface is `impl char` in later crate (see issue #32110)" - ) + self.is_ascii_alphabetic() } #[inline] fn is_alphanum(self) -> bool { @@ -272,11 +250,11 @@ impl AsChar for char { } #[inline] fn is_dec_digit(self) -> bool { - self.is_digit(10) + self.is_ascii_digit() } #[inline] fn is_hex_digit(self) -> bool { - self.is_digit(16) + self.is_ascii_hexdigit() } #[inline] fn is_oct_digit(self) -> bool { @@ -295,7 +273,7 @@ impl<'a> AsChar for &'a char { } #[inline] fn is_alpha(self) -> bool { - ::is_alpha(*self) + self.is_ascii_alphabetic() } #[inline] fn is_alphanum(self) -> bool { @@ -303,11 +281,11 @@ impl<'a> AsChar for &'a char { } #[inline] fn is_dec_digit(self) -> bool { - self.is_digit(10) + self.is_ascii_digit() } #[inline] fn is_hex_digit(self) -> bool { - self.is_digit(16) + 
self.is_ascii_hexdigit() } #[inline] fn is_oct_digit(self) -> bool { @@ -320,13 +298,17 @@ impl<'a> AsChar for &'a char { } /// abstracts common iteration operations on the input type -/// -/// it needs a distinction between `Item` and `RawItem` because -/// `&[T]` iterates on references pub trait InputIter { + /// the current input type is a sequence of that `Item` type. + /// + /// example: `u8` for `&[u8]` or `char` for &str` type Item; - type RawItem; + /// an iterator over the input type, producing the item and its position + /// for use with [Slice]. If we're iterating over `&str`, the position + /// corresponds to the byte index of the character type Iter: Iterator; + + /// an iterator over the input type, producing the item type IterElem: Iterator; /// returns an iterator over the elements and their byte offsets @@ -336,7 +318,7 @@ pub trait InputIter { /// finds the byte position of the element fn position
<P>
(&self, predicate: P) -> Option where - P: Fn(Self::RawItem) -> bool; + P: Fn(Self::Item) -> bool; /// get the byte offset from the element's position in the stream fn slice_index(&self, count: usize) -> Option; } @@ -355,7 +337,6 @@ fn star(r_u8: &u8) -> u8 { impl<'a> InputIter for &'a [u8] { type Item = u8; - type RawItem = u8; type Iter = Enumerate; type IterElem = Map, fn(&u8) -> u8>; @@ -398,7 +379,6 @@ impl<'a> InputTake for &'a [u8] { impl<'a> InputIter for &'a str { type Item = char; - type RawItem = char; type Iter = CharIndices<'a>; type IterElem = Chars<'a>; #[inline] @@ -411,7 +391,7 @@ impl<'a> InputIter for &'a str { } fn position
<P>
(&self, predicate: P) -> Option where - P: Fn(Self::RawItem) -> bool, + P: Fn(Self::Item) -> bool, { for (o, c) in self.char_indices() { if predicate(c) { @@ -453,67 +433,100 @@ impl<'a> InputTake for &'a str { /// /// When implementing a custom input type, it is possible to use directly the /// default implementation: if the input type implements `InputLength`, `InputIter`, -/// `InputTake`, `AtEof` and `Clone`, you can implement `UnspecializedInput` and get +/// `InputTake` and `Clone`, you can implement `UnspecializedInput` and get /// a default version of `InputTakeAtPosition`. /// /// For performance reasons, you might want to write a custom implementation of /// `InputTakeAtPosition` (like the one for `&[u8]`). pub trait UnspecializedInput {} -use types::CompleteStr; -use types::CompleteByteSlice; - /// methods to take as much input as possible until the provided function returns true for the current element /// /// a large part of nom's basic parsers are built using this trait pub trait InputTakeAtPosition: Sized { + /// the current input type is a sequence of that `Item` type. + /// + /// example: `u8` for `&[u8]` or `char` for &str` type Item; - fn split_at_position
<P>
(&self, predicate: P) -> IResult + /// looks for the first element of the input type for which the condition returns true, + /// and returns the input up to this position + /// + /// *streaming version*: if no element is found matching the condition, this will return `Incomplete` + fn split_at_position>(&self, predicate: P) -> IResult + where + P: Fn(Self::Item) -> bool; + + /// looks for the first element of the input type for which the condition returns true + /// and returns the input up to this position + /// + /// fails if the produced slice is empty + /// + /// *streaming version*: if no element is found matching the condition, this will return `Incomplete` + fn split_at_position1>(&self, predicate: P, e: ErrorKind) -> IResult where P: Fn(Self::Item) -> bool; - fn split_at_position1
<P>
(&self, predicate: P, e: ErrorKind) -> IResult + + /// looks for the first element of the input type for which the condition returns true, + /// and returns the input up to this position + /// + /// *complete version*: if no element is found matching the condition, this will return the whole input + fn split_at_position_complete>(&self, predicate: P) -> IResult + where + P: Fn(Self::Item) -> bool; + + /// looks for the first element of the input type for which the condition returns true + /// and returns the input up to this position + /// + /// fails if the produced slice is empty + /// + /// *complete version*: if no element is found matching the condition, this will return the whole input + fn split_at_position1_complete>(&self, predicate: P, e: ErrorKind) -> IResult where P: Fn(Self::Item) -> bool; } -impl InputTakeAtPosition for T { - type Item = ::RawItem; +impl InputTakeAtPosition for T { + type Item = ::Item; - fn split_at_position
<P>
(&self, predicate: P) -> IResult + fn split_at_position>(&self, predicate: P) -> IResult where P: Fn(Self::Item) -> bool, { match self.position(predicate) { Some(n) => Ok(self.take_split(n)), - None => { - if self.at_eof() { - Ok(self.take_split(self.input_len())) - } else { - Err(Err::Incomplete(Needed::Size(1))) - } - } + None => Err(Err::Incomplete(Needed::Size(1))), } } - fn split_at_position1
<P>
(&self, predicate: P, e: ErrorKind) -> IResult + fn split_at_position1>(&self, predicate: P, e: ErrorKind) -> IResult where P: Fn(Self::Item) -> bool, { match self.position(predicate) { - Some(0) => Err(Err::Error(Context::Code(self.clone(), e))), + Some(0) => Err(Err::Error(E::from_error_kind(self.clone(), e))), Some(n) => Ok(self.take_split(n)), - None => { - if self.at_eof() { - if self.input_len() == 0 { - Err(Err::Error(Context::Code(self.clone(), e))) - } else { - Ok(self.take_split(self.input_len())) - } - } else { - Err(Err::Incomplete(Needed::Size(1))) - } + None => Err(Err::Incomplete(Needed::Size(1))), + } + } + + fn split_at_position_complete>(&self, predicate: P) -> IResult + where P: Fn(Self::Item) -> bool { + match self.split_at_position(predicate) { + Err(Err::Incomplete(_)) => Ok(self.take_split(self.input_len())), + res => res, + } + } + + fn split_at_position1_complete>(&self, predicate: P, e: ErrorKind) -> IResult + where P: Fn(Self::Item) -> bool { + match self.split_at_position1(predicate, e) { + Err(Err::Incomplete(_)) => if self.input_len() == 0 { + Err(Err::Error(E::from_error_kind(self.clone(), e))) + } else { + Ok(self.take_split(self.input_len())) } + res => res, } } } @@ -521,7 +534,7 @@ impl InputTakeAtPosition for &'a [u8] { type Item = u8; - fn split_at_position
<P>
(&self, predicate: P) -> IResult + fn split_at_position>(&self, predicate: P) -> IResult where P: Fn(Self::Item) -> bool, { @@ -531,57 +544,37 @@ impl<'a> InputTakeAtPosition for &'a [u8] { } } - fn split_at_position1
<P>
(&self, predicate: P, e: ErrorKind) -> IResult + fn split_at_position1>(&self, predicate: P, e: ErrorKind) -> IResult where P: Fn(Self::Item) -> bool, { match (0..self.len()).find(|b| predicate(self[*b])) { - Some(0) => Err(Err::Error(Context::Code(self, e))), + Some(0) => Err(Err::Error(E::from_error_kind(self, e))), Some(i) => Ok((&self[i..], &self[..i])), None => Err(Err::Incomplete(Needed::Size(1))), } } -} - -impl<'a> InputTakeAtPosition for CompleteByteSlice<'a> { - type Item = u8; - fn split_at_position
<P>
(&self, predicate: P) -> IResult - where - P: Fn(Self::Item) -> bool, - { - match (0..self.0.len()).find(|b| predicate(self.0[*b])) { - Some(i) => Ok(( - CompleteByteSlice(&self.0[i..]), - CompleteByteSlice(&self.0[..i]), - )), - None => { - let (i, o) = self.0.take_split(self.0.len()); - Ok((CompleteByteSlice(i), CompleteByteSlice(o))) - } + fn split_at_position_complete>(&self, predicate: P) -> IResult + where P: Fn(Self::Item) -> bool { + match (0..self.len()).find(|b| predicate(self[*b])) { + Some(i) => Ok((&self[i..], &self[..i])), + None => Ok(self.take_split(self.input_len())), } } - fn split_at_position1
<P>
(&self, predicate: P, e: ErrorKind) -> IResult - where - P: Fn(Self::Item) -> bool, - { - match (0..self.0.len()).find(|b| predicate(self.0[*b])) { - Some(0) => Err(Err::Error(Context::Code(CompleteByteSlice(self.0), e))), - Some(i) => Ok(( - CompleteByteSlice(&self.0[i..]), - CompleteByteSlice(&self.0[..i]), - )), + fn split_at_position1_complete>(&self, predicate: P, e: ErrorKind) -> IResult + where P: Fn(Self::Item) -> bool { + match (0..self.len()).find(|b| predicate(self[*b])) { + Some(0) => Err(Err::Error(E::from_error_kind(self, e))), + Some(i) => Ok((&self[i..], &self[..i])), None => { - if self.0.len() == 0 { - Err(Err::Error(Context::Code(CompleteByteSlice(self.0), e))) + if self.len() == 0 { + Err(Err::Error(E::from_error_kind(self, e))) } else { - Ok(( - CompleteByteSlice(&self.0[self.0.len()..]), - CompleteByteSlice(self.0), - )) + Ok(self.take_split(self.input_len())) } - } + }, } } } @@ -589,59 +582,47 @@ impl<'a> InputTakeAtPosition for CompleteByteSlice<'a> { impl<'a> InputTakeAtPosition for &'a str { type Item = char; - fn split_at_position
<P>
(&self, predicate: P) -> IResult + fn split_at_position>(&self, predicate: P) -> IResult where P: Fn(Self::Item) -> bool, { - match self.char_indices().find(|&(_, c)| predicate(c)) { - Some((i, _)) => Ok((&self[i..], &self[..i])), + match self.find(predicate) { + Some(i) => Ok((&self[i..], &self[..i])), None => Err(Err::Incomplete(Needed::Size(1))), } } - fn split_at_position1
<P>
(&self, predicate: P, e: ErrorKind) -> IResult + fn split_at_position1>(&self, predicate: P, e: ErrorKind) -> IResult where P: Fn(Self::Item) -> bool, { - match self.char_indices().find(|&(_, c)| predicate(c)) { - Some((0, _)) => Err(Err::Error(Context::Code(self, e))), - Some((i, _)) => Ok((&self[i..], &self[..i])), + match self.find(predicate) { + Some(0) => Err(Err::Error(E::from_error_kind(self, e))), + Some(i) => Ok((&self[i..], &self[..i])), None => Err(Err::Incomplete(Needed::Size(1))), } } -} - -impl<'a> InputTakeAtPosition for CompleteStr<'a> { - type Item = char; - fn split_at_position
<P>
(&self, predicate: P) -> IResult - where - P: Fn(Self::Item) -> bool, - { - match self.0.char_indices().find(|&(_, c)| predicate(c)) { - Some((i, _)) => Ok((CompleteStr(&self.0[i..]), CompleteStr(&self.0[..i]))), - None => { - let (i, o) = self.0.take_split(self.0.len()); - Ok((CompleteStr(i), CompleteStr(o))) - } + fn split_at_position_complete>(&self, predicate: P) -> IResult + where P: Fn(Self::Item) -> bool { + match self.find(predicate) { + Some(i) => Ok((&self[i..], &self[..i])), + None => Ok(self.take_split(self.input_len())) } } - fn split_at_position1
<P>
(&self, predicate: P, e: ErrorKind) -> IResult - where - P: Fn(Self::Item) -> bool, - { - match self.0.char_indices().find(|&(_, c)| predicate(c)) { - Some((0, _)) => Err(Err::Error(Context::Code(CompleteStr(self.0), e))), - Some((i, _)) => Ok((CompleteStr(&self.0[i..]), CompleteStr(&self.0[..i]))), + fn split_at_position1_complete>(&self, predicate: P, e: ErrorKind) -> IResult + where P: Fn(Self::Item) -> bool { + match self.find(predicate) { + Some(0) => Err(Err::Error(E::from_error_kind(self, e))), + Some(i) => Ok((&self[i..], &self[..i])), None => { - if self.0.len() == 0 { - Err(Err::Error(Context::Code(CompleteStr(self.0), e))) + if self.len() == 0 { + Err(Err::Error(E::from_error_kind(self, e))) } else { - let (i, o) = self.0.take_split(self.0.len()); - Ok((CompleteStr(i), CompleteStr(o))) + Ok(self.take_split(self.input_len())) } - } + }, } } } @@ -650,8 +631,11 @@ impl<'a> InputTakeAtPosition for CompleteStr<'a> { /// if more data was needed #[derive(Debug, PartialEq)] pub enum CompareResult { + /// comparison was successful Ok, + /// we need more data to be sure Incomplete, + /// comparison failed Error, } @@ -711,8 +695,8 @@ impl<'a, 'b> Compare<&'b [u8]> for &'a [u8] { let other = &t[..m]; if !reduced.iter().zip(other).all(|(a, b)| match (*a, *b) { - (0...64, 0...64) | (91...96, 91...96) | (123...255, 123...255) => a == b, - (65...90, 65...90) | (97...122, 97...122) | (65...90, 97...122) | (97...122, 65...90) => *a | 0b00_10_00_00 == *b | 0b00_10_00_00, + (0..=64, 0..=64) | (91..=96, 91..=96) | (123..=255, 123..=255) => a == b, + (65..=90, 65..=90) | (97..=122, 97..=122) | (65..=90, 97..=122) | (97..=122, 65..=90) => *a | 0b00_10_00_00 == *b | 0b00_10_00_00, _ => false, }) { CompareResult::Error @@ -753,7 +737,6 @@ impl<'a, 'b> Compare<&'b str> for &'a str { } //FIXME: this version is too simple and does not use the current locale - #[cfg(feature = "alloc")] #[inline(always)] fn compare_no_case(&self, t: &'b str) -> CompareResult { let pos = self @@ -772,16 +755,11 @@ impl<'a, 'b> Compare<&'b str> for &'a str { } } } - - #[cfg(not(feature = "alloc"))] - #[inline(always)] - fn compare_no_case(&self, _: &'b str) -> CompareResult { - unimplemented!() - } } -/// look for self in the given input stream +/// look for a token in self pub trait FindToken { + /// returns true if self contains the token fn find_token(&self, token: T) -> bool; } @@ -811,7 +789,12 @@ impl<'a, 'b> FindToken<&'a u8> for &'b str { impl<'a> FindToken for &'a [u8] { fn find_token(&self, token: char) -> bool { - memchr::memchr(token as u8, self).is_some() + for i in self.iter() { + if token as u8 == *i { + return true; + } + } + false } } @@ -828,6 +811,7 @@ impl<'a> FindToken for &'a str { /// look for a substring in self pub trait FindSubstring { + /// returns the byte position of the substring if it is found fn find_substring(&self, substr: T) -> Option; } @@ -883,6 +867,8 @@ impl<'a, 'b> FindSubstring<&'b str> for &'a str { /// used to integrate str's parse() method pub trait ParseTo { + /// succeeds if `parse()` succeeded. The byte slice implementation + /// will first convert it to a &str, then apply the `parse()` function fn parse_to(&self) -> Option; } @@ -904,7 +890,7 @@ impl<'a, R: FromStr> ParseTo for &'a str { /// `Index`, but can actually return /// something else than a `&[T]` or `&str` pub trait Slice { - #[inline(always)] + /// slices self according to the range argument fn slice(&self, range: R) -> Self; } @@ -947,56 +933,6 @@ macro_rules! slice_ranges_impl { slice_ranges_impl! 
{str} slice_ranges_impl! {[T]} -/// indicates whether more data can come later in input -/// -/// When working with complete data, like a file that was entirely loaded -/// in memory, you should use input types like `CompleteByteSlice` and -/// `CompleteStr` to wrap the data. The `at_eof` method of those types -/// always returns true, thus indicating to nom that it should not handle -/// partial data cases. -/// -/// When working will partial data, like data coming from the network in -/// buffers, the `at_eof` method can indicate if we expect more data to come, -/// and let nom know that some parsers could still handle more data -pub trait AtEof { - fn at_eof(&self) -> bool; -} - -pub fn need_more(input: I, needed: Needed) -> IResult { - if input.at_eof() { - Err(Err::Error(Context::Code(input, ErrorKind::Eof))) - } else { - Err(Err::Incomplete(needed)) - } -} - -pub fn need_more_err(input: I, needed: Needed, err: ErrorKind) -> IResult { - if input.at_eof() { - Err(Err::Error(Context::Code(input, err))) - } else { - Err(Err::Incomplete(needed)) - } -} - -// Tuple for bit parsing -impl AtEof for (I, T) { - fn at_eof(&self) -> bool { - self.0.at_eof() - } -} - -impl<'a, T> AtEof for &'a [T] { - fn at_eof(&self) -> bool { - false - } -} - -impl<'a> AtEof for &'a str { - fn at_eof(&self) -> bool { - false - } -} - macro_rules! array_impls { ($($N:expr)+) => { $( @@ -1060,16 +996,21 @@ array_impls! { 30 31 32 } -/// abtracts something which can extend an `Extend` +/// abstracts something which can extend an `Extend` +/// used to build modified input slices in `escaped_transform` pub trait ExtendInto { + + /// the current input type is a sequence of that `Item` type. + /// + /// example: `u8` for `&[u8]` or `char` for &str` type Item; + + /// the type that will be produced type Extender: Extend; /// create a new `Extend` of the correct type - #[inline] fn new_builder(&self) -> Self::Extender; /// accumulate the input into an accumulator - #[inline] fn extend_into(&self, acc: &mut Self::Extender); } @@ -1088,6 +1029,22 @@ impl ExtendInto for [u8] { } } +#[cfg(feature = "alloc")] +impl ExtendInto for &[u8] { + type Item = u8; + type Extender = Vec; + + #[inline] + fn new_builder(&self) -> Vec { + Vec::new() + } + #[inline] + fn extend_into(&self, acc: &mut Vec) { + acc.extend(self.iter().cloned()); + } +} + + #[cfg(feature = "alloc")] impl ExtendInto for str { type Item = char; @@ -1103,6 +1060,21 @@ impl ExtendInto for str { } } +#[cfg(feature = "alloc")] +impl ExtendInto for &str { + type Item = char; + type Extender = String; + + #[inline] + fn new_builder(&self) -> String { + String::new() + } + #[inline] + fn extend_into(&self, acc: &mut String) { + acc.push_str(self); + } +} + #[cfg(feature = "alloc")] impl ExtendInto for char { type Item = char; @@ -1118,6 +1090,72 @@ impl ExtendInto for char { } } +/// Helper trait to convert numbers to usize +/// +/// by default, usize implements `From` and `From` but not +/// `From` and `From` because that would be invalid on some +/// platforms. 
This trait implements the conversion for platforms +/// with 32 and 64 bits pointer platforms +pub trait ToUsize { + /// converts self to usize + fn to_usize(&self) -> usize; +} + +impl ToUsize for u8 { + #[inline] + fn to_usize(&self) -> usize { + *self as usize + } +} + +impl ToUsize for u16 { + #[inline] + fn to_usize(&self) -> usize { + *self as usize + } +} + +impl ToUsize for usize { + #[inline] + fn to_usize(&self) -> usize { + *self + } +} + +#[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))] +impl ToUsize for u32 { + #[inline] + fn to_usize(&self) -> usize { + *self as usize + } +} + +#[cfg(target_pointer_width = "64")] +impl ToUsize for u64 { + #[inline] + fn to_usize(&self) -> usize { + *self as usize + } +} + +/// equivalent From implementation to avoid orphan rules in bits parsers +pub trait ErrorConvert { + /// transform to another error type + fn convert(self) -> E; +} + +impl ErrorConvert<(I, ErrorKind)> for ((I, usize), ErrorKind) { + fn convert(self) -> (I, ErrorKind) { + ((self.0).0, self.1) + } +} + +impl ErrorConvert<((I, usize), ErrorKind)> for (I, ErrorKind) { + fn convert(self) -> ((I, usize), ErrorKind) { + ((self.0, 0), self.1) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/third_party/rust/nom/src/types.rs b/third_party/rust/nom/src/types.rs deleted file mode 100644 index 113d20ef6b..0000000000 --- a/third_party/rust/nom/src/types.rs +++ /dev/null @@ -1,528 +0,0 @@ -//! Custom input types -//! - -use traits::{AsBytes, AtEof, Compare, CompareResult, FindSubstring, FindToken, InputIter, InputLength, InputTake, Offset, ParseTo, Slice}; - -#[cfg(feature = "alloc")] -use traits::ExtendInto; - -use lib::std::iter::{Enumerate, Map}; -use lib::std::ops::{Deref, Range, RangeFrom, RangeFull, RangeTo}; -use lib::std::slice::Iter; -use lib::std::str::{self, CharIndices, Chars, FromStr}; -use lib::std::convert::From; -use lib::std::fmt::{Display, Formatter, Result}; -#[cfg(feature = "alloc")] -use lib::std::string::String; - -/// Holds a complete String, for which the `at_eof` method always returns true -/// -/// This means that this input type will completely avoid nom's streaming features -/// and `Incomplete` results. 
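The `CompleteStr` wrapper deleted below was nom 4's way of opting out of `Incomplete` results; in nom 5 that choice is made per parser by picking from the `streaming` or `complete` submodules instead. A minimal sketch of the difference, assuming nom 5's `character` module (illustrative only, not part of this patch):

```rust
use nom::character::{complete, streaming};
use nom::{Err, IResult};

fn main() {
    // streaming: running out of input means "feed me more", not failure
    let s: IResult<&str, &str> = streaming::digit1("123");
    assert!(matches!(s, Err(Err::Incomplete(_))));

    // complete: end of input is final, so the digits are simply returned
    let c: IResult<&str, &str> = complete::digit1("123");
    assert_eq!(c, Ok(("", "123")));
}
```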
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] -pub struct CompleteStr<'a>(pub &'a str); - -impl<'a> From<&'a str> for CompleteStr<'a> { - fn from(src: &'a str) -> Self { - CompleteStr(src) - } -} - -impl<'a, 'b> From<&'b &'a str> for CompleteStr<'a> { - fn from(src: &'b &'a str) -> Self { - CompleteStr(*src) - } -} - -impl<'a> Display for CompleteStr<'a> { - fn fmt(&self, f: &mut Formatter) -> Result { - self.0.fmt(f) - } -} - -impl<'a> AsRef for CompleteStr<'a> { - fn as_ref(&self) -> &str { - self.0 - } -} - -impl<'a> Deref for CompleteStr<'a> { - type Target = &'a str; - - #[inline] - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl<'a> AtEof for CompleteStr<'a> { - #[inline] - fn at_eof(&self) -> bool { - true - } -} - -impl<'a> Slice> for CompleteStr<'a> { - #[inline] - fn slice(&self, range: Range) -> Self { - CompleteStr(self.0.slice(range)) - } -} - -impl<'a> Slice> for CompleteStr<'a> { - #[inline] - fn slice(&self, range: RangeTo) -> Self { - CompleteStr(self.0.slice(range)) - } -} - -impl<'a> Slice> for CompleteStr<'a> { - #[inline] - fn slice(&self, range: RangeFrom) -> Self { - CompleteStr(self.0.slice(range)) - } -} - -impl<'a> Slice for CompleteStr<'a> { - #[inline] - fn slice(&self, range: RangeFull) -> Self { - CompleteStr(self.0.slice(range)) - } -} - -impl<'a> InputIter for CompleteStr<'a> { - type Item = char; - type RawItem = char; - type Iter = CharIndices<'a>; - type IterElem = Chars<'a>; - - fn iter_indices(&self) -> Self::Iter { - self.0.iter_indices() - } - fn iter_elements(&self) -> Self::IterElem { - self.0.iter_elements() - } - fn position
<P>
(&self, predicate: P) -> Option - where - P: Fn(Self::RawItem) -> bool, - { - self.0.position(predicate) - } - fn slice_index(&self, count: usize) -> Option { - self.0.slice_index(count) - } -} - -impl<'a> InputTake for CompleteStr<'a> { - fn take(&self, count: usize) -> Self { - CompleteStr(self.0.take(count)) - } - - fn take_split(&self, count: usize) -> (Self, Self) { - let (left, right) = self.0.take_split(count); - (CompleteStr(left), CompleteStr(right)) - } -} - -impl<'a> InputLength for CompleteStr<'a> { - fn input_len(&self) -> usize { - self.0.input_len() - } -} - -impl<'a, 'b> Compare<&'b str> for CompleteStr<'a> { - fn compare(&self, t: &'b str) -> CompareResult { - self.0.compare(t) - } - fn compare_no_case(&self, t: &'b str) -> CompareResult { - self.0.compare_no_case(t) - } -} - -impl<'a, 'b> FindSubstring<&'b str> for CompleteStr<'a> { - fn find_substring(&self, substr: &'b str) -> Option { - self.0.find_substring(substr) - } -} - -impl<'a> FindToken for CompleteStr<'a> { - fn find_token(&self, token: char) -> bool { - self.0.find_token(token) - } -} - -impl<'a> FindToken for CompleteStr<'a> { - fn find_token(&self, token: u8) -> bool { - self.0.find_token(token) - } -} - -impl<'a, 'b> FindToken<&'a u8> for CompleteStr<'b> { - fn find_token(&self, token: &u8) -> bool { - self.0.find_token(token) - } -} - -impl<'a, R: FromStr> ParseTo for CompleteStr<'a> { - fn parse_to(&self) -> Option { - self.0.parse().ok() - } -} - -impl<'a> Offset for CompleteStr<'a> { - fn offset(&self, second: &CompleteStr<'a>) -> usize { - self.0.offset(second.0) - } -} - -impl<'a> AsBytes for CompleteStr<'a> { - fn as_bytes(&self) -> &[u8] { - AsBytes::as_bytes(self.0) - } -} - -#[cfg(feature = "alloc")] -impl<'a> ExtendInto for CompleteStr<'a> { - type Item = char; - type Extender = String; - - #[inline] - fn new_builder(&self) -> String { - String::new() - } - #[inline] - fn extend_into(&self, acc: &mut String) { - acc.extend(self.0.chars()); - } -} - -/// Holds a complete byte array, for which the `at_eof` method always returns true -/// -/// This means that this input type will completely avoid nom's streaming features -/// and `Incomplete` results. 
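`CompleteByteSlice` follows the same migration path: byte-oriented parsers now come in `bytes::streaming` and `bytes::complete` flavours, and the count passed to `take` goes through the `ToUsize` trait added earlier in this patch. A rough sketch, assuming nom 5's `bytes` module (illustrative only, not part of this patch):

```rust
use nom::{bytes, Err, IResult};

fn main() {
    let short = &b"ab"[..];

    // streaming take: two bytes may just be the start of a longer frame
    let s: IResult<&[u8], &[u8]> = bytes::streaming::take(4usize)(short);
    assert!(matches!(s, Err(Err::Incomplete(_))));

    // complete take: the slice is all there is, so a short read is a hard error
    let c: IResult<&[u8], &[u8]> = bytes::complete::take(4usize)(short);
    assert!(c.is_err());
}
```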
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] -pub struct CompleteByteSlice<'a>(pub &'a [u8]); - -impl<'a> From<&'a [u8]> for CompleteByteSlice<'a> { - fn from(src: &'a [u8]) -> Self { - CompleteByteSlice(src) - } -} - -impl<'a, 'b> From<&'b &'a [u8]> for CompleteByteSlice<'a> { - fn from(src: &'b &'a [u8]) -> Self { - CompleteByteSlice(*src) - } -} - -impl<'a> Deref for CompleteByteSlice<'a> { - type Target = &'a [u8]; - - #[inline] - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl<'a> AtEof for CompleteByteSlice<'a> { - #[inline] - fn at_eof(&self) -> bool { - true - } -} - -impl<'a> Slice> for CompleteByteSlice<'a> { - #[inline] - fn slice(&self, range: Range) -> Self { - CompleteByteSlice(self.0.slice(range)) - } -} - -impl<'a> Slice> for CompleteByteSlice<'a> { - #[inline] - fn slice(&self, range: RangeTo) -> Self { - CompleteByteSlice(self.0.slice(range)) - } -} - -impl<'a> Slice> for CompleteByteSlice<'a> { - #[inline] - fn slice(&self, range: RangeFrom) -> Self { - CompleteByteSlice(self.0.slice(range)) - } -} - -impl<'a> Slice for CompleteByteSlice<'a> { - #[inline] - fn slice(&self, range: RangeFull) -> Self { - CompleteByteSlice(self.0.slice(range)) - } -} - -impl<'a> InputIter for CompleteByteSlice<'a> { - type Item = u8; - type RawItem = u8; - type Iter = Enumerate; - type IterElem = Map, fn(&u8) -> u8>; //Iter<'a, Self::RawItem>; - - fn iter_indices(&self) -> Self::Iter { - self.0.iter_indices() - } - fn iter_elements(&self) -> Self::IterElem { - self.0.iter_elements() - } - fn position
<P>
(&self, predicate: P) -> Option - where - P: Fn(Self::RawItem) -> bool, - { - self.0.position(predicate) - } - fn slice_index(&self, count: usize) -> Option { - self.0.slice_index(count) - } -} - -impl<'a> InputTake for CompleteByteSlice<'a> { - fn take(&self, count: usize) -> Self { - CompleteByteSlice(self.0.take(count)) - } - - fn take_split(&self, count: usize) -> (Self, Self) { - let (left, right) = self.0.take_split(count); - (CompleteByteSlice(left), CompleteByteSlice(right)) - } -} - -impl<'a> InputLength for CompleteByteSlice<'a> { - fn input_len(&self) -> usize { - self.0.input_len() - } -} - -impl<'a, 'b> Compare<&'b [u8]> for CompleteByteSlice<'a> { - fn compare(&self, t: &'b [u8]) -> CompareResult { - self.0.compare(t) - } - fn compare_no_case(&self, t: &'b [u8]) -> CompareResult { - self.0.compare_no_case(t) - } -} - -impl<'a, 'b> Compare<&'b str> for CompleteByteSlice<'a> { - fn compare(&self, t: &'b str) -> CompareResult { - self.0.compare(t) - } - fn compare_no_case(&self, t: &'b str) -> CompareResult { - self.0.compare_no_case(t) - } -} - -impl<'a, 'b> FindSubstring<&'b [u8]> for CompleteByteSlice<'a> { - fn find_substring(&self, substr: &'b [u8]) -> Option { - self.0.find_substring(substr) - } -} - -impl<'a, 'b> FindSubstring<&'b str> for CompleteByteSlice<'a> { - fn find_substring(&self, substr: &'b str) -> Option { - self.0.find_substring(substr) - } -} - -impl<'a> FindToken for CompleteByteSlice<'a> { - fn find_token(&self, token: char) -> bool { - self.0.find_token(token) - } -} - -impl<'a> FindToken for CompleteByteSlice<'a> { - fn find_token(&self, token: u8) -> bool { - self.0.find_token(token) - } -} - -impl<'a, 'b> FindToken<&'a u8> for CompleteByteSlice<'b> { - fn find_token(&self, token: &u8) -> bool { - self.0.find_token(token) - } -} - -impl<'a, R: FromStr> ParseTo for CompleteByteSlice<'a> { - fn parse_to(&self) -> Option { - self.0.parse_to() - } -} - -impl<'a> Offset for CompleteByteSlice<'a> { - fn offset(&self, second: &CompleteByteSlice<'a>) -> usize { - self.0.offset(second.0) - } -} - -impl<'a> AsBytes for CompleteByteSlice<'a> { - fn as_bytes(&self) -> &[u8] { - self.0.as_bytes() - } -} - -#[cfg(feature = "std")] -impl<'a> super::util::HexDisplay for CompleteByteSlice<'a> { - fn to_hex(&self, chunk_size: usize) -> String { - self.0.to_hex(chunk_size) - } - - fn to_hex_from(&self, chunk_size: usize, from: usize) -> String { - self.0.to_hex_from(chunk_size, from) - } -} - -#[derive(Clone, Copy, Debug, PartialEq, Hash)] -pub struct Input { - pub inner: T, - pub at_eof: bool, -} - -impl AtEof for Input { - fn at_eof(&self) -> bool { - self.at_eof - } -} - -impl>> Slice> for Input { - fn slice(&self, range: Range) -> Self { - Input { - inner: self.inner.slice(range), - at_eof: self.at_eof, - } - } -} - -impl>> Slice> for Input { - fn slice(&self, range: RangeTo) -> Self { - Input { - inner: self.inner.slice(range), - at_eof: self.at_eof, - } - } -} - -impl>> Slice> for Input { - fn slice(&self, range: RangeFrom) -> Self { - Input { - inner: self.inner.slice(range), - at_eof: self.at_eof, - } - } -} - -impl> Slice for Input { - fn slice(&self, range: RangeFull) -> Self { - Input { - inner: self.inner.slice(range), - at_eof: self.at_eof, - } - } -} - -impl InputIter for Input { - type Item = ::Item; - type RawItem = ::RawItem; - type Iter = ::Iter; - type IterElem = ::IterElem; - - fn iter_indices(&self) -> Self::Iter { - self.inner.iter_indices() - } - fn iter_elements(&self) -> Self::IterElem { - self.inner.iter_elements() - } - fn position
<P>
(&self, predicate: P) -> Option - where - P: Fn(Self::RawItem) -> bool, - { - self.inner.position(predicate) - } - fn slice_index(&self, count: usize) -> Option { - self.inner.slice_index(count) - } -} - -impl InputTake for Input { - fn take(&self, count: usize) -> Self { - Input { - inner: self.inner.take(count), - at_eof: self.at_eof, - } - } - - fn take_split(&self, count: usize) -> (Self, Self) { - let (left, right) = self.inner.take_split(count); - ( - Input { - inner: left, - at_eof: self.at_eof, - }, - Input { - inner: right, - at_eof: self.at_eof, - }, - ) - } -} - -impl InputLength for Input { - fn input_len(&self) -> usize { - self.inner.input_len() - } -} - -impl<'b, T: Compare<&'b str>> Compare<&'b str> for Input { - fn compare(&self, t: &'b str) -> CompareResult { - self.inner.compare(t) - } - fn compare_no_case(&self, t: &'b str) -> CompareResult { - self.inner.compare_no_case(t) - } -} - -impl<'b, T: FindSubstring<&'b str>> FindSubstring<&'b str> for Input { - fn find_substring(&self, substr: &'b str) -> Option { - self.inner.find_substring(substr) - } -} - -impl> FindToken for Input { - fn find_token(&self, token: char) -> bool { - self.inner.find_token(token) - } -} - -impl> FindToken for Input { - fn find_token(&self, token: u8) -> bool { - self.inner.find_token(token) - } -} - -impl<'a, T: FindToken<&'a u8>> FindToken<&'a u8> for Input { - fn find_token(&self, token: &'a u8) -> bool { - self.inner.find_token(token) - } -} - -impl<'a, R: FromStr, T: ParseTo> ParseTo for Input { - fn parse_to(&self) -> Option { - self.inner.parse_to() - } -} - -impl Offset for Input { - fn offset(&self, second: &Input) -> usize { - self.inner.offset(&second.inner) - } -} - -impl AsBytes for Input { - fn as_bytes(&self) -> &[u8] { - AsBytes::as_bytes(&self.inner) - } -} diff --git a/third_party/rust/nom/src/util.rs b/third_party/rust/nom/src/util.rs index e1a5a48cad..488bbfb7d1 100644 --- a/third_party/rust/nom/src/util.rs +++ b/third_party/rust/nom/src/util.rs @@ -1,18 +1,10 @@ -#[cfg(feature = "verbose-errors")] #[cfg(feature = "std")] -use internal::{Err, IResult}; -#[cfg(feature = "verbose-errors")] -use verbose_errors::Context; - +use crate::internal::IResult; #[cfg(feature = "std")] -use std::collections::HashMap; - -#[cfg(feature = "alloc")] -use lib::std::string::ToString; -#[cfg(feature = "alloc")] -use lib::std::vec::Vec; +use std::fmt::Debug; #[cfg(feature = "std")] +/// Helper trait to show a byte slice as a hex dump pub trait HexDisplay { /// Converts the value of `self` to a hex dump, returning the owned /// string. @@ -87,6 +79,25 @@ impl HexDisplay for str { } } +#[doc(hidden)] +#[macro_export] +macro_rules! nom_line ( + () => (line!()); +); + +#[doc(hidden)] +#[macro_export] +macro_rules! nom_println ( + ($($args:tt)*) => (println!($($args)*)); +); + +#[doc(hidden)] +#[macro_export] +macro_rules! nom_stringify ( + ($($args:tt)*) => (stringify!($($args)*)); +); + + /// Prints a message if the parser fails /// /// The message prints the `Error` or `Incomplete` @@ -104,15 +115,15 @@ impl HexDisplay for str { /// f(a); /// # } /// ``` -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! 
dbg ( ($i: expr, $submac:ident!( $($args:tt)* )) => ( { use $crate::lib::std::result::Result::*; - let l = line!(); + let l = nom_line!(); match $submac!($i, $($args)*) { Err(e) => { - println!("Err({:?}) at l.{} by ' {} '", e, l, stringify!($submac!($($args)*))); + nom_println!("Err({:?}) at l.{} by ' {} '", e, l, nom_stringify!($submac!($($args)*))); Err(e) }, a => a, @@ -125,6 +136,41 @@ macro_rules! dbg ( ); ); +/// Prints a message and the input if the parser fails +/// +/// The message prints the `Error` or `Incomplete` +/// and the parser's calling code. +/// +/// It also displays the input in hexdump format +/// +/// ```rust +/// use nom::{IResult, dbg_dmp, bytes::complete::tag}; +/// +/// fn f(i: &[u8]) -> IResult<&[u8], &[u8]> { +/// dbg_dmp(tag("abcd"), "tag")(i) +/// } +/// +/// let a = &b"efghijkl"[..]; +/// +/// // Will print the following message: +/// // Error(Position(0, [101, 102, 103, 104, 105, 106, 107, 108])) at l.5 by ' tag ! ( "abcd" ) ' +/// // 00000000 65 66 67 68 69 6a 6b 6c efghijkl +/// f(a); +/// ``` +#[cfg(feature = "std")] +pub fn dbg_dmp<'a, F, O, E: Debug>(f: F, context: &'static str) -> impl Fn(&'a [u8]) -> IResult<&'a [u8], O, E> + where F: Fn(&'a [u8]) -> IResult<&'a [u8], O, E> { + move |i: &'a [u8]| { + match f(i) { + Err(e) => { + println!("{}: Error({:?}) at:\n{}", context, e, i.to_hex(8)); + Err(e) + }, + a => a, + } + } +} + /// Prints a message and the input if the parser fails /// /// The message prints the `Error` or `Incomplete` @@ -144,15 +190,16 @@ macro_rules! dbg ( /// // 00000000 65 66 67 68 69 6a 6b 6c efghijkl /// f(a); /// # } -#[macro_export] +#[macro_export(local_inner_macros)] +#[cfg(feature = "std")] macro_rules! dbg_dmp ( ($i: expr, $submac:ident!( $($args:tt)* )) => ( { use $crate::HexDisplay; - let l = line!(); + let l = nom_line!(); match $submac!($i, $($args)*) { Err(e) => { - println!("Error({:?}) at l.{} by ' {} '\n{}", e, l, stringify!($submac!($($args)*)), $i.to_hex(8)); + nom_println!("Error({:?}) at l.{} by ' {} '\n{}", e, l, nom_stringify!($submac!($($args)*)), $i.to_hex(8)); Err(e) }, a => a, @@ -165,552 +212,3 @@ macro_rules! 
dbg_dmp ( ); ); -#[cfg(feature = "verbose-errors")] -pub fn error_to_list(e: &Context) -> Vec<(P, ErrorKind)> { - match e { - &Context::Code(ref i, ref err) => { - let mut v = Vec::new(); - v.push((i.clone(), err.clone())); - return v; - } - &Context::List(ref v) => { - let mut v2 = v.clone(); - v2.reverse(); - v2 - } - } -} - -#[cfg(feature = "verbose-errors")] -pub fn compare_error_paths(e1: &Context, e2: &Context) -> bool { - error_to_list(e1) == error_to_list(e2) -} - -#[cfg(feature = "std")] -#[cfg(feature = "verbose-errors")] -use lib::std::hash::Hash; - -#[cfg(feature = "std")] -#[cfg(feature = "verbose-errors")] -pub fn add_error_pattern<'a, I: Clone + Hash + Eq, O, E: Clone + Hash + Eq>( - h: &mut HashMap)>, &'a str>, - res: IResult, - message: &'a str, -) -> bool { - match res { - Err(Err::Error(e)) | Err(Err::Failure(e)) => { - h.insert(error_to_list(&e), message); - true - } - _ => false, - } -} - -pub fn slice_to_offsets(input: &[u8], s: &[u8]) -> (usize, usize) { - let start = input.as_ptr(); - let off1 = s.as_ptr() as usize - start as usize; - let off2 = off1 + s.len(); - (off1, off2) -} - -#[cfg(feature = "std")] -#[cfg(feature = "verbose-errors")] -pub fn prepare_errors(input: &[u8], res: IResult<&[u8], O, E>) -> Option, usize, usize)>> { - if let Err(Err::Error(e)) = res { - let mut v: Vec<(ErrorKind, usize, usize)> = Vec::new(); - - match e { - Context::Code(p, kind) => { - let (o1, o2) = slice_to_offsets(input, p); - v.push((kind, o1, o2)); - } - Context::List(mut l) => { - for (p, kind) in l.drain(..) { - let (o1, o2) = slice_to_offsets(input, p); - v.push((kind, o1, o2)); - } - - v.reverse() - } - } - - v.sort_by(|a, b| a.1.cmp(&b.1)); - Some(v) - } else { - None - } -} - -#[cfg(feature = "std")] -#[cfg(feature = "verbose-errors")] -pub fn print_error(input: &[u8], res: IResult<&[u8], O, E>) { - if let Some(v) = prepare_errors(input, res) { - let colors = generate_colors(&v); - println!("parser codes: {}", print_codes(&colors, &HashMap::new())); - println!("{}", print_offsets(input, 0, &v)); - } else { - println!("not an error"); - } -} - -#[cfg(feature = "std")] -#[cfg(feature = "verbose-errors")] -pub fn generate_colors(v: &[(ErrorKind, usize, usize)]) -> HashMap { - let mut h: HashMap = HashMap::new(); - let mut color = 0; - - for &(ref c, _, _) in v.iter() { - h.insert(error_to_u32(c), color + 31); - color = color + 1 % 7; - } - - h -} - -pub fn code_from_offset(v: &[(ErrorKind, usize, usize)], offset: usize) -> Option { - let mut acc: Option<(u32, usize, usize)> = None; - for &(ref ek, s, e) in v.iter() { - let c = error_to_u32(ek); - if s <= offset && offset <= e { - if let Some((_, start, end)) = acc { - if start <= s && e <= end { - acc = Some((c, s, e)); - } - } else { - acc = Some((c, s, e)); - } - } - } - if let Some((code, _, _)) = acc { - return Some(code); - } else { - return None; - } -} - -#[cfg(feature = "alloc")] -pub fn reset_color(v: &mut Vec) { - v.push(0x1B); - v.push(b'['); - v.push(0); - v.push(b'm'); -} - -#[cfg(feature = "alloc")] -pub fn write_color(v: &mut Vec, color: u8) { - v.push(0x1B); - v.push(b'['); - v.push(1); - v.push(b';'); - let s = color.to_string(); - let bytes = s.as_bytes(); - v.extend(bytes.iter().cloned()); - v.push(b'm'); -} - -#[cfg(feature = "std")] -#[cfg_attr(feature = "cargo-clippy", allow(implicit_hasher))] -pub fn print_codes(colors: &HashMap, names: &HashMap) -> String { - let mut v = Vec::new(); - for (code, &color) in colors { - if let Some(&s) = names.get(code) { - let bytes = s.as_bytes(); - write_color(&mut v, 
color); - v.extend(bytes.iter().cloned()); - } else { - let s = code.to_string(); - let bytes = s.as_bytes(); - write_color(&mut v, color); - v.extend(bytes.iter().cloned()); - } - reset_color(&mut v); - v.push(b' '); - } - reset_color(&mut v); - - String::from_utf8_lossy(&v[..]).into_owned() -} - -#[cfg(feature = "std")] -#[cfg(feature = "verbose-errors")] -pub fn print_offsets(input: &[u8], from: usize, offsets: &[(ErrorKind, usize, usize)]) -> String { - let mut v = Vec::with_capacity(input.len() * 3); - let mut i = from; - let chunk_size = 8; - let mut current_code: Option = None; - let mut current_code2: Option = None; - - let colors = generate_colors(&offsets); - - for chunk in input.chunks(chunk_size) { - let s = format!("{:08x}", i); - for &ch in s.as_bytes().iter() { - v.push(ch); - } - v.push(b'\t'); - - let mut k = i; - let mut l = i; - for &byte in chunk { - if let Some(code) = code_from_offset(&offsets, k) { - if let Some(current) = current_code { - if current != code { - reset_color(&mut v); - current_code = Some(code); - if let Some(&color) = colors.get(&code) { - write_color(&mut v, color); - } - } - } else { - current_code = Some(code); - if let Some(&color) = colors.get(&code) { - write_color(&mut v, color); - } - } - } - v.push(CHARS[(byte >> 4) as usize]); - v.push(CHARS[(byte & 0xf) as usize]); - v.push(b' '); - k = k + 1; - } - - reset_color(&mut v); - - if chunk_size > chunk.len() { - for _ in 0..(chunk_size - chunk.len()) { - v.push(b' '); - v.push(b' '); - v.push(b' '); - } - } - v.push(b'\t'); - - for &byte in chunk { - if let Some(code) = code_from_offset(&offsets, l) { - if let Some(current) = current_code2 { - if current != code { - reset_color(&mut v); - current_code2 = Some(code); - if let Some(&color) = colors.get(&code) { - write_color(&mut v, color); - } - } - } else { - current_code2 = Some(code); - if let Some(&color) = colors.get(&code) { - write_color(&mut v, color); - } - } - } - if (byte >= 32 && byte <= 126) || byte >= 128 { - v.push(byte); - } else { - v.push(b'.'); - } - l = l + 1; - } - reset_color(&mut v); - - v.push(b'\n'); - i = i + chunk_size; - } - - String::from_utf8_lossy(&v[..]).into_owned() -} - -/// indicates which parser returned an error -#[cfg_attr(rustfmt, rustfmt_skip)] -#[derive(Debug,PartialEq,Eq,Hash,Clone)] -#[allow(deprecated)] -pub enum ErrorKind { - Custom(E), - Tag, - MapRes, - MapOpt, - Alt, - IsNot, - IsA, - SeparatedList, - SeparatedNonEmptyList, - Many0, - Many1, - ManyTill, - Count, - TakeUntilAndConsume, - TakeUntil, - TakeUntilEitherAndConsume, - TakeUntilEither, - LengthValue, - TagClosure, - Alpha, - Digit, - HexDigit, - OctDigit, - AlphaNumeric, - Space, - MultiSpace, - LengthValueFn, - Eof, - ExprOpt, - ExprRes, - CondReduce, - Switch, - TagBits, - OneOf, - NoneOf, - Char, - CrLf, - RegexpMatch, - RegexpMatches, - RegexpFind, - RegexpCapture, - RegexpCaptures, - TakeWhile1, - Complete, - Fix, - Escaped, - EscapedTransform, - #[deprecated(since = "4.0.0", note = "Please use `Tag` instead")] - TagStr, - #[deprecated(since = "4.0.0", note = "Please use `IsNot` instead")] - IsNotStr, - #[deprecated(since = "4.0.0", note = "Please use `IsA` instead")] - IsAStr, - #[deprecated(since = "4.0.0", note = "Please use `TakeWhile1` instead")] - TakeWhile1Str, - NonEmpty, - ManyMN, - #[deprecated(since = "4.0.0", note = "Please use `TakeUntilAndConsume` instead")] - TakeUntilAndConsumeStr, - #[deprecated(since = "4.0.0", note = "Please use `TakeUntil` instead")] - TakeUntilStr, - Not, - Permutation, - Verify, - TakeTill1, - 
TakeUntilAndConsume1, - TakeWhileMN, - ParseTo, -} - -#[cfg_attr(rustfmt, rustfmt_skip)] -#[allow(deprecated)] -pub fn error_to_u32(e: &ErrorKind) -> u32 { - match *e { - ErrorKind::Custom(_) => 0, - ErrorKind::Tag => 1, - ErrorKind::MapRes => 2, - ErrorKind::MapOpt => 3, - ErrorKind::Alt => 4, - ErrorKind::IsNot => 5, - ErrorKind::IsA => 6, - ErrorKind::SeparatedList => 7, - ErrorKind::SeparatedNonEmptyList => 8, - ErrorKind::Many1 => 9, - ErrorKind::Count => 10, - ErrorKind::TakeUntilAndConsume => 11, - ErrorKind::TakeUntil => 12, - ErrorKind::TakeUntilEitherAndConsume => 13, - ErrorKind::TakeUntilEither => 14, - ErrorKind::LengthValue => 15, - ErrorKind::TagClosure => 16, - ErrorKind::Alpha => 17, - ErrorKind::Digit => 18, - ErrorKind::AlphaNumeric => 19, - ErrorKind::Space => 20, - ErrorKind::MultiSpace => 21, - ErrorKind::LengthValueFn => 22, - ErrorKind::Eof => 23, - ErrorKind::ExprOpt => 24, - ErrorKind::ExprRes => 25, - ErrorKind::CondReduce => 26, - ErrorKind::Switch => 27, - ErrorKind::TagBits => 28, - ErrorKind::OneOf => 29, - ErrorKind::NoneOf => 30, - ErrorKind::Char => 40, - ErrorKind::CrLf => 41, - ErrorKind::RegexpMatch => 42, - ErrorKind::RegexpMatches => 43, - ErrorKind::RegexpFind => 44, - ErrorKind::RegexpCapture => 45, - ErrorKind::RegexpCaptures => 46, - ErrorKind::TakeWhile1 => 47, - ErrorKind::Complete => 48, - ErrorKind::Fix => 49, - ErrorKind::Escaped => 50, - ErrorKind::EscapedTransform => 51, - ErrorKind::TagStr => 52, - ErrorKind::IsNotStr => 53, - ErrorKind::IsAStr => 54, - ErrorKind::TakeWhile1Str => 55, - ErrorKind::NonEmpty => 56, - ErrorKind::ManyMN => 57, - ErrorKind::TakeUntilAndConsumeStr => 58, - ErrorKind::HexDigit => 59, - ErrorKind::TakeUntilStr => 60, - ErrorKind::OctDigit => 61, - ErrorKind::Many0 => 62, - ErrorKind::Not => 63, - ErrorKind::Permutation => 64, - ErrorKind::ManyTill => 65, - ErrorKind::Verify => 66, - ErrorKind::TakeTill1 => 67, - ErrorKind::TakeUntilAndConsume1 => 68, - ErrorKind::TakeWhileMN => 69, - ErrorKind::ParseTo => 70, - } -} - -impl ErrorKind { - #[cfg_attr(rustfmt, rustfmt_skip)] - #[allow(deprecated)] - pub fn description(&self) -> &str { - match *self { - ErrorKind::Custom(_) => "Custom error", - ErrorKind::Tag => "Tag", - ErrorKind::MapRes => "Map on Result", - ErrorKind::MapOpt => "Map on Option", - ErrorKind::Alt => "Alternative", - ErrorKind::IsNot => "IsNot", - ErrorKind::IsA => "IsA", - ErrorKind::SeparatedList => "Separated list", - ErrorKind::SeparatedNonEmptyList => "Separated non empty list", - ErrorKind::Many0 => "Many0", - ErrorKind::Many1 => "Many1", - ErrorKind::Count => "Count", - ErrorKind::TakeUntilAndConsume => "Take until and consume", - ErrorKind::TakeUntil => "Take until", - ErrorKind::TakeUntilEitherAndConsume => "Take until either and consume", - ErrorKind::TakeUntilEither => "Take until either", - ErrorKind::LengthValue => "Length followed by value", - ErrorKind::TagClosure => "Tag closure", - ErrorKind::Alpha => "Alphabetic", - ErrorKind::Digit => "Digit", - ErrorKind::AlphaNumeric => "AlphaNumeric", - ErrorKind::Space => "Space", - ErrorKind::MultiSpace => "Multiple spaces", - ErrorKind::LengthValueFn => "LengthValueFn", - ErrorKind::Eof => "End of file", - ErrorKind::ExprOpt => "Evaluate Option", - ErrorKind::ExprRes => "Evaluate Result", - ErrorKind::CondReduce => "Condition reduce", - ErrorKind::Switch => "Switch", - ErrorKind::TagBits => "Tag on bitstream", - ErrorKind::OneOf => "OneOf", - ErrorKind::NoneOf => "NoneOf", - ErrorKind::Char => "Char", - ErrorKind::CrLf => "CrLf", - 
ErrorKind::RegexpMatch => "RegexpMatch", - ErrorKind::RegexpMatches => "RegexpMatches", - ErrorKind::RegexpFind => "RegexpFind", - ErrorKind::RegexpCapture => "RegexpCapture", - ErrorKind::RegexpCaptures => "RegexpCaptures", - ErrorKind::TakeWhile1 => "TakeWhile1", - ErrorKind::Complete => "Complete", - ErrorKind::Fix => "Fix", - ErrorKind::Escaped => "Escaped", - ErrorKind::EscapedTransform => "EscapedTransform", - ErrorKind::TagStr => "Tag on strings", - ErrorKind::IsNotStr => "IsNot on strings", - ErrorKind::IsAStr => "IsA on strings", - ErrorKind::TakeWhile1Str => "TakeWhile1 on strings", - ErrorKind::NonEmpty => "NonEmpty", - ErrorKind::ManyMN => "Many(m, n)", - ErrorKind::TakeUntilAndConsumeStr => "Take until and consume on strings", - ErrorKind::HexDigit => "Hexadecimal Digit", - ErrorKind::TakeUntilStr => "Take until on strings", - ErrorKind::OctDigit => "Octal digit", - ErrorKind::Not => "Negation", - ErrorKind::Permutation => "Permutation", - ErrorKind::ManyTill => "ManyTill", - ErrorKind::Verify => "predicate verification", - ErrorKind::TakeTill1 => "TakeTill1", - ErrorKind::TakeUntilAndConsume1 => "Take at least 1 until and consume", - ErrorKind::TakeWhileMN => "TakeWhileMN", - ErrorKind::ParseTo => "Parse string to the specified type", - } - } - - /// Convert Err into an ErrorKind. - /// - /// This allows application code to use ErrorKind and stay independent from the `verbose-errors` features activation. - pub fn into_error_kind(self) -> ErrorKind { - self - } -} - -pub trait Convert { - fn convert(T) -> Self; -} - -impl> Convert> for ErrorKind { - #[cfg_attr(rustfmt, rustfmt_skip)] - #[allow(deprecated)] - fn convert(e: ErrorKind) -> Self { - match e { - ErrorKind::Custom(c) => ErrorKind::Custom(E::from(c)), - ErrorKind::Tag => ErrorKind::Tag, - ErrorKind::MapRes => ErrorKind::MapRes, - ErrorKind::MapOpt => ErrorKind::MapOpt, - ErrorKind::Alt => ErrorKind::Alt, - ErrorKind::IsNot => ErrorKind::IsNot, - ErrorKind::IsA => ErrorKind::IsA, - ErrorKind::SeparatedList => ErrorKind::SeparatedList, - ErrorKind::SeparatedNonEmptyList => ErrorKind::SeparatedNonEmptyList, - ErrorKind::Many1 => ErrorKind::Many1, - ErrorKind::Count => ErrorKind::Count, - ErrorKind::TakeUntilAndConsume => ErrorKind::TakeUntilAndConsume, - ErrorKind::TakeUntil => ErrorKind::TakeUntil, - ErrorKind::TakeUntilEitherAndConsume => ErrorKind::TakeUntilEitherAndConsume, - ErrorKind::TakeUntilEither => ErrorKind::TakeUntilEither, - ErrorKind::LengthValue => ErrorKind::LengthValue, - ErrorKind::TagClosure => ErrorKind::TagClosure, - ErrorKind::Alpha => ErrorKind::Alpha, - ErrorKind::Digit => ErrorKind::Digit, - ErrorKind::AlphaNumeric => ErrorKind::AlphaNumeric, - ErrorKind::Space => ErrorKind::Space, - ErrorKind::MultiSpace => ErrorKind::MultiSpace, - ErrorKind::LengthValueFn => ErrorKind::LengthValueFn, - ErrorKind::Eof => ErrorKind::Eof, - ErrorKind::ExprOpt => ErrorKind::ExprOpt, - ErrorKind::ExprRes => ErrorKind::ExprRes, - ErrorKind::CondReduce => ErrorKind::CondReduce, - ErrorKind::Switch => ErrorKind::Switch, - ErrorKind::TagBits => ErrorKind::TagBits, - ErrorKind::OneOf => ErrorKind::OneOf, - ErrorKind::NoneOf => ErrorKind::NoneOf, - ErrorKind::Char => ErrorKind::Char, - ErrorKind::CrLf => ErrorKind::CrLf, - ErrorKind::RegexpMatch => ErrorKind::RegexpMatch, - ErrorKind::RegexpMatches => ErrorKind::RegexpMatches, - ErrorKind::RegexpFind => ErrorKind::RegexpFind, - ErrorKind::RegexpCapture => ErrorKind::RegexpCapture, - ErrorKind::RegexpCaptures => ErrorKind::RegexpCaptures, - ErrorKind::TakeWhile1 => 
ErrorKind::TakeWhile1, - ErrorKind::Complete => ErrorKind::Complete, - ErrorKind::Fix => ErrorKind::Fix, - ErrorKind::Escaped => ErrorKind::Escaped, - ErrorKind::EscapedTransform => ErrorKind::EscapedTransform, - ErrorKind::TagStr => ErrorKind::TagStr, - ErrorKind::IsNotStr => ErrorKind::IsNotStr, - ErrorKind::IsAStr => ErrorKind::IsAStr, - ErrorKind::TakeWhile1Str => ErrorKind::TakeWhile1Str, - ErrorKind::NonEmpty => ErrorKind::NonEmpty, - ErrorKind::ManyMN => ErrorKind::ManyMN, - ErrorKind::TakeUntilAndConsumeStr => ErrorKind::TakeUntilAndConsumeStr, - ErrorKind::HexDigit => ErrorKind::HexDigit, - ErrorKind::TakeUntilStr => ErrorKind::TakeUntilStr, - ErrorKind::OctDigit => ErrorKind::OctDigit, - ErrorKind::Many0 => ErrorKind::Many0, - ErrorKind::Not => ErrorKind::Not, - ErrorKind::Permutation => ErrorKind::Permutation, - ErrorKind::ManyTill => ErrorKind::ManyTill, - ErrorKind::Verify => ErrorKind::Verify, - ErrorKind::TakeTill1 => ErrorKind::TakeTill1, - ErrorKind::TakeUntilAndConsume1 => ErrorKind::TakeUntilAndConsume1, - ErrorKind::TakeWhileMN => ErrorKind::TakeWhileMN, - ErrorKind::ParseTo => ErrorKind::ParseTo, - } - } -} diff --git a/third_party/rust/nom/src/verbose_errors.rs b/third_party/rust/nom/src/verbose_errors.rs deleted file mode 100644 index 66303c9a1f..0000000000 --- a/third_party/rust/nom/src/verbose_errors.rs +++ /dev/null @@ -1,257 +0,0 @@ -//! Error management -//! -//! Depending on a compilation flag, the content of the `Context` enum -//! can change. In the default case, it will only have one variant: -//! `Context::Code(I, ErrorKind)` (with `I` and `E` configurable). -//! It contains an error code and the input position that triggered it. -//! -//! If you activate the `verbose-errors` compilation flags, it will add another -//! variant to the enum: `Context::List(Vec<(I, ErrorKind)>)`. -//! This variant aggregates positions and error codes as the code backtracks -//! through the nested parsers. -//! The verbose errors feature allows for very flexible error management: -//! you can know precisely which parser got to which part of the input. -//! The main drawback is that it is a lot slower than default error -//! management. -use util::{Convert, ErrorKind}; -use lib::std::convert::From; -#[cfg(feature = "alloc")] -use lib::std::vec::Vec; - -/// Contains the error that a parser can return -/// -/// If you use the `verbose-errors` compilation feature, -/// `nom::Err` will be the enum defined here, -/// otherwise, it will amount to a `ErrorKind`. -/// -/// It can represent a linked list of errors, indicating the path taken in the parsing tree, with corresponding position in the input data. -/// It depends on P, the input position (for a &[u8] parser, it would be a &[u8]), and E, the custom error type (by default, u32) -#[derive(Debug, PartialEq, Eq, Clone)] -pub enum Context { - /// An error code, represented by an ErrorKind, which can contain a custom error code represented by E - Code(I, ErrorKind), - List(Vec<(I, ErrorKind)>), -} - -impl> Convert> for Context { - fn convert(c: Context) -> Self { - match c { - Context::Code(i, e) => Context::Code(i, ErrorKind::convert(e)), - Context::List(mut v) => Context::List( - v.drain(..) - .map(|(i, e)| (i, ErrorKind::convert(e))) - .collect(), - ), - } - } -} - -impl Context { - /// Convert Err into ErrorKind. - /// - /// This allows application code to use ErrorKind and stay independent from the verbose-errors features activation. 
- pub fn into_error_kind(self) -> ErrorKind { - match self { - Context::Code(_, kind) => kind, - Context::List(mut v) => { - let (_, kind) = v.remove(0); - kind - } - } - } -} - -/* -impl IResult { - /// Maps a `IResult` to `IResult` by appling a function - /// to a contained `Error` value, leaving `Done` and `Incomplete` value - /// untouched. - #[inline] - pub fn map_err(self, f: F) -> IResult - where F: FnOnce(Err) -> Err { - match self { - Error(e) => Error(f(e)), - Incomplete(n) => Incomplete(n), - Done(i, o) => Done(i, o), - } - } - - /// Unwrap the contained `Error(I, E)` value, or panic if the `IResult` is not - /// `Error`. - pub fn unwrap_err(self) -> Err { - match self { - Error(e) => e, - Done(_, _) => panic!("unwrap_err() called on an IResult that is Done"), - Incomplete(_) => panic!("unwrap_err() called on an IResult that is Incomplete"), - } - } - - /// Convert the IResult to a std::result::Result - pub fn to_full_result(self) -> Result> { - match self { - Done(_, o) => Ok(o), - Incomplete(n) => Err(IError::Incomplete(n)), - Error(e) => Err(IError::Error(e)) - } - } - - /// Convert the IResult to a std::result::Result - pub fn to_result(self) -> Result> { - match self { - Done(_, o) => Ok(o), - Error(e) => Err(e), - Incomplete(_) => panic!("to_result() called on an IResult that is Incomplete") - } - } -} - -#[cfg(feature = "std")] -use $crate::lib::std::any::Any; -#[cfg(feature = "std")] -use $crate::lib::std::{error,fmt}; -#[cfg(feature = "std")] -use $crate::lib::std::fmt::Debug; -#[cfg(feature = "std")] -impl error::Error for Err { - fn description(&self) -> &str { - let kind = match *self { - Err::Code(ref e) | Err::Node(ref e, _) | Err::Position(ref e, _) | Err::NodePosition(ref e, _, _) => e - }; - kind.description() - } -} - -#[cfg(feature = "std")] -impl fmt::Display for Err { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Err::Code(ref e) | Err::Node(ref e, _) => { - write!(f, "{:?}", e) - }, - Err::Position(ref e, ref p) | Err::NodePosition(ref e, ref p, _) => { - write!(f, "{:?}:{:?}", p, e) - } - } - } -} -*/ - -/// translate parser result from IResult to IResult with a custom type -/// -/// ``` -/// # #[macro_use] extern crate nom; -/// # use nom::ErrorKind; -/// # use nom::Context; -/// # use nom::Err; -/// # fn main() { -/// #[derive(Debug,Clone,PartialEq)] -/// pub struct ErrorStr(String); -/// -/// // Convert to IResult<&[u8], &[u8], ErrorStr> -/// impl From for ErrorStr { -/// fn from(i: u32) -> Self { -/// ErrorStr(format!("custom error code: {}", i)) -/// } -/// } -/// -/// // will add a Custom(42) error to the error chain -/// named!(err_test, add_return_error!(ErrorKind::Custom(42), tag!("abcd"))); -/// -/// // Convert to IResult<&[u8], &[u8], ErrorStr> -/// named!(parser<&[u8], &[u8], ErrorStr>, fix_error!(ErrorStr, err_test)); -/// -/// let a = &b"efghblah"[..]; -/// //assert_eq!(parser(a), Err(Err::Error(Context::Code(a, ErrorKind::Custom(ErrorStr("custom error code: 42".to_string())))))); -/// let list = vec!((a, ErrorKind::Tag), (a, ErrorKind::Custom(ErrorStr("custom error code: 42".to_string())))); -/// assert_eq!( -/// parser(a), -/// Err(Err::Error(Context::List(list))) -/// ); -/// # } -/// ``` -#[macro_export] -macro_rules! 
fix_error ( - ($i:expr, $t:ty, $submac:ident!( $($args:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,Convert,ErrorKind,Context}; - - match $submac!($i, $($args)*) { - Err(e) => { - let e2 = match e { - Err::Error(err) => { - let err2 = match err { - Context::Code(i, code) => { - let code2: ErrorKind<$t> = ErrorKind::convert(code); - Context::Code(i, code2) - }, - Context::List(mut v) => { - Context::List(v.drain(..).map(|(i, code)| { - let code2: ErrorKind<$t> = ErrorKind::convert(code); - (i, code2) - }).collect()) - } - }; - Err::Error(err2) - }, - Err::Failure(err) => { - let err2 = match err { - Context::Code(i, code) => { - let code2: ErrorKind<$t> = ErrorKind::convert(code); - Context::Code(i, code2) - }, - Context::List(mut v) => { - Context::List(v.drain(..).map(|(i, code)| { - let code2: ErrorKind<$t> = ErrorKind::convert(code); - (i, code2) - }).collect()) - } - }; - Err::Failure(err2) - }, - Err::Incomplete(i) => Err::Incomplete(i), - }; - Err(e2) - }, - Ok((i, o)) => Ok((i, o)), - } - } - ); - ($i:expr, $t:ty, $f:expr) => ( - fix_error!($i, $t, call!($f)); - ); -); - -/// `flat_map!(R -> IResult, S -> IResult) => R -> IResult` -/// -/// combines a parser R -> IResult and -/// a parser S -> IResult to return another -/// parser R -> IResult -#[macro_export] -macro_rules! flat_map( - ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( - flat_map!(__impl $i, $submac!($($args)*), $submac2!($($args2)*)); - ); - ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => ( - flat_map!(__impl $i, $submac!($($args)*), call!($g)); - ); - ($i:expr, $f:expr, $submac:ident!( $($args:tt)* )) => ( - flat_map!(__impl $i, call!($f), $submac!($($args)*)); - ); - ($i:expr, $f:expr, $g:expr) => ( - flat_map!(__impl $i, call!($f), call!($g)); - ); - (__impl $i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,Convert}; - - ($submac!($i, $($args)*)).and_then(|(i,o)| { - match $submac2!(o, $($args2)*) { - Err(e) => Err(Err::convert(e)), - Ok((_, o2)) => Ok((i, o2)) - } - }) - } - ); -); diff --git a/third_party/rust/nom/src/whitespace.rs b/third_party/rust/nom/src/whitespace.rs index 1b2e9f7347..9ed3f55aad 100644 --- a/third_party/rust/nom/src/whitespace.rs +++ b/third_party/rust/nom/src/whitespace.rs @@ -44,7 +44,6 @@ //! macro_rules! sp ( //! ($i:expr, $($args:tt)*) => ( //! { -//! use nom::Convert; //! use nom::Err; //! //! match sep!($i, space, $($args)*) { @@ -94,11 +93,12 @@ //! ``` //! -#[macro_export] +/// applies the separator parser before the other parser +#[macro_export(local_inner_macros)] macro_rules! wrap_sep ( ($i:expr, $separator:expr, $submac:ident!( $($args:tt)* )) => ({ use $crate::lib::std::result::Result::*; - use $crate::{Err,Convert,IResult}; + use $crate::{Err,IResult}; fn unify_types(_: &IResult, _: &IResult) {} @@ -118,7 +118,7 @@ macro_rules! wrap_sep ( ); #[doc(hidden)] -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! pair_sep ( ($i:expr, $separator:path, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( tuple!( @@ -139,12 +139,12 @@ macro_rules! pair_sep ( ); #[doc(hidden)] -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! 
delimited_sep ( ($i:expr, $separator:path, $submac1:ident!( $($args1:tt)* ), $($rest:tt)+) => ({ use $crate::lib::std::result::Result::*; - match tuple_sep!($i, $separator, (), $submac1!($($args1)*), $($rest)*) { + match tuple_sep!($i, $separator, (), $submac1!($($args1)*), $($rest)+) { Err(e) => Err(e), Ok((remaining, (_,o,_))) => { Ok((remaining, o)) @@ -152,17 +152,17 @@ macro_rules! delimited_sep ( } }); ($i:expr, $separator:path, $f:expr, $($rest:tt)+) => ( - delimited_sep!($i, $separator, call!($f), $($rest)*); + delimited_sep!($i, $separator, call!($f), $($rest)+); ); ); #[doc(hidden)] -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! separated_pair_sep ( ($i:expr, $separator:path, $submac1:ident!( $($args1:tt)* ), $($rest:tt)+) => ({ use $crate::lib::std::result::Result::*; - match tuple_sep!($i, $separator, (), $submac1!($($args1)*), $($rest)*) { + match tuple_sep!($i, $separator, (), $submac1!($($args1)*), $($rest)+) { Err(e) => Err(e), Ok((remaining, (o1,_,o2))) => { Ok((remaining, (o1,o2))) @@ -170,12 +170,12 @@ macro_rules! separated_pair_sep ( } }); ($i:expr, $separator:path, $f:expr, $($rest:tt)+) => ( - separated_pair_sep!($i, $separator, call!($f), $($rest)*); + separated_pair_sep!($i, $separator, call!($f), $($rest)+); ); ); #[doc(hidden)] -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! preceded_sep ( ($i:expr, $separator:path, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ({ use $crate::lib::std::result::Result::*; @@ -199,7 +199,7 @@ macro_rules! preceded_sep ( ); #[doc(hidden)] -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! terminated_sep ( ($i:expr, $separator:path, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ({ use $crate::lib::std::result::Result::*; @@ -224,7 +224,7 @@ macro_rules! terminated_sep ( /// Internal parser, do not use directly #[doc(hidden)] -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! tuple_sep ( ($i:expr, $separator:path, ($($parsed:tt),*), $e:path, $($rest:tt)*) => ( tuple_sep!($i, $separator, ($($parsed),*), call!($e), $($rest)*); @@ -288,7 +288,7 @@ macro_rules! tuple_sep ( ); #[doc(hidden)] -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! do_parse_sep ( (__impl $i:expr, $separator:path, ( $($rest:expr),* )) => ( $crate::lib::std::result::Result::Ok(($i, ( $($rest),* ))) @@ -368,13 +368,13 @@ macro_rules! do_parse_sep ( ); #[doc(hidden)] -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! permutation_sep ( ($i:expr, $separator:path, $($rest:tt)*) => ( { use $crate::lib::std::result::Result::*; use $crate::lib::std::option::Option::*; - use $crate::{Err,ErrorKind,Convert}; + use $crate::{Err,error::ErrorKind}; let mut res = permutation_init!((), $($rest)*); let mut input = $i; @@ -411,7 +411,7 @@ macro_rules! permutation_sep ( ); #[doc(hidden)] -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! permutation_iterator_sep ( ($it:tt,$i:expr, $separator:path, $all_done:expr, $needed:expr, $res:expr, $e:ident?, $($rest:tt)*) => ( permutation_iterator_sep!($it, $i, $separator, $all_done, $needed, $res, call!($e), $($rest)*); @@ -427,11 +427,11 @@ macro_rules! 
permutation_iterator_sep ( use $crate::lib::std::result::Result::*; use $crate::Err; - if acc!($it, $res) == $crate::lib::std::option::Option::None { + if $res.$it == $crate::lib::std::option::Option::None { match {sep!($i, $separator, $submac!($($args)*))} { Ok((i,o)) => { $i = i; - acc!($it, $res) = $crate::lib::std::option::Option::Some(o); + $res.$it = $crate::lib::std::option::Option::Some(o); continue; }, Err(Err::Error(_)) => { @@ -460,11 +460,11 @@ macro_rules! permutation_iterator_sep ( use $crate::lib::std::result::Result::*; use $crate::Err; - if acc!($it, $res) == $crate::lib::std::option::Option::None { + if $res.$it == $crate::lib::std::option::Option::None { match sep!($i, $separator, $submac!($($args)*)) { Ok((i,o)) => { $i = i; - acc!($it, $res) = $crate::lib::std::option::Option::Some(o); + $res.$it = $crate::lib::std::option::Option::Some(o); continue; }, Err(Err::Error(_)) => { @@ -480,7 +480,7 @@ macro_rules! permutation_iterator_sep ( ); #[doc(hidden)] -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! alt_sep ( (__impl $i:expr, $separator:path, $e:path | $($rest:tt)*) => ( alt_sep!(__impl $i, $separator, call!($e) | $($rest)*); @@ -508,7 +508,7 @@ macro_rules! alt_sep ( match sep!($i, $separator, $subrule!( $($args)* )) { Ok((i,o)) => Ok((i,$gen(o))), Err(Err::Error(_)) => { - alt_sep!(__impl $i, $separator, $($rest)*) + alt_sep!(__impl $i, $separator, $($rest)+) }, Err(e) => Err(e), } @@ -532,7 +532,7 @@ macro_rules! alt_sep ( Ok((i,o)) => Ok((i,$gen(o))), Err(Err::Error(e)) => { fn unify_types(_: &T, _: &T) {} - let e2 = error_position!($i, $crate::ErrorKind::Alt); + let e2 = error_position!($i, $crate::error::ErrorKind::Alt); unify_types(&e, &e2); Err(Err::Error(e2)) }, @@ -554,7 +554,7 @@ macro_rules! alt_sep ( Ok((i,o)) => Ok((i,o)), Err(Err::Error(e)) => { fn unify_types(_: &T, _: &T) {} - let e2 = error_position!($i, $crate::ErrorKind::Alt); + let e2 = error_position!($i, $crate::error::ErrorKind::Alt); unify_types(&e, &e2); Err(Err::Error(e2)) }, @@ -567,14 +567,14 @@ macro_rules! alt_sep ( use $crate::lib::std::result::Result::*; use $crate::{Err,Needed,IResult}; - Err(Err::Error(error_position!($i, $crate::ErrorKind::Alt))) + Err(Err::Error(error_position!($i, $crate::error::ErrorKind::Alt))) }); (__impl $i:expr, $separator:path) => ({ use $crate::lib::std::result::Result::*; use $crate::{Err,Needed,IResult}; - Err(Err::Error(error_position!($i, $crate::ErrorKind::Alt))) + Err(Err::Error(error_position!($i, $crate::error::ErrorKind::Alt))) }); ($i:expr, $separator:path, $($rest:tt)*) => ( @@ -585,61 +585,7 @@ macro_rules! alt_sep ( ); #[doc(hidden)] -#[macro_export] -macro_rules! 
alt_complete_sep ( - ($i:expr, $separator:path, $e:path | $($rest:tt)*) => ( - alt_complete_sep!($i, $separator, complete!(call!($e)) | $($rest)*); - ); - - ($i:expr, $separator:path, $subrule:ident!( $($args:tt)*) | $($rest:tt)*) => ( - { - use $crate::lib::std::result::Result::*; - - let res = complete!($i, sep!($separator, $subrule!($($args)*))); - match res { - Ok((_,_)) => res, - _ => alt_complete_sep!($i, $separator, $($rest)*), - } - } - ); - - ($i:expr, $separator:path, $subrule:ident!( $($args:tt)* ) => { $gen:expr } | $($rest:tt)+) => ( - { - use $crate::lib::std::result::Result::*; - use $crate::{Err,Needed,IResult}; - - match complete!($i, sep!($separator, $subrule!($($args)*))) { - Ok((i,o)) => Ok((i,$gen(o))), - _ => alt_complete_sep!($i, $separator, $($rest)*), - } - } - ); - - ($i:expr, $separator:path, $e:path => { $gen:expr } | $($rest:tt)*) => ( - alt_complete_sep!($i, $separator, complete!(call!($e)) => { $gen } | $($rest)*); - ); - - // Tail (non-recursive) rules - - ($i:expr, $separator:path, $e:path => { $gen:expr }) => ( - alt_complete_sep!($i, $separator, call!($e) => { $gen }); - ); - - ($i:expr, $separator:path, $subrule:ident!( $($args:tt)* ) => { $gen:expr }) => ( - alt_sep!(__impl $i, $separator, complete!($subrule!($($args)*)) => { $gen }) - ); - - ($i:expr, $separator:path, $e:path) => ( - alt_complete_sep!($i, $separator, call!($e)); - ); - - ($i:expr, $separator:path, $subrule:ident!( $($args:tt)*)) => ( - alt_sep!(__impl $i, $separator, complete!($subrule!($($args)*))) - ); -); - -#[doc(hidden)] -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! switch_sep ( (__impl $i:expr, $separator:path, $submac:ident!( $($args:tt)* ), $($p:pat => $subrule:ident!( $($args2:tt)* ))|* ) => ( { @@ -648,22 +594,22 @@ macro_rules! switch_sep ( match sep!($i, $separator, $submac!($($args)*)) { Err(Err::Error(e)) => Err(Err::Error(error_node_position!( - $i, $crate::ErrorKind::Switch, e + $i, $crate::error::ErrorKind::Switch, e ))), Err(Err::Failure(e)) => Err(Err::Failure( - error_node_position!($i, $crate::ErrorKind::Switch, e))), + error_node_position!($i, $crate::error::ErrorKind::Switch, e))), Err(e) => Err(e), Ok((i, o)) => { match o { $($p => match sep!(i, $separator, $subrule!($($args2)*)) { Err(Err::Error(e)) => Err(Err::Error(error_node_position!( - $i, $crate::ErrorKind::Switch, e + $i, $crate::error::ErrorKind::Switch, e ))), Err(Err::Failure(e)) => Err(Err::Failure( - error_node_position!($i, $crate::ErrorKind::Switch, e))), + error_node_position!($i, $crate::error::ErrorKind::Switch, e))), a => a, }),*, - _ => Err(Err::Error(error_position!($i, $crate::ErrorKind::Switch))) + _ => Err(Err::Error(error_position!($i, $crate::error::ErrorKind::Switch))) } } } @@ -683,7 +629,7 @@ macro_rules! switch_sep ( #[doc(hidden)] #[cfg(feature = "alloc")] -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! separated_list_sep ( ($i:expr, $separator:path, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => ( separated_list!( @@ -710,7 +656,7 @@ macro_rules! separated_list_sep ( /// named!(pub space, eat_separator!(&b" \t"[..])); /// # fn main() {} /// ``` -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! eat_separator ( ($i:expr, $arr:expr) => ( { @@ -727,7 +673,7 @@ macro_rules! eat_separator ( /// and will intersperse the space parser everywhere /// /// ```ignore -/// #[macro_export] +/// #[macro_export(local_inner_macros)] /// macro_rules! 
ws ( /// ($i:expr, $($args:tt)*) => ( /// { @@ -737,7 +683,7 @@ macro_rules! eat_separator ( /// ) /// ); /// ``` -#[macro_export] +#[macro_export(local_inner_macros)] macro_rules! sep ( ($i:expr, $separator:path, tuple ! ($($rest:tt)*) ) => { tuple_sep!($i, $separator, (), $($rest)*) @@ -790,12 +736,6 @@ macro_rules! sep ( alt_sep!($separator, $($rest)*) ) }; - ($i:expr, $separator:path, alt_complete ! ($($rest:tt)*) ) => { - wrap_sep!($i, - $separator, - alt_complete_sep!($separator, $($rest)*) - ) - }; ($i:expr, $separator:path, switch ! ($($rest:tt)*) ) => { wrap_sep!($i, $separator, @@ -828,23 +768,6 @@ macro_rules! sep ( }; ); -use internal::IResult; -use traits::{AsChar, FindToken, InputTakeAtPosition}; -#[allow(unused_imports)] -pub fn sp<'a, T>(input: T) -> IResult -where - T: InputTakeAtPosition, - ::Item: AsChar + Clone, - &'a str: FindToken<::Item>, -{ - input.split_at_position(|item| { - let c = item.clone().as_char(); - !(c == ' ' || c == '\t' || c == '\r' || c == '\n') - }) - //this could be written as followed, but not using FindToken is faster - //eat_separator!(input, " \t\r\n") -} - /// `ws!(I -> IResult) => I -> IResult` /// /// transforms a parser to automatically consume @@ -871,19 +794,19 @@ where /// # } /// ``` /// -#[macro_export] +#[macro_export(local_inner_macros)] +#[deprecated(since = "5.0.0", note = "whitespace parsing only works with macros and will not be updated anymore")] macro_rules! ws ( ($i:expr, $($args:tt)*) => ( { - use $crate::sp; - use $crate::Convert; use $crate::Err; use $crate::lib::std::result::Result::*; + use $crate::character::complete::multispace0; - match sep!($i, sp, $($args)*) { + match sep!($i, multispace0, $($args)*) { Err(e) => Err(e), Ok((i1,o)) => { - match (sp)(i1) { + match (multispace0)(i1) { Err(e) => Err(Err::convert(e)), Ok((i2,_)) => Ok((i2, o)) } @@ -897,15 +820,20 @@ macro_rules! 
ws ( #[allow(dead_code)] mod tests { #[cfg(feature = "alloc")] - use lib::std::string::{String, ToString}; - use internal::{Err, IResult, Needed}; - use super::sp; - use util::ErrorKind; - use types::CompleteStr; + use crate::{ + error::ParseError, + lib::std::{ + string::{String, ToString}, + fmt::Debug + } + }; + use crate::internal::{Err, IResult, Needed}; + use crate::character::complete::multispace0 as sp; + use crate::error::ErrorKind; #[test] fn spaaaaace() { - assert_eq!(sp(&b" \t abc "[..]), Ok((&b"abc "[..], &b" \t "[..]))); + assert_eq!(sp::<_,(_,ErrorKind)>(&b" \t abc "[..]), Ok((&b"abc "[..], &b" \t "[..]))); } #[test] @@ -1052,16 +980,27 @@ mod tests { pub struct ErrorStr(String); #[cfg(feature = "alloc")] - impl From for ErrorStr { - fn from(i: u32) -> Self { - ErrorStr(format!("custom error code: {}", i)) + impl<'a> From<(&'a[u8], ErrorKind)> for ErrorStr { + fn from(i: (&'a[u8], ErrorKind)) -> Self { + ErrorStr(format!("custom error code: {:?}", i)) } } #[cfg(feature = "alloc")] - impl<'a> From<&'a str> for ErrorStr { - fn from(i: &'a str) -> Self { - ErrorStr(format!("custom error message: {}", i)) + impl<'a> From<(&'a str, ErrorKind)> for ErrorStr { + fn from(i: (&'a str, ErrorKind)) -> Self { + ErrorStr(format!("custom error message: {:?}", i)) + } + } + + #[cfg(feature = "alloc")] + impl ParseError for ErrorStr { + fn from_error_kind(input: I, kind: ErrorKind) -> Self { + ErrorStr(format!("custom error message: ({:?}, {:?})", input, kind)) + } + + fn append(input: I, kind: ErrorKind, other: Self) -> Self { + ErrorStr(format!("custom error message: ({:?}, {:?}) - {:?}", input, kind, other)) } } @@ -1074,11 +1013,7 @@ mod tests { #[allow(unused_variables)] fn dont_work(input: &[u8]) -> IResult<&[u8], &[u8], ErrorStr> { - use Context; - Err(Err::Error(Context::Code( - &b""[..], - ErrorKind::Custom(ErrorStr("abcd".to_string())), - ))) + Err(Err::Error(ErrorStr("abcd".to_string()))) } fn work2(input: &[u8]) -> IResult<&[u8], &[u8], ErrorStr> { @@ -1098,66 +1033,11 @@ mod tests { let a = &b"\tabcd"[..]; assert_eq!( alt1(a), - Err(Err::Error(error_position!(a, ErrorKind::Alt::))) + Err(Err::Error(error_position!(a, ErrorKind::Alt))) ); assert_eq!(alt2(a), Ok((&b""[..], a))); assert_eq!(alt3(a), Ok((a, &b""[..]))); - named!(alt4, ws!(alt!(tag!("abcd") | tag!("efgh")))); - assert_eq!( - alt4(CompleteStr("\tabcd")), - Ok((CompleteStr(""), CompleteStr(r"abcd"))) - ); - assert_eq!( - alt4(CompleteStr(" efgh ")), - Ok((CompleteStr(""), CompleteStr("efgh"))) - ); - - // test the alternative syntax - named!(alt5, ws!(alt!(tag!("abcd") => { |_| false } | tag!("efgh") => { |_| true }))); - assert_eq!(alt5(CompleteStr("\tabcd")), Ok((CompleteStr(""), false))); - assert_eq!(alt5(CompleteStr(" efgh ")), Ok((CompleteStr(""), true))); - } - - /*FIXME: alt_complete works, but ws will return Incomplete on end of input - #[test] - fn alt_complete() { - named!(ac<&[u8], &[u8]>, - ws!(alt_complete!(tag!("abcd") | tag!("ef") | tag!("ghi") | tag!("kl"))) - ); - - let a = &b""[..]; - assert_eq!(ac(a), Err(Err::Error(error_position!(a, ErrorKind::Alt)))); - let a = &b" \tef "[..]; - assert_eq!(ac(a),Ok((&b""[..], &b"ef"[..]))); - let a = &b" cde"[..]; - assert_eq!(ac(a), Err(Err::Error(error_position!(&a[1..], ErrorKind::Alt)))); - } - */ - - #[allow(unused_variables)] - #[test] - fn switch() { - named!(sw, - ws!(switch!(take!(4), - CompleteStr("abcd") => take!(2) | - CompleteStr("efgh") => take!(4) - )) - ); - - let a = CompleteStr(" abcd ef gh"); - assert_eq!(sw(a), Ok((CompleteStr("gh"), 
CompleteStr("ef")))); - - let b = CompleteStr("\tefgh ijkl "); - assert_eq!(sw(b), Ok((CompleteStr(""), CompleteStr("ijkl")))); - let c = CompleteStr("afghijkl"); - assert_eq!( - sw(c), - Err(Err::Error(error_position!( - CompleteStr("afghijkl"), - ErrorKind::Switch - ))) - ); } named!(str_parse(&str) -> &str, ws!(tag!("test"))); diff --git a/third_party/rust/nom/tests/arithmetic.rs b/third_party/rust/nom/tests/arithmetic.rs index 02c90941af..b3c38931df 100644 --- a/third_party/rust/nom/tests/arithmetic.rs +++ b/third_party/rust/nom/tests/arithmetic.rs @@ -1,95 +1,109 @@ -#[macro_use] extern crate nom; -use nom::digit; -use nom::types::CompleteStr; + +use nom::{ + IResult, + branch::alt, + combinator::map_res, + character::complete::char, + bytes::complete::tag, + character::complete::{digit1 as digit, space0 as space}, + multi::fold_many0, + sequence::{delimited, pair} +}; // Parser definition use std::str::FromStr; // We parse any expr surrounded by parens, ignoring all whitespaces around those -named!(parens, ws!(delimited!( tag!("("), expr, tag!(")") )) ); +fn parens(i: &str) -> IResult<&str, i64> { + delimited( + space, + delimited( + tag("("), + expr, + tag(")") + ), + space + )(i) +} // We transform an integer string into a i64, ignoring surrounding whitespaces // We look for a digit suite, and try to convert it. // If either str::from_utf8 or FromStr::from_str fail, // we fallback to the parens parser defined above -named!(factor, alt!( - map_res!( - ws!(digit), - |s:CompleteStr| { FromStr::from_str(s.0) } - ) - | parens - ) -); +fn factor(i: &str) -> IResult<&str, i64> { + alt(( + map_res(delimited(space, digit, space), FromStr::from_str), + parens + ))(i) +} // We read an initial factor and for each time we find // a * or / operator followed by another factor, we do // the math by folding everything -named!(term , do_parse!( - init: factor >> - res: fold_many0!( - pair!(alt!(char!('*') | char!('/')), factor), - init, - |acc, (op, val): (char, i64)| { - if op == '*' { acc * val } else { acc / val } - } - ) >> - (res) - ) -); +fn term(i: &str) -> IResult<&str, i64> { + let (i, init) = factor(i)?; -named!(expr , do_parse!( - init: term >> - res: fold_many0!( - pair!(alt!(char!('+') | char!('-')), term), - init, - |acc, (op, val): (char, i64)| { - if op == '+' { acc + val } else { acc - val } - } - ) >> - (res) - ) -); + fold_many0( + pair(alt((char('*'), char('/'))), factor), + init, + |acc, (op, val): (char, i64)| { + if op == '*' { acc * val } else { acc / val } + } + )(i) +} + +fn expr(i: &str) -> IResult<&str, i64> { + let (i, init) = term(i)?; + + fold_many0( + pair(alt((char('+'), char('-'))), term), + init, + |acc, (op, val): (char, i64)| { + if op == '+' { acc + val } else { acc - val } + } + )(i) +} #[test] fn factor_test() { - assert_eq!(factor(CompleteStr("3")), Ok((CompleteStr(""), 3))); - assert_eq!(factor(CompleteStr(" 12")), Ok((CompleteStr(""), 12))); - assert_eq!(factor(CompleteStr("537 ")), Ok((CompleteStr(""), 537))); - assert_eq!(factor(CompleteStr(" 24 ")), Ok((CompleteStr(""), 24))); + assert_eq!(factor("3"), Ok(("", 3))); + assert_eq!(factor(" 12"), Ok(("", 12))); + assert_eq!(factor("537 "), Ok(("", 537))); + assert_eq!(factor(" 24 "), Ok(("", 24))); } #[test] fn term_test() { - assert_eq!(term(CompleteStr(" 12 *2 / 3")), Ok((CompleteStr(""), 8))); + assert_eq!(term(" 12 *2 / 3"), Ok(("", 8))); assert_eq!( - term(CompleteStr(" 2* 3 *2 *2 / 3")), - Ok((CompleteStr(""), 8)) + term(" 2* 3 *2 *2 / 3"), + Ok(("", 8)) ); - assert_eq!(term(CompleteStr(" 48 / 3/2")), 
Ok((CompleteStr(""), 8))); + assert_eq!(term(" 48 / 3/2"), Ok(("", 8))); } #[test] fn expr_test() { - assert_eq!(expr(CompleteStr(" 1 + 2 ")), Ok((CompleteStr(""), 3))); + assert_eq!(expr(" 1 + 2 "), Ok(("", 3))); assert_eq!( - expr(CompleteStr(" 12 + 6 - 4+ 3")), - Ok((CompleteStr(""), 17)) + expr(" 12 + 6 - 4+ 3"), + Ok(("", 17)) ); - assert_eq!(expr(CompleteStr(" 1 + 2*3 + 4")), Ok((CompleteStr(""), 11))); + assert_eq!(expr(" 1 + 2*3 + 4"), Ok(("", 11))); } #[test] fn parens_test() { - assert_eq!(expr(CompleteStr(" ( 2 )")), Ok((CompleteStr(""), 2))); + assert_eq!(expr(" ( 2 )"), Ok(("", 2))); assert_eq!( - expr(CompleteStr(" 2* ( 3 + 4 ) ")), - Ok((CompleteStr(""), 14)) + expr(" 2* ( 3 + 4 ) "), + Ok(("", 14)) ); assert_eq!( - expr(CompleteStr(" 2*2 / ( 5 - 1) + 3")), - Ok((CompleteStr(""), 4)) + expr(" 2*2 / ( 5 - 1) + 3"), + Ok(("", 4)) ); } diff --git a/third_party/rust/nom/tests/arithmetic_ast.rs b/third_party/rust/nom/tests/arithmetic_ast.rs index 9502f2fcb0..ecb8d40641 100644 --- a/third_party/rust/nom/tests/arithmetic_ast.rs +++ b/third_party/rust/nom/tests/arithmetic_ast.rs @@ -1,4 +1,3 @@ -#[macro_use] extern crate nom; use std::fmt; @@ -6,8 +5,15 @@ use std::fmt::{Debug, Display, Formatter}; use std::str::FromStr; -use nom::{digit, multispace}; -use nom::types::CompleteStr; +use nom::{ + IResult, + character::complete::{digit1 as digit, multispace0 as multispace}, + sequence::{preceded, delimited}, + combinator::{map, map_res}, + multi::many0, + branch::alt, + bytes::complete::tag, +}; pub enum Expr { Value(i64), @@ -54,24 +60,30 @@ impl Debug for Expr { } } -named!(parens< CompleteStr, Expr >, delimited!( - delimited!(opt!(multispace), tag!("("), opt!(multispace)), - map!(map!(expr, Box::new), Expr::Paren), - delimited!(opt!(multispace), tag!(")"), opt!(multispace)) - ) -); - -named!(factor< CompleteStr, Expr >, alt_complete!( - map!( - map_res!( - delimited!(opt!(multispace), digit, opt!(multispace)), - |s: CompleteStr| { FromStr::from_str(s.0) } +fn parens(i: &str) -> IResult<&str, Expr> { + delimited( + multispace, + delimited( + tag("("), + map(expr, |e| Expr::Paren(Box::new(e))), + tag(")") + ), + multispace + )(i) +} + +fn factor(i: &str) -> IResult<&str, Expr> { + alt(( + map( + map_res( + delimited(multispace, digit, multispace), + FromStr::from_str ), Expr::Value - ) - | parens - ) -); + ), + parens + ))(i) +} fn fold_exprs(initial: Expr, remainder: Vec<(Oper, Expr)>) -> Expr { remainder.into_iter().fold(initial, |acc, pair| { @@ -85,64 +97,76 @@ fn fold_exprs(initial: Expr, remainder: Vec<(Oper, Expr)>) -> Expr { }) } -named!(term< CompleteStr, Expr >, do_parse!( - initial: factor >> - remainder: many0!( - alt!( - do_parse!(tag!("*") >> mul: factor >> (Oper::Mul, mul)) | - do_parse!(tag!("/") >> div: factor >> (Oper::Div, div)) - ) - ) >> - (fold_exprs(initial, remainder)) -)); - -named!(expr< CompleteStr, Expr >, do_parse!( - initial: term >> - remainder: many0!( - alt!( - do_parse!(tag!("+") >> add: term >> (Oper::Add, add)) | - do_parse!(tag!("-") >> sub: term >> (Oper::Sub, sub)) - ) - ) >> - (fold_exprs(initial, remainder)) -)); +fn term(i: &str) -> IResult<&str, Expr> { + let (i, initial) = factor(i)?; + let (i, remainder) = many0(alt(( + |i| { + let(i, mul) = preceded(tag("*"), factor)(i)?; + Ok((i,(Oper::Mul, mul))) + }, + |i| { + let(i, div) = preceded(tag("/"), factor)(i)?; + Ok((i, (Oper::Div, div))) + }, + + )))(i)?; + + Ok((i, fold_exprs(initial, remainder))) +} + +fn expr(i: &str) -> IResult<&str, Expr> { + let (i, initial) = term(i)?; + let (i, 
remainder) = many0(alt(( + |i| { + let(i, add) = preceded(tag("+"), term)(i)?; + Ok((i,(Oper::Add, add))) + }, + |i| { + let(i, sub) = preceded(tag("-"), term)(i)?; + Ok((i, (Oper::Sub, sub))) + }, + + )))(i)?; + + Ok((i, fold_exprs(initial, remainder))) +} #[test] fn factor_test() { assert_eq!( - factor(CompleteStr(" 3 ")).map(|(i, x)| (i, format!("{:?}", x))), - Ok((CompleteStr(""), String::from("3"))) + factor(" 3 ").map(|(i, x)| (i, format!("{:?}", x))), + Ok(("", String::from("3"))) ); } #[test] fn term_test() { assert_eq!( - term(CompleteStr(" 3 * 5 ")).map(|(i, x)| (i, format!("{:?}", x))), - Ok((CompleteStr(""), String::from("(3 * 5)"))) + term(" 3 * 5 ").map(|(i, x)| (i, format!("{:?}", x))), + Ok(("", String::from("(3 * 5)"))) ); } #[test] fn expr_test() { assert_eq!( - expr(CompleteStr(" 1 + 2 * 3 ")).map(|(i, x)| (i, format!("{:?}", x))), - Ok((CompleteStr(""), String::from("(1 + (2 * 3))"))) + expr(" 1 + 2 * 3 ").map(|(i, x)| (i, format!("{:?}", x))), + Ok(("", String::from("(1 + (2 * 3))"))) ); assert_eq!( - expr(CompleteStr(" 1 + 2 * 3 / 4 - 5 ")).map(|(i, x)| (i, format!("{:?}", x))), - Ok((CompleteStr(""), String::from("((1 + ((2 * 3) / 4)) - 5)"))) + expr(" 1 + 2 * 3 / 4 - 5 ").map(|(i, x)| (i, format!("{:?}", x))), + Ok(("", String::from("((1 + ((2 * 3) / 4)) - 5)"))) ); assert_eq!( - expr(CompleteStr(" 72 / 2 / 3 ")).map(|(i, x)| (i, format!("{:?}", x))), - Ok((CompleteStr(""), String::from("((72 / 2) / 3)"))) + expr(" 72 / 2 / 3 ").map(|(i, x)| (i, format!("{:?}", x))), + Ok(("", String::from("((72 / 2) / 3)"))) ); } #[test] fn parens_test() { assert_eq!( - expr(CompleteStr(" ( 1 + 2 ) * 3 ")).map(|(i, x)| (i, format!("{:?}", x))), - Ok((CompleteStr(""), String::from("([(1 + 2)] * 3)"))) + expr(" ( 1 + 2 ) * 3 ").map(|(i, x)| (i, format!("{:?}", x))), + Ok(("", String::from("([(1 + 2)] * 3)"))) ); } diff --git a/third_party/rust/nom/tests/complete_arithmetic.rs b/third_party/rust/nom/tests/complete_arithmetic.rs deleted file mode 100644 index 3e302e3f7e..0000000000 --- a/third_party/rust/nom/tests/complete_arithmetic.rs +++ /dev/null @@ -1,95 +0,0 @@ -#[macro_use] -extern crate nom; - -use nom::types::CompleteStr; - -use std::str::FromStr; - -#[macro_export] -macro_rules! 
complete_named ( - ($name:ident, $submac:ident!( $($args:tt)* )) => ( - fn $name( i: CompleteStr ) -> nom::IResult { - $submac!(i, $($args)*) - } - ); - ($name:ident<$o:ty>, $submac:ident!( $($args:tt)* )) => ( - fn $name( i: CompleteStr ) -> nom::IResult { - $submac!(i, $($args)*) - } - ); -); - -complete_named!(digit, is_a!("0123456789")); - -complete_named!(parens, ws!(delimited!(tag!("("), expr, tag!(")")))); - -complete_named!(factor, alt!(map_res!(ws!(digit), to_i64) | parens)); - -complete_named!( - term, - do_parse!( - init: factor - >> res: - fold_many0!( - pair!(alt!(tag!("*") | tag!("/")), factor), - init, - |acc, (op, val): (CompleteStr, i64)| if (op.0.chars().next().unwrap() as char) == '*' { - acc * val - } else { - acc / val - } - ) >> (res) - ) -); - -complete_named!( - expr, - do_parse!( - init: term - >> res: - fold_many0!( - pair!(alt!(tag!("+") | tag!("-")), term), - init, - |acc, (op, val): (CompleteStr, i64)| if (op.0.chars().next().unwrap() as char) == '+' { - acc + val - } else { - acc - val - } - ) >> (res) - ) -); - -complete_named!(root_expr, terminated!(expr, eof!())); - -fn to_i64(input: CompleteStr) -> Result { - match FromStr::from_str(input.0) { - Err(_) => Err(()), - Ok(i) => Ok(i), - } -} - -#[test] -fn factor_test() { - let a = CompleteStr("3"); - println!("calculated: {:?}", factor(a)); -} - -#[test] -fn parens_test() { - use nom::ErrorKind; - - let input1 = CompleteStr(" 2* ( 3 + 4 ) "); - assert_eq!(expr(input1), Ok((CompleteStr(""), 14))); - - let input2 = CompleteStr(" 2*2 / ( 5 - 1) + 3"); - assert_eq!(expr(input2), Ok((CompleteStr(""), 4))); - - let input3 = CompleteStr(" 2*2 / ( 5 - 1) + "); - assert_eq!( - root_expr(input3), - Err(nom::Err::Error(error_position!( - CompleteStr("+ "), - ErrorKind::Eof - ))) - ); -} diff --git a/third_party/rust/nom/tests/complete_float.rs b/third_party/rust/nom/tests/complete_float.rs deleted file mode 100644 index d7a272695b..0000000000 --- a/third_party/rust/nom/tests/complete_float.rs +++ /dev/null @@ -1,80 +0,0 @@ -#[macro_use] -extern crate nom; - -use nom::digit; -use nom::types::CompleteStr; - -#[macro_export] -macro_rules! 
complete_named ( - ($name:ident, $submac:ident!( $($args:tt)* )) => ( - fn $name( i: CompleteStr ) -> nom::IResult { - $submac!(i, $($args)*) - } - ); - ($name:ident<$o:ty>, $submac:ident!( $($args:tt)* )) => ( - fn $name( i: CompleteStr ) -> nom::IResult { - $submac!(i, $($args)*) - } - ); -); - -complete_named!( - unsigned_float, - flat_map!( - recognize!(alt!( - delimited!(digit, tag!("."), opt!(digit)) | delimited!(opt!(digit), tag!("."), digit) - )), - parse_to!(f32) - ) -); - -complete_named!( - float, - map!( - pair!(opt!(alt!(tag!("+") | tag!("-"))), unsigned_float), - |(sign, value): (Option, f32)| sign - .and_then(|s| s.0.chars().next()) - .and_then(|c| if c == '-' { Some(-1f32) } else { None }) - .unwrap_or(1f32) * value - ) -); - -#[test] -fn unsigned_float_test() { - assert_eq!( - unsigned_float(CompleteStr("123.456")), - Ok((CompleteStr(""), 123.456)) - ); - assert_eq!( - unsigned_float(CompleteStr("0.123")), - Ok((CompleteStr(""), 0.123)) - ); - assert_eq!( - unsigned_float(CompleteStr("123.0")), - Ok((CompleteStr(""), 123.0)) - ); - assert_eq!( - unsigned_float(CompleteStr("123.")), - Ok((CompleteStr(""), 123.0)) - ); - assert_eq!( - unsigned_float(CompleteStr(".123")), - Ok((CompleteStr(""), 0.123)) - ); -} - -#[test] -fn float_test() { - assert_eq!( - float(CompleteStr("123.456")), - Ok((CompleteStr(""), 123.456)) - ); - assert_eq!( - float(CompleteStr("+123.456")), - Ok((CompleteStr(""), 123.456)) - ); - assert_eq!( - float(CompleteStr("-123.456")), - Ok((CompleteStr(""), -123.456)) - ); -} diff --git a/third_party/rust/nom/tests/css.rs b/third_party/rust/nom/tests/css.rs index 03dead897c..0ce7e77b05 100644 --- a/third_party/rust/nom/tests/css.rs +++ b/third_party/rust/nom/tests/css.rs @@ -1,6 +1,10 @@ -#[macro_use] extern crate nom; +use nom::IResult; +use nom::bytes::complete::{tag, take_while_m_n}; +use nom::combinator::map_res; +use nom::sequence::tuple; + #[derive(Debug, PartialEq)] pub struct Color { pub red: u8, @@ -16,19 +20,20 @@ fn is_hex_digit(c: char) -> bool { c.is_digit(16) } -named!(hex_primary<&str, u8>, - map_res!(take_while_m_n!(2, 2, is_hex_digit), from_hex) -); - -named!(hex_color<&str, Color>, - do_parse!( - tag!("#") >> - red: hex_primary >> - green: hex_primary >> - blue: hex_primary >> - (Color { red, green, blue }) - ) -); +fn hex_primary(input: &str) -> IResult<&str, u8> { + map_res( + take_while_m_n(2, 2, is_hex_digit), + from_hex + )(input) +} + +fn hex_color(input: &str) -> IResult<&str, Color> { + let (input, _) = tag("#")(input)?; + let (input, (red, green, blue)) = tuple((hex_primary, hex_primary, hex_primary))(input)?; + + Ok((input, Color { red, green, blue })) +} + #[test] fn parse_color() { diff --git a/third_party/rust/nom/tests/custom_errors.rs b/third_party/rust/nom/tests/custom_errors.rs index 7d13244ec2..89366dc9e1 100644 --- a/third_party/rust/nom/tests/custom_errors.rs +++ b/third_party/rust/nom/tests/custom_errors.rs @@ -5,23 +5,40 @@ extern crate nom; use nom::IResult; -use nom::digit; +use nom::error::{ErrorKind,ParseError}; +use nom::character::streaming::digit1 as digit; use std::convert::From; +#[derive(Debug)] pub struct CustomError(String); -impl From for CustomError { - fn from(error: u32) -> Self { - CustomError(format!("error code was: {}", error)) + +impl<'a> From<(&'a str, ErrorKind)> for CustomError { + fn from(error: (&'a str, ErrorKind)) -> Self { + CustomError(format!("error code was: {:?}", error)) + } +} + +impl<'a> ParseError<&'a str> for CustomError { + fn from_error_kind(_: &'a str, kind: ErrorKind) -> Self 
{ + CustomError(format!("error code was: {:?}", kind)) } + + fn append(_: &'a str, kind: ErrorKind, other: CustomError) -> Self { + CustomError(format!("{:?}\nerror code was: {:?}", other, kind)) + + } + } fn test1(input: &str) -> IResult<&str, &str, CustomError> { - fix_error!(input, CustomError, tag!("abcd")) + //fix_error!(input, CustomError, tag!("abcd")) + tag!(input, "abcd") } fn test2(input: &str) -> IResult<&str, &str, CustomError> { - terminated!(input, test1, fix_error!(CustomError, digit)) + //terminated!(input, test1, fix_error!(CustomError, digit)) + terminated!(input, test1, digit) } fn test3(input: &str) -> IResult<&str, &str, CustomError> { diff --git a/third_party/rust/nom/tests/escaped.rs b/third_party/rust/nom/tests/escaped.rs new file mode 100644 index 0000000000..19c1ed70a4 --- /dev/null +++ b/third_party/rust/nom/tests/escaped.rs @@ -0,0 +1,25 @@ +use nom::{Err, error::ErrorKind, IResult}; +use nom::character::complete::digit1; +use nom::bytes::complete::escaped; +use nom::character::complete::one_of; + +fn esc(s: &str) -> IResult<&str, &str> { + escaped(digit1, '\\', one_of("\"n\\"))(s) +} + +#[cfg(feature="alloc")] +fn esc_trans(s: &str) -> IResult<&str, String> { + use nom::bytes::complete::{escaped_transform, tag}; + escaped_transform(digit1, '\\', |i: &str| tag("n")(i))(s) +} + +#[test] +fn test_escaped() { + assert_eq!(esc("abcd"), Err(Err::Error(("abcd", ErrorKind::Escaped)))); +} + +#[test] +#[cfg(feature="alloc")] +fn test_escaped_transform() { + assert_eq!(esc_trans("abcd"), Err(Err::Error(("abcd", ErrorKind::EscapedTransform)))); +} diff --git a/third_party/rust/nom/tests/float.rs b/third_party/rust/nom/tests/float.rs index 57cfd9407b..eb82804f29 100644 --- a/third_party/rust/nom/tests/float.rs +++ b/third_party/rust/nom/tests/float.rs @@ -1,7 +1,7 @@ #[macro_use] extern crate nom; -use nom::digit; +use nom::character::streaming::digit1 as digit; use std::str; use std::str::FromStr; diff --git a/third_party/rust/nom/tests/inference.rs b/third_party/rust/nom/tests/inference.rs index 0a916d0f6c..851c1382c9 100644 --- a/third_party/rust/nom/tests/inference.rs +++ b/third_party/rust/nom/tests/inference.rs @@ -9,7 +9,7 @@ extern crate nom; use std::str; -use nom::{alpha, is_digit}; +use nom::character::{streaming::alpha1 as alpha, is_digit}; // issue #617 named!(multi<&[u8], () >, fold_many0!( take_while1!( is_digit ), (), |_, _| {})); @@ -19,7 +19,7 @@ named!(multi<&[u8], () >, fold_many0!( take_while1!( is_digit ), (), |_, _| {})) named!( value>>, do_parse!( - first_line: map_res!(is_not_s!("\n"), std::str::from_utf8) + first_line: map_res!(is_not!("\n"), std::str::from_utf8) >> rest: many_m_n!( 0, @@ -46,8 +46,8 @@ fn wrap_suffix(input: &Option>) -> Option { #[cfg(feature = "alloc")] named!(parse_suffix<&[u8],Option>,do_parse!( - u: opt!(many1!(alt_complete!( - tag!("%") | tag!("#") | tag!("@") | alpha + u: opt!(many1!(alt!( + complete!(tag!("%")) | complete!(tag!("#")) | complete!(tag!("@")) | complete!(alpha) ))) >> (wrap_suffix(&u)) )); diff --git a/third_party/rust/nom/tests/ini.rs b/third_party/rust/nom/tests/ini.rs index c7d8ecafb3..c3f9b750d7 100644 --- a/third_party/rust/nom/tests/ini.rs +++ b/third_party/rust/nom/tests/ini.rs @@ -1,26 +1,26 @@ #[macro_use] extern crate nom; -use nom::{alphanumeric, multispace, space}; -use nom::types::CompleteByteSlice; +use nom::{ + IResult, + bytes::complete::take_while, + sequence::delimited, + combinator::map_res, + character::complete::{char, alphanumeric1 as alphanumeric, multispace0 as multispace, space0 as 
space} +}; use std::str; use std::collections::HashMap; -named!(category, map_res!( - delimited!( - char!('['), - take_while!(call!(|c| c != b']')), - char!(']') - ), - complete_byte_slice_to_str -)); +fn category(i: &[u8]) -> IResult<&[u8], &str> { + map_res(delimited(char('['), take_while(|c| c != b']'), char(']')), str::from_utf8)(i) +} -fn complete_byte_slice_to_str<'a>(s: CompleteByteSlice<'a>) -> Result<&'a str, str::Utf8Error> { - str::from_utf8(s.0) +fn complete_byte_slice_to_str<'a>(s: &'a[u8]) -> Result<&'a str, str::Utf8Error> { + str::from_utf8(s) } -named!(key_value , +named!(key_value <&[u8],(&str,&str)>, do_parse!( key: map_res!(alphanumeric, complete_byte_slice_to_str) >> opt!(space) @@ -35,14 +35,14 @@ named!(key_value , ) ); -named!(keys_and_values >, +named!(keys_and_values<&[u8], HashMap<&str, &str> >, map!( many0!(terminated!(key_value, opt!(multispace))), |vec: Vec<_>| vec.into_iter().collect() ) ); -named!(category_and_keys)>, +named!(category_and_keys<&[u8],(&str,HashMap<&str,&str>)>, do_parse!( category: category >> opt!(multispace) >> @@ -51,7 +51,7 @@ named!(category_and_keys)>, ) ); -named!(categories > >, +named!(categories<&[u8], HashMap<&str, HashMap<&str,&str> > >, map!( many0!( separated_pair!( @@ -69,22 +69,18 @@ named!(categories > >, #[test] fn parse_category_test() { - let ini_file = CompleteByteSlice( - b"[category] + let ini_file = &b"[category] parameter=value -key = value2", - ); +key = value2"[..]; - let ini_without_category = CompleteByteSlice( - b"\n\nparameter=value -key = value2", - ); + let ini_without_category = &b"\n\nparameter=value +key = value2"[..]; let res = category(ini_file); println!("{:?}", res); match res { - Ok((i, o)) => println!("i: {:?} | o: {:?}", str::from_utf8(i.0), o), + Ok((i, o)) => println!("i: {:?} | o: {:?}", str::from_utf8(i), o), _ => println!("error"), } @@ -93,17 +89,15 @@ key = value2", #[test] fn parse_key_value_test() { - let ini_file = CompleteByteSlice( - b"parameter=value -key = value2", - ); + let ini_file = &b"parameter=value +key = value2"[..]; - let ini_without_key_value = CompleteByteSlice(b"\nkey = value2"); + let ini_without_key_value = &b"\nkey = value2"[..]; let res = key_value(ini_file); println!("{:?}", res); match res { - Ok((i, (o1, o2))) => println!("i: {:?} | o: ({:?},{:?})", str::from_utf8(i.0), o1, o2), + Ok((i, (o1, o2))) => println!("i: {:?} | o: ({:?},{:?})", str::from_utf8(i), o1, o2), _ => println!("error"), } @@ -112,17 +106,15 @@ key = value2", #[test] fn parse_key_value_with_space_test() { - let ini_file = CompleteByteSlice( - b"parameter = value -key = value2", - ); + let ini_file = &b"parameter = value +key = value2"[..]; - let ini_without_key_value = CompleteByteSlice(b"\nkey = value2"); + let ini_without_key_value = &b"\nkey = value2"[..]; let res = key_value(ini_file); println!("{:?}", res); match res { - Ok((i, (o1, o2))) => println!("i: {:?} | o: ({:?},{:?})", str::from_utf8(i.0), o1, o2), + Ok((i, (o1, o2))) => println!("i: {:?} | o: ({:?},{:?})", str::from_utf8(i), o1, o2), _ => println!("error"), } @@ -131,17 +123,15 @@ key = value2", #[test] fn parse_key_value_with_comment_test() { - let ini_file = CompleteByteSlice( - b"parameter=value;abc -key = value2", - ); + let ini_file = &b"parameter=value;abc +key = value2"[..]; - let ini_without_key_value = CompleteByteSlice(b"\nkey = value2"); + let ini_without_key_value = &b"\nkey = value2"[..]; let res = key_value(ini_file); println!("{:?}", res); match res { - Ok((i, (o1, o2))) => println!("i: {:?} | o: ({:?},{:?})", 
str::from_utf8(i.0), o1, o2), + Ok((i, (o1, o2))) => println!("i: {:?} | o: ({:?},{:?})", str::from_utf8(i), o1, o2), _ => println!("error"), } @@ -150,20 +140,18 @@ key = value2", #[test] fn parse_multiple_keys_and_values_test() { - let ini_file = CompleteByteSlice( - b"parameter=value;abc + let ini_file = &b"parameter=value;abc key = value2 -[category]", - ); +[category]"[..]; - let ini_without_key_value = CompleteByteSlice(b"[category]"); + let ini_without_key_value = &b"[category]"[..]; let res = keys_and_values(ini_file); println!("{:?}", res); match res { - Ok((i, ref o)) => println!("i: {:?} | o: {:?}", str::from_utf8(i.0), o), + Ok((i, ref o)) => println!("i: {:?} | o: {:?}", str::from_utf8(i), o), _ => println!("error"), } @@ -176,21 +164,19 @@ key = value2 #[test] fn parse_category_then_multiple_keys_and_values_test() { //FIXME: there can be an empty line or a comment line after a category - let ini_file = CompleteByteSlice( - b"[abcd] + let ini_file = &b"[abcd] parameter=value;abc key = value2 -[category]", - ); +[category]"[..]; - let ini_after_parser = CompleteByteSlice(b"[category]"); + let ini_after_parser = &b"[category]"[..]; let res = category_and_keys(ini_file); println!("{:?}", res); match res { - Ok((i, ref o)) => println!("i: {:?} | o: {:?}", str::from_utf8(i.0), o), + Ok((i, ref o)) => println!("i: {:?} | o: {:?}", str::from_utf8(i), o), _ => println!("error"), } @@ -202,8 +188,7 @@ key = value2 #[test] fn parse_multiple_categories_test() { - let ini_file = CompleteByteSlice( - b"[abcd] + let ini_file = &b"[abcd] parameter=value;abc @@ -212,15 +197,14 @@ key = value2 [category] parameter3=value3 key4 = value4 -", - ); +"[..]; - let ini_after_parser = CompleteByteSlice(b""); + let ini_after_parser = &b""[..]; let res = categories(ini_file); //println!("{:?}", res); match res { - Ok((i, ref o)) => println!("i: {:?} | o: {:?}", str::from_utf8(i.0), o), + Ok((i, ref o)) => println!("i: {:?} | o: {:?}", str::from_utf8(i), o), _ => println!("error"), } diff --git a/third_party/rust/nom/tests/ini_str.rs b/third_party/rust/nom/tests/ini_str.rs index ef7c1ced15..d778918c40 100644 --- a/third_party/rust/nom/tests/ini_str.rs +++ b/third_party/rust/nom/tests/ini_str.rs @@ -1,80 +1,62 @@ #[macro_use] extern crate nom; -use nom::IResult; -use nom::types::CompleteStr; +use nom::{ + IResult, + combinator::opt, + bytes::complete::{take_while, is_a}, + sequence::{delimited, terminated}, + character::complete::{char, alphanumeric1 as alphanumeric, space0 as space} +}; use std::collections::HashMap; -fn is_alphabetic(chr: char) -> bool { - (chr as u8 >= 0x41 && chr as u8 <= 0x5A) || (chr as u8 >= 0x61 && chr as u8 <= 0x7A) -} - -fn is_digit(chr: char) -> bool { - chr as u8 >= 0x30 && chr as u8 <= 0x39 -} - -fn is_alphanumeric(chr: char) -> bool { - is_alphabetic(chr) || is_digit(chr) -} - -fn is_space(chr: char) -> bool { - chr == ' ' || chr == '\t' -} - fn is_line_ending_or_comment(chr: char) -> bool { chr == ';' || chr == '\n' } -named!(alphanumeric, take_while_s!(is_alphanumeric)); -named!(not_line_ending, is_not_s!("\r\n")); -named!(space, take_while_s!(is_space)); -named!(space_or_line_ending, is_a_s!(" \r\n")); +fn not_line_ending(i: &str) -> IResult<&str, &str> { + take_while(|c| c != '\r' && c != '\n')(i) +} -fn right_bracket(c: char) -> bool { - c == ']' +fn space_or_line_ending(i: &str) -> IResult<&str, &str> { + is_a(" \r\n")(i) } -named!(category , - do_parse!( - tag_s!("[") >> - name: take_till_s!(right_bracket) >> - tag_s!("]") >> - opt!(space_or_line_ending) >> - (name.0) 
- ) -); +fn category(i: &str) -> IResult<&str, &str> { + terminated(delimited(char('['), take_while(|c| c != ']'), char(']')), opt(is_a(" \r\n")))(i) +} -named!(key_value , +named!(key_value <&str,(&str,&str)>, do_parse!( key: alphanumeric >> opt!(space) >> - tag_s!("=") >> + tag!("=") >> opt!(space) >> - val: take_till_s!(is_line_ending_or_comment) >> + val: take_till!(is_line_ending_or_comment) >> opt!(space) >> - opt!(pair!(tag_s!(";"), not_line_ending)) >> + opt!(pair!(tag!(";"), not_line_ending)) >> opt!(space_or_line_ending) >> - (key.0, val.0) + (key, val) ) ); -named!(keys_and_values_aggregator >, many0!(key_value)); +named!(keys_and_values_aggregator<&str, Vec<(&str, &str)> >, many0!(key_value)); -fn keys_and_values(input: CompleteStr) -> IResult> { +fn keys_and_values(input: &str) -> IResult<&str, HashMap<&str, &str>> { match keys_and_values_aggregator(input) { Ok((i, tuple_vec)) => Ok((i, tuple_vec.into_iter().collect())), Err(e) => Err(e), } } -named!(category_and_keys)>, +named!(category_and_keys<&str,(&str,HashMap<&str,&str>)>, pair!(category, keys_and_values) ); -named!(categories_aggregator)> >, many0!(category_and_keys)); +named!(categories_aggregator<&str, Vec<(&str, HashMap<&str,&str>)> >, many0!(category_and_keys)); -fn categories(input: CompleteStr) -> IResult>> { +fn categories(input: &str) -> IResult<&str, HashMap<&str, HashMap<&str, &str>>> { match categories_aggregator(input) { Ok((i, tuple_vec)) => Ok((i, tuple_vec.into_iter().collect())), Err(e) => Err(e), @@ -83,22 +65,18 @@ fn categories(input: CompleteStr) -> IResult println!("i: {} | o: {:?}", i.0, o), + Ok((i, o)) => println!("i: {} | o: {:?}", i, o), _ => println!("error"), } @@ -107,17 +85,15 @@ key = value2", #[test] fn parse_key_value_test() { - let ini_file = CompleteStr( - "parameter=value -key = value2", - ); + let ini_file = "parameter=value +key = value2"; - let ini_without_key_value = CompleteStr("key = value2"); + let ini_without_key_value = "key = value2"; let res = key_value(ini_file); println!("{:?}", res); match res { - Ok((i, (o1, o2))) => println!("i: {} | o: ({:?},{:?})", i.0, o1, o2), + Ok((i, (o1, o2))) => println!("i: {} | o: ({:?},{:?})", i, o1, o2), _ => println!("error"), } @@ -126,17 +102,15 @@ key = value2", #[test] fn parse_key_value_with_space_test() { - let ini_file = CompleteStr( - "parameter = value -key = value2", - ); + let ini_file = "parameter = value +key = value2"; - let ini_without_key_value = CompleteStr("key = value2"); + let ini_without_key_value = "key = value2"; let res = key_value(ini_file); println!("{:?}", res); match res { - Ok((i, (o1, o2))) => println!("i: {} | o: ({:?},{:?})", i.0, o1, o2), + Ok((i, (o1, o2))) => println!("i: {} | o: ({:?},{:?})", i, o1, o2), _ => println!("error"), } @@ -145,17 +119,15 @@ key = value2", #[test] fn parse_key_value_with_comment_test() { - let ini_file = CompleteStr( - "parameter=value;abc -key = value2", - ); + let ini_file = "parameter=value;abc +key = value2"; - let ini_without_key_value = CompleteStr("key = value2"); + let ini_without_key_value = "key = value2"; let res = key_value(ini_file); println!("{:?}", res); match res { - Ok((i, (o1, o2))) => println!("i: {} | o: ({:?},{:?})", i.0, o1, o2), + Ok((i, (o1, o2))) => println!("i: {} | o: ({:?},{:?})", i, o1, o2), _ => println!("error"), } @@ -164,20 +136,18 @@ key = value2", #[test] fn parse_multiple_keys_and_values_test() { - let ini_file = CompleteStr( - "parameter=value;abc + let ini_file = "parameter=value;abc key = value2 -[category]", - ); +[category]"; - let 
ini_without_key_value = CompleteStr("[category]"); + let ini_without_key_value = "[category]"; let res = keys_and_values(ini_file); println!("{:?}", res); match res { - Ok((i, ref o)) => println!("i: {} | o: {:?}", i.0, o), + Ok((i, ref o)) => println!("i: {} | o: {:?}", i, o), _ => println!("error"), } @@ -190,21 +160,19 @@ key = value2 #[test] fn parse_category_then_multiple_keys_and_values_test() { //FIXME: there can be an empty line or a comment line after a category - let ini_file = CompleteStr( - "[abcd] + let ini_file = "[abcd] parameter=value;abc key = value2 -[category]", - ); +[category]"; - let ini_after_parser = CompleteStr("[category]"); + let ini_after_parser = "[category]"; let res = category_and_keys(ini_file); println!("{:?}", res); match res { - Ok((i, ref o)) => println!("i: {} | o: {:?}", i.0, o), + Ok((i, ref o)) => println!("i: {} | o: {:?}", i, o), _ => println!("error"), } @@ -216,8 +184,7 @@ key = value2 #[test] fn parse_multiple_categories_test() { - let ini_file = CompleteStr( - "[abcd] + let ini_file = "[abcd] parameter=value;abc @@ -226,13 +193,12 @@ key = value2 [category] parameter3=value3 key4 = value4 -", - ); +"; let res = categories(ini_file); //println!("{:?}", res); match res { - Ok((i, ref o)) => println!("i: {} | o: {:?}", i.0, o), + Ok((i, ref o)) => println!("i: {} | o: {:?}", i, o), _ => println!("error"), } @@ -245,5 +211,5 @@ key4 = value4 let mut expected_h: HashMap<&str, HashMap<&str, &str>> = HashMap::new(); expected_h.insert("abcd", expected_1); expected_h.insert("category", expected_2); - assert_eq!(res, Ok((CompleteStr(""), expected_h))); + assert_eq!(res, Ok(("", expected_h))); } diff --git a/third_party/rust/nom/tests/issues.rs b/third_party/rust/nom/tests/issues.rs index 102516f804..62a1a01bc5 100644 --- a/third_party/rust/nom/tests/issues.rs +++ b/third_party/rust/nom/tests/issues.rs @@ -4,10 +4,8 @@ #[macro_use] extern crate nom; -extern crate regex; -use nom::{space, Err, IResult, Needed, le_u64, is_digit}; -use nom::types::{CompleteStr, CompleteByteSlice}; +use nom::{character::{is_digit, streaming::space1 as space}, Err, IResult, Needed, error::ErrorKind, number::streaming::le_u64}; #[allow(dead_code)] struct Range { @@ -70,7 +68,7 @@ fn issue_58() { #[cfg(feature = "std")] mod parse_int { use nom::HexDisplay; - use nom::{digit, space, IResult}; + use nom::{IResult, character::streaming::{digit1 as digit, space1 as space}}; use std::str; named!(parse_ints>, many0!(spaces_or_int)); @@ -106,8 +104,8 @@ mod parse_int { #[test] fn usize_length_bytes_issue() { - use nom::be_u16; - let _: IResult<&[u8], &[u8], u32> = length_bytes!(b"012346", be_u16); + use nom::number::streaming::be_u16; + let _: IResult<&[u8], &[u8], (&[u8], ErrorKind)> = length_data!(b"012346", be_u16); } /* @@ -143,9 +141,9 @@ named!( named!(issue_308(&str) -> bool, do_parse! ( - tag_s! ("foo") >> - b: alt_complete! ( - map! (tag_s! ("1"), |_: &str|->bool {true}) | + tag! ("foo") >> + b: alt! ( + complete!(map! (tag! ("1"), |_: &str|->bool {true})) | value! 
(false) ) >> (b) )); @@ -157,7 +155,7 @@ fn issue_302(input: &[u8]) -> IResult<&[u8], Option>> { #[test] fn issue_655() { - use nom::{line_ending, not_line_ending}; + use nom::character::streaming::{line_ending, not_line_ending}; named!(twolines(&str) -> (&str, &str), do_parse!( l1 : not_line_ending >> @@ -174,62 +172,14 @@ fn issue_655() { assert_eq!(twolines("foé\r\nbar\n"), Ok(("", ("foé", "bar")))); } -#[cfg(feature = "std")] -named!(issue_666 , dbg_dmp!(tag!("abc"))); - -#[test] -fn issue_667() { - use nom::alpha; - - named!(foo >, - many0!( - alt!(alpha | is_a!("_")) - ) - ); - assert_eq!( - foo(CompleteByteSlice(b"")), - Ok((CompleteByteSlice(b""), vec![])) - ); - assert_eq!( - foo(CompleteByteSlice(b"loremipsum")), - Ok(( - CompleteByteSlice(b""), - vec![CompleteByteSlice(b"loremipsum")] - )) - ); - assert_eq!( - foo(CompleteByteSlice(b"lorem_ipsum")), - Ok(( - CompleteByteSlice(b""), - vec![ - CompleteByteSlice(b"lorem"), - CompleteByteSlice(b"_"), - CompleteByteSlice(b"ipsum"), - ] - )) - ); - assert_eq!( - foo(CompleteByteSlice(b"_lorem_ipsum")), - Ok(( - CompleteByteSlice(b""), - vec![ - CompleteByteSlice(b"_"), - CompleteByteSlice(b"lorem"), - CompleteByteSlice(b"_"), - CompleteByteSlice(b"ipsum"), - ] - )) - ); - assert_eq!( - foo(CompleteByteSlice(b"!@#$")), - Ok((CompleteByteSlice(b"!@#$"), vec![])) - ); -} - #[test] fn issue_721() { - assert_eq!(parse_to!("1234", u16), Ok(("", 1234))); - assert_eq!(parse_to!("foo", String), Ok(("", "foo".to_string()))); + named!(f1<&str, u16>, parse_to!(u16)); + named!(f2<&str, String>, parse_to!(String)); + assert_eq!(f1("1234"), Ok(("", 1234))); + assert_eq!(f2("foo"), Ok(("", "foo".to_string()))); + //assert_eq!(parse_to!("1234", u16), Ok(("", 1234))); + //assert_eq!(parse_to!("foo", String), Ok(("", "foo".to_string()))); } #[cfg(feature = "alloc")] @@ -251,14 +201,10 @@ named!(issue_724<&str, i32>, ) ); -named!(issue_741_str, re_match!(r"^_?[A-Za-z][0-9A-Z_a-z-]*")); -named!(issue_741_bytes, re_bytes_match!(r"^_?[A-Za-z][0-9A-Z_a-z-]*")); - - #[test] fn issue_752() { assert_eq!( - Err::Error(nom::Context::Code("ab", nom::ErrorKind::ParseTo)), + Err::Error(("ab", nom::error::ErrorKind::ParseTo)), parse_to!("ab", usize).unwrap_err() ) } @@ -269,7 +215,7 @@ fn atom_specials(c: u8) -> bool { named!( capability<&str>, - do_parse!(tag_s!(" ") >> _atom: map_res!(take_till1!(atom_specials), std::str::from_utf8) >> ("a")) + do_parse!(tag!(" ") >> _atom: map_res!(take_till1!(atom_specials), std::str::from_utf8) >> ("a")) ); #[test] @@ -278,7 +224,7 @@ fn issue_759() { } named_args!(issue_771(count: usize)>, - length_count!(value!(count), call!(nom::be_u32)) + length_count!(value!(count), call!(nom::number::streaming::be_u32)) ); /// This test is in a separate module to check that all required symbols are imported in @@ -286,7 +232,7 @@ named_args!(issue_771(count: usize)>, /// mask the error ('"Use of undeclared type or module `Needed`" in escaped_transform!'). 
mod issue_780 { named!(issue_780<&str, String>, - escaped_transform!(call!(::nom::alpha), '\\', tag!("n")) + escaped_transform!(call!(::nom::character::streaming::alpha1), '\\', tag!("n")) ); } @@ -298,7 +244,7 @@ named!(multi_617<&[u8], () >, fold_many0!( digits, (), |_, _| {})); named!(multi_617_fails<&[u8], () >, fold_many0!( take_while1!( is_digit ), (), |_, _| {})); mod issue_647 { - use nom::{Err,be_f64}; + use nom::{Err, number::streaming::be_f64, error::ErrorKind}; pub type Input<'a> = &'a [u8]; #[derive(PartialEq, Debug, Clone)] @@ -307,8 +253,8 @@ mod issue_647 { v: Vec } - fn list<'a,'b>(input: Input<'a>, _cs: &'b f64) -> Result<(Input<'a>,Vec), Err<&'a [u8]>> { - separated_list_complete!(input, tag!(","),be_f64) + fn list<'a,'b>(input: Input<'a>, _cs: &'b f64) -> Result<(Input<'a>,Vec), Err<(&'a [u8], ErrorKind)>> { + separated_list!(input, complete!(tag!(",")), complete!(be_f64)) } named!(data, map!( @@ -327,3 +273,46 @@ mod issue_647 { } named!(issue_775, take_till1!(|_| true)); + +#[test] +fn issue_848_overflow_incomplete_bits_to_bytes() { + named!(take, take!(0x2000000000000000)); + named!(parser<&[u8], &[u8]>, bits!(bytes!(take))); + assert_eq!(parser(&b""[..]), Err(Err::Failure(error_position!(&b""[..], ErrorKind::TooLarge)))); +} + +#[test] +fn issue_942() { + use nom::error::ParseError; + pub fn parser<'a, E: ParseError<&'a str>>(i: &'a str) -> IResult<&'a str, usize, E> { + use nom::{character::complete::char, error::context, multi::many0_count}; + many0_count(context("char_a", char('a')))(i) + } + assert_eq!(parser::<()>("aaa"), Ok(("", 3))); +} + +#[test] +fn issue_many_m_n_with_zeros() { + use nom::multi::many_m_n; + use nom::character::complete::char; + let parser = many_m_n::<_, _, (), _>(0, 0, char('a')); + assert_eq!(parser("aaa"), Ok(("aaa", vec!()))); +} + +#[test] +fn issue_1027_convert_error_panic_nonempty() { + use nom::error::{VerboseError, convert_error}; + use nom::sequence::pair; + use nom::character::complete::char; + + let input = "a"; + + let result: IResult<_, _, VerboseError<&str>> = pair(char('a'), char('b'))(input); + let err = match result.unwrap_err() { + Err::Error(e) => e, + _ => unreachable!(), + }; + + let msg = convert_error(&input, err); + assert_eq!(msg, "0: at line 1:\na\n ^\nexpected \'b\', got end of input\n\n"); +} diff --git a/third_party/rust/nom/tests/json.rs b/third_party/rust/nom/tests/json.rs index 2d5ca04d4d..14329a16ae 100644 --- a/third_party/rust/nom/tests/json.rs +++ b/third_party/rust/nom/tests/json.rs @@ -4,7 +4,7 @@ #[macro_use] extern crate nom; -use nom::{is_alphanumeric, recognize_float}; +use nom::{character::is_alphanumeric, number::complete::recognize_float}; use std::str; use std::collections::HashMap; @@ -23,34 +23,34 @@ named!(float, flat_map!(recognize_float, parse_to!(f32))); named!( string<&str>, delimited!( - tag!("\""), + char!('"'), //map_res!(escaped!(call!(alphanumeric), '\\', is_a!("\"n\\")), str::from_utf8), map_res!( escaped!(take_while1!(is_alphanumeric), '\\', one_of!("\"n\\")), str::from_utf8 ), - tag!("\"") + char!('"') ) ); named!( array>, ws!(delimited!( - tag!("["), - separated_list!(tag!(","), value), - tag!("]") + char!('['), + separated_list!(char!(','), value), + char!(']') )) ); named!( key_value<(&str, JsonValue)>, - ws!(separated_pair!(string, tag!(":"), value)) + ws!(separated_pair!(string, char!(':'), value)) ); named!( hash>, ws!(map!( - delimited!(tag!("{"), separated_list!(tag!(","), key_value), tag!("}")), + delimited!(char!('{'), separated_list!(char!(','), key_value), 
char!('}')), |tuple_vec| { let mut h: HashMap = HashMap::new(); for (k, v) in tuple_vec { @@ -72,25 +72,34 @@ named!( ); #[test] -fn hash_test() { - let test = &b" { \"a\"\t: 42, - \"b\": \"x\" - }\0"; - - //FIXME: top level value must be an object? - println!("{:?}", value(&test[..]).unwrap()); - //assert!(false); +fn json_object() { + let input = + r#"{ + "a": 42, + "b": "x" + }\0"#; + + let mut expected_map = HashMap::new(); + expected_map.insert(String::from("a"), JsonValue::Num(42f32)); + expected_map.insert(String::from("b"), JsonValue::Str(String::from("x"))); + let expected = JsonValue::Object(expected_map); + + assert_eq!(expected, value(input.as_bytes()).unwrap().1); } #[test] -fn parse_example_test() { - let test = &b" { \"a\"\t: 42, - \"b\": [ \"x\", \"y\", 12 ] , - \"c\": { \"hello\" : \"world\" - } - }\0"; - - //FIXME: top level value must be an object? - println!("{:?}", value(&test[..]).unwrap()); - //assert!(false); +fn json_array() { + let input = + r#"[ + 42, + "x" + ]\0"#; + + let expected_vec = vec![ + JsonValue::Num(42f32), + JsonValue::Str(String::from("x")) + ]; + let expected = JsonValue::Array(expected_vec); + + assert_eq!(expected, value(input.as_bytes()).unwrap().1); } diff --git a/third_party/rust/nom/tests/mp4.rs b/third_party/rust/nom/tests/mp4.rs index 6802441963..1432621611 100644 --- a/third_party/rust/nom/tests/mp4.rs +++ b/third_party/rust/nom/tests/mp4.rs @@ -1,13 +1,13 @@ -#![cfg(feature = "stream")] #![allow(dead_code)] #[macro_use] extern crate nom; -use nom::{IResult, Needed, be_f32, be_u16, be_u32, be_u64}; -//use nom::{Consumer,ConsumerState,Move,Input,Producer,FileProducer,FileProducerState}; -//use nom::IResult; -use nom::{Err, ErrorKind}; +use nom::{ + IResult, Needed, Err, + error::ErrorKind, + number::streaming::{be_u16, be_u32, be_u64, be_f32} +}; use std::str; @@ -261,7 +261,7 @@ fn mvhd_box(input: &[u8]) -> IResult<&[u8], MvhdBox> { } else if input.len() == 112 { mvhd64(input) } else { - Err(Err::Error(error_position!(input, ErrorKind::Custom(32u32)))) + Err(Err::Error(error_position!(input, ErrorKind::TooLarge))) }; println!("res: {:?}", res); res @@ -272,7 +272,7 @@ fn unknown_box_type(input: &[u8]) -> IResult<&[u8], MP4BoxType> { } //named!(box_type<&[u8], MP4BoxType>, -fn box_type(input: &[u8]) -> IResult<&[u8], MP4BoxType, u32> { +fn box_type(input: &[u8]) -> IResult<&[u8], MP4BoxType> { alt!(input, tag!("ftyp") => { |_| MP4BoxType::Ftyp } | tag!("moov") => { |_| MP4BoxType::Moov } | @@ -317,213 +317,3 @@ named!(moov_header<&[u8],MP4BoxHeader>, ) ); -/* -#[derive(Debug,PartialEq,Eq)] -enum MP4State { - Main, - Moov, - Mvhd(usize) -} - -pub struct MP4Consumer { - state: MP4State, - moov_bytes: usize, - c_state: ConsumerState<(), (), Move> -} - -impl MP4Consumer { - fn new() -> MP4Consumer { - MP4Consumer { state: MP4State::Main, moov_bytes: 0, c_state: ConsumerState::Continue(Move::Consume(0)) } - } - - fn consume_main(&mut self, input: Input<&[u8]>) -> ConsumerState<(), (), Move> { - //println!("\nparsing box header:\n{}", input.to_hex(8)); - match input { - Input::Eof(None) => ConsumerState::Done(Move::Consume(0), ()), - Input::Empty => ConsumerState::Continue(Move::Consume(0)), - Input::Element(sl) | Input::Eof(Some(sl)) => { - match box_header(sl) { - Ok((i, header)) => { - match header.tag { - MP4BoxType::Ftyp => { - println!("-> FTYP"); - match filetype_parser(&i[0..(header.length as usize - 8)]) { - Ok((rest, filetype_header)) => { - println!("filetype header: {:?}", filetype_header); - //return 
ConsumerState::Await(header.length as usize, header.length as usize - 8); - return ConsumerState::Continue(Move::Consume(sl.offset(rest))); - } - Err(Err::Error(a)) => { - println!("ftyp parsing error: {:?}", a); - assert!(false); - return ConsumerState::Error(()); - }, - Err(Err::Incomplete(n)) => { - println!("ftyp incomplete -> await: {}", sl.len()); - return ConsumerState::Continue(Move::Await(n)); - //return ConsumerState::Await(0, input.len() + 100); - } - } - }, - MP4BoxType::Moov => { - println!("-> MOOV"); - self.state = MP4State::Moov; - self.moov_bytes = header.length as usize - 8; - return ConsumerState::Continue(Move::Consume(sl.offset(i))); - }, - MP4BoxType::Mdat => println!("-> MDAT"), - MP4BoxType::Free => println!("-> FREE"), - MP4BoxType::Skip => println!("-> SKIP"), - MP4BoxType::Wide => println!("-> WIDE"), - MP4BoxType::Unknown => { - println!("-> UNKNOWN"); - println!("bytes:\n{}", (sl).to_hex(8)); - //return ConsumerState::Continue(Move::Consume(sl.offset(i))); - }, - _ => { println!("invalid"); return ConsumerState::Error(())} - } - return ConsumerState::Continue(Move::Seek(SeekFrom::Current((header.length) as i64))) - }, - Err(Err::Error(a)) => { - println!("mp4 parsing error: {:?}", a); - assert!(false); - return ConsumerState::Error(()); - }, - Err(Err::Incomplete(i)) => { - // FIXME: incomplete should send the required size - println!("mp4 incomplete -> await: {}", sl.len()); - return ConsumerState::Continue(Move::Await(i)); - } - } - } - } - } - - fn consume_moov(&mut self, input: Input<&[u8]>) -> ConsumerState<(), (), Move> { - //println!("\nparsing moov box(remaining {} bytes):\n{}", self.moov_bytes, input.to_hex(8)); - match input { - Input::Eof(None) => return ConsumerState::Error(()), - Input::Empty => return ConsumerState::Continue(Move::Consume(0)), - Input::Element(sl) | Input::Eof(Some(sl)) => { - if self.moov_bytes == 0 { - //println!("finished parsing moov atom, continuing with main parser"); - self.state = MP4State::Main; - return ConsumerState::Continue(Move::Consume(0)); - } - match moov_header(sl) { - Ok((i, header)) => { - match header.tag { - MP4BoxType::Mvhd => { - println!("-> MVHD"); - self.state = MP4State::Mvhd(header.length as usize - 8); - // TODO: check for overflow here - self.moov_bytes = self.moov_bytes - (sl.len() - i.len()); - println!("remaining moov_bytes: {}", self.moov_bytes); - return ConsumerState::Continue(Move::Consume(sl.offset(i))); - }, - MP4BoxType::Wide => println!("-> WIDE"), - MP4BoxType::Mdra => println!("-> MDRA"), - MP4BoxType::Dref => println!("-> DREF"), - MP4BoxType::Cmov => println!("-> CMOV"), - MP4BoxType::Rmra => println!("-> RMRA"), - MP4BoxType::Iods => println!("-> IODS"), - MP4BoxType::Clip => println!("-> CLIP"), - MP4BoxType::Trak => println!("-> TRAK"), - MP4BoxType::Udta => println!("-> UDTA"), - MP4BoxType::Unknown => println!("-> MOOV UNKNOWN"), - _ => { println!("invalid header here: {:?}", header.tag); return ConsumerState::Error(());} - }; - // TODO: check for overflow here - self.moov_bytes = self.moov_bytes - header.length as usize; - println!("remaining moov_bytes: {}", self.moov_bytes); - return ConsumerState::Continue(Move::Seek(SeekFrom::Current((header.length) as i64))) - }, - Err(Err::Error(a)) => { - println!("moov parsing error: {:?}", a); - println!("data:\n{}", sl.to_hex(8)); - assert!(false); - return ConsumerState::Error(()); - }, - Err(Err::Incomplete(i)) => { - println!("moov incomplete -> await: {}", sl.len()); - return ConsumerState::Continue(Move::Await(i)); - } - } - } - }; 
- } - -} - -consumer_from_parser!(MvhdConsumer, mvhd_box); - -impl<'a> Consumer<&'a[u8], (), (), Move> for MP4Consumer { - fn handle(&mut self, input: Input<&[u8]>) -> &ConsumerState<(), (), Move> { - match self.state { - MP4State::Main => { - self.c_state = self.consume_main(input); - }, - MP4State::Moov => { - self.c_state = self.consume_moov(input); - }, - MP4State::Mvhd(sz) => { - match input { - Input::Eof(None) => self.c_state = ConsumerState::Error(()), - Input::Empty => self.c_state = ConsumerState::Continue(Move::Consume(0)), - Input::Element(sl) | Input::Eof(Some(sl)) => { - let mut c = MvhdConsumer{ state:ConsumerState::Continue(Move::Consume(0)) }; - self.c_state = c.handle(Input::Element(&sl[..sz])).flat_map(|m, _| { - self.state = MP4State::Moov; - ConsumerState::Continue(m) - }); - println!("found mvhd?: {:?}", c.state()); - match self.c_state { - ConsumerState::Continue(Move::Consume(sz)) => self.moov_bytes = self.moov_bytes - sz, - ConsumerState::Continue(Move::Seek(SeekFrom::Current(sz))) => self.moov_bytes = self.moov_bytes - (sz as usize), - _ => () - }; - println!("remaining moov_bytes: {}", self.moov_bytes); - } - } - } - }; - &self.c_state - } - - fn state(&self) -> &ConsumerState<(), (), Move> { - &self.c_state - } -} - -#[allow(unused_must_use)] -fn explore_mp4_file(filename: &str) { - let mut p = FileProducer::new(filename, 400).unwrap(); - let mut c = MP4Consumer{state: MP4State::Main, moov_bytes: 0, c_state: ConsumerState::Continue(Move::Consume(0))}; - //c.run(&mut p); - while let &ConsumerState::Continue(mv) = p.apply(&mut c) { - println!("move: {:?}", mv); - } - println!("last consumer state: {:?} | last state: {:?}", c.c_state, c.state); - - if let ConsumerState::Done(Move::Consume(0), ()) = c.c_state { - println!("consumer state ok"); - } else { - assert!(false, "consumer should have reached Done state"); - } - assert_eq!(c.state, MP4State::Main); - assert_eq!(p.state(), FileProducerState::Eof); - //assert!(false); -} - - -#[test] -fn small_test() { - explore_mp4_file("assets/small.mp4"); -} - - -#[test] -fn big_bunny_test() { - explore_mp4_file("assets/bigbuckbunny.mp4"); -} -*/ diff --git a/third_party/rust/nom/tests/multiline.rs b/third_party/rust/nom/tests/multiline.rs index e302e28565..cca19ecbad 100644 --- a/third_party/rust/nom/tests/multiline.rs +++ b/third_party/rust/nom/tests/multiline.rs @@ -1,30 +1,36 @@ -#[macro_use] extern crate nom; -use nom::types::CompleteStr; -use nom::{alphanumeric, eol}; -use nom::IResult; +use nom::{ + IResult, + multi::many0, + sequence::terminated, + character::complete::{alphanumeric1 as alphanumeric, line_ending as eol} +}; -pub fn end_of_line(input: CompleteStr) -> IResult { - alt!(input, eof!() | eol) +pub fn end_of_line(input: &str) -> IResult<&str, &str> { + if input.is_empty() { + Ok((input, input)) + } else { + eol(input) + } } -pub fn read_line(input: CompleteStr) -> IResult { - terminated!(input, alphanumeric, end_of_line) +pub fn read_line(input: &str) -> IResult<&str, &str> { + terminated(alphanumeric, end_of_line)(input) } -pub fn read_lines(input: CompleteStr) -> IResult> { - many0!(input, read_line) +pub fn read_lines(input: &str) -> IResult<&str, Vec<&str>> { + many0(read_line)(input) } #[cfg(feature = "alloc")] #[test] fn read_lines_test() { let res = Ok(( - CompleteStr(""), - vec![CompleteStr("Duck"), CompleteStr("Dog"), CompleteStr("Cow")], + "", + vec!["Duck", "Dog", "Cow"], )); - assert_eq!(read_lines(CompleteStr("Duck\nDog\nCow\n")), res); - assert_eq!(read_lines(CompleteStr("Duck\nDog\nCow")), 
res); + assert_eq!(read_lines("Duck\nDog\nCow\n"), res); + assert_eq!(read_lines("Duck\nDog\nCow"), res); } diff --git a/third_party/rust/nom/tests/named_args.rs b/third_party/rust/nom/tests/named_args.rs index 6aab6517a5..811b0a0dce 100644 --- a/third_party/rust/nom/tests/named_args.rs +++ b/third_party/rust/nom/tests/named_args.rs @@ -1,8 +1,12 @@ #[macro_use] extern crate nom; -use nom::digit; -use nom::types::CompleteByteSlice; +use nom::{ + branch::alt, + sequence::{delimited, pair, preceded}, + character::complete::{digit1 as digit, space0 as space}, + bytes::complete::tag +}; // Parser definition @@ -26,27 +30,32 @@ impl Operator { } // Parse the specified `Operator`. -named_args!(operator(op: Operator) , - tag!(op.to_str()) +named_args!(operator(op: Operator) <&[u8], &[u8]>, + call!(tag(op.to_str())) ); // We parse any expr surrounded by the tags `open_tag` and `close_tag`, ignoring all whitespaces around those -named_args!(brackets<'a>(open_tag: &str, close_tag: &str) , i64>, - ws!(delimited!( tag!(open_tag), expr, tag!(close_tag) )) ); +named_args!(brackets<'a>(open_tag: &str, close_tag: &str) <&'a[u8], i64>, + call!(delimited( + space, + delimited(tag(open_tag), preceded(space, expr), preceded(space, tag(close_tag))), + space + )) +); -fn complete_byte_slice_to_str<'a>(s: CompleteByteSlice<'a>) -> Result<&'a str, str::Utf8Error> { - str::from_utf8(s.0) +fn byte_slice_to_str<'a>(s: &'a[u8]) -> Result<&'a str, str::Utf8Error> { + str::from_utf8(s) } // We transform an integer string into a i64, ignoring surrounding whitespaces // We look for a digit suite, and try to convert it. // If either str::from_utf8 or FromStr::from_str fail, // we fallback to the brackets parser defined above -named!(factor, alt!( +named!(factor<&[u8], i64>, alt!( map_res!( map_res!( - ws!(digit), - complete_byte_slice_to_str + call!(delimited(space, digit, space)), + byte_slice_to_str ), FromStr::from_str ) @@ -57,26 +66,26 @@ named!(factor, alt!( // We read an initial factor and for each time we find // a * or / operator followed by another factor, we do // the math by folding everything -named!(term , do_parse!( +named!(term <&[u8], i64>, do_parse!( init: factor >> res: fold_many0!( pair!(alt!(call!(operator, Star) | call!(operator, Slash)), factor), init, - |acc, (op, val): (CompleteByteSlice, i64)| { - if ((op.0)[0] as char) == '*' { acc * val } else { acc / val } + |acc, (op, val): (&[u8], i64)| { + if (op[0] as char) == '*' { acc * val } else { acc / val } } ) >> (res) ) ); -named!(expr , do_parse!( +named!(expr <&[u8], i64>, do_parse!( init: term >> res: fold_many0!( - pair!(alt!(tag!("+") | tag!("-")), term), + call!(pair(alt((tag("+"), tag("-"))), term)), init, - |acc, (op, val): (CompleteByteSlice, i64)| { - if ((op.0)[0] as char) == '+' { acc + val } else { acc - val } + |acc, (op, val): (&[u8], i64)| { + if (op[0] as char) == '+' { acc + val } else { acc - val } } ) >> (res) @@ -86,67 +95,67 @@ named!(expr , do_parse!( #[test] fn factor_test() { assert_eq!( - factor(CompleteByteSlice(b"3")), - Ok((CompleteByteSlice(b""), 3)) + factor(&b"3"[..]), + Ok((&b""[..], 3)) ); assert_eq!( - factor(CompleteByteSlice(b" 12")), - Ok((CompleteByteSlice(b""), 12)) + factor(&b" 12"[..]), + Ok((&b""[..], 12)) ); assert_eq!( - factor(CompleteByteSlice(b"537 ")), - Ok((CompleteByteSlice(b""), 537)) + factor(&b"537 "[..]), + Ok((&b""[..], 537)) ); assert_eq!( - factor(CompleteByteSlice(b" 24 ")), - Ok((CompleteByteSlice(b""), 24)) + factor(&b" 24 "[..]), + Ok((&b""[..], 24)) ); } #[test] fn term_test() { 
assert_eq!( - term(CompleteByteSlice(b" 12 *2 / 3")), - Ok((CompleteByteSlice(b""), 8)) + term(&b" 12 *2 / 3"[..]), + Ok((&b""[..], 8)) ); assert_eq!( - term(CompleteByteSlice(b" 2* 3 *2 *2 / 3")), - Ok((CompleteByteSlice(b""), 8)) + term(&b" 2* 3 *2 *2 / 3"[..]), + Ok((&b""[..], 8)) ); assert_eq!( - term(CompleteByteSlice(b" 48 / 3/2")), - Ok((CompleteByteSlice(b""), 8)) + term(&b" 48 / 3/2"[..]), + Ok((&b""[..], 8)) ); } #[test] fn expr_test() { assert_eq!( - expr(CompleteByteSlice(b" 1 + 2 ")), - Ok((CompleteByteSlice(b""), 3)) + expr(&b" 1 + 2 "[..]), + Ok((&b""[..], 3)) ); assert_eq!( - expr(CompleteByteSlice(b" 12 + 6 - 4+ 3")), - Ok((CompleteByteSlice(b""), 17)) + expr(&b" 12 + 6 - 4+ 3"[..]), + Ok((&b""[..], 17)) ); assert_eq!( - expr(CompleteByteSlice(b" 1 + 2*3 + 4")), - Ok((CompleteByteSlice(b""), 11)) + expr(&b" 1 + 2*3 + 4"[..]), + Ok((&b""[..], 11)) ); } #[test] fn parens_test() { assert_eq!( - expr(CompleteByteSlice(b" ( 2 )")), - Ok((CompleteByteSlice(b""), 2)) + expr(&b" ( 2 )"[..]), + Ok((&b""[..], 2)) ); assert_eq!( - expr(CompleteByteSlice(b" 2* ( 3 + 4 ) ")), - Ok((CompleteByteSlice(b""), 14)) + expr(&b" 2* ( 3 + 4 ) "[..]), + Ok((&b""[..], 14)) ); assert_eq!( - expr(CompleteByteSlice(b" 2*2 / ( 5 - 1) + 3")), - Ok((CompleteByteSlice(b""), 4)) + expr(&b" 2*2 / ( 5 - 1) + 3"[..]), + Ok((&b""[..], 4)) ); } diff --git a/third_party/rust/nom/tests/overflow.rs b/third_party/rust/nom/tests/overflow.rs index 067f34b2de..2f4a87f34a 100644 --- a/third_party/rust/nom/tests/overflow.rs +++ b/third_party/rust/nom/tests/overflow.rs @@ -1,8 +1,12 @@ #![cfg_attr(feature = "cargo-clippy", allow(unreadable_literal))] +#![cfg(target_pointer_width = "64")] + #[macro_use] extern crate nom; -use nom::{Err, Needed, be_u64}; +use nom::{Err, Needed}; +#[cfg(feature = "alloc")] +use nom::number::streaming::be_u64; // Parser definition @@ -43,9 +47,9 @@ fn overflow_incomplete_tuple() { #[test] #[cfg(feature = "alloc")] fn overflow_incomplete_length_bytes() { - named!(multi<&[u8], Vec<&[u8]> >, many0!( length_bytes!(be_u64) ) ); + named!(multi<&[u8], Vec<&[u8]> >, many0!( length_data!(be_u64) ) ); - // Trigger an overflow in length_bytes + // Trigger an overflow in length_data assert_eq!( multi(&b"\x00\x00\x00\x00\x00\x00\x00\x01\xaa\xff\xff\xff\xff\xff\xff\xff\xff\xaa"[..]), Err(Err::Incomplete(Needed::Size(18446744073709551615))) @@ -55,7 +59,7 @@ fn overflow_incomplete_length_bytes() { #[test] #[cfg(feature = "alloc")] fn overflow_incomplete_many0() { - named!(multi<&[u8], Vec<&[u8]> >, many0!( length_bytes!(be_u64) ) ); + named!(multi<&[u8], Vec<&[u8]> >, many0!( length_data!(be_u64) ) ); // Trigger an overflow in many0 assert_eq!( @@ -67,7 +71,7 @@ fn overflow_incomplete_many0() { #[test] #[cfg(feature = "alloc")] fn overflow_incomplete_many1() { - named!(multi<&[u8], Vec<&[u8]> >, many1!( length_bytes!(be_u64) ) ); + named!(multi<&[u8], Vec<&[u8]> >, many1!( length_data!(be_u64) ) ); // Trigger an overflow in many1 assert_eq!( @@ -79,7 +83,7 @@ fn overflow_incomplete_many1() { #[test] #[cfg(feature = "alloc")] fn overflow_incomplete_many_till() { - named!(multi<&[u8], (Vec<&[u8]>, &[u8]) >, many_till!( length_bytes!(be_u64), tag!("abc") ) ); + named!(multi<&[u8], (Vec<&[u8]>, &[u8]) >, many_till!( length_data!(be_u64), tag!("abc") ) ); // Trigger an overflow in many_till assert_eq!( @@ -91,7 +95,7 @@ fn overflow_incomplete_many_till() { #[test] #[cfg(feature = "alloc")] fn overflow_incomplete_many_m_n() { - named!(multi<&[u8], Vec<&[u8]> >, many_m_n!(2, 4, length_bytes!(be_u64) ) ); + 
named!(multi<&[u8], Vec<&[u8]> >, many_m_n!(2, 4, length_data!(be_u64) ) ); // Trigger an overflow in many_m_n assert_eq!( @@ -103,20 +107,7 @@ fn overflow_incomplete_many_m_n() { #[test] #[cfg(feature = "alloc")] fn overflow_incomplete_count() { - named!(counter<&[u8], Vec<&[u8]> >, count!( length_bytes!(be_u64), 2 ) ); - - assert_eq!( - counter(&b"\x00\x00\x00\x00\x00\x00\x00\x01\xaa\xff\xff\xff\xff\xff\xff\xff\xef\xaa"[..]), - Err(Err::Incomplete(Needed::Size(18446744073709551599))) - ); -} - -#[test] -fn overflow_incomplete_count_fixed() { - named!( - counter<[&[u8]; 2]>, - count_fixed!(&[u8], length_bytes!(be_u64), 2) - ); + named!(counter<&[u8], Vec<&[u8]> >, count!( length_data!(be_u64), 2 ) ); assert_eq!( counter(&b"\x00\x00\x00\x00\x00\x00\x00\x01\xaa\xff\xff\xff\xff\xff\xff\xff\xef\xaa"[..]), @@ -127,8 +118,8 @@ fn overflow_incomplete_count_fixed() { #[test] #[cfg(feature = "alloc")] fn overflow_incomplete_length_count() { - use nom::be_u8; - named!(multi<&[u8], Vec<&[u8]> >, length_count!( be_u8, length_bytes!(be_u64) ) ); + use nom::number::streaming::be_u8; + named!(multi<&[u8], Vec<&[u8]> >, length_count!( be_u8, length_data!(be_u64) ) ); assert_eq!( multi(&b"\x04\x00\x00\x00\x00\x00\x00\x00\x01\xaa\xff\xff\xff\xff\xff\xff\xff\xee\xaa"[..]), diff --git a/third_party/rust/nom/tests/reborrow_fold.rs b/third_party/rust/nom/tests/reborrow_fold.rs index d13ad9eeb9..b53555b074 100644 --- a/third_party/rust/nom/tests/reborrow_fold.rs +++ b/third_party/rust/nom/tests/reborrow_fold.rs @@ -7,10 +7,12 @@ extern crate nom; use std::str; named_args!(atom<'a>(tomb: &'a mut ()), - map!(map_res!(is_not_s!(" \t\r\n()"), str::from_utf8), ToString::to_string)); + map!(map_res!(is_not!(" \t\r\n()"), str::from_utf8), ToString::to_string)); +/*FIXME: should we support the use case of borrowing data mutably in a parser? 
named_args!(list<'a>(tomb: &'a mut ()), delimited!( char!('('), fold_many0!(call!(atom, tomb), "".to_string(), |acc: String, next: String| acc + next.as_str()), char!(')'))); +*/ diff --git a/third_party/rust/object/.cargo-checksum.json b/third_party/rust/object/.cargo-checksum.json index 53386b13ef..192cc437c1 100644 --- a/third_party/rust/object/.cargo-checksum.json +++ b/third_party/rust/object/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.lock":"440af8ab126af2257c67fdd2e1f0b144ed63f17a4a0a223211ec31ad1fda4d08","Cargo.toml":"3dd7528275c4d0eef0dd1b918dc6247c7e996383657355b43362f826f59b31fe","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0b74dfa0bcee5c420c6b7f67b4b2658f9ab8388c97b8e733975f2cecbdd668a6","README.md":"5c498b93ff10c038784c5b62c346858c0d53d9d6d1591841c5b6724b0abc9415","examples/nm.rs":"9885cb85700512d63e537b4b60bd2c840aa076721eae2059ba6ae3651be1282e","examples/objcopy.rs":"a05e1b87318be30b6dd67c061c59d77b97efde2af9d5191c9c97978c0410a5eb","examples/objdump.rs":"07a23a2f74b7e46d4cdcf4dab23357a39410b4c4db43179c17e059011e40d45c","src/common.rs":"143f42a0e085e82a022b85680d42322ac912eefc4ab2cb2bee687368fa8615a5","src/lib.rs":"7e559b0af48faca419086a743e3f99794e10a91e8619f8c6e26f113d1935fe14","src/read/any.rs":"12be08836fb2f66026b34434b47cfe275f82cf31b05039ef0545fc324a3b9bce","src/read/coff.rs":"f3a16d71ec8c5692f5435bf51a3ecda49dc727d5d93f5cdef67e7853e31e6dfa","src/read/elf.rs":"68939fc291b2f2c0b6d3d112fd7edf5eaed8b5987d6fda35a1a843843511d325","src/read/macho.rs":"ee575a49c194fdaa9132e1230266269dc4cb497b9a8f1fed635173bba492ead2","src/read/mod.rs":"efdb99a566a971bca815e1d1dd85b9e9800fbe4e3572cf54a7b0ff54111469c2","src/read/pe.rs":"423527bb5fb5b234057d51925f6ac3ea05603618c1d8c6165de2f9c819978d02","src/read/traits.rs":"c73dd0ca832fc74a9addb414ab5ffe430e6c076a0bd934b31e6608e04c61dc5e","src/read/wasm.rs":"5f6e1e24d53429ac9d80f87e7784183a4608d08b3f465df629c86c68f1af56d4","src/write/coff.rs":"9c9ebc226cb585a61e3c96085099b69de0b2b75877093f644b3caacf03b6d03d","src/write/elf.rs":"d6e7bb6db9b29de1c105dfa75c7e561c85e42a05c75c70cf7baffe25d3009d06","src/write/macho.rs":"1ca4e4d75e45badc4bf5b5dfc8a663d1060d85e6c6a94236ffe9db3c09531c5e","src/write/mod.rs":"248ccbc34aa0cdd84e3c413913f05fe1478a4837ad41e3448414289bb73b2671","src/write/string.rs":"a0640f9e0626ca4618a2ac88e17e0d14b053f880d60635ea932ca78b50a114f5","src/write/util.rs":"9629903d556036aa7d6031cffce1fd38c033453a28c0a30eb34fc77aded4a11d","tests/round_trip.rs":"a28b57931275c31b704aed5350da92e43abf4c09c5fb94360c9ab5db6a7c4a78"},"package":"d89ec45bc6b810c6ee998e22953fbf387a40fcbf5014dcbb9e5ba9a09a81ee15"} \ No newline at end of file 
+{"files":{"Cargo.lock":"23339a38e8eddf9001484c2ff7664863025e0eb6c642aba87a5b06c4446c2b81","Cargo.toml":"8e68d4cad051cfed2b1ceb6d208bda6f895ef20ed3244b75aff382f4581151d3","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0b74dfa0bcee5c420c6b7f67b4b2658f9ab8388c97b8e733975f2cecbdd668a6","README.md":"5c498b93ff10c038784c5b62c346858c0d53d9d6d1591841c5b6724b0abc9415","examples/nm.rs":"9885cb85700512d63e537b4b60bd2c840aa076721eae2059ba6ae3651be1282e","examples/objcopy.rs":"6b51b89bea2d9bd5d8651942a2a5c5637e4553b8beeda47221470f676f23ede0","examples/objdump.rs":"07a23a2f74b7e46d4cdcf4dab23357a39410b4c4db43179c17e059011e40d45c","src/common.rs":"835207072807c7b7a84c6d52b285cb257480428940def96c951a0ca219db615f","src/lib.rs":"7e559b0af48faca419086a743e3f99794e10a91e8619f8c6e26f113d1935fe14","src/read/any.rs":"f43c2f2285488eb8d7d4b85b393a1c9bb6afdefaafd08dba5b09065fbf079f99","src/read/coff.rs":"f1208d189dfa1b728a6406eea64af8cef19389b70ab1fbd91e94b08bf8cf3d5e","src/read/elf.rs":"93807a74f3122be99cecebe4810d686c27be10df710dbc9751ed2c8d4f4a2acd","src/read/macho.rs":"ca308b21073b9696dd1176b201a0a262eb1f76dd87bc46aee95e30585d06ebae","src/read/mod.rs":"efdb99a566a971bca815e1d1dd85b9e9800fbe4e3572cf54a7b0ff54111469c2","src/read/pe.rs":"cfe0ad92b3ada363519f4cdc8f4cd8fdb2cee25809867ace9e54113e3a4bd36b","src/read/traits.rs":"a091b9e71a464f596d1c8aa3d4b57d1cdd44793a22cfe0c1cae9f2d07e326026","src/read/wasm.rs":"fd693e2f7fe56c23469c40305a1662e0627f35ee2777d470c3e2c670addf159a","src/write/coff.rs":"906593d3b1a6b609c0293edea66c289d2da1a35f7cce79194bac788054d601db","src/write/elf.rs":"ee50be8c85e12d1c42392564b2b44a1b9a78444b3eaf27965ea08c28fef91e5f","src/write/macho.rs":"d26bc671da61997d3b09d1d7895a4e518e45d0dcb85e948341b1c191eb3cb6bd","src/write/mod.rs":"91dfdea43bbfea5617047c9a3af42910de8298e06e472216c3d182ec4fec9b02","src/write/string.rs":"a0640f9e0626ca4618a2ac88e17e0d14b053f880d60635ea932ca78b50a114f5","src/write/util.rs":"9629903d556036aa7d6031cffce1fd38c033453a28c0a30eb34fc77aded4a11d","tests/round_trip.rs":"b101e86d9be1191a64495511fb0709d85d813ca140d3fd0884e72129b8100ea2","tests/tls.rs":"916d701b88a11237829de73a56f22b0b9b75ed4688f92348c32629928f1eaca3"},"package":"a3c61759aa254402e53c79a68dc519cda1ceee2ff2b6d70b3e58bf64ac2f03e3"} \ No newline at end of file diff --git a/third_party/rust/object/Cargo.lock b/third_party/rust/object/Cargo.lock index fb4a987f9a..7a8546e5a9 100644 --- a/third_party/rust/object/Cargo.lock +++ b/third_party/rust/object/Cargo.lock @@ -36,25 +36,6 @@ dependencies = [ "cfg-if 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "failure" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "failure_derive 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "failure_derive" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 0.4.28 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.15.33 (registry+https://github.com/rust-lang/crates.io-index)", - "synstructure 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "flate2" version = "1.0.7" @@ -68,12 +49,12 @@ dependencies = [ [[package]] name = "goblin" -version = "0.0.24" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "log 
0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "plain 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "scroll 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)", + "scroll 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -81,11 +62,6 @@ name = "indexmap" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "itoa" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "libc" version = "0.2.51" @@ -138,22 +114,22 @@ dependencies = [ [[package]] name = "object" -version = "0.13.0" +version = "0.16.0" dependencies = [ "crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "flate2 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", - "goblin 0.0.24 (registry+https://github.com/rust-lang/crates.io-index)", + "goblin 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "indexmap 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-wasm 0.40.0 (registry+https://github.com/rust-lang/crates.io-index)", - "scroll 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)", - "target-lexicon 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "uuid 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-wasm 0.41.0 (registry+https://github.com/rust-lang/crates.io-index)", + "scroll 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "target-lexicon 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "parity-wasm" -version = "0.40.0" +version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -163,119 +139,61 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "proc-macro2" -version = "0.4.28" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "quote" -version = "0.6.12" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 0.4.28 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "ryu" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "scroll" -version = "0.9.2" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "scroll_derive 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", + "scroll_derive 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "scroll_derive" -version = "0.9.5" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 0.4.28 
(registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.15.33 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "serde" -version = "1.0.99" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "serde_json" -version = "1.0.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "ryu 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.99 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "syn" -version = "0.15.33" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 0.4.28 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "synstructure" -version = "0.10.2" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "proc-macro2 0.4.28 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.15.33 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "target-lexicon" -version = "0.4.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "failure_derive 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)", -] [[package]] name = "unicode-xid" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "uuid" -version = "0.7.4" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -304,35 +222,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum cfg-if 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "11d43355396e872eefb45ce6342e4374ed7bc2b3a502d1b28e36d6e23c05d1f4" "checksum crc 1.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d663548de7f5cca343f1e0a48d14dcfb0e9eb4e079ec58883b7251539fa10aeb" "checksum crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" -"checksum failure 0.1.5 
(registry+https://github.com/rust-lang/crates.io-index)" = "795bd83d3abeb9220f257e597aa0080a508b27533824adf336529648f6abf7e2" -"checksum failure_derive 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "ea1063915fd7ef4309e222a5a07cf9c319fb9c7836b1f89b85458672dbb127e1" "checksum flate2 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "f87e68aa82b2de08a6e037f1385455759df6e445a8df5e005b4297191dbf18aa" -"checksum goblin 0.0.24 (registry+https://github.com/rust-lang/crates.io-index)" = "e3fa261d919c1ae9d1e4533c4a2f99e10938603c4208d56c05bec7a872b661b0" +"checksum goblin 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5e6040506480da04a63de51a478e8021892d65d8411f29b2a422c2648bdd8bcb" "checksum indexmap 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a4d6d89e0948bf10c08b9ecc8ac5b83f07f857ebe2c0cbe38de15b4e4f510356" -"checksum itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "501266b7edd0174f8530248f87f99c88fbe60ca4ef3dd486835b8d8d53136f7f" "checksum libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)" = "bedcc7a809076656486ffe045abeeac163da1b558e963a31e29fbfbeba916917" "checksum log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c84ec4b527950aa83a329754b01dbe3f58361d1c5efacd1f6d68c494d08a17c6" "checksum memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" "checksum miniz-sys 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "0300eafb20369952951699b68243ab4334f4b10a88f411c221d444b36c40e649" "checksum miniz_oxide 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c468f2369f07d651a5d0bb2c9079f8488a66d5466efe42d0c5c6466edcb7f71e" "checksum miniz_oxide_c_api 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b7fe927a42e3807ef71defb191dc87d4e24479b221e67015fe38ae2b7b447bab" -"checksum parity-wasm 0.40.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7ee68a5b3b7c7f818f982ff75579410b4b8c611d58d623c5bb5c70e9cbb6e16a" +"checksum parity-wasm 0.41.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc878dac00da22f8f61e7af3157988424567ab01d9920b962ef7dcbd7cd865" "checksum plain 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" -"checksum proc-macro2 0.4.28 (registry+https://github.com/rust-lang/crates.io-index)" = "ba92c84f814b3f9a44c5cfca7d2ad77fa10710867d2bbb1b3d175ab5f47daa12" -"checksum quote 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)" = "faf4799c5d274f3868a4aae320a0a182cbd2baee377b378f080e16a23e9d80db" -"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -"checksum ryu 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c92464b447c0ee8c4fb3824ecc8383b81717b9f1e74ba2e72540aef7b9f82997" -"checksum scroll 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2f84d114ef17fd144153d608fba7c446b0145d038985e7a8cc5d08bb0ce20383" -"checksum scroll_derive 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)" = "8f1aa96c45e7f5a91cb7fabe7b279f02fea7126239fc40b732316e8b6a2d0fcb" -"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -"checksum semver-parser 0.7.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" -"checksum serde 1.0.99 (registry+https://github.com/rust-lang/crates.io-index)" = "fec2851eb56d010dc9a21b89ca53ee75e6528bab60c11e89d38390904982da9f" -"checksum serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)" = "051c49229f282f7c6f3813f8286cc1e3323e8051823fce42c7ea80fe13521704" -"checksum syn 0.15.33 (registry+https://github.com/rust-lang/crates.io-index)" = "ec52cd796e5f01d0067225a5392e70084acc4c0013fa71d55166d38a8b307836" -"checksum synstructure 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)" = "02353edf96d6e4dc81aea2d8490a7e9db177bf8acb0e951c24940bf866cb313f" -"checksum target-lexicon 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1b0ab4982b8945c35cc1c46a83a9094c414f6828a099ce5dcaa8ee2b04642dcb" -"checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" -"checksum uuid 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)" = "90dbc611eb48397705a6b0f6e917da23ae517e4d127123d2cf7674206627d32a" +"checksum proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "9c9e470a8dc4aeae2dee2f335e8f533e2d4b347e1434e5671afc49b054592f27" +"checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" +"checksum scroll 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "abb2332cb595d33f7edd5700f4cbf94892e680c7f0ae56adab58a35190b66cb1" +"checksum scroll_derive 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f8584eea9b9ff42825b46faf46a8c24d2cff13ec152fa2a50df788b87c07ee28" +"checksum syn 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)" = "661641ea2aa15845cddeb97dad000d22070bb5c1fb456b96c1cba883ec691e92" +"checksum target-lexicon 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6f4c118a7a38378f305a9e111fcb2f7f838c0be324bfb31a77ea04f7f6e684b4" +"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" +"checksum uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9fde2f6a4bea1d6e007c4ad38c6839fa71cbb63b6dbf5b595aa38dc9b1093c11" "checksum winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "f10e386af2b13e47c89e7236a7a14a086791a2b88ebad6df9bf42040195cf770" "checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/third_party/rust/object/Cargo.toml b/third_party/rust/object/Cargo.toml index de7baee0ca..c8688f079b 100644 --- a/third_party/rust/object/Cargo.toml +++ b/third_party/rust/object/Cargo.toml @@ -13,19 +13,21 @@ [package] edition = "2018" name = "object" -version = "0.13.0" +version = "0.16.0" authors = ["Nick Fitzgerald ", "Philip Craig "] exclude = ["/.coveralls.yml", "/.travis.yml"] description = "A unified interface for parsing object file formats." 
keywords = ["object", "loader", "elf", "mach-o", "pe"] license = "Apache-2.0/MIT" repository = "https://github.com/gimli-rs/object" +[package.metadata.docs.rs] +all-features = true [[example]] name = "objcopy" required-features = ["read", "write"] [dependencies.crc32fast] -version = "1" +version = "1.2" optional = true [dependencies.flate2] @@ -33,27 +35,27 @@ version = "1" optional = true [dependencies.goblin] -version = "0.0.24" +version = "0.1" features = ["endian_fd", "elf32", "elf64", "mach32", "mach64", "pe32", "pe64", "archive"] default-features = false [dependencies.indexmap] -version = "1" +version = "1.1" optional = true [dependencies.parity-wasm] -version = "0.40.0" +version = "0.41.0" optional = true [dependencies.scroll] -version = "0.9" +version = "0.10" default-features = false [dependencies.target-lexicon] -version = "0.4" +version = "0.9" [dependencies.uuid] -version = "0.7" +version = "0.8" default-features = false [dev-dependencies.memmap] version = "0.7" diff --git a/third_party/rust/object/examples/objcopy.rs b/third_party/rust/object/examples/objcopy.rs index 66ab3be600..c3dc44f2e5 100644 --- a/third_party/rust/object/examples/objcopy.rs +++ b/third_party/rust/object/examples/objcopy.rs @@ -37,6 +37,7 @@ fn main() { }; let mut out_object = write::Object::new(in_object.format(), in_object.architecture()); + out_object.mangling = write::Mangling::None; let mut out_sections = HashMap::new(); for in_section in in_object.sections() { diff --git a/third_party/rust/object/src/common.rs b/third_party/rust/object/src/common.rs index baee0c4144..6cc3f61488 100644 --- a/third_party/rust/object/src/common.rs +++ b/third_party/rust/object/src/common.rs @@ -146,8 +146,17 @@ pub enum RelocationKind { SectionOffset, /// The index of the section containing the symbol. SectionIndex, - /// Some other operation and encoding. The value is dependent on file format and machine. - Other(u32), + /// Some other ELF relocation. The value is dependent on the architecture. + Elf(u32), + /// Some other Mach-O relocation. The value is dependent on the architecture. + MachO { + /// The relocation type. + value: u8, + /// Whether the relocation is relative to the place. + relative: bool, + }, + /// Some other COFF relocation. The value is dependent on the architecture. + Coff(u16), } /// Information about how the result of the relocation operation is encoded in the place. 
diff --git a/third_party/rust/object/src/read/any.rs b/third_party/rust/object/src/read/any.rs index 98b69b0920..74f03f735f 100644 --- a/third_party/rust/object/src/read/any.rs +++ b/third_party/rust/object/src/read/any.rs @@ -319,6 +319,10 @@ impl<'data, 'file> ObjectSegment<'data> for Segment<'data, 'file> { with_inner!(self.inner, SegmentInternal, |x| x.align()) } + fn file_range(&self) -> (u64, u64) { + with_inner!(self.inner, SegmentInternal, |x| x.file_range()) + } + fn data(&self) -> &'data [u8] { with_inner!(self.inner, SegmentInternal, |x| x.data()) } @@ -418,6 +422,10 @@ impl<'data, 'file> ObjectSection<'data> for Section<'data, 'file> { with_inner!(self.inner, SectionInternal, |x| x.align()) } + fn file_range(&self) -> Option<(u64, u64)> { + with_inner!(self.inner, SectionInternal, |x| x.file_range()) + } + fn data(&self) -> Cow<'data, [u8]> { with_inner!(self.inner, SectionInternal, |x| x.data()) } diff --git a/third_party/rust/object/src/read/coff.rs b/third_party/rust/object/src/read/coff.rs index 1015e34537..72f985e458 100644 --- a/third_party/rust/object/src/read/coff.rs +++ b/third_party/rust/object/src/read/coff.rs @@ -234,6 +234,14 @@ impl<'data, 'file> ObjectSegment<'data> for CoffSegment<'data, 'file> { section_alignment(self.section.characteristics) } + #[inline] + fn file_range(&self) -> (u64, u64) { + ( + self.section.pointer_to_raw_data as u64, + self.section.size_of_raw_data as u64, + ) + } + fn data(&self) -> &'data [u8] { let offset = self.section.pointer_to_raw_data as usize; let size = self.section.size_of_raw_data as usize; @@ -293,6 +301,14 @@ impl<'data, 'file> ObjectSection<'data> for CoffSection<'data, 'file> { section_alignment(self.section.characteristics) } + #[inline] + fn file_range(&self) -> Option<(u64, u64)> { + Some(( + self.section.pointer_to_raw_data as u64, + self.section.size_of_raw_data as u64, + )) + } + fn data(&self) -> Cow<'data, [u8]> { Cow::from(self.raw_data()) } @@ -460,7 +476,7 @@ impl<'data, 'file> Iterator for CoffRelocationIterator<'data, 'file> { pe::relocation::IMAGE_REL_I386_SECREL => (RelocationKind::SectionOffset, 32, 0), pe::relocation::IMAGE_REL_I386_SECREL7 => (RelocationKind::SectionOffset, 7, 0), pe::relocation::IMAGE_REL_I386_REL32 => (RelocationKind::Relative, 32, -4), - _ => (RelocationKind::Other(u32::from(relocation.typ)), 0, 0), + _ => (RelocationKind::Coff(relocation.typ), 0, 0), }, pe::header::COFF_MACHINE_X86_64 => match relocation.typ { pe::relocation::IMAGE_REL_AMD64_ADDR64 => (RelocationKind::Absolute, 64, 0), @@ -483,9 +499,9 @@ impl<'data, 'file> Iterator for CoffRelocationIterator<'data, 'file> { pe::relocation::IMAGE_REL_AMD64_SECREL7 => { (RelocationKind::SectionOffset, 7, 0) } - _ => (RelocationKind::Other(u32::from(relocation.typ)), 0, 0), + _ => (RelocationKind::Coff(relocation.typ), 0, 0), }, - _ => (RelocationKind::Other(u32::from(relocation.typ)), 0, 0), + _ => (RelocationKind::Coff(relocation.typ), 0, 0), }; let target = RelocationTarget::Symbol(SymbolIndex(relocation.symbol_table_index as usize)); diff --git a/third_party/rust/object/src/read/elf.rs b/third_party/rust/object/src/read/elf.rs index 8a7e98f819..6af690756a 100644 --- a/third_party/rust/object/src/read/elf.rs +++ b/third_party/rust/object/src/read/elf.rs @@ -10,7 +10,7 @@ use goblin::{elf, strtab}; use scroll::ctx::TryFromCtx; use scroll::{self, Pread}; use std::{iter, slice}; -use target_lexicon::Architecture; +use target_lexicon::{Aarch64Architecture, Architecture, ArmArchitecture}; use crate::read::{ self, Object, ObjectSection, 
ObjectSegment, Relocation, RelocationEncoding, RelocationKind, @@ -89,8 +89,8 @@ where fn architecture(&self) -> Architecture { match self.elf.header.e_machine { - elf::header::EM_ARM => Architecture::Arm, - elf::header::EM_AARCH64 => Architecture::Aarch64, + elf::header::EM_ARM => Architecture::Arm(ArmArchitecture::Arm), + elf::header::EM_AARCH64 => Architecture::Aarch64(Aarch64Architecture::Aarch64), elf::header::EM_386 => Architecture::I386, elf::header::EM_X86_64 => Architecture::X86_64, elf::header::EM_MIPS => Architecture::Mips, @@ -278,6 +278,11 @@ impl<'data, 'file> ObjectSegment<'data> for ElfSegment<'data, 'file> { self.segment.p_align } + #[inline] + fn file_range(&self) -> (u64, u64) { + (self.segment.p_offset, self.segment.p_filesz) + } + fn data(&self) -> &'data [u8] { &self.file.data[self.segment.p_offset as usize..][..self.segment.p_filesz as usize] } @@ -326,11 +331,19 @@ where } impl<'data, 'file> ElfSection<'data, 'file> { - fn raw_data(&self) -> &'data [u8] { + fn raw_offset(&self) -> Option<(u64, u64)> { if self.section.sh_type == elf::section_header::SHT_NOBITS { - &[] + None } else { - &self.file.data[self.section.sh_offset as usize..][..self.section.sh_size as usize] + Some((self.section.sh_offset, self.section.sh_size)) + } + } + + fn raw_data(&self) -> &'data [u8] { + if let Some((offset, size)) = self.raw_offset() { + &self.file.data[offset as usize..][..size as usize] + } else { + &[] } } @@ -423,6 +436,11 @@ impl<'data, 'file> ObjectSection<'data> for ElfSection<'data, 'file> { self.section.sh_addralign } + #[inline] + fn file_range(&self) -> Option<(u64, u64)> { + self.raw_offset() + } + #[inline] fn data(&self) -> Cow<'data, [u8]> { Cow::from(self.raw_data()) @@ -613,7 +631,7 @@ impl<'data, 'file> Iterator for ElfRelocationIterator<'data, 'file> { let (kind, size) = match self.file.elf.header.e_machine { elf::header::EM_ARM => match reloc.r_type { elf::reloc::R_ARM_ABS32 => (RelocationKind::Absolute, 32), - _ => (RelocationKind::Other(reloc.r_type), 0), + _ => (RelocationKind::Elf(reloc.r_type), 0), }, elf::header::EM_AARCH64 => match reloc.r_type { elf::reloc::R_AARCH64_ABS64 => (RelocationKind::Absolute, 64), @@ -622,7 +640,7 @@ impl<'data, 'file> Iterator for ElfRelocationIterator<'data, 'file> { elf::reloc::R_AARCH64_PREL64 => (RelocationKind::Relative, 64), elf::reloc::R_AARCH64_PREL32 => (RelocationKind::Relative, 32), elf::reloc::R_AARCH64_PREL16 => (RelocationKind::Relative, 16), - _ => (RelocationKind::Other(reloc.r_type), 0), + _ => (RelocationKind::Elf(reloc.r_type), 0), }, elf::header::EM_386 => match reloc.r_type { elf::reloc::R_386_32 => (RelocationKind::Absolute, 32), @@ -635,7 +653,7 @@ impl<'data, 'file> Iterator for ElfRelocationIterator<'data, 'file> { elf::reloc::R_386_PC16 => (RelocationKind::Relative, 16), elf::reloc::R_386_8 => (RelocationKind::Absolute, 8), elf::reloc::R_386_PC8 => (RelocationKind::Relative, 8), - _ => (RelocationKind::Other(reloc.r_type), 0), + _ => (RelocationKind::Elf(reloc.r_type), 0), }, elf::header::EM_X86_64 => match reloc.r_type { elf::reloc::R_X86_64_64 => (RelocationKind::Absolute, 64), @@ -652,9 +670,9 @@ impl<'data, 'file> Iterator for ElfRelocationIterator<'data, 'file> { elf::reloc::R_X86_64_PC16 => (RelocationKind::Relative, 16), elf::reloc::R_X86_64_8 => (RelocationKind::Absolute, 8), elf::reloc::R_X86_64_PC8 => (RelocationKind::Relative, 8), - _ => (RelocationKind::Other(reloc.r_type), 0), + _ => (RelocationKind::Elf(reloc.r_type), 0), }, - _ => (RelocationKind::Other(reloc.r_type), 0), + _ => 
(RelocationKind::Elf(reloc.r_type), 0), }; let target = RelocationTarget::Symbol(SymbolIndex(reloc.r_sym as usize)); return Some(( diff --git a/third_party/rust/object/src/read/macho.rs b/third_party/rust/object/src/read/macho.rs index 8fe8a508a7..ce54938a10 100644 --- a/third_party/rust/object/src/read/macho.rs +++ b/third_party/rust/object/src/read/macho.rs @@ -4,7 +4,7 @@ use goblin::container; use goblin::mach; use goblin::mach::load_command::CommandVariant; use std::{fmt, iter, ops, slice}; -use target_lexicon::Architecture; +use target_lexicon::{Aarch64Architecture, Architecture, ArmArchitecture}; use uuid::Uuid; use crate::read::{ @@ -77,8 +77,8 @@ where fn architecture(&self) -> Architecture { match self.macho.header.cputype { - mach::cputype::CPU_TYPE_ARM => Architecture::Arm, - mach::cputype::CPU_TYPE_ARM64 => Architecture::Aarch64, + mach::cputype::CPU_TYPE_ARM => Architecture::Arm(ArmArchitecture::Arm), + mach::cputype::CPU_TYPE_ARM64 => Architecture::Aarch64(Aarch64Architecture::Aarch64), mach::cputype::CPU_TYPE_X86 => Architecture::I386, mach::cputype::CPU_TYPE_X86_64 => Architecture::X86_64, mach::cputype::CPU_TYPE_MIPS => Architecture::Mips, @@ -272,6 +272,11 @@ impl<'data, 'file> ObjectSegment<'data> for MachOSegment<'data, 'file> { 0x1000 } + #[inline] + fn file_range(&self) -> (u64, u64) { + (self.segment.fileoff, self.segment.filesize) + } + #[inline] fn data(&self) -> &'data [u8] { self.segment.data @@ -355,6 +360,12 @@ impl<'data, 'file> ObjectSection<'data> for MachOSection<'data, 'file> { 1 << self.internal().section.align } + #[inline] + fn file_range(&self) -> Option<(u64, u64)> { + let internal = &self.internal().section; + Some((internal.offset as u64, internal.size)) + } + #[inline] fn data(&self) -> Cow<'data, [u8]> { Cow::from(self.internal().data) @@ -529,36 +540,51 @@ impl<'data, 'file> Iterator for MachORelocationIterator<'data, 'file> { self.relocations.next()?.ok().map(|reloc| { let mut encoding = RelocationEncoding::Generic; let kind = match self.file.macho.header.cputype { - mach::cputype::CPU_TYPE_ARM => match reloc.r_type() { - mach::relocation::ARM_RELOC_VANILLA => RelocationKind::Absolute, - _ => RelocationKind::Other(reloc.r_info), + mach::cputype::CPU_TYPE_ARM => match (reloc.r_type(), reloc.r_pcrel()) { + (mach::relocation::ARM_RELOC_VANILLA, 0) => RelocationKind::Absolute, + _ => RelocationKind::MachO { + value: reloc.r_type(), + relative: reloc.is_pic(), + }, }, - mach::cputype::CPU_TYPE_ARM64 => match reloc.r_type() { - mach::relocation::ARM64_RELOC_UNSIGNED => RelocationKind::Absolute, - _ => RelocationKind::Other(reloc.r_info), + mach::cputype::CPU_TYPE_ARM64 => match (reloc.r_type(), reloc.r_pcrel()) { + (mach::relocation::ARM64_RELOC_UNSIGNED, 0) => RelocationKind::Absolute, + _ => RelocationKind::MachO { + value: reloc.r_type(), + relative: reloc.is_pic(), + }, }, - mach::cputype::CPU_TYPE_X86 => match reloc.r_type() { - mach::relocation::GENERIC_RELOC_VANILLA => RelocationKind::Absolute, - _ => RelocationKind::Other(reloc.r_info), + mach::cputype::CPU_TYPE_X86 => match (reloc.r_type(), reloc.r_pcrel()) { + (mach::relocation::GENERIC_RELOC_VANILLA, 0) => RelocationKind::Absolute, + _ => RelocationKind::MachO { + value: reloc.r_type(), + relative: reloc.is_pic(), + }, }, - mach::cputype::CPU_TYPE_X86_64 => match reloc.r_type() { - mach::relocation::X86_64_RELOC_UNSIGNED => RelocationKind::Absolute, - mach::relocation::X86_64_RELOC_SIGNED => { + mach::cputype::CPU_TYPE_X86_64 => match (reloc.r_type(), reloc.r_pcrel()) { + 
(mach::relocation::X86_64_RELOC_UNSIGNED, 0) => RelocationKind::Absolute, + (mach::relocation::X86_64_RELOC_SIGNED, 1) => { encoding = RelocationEncoding::X86RipRelative; RelocationKind::Relative } - mach::relocation::X86_64_RELOC_BRANCH => { + (mach::relocation::X86_64_RELOC_BRANCH, 1) => { encoding = RelocationEncoding::X86Branch; RelocationKind::Relative } - mach::relocation::X86_64_RELOC_GOT => RelocationKind::GotRelative, - mach::relocation::X86_64_RELOC_GOT_LOAD => { + (mach::relocation::X86_64_RELOC_GOT, 1) => RelocationKind::GotRelative, + (mach::relocation::X86_64_RELOC_GOT_LOAD, 1) => { encoding = RelocationEncoding::X86RipRelativeMovq; RelocationKind::GotRelative } - _ => RelocationKind::Other(reloc.r_info), + _ => RelocationKind::MachO { + value: reloc.r_type(), + relative: reloc.is_pic(), + }, + }, + _ => RelocationKind::MachO { + value: reloc.r_type(), + relative: reloc.is_pic(), }, - _ => RelocationKind::Other(reloc.r_info), }; let size = 8 << reloc.r_length(); let target = if reloc.is_extern() { diff --git a/third_party/rust/object/src/read/pe.rs b/third_party/rust/object/src/read/pe.rs index 5b752adbb0..49d3fe83ce 100644 --- a/third_party/rust/object/src/read/pe.rs +++ b/third_party/rust/object/src/read/pe.rs @@ -192,6 +192,14 @@ impl<'data, 'file> ObjectSegment<'data> for PeSegment<'data, 'file> { self.file.section_alignment() } + #[inline] + fn file_range(&self) -> (u64, u64) { + ( + self.section.pointer_to_raw_data as u64, + self.section.size_of_raw_data as u64, + ) + } + fn data(&self) -> &'data [u8] { let offset = self.section.pointer_to_raw_data as usize; let size = cmp::min(self.section.virtual_size, self.section.size_of_raw_data) as usize; @@ -272,6 +280,14 @@ impl<'data, 'file> ObjectSection<'data> for PeSection<'data, 'file> { self.file.section_alignment() } + #[inline] + fn file_range(&self) -> Option<(u64, u64)> { + Some(( + self.section.pointer_to_raw_data as u64, + self.section.size_of_raw_data as u64, + )) + } + fn data(&self) -> Cow<'data, [u8]> { Cow::from(self.raw_data()) } diff --git a/third_party/rust/object/src/read/traits.rs b/third_party/rust/object/src/read/traits.rs index 1a950d0451..70b11e798a 100644 --- a/third_party/rust/object/src/read/traits.rs +++ b/third_party/rust/object/src/read/traits.rs @@ -151,6 +151,9 @@ pub trait ObjectSegment<'data> { /// Returns the alignment of the segment in memory. fn align(&self) -> u64; + /// Returns the offset and size of the segment in the file. + fn file_range(&self) -> (u64, u64); + /// Returns a reference to the file contents of the segment. /// The length of this data may be different from the size of the /// segment in memory. @@ -183,6 +186,9 @@ pub trait ObjectSection<'data> { /// Returns the alignment of the section in memory. fn align(&self) -> u64; + /// Returns offset and size of on-disk segment (if any) + fn file_range(&self) -> Option<(u64, u64)>; + /// Returns the raw contents of the section. /// The length of this data may be different from the size of the /// section in memory. 
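The new `file_range` methods added to the `ObjectSegment` and `ObjectSection` traits above report where a segment's or section's bytes live on disk. A small sketch of reading them through the public `read` API follows; the `dump_file_ranges` function name and its output format are placeholders rather than anything defined by this patch.

```rust
use object::read::{Object, ObjectSection};

// Print where each section's contents live in the file, if anywhere.
fn dump_file_ranges(data: &[u8]) {
    let file = object::read::File::parse(data).expect("failed to parse object file");
    for section in file.sections() {
        match section.file_range() {
            // (offset, size) of the section's bytes on disk.
            Some((offset, size)) => {
                println!("{:?}: offset {:#x}, size {:#x}", section.name(), offset, size)
            }
            // Sections with no file-backed data (e.g. .bss / SHT_NOBITS) return None.
            None => println!("{:?}: no file data", section.name()),
        }
    }
}
```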
diff --git a/third_party/rust/object/src/read/wasm.rs b/third_party/rust/object/src/read/wasm.rs index 99fe2f5a03..e13343e5b1 100644 --- a/third_party/rust/object/src/read/wasm.rs +++ b/third_party/rust/object/src/read/wasm.rs @@ -144,6 +144,11 @@ impl<'file> ObjectSegment<'static> for WasmSegment<'file> { unreachable!() } + #[inline] + fn file_range(&self) -> (u64, u64) { + unreachable!() + } + fn data(&self) -> &'static [u8] { unreachable!() } @@ -205,6 +210,11 @@ impl<'file> ObjectSection<'static> for WasmSection<'file> { 1 } + #[inline] + fn file_range(&self) -> Option<(u64, u64)> { + None + } + fn data(&self) -> Cow<'static, [u8]> { match *self.section { elements::Section::Custom(ref section) => Some(section.payload().to_vec().into()), diff --git a/third_party/rust/object/src/write/coff.rs b/third_party/rust/object/src/write/coff.rs index 0db43be237..5077bc7f0a 100644 --- a/third_party/rust/object/src/write/coff.rs +++ b/third_party/rust/object/src/write/coff.rs @@ -45,6 +45,15 @@ impl Object { StandardSection::UninitializedData => { (&[], &b".bss"[..], SectionKind::UninitializedData) } + StandardSection::Tls => (&[], &b".tls$"[..], SectionKind::Tls), + StandardSection::UninitializedTls => { + // Unsupported section. + (&[], &[], SectionKind::UninitializedTls) + } + StandardSection::TlsVariables => { + // Unsupported section. + (&[], &[], SectionKind::TlsVariables) + } } } @@ -116,7 +125,7 @@ impl Object { let mut name = b".refptr.".to_vec(); name.extend(&self.symbol(symbol_id).name); - let stub_id = self.add_symbol(Symbol { + let stub_id = self.add_raw_symbol(Symbol { name, value: 0, size: u64::from(stub_size), @@ -337,7 +346,7 @@ impl Object { (RelocationKind::SectionOffset, 32, 0) => coff::IMAGE_REL_I386_SECREL, (RelocationKind::SectionOffset, 7, 0) => coff::IMAGE_REL_I386_SECREL7, (RelocationKind::Relative, 32, -4) => coff::IMAGE_REL_I386_REL32, - (RelocationKind::Other(x), _, _) => x as u16, + (RelocationKind::Coff(x), _, _) => x, _ => return Err(format!("unimplemented relocation {:?}", reloc)), }, Architecture::X86_64 => match (reloc.kind, reloc.size, reloc.addend) { @@ -352,7 +361,7 @@ impl Object { (RelocationKind::Relative, 32, -9) => coff::IMAGE_REL_AMD64_REL32_5, (RelocationKind::SectionOffset, 32, 0) => coff::IMAGE_REL_AMD64_SECREL, (RelocationKind::SectionOffset, 7, 0) => coff::IMAGE_REL_AMD64_SECREL7, - (RelocationKind::Other(x), _, _) => x as u16, + (RelocationKind::Coff(x), _, _) => x, _ => return Err(format!("unimplemented relocation {:?}", reloc)), }, _ => { diff --git a/third_party/rust/object/src/write/elf.rs b/third_party/rust/object/src/write/elf.rs index 3f8c015078..e784794dc8 100644 --- a/third_party/rust/object/src/write/elf.rs +++ b/third_party/rust/object/src/write/elf.rs @@ -46,6 +46,14 @@ impl Object { StandardSection::UninitializedData => { (&[], &b".bss"[..], SectionKind::UninitializedData) } + StandardSection::Tls => (&[], &b".tdata"[..], SectionKind::Tls), + StandardSection::UninitializedTls => { + (&[], &b".tbss"[..], SectionKind::UninitializedTls) + } + StandardSection::TlsVariables => { + // Unsupported section. 
+ (&[], &[], SectionKind::TlsVariables) + } } } @@ -58,8 +66,8 @@ impl Object { fn elf_has_relocation_addend(&self) -> Result { Ok(match self.architecture { - Architecture::Arm => false, - Architecture::Aarch64 => false, + Architecture::Arm(_) => false, + Architecture::Aarch64(_) => false, Architecture::I386 => false, Architecture::X86_64 => true, _ => { @@ -92,7 +100,7 @@ impl Object { | RelocationKind::GotRelative | RelocationKind::GotBaseRelative | RelocationKind::PltRelative - | RelocationKind::Other(_) => return false, + | RelocationKind::Elf(_) => return false, // Absolute relocations are preemptible for non-local data. // TODO: not sure if this rule is exactly correct // This rule was added to handle global data references in debuginfo. @@ -294,8 +302,8 @@ impl Object { // Write file header. let e_machine = match self.architecture { - Architecture::Arm => elf::EM_ARM, - Architecture::Aarch64 => elf::EM_AARCH64, + Architecture::Arm(_) => elf::EM_ARM, + Architecture::Aarch64(_) => elf::EM_AARCH64, Architecture::I386 => elf::EM_386, Architecture::X86_64 => elf::EM_X86_64, _ => { @@ -490,7 +498,7 @@ impl Object { (RelocationKind::Relative, 16) => elf::R_386_PC16, (RelocationKind::Absolute, 8) => elf::R_386_8, (RelocationKind::Relative, 8) => elf::R_386_PC8, - (RelocationKind::Other(x), _) => x, + (RelocationKind::Elf(x), _) => x, _ => return Err(format!("unimplemented relocation {:?}", reloc)), }, Architecture::X86_64 => match (reloc.kind, reloc.encoding, reloc.size) { @@ -511,7 +519,7 @@ impl Object { (RelocationKind::Relative, _, 16) => elf::R_X86_64_PC16, (RelocationKind::Absolute, _, 8) => elf::R_X86_64_8, (RelocationKind::Relative, _, 8) => elf::R_X86_64_PC8, - (RelocationKind::Other(x), _, _) => x, + (RelocationKind::Elf(x), _, _) => x, _ => return Err(format!("unimplemented relocation {:?}", reloc)), }, _ => { diff --git a/third_party/rust/object/src/write/macho.rs b/third_party/rust/object/src/write/macho.rs index dbbe5df495..356a44b34d 100644 --- a/third_party/rust/object/src/write/macho.rs +++ b/third_party/rust/object/src/write/macho.rs @@ -63,9 +63,112 @@ impl Object { &b"__bss"[..], SectionKind::UninitializedData, ), + StandardSection::Tls => (&b"__DATA"[..], &b"__thread_data"[..], SectionKind::Tls), + StandardSection::UninitializedTls => ( + &b"__DATA"[..], + &b"__thread_bss"[..], + SectionKind::UninitializedTls, + ), + StandardSection::TlsVariables => ( + &b"__DATA"[..], + &b"__thread_vars"[..], + SectionKind::TlsVariables, + ), + } + } + + fn macho_tlv_bootstrap(&mut self) -> SymbolId { + match self.tlv_bootstrap { + Some(id) => id, + None => { + let id = self.add_symbol(Symbol { + name: b"_tlv_bootstrap".to_vec(), + value: 0, + size: 0, + kind: SymbolKind::Text, + scope: SymbolScope::Dynamic, + weak: false, + section: None, + }); + self.tlv_bootstrap = Some(id); + id + } } } + /// Create the `__thread_vars` entry for a TLS variable. + /// + /// The symbol given by `symbol_id` will be updated to point to this entry. + /// + /// A new `SymbolId` will be returned. The caller must update this symbol + /// to point to the initializer. + /// + /// If `symbol_id` is not for a TLS variable, then it is returned unchanged. + pub(crate) fn macho_add_thread_var(&mut self, symbol_id: SymbolId) -> SymbolId { + let symbol = self.symbol_mut(symbol_id); + if symbol.kind != SymbolKind::Tls { + return symbol_id; + } + + // Create the initializer symbol. 
+ let mut name = symbol.name.clone(); + name.extend(b"$tlv$init"); + let init_symbol_id = self.add_raw_symbol(Symbol { + name, + value: 0, + size: 0, + kind: SymbolKind::Tls, + scope: SymbolScope::Compilation, + weak: false, + section: None, + }); + + // Add the tlv entry. + // Three pointers in size: + // - __tlv_bootstrap - used to make sure support exists + // - spare pointer - used when mapped by the runtime + // - pointer to symbol initializer + let section = self.section_id(StandardSection::TlsVariables); + let pointer_width = self.architecture.pointer_width().unwrap().bytes(); + let size = u64::from(pointer_width) * 3; + let data = vec![0; size as usize]; + let offset = self.append_section_data(section, &data, u64::from(pointer_width)); + + let tlv_bootstrap = self.macho_tlv_bootstrap(); + self.add_relocation( + section, + Relocation { + offset: offset, + size: pointer_width * 8, + kind: RelocationKind::Absolute, + encoding: RelocationEncoding::Generic, + symbol: tlv_bootstrap, + addend: 0, + }, + ) + .unwrap(); + self.add_relocation( + section, + Relocation { + offset: offset + u64::from(pointer_width) * 2, + size: pointer_width * 8, + kind: RelocationKind::Absolute, + encoding: RelocationEncoding::Generic, + symbol: init_symbol_id, + addend: 0, + }, + ) + .unwrap(); + + // Update the symbol to point to the tlv. + let symbol = self.symbol_mut(symbol_id); + symbol.value = offset; + symbol.size = size; + symbol.section = Some(section); + + init_symbol_id + } + pub(crate) fn macho_fixup_relocation(&mut self, mut relocation: &mut Relocation) -> i64 { let constant = match relocation.kind { RelocationKind::Relative @@ -140,7 +243,7 @@ impl Object { for (index, symbol) in self.symbols.iter().enumerate() { if !symbol.is_undefined() { match symbol.kind { - SymbolKind::Text | SymbolKind::Data => {} + SymbolKind::Text | SymbolKind::Data | SymbolKind::Tls => {} SymbolKind::File | SymbolKind::Section => continue, _ => return Err(format!("unimplemented symbol {:?}", symbol)), } @@ -182,8 +285,8 @@ impl Object { // Write file header. 
let (cputype, cpusubtype) = match self.architecture { - Architecture::Arm => (mach::CPU_TYPE_ARM, mach::CPU_SUBTYPE_ARM_ALL), - Architecture::Aarch64 => (mach::CPU_TYPE_ARM64, mach::CPU_SUBTYPE_ARM64_ALL), + Architecture::Arm(_) => (mach::CPU_TYPE_ARM, mach::CPU_SUBTYPE_ARM_ALL), + Architecture::Aarch64(_) => (mach::CPU_TYPE_ARM64, mach::CPU_SUBTYPE_ARM64_ALL), Architecture::I386 => (mach::CPU_TYPE_I386, mach::CPU_SUBTYPE_I386_ALL), Architecture::X86_64 => (mach::CPU_TYPE_X86_64, mach::CPU_SUBTYPE_X86_64_ALL), _ => { @@ -309,7 +412,7 @@ impl Object { for (index, symbol) in self.symbols.iter().enumerate() { if !symbol.is_undefined() { match symbol.kind { - SymbolKind::Text | SymbolKind::Data => {} + SymbolKind::Text | SymbolKind::Data | SymbolKind::Tls => {} SymbolKind::File | SymbolKind::Section => continue, _ => return Err(format!("unimplemented symbol {:?}", symbol)), } @@ -400,6 +503,9 @@ impl Object { (RelocationKind::Absolute, RelocationEncoding::Generic, 0) => { (0, mach::X86_64_RELOC_UNSIGNED) } + (RelocationKind::Relative, RelocationEncoding::Generic, -4) => { + (1, mach::X86_64_RELOC_SIGNED) + } (RelocationKind::Relative, RelocationEncoding::X86RipRelative, -4) => { (1, mach::X86_64_RELOC_SIGNED) } @@ -409,13 +515,16 @@ impl Object { (RelocationKind::PltRelative, RelocationEncoding::X86Branch, -4) => { (1, mach::X86_64_RELOC_BRANCH) } + (RelocationKind::GotRelative, RelocationEncoding::Generic, -4) => { + (1, mach::X86_64_RELOC_GOT) + } ( RelocationKind::GotRelative, RelocationEncoding::X86RipRelativeMovq, -4, ) => (1, mach::X86_64_RELOC_GOT_LOAD), - (RelocationKind::GotRelative, RelocationEncoding::Generic, -4) => { - (1, mach::X86_64_RELOC_GOT) + (RelocationKind::MachO { value, relative }, _, _) => { + (u32::from(relative), value) } _ => return Err(format!("unimplemented relocation {:?}", reloc)), }, diff --git a/third_party/rust/object/src/write/mod.rs b/third_party/rust/object/src/write/mod.rs index ce12ee6a59..79ffc16599 100644 --- a/third_party/rust/object/src/write/mod.rs +++ b/third_party/rust/object/src/write/mod.rs @@ -29,6 +29,10 @@ pub struct Object { symbol_map: HashMap, SymbolId>, stub_symbols: HashMap, subsection_via_symbols: bool, + /// The symbol name mangling scheme. + pub mangling: Mangling, + /// Mach-O "_tlv_bootstrap" symbol. + tlv_bootstrap: Option, } impl Object { @@ -43,6 +47,8 @@ impl Object { symbol_map: HashMap::new(), stub_symbols: HashMap::new(), subsection_via_symbols: false, + mangling: Mangling::default(format, architecture), + tlv_bootstrap: None, } } @@ -58,6 +64,18 @@ impl Object { self.architecture } + /// Return the current mangling setting. + #[inline] + pub fn mangling(&self) -> Mangling { + self.mangling + } + + /// Return the current mangling setting. + #[inline] + pub fn set_mangling(&mut self, mangling: Mangling) { + self.mangling = mangling; + } + /// Return the name for a standard segment. /// /// This will vary based on the file format. @@ -210,23 +228,43 @@ impl Object { } /// Add a new symbol and return its `SymbolId`. - pub fn add_symbol(&mut self, symbol: Symbol) -> SymbolId { + pub fn add_symbol(&mut self, mut symbol: Symbol) -> SymbolId { // Defined symbols must have a scope. 
debug_assert!(symbol.is_undefined() || symbol.scope != SymbolScope::Unknown); if symbol.kind == SymbolKind::Section { return self.section_symbol(symbol.section.unwrap()); } - let symbol_id = SymbolId(self.symbols.len()); - if !symbol.name.is_empty() { - self.symbol_map.insert(symbol.name.clone(), symbol_id); + if !symbol.name.is_empty() + && (symbol.kind == SymbolKind::Text + || symbol.kind == SymbolKind::Data + || symbol.kind == SymbolKind::Tls) + { + let unmangled_name = symbol.name.clone(); + if let Some(prefix) = self.mangling.global_prefix() { + symbol.name.insert(0, prefix); + } + let symbol_id = self.add_raw_symbol(symbol); + self.symbol_map.insert(unmangled_name, symbol_id); + symbol_id + } else { + self.add_raw_symbol(symbol) } + } + + fn add_raw_symbol(&mut self, symbol: Symbol) -> SymbolId { + let symbol_id = SymbolId(self.symbols.len()); self.symbols.push(symbol); symbol_id } + /// Return true if the file format supports `StandardSection::UninitializedTls`. + pub fn has_uninitialized_tls(&self) -> bool { + self.format != BinaryFormat::Coff + } + /// Add a new file symbol and return its `SymbolId`. pub fn add_file_symbol(&mut self, name: Vec) -> SymbolId { - self.add_symbol(Symbol { + self.add_raw_symbol(Symbol { name, value: 0, size: 0, @@ -264,20 +302,77 @@ impl Object { /// Append data to an existing section, and update a symbol to refer to it. /// + /// For Mach-O, this also creates a `__thread_vars` entry for TLS symbols, and the + /// symbol will indirectly point to the added data via the `__thread_vars` entry. + /// /// Returns the section offset of the data. pub fn add_symbol_data( &mut self, - symbol: SymbolId, + symbol_id: SymbolId, section: SectionId, data: &[u8], align: u64, ) -> u64 { let offset = self.append_section_data(section, data, align); - let symbol = self.symbol_mut(symbol); + self.set_symbol_data(symbol_id, section, offset, data.len() as u64); + offset + } + + /// Append zero-initialized data to an existing section, and update a symbol to refer to it. + /// + /// For Mach-O, this also creates a `__thread_vars` entry for TLS symbols, and the + /// symbol will indirectly point to the added data via the `__thread_vars` entry. + /// + /// Returns the section offset of the data. + pub fn add_symbol_bss( + &mut self, + symbol_id: SymbolId, + section: SectionId, + size: u64, + align: u64, + ) -> u64 { + let offset = self.append_section_bss(section, size, align); + self.set_symbol_data(symbol_id, section, offset, size); + offset + } + + /// Update a symbol to refer to the given data within a section. + /// + /// For Mach-O, this also creates a `__thread_vars` entry for TLS symbols, and the + /// symbol will indirectly point to the data via the `__thread_vars` entry. + pub fn set_symbol_data( + &mut self, + mut symbol_id: SymbolId, + section: SectionId, + offset: u64, + size: u64, + ) { + // Defined symbols must have a scope. + debug_assert!(self.symbol(symbol_id).scope != SymbolScope::Unknown); + if self.format == BinaryFormat::Macho { + symbol_id = self.macho_add_thread_var(symbol_id); + } + let symbol = self.symbol_mut(symbol_id); symbol.value = offset; - symbol.size = data.len() as u64; + symbol.size = size; symbol.section = Some(section); - offset + } + + /// Convert a symbol to a section symbol and offset. + /// + /// Returns an error if the symbol is not defined. 
+ pub fn symbol_section_and_offset( + &mut self, + symbol_id: SymbolId, + ) -> Result<(SymbolId, u64), ()> { + let symbol = self.symbol(symbol_id); + if symbol.kind == SymbolKind::Section { + return Ok((symbol_id, 0)); + } + let symbol_offset = symbol.value; + let section = symbol.section.ok_or(())?; + let section_symbol = self.section_symbol(section); + Ok((section_symbol, symbol_offset)) } /// Add a relocation to a section. @@ -366,6 +461,11 @@ pub enum StandardSection { ReadOnlyDataWithRel, ReadOnlyString, UninitializedData, + Tls, + /// Zero-fill TLS initializers. Unsupported for COFF. + UninitializedTls, + /// TLS variable structures. Only supported for Mach-O. + TlsVariables, } impl StandardSection { @@ -379,6 +479,9 @@ impl StandardSection { } StandardSection::ReadOnlyString => SectionKind::ReadOnlyString, StandardSection::UninitializedData => SectionKind::UninitializedData, + StandardSection::Tls => SectionKind::Tls, + StandardSection::UninitializedTls => SectionKind::UninitializedTls, + StandardSection::TlsVariables => SectionKind::TlsVariables, } } @@ -390,6 +493,9 @@ impl StandardSection { StandardSection::ReadOnlyDataWithRel, StandardSection::ReadOnlyString, StandardSection::UninitializedData, + StandardSection::Tls, + StandardSection::UninitializedTls, + StandardSection::TlsVariables, ] } } @@ -530,3 +636,39 @@ pub struct Relocation { /// This may be in addition to an implicit addend stored at the place of the relocation. pub addend: i64, } + +/// The symbol name mangling scheme. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum Mangling { + /// No symbol mangling. + None, + /// Windows COFF symbol mangling. + Coff, + /// Windows COFF i386 symbol mangling. + CoffI386, + /// ELF symbol mangling. + Elf, + /// Mach-O symbol mangling. + Macho, +} + +impl Mangling { + /// Return the default symboling mangling for the given format and architecture. + pub fn default(format: BinaryFormat, architecture: Architecture) -> Self { + match (format, architecture) { + (BinaryFormat::Coff, Architecture::I386) => Mangling::CoffI386, + (BinaryFormat::Coff, _) => Mangling::Coff, + (BinaryFormat::Elf, _) => Mangling::Elf, + (BinaryFormat::Macho, _) => Mangling::Macho, + _ => Mangling::None, + } + } + + /// Return the prefix to use for global symbols. 
+ pub fn global_prefix(self) -> Option { + match self { + Mangling::None | Mangling::Elf | Mangling::Coff => None, + Mangling::CoffI386 | Mangling::Macho => Some(b'_'), + } + } +} diff --git a/third_party/rust/object/tests/round_trip.rs b/third_party/rust/object/tests/round_trip.rs index d15362ba47..7e63c4d85a 100644 --- a/third_party/rust/object/tests/round_trip.rs +++ b/third_party/rust/object/tests/round_trip.rs @@ -51,6 +51,8 @@ fn coff_x86_64() { assert_eq!(text.kind(), SectionKind::Text); assert_eq!(text.address(), 0); assert_eq!(text.size(), 62); + assert_eq!(&text.data()[..30], &[1; 30]); + assert_eq!(&text.data()[32..62], &[1; 30]); let mut symbols = object.symbols(); @@ -132,6 +134,8 @@ fn elf_x86_64() { assert_eq!(text.kind(), SectionKind::Text); assert_eq!(text.address(), 0); assert_eq!(text.size(), 62); + assert_eq!(&text.data()[..30], &[1; 30]); + assert_eq!(&text.data()[32..62], &[1; 30]); let mut symbols = object.symbols(); @@ -201,6 +205,19 @@ fn macho_x86_64() { }, ) .unwrap(); + object + .add_relocation( + text, + write::Relocation { + offset: 16, + size: 32, + kind: RelocationKind::Relative, + encoding: RelocationEncoding::Generic, + symbol: func1_symbol, + addend: -4, + }, + ) + .unwrap(); let bytes = object.write().unwrap(); let object = read::File::parse(&bytes).unwrap(); @@ -217,12 +234,14 @@ fn macho_x86_64() { assert_eq!(text.kind(), SectionKind::Text); assert_eq!(text.address(), 0); assert_eq!(text.size(), 62); + assert_eq!(&text.data()[..30], &[1; 30]); + assert_eq!(&text.data()[32..62], &[1; 30]); let mut symbols = object.symbols(); let (func1_symbol, symbol) = symbols.next().unwrap(); println!("{:?}", symbol); - assert_eq!(symbol.name(), Some("func1")); + assert_eq!(symbol.name(), Some("_func1")); assert_eq!(symbol.address(), func1_offset); assert_eq!(symbol.kind(), SymbolKind::Text); assert_eq!(symbol.section_index(), Some(text_index)); @@ -243,4 +262,16 @@ fn macho_x86_64() { read::RelocationTarget::Symbol(func1_symbol) ); assert_eq!(relocation.addend(), 0); + + let (offset, relocation) = relocations.next().unwrap(); + println!("{:?}", relocation); + assert_eq!(offset, 16); + assert_eq!(relocation.kind(), RelocationKind::Relative); + assert_eq!(relocation.encoding(), RelocationEncoding::X86RipRelative); + assert_eq!(relocation.size(), 32); + assert_eq!( + relocation.target(), + read::RelocationTarget::Symbol(func1_symbol) + ); + assert_eq!(relocation.addend(), -4); } diff --git a/third_party/rust/object/tests/tls.rs b/third_party/rust/object/tests/tls.rs new file mode 100644 index 0000000000..653b7e714f --- /dev/null +++ b/third_party/rust/object/tests/tls.rs @@ -0,0 +1,167 @@ +#![cfg(all(feature = "read", feature = "write"))] + +use object::read::{Object, ObjectSection}; +use object::{read, write}; +use object::{RelocationEncoding, RelocationKind, SectionKind, SymbolKind, SymbolScope}; +use target_lexicon::{Architecture, BinaryFormat}; + +#[test] +fn macho_x86_64_tls() { + let mut object = write::Object::new(BinaryFormat::Macho, Architecture::X86_64); + + let section = object.section_id(write::StandardSection::Tls); + let symbol = object.add_symbol(write::Symbol { + name: b"tls1".to_vec(), + value: 0, + size: 0, + kind: SymbolKind::Tls, + scope: SymbolScope::Linkage, + weak: false, + section: None, + }); + object.add_symbol_data(symbol, section, &[1; 30], 4); + + let section = object.section_id(write::StandardSection::UninitializedTls); + let symbol = object.add_symbol(write::Symbol { + name: b"tls2".to_vec(), + value: 0, + size: 0, + kind: SymbolKind::Tls, 
+ scope: SymbolScope::Linkage, + weak: false, + section: None, + }); + object.add_symbol_bss(symbol, section, 31, 4); + + let bytes = object.write().unwrap(); + + std::fs::write(&"tls.o", &bytes).unwrap(); + + let object = read::File::parse(&bytes).unwrap(); + assert_eq!(object.format(), BinaryFormat::Macho); + assert_eq!(object.architecture(), Architecture::X86_64); + + let mut sections = object.sections(); + + let thread_data = sections.next().unwrap(); + println!("{:?}", section); + let thread_data_index = thread_data.index(); + assert_eq!(thread_data.name(), Some("__thread_data")); + assert_eq!(thread_data.segment_name(), Some("__DATA")); + assert_eq!(thread_data.kind(), SectionKind::Tls); + assert_eq!(thread_data.size(), 30); + assert_eq!(&thread_data.data()[..], &[1; 30]); + + let thread_vars = sections.next().unwrap(); + println!("{:?}", section); + let thread_vars_index = thread_vars.index(); + assert_eq!(thread_vars.name(), Some("__thread_vars")); + assert_eq!(thread_vars.segment_name(), Some("__DATA")); + assert_eq!(thread_vars.kind(), SectionKind::TlsVariables); + assert_eq!(thread_vars.size(), 2 * 3 * 8); + + let thread_bss = sections.next().unwrap(); + println!("{:?}", section); + let thread_bss_index = thread_bss.index(); + assert_eq!(thread_bss.name(), Some("__thread_bss")); + assert_eq!(thread_bss.segment_name(), Some("__DATA")); + assert_eq!(thread_bss.kind(), SectionKind::UninitializedTls); + assert_eq!(thread_bss.size(), 31); + + let mut symbols = object.symbols(); + + let (_, symbol) = symbols.next().unwrap(); + println!("{:?}", symbol); + assert_eq!(symbol.name(), Some("_tls1")); + assert_eq!(symbol.kind(), SymbolKind::Tls); + assert_eq!(symbol.section_index(), Some(thread_vars_index)); + assert_eq!(symbol.scope(), SymbolScope::Linkage); + assert_eq!(symbol.is_weak(), false); + assert_eq!(symbol.is_undefined(), false); + + let (tls1_init_symbol, symbol) = symbols.next().unwrap(); + println!("{:?}", symbol); + assert_eq!(symbol.name(), Some("_tls1$tlv$init")); + assert_eq!(symbol.kind(), SymbolKind::Tls); + assert_eq!(symbol.section_index(), Some(thread_data_index)); + assert_eq!(symbol.scope(), SymbolScope::Compilation); + assert_eq!(symbol.is_weak(), false); + assert_eq!(symbol.is_undefined(), false); + + let (tlv_bootstrap_symbol, symbol) = symbols.next().unwrap(); + println!("{:?}", symbol); + assert_eq!(symbol.name(), Some("__tlv_bootstrap")); + assert_eq!(symbol.kind(), SymbolKind::Unknown); + assert_eq!(symbol.section_index(), None); + assert_eq!(symbol.scope(), SymbolScope::Unknown); + assert_eq!(symbol.is_weak(), false); + assert_eq!(symbol.is_undefined(), true); + + let (_, symbol) = symbols.next().unwrap(); + println!("{:?}", symbol); + assert_eq!(symbol.name(), Some("_tls2")); + assert_eq!(symbol.kind(), SymbolKind::Tls); + assert_eq!(symbol.section_index(), Some(thread_vars_index)); + assert_eq!(symbol.scope(), SymbolScope::Linkage); + assert_eq!(symbol.is_weak(), false); + assert_eq!(symbol.is_undefined(), false); + + let (tls2_init_symbol, symbol) = symbols.next().unwrap(); + println!("{:?}", symbol); + assert_eq!(symbol.name(), Some("_tls2$tlv$init")); + assert_eq!(symbol.kind(), SymbolKind::Tls); + assert_eq!(symbol.section_index(), Some(thread_bss_index)); + assert_eq!(symbol.scope(), SymbolScope::Compilation); + assert_eq!(symbol.is_weak(), false); + assert_eq!(symbol.is_undefined(), false); + + let mut relocations = thread_vars.relocations(); + + let (offset, relocation) = relocations.next().unwrap(); + println!("{:?}", relocation); + 
assert_eq!(offset, 0); + assert_eq!(relocation.kind(), RelocationKind::Absolute); + assert_eq!(relocation.encoding(), RelocationEncoding::Generic); + assert_eq!(relocation.size(), 64); + assert_eq!( + relocation.target(), + read::RelocationTarget::Symbol(tlv_bootstrap_symbol) + ); + assert_eq!(relocation.addend(), 0); + + let (offset, relocation) = relocations.next().unwrap(); + println!("{:?}", relocation); + assert_eq!(offset, 16); + assert_eq!(relocation.kind(), RelocationKind::Absolute); + assert_eq!(relocation.encoding(), RelocationEncoding::Generic); + assert_eq!(relocation.size(), 64); + assert_eq!( + relocation.target(), + read::RelocationTarget::Symbol(tls1_init_symbol) + ); + assert_eq!(relocation.addend(), 0); + + let (offset, relocation) = relocations.next().unwrap(); + println!("{:?}", relocation); + assert_eq!(offset, 24); + assert_eq!(relocation.kind(), RelocationKind::Absolute); + assert_eq!(relocation.encoding(), RelocationEncoding::Generic); + assert_eq!(relocation.size(), 64); + assert_eq!( + relocation.target(), + read::RelocationTarget::Symbol(tlv_bootstrap_symbol) + ); + assert_eq!(relocation.addend(), 0); + + let (offset, relocation) = relocations.next().unwrap(); + println!("{:?}", relocation); + assert_eq!(offset, 40); + assert_eq!(relocation.kind(), RelocationKind::Absolute); + assert_eq!(relocation.encoding(), RelocationEncoding::Generic); + assert_eq!(relocation.size(), 64); + assert_eq!( + relocation.target(), + read::RelocationTarget::Symbol(tls2_init_symbol) + ); + assert_eq!(relocation.addend(), 0); +} diff --git a/third_party/rust/parking_lot/.cargo-checksum.json b/third_party/rust/parking_lot/.cargo-checksum.json index bf0a89b503..784094cd6f 100644 --- a/third_party/rust/parking_lot/.cargo-checksum.json +++ b/third_party/rust/parking_lot/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"CHANGELOG.md":"d338e1af7a55941072de9e34e2537da86351b55e09f483f4214f63606f1dcad6","Cargo.toml":"d14378cc93973e6b478ee13195f149a2549791bcc61dbd26df45577e1f86cdf9","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"e9af35881a3de707d5e60a15918119e88f7716a09dabe9da7960aa3191e6e7d9","appveyor.yml":"fa41673db7146f34d601a5977d77fe81fd29da706b5981cfd68ce79affd7a667","build.rs":"4ed00d73d71057bcdf6c186559468927fc130fd65cfd806ee5d46d28540bc653","src/condvar.rs":"275f05affa456117f255ccc3de3277c1174e470f307563da0d166915d4a2f68e","src/deadlock.rs":"081dbf009539b113f67ad0a1abd7af889dad684a47aa1a7dc00ae91f08975ef6","src/elision.rs":"ee8735e8695dc90ccc16002a229d5b64ba4e1c867c77f551b8715a1958faaeac","src/lib.rs":"eda1ae488e72f1d514cb7bc19600d3948cb69423a9d68738acd12565346461ec","src/mutex.rs":"e3a48933b7e19d26eab4b5f44ed4e9bcb069b57cdd4a0569d1e65f6c3839b766","src/once.rs":"155694841d62c54b8b489916f14cad887a86138000f3a6925c8d70a4a5711283","src/raw_mutex.rs":"5797de689e5c89eae2b45a4bf15bd42a01345aed0770c56f65846daee083588a","src/raw_rwlock.rs":"f13ff54a30d2fb53f95ab565db4e478f20f0a2b85b2b75f4392dc80e34f5f270","src/remutex.rs":"b62e72028b6d168650a9c3fb9375b3690225126b055a8874a7989b5f8dcb6605","src/rwlock.rs":"b0c92f2c602d13213a5e03f16ecda70dec7ea1d256cc99e4a3d6e2adad1afdd4","src/util.rs":"35f1c1930fb30fca0ceab5e0d68d8c418c7f0bb5b6ac6f21d6019986a3046cca"},"package":"f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252"} \ No newline at end of file 
+{"files":{"CHANGELOG.md":"18a3b6c2a59fb59450362712afae444070b23c2697cf20aa9ee3911dd9f6d981","Cargo.toml":"c0d17dd8decba5afb1495577e0ded39c0228eeb2eb14be4f4d4d5d9c96ebc0c3","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"0f1b45638eb0d2b3f142baec830319a5790f3f2004eac5bc5f3e95bc632bdaed","appveyor.yml":"e2416e65e27442dd8e17c773deee1e45ee96157221bc81b03d9a6d25bfa570e2","bors.toml":"1c81ede536a37edd30fe4e622ff0531b25372403ac9475a5d6c50f14156565a2","src/condvar.rs":"510f96e94b56f0cb0d200a8e94b0487d0799e13b9e126b9b416f22d8dc11b643","src/deadlock.rs":"7d3ebb5b4f63658435df277bb983e352e4bc651a92c4fd48ae68bf103e452d0d","src/elision.rs":"9aceb0b27fd3cdaf4ef76bda63435a96ec2fdef24be098b9e4edbc39db000765","src/fair_mutex.rs":"d0a032e8207919da04b85f1422dfb14aa2af7aad78843c708d2fe3e0478e401a","src/lib.rs":"3d89619878f3c8b6190261321a4e430e0514c97b65e8c911c0764ea57c0605f2","src/mutex.rs":"afc25db5c45da63c743029ee3cb22e262ea7a32b533245b441c0a5835f9f525f","src/once.rs":"a1c38a5d87077e3d112d57e065ee126a24ab19f04fba9cb1f2cb43bc82caf33c","src/raw_fair_mutex.rs":"a7415aa6cbc040a2f886d06dd6c0c0b3be9963936a31f60f1494e718c9d18acb","src/raw_mutex.rs":"f3507478c34b49bd725dfaed6bf4847fc3aec28700960a7823af9e15b06b5e24","src/raw_rwlock.rs":"f8ce7c4f92299cf64cb6e7b69cd46d9ddefd1211535729b6455e82f7c4eb3eae","src/remutex.rs":"7a0de55161cd57497bb52d3aecca69a89eff2e71cdb2d762df53579e0607b489","src/rwlock.rs":"1a782ab4fafc0c542d1c42151b98475829c96da168d2d0e8947181b7f2d7cb07","src/util.rs":"37a2c8b5c9254df83e8f3a5cd831558c1045061a76c2571bdc4d78eb86e467f2","tests/issue_203.rs":"5fbdf6ec63f391d86457df949678c203a1e81e8aa32d4e10037fa76e768702c0"},"package":"d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e"} \ No newline at end of file diff --git a/third_party/rust/parking_lot/CHANGELOG.md b/third_party/rust/parking_lot/CHANGELOG.md index 2090ca71be..c187e4d157 100644 --- a/third_party/rust/parking_lot/CHANGELOG.md +++ b/third_party/rust/parking_lot/CHANGELOG.md @@ -1,8 +1,36 @@ +## parking_lot 0.10.2 (2020-04-10) + +- Update minimum version of `lock_api`. + +## parking_lot 0.10.1, parking_lot_core 0.7.1, lock_api 0.3.4 (2020-04-10) + +- Add methods to construct `Mutex`, `RwLock`, etc in a `const` context. (#217) +- Add `FairMutex` which always uses fair unlocking. (#204) +- Fixed panic with deadlock detection on macOS. (#203) +- Fixed incorrect synchronization in `create_hashtable`. (#210) +- Use `llvm_asm!` instead of the deprecated `asm!`. (#223) + +## lock_api 0.3.3 (2020-01-04) + +- Deprecate unsound `MappedRwLockWriteGuard::downgrade` (#198) + +## parking_lot 0.10.0, parking_lot_core 0.7.0, lock_api 0.3.2 (2019-11-25) + +- Upgrade smallvec dependency to 1.0 in parking_lot_core. +- Replace all usage of `mem::uninitialized` with `mem::MaybeUninit`. +- The minimum required Rust version is bumped to 1.36. Because of the above two changes. +- Make methods on `WaitTimeoutResult` and `OnceState` take `self` by value instead of reference. + +## parking_lot_core 0.6.2 (2019-07-22) + +- Fixed compile error on Windows with old cfg_if version. (#164) + +## parking_lot_core 0.6.1 (2019-07-17) + +- Fixed Android build. (#163) + ## parking_lot 0.9.0, parking_lot_core 0.6.0, lock_api 0.3.1 (2019-07-14) -- The minimum supported rust version (MSRV) is now 1.32. This was primarily - increased for testing with the latest _rand_ crate. Rust 1.31 may continue to - work for normal use of these releases. 
- Re-export lock_api (0.3.1) from parking_lot (#150) - Removed (non-dev) dependency on rand crate for fairness mechanism, by including a simple xorshift PRNG in core (#144) diff --git a/third_party/rust/parking_lot/Cargo.toml b/third_party/rust/parking_lot/Cargo.toml index 4843960c2c..57940fff19 100644 --- a/third_party/rust/parking_lot/Cargo.toml +++ b/third_party/rust/parking_lot/Cargo.toml @@ -13,7 +13,7 @@ [package] edition = "2018" name = "parking_lot" -version = "0.9.0" +version = "0.10.2" authors = ["Amanieu d'Antras "] description = "More compact and efficient implementations of the standard synchronization primitives." readme = "README.md" @@ -22,20 +22,15 @@ categories = ["concurrency"] license = "Apache-2.0/MIT" repository = "https://github.com/Amanieu/parking_lot" [dependencies.lock_api] -version = "0.3.1" +version = "0.3.4" [dependencies.parking_lot_core] -version = "0.6" +version = "0.7.1" [dev-dependencies.bincode] version = "1.1.3" -[dev-dependencies.lazy_static] -version = "1.0" - [dev-dependencies.rand] version = "0.7" -[build-dependencies.rustc_version] -version = "0.2" [features] deadlock_detection = ["parking_lot_core/deadlock_detection"] diff --git a/third_party/rust/parking_lot/README.md b/third_party/rust/parking_lot/README.md index fb49272aa6..3ec4cb0418 100644 --- a/third_party/rust/parking_lot/README.md +++ b/third_party/rust/parking_lot/README.md @@ -34,7 +34,7 @@ in the Rust standard library: parallelism. 2. Since they consist of just a single atomic variable, have constant initializers and don't need destructors, these primitives can be used as - `static` global variables. The standard library primitives require + `static` global variables. The standard library primitives require dynamic initialization and thus need to be lazily initialized with `lazy_static!`. 3. Uncontended lock acquisition and release is done through fast inline @@ -87,9 +87,9 @@ lock. There are a few restrictions when using this library on stable Rust: -- `Mutex` and `Once` will use 1 word of space instead of 1 byte. -- You will have to use `lazy_static!` to statically initialize `Mutex`, - `Condvar` and `RwLock` types instead of `const fn`. +- You will have to use the `const_*` functions (e.g. `const_mutex(val)`) to + statically initialize the locking primitives. Using e.g. `Mutex::new(val)` + does not work on stable Rust yet. - `RwLock` will not be able to take advantage of hardware lock elision for readers, which improves performance when there are multiple readers. @@ -102,20 +102,14 @@ Add this to your `Cargo.toml`: ```toml [dependencies] -parking_lot = "0.9" -``` - -and this to your crate root: - -```rust -extern crate parking_lot; +parking_lot = "0.10" ``` To enable nightly-only features, add this to your `Cargo.toml` instead: ```toml [dependencies] -parking_lot = {version = "0.9", features = ["nightly"]} +parking_lot = { version = "0.10", features = ["nightly"] } ``` The experimental deadlock detector can be enabled with the @@ -127,7 +121,7 @@ changes to the core API do not cause breaking changes for users of `parking_lot` ## Minimum Rust version -The current minimum required Rust version is 1.32. Any change to this is +The current minimum required Rust version is 1.36. Any change to this is considered a breaking change and will require a major version bump. 
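As the readme section above notes, static initialization on stable Rust now goes through the `const_*` constructor functions instead of `lazy_static!`. A brief sketch of that pattern, with the `COUNTER` static and `bump` helper invented purely for illustration:

```rust
use parking_lot::{const_mutex, Mutex};

// const_mutex is a const fn, so the lock can live in a plain static on stable Rust.
static COUNTER: Mutex<u64> = const_mutex(0);

fn bump() -> u64 {
    // parking_lot's lock() returns the guard directly (no Result to unwrap).
    let mut guard = COUNTER.lock();
    *guard += 1;
    *guard
}
```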
## License diff --git a/third_party/rust/parking_lot/appveyor.yml b/third_party/rust/parking_lot/appveyor.yml index 734a893973..a7eb5cfc98 100644 --- a/third_party/rust/parking_lot/appveyor.yml +++ b/third_party/rust/parking_lot/appveyor.yml @@ -25,19 +25,19 @@ environment: - TARGET: x86_64-pc-windows-msvc MSYSTEM: MINGW64 CPU: x86_64 - TOOLCHAIN: 1.32.0 + TOOLCHAIN: 1.36.0 - TARGET: i686-pc-windows-msvc MSYSTEM: MINGW32 CPU: i686 - TOOLCHAIN: 1.32.0 + TOOLCHAIN: 1.36.0 - TARGET: x86_64-pc-windows-gnu MSYSTEM: MINGW64 CPU: x86_64 - TOOLCHAIN: 1.32.0 + TOOLCHAIN: 1.36.0 - TARGET: i686-pc-windows-gnu MSYSTEM: MINGW32 CPU: i686 - TOOLCHAIN: 1.32.0 + TOOLCHAIN: 1.36.0 install: - set PATH=C:\msys64\%MSYSTEM%\bin;c:\msys64\usr\bin;%PATH% diff --git a/third_party/rust/parking_lot/bors.toml b/third_party/rust/parking_lot/bors.toml new file mode 100644 index 0000000000..ca08e818bf --- /dev/null +++ b/third_party/rust/parking_lot/bors.toml @@ -0,0 +1,3 @@ +status = [ + "continuous-integration/travis-ci/push", +] diff --git a/third_party/rust/parking_lot/build.rs b/third_party/rust/parking_lot/build.rs deleted file mode 100644 index 17e76f1562..0000000000 --- a/third_party/rust/parking_lot/build.rs +++ /dev/null @@ -1,8 +0,0 @@ -use rustc_version::{version, Version}; - -fn main() { - if version().unwrap() >= Version::parse("1.34.0").unwrap() { - println!("cargo:rustc-cfg=has_sized_atomics"); - println!("cargo:rustc-cfg=has_checked_instant"); - } -} diff --git a/third_party/rust/parking_lot/src/condvar.rs b/third_party/rust/parking_lot/src/condvar.rs index 341a5d681a..0afda3a5b1 100644 --- a/third_party/rust/parking_lot/src/condvar.rs +++ b/third_party/rust/parking_lot/src/condvar.rs @@ -12,7 +12,7 @@ use core::{ fmt, ptr, sync::atomic::{AtomicPtr, Ordering}, }; -use lock_api::RawMutex as RawMutexTrait; +use lock_api::RawMutex as RawMutex_; use parking_lot_core::{self, ParkResult, RequeueOp, UnparkResult, DEFAULT_PARK_TOKEN}; use std::time::{Duration, Instant}; @@ -24,7 +24,7 @@ pub struct WaitTimeoutResult(bool); impl WaitTimeoutResult { /// Returns whether the wait was known to have timed out. #[inline] - pub fn timed_out(&self) -> bool { + pub fn timed_out(self) -> bool { self.0 } } @@ -78,9 +78,13 @@ impl WaitTimeoutResult { /// // wait for the thread to start up /// let &(ref lock, ref cvar) = &*pair; /// let mut started = lock.lock(); -/// while !*started { +/// if !*started { /// cvar.wait(&mut started); /// } +/// // Note that we used an if instead of a while loop above. This is only +/// // possible because parking_lot's Condvar will never spuriously wake up. +/// // This means that wait() will only return after notify_one or notify_all is +/// // called. /// ``` pub struct Condvar { state: AtomicPtr, @@ -91,7 +95,9 @@ impl Condvar { /// notified. #[inline] pub const fn new() -> Condvar { - Condvar { state: AtomicPtr::new(ptr::null_mut()) } + Condvar { + state: AtomicPtr::new(ptr::null_mut()), + } } /// Wakes up one blocked thread on this condvar. 
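The updated `Condvar` documentation above relies on the guarantee that parking_lot's `Condvar` never wakes spuriously, so a single `if` check can stand in for the usual `while` loop around `wait`. A self-contained sketch of that pattern; the `wait_for_start` function mirrors the doc example and is illustrative only.

```rust
use parking_lot::{Condvar, Mutex};
use std::sync::Arc;
use std::thread;

fn wait_for_start() {
    let pair = Arc::new((Mutex::new(false), Condvar::new()));
    let pair2 = Arc::clone(&pair);

    // Signalling thread: flip the flag under the lock, then notify.
    thread::spawn(move || {
        let (lock, cvar) = &*pair2;
        *lock.lock() = true;
        cvar.notify_one();
    });

    // Waiting thread: an `if` suffices because wait() only returns after a notify.
    let (lock, cvar) = &*pair;
    let mut started = lock.lock();
    if !*started {
        cvar.wait(&mut started);
    }
}
```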
@@ -282,7 +288,10 @@ impl Condvar { mutex_guard: &mut MutexGuard<'_, T>, timeout: Instant, ) -> WaitTimeoutResult { - self.wait_until_internal(unsafe { MutexGuard::mutex(mutex_guard).raw() }, Some(timeout)) + self.wait_until_internal( + unsafe { MutexGuard::mutex(mutex_guard).raw() }, + Some(timeout), + ) } // This is a non-generic function to reduce the monomorphization cost of @@ -573,8 +582,10 @@ mod tests { let _g = m2.lock(); c2.notify_one(); }); - let timeout_res = - c.wait_until(&mut g, Instant::now() + Duration::from_millis(u32::max_value() as u64)); + let timeout_res = c.wait_until( + &mut g, + Instant::now() + Duration::from_millis(u32::max_value() as u64), + ); assert!(!timeout_res.timed_out()); drop(g); } @@ -607,7 +618,7 @@ mod tests { rx.recv().unwrap(); let _g = m.lock(); let _guard = PanicGuard(&*c); - let _ = c.wait(&mut m3.lock()); + c.wait(&mut m3.lock()); } #[test] @@ -681,3 +692,361 @@ mod tests { } } } + +/// This module contains an integration test that is heavily inspired from WebKit's own integration +/// tests for it's own Condvar. +#[cfg(test)] +mod webkit_queue_test { + use crate::{Condvar, Mutex, MutexGuard}; + use std::{collections::VecDeque, sync::Arc, thread, time::Duration}; + + #[derive(Clone, Copy)] + enum Timeout { + Bounded(Duration), + Forever, + } + + #[derive(Clone, Copy)] + enum NotifyStyle { + One, + All, + } + + struct Queue { + items: VecDeque, + should_continue: bool, + } + + impl Queue { + fn new() -> Self { + Self { + items: VecDeque::new(), + should_continue: true, + } + } + } + + fn wait( + condition: &Condvar, + lock: &mut MutexGuard<'_, T>, + predicate: impl Fn(&mut MutexGuard<'_, T>) -> bool, + timeout: &Timeout, + ) { + while !predicate(lock) { + match timeout { + Timeout::Forever => condition.wait(lock), + Timeout::Bounded(bound) => { + condition.wait_for(lock, *bound); + } + } + } + } + + fn notify(style: NotifyStyle, condition: &Condvar, should_notify: bool) { + match style { + NotifyStyle::One => { + condition.notify_one(); + } + NotifyStyle::All => { + if should_notify { + condition.notify_all(); + } + } + } + } + + fn run_queue_test( + num_producers: usize, + num_consumers: usize, + max_queue_size: usize, + messages_per_producer: usize, + notify_style: NotifyStyle, + timeout: Timeout, + delay: Duration, + ) { + let input_queue = Arc::new(Mutex::new(Queue::new())); + let empty_condition = Arc::new(Condvar::new()); + let full_condition = Arc::new(Condvar::new()); + + let output_vec = Arc::new(Mutex::new(vec![])); + + let consumers = (0..num_consumers) + .map(|_| { + consumer_thread( + input_queue.clone(), + empty_condition.clone(), + full_condition.clone(), + timeout, + notify_style, + output_vec.clone(), + max_queue_size, + ) + }) + .collect::>(); + let producers = (0..num_producers) + .map(|_| { + producer_thread( + messages_per_producer, + input_queue.clone(), + empty_condition.clone(), + full_condition.clone(), + timeout, + notify_style, + max_queue_size, + ) + }) + .collect::>(); + + thread::sleep(delay); + + for producer in producers.into_iter() { + producer.join().expect("Producer thread panicked"); + } + + { + let mut input_queue = input_queue.lock(); + input_queue.should_continue = false; + } + empty_condition.notify_all(); + + for consumer in consumers.into_iter() { + consumer.join().expect("Consumer thread panicked"); + } + + let mut output_vec = output_vec.lock(); + assert_eq!(output_vec.len(), num_producers * messages_per_producer); + output_vec.sort(); + for msg_idx in 0..messages_per_producer { + for producer_idx in 
0..num_producers { + assert_eq!(msg_idx, output_vec[msg_idx * num_producers + producer_idx]); + } + } + } + + fn consumer_thread( + input_queue: Arc>, + empty_condition: Arc, + full_condition: Arc, + timeout: Timeout, + notify_style: NotifyStyle, + output_queue: Arc>>, + max_queue_size: usize, + ) -> thread::JoinHandle<()> { + thread::spawn(move || loop { + let (should_notify, result) = { + let mut queue = input_queue.lock(); + wait( + &*empty_condition, + &mut queue, + |state| -> bool { !state.items.is_empty() || !state.should_continue }, + &timeout, + ); + if queue.items.is_empty() && !queue.should_continue { + return; + } + let should_notify = queue.items.len() == max_queue_size; + let result = queue.items.pop_front(); + std::mem::drop(queue); + (should_notify, result) + }; + notify(notify_style, &*full_condition, should_notify); + + if let Some(result) = result { + output_queue.lock().push(result); + } + }) + } + + fn producer_thread( + num_messages: usize, + queue: Arc>, + empty_condition: Arc, + full_condition: Arc, + timeout: Timeout, + notify_style: NotifyStyle, + max_queue_size: usize, + ) -> thread::JoinHandle<()> { + thread::spawn(move || { + for message in 0..num_messages { + let should_notify = { + let mut queue = queue.lock(); + wait( + &*full_condition, + &mut queue, + |state| state.items.len() < max_queue_size, + &timeout, + ); + let should_notify = queue.items.is_empty(); + queue.items.push_back(message); + std::mem::drop(queue); + should_notify + }; + notify(notify_style, &*empty_condition, should_notify); + } + }) + } + + macro_rules! run_queue_tests { + ( $( $name:ident( + num_producers: $num_producers:expr, + num_consumers: $num_consumers:expr, + max_queue_size: $max_queue_size:expr, + messages_per_producer: $messages_per_producer:expr, + notification_style: $notification_style:expr, + timeout: $timeout:expr, + delay_seconds: $delay_seconds:expr); + )* ) => { + $(#[test] + fn $name() { + let delay = Duration::from_secs($delay_seconds); + run_queue_test( + $num_producers, + $num_consumers, + $max_queue_size, + $messages_per_producer, + $notification_style, + $timeout, + delay, + ); + })* + }; + } + + run_queue_tests! 
{ + sanity_check_queue( + num_producers: 1, + num_consumers: 1, + max_queue_size: 1, + messages_per_producer: 100_000, + notification_style: NotifyStyle::All, + timeout: Timeout::Bounded(Duration::from_secs(1)), + delay_seconds: 0 + ); + sanity_check_queue_timeout( + num_producers: 1, + num_consumers: 1, + max_queue_size: 1, + messages_per_producer: 100_000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + new_test_without_timeout_5( + num_producers: 1, + num_consumers: 5, + max_queue_size: 1, + messages_per_producer: 100_000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + one_producer_one_consumer_one_slot( + num_producers: 1, + num_consumers: 1, + max_queue_size: 1, + messages_per_producer: 100_000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + one_producer_one_consumer_one_slot_timeout( + num_producers: 1, + num_consumers: 1, + max_queue_size: 1, + messages_per_producer: 100_000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 1 + ); + one_producer_one_consumer_hundred_slots( + num_producers: 1, + num_consumers: 1, + max_queue_size: 100, + messages_per_producer: 1_000_000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + ten_producers_one_consumer_one_slot( + num_producers: 10, + num_consumers: 1, + max_queue_size: 1, + messages_per_producer: 10000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + ten_producers_one_consumer_hundred_slots_notify_all( + num_producers: 10, + num_consumers: 1, + max_queue_size: 100, + messages_per_producer: 10000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + ten_producers_one_consumer_hundred_slots_notify_one( + num_producers: 10, + num_consumers: 1, + max_queue_size: 100, + messages_per_producer: 10000, + notification_style: NotifyStyle::One, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + one_producer_ten_consumers_one_slot( + num_producers: 1, + num_consumers: 10, + max_queue_size: 1, + messages_per_producer: 10000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + one_producer_ten_consumers_hundred_slots_notify_all( + num_producers: 1, + num_consumers: 10, + max_queue_size: 100, + messages_per_producer: 100_000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + one_producer_ten_consumers_hundred_slots_notify_one( + num_producers: 1, + num_consumers: 10, + max_queue_size: 100, + messages_per_producer: 100_000, + notification_style: NotifyStyle::One, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + ten_producers_ten_consumers_one_slot( + num_producers: 10, + num_consumers: 10, + max_queue_size: 1, + messages_per_producer: 50000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + ten_producers_ten_consumers_hundred_slots_notify_all( + num_producers: 10, + num_consumers: 10, + max_queue_size: 100, + messages_per_producer: 50000, + notification_style: NotifyStyle::All, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + ten_producers_ten_consumers_hundred_slots_notify_one( + num_producers: 10, + num_consumers: 10, + max_queue_size: 100, + messages_per_producer: 50000, + notification_style: NotifyStyle::One, + timeout: Timeout::Forever, + delay_seconds: 0 + ); + } +} diff --git 
a/third_party/rust/parking_lot/src/deadlock.rs b/third_party/rust/parking_lot/src/deadlock.rs index 810edf1fde..0fab7228c9 100644 --- a/third_party/rust/parking_lot/src/deadlock.rs +++ b/third_party/rust/parking_lot/src/deadlock.rs @@ -46,9 +46,7 @@ mod tests { use std::time::Duration; // We need to serialize these tests since deadlock detection uses global state - lazy_static::lazy_static! { - static ref DEADLOCK_DETECTION_LOCK: Mutex<()> = Mutex::new(()); - } + static DEADLOCK_DETECTION_LOCK: Mutex<()> = crate::const_mutex(()); fn check_deadlock() -> bool { use parking_lot_core::deadlock::check_deadlock; diff --git a/third_party/rust/parking_lot/src/elision.rs b/third_party/rust/parking_lot/src/elision.rs index 10d45f9fbf..68cfa63c3e 100644 --- a/third_party/rust/parking_lot/src/elision.rs +++ b/third_party/rust/parking_lot/src/elision.rs @@ -25,7 +25,10 @@ pub trait AtomicElisionExt { // Indicates whether the target architecture supports lock elision #[inline] pub fn have_elision() -> bool { - cfg!(all(feature = "nightly", any(target_arch = "x86", target_arch = "x86_64"),)) + cfg!(all( + feature = "nightly", + any(target_arch = "x86", target_arch = "x86_64"), + )) } // This implementation is never actually called because it is guarded by @@ -54,12 +57,16 @@ impl AtomicElisionExt for AtomicUsize { fn elision_compare_exchange_acquire(&self, current: usize, new: usize) -> Result { unsafe { let prev: usize; - asm!("xacquire; lock; cmpxchgl $2, $1" - : "={eax}" (prev), "+*m" (self) - : "r" (new), "{eax}" (current) - : "memory" - : "volatile"); - if prev == current { Ok(prev) } else { Err(prev) } + llvm_asm!("xacquire; lock; cmpxchgl $2, $1" + : "={eax}" (prev), "+*m" (self) + : "r" (new), "{eax}" (current) + : "memory" + : "volatile"); + if prev == current { + Ok(prev) + } else { + Err(prev) + } } } #[cfg(target_pointer_width = "64")] @@ -67,12 +74,16 @@ impl AtomicElisionExt for AtomicUsize { fn elision_compare_exchange_acquire(&self, current: usize, new: usize) -> Result { unsafe { let prev: usize; - asm!("xacquire; lock; cmpxchgq $2, $1" - : "={rax}" (prev), "+*m" (self) - : "r" (new), "{rax}" (current) - : "memory" - : "volatile"); - if prev == current { Ok(prev) } else { Err(prev) } + llvm_asm!("xacquire; lock; cmpxchgq $2, $1" + : "={rax}" (prev), "+*m" (self) + : "r" (new), "{rax}" (current) + : "memory" + : "volatile"); + if prev == current { + Ok(prev) + } else { + Err(prev) + } } } @@ -81,11 +92,11 @@ impl AtomicElisionExt for AtomicUsize { fn elision_fetch_sub_release(&self, val: usize) -> usize { unsafe { let prev: usize; - asm!("xrelease; lock; xaddl $2, $1" - : "=r" (prev), "+*m" (self) - : "0" (val.wrapping_neg()) - : "memory" - : "volatile"); + llvm_asm!("xrelease; lock; xaddl $2, $1" + : "=r" (prev), "+*m" (self) + : "0" (val.wrapping_neg()) + : "memory" + : "volatile"); prev } } @@ -94,11 +105,11 @@ impl AtomicElisionExt for AtomicUsize { fn elision_fetch_sub_release(&self, val: usize) -> usize { unsafe { let prev: usize; - asm!("xrelease; lock; xaddq $2, $1" - : "=r" (prev), "+*m" (self) - : "0" (val.wrapping_neg()) - : "memory" - : "volatile"); + llvm_asm!("xrelease; lock; xaddq $2, $1" + : "=r" (prev), "+*m" (self) + : "0" (val.wrapping_neg()) + : "memory" + : "volatile"); prev } } diff --git a/third_party/rust/parking_lot/src/fair_mutex.rs b/third_party/rust/parking_lot/src/fair_mutex.rs new file mode 100644 index 0000000000..449c53b051 --- /dev/null +++ b/third_party/rust/parking_lot/src/fair_mutex.rs @@ -0,0 +1,278 @@ +// Copyright 2016 Amanieu d'Antras +// +// 
Licensed under the Apache License, Version 2.0, or the MIT license, at your option. This file may not be +// copied, modified, or distributed except according to those terms. + +use crate::raw_fair_mutex::RawFairMutex; +use lock_api; + +/// A mutual exclusion primitive that is always fair, useful for protecting shared data +/// +/// This mutex will block threads waiting for the lock to become available. The +/// mutex can also be statically initialized or created via a `new` +/// constructor. Each mutex has a type parameter which represents the data that +/// it is protecting. The data can only be accessed through the RAII guards +/// returned from `lock` and `try_lock`, which guarantees that the data is only +/// ever accessed when the mutex is locked. +/// +/// The regular mutex provided by `parking_lot` uses eventual locking fairness +/// (after some time it will default to the fair algorithm), but eventual +/// fairness does not provide the same guarantees an always-fair method would. +/// Fair mutexes are generally slower, but sometimes needed. This wrapper was +/// created to avoid mistakenly using an unfair protocol when one is forbidden. +/// +/// In a fair mutex the lock is granted to whichever thread asked first: +/// waiting threads form a queue and are served in first-in, first-out order. This +/// means a later thread cannot steal the lock from an earlier waiter to increase +/// throughput; in exchange, latency becomes more predictable, since a waiting +/// thread no longer keeps losing the lock to faster but later arrivals. +/// +/// A fair mutex may not be the right choice when threads have different priorities, +/// since a low-priority thread at the front of the queue can delay a high-priority +/// one behind it (a form of priority inversion). +/// +/// # Differences from the standard library `Mutex` +/// +/// - No poisoning, the lock is released normally on panic. +/// - Only requires 1 byte of space, whereas the standard library boxes the +/// `FairMutex` due to platform limitations. +/// - Can be statically constructed (requires the `const_fn` nightly feature). +/// - Does not require any drop glue when dropped. +/// - Inline fast path for the uncontended case. +/// - Efficient handling of micro-contention using adaptive spinning. +/// - Allows raw locking & unlocking without a guard. +/// +/// # Examples +/// +/// ``` +/// use parking_lot::FairMutex; +/// use std::sync::{Arc, mpsc::channel}; +/// use std::thread; +/// +/// const N: usize = 10; +/// +/// // Spawn a few threads to increment a shared variable (non-atomically), and +/// // let the main thread know once all increments are done. +/// // +/// // Here we're using an Arc to share memory among threads, and the data inside +/// // the Arc is protected with a mutex. +/// let data = Arc::new(FairMutex::new(0)); +/// +/// let (tx, rx) = channel(); +/// for _ in 0..10 { +/// let (data, tx) = (Arc::clone(&data), tx.clone()); +/// thread::spawn(move || { +/// // The shared state can only be accessed once the lock is held. +/// // Our non-atomic increment is safe because we're the only thread +/// // which can access the shared state when the lock is held. +/// let mut data = data.lock(); +/// *data += 1; +/// if *data == N { +/// tx.send(()).unwrap(); +/// } +/// // the lock is unlocked here when `data` goes out of scope. +/// }); +/// } +/// +/// rx.recv().unwrap(); +/// ``` +pub type FairMutex = lock_api::Mutex; + +/// Creates a new fair mutex in an unlocked state ready for use.
+/// +/// This allows creating a fair mutex in a constant context on stable Rust. +pub const fn const_fair_mutex(val: T) -> FairMutex { + FairMutex::const_new(::INIT, val) +} + +/// An RAII implementation of a "scoped lock" of a mutex. When this structure is +/// dropped (falls out of scope), the lock will be unlocked. +/// +/// The data protected by the mutex can be accessed through this guard via its +/// `Deref` and `DerefMut` implementations. +pub type FairMutexGuard<'a, T> = lock_api::MutexGuard<'a, RawFairMutex, T>; + +/// An RAII mutex guard returned by `FairMutexGuard::map`, which can point to a +/// subfield of the protected data. +/// +/// The main difference between `MappedFairMutexGuard` and `FairMutexGuard` is that the +/// former doesn't support temporarily unlocking and re-locking, since that +/// could introduce soundness issues if the locked object is modified by another +/// thread. +pub type MappedFairMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawFairMutex, T>; + +#[cfg(test)] +mod tests { + use crate::FairMutex; + use std::sync::atomic::{AtomicUsize, Ordering}; + use std::sync::mpsc::channel; + use std::sync::Arc; + use std::thread; + + #[cfg(feature = "serde")] + use bincode::{deserialize, serialize}; + + #[derive(Eq, PartialEq, Debug)] + struct NonCopy(i32); + + #[test] + fn smoke() { + let m = FairMutex::new(()); + drop(m.lock()); + drop(m.lock()); + } + + #[test] + fn lots_and_lots() { + const J: u32 = 1000; + const K: u32 = 3; + + let m = Arc::new(FairMutex::new(0)); + + fn inc(m: &FairMutex) { + for _ in 0..J { + *m.lock() += 1; + } + } + + let (tx, rx) = channel(); + for _ in 0..K { + let tx2 = tx.clone(); + let m2 = m.clone(); + thread::spawn(move || { + inc(&m2); + tx2.send(()).unwrap(); + }); + let tx2 = tx.clone(); + let m2 = m.clone(); + thread::spawn(move || { + inc(&m2); + tx2.send(()).unwrap(); + }); + } + + drop(tx); + for _ in 0..2 * K { + rx.recv().unwrap(); + } + assert_eq!(*m.lock(), J * K * 2); + } + + #[test] + fn try_lock() { + let m = FairMutex::new(()); + *m.try_lock().unwrap() = (); + } + + #[test] + fn test_into_inner() { + let m = FairMutex::new(NonCopy(10)); + assert_eq!(m.into_inner(), NonCopy(10)); + } + + #[test] + fn test_into_inner_drop() { + struct Foo(Arc); + impl Drop for Foo { + fn drop(&mut self) { + self.0.fetch_add(1, Ordering::SeqCst); + } + } + let num_drops = Arc::new(AtomicUsize::new(0)); + let m = FairMutex::new(Foo(num_drops.clone())); + assert_eq!(num_drops.load(Ordering::SeqCst), 0); + { + let _inner = m.into_inner(); + assert_eq!(num_drops.load(Ordering::SeqCst), 0); + } + assert_eq!(num_drops.load(Ordering::SeqCst), 1); + } + + #[test] + fn test_get_mut() { + let mut m = FairMutex::new(NonCopy(10)); + *m.get_mut() = NonCopy(20); + assert_eq!(m.into_inner(), NonCopy(20)); + } + + #[test] + fn test_mutex_arc_nested() { + // Tests nested mutexes and access + // to underlying data. 
+ let arc = Arc::new(FairMutex::new(1)); + let arc2 = Arc::new(FairMutex::new(arc)); + let (tx, rx) = channel(); + let _t = thread::spawn(move || { + let lock = arc2.lock(); + let lock2 = lock.lock(); + assert_eq!(*lock2, 1); + tx.send(()).unwrap(); + }); + rx.recv().unwrap(); + } + + #[test] + fn test_mutex_arc_access_in_unwind() { + let arc = Arc::new(FairMutex::new(1)); + let arc2 = arc.clone(); + let _ = thread::spawn(move || { + struct Unwinder { + i: Arc>, + } + impl Drop for Unwinder { + fn drop(&mut self) { + *self.i.lock() += 1; + } + } + let _u = Unwinder { i: arc2 }; + panic!(); + }) + .join(); + let lock = arc.lock(); + assert_eq!(*lock, 2); + } + + #[test] + fn test_mutex_unsized() { + let mutex: &FairMutex<[i32]> = &FairMutex::new([1, 2, 3]); + { + let b = &mut *mutex.lock(); + b[0] = 4; + b[2] = 5; + } + let comp: &[i32] = &[4, 2, 5]; + assert_eq!(&*mutex.lock(), comp); + } + + #[test] + fn test_mutexguard_sync() { + fn sync(_: T) {} + + let mutex = FairMutex::new(()); + sync(mutex.lock()); + } + + #[test] + fn test_mutex_debug() { + let mutex = FairMutex::new(vec![0u8, 10]); + + assert_eq!(format!("{:?}", mutex), "Mutex { data: [0, 10] }"); + let _lock = mutex.lock(); + assert_eq!(format!("{:?}", mutex), "Mutex { data: }"); + } + + #[cfg(feature = "serde")] + #[test] + fn test_serde() { + let contents: Vec = vec![0, 1, 2]; + let mutex = FairMutex::new(contents.clone()); + + let serialized = serialize(&mutex).unwrap(); + let deserialized: FairMutex> = deserialize(&serialized).unwrap(); + + assert_eq!(*(mutex.lock()), *(deserialized.lock())); + assert_eq!(contents, *(deserialized.lock())); + } +} diff --git a/third_party/rust/parking_lot/src/lib.rs b/third_party/rust/parking_lot/src/lib.rs index 6272a9a731..73246c2e76 100644 --- a/third_party/rust/parking_lot/src/lib.rs +++ b/third_party/rust/parking_lot/src/lib.rs @@ -11,12 +11,14 @@ #![warn(missing_docs)] #![warn(rust_2018_idioms)] -#![cfg_attr(feature = "nightly", feature(asm))] +#![cfg_attr(feature = "nightly", feature(llvm_asm))] mod condvar; mod elision; +mod fair_mutex; mod mutex; mod once; +mod raw_fair_mutex; mod raw_mutex; mod raw_rwlock; mod remutex; @@ -28,16 +30,19 @@ pub mod deadlock; #[cfg(not(feature = "deadlock_detection"))] mod deadlock; -pub use ::lock_api as lock_api; pub use self::condvar::{Condvar, WaitTimeoutResult}; -pub use self::mutex::{MappedMutexGuard, Mutex, MutexGuard}; +pub use self::fair_mutex::{const_fair_mutex, FairMutex, FairMutexGuard, MappedFairMutexGuard}; +pub use self::mutex::{const_mutex, MappedMutexGuard, Mutex, MutexGuard}; pub use self::once::{Once, OnceState}; +pub use self::raw_fair_mutex::RawFairMutex; pub use self::raw_mutex::RawMutex; pub use self::raw_rwlock::RawRwLock; pub use self::remutex::{ - MappedReentrantMutexGuard, RawThreadId, ReentrantMutex, ReentrantMutexGuard, + const_reentrant_mutex, MappedReentrantMutexGuard, RawThreadId, ReentrantMutex, + ReentrantMutexGuard, }; pub use self::rwlock::{ - MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard, + const_rwlock, MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard, RwLockUpgradableReadGuard, RwLockWriteGuard, }; +pub use ::lock_api; diff --git a/third_party/rust/parking_lot/src/mutex.rs b/third_party/rust/parking_lot/src/mutex.rs index 4f88e58362..36e5ea7ec5 100644 --- a/third_party/rust/parking_lot/src/mutex.rs +++ b/third_party/rust/parking_lot/src/mutex.rs @@ -53,10 +53,9 @@ use lock_api; /// # Examples /// /// ``` -/// use std::sync::Arc; /// use parking_lot::Mutex; +/// use 
std::sync::{Arc, mpsc::channel}; /// use std::thread; -/// use std::sync::mpsc::channel; /// /// const N: usize = 10; /// @@ -87,6 +86,13 @@ use lock_api; /// ``` pub type Mutex = lock_api::Mutex; +/// Creates a new mutex in an unlocked state ready for use. +/// +/// This allows creating a mutex in a constant context on stable Rust. +pub const fn const_mutex(val: T) -> Mutex { + Mutex::const_new(::INIT, val) +} + /// An RAII implementation of a "scoped lock" of a mutex. When this structure is /// dropped (falls out of scope), the lock will be unlocked. /// @@ -245,7 +251,7 @@ mod tests { fn test_mutex_arc_access_in_unwind() { let arc = Arc::new(Mutex::new(1)); let arc2 = arc.clone(); - let _ = thread::spawn(move || -> () { + let _ = thread::spawn(move || { struct Unwinder { i: Arc>, } diff --git a/third_party/rust/parking_lot/src/once.rs b/third_party/rust/parking_lot/src/once.rs index fa7c1c1b60..f458c9c04b 100644 --- a/third_party/rust/parking_lot/src/once.rs +++ b/third_party/rust/parking_lot/src/once.rs @@ -6,25 +6,16 @@ // copied, modified, or distributed except according to those terms. use crate::util::UncheckedOptionExt; -#[cfg(has_sized_atomics)] -use core::sync::atomic::AtomicU8; -#[cfg(not(has_sized_atomics))] -use core::sync::atomic::AtomicUsize as AtomicU8; use core::{ fmt, mem, - sync::atomic::{fence, Ordering}, + sync::atomic::{fence, AtomicU8, Ordering}, }; use parking_lot_core::{self, SpinWait, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN}; -#[cfg(has_sized_atomics)] -type U8 = u8; -#[cfg(not(has_sized_atomics))] -type U8 = usize; - -const DONE_BIT: U8 = 1; -const POISON_BIT: U8 = 2; -const LOCKED_BIT: U8 = 4; -const PARKED_BIT: U8 = 8; +const DONE_BIT: u8 = 1; +const POISON_BIT: u8 = 2; +const LOCKED_BIT: u8 = 4; +const PARKED_BIT: u8 = 8; /// Current state of a `Once`. #[derive(Copy, Clone, Eq, PartialEq, Debug)] @@ -48,8 +39,8 @@ impl OnceState { /// Once an initialization routine for a `Once` has panicked it will forever /// indicate to future forced initialization routines that it is poisoned. #[inline] - pub fn poisoned(&self) -> bool { - match *self { + pub fn poisoned(self) -> bool { + match self { OnceState::Poisoned => true, _ => false, } @@ -58,8 +49,8 @@ impl OnceState { /// Returns whether the associated `Once` has successfully executed a /// closure. #[inline] - pub fn done(&self) -> bool { - match *self { + pub fn done(self) -> bool { + match self { OnceState::Done => true, _ => false, } @@ -194,7 +185,9 @@ impl Once { } let mut f = Some(f); - self.call_once_slow(true, &mut |state| unsafe { f.take().unchecked_unwrap()(state) }); + self.call_once_slow(true, &mut |state| unsafe { + f.take().unchecked_unwrap()(state) + }); } // This is a non-generic function to reduce the monomorphization cost of @@ -303,7 +296,11 @@ impl Once { // At this point we have the lock, so run the closure. Make sure we // properly clean up if the closure panicks. 
let guard = PanicGuard(self); - let once_state = if state & POISON_BIT != 0 { OnceState::Poisoned } else { OnceState::New }; + let once_state = if state & POISON_BIT != 0 { + OnceState::Poisoned + } else { + OnceState::New + }; f(once_state); mem::forget(guard); @@ -327,7 +324,9 @@ impl Default for Once { impl fmt::Debug for Once { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Once").field("state", &self.state()).finish() + f.debug_struct("Once") + .field("state", &self.state()) + .finish() } } diff --git a/third_party/rust/parking_lot/src/raw_fair_mutex.rs b/third_party/rust/parking_lot/src/raw_fair_mutex.rs new file mode 100644 index 0000000000..3eb7ddb779 --- /dev/null +++ b/third_party/rust/parking_lot/src/raw_fair_mutex.rs @@ -0,0 +1,60 @@ +// Copyright 2016 Amanieu d'Antras +// +// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be +// copied, modified, or distributed except according to those terms. + +use crate::raw_mutex::RawMutex; +use lock_api::RawMutexFair; + +/// Raw fair mutex type backed by the parking lot. +pub struct RawFairMutex(RawMutex); + +unsafe impl lock_api::RawMutex for RawFairMutex { + const INIT: Self = RawFairMutex(::INIT); + + type GuardMarker = ::GuardMarker; + + #[inline] + fn lock(&self) { + self.0.lock() + } + + #[inline] + fn try_lock(&self) -> bool { + self.0.try_lock() + } + + #[inline] + fn unlock(&self) { + self.unlock_fair() + } +} + +unsafe impl lock_api::RawMutexFair for RawFairMutex { + #[inline] + fn unlock_fair(&self) { + self.0.unlock_fair() + } + + #[inline] + fn bump(&self) { + self.0.bump() + } +} + +unsafe impl lock_api::RawMutexTimed for RawFairMutex { + type Duration = ::Duration; + type Instant = ::Instant; + + #[inline] + fn try_lock_until(&self, timeout: Self::Instant) -> bool { + self.0.try_lock_until(timeout) + } + + #[inline] + fn try_lock_for(&self, timeout: Self::Duration) -> bool { + self.0.try_lock_for(timeout) + } +} diff --git a/third_party/rust/parking_lot/src/raw_mutex.rs b/third_party/rust/parking_lot/src/raw_mutex.rs index 5d0933e575..ee39c3bd96 100644 --- a/third_party/rust/parking_lot/src/raw_mutex.rs +++ b/third_party/rust/parking_lot/src/raw_mutex.rs @@ -6,20 +6,14 @@ // copied, modified, or distributed except according to those terms. use crate::{deadlock, util}; -#[cfg(has_sized_atomics)] -use core::sync::atomic::AtomicU8; -#[cfg(not(has_sized_atomics))] -use core::sync::atomic::AtomicUsize as AtomicU8; -use core::{sync::atomic::Ordering, time::Duration}; -use lock_api::{GuardNoSend, RawMutex as RawMutexTrait, RawMutexFair, RawMutexTimed}; +use core::{ + sync::atomic::{AtomicU8, Ordering}, + time::Duration, +}; +use lock_api::{GuardNoSend, RawMutex as RawMutex_}; use parking_lot_core::{self, ParkResult, SpinWait, UnparkResult, UnparkToken, DEFAULT_PARK_TOKEN}; use std::time::Instant; -#[cfg(has_sized_atomics)] -type U8 = u8; -#[cfg(not(has_sized_atomics))] -type U8 = usize; - // UnparkToken used to indicate that that the target thread should attempt to // lock the mutex again as soon as it is unparked. pub(crate) const TOKEN_NORMAL: UnparkToken = UnparkToken(0); @@ -28,16 +22,43 @@ pub(crate) const TOKEN_NORMAL: UnparkToken = UnparkToken(0); // thread directly without unlocking it. pub(crate) const TOKEN_HANDOFF: UnparkToken = UnparkToken(1); -const LOCKED_BIT: U8 = 1; -const PARKED_BIT: U8 = 2; +/// This bit is set in the `state` of a `RawMutex` when that mutex is locked by some thread. 
+const LOCKED_BIT: u8 = 0b01; +/// This bit is set in the `state` of a `RawMutex` just before parking a thread. A thread is being +/// parked if it wants to lock the mutex, but the mutex is currently held by some other thread. +const PARKED_BIT: u8 = 0b10; /// Raw mutex type backed by the parking lot. pub struct RawMutex { + /// This atomic integer holds the current state of the mutex instance. Only the two lowest bits + /// are used. See `LOCKED_BIT` and `PARKED_BIT` for the bitmask for these bits. + /// + /// # State table: + /// + /// PARKED_BIT | LOCKED_BIT | Description + /// 0 | 0 | The mutex is not locked, nor is anyone waiting for it. + /// -----------+------------+------------------------------------------------------------------ + /// 0 | 1 | The mutex is locked by exactly one thread. No other thread is + /// | | waiting for it. + /// -----------+------------+------------------------------------------------------------------ + /// 1 | 0 | The mutex is not locked. One or more threads are parked or about to + /// | | park. At least one of the parked threads is just about to be + /// | | unparked, or a thread heading for parking might abort the park. + /// -----------+------------+------------------------------------------------------------------ + /// 1 | 1 | The mutex is locked by exactly one thread. One or more threads are + /// | | parked or about to park, waiting for the lock to become available. + /// | | In this state, PARKED_BIT is only ever cleared when a bucket lock + /// | | is held (i.e. in a parking_lot_core callback). This ensures that + /// | | we never end up in a situation where there are parked threads but + /// | | PARKED_BIT is not set (which would result in those threads + /// | | potentially never getting woken up). state: AtomicU8, } -unsafe impl RawMutexTrait for RawMutex { - const INIT: RawMutex = RawMutex { state: AtomicU8::new(0) }; +unsafe impl lock_api::RawMutex for RawMutex { + const INIT: RawMutex = RawMutex { + state: AtomicU8::new(0), + }; type GuardMarker = GuardNoSend; @@ -78,7 +99,10 @@ unsafe impl RawMutexTrait for RawMutex { #[inline] fn unlock(&self) { unsafe { deadlock::release_resource(self as *const _ as usize) }; - if self.state.compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed).is_ok() + if self + .state + .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed) + .is_ok() { return; } @@ -86,11 +110,14 @@ unsafe impl RawMutexTrait for RawMutex { } } -unsafe impl RawMutexFair for RawMutex { +unsafe impl lock_api::RawMutexFair for RawMutex { #[inline] fn unlock_fair(&self) { unsafe { deadlock::release_resource(self as *const _ as usize) }; - if self.state.compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed).is_ok() + if self + .state + .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed) + .is_ok() { return; } @@ -105,7 +132,7 @@ unsafe impl RawMutexFair for RawMutex { } } -unsafe impl RawMutexTimed for RawMutex { +unsafe impl lock_api::RawMutexTimed for RawMutex { type Duration = Duration; type Instant = Instant; @@ -212,37 +239,41 @@ impl RawMutex { } // Park our thread until we are woken up by an unlock - unsafe { - let addr = self as *const _ as usize; - let validate = || self.state.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT; - let before_sleep = || {}; - let timed_out = |_, was_last_thread| { - // Clear the parked bit if we were the last parked thread - if was_last_thread { - self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); - } - }; - match
parking_lot_core::park( + let addr = self as *const _ as usize; + let validate = || self.state.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT; + let before_sleep = || {}; + let timed_out = |_, was_last_thread| { + // Clear the parked bit if we were the last parked thread + if was_last_thread { + self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); + } + }; + // SAFETY: + // * `addr` is an address we control. + // * `validate`/`timed_out` does not panic or call into any function of `parking_lot`. + // * `before_sleep` does not call `park`, nor does it panic. + match unsafe { + parking_lot_core::park( addr, validate, before_sleep, timed_out, DEFAULT_PARK_TOKEN, timeout, - ) { - // The thread that unparked us passed the lock on to us - // directly without unlocking it. - ParkResult::Unparked(TOKEN_HANDOFF) => return true, + ) + } { + // The thread that unparked us passed the lock on to us + // directly without unlocking it. + ParkResult::Unparked(TOKEN_HANDOFF) => return true, - // We were unparked normally, try acquiring the lock again - ParkResult::Unparked(_) => (), + // We were unparked normally, try acquiring the lock again + ParkResult::Unparked(_) => (), - // The validation function failed, try locking again - ParkResult::Invalid => (), + // The validation function failed, try locking again + ParkResult::Invalid => (), - // Timeout expired - ParkResult::TimedOut => return false, - } + // Timeout expired + ParkResult::TimedOut => return false, } // Loop back and try locking again @@ -255,29 +286,32 @@ impl RawMutex { fn unlock_slow(&self, force_fair: bool) { // Unpark one thread and leave the parked bit set if there might // still be parked threads on this address. - unsafe { - let addr = self as *const _ as usize; - let callback = |result: UnparkResult| { - // If we are using a fair unlock then we should keep the - // mutex locked and hand it off to the unparked thread. - if result.unparked_threads != 0 && (force_fair || result.be_fair) { - // Clear the parked bit if there are no more parked - // threads. - if !result.have_more_threads { - self.state.store(LOCKED_BIT, Ordering::Relaxed); - } - return TOKEN_HANDOFF; + let addr = self as *const _ as usize; + let callback = |result: UnparkResult| { + // If we are using a fair unlock then we should keep the + // mutex locked and hand it off to the unparked thread. + if result.unparked_threads != 0 && (force_fair || result.be_fair) { + // Clear the parked bit if there are no more parked + // threads. + if !result.have_more_threads { + self.state.store(LOCKED_BIT, Ordering::Relaxed); } + return TOKEN_HANDOFF; + } - // Clear the locked bit, and the parked bit as well if there - // are no more parked threads. - if result.have_more_threads { - self.state.store(PARKED_BIT, Ordering::Release); - } else { - self.state.store(0, Ordering::Release); - } - TOKEN_NORMAL - }; + // Clear the locked bit, and the parked bit as well if there + // are no more parked threads. + if result.have_more_threads { + self.state.store(PARKED_BIT, Ordering::Release); + } else { + self.state.store(0, Ordering::Release); + } + TOKEN_NORMAL + }; + // SAFETY: + // * `addr` is an address we control. + // * `callback` does not panic or call into any function of `parking_lot`. 
+ unsafe { parking_lot_core::unpark_one(addr, callback); } } diff --git a/third_party/rust/parking_lot/src/raw_rwlock.rs b/third_party/rust/parking_lot/src/raw_rwlock.rs index afe0c4a052..8c1ad11256 100644 --- a/third_party/rust/parking_lot/src/raw_rwlock.rs +++ b/third_party/rust/parking_lot/src/raw_rwlock.rs @@ -12,11 +12,7 @@ use core::{ cell::Cell, sync::atomic::{AtomicUsize, Ordering}, }; -use lock_api::{ - GuardNoSend, RawRwLock as RawRwLockTrait, RawRwLockDowngrade, RawRwLockFair, - RawRwLockRecursive, RawRwLockRecursiveTimed, RawRwLockTimed, RawRwLockUpgrade, - RawRwLockUpgradeDowngrade, RawRwLockUpgradeFair, RawRwLockUpgradeTimed, -}; +use lock_api::{GuardNoSend, RawRwLock as RawRwLock_, RawRwLockUpgrade}; use parking_lot_core::{ self, deadlock, FilterOp, ParkResult, ParkToken, SpinWait, UnparkResult, UnparkToken, }; @@ -60,8 +56,10 @@ pub struct RawRwLock { state: AtomicUsize, } -unsafe impl RawRwLockTrait for RawRwLock { - const INIT: RawRwLock = RawRwLock { state: AtomicUsize::new(0) }; +unsafe impl lock_api::RawRwLock for RawRwLock { + const INIT: RawRwLock = RawRwLock { + state: AtomicUsize::new(0), + }; type GuardMarker = GuardNoSend; @@ -80,7 +78,10 @@ unsafe impl RawRwLockTrait for RawRwLock { #[inline] fn try_lock_exclusive(&self) -> bool { - if self.state.compare_exchange(0, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed).is_ok() + if self + .state + .compare_exchange(0, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed) + .is_ok() { self.deadlock_acquire(); true @@ -92,7 +93,10 @@ unsafe impl RawRwLockTrait for RawRwLock { #[inline] fn unlock_exclusive(&self) { self.deadlock_release(); - if self.state.compare_exchange(WRITER_BIT, 0, Ordering::Release, Ordering::Relaxed).is_ok() + if self + .state + .compare_exchange(WRITER_BIT, 0, Ordering::Release, Ordering::Relaxed) + .is_ok() { return; } @@ -110,8 +114,11 @@ unsafe impl RawRwLockTrait for RawRwLock { #[inline] fn try_lock_shared(&self) -> bool { - let result = - if self.try_lock_shared_fast(false) { true } else { self.try_lock_shared_slow(false) }; + let result = if self.try_lock_shared_fast(false) { + true + } else { + self.try_lock_shared_slow(false) + }; if result { self.deadlock_acquire(); } @@ -132,7 +139,7 @@ unsafe impl RawRwLockTrait for RawRwLock { } } -unsafe impl RawRwLockFair for RawRwLock { +unsafe impl lock_api::RawRwLockFair for RawRwLock { #[inline] fn unlock_shared_fair(&self) { // Shared unlocking is always fair in this implementation. 
@@ -142,7 +149,10 @@ unsafe impl RawRwLockFair for RawRwLock { #[inline] fn unlock_exclusive_fair(&self) { self.deadlock_release(); - if self.state.compare_exchange(WRITER_BIT, 0, Ordering::Release, Ordering::Relaxed).is_ok() + if self + .state + .compare_exchange(WRITER_BIT, 0, Ordering::Release, Ordering::Relaxed) + .is_ok() { return; } @@ -166,10 +176,12 @@ unsafe impl RawRwLockFair for RawRwLock { } } -unsafe impl RawRwLockDowngrade for RawRwLock { +unsafe impl lock_api::RawRwLockDowngrade for RawRwLock { #[inline] fn downgrade(&self) { - let state = self.state.fetch_add(ONE_READER - WRITER_BIT, Ordering::Release); + let state = self + .state + .fetch_add(ONE_READER - WRITER_BIT, Ordering::Release); // Wake up parked shared and upgradable threads if there are any if state & PARKED_BIT != 0 { @@ -178,7 +190,7 @@ unsafe impl RawRwLockDowngrade for RawRwLock { } } -unsafe impl RawRwLockTimed for RawRwLock { +unsafe impl lock_api::RawRwLockTimed for RawRwLock { type Duration = Duration; type Instant = Instant; @@ -243,7 +255,7 @@ unsafe impl RawRwLockTimed for RawRwLock { } } -unsafe impl RawRwLockRecursive for RawRwLock { +unsafe impl lock_api::RawRwLockRecursive for RawRwLock { #[inline] fn lock_shared_recursive(&self) { if !self.try_lock_shared_fast(true) { @@ -255,8 +267,11 @@ unsafe impl RawRwLockRecursive for RawRwLock { #[inline] fn try_lock_shared_recursive(&self) -> bool { - let result = - if self.try_lock_shared_fast(true) { true } else { self.try_lock_shared_slow(true) }; + let result = if self.try_lock_shared_fast(true) { + true + } else { + self.try_lock_shared_slow(true) + }; if result { self.deadlock_acquire(); } @@ -264,7 +279,7 @@ unsafe impl RawRwLockRecursive for RawRwLock { } } -unsafe impl RawRwLockRecursiveTimed for RawRwLock { +unsafe impl lock_api::RawRwLockRecursiveTimed for RawRwLock { #[inline] fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool { let result = if self.try_lock_shared_fast(true) { @@ -292,7 +307,7 @@ unsafe impl RawRwLockRecursiveTimed for RawRwLock { } } -unsafe impl RawRwLockUpgrade for RawRwLock { +unsafe impl lock_api::RawRwLockUpgrade for RawRwLock { #[inline] fn lock_upgradable(&self) { if !self.try_lock_upgradable_fast() { @@ -304,8 +319,11 @@ unsafe impl RawRwLockUpgrade for RawRwLock { #[inline] fn try_lock_upgradable(&self) -> bool { - let result = - if self.try_lock_upgradable_fast() { true } else { self.try_lock_upgradable_slow() }; + let result = if self.try_lock_upgradable_fast() { + true + } else { + self.try_lock_upgradable_slow() + }; if result { self.deadlock_acquire(); } @@ -335,8 +353,10 @@ unsafe impl RawRwLockUpgrade for RawRwLock { #[inline] fn upgrade(&self) { - let state = - self.state.fetch_sub((ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, Ordering::Relaxed); + let state = self.state.fetch_sub( + (ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, + Ordering::Relaxed, + ); if state & READERS_MASK != ONE_READER { let result = self.upgrade_slow(None); debug_assert!(result); @@ -362,7 +382,7 @@ unsafe impl RawRwLockUpgrade for RawRwLock { } } -unsafe impl RawRwLockUpgradeFair for RawRwLock { +unsafe impl lock_api::RawRwLockUpgradeFair for RawRwLock { #[inline] fn unlock_upgradable_fair(&self) { self.deadlock_release(); @@ -392,7 +412,7 @@ unsafe impl RawRwLockUpgradeFair for RawRwLock { } } -unsafe impl RawRwLockUpgradeDowngrade for RawRwLock { +unsafe impl lock_api::RawRwLockUpgradeDowngrade for RawRwLock { #[inline] fn downgrade_upgradable(&self) { let state = self.state.fetch_sub(UPGRADABLE_BIT, 
Ordering::Relaxed); @@ -405,8 +425,10 @@ unsafe impl RawRwLockUpgradeDowngrade for RawRwLock { #[inline] fn downgrade_to_upgradable(&self) { - let state = - self.state.fetch_add((ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, Ordering::Release); + let state = self.state.fetch_add( + (ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, + Ordering::Release, + ); // Wake up parked shared threads if there are any if state & PARKED_BIT != 0 { @@ -415,7 +437,7 @@ unsafe impl RawRwLockUpgradeDowngrade for RawRwLock { } } -unsafe impl RawRwLockUpgradeTimed for RawRwLock { +unsafe impl lock_api::RawRwLockUpgradeTimed for RawRwLock { #[inline] fn try_lock_upgradable_until(&self, timeout: Instant) -> bool { let result = if self.try_lock_upgradable_fast() { @@ -444,15 +466,23 @@ unsafe impl RawRwLockUpgradeTimed for RawRwLock { #[inline] fn try_upgrade_until(&self, timeout: Instant) -> bool { - let state = - self.state.fetch_sub((ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, Ordering::Relaxed); - if state & READERS_MASK == ONE_READER { true } else { self.upgrade_slow(Some(timeout)) } + let state = self.state.fetch_sub( + (ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, + Ordering::Relaxed, + ); + if state & READERS_MASK == ONE_READER { + true + } else { + self.upgrade_slow(Some(timeout)) + } } #[inline] fn try_upgrade_for(&self, timeout: Duration) -> bool { - let state = - self.state.fetch_sub((ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, Ordering::Relaxed); + let state = self.state.fetch_sub( + (ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, + Ordering::Relaxed, + ); if state & READERS_MASK == ONE_READER { true } else { @@ -481,7 +511,9 @@ impl RawRwLock { // readers try to acquire the lock. We only do this if the lock is // completely empty since elision handles conflicts poorly. if have_elision() && state == 0 { - self.state.elision_compare_exchange_acquire(0, ONE_READER).is_ok() + self.state + .elision_compare_exchange_acquire(0, ONE_READER) + .is_ok() } else if let Some(new_state) = state.checked_add(ONE_READER) { self.state .compare_exchange_weak(state, new_state, Ordering::Acquire, Ordering::Relaxed) @@ -509,7 +541,9 @@ impl RawRwLock { } else { match self.state.compare_exchange_weak( state, - state.checked_add(ONE_READER).expect("RwLock reader count overflow"), + state + .checked_add(ONE_READER) + .expect("RwLock reader count overflow"), Ordering::Acquire, Ordering::Relaxed, ) { @@ -564,29 +598,31 @@ impl RawRwLock { #[cold] fn lock_exclusive_slow(&self, timeout: Option) -> bool { + let try_lock = |state: &mut usize| { + loop { + if *state & (WRITER_BIT | UPGRADABLE_BIT) != 0 { + return false; + } + + // Grab WRITER_BIT if it isn't set, even if there are parked threads. + match self.state.compare_exchange_weak( + *state, + *state | WRITER_BIT, + Ordering::Acquire, + Ordering::Relaxed, + ) { + Ok(_) => return true, + Err(x) => *state = x, + } + } + }; + // Step 1: grab exclusive ownership of WRITER_BIT let timed_out = !self.lock_common( timeout, TOKEN_EXCLUSIVE, - |state| { - loop { - if *state & (WRITER_BIT | UPGRADABLE_BIT) != 0 { - return false; - } - - // Grab WRITER_BIT if it isn't set, even if there are parked threads. 
- match self.state.compare_exchange_weak( - *state, - *state | WRITER_BIT, - Ordering::Acquire, - Ordering::Relaxed, - ) { - Ok(_) => return true, - Err(x) => *state = x, - } - } - }, - |state| state & (WRITER_BIT | UPGRADABLE_BIT) != 0, + try_lock, + WRITER_BIT | UPGRADABLE_BIT, ); if timed_out { return false; @@ -618,111 +654,115 @@ impl RawRwLock { TOKEN_NORMAL } }; - self.wake_parked_threads(0, callback); + // SAFETY: `callback` does not panic or call into any function of `parking_lot`. + unsafe { + self.wake_parked_threads(0, callback); + } } #[cold] fn lock_shared_slow(&self, recursive: bool, timeout: Option) -> bool { - self.lock_common( - timeout, - TOKEN_SHARED, - |state| { - let mut spinwait_shared = SpinWait::new(); - loop { - // Use hardware lock elision to avoid cache conflicts when multiple - // readers try to acquire the lock. We only do this if the lock is - // completely empty since elision handles conflicts poorly. - if have_elision() && *state == 0 { - match self.state.elision_compare_exchange_acquire(0, ONE_READER) { - Ok(_) => return true, - Err(x) => *state = x, - } - } - - // This is the same condition as try_lock_shared_fast - if *state & WRITER_BIT != 0 { - if !recursive || *state & READERS_MASK == 0 { - return false; - } + let try_lock = |state: &mut usize| { + let mut spinwait_shared = SpinWait::new(); + loop { + // Use hardware lock elision to avoid cache conflicts when multiple + // readers try to acquire the lock. We only do this if the lock is + // completely empty since elision handles conflicts poorly. + if have_elision() && *state == 0 { + match self.state.elision_compare_exchange_acquire(0, ONE_READER) { + Ok(_) => return true, + Err(x) => *state = x, } + } - if self - .state - .compare_exchange_weak( - *state, - state.checked_add(ONE_READER).expect("RwLock reader count overflow"), - Ordering::Acquire, - Ordering::Relaxed, - ) - .is_ok() - { - return true; + // This is the same condition as try_lock_shared_fast + if *state & WRITER_BIT != 0 { + if !recursive || *state & READERS_MASK == 0 { + return false; } + } - // If there is high contention on the reader count then we want - // to leave some time between attempts to acquire the lock to - // let other threads make progress. - spinwait_shared.spin_no_yield(); - *state = self.state.load(Ordering::Relaxed); + if self + .state + .compare_exchange_weak( + *state, + state + .checked_add(ONE_READER) + .expect("RwLock reader count overflow"), + Ordering::Acquire, + Ordering::Relaxed, + ) + .is_ok() + { + return true; } - }, - |state| state & WRITER_BIT != 0, - ) + + // If there is high contention on the reader count then we want + // to leave some time between attempts to acquire the lock to + // let other threads make progress. + spinwait_shared.spin_no_yield(); + *state = self.state.load(Ordering::Relaxed); + } + }; + self.lock_common(timeout, TOKEN_SHARED, try_lock, WRITER_BIT) } #[cold] fn unlock_shared_slow(&self) { // At this point WRITER_PARKED_BIT is set and READER_MASK is empty. We // just need to wake up a potentially sleeping pending writer. + // Using the 2nd key at addr + 1 + let addr = self as *const _ as usize + 1; + let callback = |_result: UnparkResult| { + // Clear the WRITER_PARKED_BIT here since there can only be one + // parked writer thread. + self.state.fetch_and(!WRITER_PARKED_BIT, Ordering::Relaxed); + TOKEN_NORMAL + }; + // SAFETY: + // * `addr` is an address we control. + // * `callback` does not panic or call into any function of `parking_lot`. 
unsafe { - // Using the 2nd key at addr + 1 - let addr = self as *const _ as usize + 1; - let callback = |result: UnparkResult| { - // Clear the WRITER_PARKED_BIT here since there can only be one - // parked writer thread. - debug_assert!(!result.have_more_threads); - self.state.fetch_and(!WRITER_PARKED_BIT, Ordering::Relaxed); - TOKEN_NORMAL - }; parking_lot_core::unpark_one(addr, callback); } } #[cold] fn lock_upgradable_slow(&self, timeout: Option) -> bool { + let try_lock = |state: &mut usize| { + let mut spinwait_shared = SpinWait::new(); + loop { + if *state & (WRITER_BIT | UPGRADABLE_BIT) != 0 { + return false; + } + + if self + .state + .compare_exchange_weak( + *state, + state + .checked_add(ONE_READER | UPGRADABLE_BIT) + .expect("RwLock reader count overflow"), + Ordering::Acquire, + Ordering::Relaxed, + ) + .is_ok() + { + return true; + } + + // If there is high contention on the reader count then we want + // to leave some time between attempts to acquire the lock to + // let other threads make progress. + spinwait_shared.spin_no_yield(); + *state = self.state.load(Ordering::Relaxed); + } + }; self.lock_common( timeout, TOKEN_UPGRADABLE, - |state| { - let mut spinwait_shared = SpinWait::new(); - loop { - if *state & (WRITER_BIT | UPGRADABLE_BIT) != 0 { - return false; - } - - if self - .state - .compare_exchange_weak( - *state, - state - .checked_add(ONE_READER | UPGRADABLE_BIT) - .expect("RwLock reader count overflow"), - Ordering::Acquire, - Ordering::Relaxed, - ) - .is_ok() - { - return true; - } - - // If there is high contention on the reader count then we want - // to leave some time between attempts to acquire the lock to - // let other threads make progress. - spinwait_shared.spin_no_yield(); - *state = self.state.load(Ordering::Relaxed); - } - }, - |state| state & (WRITER_BIT | UPGRADABLE_BIT) != 0, + try_lock, + WRITER_BIT | UPGRADABLE_BIT, ) } @@ -789,7 +829,10 @@ impl RawRwLock { } } }; - self.wake_parked_threads(0, callback); + // SAFETY: `callback` does not panic or call into any function of `parking_lot`. + unsafe { + self.wake_parked_threads(0, callback); + } } #[cold] @@ -826,7 +869,10 @@ impl RawRwLock { } TOKEN_NORMAL }; - self.wake_parked_threads(ONE_READER, callback); + // SAFETY: `callback` does not panic or call into any function of `parking_lot`. + unsafe { + self.wake_parked_threads(ONE_READER, callback); + } } #[cold] @@ -839,7 +885,10 @@ impl RawRwLock { } TOKEN_NORMAL }; - self.wake_parked_threads(ONE_READER | UPGRADABLE_BIT, callback); + // SAFETY: `callback` does not panic or call into any function of `parking_lot`. + unsafe { + self.wake_parked_threads(ONE_READER | UPGRADABLE_BIT, callback); + } } #[cold] @@ -862,41 +911,49 @@ impl RawRwLock { self.lock_upgradable(); } - // Common code for waking up parked threads after releasing WRITER_BIT or - // UPGRADABLE_BIT. + /// Common code for waking up parked threads after releasing WRITER_BIT or + /// UPGRADABLE_BIT. + /// + /// # Safety + /// + /// `callback` must uphold the requirements of the `callback` parameter to + /// `parking_lot_core::unpark_filter`. Meaning no panics or calls into any function in + /// `parking_lot`. 
#[inline] - fn wake_parked_threads(&self, new_state: usize, callback: C) - where - C: FnOnce(usize, UnparkResult) -> UnparkToken, - { + unsafe fn wake_parked_threads( + &self, + new_state: usize, + callback: impl FnOnce(usize, UnparkResult) -> UnparkToken, + ) { // We must wake up at least one upgrader or writer if there is one, // otherwise they may end up parked indefinitely since unlock_shared // does not call wake_parked_threads. let new_state = Cell::new(new_state); - unsafe { - let addr = self as *const _ as usize; - let filter = |ParkToken(token)| { - let s = new_state.get(); + let addr = self as *const _ as usize; + let filter = |ParkToken(token)| { + let s = new_state.get(); - // If we are waking up a writer, don't wake anything else. - if s & WRITER_BIT != 0 { - return FilterOp::Stop; - } + // If we are waking up a writer, don't wake anything else. + if s & WRITER_BIT != 0 { + return FilterOp::Stop; + } - // Otherwise wake *all* readers and one upgrader/writer. - if token & (UPGRADABLE_BIT | WRITER_BIT) != 0 && s & UPGRADABLE_BIT != 0 { - // Skip writers and upgradable readers if we already have - // a writer/upgradable reader. - FilterOp::Skip - } else { - new_state.set(s + token); - FilterOp::Unpark - } - }; - parking_lot_core::unpark_filter(addr, filter, |result| { - callback(new_state.get(), result) - }); - } + // Otherwise wake *all* readers and one upgrader/writer. + if token & (UPGRADABLE_BIT | WRITER_BIT) != 0 && s & UPGRADABLE_BIT != 0 { + // Skip writers and upgradable readers if we already have + // a writer/upgradable reader. + FilterOp::Skip + } else { + new_state.set(s + token); + FilterOp::Unpark + } + }; + let callback = |result| callback(new_state.get(), result); + // SAFETY: + // * `addr` is an address we control. + // * `filter` does not panic or call into any function of `parking_lot`. + // * `callback` safety responsibility is on caller + parking_lot_core::unpark_filter(addr, filter, callback); } // Common code for waiting for readers to exit the lock after acquiring @@ -928,71 +985,75 @@ impl RawRwLock { } // Park our thread until we are woken up by an unlock - unsafe { - // Using the 2nd key at addr + 1 - let addr = self as *const _ as usize + 1; - let validate = || { - let state = self.state.load(Ordering::Relaxed); - state & READERS_MASK != 0 && state & WRITER_PARKED_BIT != 0 - }; - let before_sleep = || {}; - let timed_out = |_, _| {}; - match parking_lot_core::park( + // Using the 2nd key at addr + 1 + let addr = self as *const _ as usize + 1; + let validate = || { + let state = self.state.load(Ordering::Relaxed); + state & READERS_MASK != 0 && state & WRITER_PARKED_BIT != 0 + }; + let before_sleep = || {}; + let timed_out = |_, _| {}; + // SAFETY: + // * `addr` is an address we control. + // * `validate`/`timed_out` does not panic or call into any function of `parking_lot`. + // * `before_sleep` does not call `park`, nor does it panic. + let park_result = unsafe { + parking_lot_core::park( addr, validate, before_sleep, timed_out, TOKEN_EXCLUSIVE, timeout, - ) { - // We still need to re-check the state if we are unparked - // since a previous writer timing-out could have allowed - // another reader to sneak in before we parked. - ParkResult::Unparked(_) | ParkResult::Invalid => { - state = self.state.load(Ordering::Relaxed); - continue; - } + ) + }; + match park_result { + // We still need to re-check the state if we are unparked + // since a previous writer timing-out could have allowed + // another reader to sneak in before we parked. 
+ ParkResult::Unparked(_) | ParkResult::Invalid => { + state = self.state.load(Ordering::Relaxed); + continue; + } - // Timeout expired - ParkResult::TimedOut => { - // We need to release WRITER_BIT and revert back to - // our previous value. We also wake up any threads that - // might be waiting on WRITER_BIT. - let state = self.state.fetch_add( - prev_value.wrapping_sub(WRITER_BIT | WRITER_PARKED_BIT), - Ordering::Relaxed, - ); - if state & PARKED_BIT != 0 { - let callback = |_, result: UnparkResult| { - // Clear the parked bit if there no more parked threads - if !result.have_more_threads { - self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); - } - TOKEN_NORMAL - }; + // Timeout expired + ParkResult::TimedOut => { + // We need to release WRITER_BIT and revert back to + // our previous value. We also wake up any threads that + // might be waiting on WRITER_BIT. + let state = self.state.fetch_add( + prev_value.wrapping_sub(WRITER_BIT | WRITER_PARKED_BIT), + Ordering::Relaxed, + ); + if state & PARKED_BIT != 0 { + let callback = |_, result: UnparkResult| { + // Clear the parked bit if there no more parked threads + if !result.have_more_threads { + self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); + } + TOKEN_NORMAL + }; + // SAFETY: `callback` does not panic or call any function of `parking_lot`. + unsafe { self.wake_parked_threads(ONE_READER | UPGRADABLE_BIT, callback); } - return false; } + return false; } } } true } - // Common code for acquiring a lock + /// Common code for acquiring a lock #[inline] - fn lock_common( + fn lock_common( &self, timeout: Option, token: ParkToken, - mut try_lock: F, - validate: V, - ) -> bool - where - F: FnMut(&mut usize) -> bool, - V: Fn(usize) -> bool, - { + mut try_lock: impl FnMut(&mut usize) -> bool, + validate_flags: usize, + ) -> bool { let mut spinwait = SpinWait::new(); let mut state = self.state.load(Ordering::Relaxed); loop { @@ -1021,40 +1082,39 @@ impl RawRwLock { } // Park our thread until we are woken up by an unlock - unsafe { - let addr = self as *const _ as usize; - let validate = || { - let state = self.state.load(Ordering::Relaxed); - state & PARKED_BIT != 0 && validate(state) - }; - let before_sleep = || {}; - let timed_out = |_, was_last_thread| { - // Clear the parked bit if we were the last parked thread - if was_last_thread { - self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); - } - }; - match parking_lot_core::park( - addr, - validate, - before_sleep, - timed_out, - token, - timeout, - ) { - // The thread that unparked us passed the lock on to us - // directly without unlocking it. - ParkResult::Unparked(TOKEN_HANDOFF) => return true, + let addr = self as *const _ as usize; + let validate = || { + let state = self.state.load(Ordering::Relaxed); + state & PARKED_BIT != 0 && (state & validate_flags != 0) + }; + let before_sleep = || {}; + let timed_out = |_, was_last_thread| { + // Clear the parked bit if we were the last parked thread + if was_last_thread { + self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); + } + }; + + // SAFETY: + // * `addr` is an address we control. + // * `validate`/`timed_out` does not panic or call into any function of `parking_lot`. + // * `before_sleep` does not call `park`, nor does it panic. + let park_result = unsafe { + parking_lot_core::park(addr, validate, before_sleep, timed_out, token, timeout) + }; + match park_result { + // The thread that unparked us passed the lock on to us + // directly without unlocking it. 
+ ParkResult::Unparked(TOKEN_HANDOFF) => return true, - // We were unparked normally, try acquiring the lock again - ParkResult::Unparked(_) => (), + // We were unparked normally, try acquiring the lock again + ParkResult::Unparked(_) => (), - // The validation function failed, try locking again - ParkResult::Invalid => (), + // The validation function failed, try locking again + ParkResult::Invalid => (), - // Timeout expired - ParkResult::TimedOut => return false, - } + // Timeout expired + ParkResult::TimedOut => return false, } // Loop back and try locking again diff --git a/third_party/rust/parking_lot/src/remutex.rs b/third_party/rust/parking_lot/src/remutex.rs index b244f42866..1037923018 100644 --- a/third_party/rust/parking_lot/src/remutex.rs +++ b/third_party/rust/parking_lot/src/remutex.rs @@ -17,8 +17,9 @@ unsafe impl GetThreadId for RawThreadId { fn nonzero_thread_id(&self) -> NonZeroUsize { // The address of a thread-local variable is guaranteed to be unique to the - // current thread, and is also guaranteed to be non-zero. - thread_local!(static KEY: u8 = unsafe { ::std::mem::uninitialized() }); + // current thread, and is also guaranteed to be non-zero. The variable has to have a + // non-zero size to guarantee it has a unique address for each thread. + thread_local!(static KEY: u8 = 0); KEY.with(|x| { NonZeroUsize::new(x as *const _ as usize) .expect("thread-local variable address is null") @@ -35,10 +36,21 @@ unsafe impl GetThreadId for RawThreadId { /// - `ReentrantMutexGuard` does not give mutable references to the locked data. /// Use a `RefCell` if you need this. /// -/// See [`Mutex`](struct.Mutex.html) for more details about the underlying mutex +/// See [`Mutex`](type.Mutex.html) for more details about the underlying mutex /// primitive. pub type ReentrantMutex = lock_api::ReentrantMutex; +/// Creates a new reentrant mutex in an unlocked state ready for use. +/// +/// This allows creating a reentrant mutex in a constant context on stable Rust. +pub const fn const_reentrant_mutex(val: T) -> ReentrantMutex { + ReentrantMutex::const_new( + ::INIT, + ::INIT, + val, + ) +} + /// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure /// is dropped (falls out of scope), the lock will be unlocked. /// @@ -68,18 +80,18 @@ mod tests { #[test] fn smoke() { - let m = ReentrantMutex::new(()); + let m = ReentrantMutex::new(2); { let a = m.lock(); { let b = m.lock(); { let c = m.lock(); - assert_eq!(*c, ()); + assert_eq!(*c, 2); } - assert_eq!(*b, ()); + assert_eq!(*b, 2); } - assert_eq!(*a, ()); + assert_eq!(*a, 2); } } diff --git a/third_party/rust/parking_lot/src/rwlock.rs b/third_party/rust/parking_lot/src/rwlock.rs index a6e160aad2..0381316eaa 100644 --- a/third_party/rust/parking_lot/src/rwlock.rs +++ b/third_party/rust/parking_lot/src/rwlock.rs @@ -88,6 +88,13 @@ use lock_api; /// ``` pub type RwLock = lock_api::RwLock; +/// Creates a new instance of an `RwLock` which is unlocked. +/// +/// This allows creating a `RwLock` in a constant context on stable Rust. +pub const fn const_rwlock(val: T) -> RwLock { + RwLock::const_new(::INIT, val) +} + /// RAII structure used to release the shared read access of a lock when /// dropped. 
pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, RawRwLock, T>; @@ -322,7 +329,7 @@ mod tests { fn test_rw_arc_access_in_unwind() { let arc = Arc::new(RwLock::new(1)); let arc2 = arc.clone(); - let _ = thread::spawn(move || -> () { + let _ = thread::spawn(move || { struct Unwinder { i: Arc>, } @@ -359,7 +366,10 @@ mod tests { let read_guard = lock.read(); let read_result = lock.try_read(); - assert!(read_result.is_some(), "try_read should succeed while read_guard is in scope"); + assert!( + read_result.is_some(), + "try_read should succeed while read_guard is in scope" + ); drop(read_guard); } @@ -378,7 +388,10 @@ mod tests { let write_guard = lock.write(); let read_result = lock.try_read(); - assert!(read_result.is_none(), "try_read should fail while write_guard is in scope"); + assert!( + read_result.is_none(), + "try_read should fail while write_guard is in scope" + ); drop(write_guard); } @@ -391,7 +404,10 @@ mod tests { let read_guard = lock.read(); let write_result = lock.try_write(); - assert!(write_result.is_none(), "try_write should fail while read_guard is in scope"); + assert!( + write_result.is_none(), + "try_write should fail while read_guard is in scope" + ); drop(read_guard); } @@ -410,7 +426,10 @@ mod tests { let write_guard = lock.write(); let write_result = lock.try_write(); - assert!(write_result.is_none(), "try_write should fail while write_guard is in scope"); + assert!( + write_result.is_none(), + "try_write should fail while write_guard is in scope" + ); drop(write_guard); } @@ -567,4 +586,28 @@ mod tests { assert_eq!(*(mutex.read()), *(deserialized.read())); assert_eq!(contents, *(deserialized.read())); } + + #[test] + fn test_issue_203() { + struct Bar(RwLock<()>); + + impl Drop for Bar { + fn drop(&mut self) { + let _n = self.0.write(); + } + } + + thread_local! { + static B: Bar = Bar(RwLock::new(())); + } + + thread::spawn(|| { + B.with(|_| ()); + + let a = RwLock::new(()); + let _a = a.read(); + }) + .join() + .unwrap(); + } } diff --git a/third_party/rust/parking_lot/src/util.rs b/third_party/rust/parking_lot/src/util.rs index 90e008b9cf..c5496fc00b 100644 --- a/third_party/rust/parking_lot/src/util.rs +++ b/third_party/rust/parking_lot/src/util.rs @@ -34,10 +34,5 @@ unsafe fn unreachable() -> ! { #[inline] pub fn to_deadline(timeout: Duration) -> Option { - #[cfg(has_checked_instant)] - let deadline = Instant::now().checked_add(timeout); - #[cfg(not(has_checked_instant))] - let deadline = Some(Instant::now() + timeout); - - deadline + Instant::now().checked_add(timeout) } diff --git a/third_party/rust/parking_lot/tests/issue_203.rs b/third_party/rust/parking_lot/tests/issue_203.rs new file mode 100644 index 0000000000..a77a95f8ae --- /dev/null +++ b/third_party/rust/parking_lot/tests/issue_203.rs @@ -0,0 +1,26 @@ +use parking_lot::RwLock; +use std::thread; + +struct Bar(RwLock<()>); + +impl Drop for Bar { + fn drop(&mut self) { + let _n = self.0.write(); + } +} + +thread_local! 
{ + static B: Bar = Bar(RwLock::new(())); +} + +#[test] +fn main() { + thread::spawn(|| { + B.with(|_| ()); + + let a = RwLock::new(()); + let _a = a.read(); + }) + .join() + .unwrap(); +} diff --git a/third_party/rust/parking_lot_core/.cargo-checksum.json b/third_party/rust/parking_lot_core/.cargo-checksum.json index d052f943c2..bd27ad98d1 100644 --- a/third_party/rust/parking_lot_core/.cargo-checksum.json +++ b/third_party/rust/parking_lot_core/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"b63bbe68314522e15a5bbe3ae70bd92278f96301e3b7bca99bf11375c7914be6","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","build.rs":"d6aa24b67fdcacf238778c5efaf1f622ec7f7a7ec27fa051f415a1e2d31f3532","src/lib.rs":"5f93085983b6fe90306e2a8b19102a5e5dc495c6628e5bea0806ad6143fdf6a2","src/parking_lot.rs":"fcd9a449ecd98544b3e728c5c0e19eec8963a5131a529f4a89aed96bf2844e5e","src/spinwait.rs":"d568d8a81f9144ec4c4a139dc934d7d04ee1656a4a221eb548742fe7aba09ab1","src/thread_parker/cloudabi.rs":"fe21f7b70a80b5fa0fa3209e56a090bf8b0b7dba26f2199d37477208f3f7e47d","src/thread_parker/generic.rs":"2f501c6e46fcff434ba9e13ae8859e66de3327f601ed92989b310124e4129ff4","src/thread_parker/linux.rs":"853fd22f51215d1f553ad6461ad3c92c4ec9c294e607e69ed5f53b2e8c7a11d7","src/thread_parker/mod.rs":"e23da913e184c12e2f566beabdcbb141df0610dabf3ea83e6b5cefede51c4bcf","src/thread_parker/redox.rs":"081c76af1e24be12da45d8093e261c48d558342ac2ac64dc3f7dd95eaaa1bf11","src/thread_parker/sgx.rs":"3fd71a7066db58189f302d2344e4e425320f82c298ca482ca4318bae44ae37fd","src/thread_parker/unix.rs":"da20f3151add154947054c8c7cab22c93231ade2e5dfe43c78eba7dbfc1aea5d","src/thread_parker/wasm.rs":"b4c9f9e9c1fd636b235a0e8e0227c954b1e7432d8394b58af77b348cdfa2141e","src/thread_parker/wasm_atomic.rs":"a1ab05981a833e72d8d353350ab2b95e6f833cd7224591e595ccdb3692968c23","src/thread_parker/windows/keyed_event.rs":"e0c2ed647e0550bffa003160405b5f4ddd40500134c2eb15c3eb598792c30e84","src/thread_parker/windows/mod.rs":"7702ff9b72ac647ec998a9b205ace961a28839fcd94631fb750ca459e4804260","src/thread_parker/windows/waitaddress.rs":"06d994633006e237dc940f377432ea00cf1609e56096d69d46f7bb3b80eeb857","src/util.rs":"285e6133150645525f2ca1ece41f6d35bad4e7c5e08b42b20c99d2a97e04a974","src/word_lock.rs":"e5af5bdae754f4799d1e0e0bbdcf48b82213ca5bfc785104aa27f3d6ea728dd4"},"package":"b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b"} \ No newline at end of file 
+{"files":{"Cargo.toml":"79dd446832ea5ac3330902a4de04bae062dea978229c5a0bc6117a794ba0c71b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","src/lib.rs":"058dddad16d91c8d0160fa2a78bb5f7c2f801f2fd9770fc387c5843395bf0379","src/parking_lot.rs":"540104584ea70aa10425b786e4d49c8a3e3b56496b78a7ba1a356d03d97204e2","src/spinwait.rs":"d568d8a81f9144ec4c4a139dc934d7d04ee1656a4a221eb548742fe7aba09ab1","src/thread_parker/cloudabi.rs":"0668b50898c20e7267ac6cc977e7ad376a18958e2d07faeca8199794d873d2eb","src/thread_parker/generic.rs":"2f501c6e46fcff434ba9e13ae8859e66de3327f601ed92989b310124e4129ff4","src/thread_parker/linux.rs":"853fd22f51215d1f553ad6461ad3c92c4ec9c294e607e69ed5f53b2e8c7a11d7","src/thread_parker/mod.rs":"5bc2100d2f575608b5b76e626ca92ce3ba4830176ecc773c5594cda6ca0905e9","src/thread_parker/redox.rs":"081c76af1e24be12da45d8093e261c48d558342ac2ac64dc3f7dd95eaaa1bf11","src/thread_parker/sgx.rs":"3fd71a7066db58189f302d2344e4e425320f82c298ca482ca4318bae44ae37fd","src/thread_parker/unix.rs":"77e1f049207b7e89b22ef05e5134c7538b31fff99aa9660784136f96fec1845a","src/thread_parker/wasm.rs":"b4c9f9e9c1fd636b235a0e8e0227c954b1e7432d8394b58af77b348cdfa2141e","src/thread_parker/wasm_atomic.rs":"a1ab05981a833e72d8d353350ab2b95e6f833cd7224591e595ccdb3692968c23","src/thread_parker/windows/keyed_event.rs":"34fc4693e7afd69a5c426ae7face83b8363f114a44dece44197cd03861cfdded","src/thread_parker/windows/mod.rs":"7702ff9b72ac647ec998a9b205ace961a28839fcd94631fb750ca459e4804260","src/thread_parker/windows/waitaddress.rs":"06d994633006e237dc940f377432ea00cf1609e56096d69d46f7bb3b80eeb857","src/util.rs":"285e6133150645525f2ca1ece41f6d35bad4e7c5e08b42b20c99d2a97e04a974","src/word_lock.rs":"2c030aedb340ae8ca564365206452c298fe29986d005d6a40e808c9760f91c95"},"package":"0e136c1904604defe99ce5fd71a28d473fa60a12255d511aa78a9ddf11237aeb"} \ No newline at end of file diff --git a/third_party/rust/parking_lot_core/Cargo.toml b/third_party/rust/parking_lot_core/Cargo.toml index ea8d5ee101..4cfd81a244 100644 --- a/third_party/rust/parking_lot_core/Cargo.toml +++ b/third_party/rust/parking_lot_core/Cargo.toml @@ -13,7 +13,7 @@ [package] edition = "2018" name = "parking_lot_core" -version = "0.6.2" +version = "0.7.1" authors = ["Amanieu d'Antras "] description = "An advanced API for creating custom synchronization primitives." 
keywords = ["mutex", "condvar", "rwlock", "once", "thread"] @@ -28,17 +28,15 @@ optional = true version = "0.1.5" [dependencies.petgraph] -version = "0.4.5" +version = "0.5" optional = true [dependencies.smallvec] -version = "0.6" +version = "1.0" [dependencies.thread-id] version = "3.2.0" optional = true -[build-dependencies.rustc_version] -version = "0.2" [features] deadlock_detection = ["petgraph", "thread-id", "backtrace"] diff --git a/third_party/rust/parking_lot_core/build.rs b/third_party/rust/parking_lot_core/build.rs deleted file mode 100644 index 417a770ae8..0000000000 --- a/third_party/rust/parking_lot_core/build.rs +++ /dev/null @@ -1,7 +0,0 @@ -use rustc_version::{version, Version}; - -fn main() { - if version().unwrap() >= Version::parse("1.34.0").unwrap() { - println!("cargo:rustc-cfg=has_sized_atomics"); - } -} diff --git a/third_party/rust/parking_lot_core/src/lib.rs b/third_party/rust/parking_lot_core/src/lib.rs index 6fad7801f1..ba4cb1e8c9 100644 --- a/third_party/rust/parking_lot_core/src/lib.rs +++ b/third_party/rust/parking_lot_core/src/lib.rs @@ -49,11 +49,11 @@ target_arch = "wasm32", target_feature = "atomics" ), - feature(checked_duration_since, stdsimd) + feature(stdsimd) )] #![cfg_attr( all(feature = "nightly", target_os = "cloudabi",), - feature(thread_local, checked_duration_since) + feature(thread_local) )] mod parking_lot; diff --git a/third_party/rust/parking_lot_core/src/parking_lot.rs b/third_party/rust/parking_lot_core/src/parking_lot.rs index 24962511f0..fb87aefe29 100644 --- a/third_party/rust/parking_lot_core/src/parking_lot.rs +++ b/third_party/rust/parking_lot_core/src/parking_lot.rs @@ -17,6 +17,13 @@ use smallvec::SmallVec; use std::time::{Duration, Instant}; static NUM_THREADS: AtomicUsize = AtomicUsize::new(0); + +/// Holds the pointer to the currently active `HashTable`. +/// +/// # Safety +/// +/// Except for the initial value of null, it must always point to a valid `HashTable` instance. +/// Any `HashTable` this global static has ever pointed to must never be freed. static HASHTABLE: AtomicPtr = AtomicPtr::new(ptr::null_mut()); // Even with 3x more buckets than threads, the memory overhead per thread is @@ -72,7 +79,7 @@ impl Bucket { #[inline] pub fn new(timeout: Instant, seed: u32) -> Self { Self { - mutex: WordLock::INIT, + mutex: WordLock::new(), queue_head: Cell::new(ptr::null()), queue_tail: Cell::new(ptr::null()), fair_timeout: UnsafeCell::new(FairTimeout::new(timeout, seed)), @@ -146,9 +153,7 @@ impl ThreadData { // Keep track of the total number of live ThreadData objects and resize // the hash table accordingly. let num_threads = NUM_THREADS.fetch_add(1, Ordering::Relaxed) + 1; - unsafe { - grow_hashtable(num_threads); - } + grow_hashtable(num_threads); ThreadData { parker: ThreadParker::new(), @@ -184,116 +189,92 @@ impl Drop for ThreadData { } } -// Get a pointer to the latest hash table, creating one if it doesn't exist yet. +/// Returns a reference to the latest hash table, creating one if it doesn't exist yet. +/// The reference is valid forever. However, the `HashTable` it references might become stale +/// at any point. Meaning it still exists, but it is not the instance in active use. #[inline] -fn get_hashtable() -> *mut HashTable { +fn get_hashtable() -> &'static HashTable { let table = HASHTABLE.load(Ordering::Acquire); // If there is no table, create one if table.is_null() { create_hashtable() } else { - table + // SAFETY: when not null, `HASHTABLE` always points to a `HashTable` that is never freed. 
+ unsafe { &*table } } } -// Get a pointer to the latest hash table, creating one if it doesn't exist yet. +/// Returns a reference to the latest hash table, creating one if it doesn't exist yet. +/// The reference is valid forever. However, the `HashTable` it references might become stale +/// at any point. Meaning it still exists, but it is not the instance in active use. #[cold] -fn create_hashtable() -> *mut HashTable { +fn create_hashtable() -> &'static HashTable { let new_table = Box::into_raw(HashTable::new(LOAD_FACTOR, ptr::null())); - // If this fails then it means some other thread created the hash - // table first. - match HASHTABLE.compare_exchange( + // If this fails then it means some other thread created the hash table first. + let table = match HASHTABLE.compare_exchange( ptr::null_mut(), new_table, - Ordering::Release, - Ordering::Relaxed, + Ordering::AcqRel, + Ordering::Acquire, ) { Ok(_) => new_table, Err(old_table) => { // Free the table we created + // SAFETY: `new_table` is created from `Box::into_raw` above and only freed here. unsafe { Box::from_raw(new_table); } old_table } - } + }; + // SAFETY: The `HashTable` behind `table` is never freed. It is either the table pointer we + // created here, or it is one loaded from `HASHTABLE`. + unsafe { &*table } } // Grow the hash table so that it is big enough for the given number of threads. // This isn't performance-critical since it is only done when a ThreadData is // created, which only happens once per thread. -unsafe fn grow_hashtable(num_threads: usize) { - // If there is no table, create one - if HASHTABLE.load(Ordering::Relaxed).is_null() { - let new_table = Box::into_raw(HashTable::new(num_threads, ptr::null())); - - // If this fails then it means some other thread created the hash - // table first. - if HASHTABLE - .compare_exchange( - ptr::null_mut(), - new_table, - Ordering::Release, - Ordering::Relaxed, - ) - .is_ok() - { - return; - } - - // Free the table we created - Box::from_raw(new_table); - } - - let mut old_table; - loop { - old_table = HASHTABLE.load(Ordering::Acquire); +fn grow_hashtable(num_threads: usize) { + // Lock all buckets in the existing table and get a reference to it + let old_table = loop { + let table = get_hashtable(); // Check if we need to resize the existing table - if (*old_table).entries.len() >= LOAD_FACTOR * num_threads { + if table.entries.len() >= LOAD_FACTOR * num_threads { return; } // Lock all buckets in the old table - for b in &(*old_table).entries[..] { - b.mutex.lock(); + for bucket in &table.entries[..] { + bucket.mutex.lock(); } // Now check if our table is still the latest one. Another thread could // have grown the hash table between us reading HASHTABLE and locking // the buckets. - if HASHTABLE.load(Ordering::Relaxed) == old_table { - break; + if HASHTABLE.load(Ordering::Relaxed) == table as *const _ as *mut _ { + break table; } // Unlock buckets and try again - for b in &(*old_table).entries[..] { - b.mutex.unlock(); + for bucket in &table.entries[..] { + // SAFETY: We hold the lock here, as required + unsafe { bucket.mutex.unlock() }; } - } + }; // Create the new table - let new_table = HashTable::new(num_threads, old_table); + let mut new_table = HashTable::new(num_threads, old_table); // Move the entries from the old table to the new one - for b in &(*old_table).entries[..] 
{ - let mut current = b.queue_head.get(); - while !current.is_null() { - let next = (*current).next_in_queue.get(); - let hash = hash((*current).key.load(Ordering::Relaxed), new_table.hash_bits); - if new_table.entries[hash].queue_tail.get().is_null() { - new_table.entries[hash].queue_head.set(current); - } else { - (*new_table.entries[hash].queue_tail.get()) - .next_in_queue - .set(current); - } - new_table.entries[hash].queue_tail.set(current); - (*current).next_in_queue.set(ptr::null()); - current = next; - } + for bucket in &old_table.entries[..] { + // SAFETY: The park, unpark* and check_wait_graph_fast functions create only correct linked + // lists. All `ThreadData` instances in these lists will remain valid as long as they are + // present in the lists, meaning as long as their threads are parked. + unsafe { rehash_bucket_into(bucket, &mut new_table) }; } // Publish the new table. No races are possible at this point because @@ -302,8 +283,36 @@ unsafe fn grow_hashtable(num_threads: usize) { HASHTABLE.store(Box::into_raw(new_table), Ordering::Release); // Unlock all buckets in the old table - for b in &(*old_table).entries[..] { - b.mutex.unlock(); + for bucket in &old_table.entries[..] { + // SAFETY: We hold the lock here, as required + unsafe { bucket.mutex.unlock() }; + } +} + +/// Iterate through all `ThreadData` objects in the bucket and insert them into the given table +/// in the bucket their key correspond to for this table. +/// +/// # Safety +/// +/// The given `bucket` must have a correctly constructed linked list under `queue_head`, containing +/// `ThreadData` instances that must stay valid at least as long as the given `table` is in use. +/// +/// The given `table` must only contain buckets with correctly constructed linked lists. +unsafe fn rehash_bucket_into(bucket: &'static Bucket, table: &mut HashTable) { + let mut current: *const ThreadData = bucket.queue_head.get(); + while !current.is_null() { + let next = (*current).next_in_queue.get(); + let hash = hash((*current).key.load(Ordering::Relaxed), table.hash_bits); + if table.entries[hash].queue_tail.get().is_null() { + table.entries[hash].queue_head.set(current); + } else { + (*table.entries[hash].queue_tail.get()) + .next_in_queue + .set(current); + } + table.entries[hash].queue_tail.set(current); + (*current).next_in_queue.set(ptr::null()); + current = next; } } @@ -319,41 +328,42 @@ fn hash(key: usize, bits: u32) -> usize { key.wrapping_mul(0x9E3779B97F4A7C15) >> (64 - bits) } -// Lock the bucket for the given key +/// Locks the bucket for the given key and returns a reference to it. +/// The returned bucket must be unlocked again in order to not cause deadlocks. #[inline] -unsafe fn lock_bucket<'a>(key: usize) -> &'a Bucket { - let mut bucket; +fn lock_bucket(key: usize) -> &'static Bucket { loop { let hashtable = get_hashtable(); - let hash = hash(key, (*hashtable).hash_bits); - bucket = &(*hashtable).entries[hash]; + let hash = hash(key, hashtable.hash_bits); + let bucket = &hashtable.entries[hash]; // Lock the bucket bucket.mutex.lock(); // If no other thread has rehashed the table before we grabbed the lock // then we are good to go! The lock we grabbed prevents any rehashes. 
- if HASHTABLE.load(Ordering::Relaxed) == hashtable { + if HASHTABLE.load(Ordering::Relaxed) == hashtable as *const _ as *mut _ { return bucket; } // Unlock the bucket and try again - bucket.mutex.unlock(); + // SAFETY: We hold the lock here, as required + unsafe { bucket.mutex.unlock() }; } } -// Lock the bucket for the given key, but check that the key hasn't been changed -// in the meantime due to a requeue. +/// Locks the bucket for the given key and returns a reference to it. But checks that the key +/// hasn't been changed in the meantime due to a requeue. +/// The returned bucket must be unlocked again in order to not cause deadlocks. #[inline] -unsafe fn lock_bucket_checked<'a>(key: &AtomicUsize) -> (usize, &'a Bucket) { - let mut bucket; +fn lock_bucket_checked(key: &AtomicUsize) -> (usize, &'static Bucket) { loop { let hashtable = get_hashtable(); let current_key = key.load(Ordering::Relaxed); - let hash = hash(current_key, (*hashtable).hash_bits); - bucket = &(*hashtable).entries[hash]; + let hash = hash(current_key, hashtable.hash_bits); + let bucket = &hashtable.entries[hash]; // Lock the bucket bucket.mutex.lock(); @@ -361,59 +371,69 @@ unsafe fn lock_bucket_checked<'a>(key: &AtomicUsize) -> (usize, &'a Bucket) { // Check that both the hash table and key are correct while the bucket // is locked. Note that the key can't change once we locked the proper // bucket for it, so we just keep trying until we have the correct key. - if HASHTABLE.load(Ordering::Relaxed) == hashtable + if HASHTABLE.load(Ordering::Relaxed) == hashtable as *const _ as *mut _ && key.load(Ordering::Relaxed) == current_key { return (current_key, bucket); } // Unlock the bucket and try again - bucket.mutex.unlock(); + // SAFETY: We hold the lock here, as required + unsafe { bucket.mutex.unlock() }; } } -// Lock the two buckets for the given pair of keys +/// Locks the two buckets for the given pair of keys and returns references to them. +/// The returned buckets must be unlocked again in order to not cause deadlocks. +/// +/// If both keys hash to the same value, both returned references will be to the same bucket. Be +/// careful to only unlock it once in this case, always use `unlock_bucket_pair`. #[inline] -unsafe fn lock_bucket_pair<'a>(key1: usize, key2: usize) -> (&'a Bucket, &'a Bucket) { - let mut bucket1; +fn lock_bucket_pair(key1: usize, key2: usize) -> (&'static Bucket, &'static Bucket) { loop { let hashtable = get_hashtable(); - // Get the lowest bucket first - let hash1 = hash(key1, (*hashtable).hash_bits); - let hash2 = hash(key2, (*hashtable).hash_bits); - if hash1 <= hash2 { - bucket1 = &(*hashtable).entries[hash1]; + let hash1 = hash(key1, hashtable.hash_bits); + let hash2 = hash(key2, hashtable.hash_bits); + + // Get the bucket at the lowest hash/index first + let bucket1 = if hash1 <= hash2 { + &hashtable.entries[hash1] } else { - bucket1 = &(*hashtable).entries[hash2]; - } + &hashtable.entries[hash2] + }; // Lock the first bucket bucket1.mutex.lock(); // If no other thread has rehashed the table before we grabbed the lock // then we are good to go! The lock we grabbed prevents any rehashes. 
- if HASHTABLE.load(Ordering::Relaxed) == hashtable { + if HASHTABLE.load(Ordering::Relaxed) == hashtable as *const _ as *mut _ { // Now lock the second bucket and return the two buckets if hash1 == hash2 { return (bucket1, bucket1); } else if hash1 < hash2 { - let bucket2 = &(*hashtable).entries[hash2]; + let bucket2 = &hashtable.entries[hash2]; bucket2.mutex.lock(); return (bucket1, bucket2); } else { - let bucket2 = &(*hashtable).entries[hash1]; + let bucket2 = &hashtable.entries[hash1]; bucket2.mutex.lock(); return (bucket2, bucket1); } } // Unlock the bucket and try again - bucket1.mutex.unlock(); + // SAFETY: We hold the lock here, as required + unsafe { bucket1.mutex.unlock() }; } } -// Unlock a pair of buckets +/// Unlock a pair of buckets +/// +/// # Safety +/// +/// Both buckets must be locked #[inline] unsafe fn unlock_bucket_pair(bucket1: &Bucket, bucket2: &Bucket) { bucket1.mutex.unlock(); @@ -559,6 +579,7 @@ pub unsafe fn park( // If the validation function fails, just return if !validate() { + // SAFETY: We hold the lock here, as required bucket.mutex.unlock(); return ParkResult::Invalid; } @@ -575,6 +596,7 @@ pub unsafe fn park( bucket.queue_head.set(thread_data); } bucket.queue_tail.set(thread_data); + // SAFETY: We hold the lock here, as required bucket.mutex.unlock(); // Invoke the pre-sleep callback @@ -605,6 +627,7 @@ pub unsafe fn park( // Now we need to check again if we were unparked or timed out. Unlike the // last check this is precise because we hold the bucket lock. if !thread_data.parker.timed_out() { + // SAFETY: We hold the lock here, as required bucket.mutex.unlock(); return ParkResult::Unparked(thread_data.unpark_token.get()); } @@ -652,6 +675,7 @@ pub unsafe fn park( debug_assert!(!current.is_null()); // Unlock the bucket, we are done + // SAFETY: We hold the lock here, as required bucket.mutex.unlock(); ParkResult::TimedOut }) @@ -723,6 +747,7 @@ pub unsafe fn unpark_one( // the queue locked while we perform a system call. Finally we wake // up the parked thread. let handle = (*current).parker.unpark_lock(); + // SAFETY: We hold the lock here, as required bucket.mutex.unlock(); handle.unpark(); @@ -736,6 +761,7 @@ pub unsafe fn unpark_one( // No threads with a matching key were found in the bucket callback(result); + // SAFETY: We hold the lock here, as required bucket.mutex.unlock(); result } @@ -786,6 +812,7 @@ pub unsafe fn unpark_all(key: usize, unpark_token: UnparkToken) -> usize { } // Unlock the bucket + // SAFETY: We hold the lock here, as required bucket.mutex.unlock(); // Now that we are outside the lock, wake up all the threads that we removed @@ -839,6 +866,7 @@ pub unsafe fn unpark_requeue( let mut result = UnparkResult::default(); let op = validate(); if op == RequeueOp::Abort { + // SAFETY: Both buckets are locked, as required. unlock_bucket_pair(bucket_from, bucket_to); return result; } @@ -919,9 +947,11 @@ pub unsafe fn unpark_requeue( if let Some(wakeup_thread) = wakeup_thread { (*wakeup_thread).unpark_token.set(token); let handle = (*wakeup_thread).parker.unpark_lock(); + // SAFETY: Both buckets are locked, as required. unlock_bucket_pair(bucket_from, bucket_to); handle.unpark(); } else { + // SAFETY: Both buckets are locked, as required. 
unlock_bucket_pair(bucket_from, bucket_to); } @@ -1018,6 +1048,7 @@ pub unsafe fn unpark_filter( t.1 = Some((*t.0).parker.unpark_lock()); } + // SAFETY: We hold the lock here, as required bucket.mutex.unlock(); // Now that we are outside the lock, wake up all the threads that we removed @@ -1041,7 +1072,10 @@ pub mod deadlock { /// Acquire a resource identified by key in the deadlock detector /// Noop if deadlock_detection feature isn't enabled. - /// Note: Call after the resource is acquired + /// + /// # Safety + /// + /// Call after the resource is acquired #[inline] pub unsafe fn acquire_resource(_key: usize) { #[cfg(feature = "deadlock_detection")] @@ -1050,9 +1084,14 @@ pub mod deadlock { /// Release a resource identified by key in the deadlock detector. /// Noop if deadlock_detection feature isn't enabled. - /// Note: Call before the resource is released + /// /// # Panics + /// /// Panics if the resource was already released or wasn't acquired in this thread. + /// + /// # Safety + /// + /// Call before the resource is released #[inline] pub unsafe fn release_resource(_key: usize) { #[cfg(feature = "deadlock_detection")] @@ -1159,10 +1198,14 @@ mod deadlock_impl { pub unsafe fn release_resource(key: usize) { with_thread_data(|thread_data| { let resources = &mut (*thread_data.deadlock_data.resources.get()); - match resources.iter().rposition(|x| *x == key) { - Some(p) => resources.swap_remove(p), - None => panic!("key {} not found in thread resources", key), - }; + + // There is only one situation where we can fail to find the + // resource: we are currently running TLS destructors and our + // ThreadData has already been freed. There isn't much we can do + // about it at this point, so just ignore it. + if let Some(p) = resources.iter().rposition(|x| *x == key) { + resources.swap_remove(p); + } }); } @@ -1202,6 +1245,7 @@ mod deadlock_impl { } current = (*current).next_in_queue.get(); } + // SAFETY: We hold the lock here, as required b.mutex.unlock(); } @@ -1220,25 +1264,26 @@ mod deadlock_impl { // Returns all detected thread wait cycles. // Note that once a cycle is reported it's never reported again. unsafe fn check_wait_graph_slow() -> Vec> { - static DEADLOCK_DETECTION_LOCK: WordLock = WordLock::INIT; + static DEADLOCK_DETECTION_LOCK: WordLock = WordLock::new(); DEADLOCK_DETECTION_LOCK.lock(); let mut table = get_hashtable(); loop { // Lock all buckets in the old table - for b in &(*table).entries[..] { + for b in &table.entries[..] { b.mutex.lock(); } // Now check if our table is still the latest one. Another thread could // have grown the hash table between us getting and locking the hash table. let new_table = get_hashtable(); - if new_table == table { + if new_table as *const _ == table as *const _ { break; } // Unlock buckets and try again - for b in &(*table).entries[..] { + for b in &table.entries[..] { + // SAFETY: We hold the lock here, as required b.mutex.unlock(); } @@ -1249,7 +1294,7 @@ mod deadlock_impl { let mut graph = DiGraphMap::::with_capacity(thread_count * 2, thread_count * 2); - for b in &(*table).entries[..] { + for b in &table.entries[..] { let mut current = b.queue_head.get(); while !current.is_null() { if !(*current).parked_with_timeout.get() @@ -1270,7 +1315,8 @@ mod deadlock_impl { } } - for b in &(*table).entries[..] { + for b in &table.entries[..] 
{ + // SAFETY: We hold the lock here, as required b.mutex.unlock(); } @@ -1286,6 +1332,7 @@ mod deadlock_impl { (*td).deadlock_data.deadlocked.set(true); *(*td).deadlock_data.backtrace_sender.get() = Some(sender.clone()); let handle = (*td).parker.unpark_lock(); + // SAFETY: We hold the lock here, as required bucket.mutex.unlock(); // unpark the deadlocked thread! // on unpark it'll notice the deadlocked flag and report back @@ -1346,3 +1393,276 @@ mod deadlock_impl { cycles.iter().cloned().collect() } } + +#[cfg(test)] +mod tests { + use super::{ThreadData, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN}; + use std::{ + ptr, + sync::{ + atomic::{AtomicIsize, AtomicPtr, AtomicUsize, Ordering}, + Arc, + }, + thread, + time::Duration, + }; + + /// Calls a closure for every `ThreadData` currently parked on a given key + fn for_each(key: usize, mut f: impl FnMut(&ThreadData)) { + let bucket = super::lock_bucket(key); + + let mut current: *const ThreadData = bucket.queue_head.get(); + while !current.is_null() { + let current_ref = unsafe { &*current }; + if current_ref.key.load(Ordering::Relaxed) == key { + f(current_ref); + } + current = current_ref.next_in_queue.get(); + } + + // SAFETY: We hold the lock here, as required + unsafe { bucket.mutex.unlock() }; + } + + macro_rules! test { + ( $( $name:ident( + repeats: $repeats:expr, + latches: $latches:expr, + delay: $delay:expr, + threads: $threads:expr, + single_unparks: $single_unparks:expr); + )* ) => { + $(#[test] + fn $name() { + let delay = Duration::from_micros($delay); + for _ in 0..$repeats { + run_parking_test($latches, delay, $threads, $single_unparks); + } + })* + }; + } + + test! { + unpark_all_one_fast( + repeats: 10000, latches: 1, delay: 0, threads: 1, single_unparks: 0 + ); + unpark_all_hundred_fast( + repeats: 100, latches: 1, delay: 0, threads: 100, single_unparks: 0 + ); + unpark_one_one_fast( + repeats: 1000, latches: 1, delay: 0, threads: 1, single_unparks: 1 + ); + unpark_one_hundred_fast( + repeats: 20, latches: 1, delay: 0, threads: 100, single_unparks: 100 + ); + unpark_one_fifty_then_fifty_all_fast( + repeats: 50, latches: 1, delay: 0, threads: 100, single_unparks: 50 + ); + unpark_all_one( + repeats: 100, latches: 1, delay: 10000, threads: 1, single_unparks: 0 + ); + unpark_all_hundred( + repeats: 100, latches: 1, delay: 10000, threads: 100, single_unparks: 0 + ); + unpark_one_one( + repeats: 10, latches: 1, delay: 10000, threads: 1, single_unparks: 1 + ); + unpark_one_fifty( + repeats: 1, latches: 1, delay: 10000, threads: 50, single_unparks: 50 + ); + unpark_one_fifty_then_fifty_all( + repeats: 2, latches: 1, delay: 10000, threads: 100, single_unparks: 50 + ); + hundred_unpark_all_one_fast( + repeats: 100, latches: 100, delay: 0, threads: 1, single_unparks: 0 + ); + hundred_unpark_all_one( + repeats: 1, latches: 100, delay: 10000, threads: 1, single_unparks: 0 + ); + } + + fn run_parking_test( + num_latches: usize, + delay: Duration, + num_threads: usize, + num_single_unparks: usize, + ) { + let mut tests = Vec::with_capacity(num_latches); + + for _ in 0..num_latches { + let test = Arc::new(SingleLatchTest::new(num_threads)); + let mut threads = Vec::with_capacity(num_threads); + for _ in 0..num_threads { + let test = test.clone(); + threads.push(thread::spawn(move || test.run())); + } + tests.push((test, threads)); + } + + for unpark_index in 0..num_single_unparks { + thread::sleep(delay); + for (test, _) in &tests { + test.unpark_one(unpark_index); + } + } + + for (test, threads) in tests { + 
test.finish(num_single_unparks); + for thread in threads { + thread.join().expect("Test thread panic"); + } + } + } + + struct SingleLatchTest { + semaphore: AtomicIsize, + num_awake: AtomicUsize, + /// Holds the pointer to the last *unprocessed* woken up thread. + last_awoken: AtomicPtr, + /// Total number of threads participating in this test. + num_threads: usize, + } + + impl SingleLatchTest { + pub fn new(num_threads: usize) -> Self { + Self { + // This implements a fair (FIFO) semaphore, and it starts out unavailable. + semaphore: AtomicIsize::new(0), + num_awake: AtomicUsize::new(0), + last_awoken: AtomicPtr::new(ptr::null_mut()), + num_threads, + } + } + + pub fn run(&self) { + // Get one slot from the semaphore + self.down(); + + // Report back to the test verification code that this thread woke up + let this_thread_ptr = super::with_thread_data(|t| t as *const _ as *mut _); + self.last_awoken.store(this_thread_ptr, Ordering::SeqCst); + self.num_awake.fetch_add(1, Ordering::SeqCst); + } + + pub fn unpark_one(&self, single_unpark_index: usize) { + // last_awoken should be null at all times except between self.up() and at the bottom + // of this method where it's reset to null again + assert!(self.last_awoken.load(Ordering::SeqCst).is_null()); + + let mut queue: Vec<*mut ThreadData> = Vec::with_capacity(self.num_threads); + for_each(self.semaphore_addr(), |thread_data| { + queue.push(thread_data as *const _ as *mut _); + }); + assert!(queue.len() <= self.num_threads - single_unpark_index); + + let num_awake_before_up = self.num_awake.load(Ordering::SeqCst); + + self.up(); + + // Wait for a parked thread to wake up and update num_awake + last_awoken. + while self.num_awake.load(Ordering::SeqCst) != num_awake_before_up + 1 { + thread::yield_now(); + } + + // At this point the other thread should have set last_awoken inside the run() method + let last_awoken = self.last_awoken.load(Ordering::SeqCst); + assert!(!last_awoken.is_null()); + if !queue.is_empty() && queue[0] != last_awoken { + panic!( + "Woke up wrong thread:\n\tqueue: {:?}\n\tlast awoken: {:?}", + queue, last_awoken + ); + } + self.last_awoken.store(ptr::null_mut(), Ordering::SeqCst); + } + + pub fn finish(&self, num_single_unparks: usize) { + // The amount of threads not unparked via unpark_one + let mut num_threads_left = self.num_threads.checked_sub(num_single_unparks).unwrap(); + + // Wake remaining threads up with unpark_all. Has to be in a loop, because there might + // still be threads that has not yet parked. + while num_threads_left > 0 { + let mut num_waiting_on_address = 0; + for_each(self.semaphore_addr(), |_thread_data| { + num_waiting_on_address += 1; + }); + assert!(num_waiting_on_address <= num_threads_left); + + let num_awake_before_unpark = self.num_awake.load(Ordering::SeqCst); + + let num_unparked = + unsafe { super::unpark_all(self.semaphore_addr(), DEFAULT_UNPARK_TOKEN) }; + assert!(num_unparked >= num_waiting_on_address); + assert!(num_unparked <= num_threads_left); + + // Wait for all unparked threads to wake up and update num_awake + last_awoken. 
+ while self.num_awake.load(Ordering::SeqCst) + != num_awake_before_unpark + num_unparked + { + thread::yield_now() + } + + num_threads_left = num_threads_left.checked_sub(num_unparked).unwrap(); + } + // By now, all threads should have been woken up + assert_eq!(self.num_awake.load(Ordering::SeqCst), self.num_threads); + + // Make sure no thread is parked on our semaphore address + let mut num_waiting_on_address = 0; + for_each(self.semaphore_addr(), |_thread_data| { + num_waiting_on_address += 1; + }); + assert_eq!(num_waiting_on_address, 0); + } + + pub fn down(&self) { + let old_semaphore_value = self.semaphore.fetch_sub(1, Ordering::SeqCst); + + if old_semaphore_value > 0 { + // We acquired the semaphore. Done. + return; + } + + // We need to wait. + let validate = || true; + let before_sleep = || {}; + let timed_out = |_, _| {}; + unsafe { + super::park( + self.semaphore_addr(), + validate, + before_sleep, + timed_out, + DEFAULT_PARK_TOKEN, + None, + ); + } + } + + pub fn up(&self) { + let old_semaphore_value = self.semaphore.fetch_add(1, Ordering::SeqCst); + + // Check if anyone was waiting on the semaphore. If they were, then pass ownership to them. + if old_semaphore_value < 0 { + // We need to continue until we have actually unparked someone. It might be that + // the thread we want to pass ownership to has decremented the semaphore counter, + // but not yet parked. + loop { + match unsafe { + super::unpark_one(self.semaphore_addr(), |_| DEFAULT_UNPARK_TOKEN) + .unparked_threads + } { + 1 => break, + 0 => (), + i => panic!("Should not wake up {} threads", i), + } + } + } + } + + fn semaphore_addr(&self) -> usize { + &self.semaphore as *const _ as usize + } + } +} diff --git a/third_party/rust/parking_lot_core/src/thread_parker/cloudabi.rs b/third_party/rust/parking_lot_core/src/thread_parker/cloudabi.rs index d5be2f26cb..520cc72990 100644 --- a/third_party/rust/parking_lot_core/src/thread_parker/cloudabi.rs +++ b/third_party/rust/parking_lot_core/src/thread_parker/cloudabi.rs @@ -8,7 +8,7 @@ use cloudabi as abi; use core::{ cell::Cell, - mem, + mem::{self, MaybeUninit}, sync::atomic::{AtomicU32, Ordering}, }; use std::{convert::TryFrom, thread, time::Instant}; @@ -70,11 +70,11 @@ impl Lock { }, ..mem::zeroed() }; - let mut event: abi::event = mem::uninitialized(); - let mut nevents: usize = mem::uninitialized(); - let ret = abi::poll(&subscription, &mut event, 1, &mut nevents); + let mut event = MaybeUninit::::uninit(); + let mut nevents: usize = 0; + let ret = abi::poll(&subscription, event.as_mut_ptr(), 1, &mut nevents); debug_assert_eq!(ret, abi::errno::SUCCESS); - debug_assert_eq!(event.error, abi::errno::SUCCESS); + debug_assert_eq!(event.assume_init().error, abi::errno::SUCCESS); LockGuard { lock: &self.lock } }) @@ -146,12 +146,12 @@ impl Condvar { }, ..mem::zeroed() }; - let mut event: abi::event = mem::uninitialized(); - let mut nevents: usize = mem::uninitialized(); + let mut event = MaybeUninit::::uninit(); + let mut nevents: usize = 0; - let ret = abi::poll(&subscription, &mut event, 1, &mut nevents); + let ret = abi::poll(&subscription, event.as_mut_ptr(), 1, &mut nevents); debug_assert_eq!(ret, abi::errno::SUCCESS); - debug_assert_eq!(event.error, abi::errno::SUCCESS); + debug_assert_eq!(event.assume_init().error, abi::errno::SUCCESS); } } @@ -184,11 +184,17 @@ impl Condvar { ..mem::zeroed() }, ]; - let mut events: [abi::event; 2] = mem::uninitialized(); - let mut nevents: usize = mem::uninitialized(); - - let ret = abi::poll(subscriptions.as_ptr(), 
events.as_mut_ptr(), 2, &mut nevents); + let mut events = MaybeUninit::<[abi::event; 2]>::uninit(); + let mut nevents: usize = 0; + + let ret = abi::poll( + subscriptions.as_ptr(), + events.as_mut_ptr() as *mut _, + 2, + &mut nevents, + ); debug_assert_eq!(ret, abi::errno::SUCCESS); + let events = events.assume_init(); for i in 0..nevents { debug_assert_eq!(events[i].error, abi::errno::SUCCESS); if events[i].type_ == abi::eventtype::CONDVAR { diff --git a/third_party/rust/parking_lot_core/src/thread_parker/mod.rs b/third_party/rust/parking_lot_core/src/thread_parker/mod.rs index f39d639ef4..4c721c3da9 100644 --- a/third_party/rust/parking_lot_core/src/thread_parker/mod.rs +++ b/third_party/rust/parking_lot_core/src/thread_parker/mod.rs @@ -47,12 +47,11 @@ pub trait UnparkHandleT { /// /// This method is unsafe for the same reason as the unsafe methods in /// `ThreadParkerT`. - #[inline] unsafe fn unpark(self); } cfg_if! { - if #[cfg(all(has_sized_atomics, any(target_os = "linux", target_os = "android")))] { + if #[cfg(any(target_os = "linux", target_os = "android"))] { #[path = "linux.rs"] mod imp; } else if #[cfg(unix)] { @@ -61,7 +60,7 @@ cfg_if! { } else if #[cfg(windows)] { #[path = "windows/mod.rs"] mod imp; - } else if #[cfg(all(has_sized_atomics, target_os = "redox"))] { + } else if #[cfg(target_os = "redox")] { #[path = "redox.rs"] mod imp; } else if #[cfg(all(target_env = "sgx", target_vendor = "fortanix"))] { diff --git a/third_party/rust/parking_lot_core/src/thread_parker/unix.rs b/third_party/rust/parking_lot_core/src/thread_parker/unix.rs index 42c0f3124d..e61ab623ef 100644 --- a/third_party/rust/parking_lot_core/src/thread_parker/unix.rs +++ b/third_party/rust/parking_lot_core/src/thread_parker/unix.rs @@ -9,7 +9,7 @@ use core::ptr; use core::{ cell::{Cell, UnsafeCell}, - mem, + mem::MaybeUninit, }; use libc; use std::{ @@ -137,14 +137,14 @@ impl ThreadParker { #[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "android")))] #[inline] unsafe fn init(&self) { - let mut attr: libc::pthread_condattr_t = mem::uninitialized(); - let r = libc::pthread_condattr_init(&mut attr); + let mut attr = MaybeUninit::::uninit(); + let r = libc::pthread_condattr_init(attr.as_mut_ptr()); debug_assert_eq!(r, 0); - let r = libc::pthread_condattr_setclock(&mut attr, libc::CLOCK_MONOTONIC); + let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC); debug_assert_eq!(r, 0); - let r = libc::pthread_cond_init(self.condvar.get(), &attr); + let r = libc::pthread_cond_init(self.condvar.get(), attr.as_ptr()); debug_assert_eq!(r, 0); - let r = libc::pthread_condattr_destroy(&mut attr); + let r = libc::pthread_condattr_destroy(attr.as_mut_ptr()); debug_assert_eq!(r, 0); } } @@ -196,9 +196,11 @@ impl super::UnparkHandleT for UnparkHandle { #[cfg(any(target_os = "macos", target_os = "ios"))] #[inline] fn timespec_now() -> libc::timespec { - let mut now: libc::timeval = unsafe { mem::uninitialized() }; - let r = unsafe { libc::gettimeofday(&mut now, ptr::null_mut()) }; + let mut now = MaybeUninit::::uninit(); + let r = unsafe { libc::gettimeofday(now.as_mut_ptr(), ptr::null_mut()) }; debug_assert_eq!(r, 0); + // SAFETY: We know `libc::gettimeofday` has initialized the value. 
+ let now = unsafe { now.assume_init() }; libc::timespec { tv_sec: now.tv_sec, tv_nsec: now.tv_usec as tv_nsec_t * 1000, @@ -207,7 +209,7 @@ fn timespec_now() -> libc::timespec { #[cfg(not(any(target_os = "macos", target_os = "ios")))] #[inline] fn timespec_now() -> libc::timespec { - let mut now: libc::timespec = unsafe { mem::uninitialized() }; + let mut now = MaybeUninit::::uninit(); let clock = if cfg!(target_os = "android") { // Android doesn't support pthread_condattr_setclock, so we need to // specify the timeout in CLOCK_REALTIME. @@ -215,9 +217,10 @@ fn timespec_now() -> libc::timespec { } else { libc::CLOCK_MONOTONIC }; - let r = unsafe { libc::clock_gettime(clock, &mut now) }; + let r = unsafe { libc::clock_gettime(clock, now.as_mut_ptr()) }; debug_assert_eq!(r, 0); - now + // SAFETY: We know `libc::clock_gettime` has initialized the value. + unsafe { now.assume_init() } } // Converts a relative timeout into an absolute timeout in the clock used by diff --git a/third_party/rust/parking_lot_core/src/thread_parker/windows/keyed_event.rs b/third_party/rust/parking_lot_core/src/thread_parker/windows/keyed_event.rs index 71b55f0f3e..7b516fe196 100644 --- a/third_party/rust/parking_lot_core/src/thread_parker/windows/keyed_event.rs +++ b/third_party/rust/parking_lot_core/src/thread_parker/windows/keyed_event.rs @@ -5,7 +5,10 @@ // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. -use core::{mem, ptr}; +use core::{ + mem::{self, MaybeUninit}, + ptr, +}; use std::{ sync::atomic::{AtomicUsize, Ordering}, time::Instant, @@ -88,9 +91,9 @@ impl KeyedEvent { ObjectAttributes: PVOID, Flags: ULONG, ) -> NTSTATUS = mem::transmute(NtCreateKeyedEvent); - let mut handle = mem::uninitialized(); + let mut handle = MaybeUninit::uninit(); let status = NtCreateKeyedEvent( - &mut handle, + handle.as_mut_ptr(), GENERIC_READ | GENERIC_WRITE, ptr::null_mut(), 0, @@ -100,7 +103,7 @@ impl KeyedEvent { } Some(KeyedEvent { - handle, + handle: handle.assume_init(), NtReleaseKeyedEvent: mem::transmute(NtReleaseKeyedEvent), NtWaitForKeyedEvent: mem::transmute(NtWaitForKeyedEvent), }) diff --git a/third_party/rust/parking_lot_core/src/word_lock.rs b/third_party/rust/parking_lot_core/src/word_lock.rs index 9b1411de1e..450e98556c 100644 --- a/third_party/rust/parking_lot_core/src/word_lock.rs +++ b/third_party/rust/parking_lot_core/src/word_lock.rs @@ -78,9 +78,12 @@ pub struct WordLock { } impl WordLock { - pub const INIT: WordLock = WordLock { - state: AtomicUsize::new(0), - }; + /// Returns a new, unlocked, WordLock. 
+ pub const fn new() -> Self { + WordLock { + state: AtomicUsize::new(0), + } + } #[inline] pub fn lock(&self) { diff --git a/third_party/rust/proc-macro2-0.4.27/.cargo-checksum.json b/third_party/rust/proc-macro2-0.4.27/.cargo-checksum.json deleted file mode 100644 index f7d080fba0..0000000000 --- a/third_party/rust/proc-macro2-0.4.27/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{"Cargo.toml":"b523856472549844b4bf20eca0473d955a7e5eeb95c70eddd31a05ac455427bb","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"89857eaaa305afe540abcf56fabae0194dfb4e7906a8098b7206acb23ed11ce8","build.rs":"36fa668f3bf309f243d0e977e8428446cc424303139c1f63410b3c2e30445aec","src/fallback.rs":"e4d1bcb1e92383a2285e6c947dd74b0e34144904948db68127faea627f5dd6ff","src/lib.rs":"896a1d212e30902ff051313808007406ca4471c27880a6ef19508f0ebb8333ee","src/strnom.rs":"60f5380106dbe568cca7abd09877e133c874fbee95d502e4830425c4613a640d","src/wrapper.rs":"0d7fe28ab2b7ee02b8eb8c5a636da364c60f6704b23e7db0a1ddd57c742f54b1","tests/marker.rs":"0227d07bbc7f2e2ad34662a6acb65668b7dc2f79141c4faa672703a04e27bea0","tests/test.rs":"166d35835355bdaa85bcf69de4dfb56ccddd8acf2e1a8cbc506782632b151674"},"package":"4d317f9caece796be1980837fd5cb3dfec5613ebdb04ad0956deea83ce168915"} \ No newline at end of file diff --git a/third_party/rust/proc-macro2-0.4.27/Cargo.toml b/third_party/rust/proc-macro2-0.4.27/Cargo.toml deleted file mode 100644 index 6b11c1e274..0000000000 --- a/third_party/rust/proc-macro2-0.4.27/Cargo.toml +++ /dev/null @@ -1,39 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -name = "proc-macro2" -version = "0.4.27" -authors = ["Alex Crichton "] -build = "build.rs" -description = "A stable implementation of the upcoming new `proc_macro` API. Comes with an\noption, off by default, to also reimplement itself in terms of the upstream\nunstable API.\n" -homepage = "https://github.com/alexcrichton/proc-macro2" -documentation = "https://docs.rs/proc-macro2" -readme = "README.md" -keywords = ["macros"] -license = "MIT/Apache-2.0" -repository = "https://github.com/alexcrichton/proc-macro2" -[package.metadata.docs.rs] -rustc-args = ["--cfg", "procmacro2_semver_exempt"] -rustdoc-args = ["--cfg", "procmacro2_semver_exempt"] -[dependencies.unicode-xid] -version = "0.1" -[dev-dependencies.quote] -version = "0.6" - -[features] -default = ["proc-macro"] -nightly = [] -proc-macro = [] -span-locations = [] -[badges.travis-ci] -repository = "alexcrichton/proc-macro2" diff --git a/third_party/rust/proc-macro2-0.4.27/LICENSE-APACHE b/third_party/rust/proc-macro2-0.4.27/LICENSE-APACHE deleted file mode 100644 index 16fe87b06e..0000000000 --- a/third_party/rust/proc-macro2-0.4.27/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff --git a/third_party/rust/proc-macro2-0.4.27/README.md b/third_party/rust/proc-macro2-0.4.27/README.md deleted file mode 100644 index d887d23786..0000000000 --- a/third_party/rust/proc-macro2-0.4.27/README.md +++ /dev/null @@ -1,100 +0,0 @@ -# proc-macro2 - -[![Build Status](https://api.travis-ci.com/alexcrichton/proc-macro2.svg?branch=master)](https://travis-ci.com/alexcrichton/proc-macro2) -[![Latest Version](https://img.shields.io/crates/v/proc-macro2.svg)](https://crates.io/crates/proc-macro2) -[![Rust Documentation](https://img.shields.io/badge/api-rustdoc-blue.svg)](https://docs.rs/proc-macro2) - -A wrapper around the procedural macro API of the compiler's `proc_macro` crate. -This library serves three purposes: - -- **Bring proc-macro-like functionality to other contexts like build.rs and - main.rs.** Types from `proc_macro` are entirely specific to procedural macros - and cannot ever exist in code outside of a procedural macro. Meanwhile - `proc_macro2` types may exist anywhere including non-macro code. By developing - foundational libraries like [syn] and [quote] against `proc_macro2` rather - than `proc_macro`, the procedural macro ecosystem becomes easily applicable to - many other use cases and we avoid reimplementing non-macro equivalents of - those libraries. - -- **Make procedural macros unit testable.** As a consequence of being specific - to procedural macros, nothing that uses `proc_macro` can be executed from a - unit test. In order for helper libraries or components of a macro to be - testable in isolation, they must be implemented using `proc_macro2`. - -- **Provide the latest and greatest APIs across all compiler versions.** - Procedural macros were first introduced to Rust in 1.15.0 with an extremely - minimal interface. Since then, many improvements have landed to make macros - more flexible and easier to write. This library tracks the procedural macro - API of the most recent stable compiler but employs a polyfill to provide that - API consistently across any compiler since 1.15.0. - -[syn]: https://github.com/dtolnay/syn -[quote]: https://github.com/dtolnay/quote - -## Usage - -```toml -[dependencies] -proc-macro2 = "0.4" -``` - -The skeleton of a typical procedural macro typically looks like this: - -```rust -extern crate proc_macro; - -#[proc_macro_derive(MyDerive)] -pub fn my_derive(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - let input = proc_macro2::TokenStream::from(input); - - let output: proc_macro2::TokenStream = { - /* transform input */ - }; - - proc_macro::TokenStream::from(output) -} -``` - -If parsing with [Syn], you'll use [`parse_macro_input!`] instead to propagate -parse errors correctly back to the compiler when parsing fails. - -[`parse_macro_input!`]: https://docs.rs/syn/0.15/syn/macro.parse_macro_input.html - -## Unstable features - -The default feature set of proc-macro2 tracks the most recent stable compiler -API. Functionality in `proc_macro` that is not yet stable is not exposed by -proc-macro2 by default. - -To opt into the additional APIs available in the most recent nightly compiler, -the `procmacro2_semver_exempt` config flag must be passed to rustc. As usual, we -will polyfill those nightly-only APIs all the way back to Rust 1.15.0. As these -are unstable APIs that track the nightly compiler, minor versions of proc-macro2 -may make breaking changes to them at any time. 
- -``` -RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo build -``` - -Note that this must not only be done for your crate, but for any crate that -depends on your crate. This infectious nature is intentional, as it serves as a -reminder that you are outside of the normal semver guarantees. - -Semver exempt methods are marked as such in the proc-macro2 documentation. - -# License - -This project is licensed under either of - - * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or - http://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or - http://opensource.org/licenses/MIT) - -at your option. - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in Serde by you, as defined in the Apache-2.0 license, shall be -dual licensed as above, without any additional terms or conditions. diff --git a/third_party/rust/proc-macro2-0.4.27/build.rs b/third_party/rust/proc-macro2-0.4.27/build.rs deleted file mode 100644 index 6c0112153c..0000000000 --- a/third_party/rust/proc-macro2-0.4.27/build.rs +++ /dev/null @@ -1,133 +0,0 @@ -// rustc-cfg emitted by the build script: -// -// "u128" -// Include u128 and i128 constructors for proc_macro2::Literal. Enabled on -// any compiler 1.26+. -// -// "use_proc_macro" -// Link to extern crate proc_macro. Available on any compiler and any target -// except wasm32. Requires "proc-macro" Cargo cfg to be enabled (default is -// enabled). On wasm32 we never link to proc_macro even if "proc-macro" cfg -// is enabled. -// -// "wrap_proc_macro" -// Wrap types from libproc_macro rather than polyfilling the whole API. -// Enabled on rustc 1.29+ as long as procmacro2_semver_exempt is not set, -// because we can't emulate the unstable API without emulating everything -// else. Also enabled unconditionally on nightly, in which case the -// procmacro2_semver_exempt surface area is implemented by using the -// nightly-only proc_macro API. -// -// "slow_extend" -// Fallback when `impl Extend for TokenStream` is not available. These impls -// were added one version later than the rest of the proc_macro token API. -// Enabled on rustc 1.29 only. -// -// "nightly" -// Enable the Span::unwrap method. This is to support proc_macro_span and -// proc_macro_diagnostic use on the nightly channel without requiring the -// semver exemption opt-in. Enabled when building with nightly. -// -// "super_unstable" -// Implement the semver exempt API in terms of the nightly-only proc_macro -// API. Enabled when using procmacro2_semver_exempt on a nightly compiler. -// -// "span_locations" -// Provide methods Span::start and Span::end which give the line/column -// location of a token. Enabled by procmacro2_semver_exempt or the -// "span-locations" Cargo cfg. This is behind a cfg because tracking -// location inside spans is a performance hit. 
- -use std::env; -use std::process::Command; -use std::str; - -fn main() { - println!("cargo:rerun-if-changed=build.rs"); - - let target = env::var("TARGET").unwrap(); - - let version = match rustc_version() { - Some(version) => version, - None => return, - }; - - if version.minor >= 26 { - println!("cargo:rustc-cfg=u128"); - } - - let semver_exempt = cfg!(procmacro2_semver_exempt); - if semver_exempt { - // https://github.com/alexcrichton/proc-macro2/issues/147 - println!("cargo:rustc-cfg=procmacro2_semver_exempt"); - } - - if semver_exempt || cfg!(feature = "span-locations") { - println!("cargo:rustc-cfg=span_locations"); - } - - if !enable_use_proc_macro(&target) { - return; - } - - println!("cargo:rustc-cfg=use_proc_macro"); - - // Rust 1.29 stabilized the necessary APIs in the `proc_macro` crate - if version.nightly || version.minor >= 29 && !semver_exempt { - println!("cargo:rustc-cfg=wrap_proc_macro"); - } - - if version.minor == 29 { - println!("cargo:rustc-cfg=slow_extend"); - } - - if version.nightly { - println!("cargo:rustc-cfg=nightly"); - } - - if semver_exempt && version.nightly { - println!("cargo:rustc-cfg=super_unstable"); - } -} - -fn enable_use_proc_macro(target: &str) -> bool { - // wasm targets don't have the `proc_macro` crate, disable this feature. - if target.contains("wasm32") { - return false; - } - - // Otherwise, only enable it if our feature is actually enabled. - cfg!(feature = "proc-macro") -} - -struct RustcVersion { - minor: u32, - nightly: bool, -} - -fn rustc_version() -> Option { - macro_rules! otry { - ($e:expr) => { - match $e { - Some(e) => e, - None => return None, - } - }; - } - - let rustc = otry!(env::var_os("RUSTC")); - let output = otry!(Command::new(rustc).arg("--version").output().ok()); - let version = otry!(str::from_utf8(&output.stdout).ok()); - let nightly = version.contains("nightly"); - let mut pieces = version.split('.'); - if pieces.next() != Some("rustc 1") { - return None; - } - let minor = otry!(pieces.next()); - let minor = otry!(minor.parse().ok()); - - Some(RustcVersion { - minor: minor, - nightly: nightly, - }) -} diff --git a/third_party/rust/proc-macro2-0.4.27/src/fallback.rs b/third_party/rust/proc-macro2-0.4.27/src/fallback.rs deleted file mode 100644 index 928c7472c4..0000000000 --- a/third_party/rust/proc-macro2-0.4.27/src/fallback.rs +++ /dev/null @@ -1,1421 +0,0 @@ -#[cfg(span_locations)] -use std::cell::RefCell; -#[cfg(procmacro2_semver_exempt)] -use std::cmp; -use std::fmt; -use std::iter; -#[cfg(procmacro2_semver_exempt)] -use std::path::Path; -use std::path::PathBuf; -use std::str::FromStr; -use std::vec; - -use strnom::{block_comment, skip_whitespace, whitespace, word_break, Cursor, PResult}; -use unicode_xid::UnicodeXID; - -use {Delimiter, Punct, Spacing, TokenTree}; - -#[derive(Clone)] -pub struct TokenStream { - inner: Vec, -} - -#[derive(Debug)] -pub struct LexError; - -impl TokenStream { - pub fn new() -> TokenStream { - TokenStream { inner: Vec::new() } - } - - pub fn is_empty(&self) -> bool { - self.inner.len() == 0 - } -} - -#[cfg(span_locations)] -fn get_cursor(src: &str) -> Cursor { - // Create a dummy file & add it to the codemap - CODEMAP.with(|cm| { - let mut cm = cm.borrow_mut(); - let name = format!("", cm.files.len()); - let span = cm.add_file(&name, src); - Cursor { - rest: src, - off: span.lo, - } - }) -} - -#[cfg(not(span_locations))] -fn get_cursor(src: &str) -> Cursor { - Cursor { rest: src } -} - -impl FromStr for TokenStream { - type Err = LexError; - - fn from_str(src: &str) -> Result { - // 
Create a dummy file & add it to the codemap - let cursor = get_cursor(src); - - match token_stream(cursor) { - Ok((input, output)) => { - if skip_whitespace(input).len() != 0 { - Err(LexError) - } else { - Ok(output) - } - } - Err(LexError) => Err(LexError), - } - } -} - -impl fmt::Display for TokenStream { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut joint = false; - for (i, tt) in self.inner.iter().enumerate() { - if i != 0 && !joint { - write!(f, " ")?; - } - joint = false; - match *tt { - TokenTree::Group(ref tt) => { - let (start, end) = match tt.delimiter() { - Delimiter::Parenthesis => ("(", ")"), - Delimiter::Brace => ("{", "}"), - Delimiter::Bracket => ("[", "]"), - Delimiter::None => ("", ""), - }; - if tt.stream().into_iter().next().is_none() { - write!(f, "{} {}", start, end)? - } else { - write!(f, "{} {} {}", start, tt.stream(), end)? - } - } - TokenTree::Ident(ref tt) => write!(f, "{}", tt)?, - TokenTree::Punct(ref tt) => { - write!(f, "{}", tt.as_char())?; - match tt.spacing() { - Spacing::Alone => {} - Spacing::Joint => joint = true, - } - } - TokenTree::Literal(ref tt) => write!(f, "{}", tt)?, - } - } - - Ok(()) - } -} - -impl fmt::Debug for TokenStream { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str("TokenStream ")?; - f.debug_list().entries(self.clone()).finish() - } -} - -#[cfg(use_proc_macro)] -impl From<::proc_macro::TokenStream> for TokenStream { - fn from(inner: ::proc_macro::TokenStream) -> TokenStream { - inner - .to_string() - .parse() - .expect("compiler token stream parse failed") - } -} - -#[cfg(use_proc_macro)] -impl From for ::proc_macro::TokenStream { - fn from(inner: TokenStream) -> ::proc_macro::TokenStream { - inner - .to_string() - .parse() - .expect("failed to parse to compiler tokens") - } -} - -impl From for TokenStream { - fn from(tree: TokenTree) -> TokenStream { - TokenStream { inner: vec![tree] } - } -} - -impl iter::FromIterator for TokenStream { - fn from_iter>(streams: I) -> Self { - let mut v = Vec::new(); - - for token in streams.into_iter() { - v.push(token); - } - - TokenStream { inner: v } - } -} - -impl iter::FromIterator for TokenStream { - fn from_iter>(streams: I) -> Self { - let mut v = Vec::new(); - - for stream in streams.into_iter() { - v.extend(stream.inner); - } - - TokenStream { inner: v } - } -} - -impl Extend for TokenStream { - fn extend>(&mut self, streams: I) { - self.inner.extend(streams); - } -} - -impl Extend for TokenStream { - fn extend>(&mut self, streams: I) { - self.inner - .extend(streams.into_iter().flat_map(|stream| stream)); - } -} - -pub type TokenTreeIter = vec::IntoIter; - -impl IntoIterator for TokenStream { - type Item = TokenTree; - type IntoIter = TokenTreeIter; - - fn into_iter(self) -> TokenTreeIter { - self.inner.into_iter() - } -} - -#[derive(Clone, PartialEq, Eq)] -pub struct SourceFile { - path: PathBuf, -} - -impl SourceFile { - /// Get the path to this source file as a string. - pub fn path(&self) -> PathBuf { - self.path.clone() - } - - pub fn is_real(&self) -> bool { - // XXX(nika): Support real files in the future? - false - } -} - -impl fmt::Debug for SourceFile { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("SourceFile") - .field("path", &self.path()) - .field("is_real", &self.is_real()) - .finish() - } -} - -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub struct LineColumn { - pub line: usize, - pub column: usize, -} - -#[cfg(span_locations)] -thread_local! 
{ - static CODEMAP: RefCell = RefCell::new(Codemap { - // NOTE: We start with a single dummy file which all call_site() and - // def_site() spans reference. - files: vec![{ - #[cfg(procmacro2_semver_exempt)] - { - FileInfo { - name: "".to_owned(), - span: Span { lo: 0, hi: 0 }, - lines: vec![0], - } - } - - #[cfg(not(procmacro2_semver_exempt))] - { - FileInfo { - span: Span { lo: 0, hi: 0 }, - lines: vec![0], - } - } - }], - }); -} - -#[cfg(span_locations)] -struct FileInfo { - #[cfg(procmacro2_semver_exempt)] - name: String, - span: Span, - lines: Vec, -} - -#[cfg(span_locations)] -impl FileInfo { - fn offset_line_column(&self, offset: usize) -> LineColumn { - assert!(self.span_within(Span { - lo: offset as u32, - hi: offset as u32 - })); - let offset = offset - self.span.lo as usize; - match self.lines.binary_search(&offset) { - Ok(found) => LineColumn { - line: found + 1, - column: 0, - }, - Err(idx) => LineColumn { - line: idx, - column: offset - self.lines[idx - 1], - }, - } - } - - fn span_within(&self, span: Span) -> bool { - span.lo >= self.span.lo && span.hi <= self.span.hi - } -} - -/// Computesthe offsets of each line in the given source string. -#[cfg(span_locations)] -fn lines_offsets(s: &str) -> Vec { - let mut lines = vec![0]; - let mut prev = 0; - while let Some(len) = s[prev..].find('\n') { - prev += len + 1; - lines.push(prev); - } - lines -} - -#[cfg(span_locations)] -struct Codemap { - files: Vec, -} - -#[cfg(span_locations)] -impl Codemap { - fn next_start_pos(&self) -> u32 { - // Add 1 so there's always space between files. - // - // We'll always have at least 1 file, as we initialize our files list - // with a dummy file. - self.files.last().unwrap().span.hi + 1 - } - - fn add_file(&mut self, name: &str, src: &str) -> Span { - let lines = lines_offsets(src); - let lo = self.next_start_pos(); - // XXX(nika): Shouild we bother doing a checked cast or checked add here? - let span = Span { - lo: lo, - hi: lo + (src.len() as u32), - }; - - #[cfg(procmacro2_semver_exempt)] - self.files.push(FileInfo { - name: name.to_owned(), - span: span, - lines: lines, - }); - - #[cfg(not(procmacro2_semver_exempt))] - self.files.push(FileInfo { - span: span, - lines: lines, - }); - let _ = name; - - span - } - - fn fileinfo(&self, span: Span) -> &FileInfo { - for file in &self.files { - if file.span_within(span) { - return file; - } - } - panic!("Invalid span with no related FileInfo!"); - } -} - -#[derive(Clone, Copy, PartialEq, Eq)] -pub struct Span { - #[cfg(span_locations)] - lo: u32, - #[cfg(span_locations)] - hi: u32, -} - -impl Span { - #[cfg(not(span_locations))] - pub fn call_site() -> Span { - Span {} - } - - #[cfg(span_locations)] - pub fn call_site() -> Span { - Span { lo: 0, hi: 0 } - } - - #[cfg(procmacro2_semver_exempt)] - pub fn def_site() -> Span { - Span::call_site() - } - - #[cfg(procmacro2_semver_exempt)] - pub fn resolved_at(&self, _other: Span) -> Span { - // Stable spans consist only of line/column information, so - // `resolved_at` and `located_at` only select which span the - // caller wants line/column information from. 
- *self - } - - #[cfg(procmacro2_semver_exempt)] - pub fn located_at(&self, other: Span) -> Span { - other - } - - #[cfg(procmacro2_semver_exempt)] - pub fn source_file(&self) -> SourceFile { - CODEMAP.with(|cm| { - let cm = cm.borrow(); - let fi = cm.fileinfo(*self); - SourceFile { - path: Path::new(&fi.name).to_owned(), - } - }) - } - - #[cfg(span_locations)] - pub fn start(&self) -> LineColumn { - CODEMAP.with(|cm| { - let cm = cm.borrow(); - let fi = cm.fileinfo(*self); - fi.offset_line_column(self.lo as usize) - }) - } - - #[cfg(span_locations)] - pub fn end(&self) -> LineColumn { - CODEMAP.with(|cm| { - let cm = cm.borrow(); - let fi = cm.fileinfo(*self); - fi.offset_line_column(self.hi as usize) - }) - } - - #[cfg(procmacro2_semver_exempt)] - pub fn join(&self, other: Span) -> Option { - CODEMAP.with(|cm| { - let cm = cm.borrow(); - // If `other` is not within the same FileInfo as us, return None. - if !cm.fileinfo(*self).span_within(other) { - return None; - } - Some(Span { - lo: cmp::min(self.lo, other.lo), - hi: cmp::max(self.hi, other.hi), - }) - }) - } -} - -impl fmt::Debug for Span { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - #[cfg(procmacro2_semver_exempt)] - return write!(f, "bytes({}..{})", self.lo, self.hi); - - #[cfg(not(procmacro2_semver_exempt))] - write!(f, "Span") - } -} - -pub fn debug_span_field_if_nontrivial(debug: &mut fmt::DebugStruct, span: Span) { - if cfg!(procmacro2_semver_exempt) { - debug.field("span", &span); - } -} - -#[derive(Clone)] -pub struct Group { - delimiter: Delimiter, - stream: TokenStream, - span: Span, -} - -impl Group { - pub fn new(delimiter: Delimiter, stream: TokenStream) -> Group { - Group { - delimiter: delimiter, - stream: stream, - span: Span::call_site(), - } - } - - pub fn delimiter(&self) -> Delimiter { - self.delimiter - } - - pub fn stream(&self) -> TokenStream { - self.stream.clone() - } - - pub fn span(&self) -> Span { - self.span - } - - #[cfg(procmacro2_semver_exempt)] - pub fn span_open(&self) -> Span { - self.span - } - - #[cfg(procmacro2_semver_exempt)] - pub fn span_close(&self) -> Span { - self.span - } - - pub fn set_span(&mut self, span: Span) { - self.span = span; - } -} - -impl fmt::Display for Group { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let (left, right) = match self.delimiter { - Delimiter::Parenthesis => ("(", ")"), - Delimiter::Brace => ("{", "}"), - Delimiter::Bracket => ("[", "]"), - Delimiter::None => ("", ""), - }; - - f.write_str(left)?; - self.stream.fmt(f)?; - f.write_str(right)?; - - Ok(()) - } -} - -impl fmt::Debug for Group { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - let mut debug = fmt.debug_struct("Group"); - debug.field("delimiter", &self.delimiter); - debug.field("stream", &self.stream); - #[cfg(procmacro2_semver_exempt)] - debug.field("span", &self.span); - debug.finish() - } -} - -#[derive(Clone)] -pub struct Ident { - sym: String, - span: Span, - raw: bool, -} - -impl Ident { - fn _new(string: &str, raw: bool, span: Span) -> Ident { - validate_term(string); - - Ident { - sym: string.to_owned(), - span: span, - raw: raw, - } - } - - pub fn new(string: &str, span: Span) -> Ident { - Ident::_new(string, false, span) - } - - pub fn new_raw(string: &str, span: Span) -> Ident { - Ident::_new(string, true, span) - } - - pub fn span(&self) -> Span { - self.span - } - - pub fn set_span(&mut self, span: Span) { - self.span = span; - } -} - -#[inline] -fn is_ident_start(c: char) -> bool { - ('a' <= c && c <= 'z') - || ('A' <= c && c <= 'Z') - || c == 
'_' - || (c > '\x7f' && UnicodeXID::is_xid_start(c)) -} - -#[inline] -fn is_ident_continue(c: char) -> bool { - ('a' <= c && c <= 'z') - || ('A' <= c && c <= 'Z') - || c == '_' - || ('0' <= c && c <= '9') - || (c > '\x7f' && UnicodeXID::is_xid_continue(c)) -} - -fn validate_term(string: &str) { - let validate = string; - if validate.is_empty() { - panic!("Ident is not allowed to be empty; use Option"); - } - - if validate.bytes().all(|digit| digit >= b'0' && digit <= b'9') { - panic!("Ident cannot be a number; use Literal instead"); - } - - fn ident_ok(string: &str) -> bool { - let mut chars = string.chars(); - let first = chars.next().unwrap(); - if !is_ident_start(first) { - return false; - } - for ch in chars { - if !is_ident_continue(ch) { - return false; - } - } - true - } - - if !ident_ok(validate) { - panic!("{:?} is not a valid Ident", string); - } -} - -impl PartialEq for Ident { - fn eq(&self, other: &Ident) -> bool { - self.sym == other.sym && self.raw == other.raw - } -} - -impl PartialEq for Ident -where - T: ?Sized + AsRef, -{ - fn eq(&self, other: &T) -> bool { - let other = other.as_ref(); - if self.raw { - other.starts_with("r#") && self.sym == other[2..] - } else { - self.sym == other - } - } -} - -impl fmt::Display for Ident { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - if self.raw { - "r#".fmt(f)?; - } - self.sym.fmt(f) - } -} - -impl fmt::Debug for Ident { - // Ident(proc_macro), Ident(r#union) - #[cfg(not(procmacro2_semver_exempt))] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut debug = f.debug_tuple("Ident"); - debug.field(&format_args!("{}", self)); - debug.finish() - } - - // Ident { - // sym: proc_macro, - // span: bytes(128..138) - // } - #[cfg(procmacro2_semver_exempt)] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut debug = f.debug_struct("Ident"); - debug.field("sym", &format_args!("{}", self)); - debug.field("span", &self.span); - debug.finish() - } -} - -#[derive(Clone)] -pub struct Literal { - text: String, - span: Span, -} - -macro_rules! suffixed_numbers { - ($($name:ident => $kind:ident,)*) => ($( - pub fn $name(n: $kind) -> Literal { - Literal::_new(format!(concat!("{}", stringify!($kind)), n)) - } - )*) -} - -macro_rules! unsuffixed_numbers { - ($($name:ident => $kind:ident,)*) => ($( - pub fn $name(n: $kind) -> Literal { - Literal::_new(n.to_string()) - } - )*) -} - -impl Literal { - fn _new(text: String) -> Literal { - Literal { - text: text, - span: Span::call_site(), - } - } - - suffixed_numbers! { - u8_suffixed => u8, - u16_suffixed => u16, - u32_suffixed => u32, - u64_suffixed => u64, - usize_suffixed => usize, - i8_suffixed => i8, - i16_suffixed => i16, - i32_suffixed => i32, - i64_suffixed => i64, - isize_suffixed => isize, - - f32_suffixed => f32, - f64_suffixed => f64, - } - - #[cfg(u128)] - suffixed_numbers! { - u128_suffixed => u128, - i128_suffixed => i128, - } - - unsuffixed_numbers! { - u8_unsuffixed => u8, - u16_unsuffixed => u16, - u32_unsuffixed => u32, - u64_unsuffixed => u64, - usize_unsuffixed => usize, - i8_unsuffixed => i8, - i16_unsuffixed => i16, - i32_unsuffixed => i32, - i64_unsuffixed => i64, - isize_unsuffixed => isize, - } - - #[cfg(u128)] - unsuffixed_numbers! 
{ - u128_unsuffixed => u128, - i128_unsuffixed => i128, - } - - pub fn f32_unsuffixed(f: f32) -> Literal { - let mut s = f.to_string(); - if !s.contains(".") { - s.push_str(".0"); - } - Literal::_new(s) - } - - pub fn f64_unsuffixed(f: f64) -> Literal { - let mut s = f.to_string(); - if !s.contains(".") { - s.push_str(".0"); - } - Literal::_new(s) - } - - pub fn string(t: &str) -> Literal { - let mut s = t - .chars() - .flat_map(|c| c.escape_default()) - .collect::(); - s.push('"'); - s.insert(0, '"'); - Literal::_new(s) - } - - pub fn character(t: char) -> Literal { - Literal::_new(format!("'{}'", t.escape_default().collect::())) - } - - pub fn byte_string(bytes: &[u8]) -> Literal { - let mut escaped = "b\"".to_string(); - for b in bytes { - match *b { - b'\0' => escaped.push_str(r"\0"), - b'\t' => escaped.push_str(r"\t"), - b'\n' => escaped.push_str(r"\n"), - b'\r' => escaped.push_str(r"\r"), - b'"' => escaped.push_str("\\\""), - b'\\' => escaped.push_str("\\\\"), - b'\x20'...b'\x7E' => escaped.push(*b as char), - _ => escaped.push_str(&format!("\\x{:02X}", b)), - } - } - escaped.push('"'); - Literal::_new(escaped) - } - - pub fn span(&self) -> Span { - self.span - } - - pub fn set_span(&mut self, span: Span) { - self.span = span; - } -} - -impl fmt::Display for Literal { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.text.fmt(f) - } -} - -impl fmt::Debug for Literal { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - let mut debug = fmt.debug_struct("Literal"); - debug.field("lit", &format_args!("{}", self.text)); - #[cfg(procmacro2_semver_exempt)] - debug.field("span", &self.span); - debug.finish() - } -} - -fn token_stream(mut input: Cursor) -> PResult { - let mut trees = Vec::new(); - loop { - let input_no_ws = skip_whitespace(input); - if input_no_ws.rest.len() == 0 { - break; - } - if let Ok((a, tokens)) = doc_comment(input_no_ws) { - input = a; - trees.extend(tokens); - continue; - } - - let (a, tt) = match token_tree(input_no_ws) { - Ok(p) => p, - Err(_) => break, - }; - trees.push(tt); - input = a; - } - Ok((input, TokenStream { inner: trees })) -} - -#[cfg(not(span_locations))] -fn spanned<'a, T>( - input: Cursor<'a>, - f: fn(Cursor<'a>) -> PResult<'a, T>, -) -> PResult<'a, (T, ::Span)> { - let (a, b) = f(skip_whitespace(input))?; - Ok((a, ((b, ::Span::_new_stable(Span::call_site()))))) -} - -#[cfg(span_locations)] -fn spanned<'a, T>( - input: Cursor<'a>, - f: fn(Cursor<'a>) -> PResult<'a, T>, -) -> PResult<'a, (T, ::Span)> { - let input = skip_whitespace(input); - let lo = input.off; - let (a, b) = f(input)?; - let hi = a.off; - let span = ::Span::_new_stable(Span { lo: lo, hi: hi }); - Ok((a, (b, span))) -} - -fn token_tree(input: Cursor) -> PResult { - let (rest, (mut tt, span)) = spanned(input, token_kind)?; - tt.set_span(span); - Ok((rest, tt)) -} - -named!(token_kind -> TokenTree, alt!( - map!(group, |g| TokenTree::Group(::Group::_new_stable(g))) - | - map!(literal, |l| TokenTree::Literal(::Literal::_new_stable(l))) // must be before symbol - | - map!(op, TokenTree::Punct) - | - symbol_leading_ws -)); - -named!(group -> Group, alt!( - delimited!( - punct!("("), - token_stream, - punct!(")") - ) => { |ts| Group::new(Delimiter::Parenthesis, ts) } - | - delimited!( - punct!("["), - token_stream, - punct!("]") - ) => { |ts| Group::new(Delimiter::Bracket, ts) } - | - delimited!( - punct!("{"), - token_stream, - punct!("}") - ) => { |ts| Group::new(Delimiter::Brace, ts) } -)); - -fn symbol_leading_ws(input: Cursor) -> PResult { - 
symbol(skip_whitespace(input)) -} - -fn symbol(input: Cursor) -> PResult { - let mut chars = input.char_indices(); - - let raw = input.starts_with("r#"); - if raw { - chars.next(); - chars.next(); - } - - match chars.next() { - Some((_, ch)) if is_ident_start(ch) => {} - _ => return Err(LexError), - } - - let mut end = input.len(); - for (i, ch) in chars { - if !is_ident_continue(ch) { - end = i; - break; - } - } - - let a = &input.rest[..end]; - if a == "r#_" { - Err(LexError) - } else { - let ident = if raw { - ::Ident::_new_raw(&a[2..], ::Span::call_site()) - } else { - ::Ident::new(a, ::Span::call_site()) - }; - Ok((input.advance(end), ident.into())) - } -} - -fn literal(input: Cursor) -> PResult { - let input_no_ws = skip_whitespace(input); - - match literal_nocapture(input_no_ws) { - Ok((a, ())) => { - let start = input.len() - input_no_ws.len(); - let len = input_no_ws.len() - a.len(); - let end = start + len; - Ok((a, Literal::_new(input.rest[start..end].to_string()))) - } - Err(LexError) => Err(LexError), - } -} - -named!(literal_nocapture -> (), alt!( - string - | - byte_string - | - byte - | - character - | - float - | - int -)); - -named!(string -> (), alt!( - quoted_string - | - preceded!( - punct!("r"), - raw_string - ) => { |_| () } -)); - -named!(quoted_string -> (), delimited!( - punct!("\""), - cooked_string, - tag!("\"") -)); - -fn cooked_string(input: Cursor) -> PResult<()> { - let mut chars = input.char_indices().peekable(); - while let Some((byte_offset, ch)) = chars.next() { - match ch { - '"' => { - return Ok((input.advance(byte_offset), ())); - } - '\r' => { - if let Some((_, '\n')) = chars.next() { - // ... - } else { - break; - } - } - '\\' => match chars.next() { - Some((_, 'x')) => { - if !backslash_x_char(&mut chars) { - break; - } - } - Some((_, 'n')) | Some((_, 'r')) | Some((_, 't')) | Some((_, '\\')) - | Some((_, '\'')) | Some((_, '"')) | Some((_, '0')) => {} - Some((_, 'u')) => { - if !backslash_u(&mut chars) { - break; - } - } - Some((_, '\n')) | Some((_, '\r')) => { - while let Some(&(_, ch)) = chars.peek() { - if ch.is_whitespace() { - chars.next(); - } else { - break; - } - } - } - _ => break, - }, - _ch => {} - } - } - Err(LexError) -} - -named!(byte_string -> (), alt!( - delimited!( - punct!("b\""), - cooked_byte_string, - tag!("\"") - ) => { |_| () } - | - preceded!( - punct!("br"), - raw_string - ) => { |_| () } -)); - -fn cooked_byte_string(mut input: Cursor) -> PResult<()> { - let mut bytes = input.bytes().enumerate(); - 'outer: while let Some((offset, b)) = bytes.next() { - match b { - b'"' => { - return Ok((input.advance(offset), ())); - } - b'\r' => { - if let Some((_, b'\n')) = bytes.next() { - // ... 
- } else { - break; - } - } - b'\\' => match bytes.next() { - Some((_, b'x')) => { - if !backslash_x_byte(&mut bytes) { - break; - } - } - Some((_, b'n')) | Some((_, b'r')) | Some((_, b't')) | Some((_, b'\\')) - | Some((_, b'0')) | Some((_, b'\'')) | Some((_, b'"')) => {} - Some((newline, b'\n')) | Some((newline, b'\r')) => { - let rest = input.advance(newline + 1); - for (offset, ch) in rest.char_indices() { - if !ch.is_whitespace() { - input = rest.advance(offset); - bytes = input.bytes().enumerate(); - continue 'outer; - } - } - break; - } - _ => break, - }, - b if b < 0x80 => {} - _ => break, - } - } - Err(LexError) -} - -fn raw_string(input: Cursor) -> PResult<()> { - let mut chars = input.char_indices(); - let mut n = 0; - while let Some((byte_offset, ch)) = chars.next() { - match ch { - '"' => { - n = byte_offset; - break; - } - '#' => {} - _ => return Err(LexError), - } - } - for (byte_offset, ch) in chars { - match ch { - '"' if input.advance(byte_offset + 1).starts_with(&input.rest[..n]) => { - let rest = input.advance(byte_offset + 1 + n); - return Ok((rest, ())); - } - '\r' => {} - _ => {} - } - } - Err(LexError) -} - -named!(byte -> (), do_parse!( - punct!("b") >> - tag!("'") >> - cooked_byte >> - tag!("'") >> - (()) -)); - -fn cooked_byte(input: Cursor) -> PResult<()> { - let mut bytes = input.bytes().enumerate(); - let ok = match bytes.next().map(|(_, b)| b) { - Some(b'\\') => match bytes.next().map(|(_, b)| b) { - Some(b'x') => backslash_x_byte(&mut bytes), - Some(b'n') | Some(b'r') | Some(b't') | Some(b'\\') | Some(b'0') | Some(b'\'') - | Some(b'"') => true, - _ => false, - }, - b => b.is_some(), - }; - if ok { - match bytes.next() { - Some((offset, _)) => { - if input.chars().as_str().is_char_boundary(offset) { - Ok((input.advance(offset), ())) - } else { - Err(LexError) - } - } - None => Ok((input.advance(input.len()), ())), - } - } else { - Err(LexError) - } -} - -named!(character -> (), do_parse!( - punct!("'") >> - cooked_char >> - tag!("'") >> - (()) -)); - -fn cooked_char(input: Cursor) -> PResult<()> { - let mut chars = input.char_indices(); - let ok = match chars.next().map(|(_, ch)| ch) { - Some('\\') => match chars.next().map(|(_, ch)| ch) { - Some('x') => backslash_x_char(&mut chars), - Some('u') => backslash_u(&mut chars), - Some('n') | Some('r') | Some('t') | Some('\\') | Some('0') | Some('\'') | Some('"') => { - true - } - _ => false, - }, - ch => ch.is_some(), - }; - if ok { - match chars.next() { - Some((idx, _)) => Ok((input.advance(idx), ())), - None => Ok((input.advance(input.len()), ())), - } - } else { - Err(LexError) - } -} - -macro_rules! 
next_ch { - ($chars:ident @ $pat:pat $(| $rest:pat)*) => { - match $chars.next() { - Some((_, ch)) => match ch { - $pat $(| $rest)* => ch, - _ => return false, - }, - None => return false - } - }; -} - -fn backslash_x_char(chars: &mut I) -> bool -where - I: Iterator, -{ - next_ch!(chars @ '0'...'7'); - next_ch!(chars @ '0'...'9' | 'a'...'f' | 'A'...'F'); - true -} - -fn backslash_x_byte(chars: &mut I) -> bool -where - I: Iterator, -{ - next_ch!(chars @ b'0'...b'9' | b'a'...b'f' | b'A'...b'F'); - next_ch!(chars @ b'0'...b'9' | b'a'...b'f' | b'A'...b'F'); - true -} - -fn backslash_u(chars: &mut I) -> bool -where - I: Iterator, -{ - next_ch!(chars @ '{'); - next_ch!(chars @ '0'...'9' | 'a'...'f' | 'A'...'F'); - loop { - let c = next_ch!(chars @ '0'...'9' | 'a'...'f' | 'A'...'F' | '_' | '}'); - if c == '}' { - return true; - } - } -} - -fn float(input: Cursor) -> PResult<()> { - let (rest, ()) = float_digits(input)?; - for suffix in &["f32", "f64"] { - if rest.starts_with(suffix) { - return word_break(rest.advance(suffix.len())); - } - } - word_break(rest) -} - -fn float_digits(input: Cursor) -> PResult<()> { - let mut chars = input.chars().peekable(); - match chars.next() { - Some(ch) if ch >= '0' && ch <= '9' => {} - _ => return Err(LexError), - } - - let mut len = 1; - let mut has_dot = false; - let mut has_exp = false; - while let Some(&ch) = chars.peek() { - match ch { - '0'...'9' | '_' => { - chars.next(); - len += 1; - } - '.' => { - if has_dot { - break; - } - chars.next(); - if chars - .peek() - .map(|&ch| ch == '.' || UnicodeXID::is_xid_start(ch)) - .unwrap_or(false) - { - return Err(LexError); - } - len += 1; - has_dot = true; - } - 'e' | 'E' => { - chars.next(); - len += 1; - has_exp = true; - break; - } - _ => break, - } - } - - let rest = input.advance(len); - if !(has_dot || has_exp || rest.starts_with("f32") || rest.starts_with("f64")) { - return Err(LexError); - } - - if has_exp { - let mut has_exp_value = false; - while let Some(&ch) = chars.peek() { - match ch { - '+' | '-' => { - if has_exp_value { - break; - } - chars.next(); - len += 1; - } - '0'...'9' => { - chars.next(); - len += 1; - has_exp_value = true; - } - '_' => { - chars.next(); - len += 1; - } - _ => break, - } - } - if !has_exp_value { - return Err(LexError); - } - } - - Ok((input.advance(len), ())) -} - -fn int(input: Cursor) -> PResult<()> { - let (rest, ()) = digits(input)?; - for suffix in &[ - "isize", "i8", "i16", "i32", "i64", "i128", "usize", "u8", "u16", "u32", "u64", "u128", - ] { - if rest.starts_with(suffix) { - return word_break(rest.advance(suffix.len())); - } - } - word_break(rest) -} - -fn digits(mut input: Cursor) -> PResult<()> { - let base = if input.starts_with("0x") { - input = input.advance(2); - 16 - } else if input.starts_with("0o") { - input = input.advance(2); - 8 - } else if input.starts_with("0b") { - input = input.advance(2); - 2 - } else { - 10 - }; - - let mut len = 0; - let mut empty = true; - for b in input.bytes() { - let digit = match b { - b'0'...b'9' => (b - b'0') as u64, - b'a'...b'f' => 10 + (b - b'a') as u64, - b'A'...b'F' => 10 + (b - b'A') as u64, - b'_' => { - if empty && base == 10 { - return Err(LexError); - } - len += 1; - continue; - } - _ => break, - }; - if digit >= base { - return Err(LexError); - } - len += 1; - empty = false; - } - if empty { - Err(LexError) - } else { - Ok((input.advance(len), ())) - } -} - -fn op(input: Cursor) -> PResult { - let input = skip_whitespace(input); - match op_char(input) { - Ok((rest, '\'')) => { - symbol(rest)?; - Ok((rest, 
Punct::new('\'', Spacing::Joint))) - } - Ok((rest, ch)) => { - let kind = match op_char(rest) { - Ok(_) => Spacing::Joint, - Err(LexError) => Spacing::Alone, - }; - Ok((rest, Punct::new(ch, kind))) - } - Err(LexError) => Err(LexError), - } -} - -fn op_char(input: Cursor) -> PResult { - if input.starts_with("//") || input.starts_with("/*") { - // Do not accept `/` of a comment as an op. - return Err(LexError); - } - - let mut chars = input.chars(); - let first = match chars.next() { - Some(ch) => ch, - None => { - return Err(LexError); - } - }; - let recognized = "~!@#$%^&*-=+|;:,<.>/?'"; - if recognized.contains(first) { - Ok((input.advance(first.len_utf8()), first)) - } else { - Err(LexError) - } -} - -fn doc_comment(input: Cursor) -> PResult> { - let mut trees = Vec::new(); - let (rest, ((comment, inner), span)) = spanned(input, doc_comment_contents)?; - trees.push(TokenTree::Punct(Punct::new('#', Spacing::Alone))); - if inner { - trees.push(Punct::new('!', Spacing::Alone).into()); - } - let mut stream = vec![ - TokenTree::Ident(::Ident::new("doc", span)), - TokenTree::Punct(Punct::new('=', Spacing::Alone)), - TokenTree::Literal(::Literal::string(comment)), - ]; - for tt in stream.iter_mut() { - tt.set_span(span); - } - let group = Group::new(Delimiter::Bracket, stream.into_iter().collect()); - trees.push(::Group::_new_stable(group).into()); - for tt in trees.iter_mut() { - tt.set_span(span); - } - Ok((rest, trees)) -} - -named!(doc_comment_contents -> (&str, bool), alt!( - do_parse!( - punct!("//!") >> - s: take_until_newline_or_eof!() >> - ((s, true)) - ) - | - do_parse!( - option!(whitespace) >> - peek!(tag!("/*!")) >> - s: block_comment >> - ((s, true)) - ) - | - do_parse!( - punct!("///") >> - not!(tag!("/")) >> - s: take_until_newline_or_eof!() >> - ((s, false)) - ) - | - do_parse!( - option!(whitespace) >> - peek!(tuple!(tag!("/**"), not!(tag!("*")))) >> - s: block_comment >> - ((s, false)) - ) -)); diff --git a/third_party/rust/proc-macro2-0.4.27/src/lib.rs b/third_party/rust/proc-macro2-0.4.27/src/lib.rs deleted file mode 100644 index 25dd705c2e..0000000000 --- a/third_party/rust/proc-macro2-0.4.27/src/lib.rs +++ /dev/null @@ -1,1149 +0,0 @@ -//! A wrapper around the procedural macro API of the compiler's [`proc_macro`] -//! crate. This library serves three purposes: -//! -//! [`proc_macro`]: https://doc.rust-lang.org/proc_macro/ -//! -//! - **Bring proc-macro-like functionality to other contexts like build.rs and -//! main.rs.** Types from `proc_macro` are entirely specific to procedural -//! macros and cannot ever exist in code outside of a procedural macro. -//! Meanwhile `proc_macro2` types may exist anywhere including non-macro code. -//! By developing foundational libraries like [syn] and [quote] against -//! `proc_macro2` rather than `proc_macro`, the procedural macro ecosystem -//! becomes easily applicable to many other use cases and we avoid -//! reimplementing non-macro equivalents of those libraries. -//! -//! - **Make procedural macros unit testable.** As a consequence of being -//! specific to procedural macros, nothing that uses `proc_macro` can be -//! executed from a unit test. In order for helper libraries or components of -//! a macro to be testable in isolation, they must be implemented using -//! `proc_macro2`. -//! -//! - **Provide the latest and greatest APIs across all compiler versions.** -//! Procedural macros were first introduced to Rust in 1.15.0 with an -//! extremely minimal interface. Since then, many improvements have landed to -//! 
make macros more flexible and easier to write. This library tracks the -//! procedural macro API of the most recent stable compiler but employs a -//! polyfill to provide that API consistently across any compiler since -//! 1.15.0. -//! -//! [syn]: https://github.com/dtolnay/syn -//! [quote]: https://github.com/dtolnay/quote -//! -//! # Usage -//! -//! The skeleton of a typical procedural macro typically looks like this: -//! -//! ```edition2018 -//! extern crate proc_macro; -//! -//! # const IGNORE: &str = stringify! { -//! #[proc_macro_derive(MyDerive)] -//! # }; -//! pub fn my_derive(input: proc_macro::TokenStream) -> proc_macro::TokenStream { -//! let input = proc_macro2::TokenStream::from(input); -//! -//! let output: proc_macro2::TokenStream = { -//! /* transform input */ -//! # input -//! }; -//! -//! proc_macro::TokenStream::from(output) -//! } -//! ``` -//! -//! If parsing with [Syn], you'll use [`parse_macro_input!`] instead to -//! propagate parse errors correctly back to the compiler when parsing fails. -//! -//! [`parse_macro_input!`]: https://docs.rs/syn/0.15/syn/macro.parse_macro_input.html -//! -//! # Unstable features -//! -//! The default feature set of proc-macro2 tracks the most recent stable -//! compiler API. Functionality in `proc_macro` that is not yet stable is not -//! exposed by proc-macro2 by default. -//! -//! To opt into the additional APIs available in the most recent nightly -//! compiler, the `procmacro2_semver_exempt` config flag must be passed to -//! rustc. As usual, we will polyfill those nightly-only APIs all the way back -//! to Rust 1.15.0. As these are unstable APIs that track the nightly compiler, -//! minor versions of proc-macro2 may make breaking changes to them at any time. -//! -//! ```sh -//! RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo build -//! ``` -//! -//! Note that this must not only be done for your crate, but for any crate that -//! depends on your crate. This infectious nature is intentional, as it serves -//! as a reminder that you are outside of the normal semver guarantees. -//! -//! Semver exempt methods are marked as such in the proc-macro2 documentation. - -// Proc-macro2 types in rustdoc of other crates get linked to here. -#![doc(html_root_url = "https://docs.rs/proc-macro2/0.4.27")] -#![cfg_attr(nightly, feature(proc_macro_span))] -#![cfg_attr(super_unstable, feature(proc_macro_raw_ident, proc_macro_def_site))] - -#[cfg(use_proc_macro)] -extern crate proc_macro; -extern crate unicode_xid; - -use std::cmp::Ordering; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::iter::FromIterator; -use std::marker; -#[cfg(procmacro2_semver_exempt)] -use std::path::PathBuf; -use std::rc::Rc; -use std::str::FromStr; - -#[macro_use] -mod strnom; -mod fallback; - -#[cfg(not(wrap_proc_macro))] -use fallback as imp; -#[path = "wrapper.rs"] -#[cfg(wrap_proc_macro)] -mod imp; - -/// An abstract stream of tokens, or more concretely a sequence of token trees. -/// -/// This type provides interfaces for iterating over token trees and for -/// collecting token trees into one stream. -/// -/// Token stream is both the input and output of `#[proc_macro]`, -/// `#[proc_macro_attribute]` and `#[proc_macro_derive]` definitions. -#[derive(Clone)] -pub struct TokenStream { - inner: imp::TokenStream, - _marker: marker::PhantomData>, -} - -/// Error returned from `TokenStream::from_str`. 
-pub struct LexError { - inner: imp::LexError, - _marker: marker::PhantomData>, -} - -impl TokenStream { - fn _new(inner: imp::TokenStream) -> TokenStream { - TokenStream { - inner: inner, - _marker: marker::PhantomData, - } - } - - fn _new_stable(inner: fallback::TokenStream) -> TokenStream { - TokenStream { - inner: inner.into(), - _marker: marker::PhantomData, - } - } - - /// Returns an empty `TokenStream` containing no token trees. - pub fn new() -> TokenStream { - TokenStream::_new(imp::TokenStream::new()) - } - - #[deprecated(since = "0.4.4", note = "please use TokenStream::new")] - pub fn empty() -> TokenStream { - TokenStream::new() - } - - /// Checks if this `TokenStream` is empty. - pub fn is_empty(&self) -> bool { - self.inner.is_empty() - } -} - -/// `TokenStream::default()` returns an empty stream, -/// i.e. this is equivalent with `TokenStream::new()`. -impl Default for TokenStream { - fn default() -> Self { - TokenStream::new() - } -} - -/// Attempts to break the string into tokens and parse those tokens into a token -/// stream. -/// -/// May fail for a number of reasons, for example, if the string contains -/// unbalanced delimiters or characters not existing in the language. -/// -/// NOTE: Some errors may cause panics instead of returning `LexError`. We -/// reserve the right to change these errors into `LexError`s later. -impl FromStr for TokenStream { - type Err = LexError; - - fn from_str(src: &str) -> Result { - let e = src.parse().map_err(|e| LexError { - inner: e, - _marker: marker::PhantomData, - })?; - Ok(TokenStream::_new(e)) - } -} - -#[cfg(use_proc_macro)] -impl From for TokenStream { - fn from(inner: proc_macro::TokenStream) -> TokenStream { - TokenStream::_new(inner.into()) - } -} - -#[cfg(use_proc_macro)] -impl From for proc_macro::TokenStream { - fn from(inner: TokenStream) -> proc_macro::TokenStream { - inner.inner.into() - } -} - -impl Extend for TokenStream { - fn extend>(&mut self, streams: I) { - self.inner.extend(streams) - } -} - -impl Extend for TokenStream { - fn extend>(&mut self, streams: I) { - self.inner - .extend(streams.into_iter().map(|stream| stream.inner)) - } -} - -/// Collects a number of token trees into a single stream. -impl FromIterator for TokenStream { - fn from_iter>(streams: I) -> Self { - TokenStream::_new(streams.into_iter().collect()) - } -} -impl FromIterator for TokenStream { - fn from_iter>(streams: I) -> Self { - TokenStream::_new(streams.into_iter().map(|i| i.inner).collect()) - } -} - -/// Prints the token stream as a string that is supposed to be losslessly -/// convertible back into the same token stream (modulo spans), except for -/// possibly `TokenTree::Group`s with `Delimiter::None` delimiters and negative -/// numeric literals. -impl fmt::Display for TokenStream { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) - } -} - -/// Prints token in a form convenient for debugging. -impl fmt::Debug for TokenStream { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) - } -} - -impl fmt::Debug for LexError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) - } -} - -/// The source file of a given `Span`. -/// -/// This type is semver exempt and not exposed by default. 
-#[cfg(procmacro2_semver_exempt)] -#[derive(Clone, PartialEq, Eq)] -pub struct SourceFile { - inner: imp::SourceFile, - _marker: marker::PhantomData>, -} - -#[cfg(procmacro2_semver_exempt)] -impl SourceFile { - fn _new(inner: imp::SourceFile) -> Self { - SourceFile { - inner: inner, - _marker: marker::PhantomData, - } - } - - /// Get the path to this source file. - /// - /// ### Note - /// - /// If the code span associated with this `SourceFile` was generated by an - /// external macro, this may not be an actual path on the filesystem. Use - /// [`is_real`] to check. - /// - /// Also note that even if `is_real` returns `true`, if - /// `--remap-path-prefix` was passed on the command line, the path as given - /// may not actually be valid. - /// - /// [`is_real`]: #method.is_real - pub fn path(&self) -> PathBuf { - self.inner.path() - } - - /// Returns `true` if this source file is a real source file, and not - /// generated by an external macro's expansion. - pub fn is_real(&self) -> bool { - self.inner.is_real() - } -} - -#[cfg(procmacro2_semver_exempt)] -impl fmt::Debug for SourceFile { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) - } -} - -/// A line-column pair representing the start or end of a `Span`. -/// -/// This type is semver exempt and not exposed by default. -#[cfg(span_locations)] -pub struct LineColumn { - /// The 1-indexed line in the source file on which the span starts or ends - /// (inclusive). - pub line: usize, - /// The 0-indexed column (in UTF-8 characters) in the source file on which - /// the span starts or ends (inclusive). - pub column: usize, -} - -/// A region of source code, along with macro expansion information. -#[derive(Copy, Clone)] -pub struct Span { - inner: imp::Span, - _marker: marker::PhantomData>, -} - -impl Span { - fn _new(inner: imp::Span) -> Span { - Span { - inner: inner, - _marker: marker::PhantomData, - } - } - - fn _new_stable(inner: fallback::Span) -> Span { - Span { - inner: inner.into(), - _marker: marker::PhantomData, - } - } - - /// The span of the invocation of the current procedural macro. - /// - /// Identifiers created with this span will be resolved as if they were - /// written directly at the macro call location (call-site hygiene) and - /// other code at the macro call site will be able to refer to them as well. - pub fn call_site() -> Span { - Span::_new(imp::Span::call_site()) - } - - /// A span that resolves at the macro definition site. - /// - /// This method is semver exempt and not exposed by default. - #[cfg(procmacro2_semver_exempt)] - pub fn def_site() -> Span { - Span::_new(imp::Span::def_site()) - } - - /// Creates a new span with the same line/column information as `self` but - /// that resolves symbols as though it were at `other`. - /// - /// This method is semver exempt and not exposed by default. - #[cfg(procmacro2_semver_exempt)] - pub fn resolved_at(&self, other: Span) -> Span { - Span::_new(self.inner.resolved_at(other.inner)) - } - - /// Creates a new span with the same name resolution behavior as `self` but - /// with the line/column information of `other`. - /// - /// This method is semver exempt and not exposed by default. - #[cfg(procmacro2_semver_exempt)] - pub fn located_at(&self, other: Span) -> Span { - Span::_new(self.inner.located_at(other.inner)) - } - - /// Convert `proc_macro2::Span` to `proc_macro::Span`. - /// - /// This method is available when building with a nightly compiler, or when - /// building with rustc 1.29+ *without* semver exempt features. 
- /// - /// # Panics - /// - /// Panics if called from outside of a procedural macro. Unlike - /// `proc_macro2::Span`, the `proc_macro::Span` type can only exist within - /// the context of a procedural macro invocation. - #[cfg(wrap_proc_macro)] - pub fn unwrap(self) -> proc_macro::Span { - self.inner.unwrap() - } - - // Soft deprecated. Please use Span::unwrap. - #[cfg(wrap_proc_macro)] - #[doc(hidden)] - pub fn unstable(self) -> proc_macro::Span { - self.unwrap() - } - - /// The original source file into which this span points. - /// - /// This method is semver exempt and not exposed by default. - #[cfg(procmacro2_semver_exempt)] - pub fn source_file(&self) -> SourceFile { - SourceFile::_new(self.inner.source_file()) - } - - /// Get the starting line/column in the source file for this span. - /// - /// This method requires the `"span-locations"` feature to be enabled. - #[cfg(span_locations)] - pub fn start(&self) -> LineColumn { - let imp::LineColumn { line, column } = self.inner.start(); - LineColumn { - line: line, - column: column, - } - } - - /// Get the ending line/column in the source file for this span. - /// - /// This method requires the `"span-locations"` feature to be enabled. - #[cfg(span_locations)] - pub fn end(&self) -> LineColumn { - let imp::LineColumn { line, column } = self.inner.end(); - LineColumn { - line: line, - column: column, - } - } - - /// Create a new span encompassing `self` and `other`. - /// - /// Returns `None` if `self` and `other` are from different files. - /// - /// This method is semver exempt and not exposed by default. - #[cfg(procmacro2_semver_exempt)] - pub fn join(&self, other: Span) -> Option { - self.inner.join(other.inner).map(Span::_new) - } - - /// Compares to spans to see if they're equal. - /// - /// This method is semver exempt and not exposed by default. - #[cfg(procmacro2_semver_exempt)] - pub fn eq(&self, other: &Span) -> bool { - self.inner.eq(&other.inner) - } -} - -/// Prints a span in a form convenient for debugging. -impl fmt::Debug for Span { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) - } -} - -/// A single token or a delimited sequence of token trees (e.g. `[1, (), ..]`). -#[derive(Clone)] -pub enum TokenTree { - /// A token stream surrounded by bracket delimiters. - Group(Group), - /// An identifier. - Ident(Ident), - /// A single punctuation character (`+`, `,`, `$`, etc.). - Punct(Punct), - /// A literal character (`'a'`), string (`"hello"`), number (`2.3`), etc. - Literal(Literal), -} - -impl TokenTree { - /// Returns the span of this tree, delegating to the `span` method of - /// the contained token or a delimited stream. - pub fn span(&self) -> Span { - match *self { - TokenTree::Group(ref t) => t.span(), - TokenTree::Ident(ref t) => t.span(), - TokenTree::Punct(ref t) => t.span(), - TokenTree::Literal(ref t) => t.span(), - } - } - - /// Configures the span for *only this token*. - /// - /// Note that if this token is a `Group` then this method will not configure - /// the span of each of the internal tokens, this will simply delegate to - /// the `set_span` method of each variant. 
- pub fn set_span(&mut self, span: Span) { - match *self { - TokenTree::Group(ref mut t) => t.set_span(span), - TokenTree::Ident(ref mut t) => t.set_span(span), - TokenTree::Punct(ref mut t) => t.set_span(span), - TokenTree::Literal(ref mut t) => t.set_span(span), - } - } -} - -impl From for TokenTree { - fn from(g: Group) -> TokenTree { - TokenTree::Group(g) - } -} - -impl From for TokenTree { - fn from(g: Ident) -> TokenTree { - TokenTree::Ident(g) - } -} - -impl From for TokenTree { - fn from(g: Punct) -> TokenTree { - TokenTree::Punct(g) - } -} - -impl From for TokenTree { - fn from(g: Literal) -> TokenTree { - TokenTree::Literal(g) - } -} - -/// Prints the token tree as a string that is supposed to be losslessly -/// convertible back into the same token tree (modulo spans), except for -/// possibly `TokenTree::Group`s with `Delimiter::None` delimiters and negative -/// numeric literals. -impl fmt::Display for TokenTree { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - TokenTree::Group(ref t) => t.fmt(f), - TokenTree::Ident(ref t) => t.fmt(f), - TokenTree::Punct(ref t) => t.fmt(f), - TokenTree::Literal(ref t) => t.fmt(f), - } - } -} - -/// Prints token tree in a form convenient for debugging. -impl fmt::Debug for TokenTree { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - // Each of these has the name in the struct type in the derived debug, - // so don't bother with an extra layer of indirection - match *self { - TokenTree::Group(ref t) => t.fmt(f), - TokenTree::Ident(ref t) => { - let mut debug = f.debug_struct("Ident"); - debug.field("sym", &format_args!("{}", t)); - imp::debug_span_field_if_nontrivial(&mut debug, t.span().inner); - debug.finish() - } - TokenTree::Punct(ref t) => t.fmt(f), - TokenTree::Literal(ref t) => t.fmt(f), - } - } -} - -/// A delimited token stream. -/// -/// A `Group` internally contains a `TokenStream` which is surrounded by -/// `Delimiter`s. -#[derive(Clone)] -pub struct Group { - inner: imp::Group, -} - -/// Describes how a sequence of token trees is delimited. -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -pub enum Delimiter { - /// `( ... )` - Parenthesis, - /// `{ ... }` - Brace, - /// `[ ... ]` - Bracket, - /// `Ø ... Ø` - /// - /// An implicit delimiter, that may, for example, appear around tokens - /// coming from a "macro variable" `$var`. It is important to preserve - /// operator priorities in cases like `$var * 3` where `$var` is `1 + 2`. - /// Implicit delimiters may not survive roundtrip of a token stream through - /// a string. - None, -} - -impl Group { - fn _new(inner: imp::Group) -> Self { - Group { inner: inner } - } - - fn _new_stable(inner: fallback::Group) -> Self { - Group { - inner: inner.into(), - } - } - - /// Creates a new `Group` with the given delimiter and token stream. - /// - /// This constructor will set the span for this group to - /// `Span::call_site()`. To change the span you can use the `set_span` - /// method below. - pub fn new(delimiter: Delimiter, stream: TokenStream) -> Group { - Group { - inner: imp::Group::new(delimiter, stream.inner), - } - } - - /// Returns the delimiter of this `Group` - pub fn delimiter(&self) -> Delimiter { - self.inner.delimiter() - } - - /// Returns the `TokenStream` of tokens that are delimited in this `Group`. - /// - /// Note that the returned token stream does not include the delimiter - /// returned above. 
- pub fn stream(&self) -> TokenStream { - TokenStream::_new(self.inner.stream()) - } - - /// Returns the span for the delimiters of this token stream, spanning the - /// entire `Group`. - /// - /// ```text - /// pub fn span(&self) -> Span { - /// ^^^^^^^ - /// ``` - pub fn span(&self) -> Span { - Span::_new(self.inner.span()) - } - - /// Returns the span pointing to the opening delimiter of this group. - /// - /// ```text - /// pub fn span_open(&self) -> Span { - /// ^ - /// ``` - #[cfg(procmacro2_semver_exempt)] - pub fn span_open(&self) -> Span { - Span::_new(self.inner.span_open()) - } - - /// Returns the span pointing to the closing delimiter of this group. - /// - /// ```text - /// pub fn span_close(&self) -> Span { - /// ^ - /// ``` - #[cfg(procmacro2_semver_exempt)] - pub fn span_close(&self) -> Span { - Span::_new(self.inner.span_close()) - } - - /// Configures the span for this `Group`'s delimiters, but not its internal - /// tokens. - /// - /// This method will **not** set the span of all the internal tokens spanned - /// by this group, but rather it will only set the span of the delimiter - /// tokens at the level of the `Group`. - pub fn set_span(&mut self, span: Span) { - self.inner.set_span(span.inner) - } -} - -/// Prints the group as a string that should be losslessly convertible back -/// into the same group (modulo spans), except for possibly `TokenTree::Group`s -/// with `Delimiter::None` delimiters. -impl fmt::Display for Group { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(&self.inner, formatter) - } -} - -impl fmt::Debug for Group { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(&self.inner, formatter) - } -} - -/// An `Punct` is an single punctuation character like `+`, `-` or `#`. -/// -/// Multicharacter operators like `+=` are represented as two instances of -/// `Punct` with different forms of `Spacing` returned. -#[derive(Clone)] -pub struct Punct { - op: char, - spacing: Spacing, - span: Span, -} - -/// Whether an `Punct` is followed immediately by another `Punct` or followed by -/// another token or whitespace. -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -pub enum Spacing { - /// E.g. `+` is `Alone` in `+ =`, `+ident` or `+()`. - Alone, - /// E.g. `+` is `Joint` in `+=` or `'#`. - /// - /// Additionally, single quote `'` can join with identifiers to form - /// lifetimes `'ident`. - Joint, -} - -impl Punct { - /// Creates a new `Punct` from the given character and spacing. - /// - /// The `ch` argument must be a valid punctuation character permitted by the - /// language, otherwise the function will panic. - /// - /// The returned `Punct` will have the default span of `Span::call_site()` - /// which can be further configured with the `set_span` method below. - pub fn new(op: char, spacing: Spacing) -> Punct { - Punct { - op: op, - spacing: spacing, - span: Span::call_site(), - } - } - - /// Returns the value of this punctuation character as `char`. - pub fn as_char(&self) -> char { - self.op - } - - /// Returns the spacing of this punctuation character, indicating whether - /// it's immediately followed by another `Punct` in the token stream, so - /// they can potentially be combined into a multicharacter operator - /// (`Joint`), or it's followed by some other token or whitespace (`Alone`) - /// so the operator has certainly ended. - pub fn spacing(&self) -> Spacing { - self.spacing - } - - /// Returns the span for this punctuation character. 
- pub fn span(&self) -> Span { - self.span - } - - /// Configure the span for this punctuation character. - pub fn set_span(&mut self, span: Span) { - self.span = span; - } -} - -/// Prints the punctuation character as a string that should be losslessly -/// convertible back into the same character. -impl fmt::Display for Punct { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.op.fmt(f) - } -} - -impl fmt::Debug for Punct { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - let mut debug = fmt.debug_struct("Punct"); - debug.field("op", &self.op); - debug.field("spacing", &self.spacing); - imp::debug_span_field_if_nontrivial(&mut debug, self.span.inner); - debug.finish() - } -} - -/// A word of Rust code, which may be a keyword or legal variable name. -/// -/// An identifier consists of at least one Unicode code point, the first of -/// which has the XID_Start property and the rest of which have the XID_Continue -/// property. -/// -/// - The empty string is not an identifier. Use `Option`. -/// - A lifetime is not an identifier. Use `syn::Lifetime` instead. -/// -/// An identifier constructed with `Ident::new` is permitted to be a Rust -/// keyword, though parsing one through its [`Parse`] implementation rejects -/// Rust keywords. Use `input.call(Ident::parse_any)` when parsing to match the -/// behaviour of `Ident::new`. -/// -/// [`Parse`]: https://docs.rs/syn/0.15/syn/parse/trait.Parse.html -/// -/// # Examples -/// -/// A new ident can be created from a string using the `Ident::new` function. -/// A span must be provided explicitly which governs the name resolution -/// behavior of the resulting identifier. -/// -/// ```edition2018 -/// use proc_macro2::{Ident, Span}; -/// -/// fn main() { -/// let call_ident = Ident::new("calligraphy", Span::call_site()); -/// -/// println!("{}", call_ident); -/// } -/// ``` -/// -/// An ident can be interpolated into a token stream using the `quote!` macro. -/// -/// ```edition2018 -/// use proc_macro2::{Ident, Span}; -/// use quote::quote; -/// -/// fn main() { -/// let ident = Ident::new("demo", Span::call_site()); -/// -/// // Create a variable binding whose name is this ident. -/// let expanded = quote! { let #ident = 10; }; -/// -/// // Create a variable binding with a slightly different name. -/// let temp_ident = Ident::new(&format!("new_{}", ident), Span::call_site()); -/// let expanded = quote! { let #temp_ident = 10; }; -/// } -/// ``` -/// -/// A string representation of the ident is available through the `to_string()` -/// method. -/// -/// ```edition2018 -/// # use proc_macro2::{Ident, Span}; -/// # -/// # let ident = Ident::new("another_identifier", Span::call_site()); -/// # -/// // Examine the ident as a string. -/// let ident_string = ident.to_string(); -/// if ident_string.len() > 60 { -/// println!("Very long identifier: {}", ident_string) -/// } -/// ``` -#[derive(Clone)] -pub struct Ident { - inner: imp::Ident, - _marker: marker::PhantomData>, -} - -impl Ident { - fn _new(inner: imp::Ident) -> Ident { - Ident { - inner: inner, - _marker: marker::PhantomData, - } - } - - /// Creates a new `Ident` with the given `string` as well as the specified - /// `span`. - /// - /// The `string` argument must be a valid identifier permitted by the - /// language, otherwise the function will panic. - /// - /// Note that `span`, currently in rustc, configures the hygiene information - /// for this identifier. 
- /// - /// As of this time `Span::call_site()` explicitly opts-in to "call-site" - /// hygiene meaning that identifiers created with this span will be resolved - /// as if they were written directly at the location of the macro call, and - /// other code at the macro call site will be able to refer to them as well. - /// - /// Later spans like `Span::def_site()` will allow to opt-in to - /// "definition-site" hygiene meaning that identifiers created with this - /// span will be resolved at the location of the macro definition and other - /// code at the macro call site will not be able to refer to them. - /// - /// Due to the current importance of hygiene this constructor, unlike other - /// tokens, requires a `Span` to be specified at construction. - /// - /// # Panics - /// - /// Panics if the input string is neither a keyword nor a legal variable - /// name. - pub fn new(string: &str, span: Span) -> Ident { - Ident::_new(imp::Ident::new(string, span.inner)) - } - - /// Same as `Ident::new`, but creates a raw identifier (`r#ident`). - /// - /// This method is semver exempt and not exposed by default. - #[cfg(procmacro2_semver_exempt)] - pub fn new_raw(string: &str, span: Span) -> Ident { - Ident::_new_raw(string, span) - } - - fn _new_raw(string: &str, span: Span) -> Ident { - Ident::_new(imp::Ident::new_raw(string, span.inner)) - } - - /// Returns the span of this `Ident`. - pub fn span(&self) -> Span { - Span::_new(self.inner.span()) - } - - /// Configures the span of this `Ident`, possibly changing its hygiene - /// context. - pub fn set_span(&mut self, span: Span) { - self.inner.set_span(span.inner); - } -} - -impl PartialEq for Ident { - fn eq(&self, other: &Ident) -> bool { - self.inner == other.inner - } -} - -impl PartialEq for Ident -where - T: ?Sized + AsRef, -{ - fn eq(&self, other: &T) -> bool { - self.inner == other - } -} - -impl Eq for Ident {} - -impl PartialOrd for Ident { - fn partial_cmp(&self, other: &Ident) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for Ident { - fn cmp(&self, other: &Ident) -> Ordering { - self.to_string().cmp(&other.to_string()) - } -} - -impl Hash for Ident { - fn hash(&self, hasher: &mut H) { - self.to_string().hash(hasher) - } -} - -/// Prints the identifier as a string that should be losslessly convertible back -/// into the same identifier. -impl fmt::Display for Ident { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) - } -} - -impl fmt::Debug for Ident { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) - } -} - -/// A literal string (`"hello"`), byte string (`b"hello"`), character (`'a'`), -/// byte character (`b'a'`), an integer or floating point number with or without -/// a suffix (`1`, `1u8`, `2.3`, `2.3f32`). -/// -/// Boolean literals like `true` and `false` do not belong here, they are -/// `Ident`s. -#[derive(Clone)] -pub struct Literal { - inner: imp::Literal, - _marker: marker::PhantomData>, -} - -macro_rules! suffixed_int_literals { - ($($name:ident => $kind:ident,)*) => ($( - /// Creates a new suffixed integer literal with the specified value. - /// - /// This function will create an integer like `1u32` where the integer - /// value specified is the first part of the token and the integral is - /// also suffixed at the end. Literals created from negative numbers may - /// not survive rountrips through `TokenStream` or strings and may be - /// broken into two tokens (`-` and positive literal). 
- /// - /// Literals created through this method have the `Span::call_site()` - /// span by default, which can be configured with the `set_span` method - /// below. - pub fn $name(n: $kind) -> Literal { - Literal::_new(imp::Literal::$name(n)) - } - )*) -} - -macro_rules! unsuffixed_int_literals { - ($($name:ident => $kind:ident,)*) => ($( - /// Creates a new unsuffixed integer literal with the specified value. - /// - /// This function will create an integer like `1` where the integer - /// value specified is the first part of the token. No suffix is - /// specified on this token, meaning that invocations like - /// `Literal::i8_unsuffixed(1)` are equivalent to - /// `Literal::u32_unsuffixed(1)`. Literals created from negative numbers - /// may not survive rountrips through `TokenStream` or strings and may - /// be broken into two tokens (`-` and positive literal). - /// - /// Literals created through this method have the `Span::call_site()` - /// span by default, which can be configured with the `set_span` method - /// below. - pub fn $name(n: $kind) -> Literal { - Literal::_new(imp::Literal::$name(n)) - } - )*) -} - -impl Literal { - fn _new(inner: imp::Literal) -> Literal { - Literal { - inner: inner, - _marker: marker::PhantomData, - } - } - - fn _new_stable(inner: fallback::Literal) -> Literal { - Literal { - inner: inner.into(), - _marker: marker::PhantomData, - } - } - - suffixed_int_literals! { - u8_suffixed => u8, - u16_suffixed => u16, - u32_suffixed => u32, - u64_suffixed => u64, - usize_suffixed => usize, - i8_suffixed => i8, - i16_suffixed => i16, - i32_suffixed => i32, - i64_suffixed => i64, - isize_suffixed => isize, - } - - #[cfg(u128)] - suffixed_int_literals! { - u128_suffixed => u128, - i128_suffixed => i128, - } - - unsuffixed_int_literals! { - u8_unsuffixed => u8, - u16_unsuffixed => u16, - u32_unsuffixed => u32, - u64_unsuffixed => u64, - usize_unsuffixed => usize, - i8_unsuffixed => i8, - i16_unsuffixed => i16, - i32_unsuffixed => i32, - i64_unsuffixed => i64, - isize_unsuffixed => isize, - } - - #[cfg(u128)] - unsuffixed_int_literals! { - u128_unsuffixed => u128, - i128_unsuffixed => i128, - } - - pub fn f64_unsuffixed(f: f64) -> Literal { - assert!(f.is_finite()); - Literal::_new(imp::Literal::f64_unsuffixed(f)) - } - - pub fn f64_suffixed(f: f64) -> Literal { - assert!(f.is_finite()); - Literal::_new(imp::Literal::f64_suffixed(f)) - } - - /// Creates a new unsuffixed floating-point literal. - /// - /// This constructor is similar to those like `Literal::i8_unsuffixed` where - /// the float's value is emitted directly into the token but no suffix is - /// used, so it may be inferred to be a `f64` later in the compiler. - /// Literals created from negative numbers may not survive rountrips through - /// `TokenStream` or strings and may be broken into two tokens (`-` and - /// positive literal). - /// - /// # Panics - /// - /// This function requires that the specified float is finite, for example - /// if it is infinity or NaN this function will panic. 
- pub fn f32_unsuffixed(f: f32) -> Literal { - assert!(f.is_finite()); - Literal::_new(imp::Literal::f32_unsuffixed(f)) - } - - pub fn f32_suffixed(f: f32) -> Literal { - assert!(f.is_finite()); - Literal::_new(imp::Literal::f32_suffixed(f)) - } - - pub fn string(string: &str) -> Literal { - Literal::_new(imp::Literal::string(string)) - } - - pub fn character(ch: char) -> Literal { - Literal::_new(imp::Literal::character(ch)) - } - - pub fn byte_string(s: &[u8]) -> Literal { - Literal::_new(imp::Literal::byte_string(s)) - } - - pub fn span(&self) -> Span { - Span::_new(self.inner.span()) - } - - pub fn set_span(&mut self, span: Span) { - self.inner.set_span(span.inner); - } -} - -impl fmt::Debug for Literal { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) - } -} - -impl fmt::Display for Literal { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) - } -} - -/// Public implementation details for the `TokenStream` type, such as iterators. -pub mod token_stream { - use std::fmt; - use std::marker; - use std::rc::Rc; - - use imp; - pub use TokenStream; - use TokenTree; - - /// An iterator over `TokenStream`'s `TokenTree`s. - /// - /// The iteration is "shallow", e.g. the iterator doesn't recurse into - /// delimited groups, and returns whole groups as token trees. - pub struct IntoIter { - inner: imp::TokenTreeIter, - _marker: marker::PhantomData>, - } - - impl Iterator for IntoIter { - type Item = TokenTree; - - fn next(&mut self) -> Option { - self.inner.next() - } - } - - impl fmt::Debug for IntoIter { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.inner.fmt(f) - } - } - - impl IntoIterator for TokenStream { - type Item = TokenTree; - type IntoIter = IntoIter; - - fn into_iter(self) -> IntoIter { - IntoIter { - inner: self.inner.into_iter(), - _marker: marker::PhantomData, - } - } - } -} diff --git a/third_party/rust/proc-macro2-0.4.27/src/strnom.rs b/third_party/rust/proc-macro2-0.4.27/src/strnom.rs deleted file mode 100644 index 96789d5691..0000000000 --- a/third_party/rust/proc-macro2-0.4.27/src/strnom.rs +++ /dev/null @@ -1,393 +0,0 @@ -//! Adapted from [`nom`](https://github.com/Geal/nom). 
- -use std::str::{Bytes, CharIndices, Chars}; - -use unicode_xid::UnicodeXID; - -use fallback::LexError; - -#[derive(Copy, Clone, Eq, PartialEq)] -pub struct Cursor<'a> { - pub rest: &'a str, - #[cfg(span_locations)] - pub off: u32, -} - -impl<'a> Cursor<'a> { - #[cfg(not(span_locations))] - pub fn advance(&self, amt: usize) -> Cursor<'a> { - Cursor { - rest: &self.rest[amt..], - } - } - #[cfg(span_locations)] - pub fn advance(&self, amt: usize) -> Cursor<'a> { - Cursor { - rest: &self.rest[amt..], - off: self.off + (amt as u32), - } - } - - pub fn find(&self, p: char) -> Option { - self.rest.find(p) - } - - pub fn starts_with(&self, s: &str) -> bool { - self.rest.starts_with(s) - } - - pub fn is_empty(&self) -> bool { - self.rest.is_empty() - } - - pub fn len(&self) -> usize { - self.rest.len() - } - - pub fn as_bytes(&self) -> &'a [u8] { - self.rest.as_bytes() - } - - pub fn bytes(&self) -> Bytes<'a> { - self.rest.bytes() - } - - pub fn chars(&self) -> Chars<'a> { - self.rest.chars() - } - - pub fn char_indices(&self) -> CharIndices<'a> { - self.rest.char_indices() - } -} - -pub type PResult<'a, O> = Result<(Cursor<'a>, O), LexError>; - -pub fn whitespace(input: Cursor) -> PResult<()> { - if input.is_empty() { - return Err(LexError); - } - - let bytes = input.as_bytes(); - let mut i = 0; - while i < bytes.len() { - let s = input.advance(i); - if bytes[i] == b'/' { - if s.starts_with("//") - && (!s.starts_with("///") || s.starts_with("////")) - && !s.starts_with("//!") - { - if let Some(len) = s.find('\n') { - i += len + 1; - continue; - } - break; - } else if s.starts_with("/**/") { - i += 4; - continue; - } else if s.starts_with("/*") - && (!s.starts_with("/**") || s.starts_with("/***")) - && !s.starts_with("/*!") - { - let (_, com) = block_comment(s)?; - i += com.len(); - continue; - } - } - match bytes[i] { - b' ' | 0x09...0x0d => { - i += 1; - continue; - } - b if b <= 0x7f => {} - _ => { - let ch = s.chars().next().unwrap(); - if is_whitespace(ch) { - i += ch.len_utf8(); - continue; - } - } - } - return if i > 0 { Ok((s, ())) } else { Err(LexError) }; - } - Ok((input.advance(input.len()), ())) -} - -pub fn block_comment(input: Cursor) -> PResult<&str> { - if !input.starts_with("/*") { - return Err(LexError); - } - - let mut depth = 0; - let bytes = input.as_bytes(); - let mut i = 0; - let upper = bytes.len() - 1; - while i < upper { - if bytes[i] == b'/' && bytes[i + 1] == b'*' { - depth += 1; - i += 1; // eat '*' - } else if bytes[i] == b'*' && bytes[i + 1] == b'/' { - depth -= 1; - if depth == 0 { - return Ok((input.advance(i + 2), &input.rest[..i + 2])); - } - i += 1; // eat '/' - } - i += 1; - } - Err(LexError) -} - -pub fn skip_whitespace(input: Cursor) -> Cursor { - match whitespace(input) { - Ok((rest, _)) => rest, - Err(LexError) => input, - } -} - -fn is_whitespace(ch: char) -> bool { - // Rust treats left-to-right mark and right-to-left mark as whitespace - ch.is_whitespace() || ch == '\u{200e}' || ch == '\u{200f}' -} - -pub fn word_break(input: Cursor) -> PResult<()> { - match input.chars().next() { - Some(ch) if UnicodeXID::is_xid_continue(ch) => Err(LexError), - Some(_) | None => Ok((input, ())), - } -} - -macro_rules! named { - ($name:ident -> $o:ty, $submac:ident!( $($args:tt)* )) => { - fn $name<'a>(i: Cursor<'a>) -> $crate::strnom::PResult<'a, $o> { - $submac!(i, $($args)*) - } - }; -} - -macro_rules! 
alt { - ($i:expr, $e:ident | $($rest:tt)*) => { - alt!($i, call!($e) | $($rest)*) - }; - - ($i:expr, $subrule:ident!( $($args:tt)*) | $($rest:tt)*) => { - match $subrule!($i, $($args)*) { - res @ Ok(_) => res, - _ => alt!($i, $($rest)*) - } - }; - - ($i:expr, $subrule:ident!( $($args:tt)* ) => { $gen:expr } | $($rest:tt)+) => { - match $subrule!($i, $($args)*) { - Ok((i, o)) => Ok((i, $gen(o))), - Err(LexError) => alt!($i, $($rest)*) - } - }; - - ($i:expr, $e:ident => { $gen:expr } | $($rest:tt)*) => { - alt!($i, call!($e) => { $gen } | $($rest)*) - }; - - ($i:expr, $e:ident => { $gen:expr }) => { - alt!($i, call!($e) => { $gen }) - }; - - ($i:expr, $subrule:ident!( $($args:tt)* ) => { $gen:expr }) => { - match $subrule!($i, $($args)*) { - Ok((i, o)) => Ok((i, $gen(o))), - Err(LexError) => Err(LexError), - } - }; - - ($i:expr, $e:ident) => { - alt!($i, call!($e)) - }; - - ($i:expr, $subrule:ident!( $($args:tt)*)) => { - $subrule!($i, $($args)*) - }; -} - -macro_rules! do_parse { - ($i:expr, ( $($rest:expr),* )) => { - Ok(($i, ( $($rest),* ))) - }; - - ($i:expr, $e:ident >> $($rest:tt)*) => { - do_parse!($i, call!($e) >> $($rest)*) - }; - - ($i:expr, $submac:ident!( $($args:tt)* ) >> $($rest:tt)*) => { - match $submac!($i, $($args)*) { - Err(LexError) => Err(LexError), - Ok((i, _)) => do_parse!(i, $($rest)*), - } - }; - - ($i:expr, $field:ident : $e:ident >> $($rest:tt)*) => { - do_parse!($i, $field: call!($e) >> $($rest)*) - }; - - ($i:expr, $field:ident : $submac:ident!( $($args:tt)* ) >> $($rest:tt)*) => { - match $submac!($i, $($args)*) { - Err(LexError) => Err(LexError), - Ok((i, o)) => { - let $field = o; - do_parse!(i, $($rest)*) - }, - } - }; -} - -macro_rules! peek { - ($i:expr, $submac:ident!( $($args:tt)* )) => { - match $submac!($i, $($args)*) { - Ok((_, o)) => Ok(($i, o)), - Err(LexError) => Err(LexError), - } - }; -} - -macro_rules! call { - ($i:expr, $fun:expr $(, $args:expr)*) => { - $fun($i $(, $args)*) - }; -} - -macro_rules! option { - ($i:expr, $f:expr) => { - match $f($i) { - Ok((i, o)) => Ok((i, Some(o))), - Err(LexError) => Ok(($i, None)), - } - }; -} - -macro_rules! take_until_newline_or_eof { - ($i:expr,) => {{ - if $i.len() == 0 { - Ok(($i, "")) - } else { - match $i.find('\n') { - Some(i) => Ok(($i.advance(i), &$i.rest[..i])), - None => Ok(($i.advance($i.len()), &$i.rest[..$i.len()])), - } - } - }}; -} - -macro_rules! tuple { - ($i:expr, $($rest:tt)*) => { - tuple_parser!($i, (), $($rest)*) - }; -} - -/// Do not use directly. Use `tuple!`. -macro_rules! tuple_parser { - ($i:expr, ($($parsed:tt),*), $e:ident, $($rest:tt)*) => { - tuple_parser!($i, ($($parsed),*), call!($e), $($rest)*) - }; - - ($i:expr, (), $submac:ident!( $($args:tt)* ), $($rest:tt)*) => { - match $submac!($i, $($args)*) { - Err(LexError) => Err(LexError), - Ok((i, o)) => tuple_parser!(i, (o), $($rest)*), - } - }; - - ($i:expr, ($($parsed:tt)*), $submac:ident!( $($args:tt)* ), $($rest:tt)*) => { - match $submac!($i, $($args)*) { - Err(LexError) => Err(LexError), - Ok((i, o)) => tuple_parser!(i, ($($parsed)* , o), $($rest)*), - } - }; - - ($i:expr, ($($parsed:tt),*), $e:ident) => { - tuple_parser!($i, ($($parsed),*), call!($e)) - }; - - ($i:expr, (), $submac:ident!( $($args:tt)* )) => { - $submac!($i, $($args)*) - }; - - ($i:expr, ($($parsed:expr),*), $submac:ident!( $($args:tt)* )) => { - match $submac!($i, $($args)*) { - Err(LexError) => Err(LexError), - Ok((i, o)) => Ok((i, ($($parsed),*, o))) - } - }; - - ($i:expr, ($($parsed:expr),*)) => { - Ok(($i, ($($parsed),*))) - }; -} - -macro_rules! 
not { - ($i:expr, $submac:ident!( $($args:tt)* )) => { - match $submac!($i, $($args)*) { - Ok((_, _)) => Err(LexError), - Err(LexError) => Ok(($i, ())), - } - }; -} - -macro_rules! tag { - ($i:expr, $tag:expr) => { - if $i.starts_with($tag) { - Ok(($i.advance($tag.len()), &$i.rest[..$tag.len()])) - } else { - Err(LexError) - } - }; -} - -macro_rules! punct { - ($i:expr, $punct:expr) => { - $crate::strnom::punct($i, $punct) - }; -} - -/// Do not use directly. Use `punct!`. -pub fn punct<'a>(input: Cursor<'a>, token: &'static str) -> PResult<'a, &'a str> { - let input = skip_whitespace(input); - if input.starts_with(token) { - Ok((input.advance(token.len()), token)) - } else { - Err(LexError) - } -} - -macro_rules! preceded { - ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => { - match tuple!($i, $submac!($($args)*), $submac2!($($args2)*)) { - Ok((remaining, (_, o))) => Ok((remaining, o)), - Err(LexError) => Err(LexError), - } - }; - - ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => { - preceded!($i, $submac!($($args)*), call!($g)) - }; -} - -macro_rules! delimited { - ($i:expr, $submac:ident!( $($args:tt)* ), $($rest:tt)+) => { - match tuple_parser!($i, (), $submac!($($args)*), $($rest)*) { - Err(LexError) => Err(LexError), - Ok((i1, (_, o, _))) => Ok((i1, o)) - } - }; -} - -macro_rules! map { - ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => { - match $submac!($i, $($args)*) { - Err(LexError) => Err(LexError), - Ok((i, o)) => Ok((i, call!(o, $g))) - } - }; - - ($i:expr, $f:expr, $g:expr) => { - map!($i, call!($f), $g) - }; -} diff --git a/third_party/rust/proc-macro2-0.4.27/src/wrapper.rs b/third_party/rust/proc-macro2-0.4.27/src/wrapper.rs deleted file mode 100644 index c45dff89d7..0000000000 --- a/third_party/rust/proc-macro2-0.4.27/src/wrapper.rs +++ /dev/null @@ -1,926 +0,0 @@ -use std::fmt; -use std::iter; -use std::panic::{self, PanicInfo}; -#[cfg(super_unstable)] -use std::path::PathBuf; -use std::str::FromStr; - -use fallback; -use proc_macro; - -use {Delimiter, Punct, Spacing, TokenTree}; - -#[derive(Clone)] -pub enum TokenStream { - Compiler(proc_macro::TokenStream), - Fallback(fallback::TokenStream), -} - -pub enum LexError { - Compiler(proc_macro::LexError), - Fallback(fallback::LexError), -} - -fn nightly_works() -> bool { - use std::sync::atomic::*; - use std::sync::Once; - - static WORKS: AtomicUsize = ATOMIC_USIZE_INIT; - static INIT: Once = Once::new(); - - match WORKS.load(Ordering::SeqCst) { - 1 => return false, - 2 => return true, - _ => {} - } - - // Swap in a null panic hook to avoid printing "thread panicked" to stderr, - // then use catch_unwind to determine whether the compiler's proc_macro is - // working. When proc-macro2 is used from outside of a procedural macro all - // of the proc_macro crate's APIs currently panic. - // - // The Once is to prevent the possibility of this ordering: - // - // thread 1 calls take_hook, gets the user's original hook - // thread 1 calls set_hook with the null hook - // thread 2 calls take_hook, thinks null hook is the original hook - // thread 2 calls set_hook with the null hook - // thread 1 calls set_hook with the actual original hook - // thread 2 calls set_hook with what it thinks is the original hook - // - // in which the user's hook has been lost. - // - // There is still a race condition where a panic in a different thread can - // happen during the interval that the user's original panic hook is - // unregistered such that their hook is incorrectly not called. 
This is - // sufficiently unlikely and less bad than printing panic messages to stderr - // on correct use of this crate. Maybe there is a libstd feature request - // here. For now, if a user needs to guarantee that this failure mode does - // not occur, they need to call e.g. `proc_macro2::Span::call_site()` from - // the main thread before launching any other threads. - INIT.call_once(|| { - type PanicHook = Fn(&PanicInfo) + Sync + Send + 'static; - - let null_hook: Box = Box::new(|_panic_info| { /* ignore */ }); - let sanity_check = &*null_hook as *const PanicHook; - let original_hook = panic::take_hook(); - panic::set_hook(null_hook); - - let works = panic::catch_unwind(|| proc_macro::Span::call_site()).is_ok(); - WORKS.store(works as usize + 1, Ordering::SeqCst); - - let hopefully_null_hook = panic::take_hook(); - panic::set_hook(original_hook); - if sanity_check != &*hopefully_null_hook { - panic!("observed race condition in proc_macro2::nightly_works"); - } - }); - nightly_works() -} - -fn mismatch() -> ! { - panic!("stable/nightly mismatch") -} - -impl TokenStream { - pub fn new() -> TokenStream { - if nightly_works() { - TokenStream::Compiler(proc_macro::TokenStream::new()) - } else { - TokenStream::Fallback(fallback::TokenStream::new()) - } - } - - pub fn is_empty(&self) -> bool { - match self { - TokenStream::Compiler(tts) => tts.is_empty(), - TokenStream::Fallback(tts) => tts.is_empty(), - } - } - - fn unwrap_nightly(self) -> proc_macro::TokenStream { - match self { - TokenStream::Compiler(s) => s, - TokenStream::Fallback(_) => mismatch(), - } - } - - fn unwrap_stable(self) -> fallback::TokenStream { - match self { - TokenStream::Compiler(_) => mismatch(), - TokenStream::Fallback(s) => s, - } - } -} - -impl FromStr for TokenStream { - type Err = LexError; - - fn from_str(src: &str) -> Result { - if nightly_works() { - Ok(TokenStream::Compiler(src.parse()?)) - } else { - Ok(TokenStream::Fallback(src.parse()?)) - } - } -} - -impl fmt::Display for TokenStream { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - TokenStream::Compiler(tts) => tts.fmt(f), - TokenStream::Fallback(tts) => tts.fmt(f), - } - } -} - -impl From for TokenStream { - fn from(inner: proc_macro::TokenStream) -> TokenStream { - TokenStream::Compiler(inner) - } -} - -impl From for proc_macro::TokenStream { - fn from(inner: TokenStream) -> proc_macro::TokenStream { - match inner { - TokenStream::Compiler(inner) => inner, - TokenStream::Fallback(inner) => inner.to_string().parse().unwrap(), - } - } -} - -impl From for TokenStream { - fn from(inner: fallback::TokenStream) -> TokenStream { - TokenStream::Fallback(inner) - } -} - -impl From for TokenStream { - fn from(token: TokenTree) -> TokenStream { - if !nightly_works() { - return TokenStream::Fallback(token.into()); - } - let tt: proc_macro::TokenTree = match token { - TokenTree::Group(tt) => tt.inner.unwrap_nightly().into(), - TokenTree::Punct(tt) => { - let spacing = match tt.spacing() { - Spacing::Joint => proc_macro::Spacing::Joint, - Spacing::Alone => proc_macro::Spacing::Alone, - }; - let mut op = proc_macro::Punct::new(tt.as_char(), spacing); - op.set_span(tt.span().inner.unwrap_nightly()); - op.into() - } - TokenTree::Ident(tt) => tt.inner.unwrap_nightly().into(), - TokenTree::Literal(tt) => tt.inner.unwrap_nightly().into(), - }; - TokenStream::Compiler(tt.into()) - } -} - -impl iter::FromIterator for TokenStream { - fn from_iter>(trees: I) -> Self { - if nightly_works() { - let trees = trees - .into_iter() - .map(TokenStream::from) 
- .flat_map(|t| match t { - TokenStream::Compiler(s) => s, - TokenStream::Fallback(_) => mismatch(), - }); - TokenStream::Compiler(trees.collect()) - } else { - TokenStream::Fallback(trees.into_iter().collect()) - } - } -} - -impl iter::FromIterator for TokenStream { - fn from_iter>(streams: I) -> Self { - let mut streams = streams.into_iter(); - match streams.next() { - #[cfg(slow_extend)] - Some(TokenStream::Compiler(first)) => { - let stream = iter::once(first) - .chain(streams.map(|s| match s { - TokenStream::Compiler(s) => s, - TokenStream::Fallback(_) => mismatch(), - })) - .collect(); - TokenStream::Compiler(stream) - } - #[cfg(not(slow_extend))] - Some(TokenStream::Compiler(mut first)) => { - first.extend(streams.map(|s| match s { - TokenStream::Compiler(s) => s, - TokenStream::Fallback(_) => mismatch(), - })); - TokenStream::Compiler(first) - } - Some(TokenStream::Fallback(mut first)) => { - first.extend(streams.map(|s| match s { - TokenStream::Fallback(s) => s, - TokenStream::Compiler(_) => mismatch(), - })); - TokenStream::Fallback(first) - } - None => TokenStream::new(), - } - } -} - -impl Extend for TokenStream { - fn extend>(&mut self, streams: I) { - match self { - TokenStream::Compiler(tts) => { - #[cfg(not(slow_extend))] - { - tts.extend( - streams - .into_iter() - .map(|t| TokenStream::from(t).unwrap_nightly()), - ); - } - #[cfg(slow_extend)] - { - *tts = - tts.clone() - .into_iter() - .chain(streams.into_iter().map(TokenStream::from).flat_map( - |t| match t { - TokenStream::Compiler(tts) => tts.into_iter(), - _ => mismatch(), - }, - )) - .collect(); - } - } - TokenStream::Fallback(tts) => tts.extend(streams), - } - } -} - -impl Extend for TokenStream { - fn extend>(&mut self, streams: I) { - match self { - TokenStream::Compiler(tts) => { - #[cfg(not(slow_extend))] - { - tts.extend(streams.into_iter().map(|stream| stream.unwrap_nightly())); - } - #[cfg(slow_extend)] - { - *tts = tts - .clone() - .into_iter() - .chain(streams.into_iter().flat_map(|t| match t { - TokenStream::Compiler(tts) => tts.into_iter(), - _ => mismatch(), - })) - .collect(); - } - } - TokenStream::Fallback(tts) => { - tts.extend(streams.into_iter().map(|stream| stream.unwrap_stable())) - } - } - } -} - -impl fmt::Debug for TokenStream { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - TokenStream::Compiler(tts) => tts.fmt(f), - TokenStream::Fallback(tts) => tts.fmt(f), - } - } -} - -impl From for LexError { - fn from(e: proc_macro::LexError) -> LexError { - LexError::Compiler(e) - } -} - -impl From for LexError { - fn from(e: fallback::LexError) -> LexError { - LexError::Fallback(e) - } -} - -impl fmt::Debug for LexError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - LexError::Compiler(e) => e.fmt(f), - LexError::Fallback(e) => e.fmt(f), - } - } -} - -pub enum TokenTreeIter { - Compiler(proc_macro::token_stream::IntoIter), - Fallback(fallback::TokenTreeIter), -} - -impl IntoIterator for TokenStream { - type Item = TokenTree; - type IntoIter = TokenTreeIter; - - fn into_iter(self) -> TokenTreeIter { - match self { - TokenStream::Compiler(tts) => TokenTreeIter::Compiler(tts.into_iter()), - TokenStream::Fallback(tts) => TokenTreeIter::Fallback(tts.into_iter()), - } - } -} - -impl Iterator for TokenTreeIter { - type Item = TokenTree; - - fn next(&mut self) -> Option { - let token = match self { - TokenTreeIter::Compiler(iter) => iter.next()?, - TokenTreeIter::Fallback(iter) => return iter.next(), - }; - Some(match token { - 
proc_macro::TokenTree::Group(tt) => ::Group::_new(Group::Compiler(tt)).into(), - proc_macro::TokenTree::Punct(tt) => { - let spacing = match tt.spacing() { - proc_macro::Spacing::Joint => Spacing::Joint, - proc_macro::Spacing::Alone => Spacing::Alone, - }; - let mut o = Punct::new(tt.as_char(), spacing); - o.set_span(::Span::_new(Span::Compiler(tt.span()))); - o.into() - } - proc_macro::TokenTree::Ident(s) => ::Ident::_new(Ident::Compiler(s)).into(), - proc_macro::TokenTree::Literal(l) => ::Literal::_new(Literal::Compiler(l)).into(), - }) - } - - fn size_hint(&self) -> (usize, Option) { - match self { - TokenTreeIter::Compiler(tts) => tts.size_hint(), - TokenTreeIter::Fallback(tts) => tts.size_hint(), - } - } -} - -impl fmt::Debug for TokenTreeIter { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("TokenTreeIter").finish() - } -} - -#[derive(Clone, PartialEq, Eq)] -#[cfg(super_unstable)] -pub enum SourceFile { - Compiler(proc_macro::SourceFile), - Fallback(fallback::SourceFile), -} - -#[cfg(super_unstable)] -impl SourceFile { - fn nightly(sf: proc_macro::SourceFile) -> Self { - SourceFile::Compiler(sf) - } - - /// Get the path to this source file as a string. - pub fn path(&self) -> PathBuf { - match self { - SourceFile::Compiler(a) => a.path(), - SourceFile::Fallback(a) => a.path(), - } - } - - pub fn is_real(&self) -> bool { - match self { - SourceFile::Compiler(a) => a.is_real(), - SourceFile::Fallback(a) => a.is_real(), - } - } -} - -#[cfg(super_unstable)] -impl fmt::Debug for SourceFile { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - SourceFile::Compiler(a) => a.fmt(f), - SourceFile::Fallback(a) => a.fmt(f), - } - } -} - -#[cfg(any(super_unstable, feature = "span-locations"))] -pub struct LineColumn { - pub line: usize, - pub column: usize, -} - -#[derive(Copy, Clone)] -pub enum Span { - Compiler(proc_macro::Span), - Fallback(fallback::Span), -} - -impl Span { - pub fn call_site() -> Span { - if nightly_works() { - Span::Compiler(proc_macro::Span::call_site()) - } else { - Span::Fallback(fallback::Span::call_site()) - } - } - - #[cfg(super_unstable)] - pub fn def_site() -> Span { - if nightly_works() { - Span::Compiler(proc_macro::Span::def_site()) - } else { - Span::Fallback(fallback::Span::def_site()) - } - } - - #[cfg(super_unstable)] - pub fn resolved_at(&self, other: Span) -> Span { - match (self, other) { - (Span::Compiler(a), Span::Compiler(b)) => Span::Compiler(a.resolved_at(b)), - (Span::Fallback(a), Span::Fallback(b)) => Span::Fallback(a.resolved_at(b)), - _ => mismatch(), - } - } - - #[cfg(super_unstable)] - pub fn located_at(&self, other: Span) -> Span { - match (self, other) { - (Span::Compiler(a), Span::Compiler(b)) => Span::Compiler(a.located_at(b)), - (Span::Fallback(a), Span::Fallback(b)) => Span::Fallback(a.located_at(b)), - _ => mismatch(), - } - } - - pub fn unwrap(self) -> proc_macro::Span { - match self { - Span::Compiler(s) => s, - Span::Fallback(_) => panic!("proc_macro::Span is only available in procedural macros"), - } - } - - #[cfg(super_unstable)] - pub fn source_file(&self) -> SourceFile { - match self { - Span::Compiler(s) => SourceFile::nightly(s.source_file()), - Span::Fallback(s) => SourceFile::Fallback(s.source_file()), - } - } - - #[cfg(any(super_unstable, feature = "span-locations"))] - pub fn start(&self) -> LineColumn { - match self { - #[cfg(nightly)] - Span::Compiler(s) => { - let proc_macro::LineColumn { line, column } = s.start(); - LineColumn { line, column } - } - #[cfg(not(nightly))] - 
Span::Compiler(_) => LineColumn { line: 0, column: 0 }, - Span::Fallback(s) => { - let fallback::LineColumn { line, column } = s.start(); - LineColumn { line, column } - } - } - } - - #[cfg(any(super_unstable, feature = "span-locations"))] - pub fn end(&self) -> LineColumn { - match self { - #[cfg(nightly)] - Span::Compiler(s) => { - let proc_macro::LineColumn { line, column } = s.end(); - LineColumn { line, column } - } - #[cfg(not(nightly))] - Span::Compiler(_) => LineColumn { line: 0, column: 0 }, - Span::Fallback(s) => { - let fallback::LineColumn { line, column } = s.end(); - LineColumn { line, column } - } - } - } - - #[cfg(super_unstable)] - pub fn join(&self, other: Span) -> Option { - let ret = match (self, other) { - (Span::Compiler(a), Span::Compiler(b)) => Span::Compiler(a.join(b)?), - (Span::Fallback(a), Span::Fallback(b)) => Span::Fallback(a.join(b)?), - _ => return None, - }; - Some(ret) - } - - #[cfg(super_unstable)] - pub fn eq(&self, other: &Span) -> bool { - match (self, other) { - (Span::Compiler(a), Span::Compiler(b)) => a.eq(b), - (Span::Fallback(a), Span::Fallback(b)) => a.eq(b), - _ => false, - } - } - - fn unwrap_nightly(self) -> proc_macro::Span { - match self { - Span::Compiler(s) => s, - Span::Fallback(_) => mismatch(), - } - } -} - -impl From for ::Span { - fn from(proc_span: proc_macro::Span) -> ::Span { - ::Span::_new(Span::Compiler(proc_span)) - } -} - -impl From for Span { - fn from(inner: fallback::Span) -> Span { - Span::Fallback(inner) - } -} - -impl fmt::Debug for Span { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Span::Compiler(s) => s.fmt(f), - Span::Fallback(s) => s.fmt(f), - } - } -} - -pub fn debug_span_field_if_nontrivial(debug: &mut fmt::DebugStruct, span: Span) { - match span { - Span::Compiler(s) => { - debug.field("span", &s); - } - Span::Fallback(s) => fallback::debug_span_field_if_nontrivial(debug, s), - } -} - -#[derive(Clone)] -pub enum Group { - Compiler(proc_macro::Group), - Fallback(fallback::Group), -} - -impl Group { - pub fn new(delimiter: Delimiter, stream: TokenStream) -> Group { - match stream { - TokenStream::Compiler(stream) => { - let delimiter = match delimiter { - Delimiter::Parenthesis => proc_macro::Delimiter::Parenthesis, - Delimiter::Bracket => proc_macro::Delimiter::Bracket, - Delimiter::Brace => proc_macro::Delimiter::Brace, - Delimiter::None => proc_macro::Delimiter::None, - }; - Group::Compiler(proc_macro::Group::new(delimiter, stream)) - } - TokenStream::Fallback(stream) => { - Group::Fallback(fallback::Group::new(delimiter, stream)) - } - } - } - - pub fn delimiter(&self) -> Delimiter { - match self { - Group::Compiler(g) => match g.delimiter() { - proc_macro::Delimiter::Parenthesis => Delimiter::Parenthesis, - proc_macro::Delimiter::Bracket => Delimiter::Bracket, - proc_macro::Delimiter::Brace => Delimiter::Brace, - proc_macro::Delimiter::None => Delimiter::None, - }, - Group::Fallback(g) => g.delimiter(), - } - } - - pub fn stream(&self) -> TokenStream { - match self { - Group::Compiler(g) => TokenStream::Compiler(g.stream()), - Group::Fallback(g) => TokenStream::Fallback(g.stream()), - } - } - - pub fn span(&self) -> Span { - match self { - Group::Compiler(g) => Span::Compiler(g.span()), - Group::Fallback(g) => Span::Fallback(g.span()), - } - } - - #[cfg(super_unstable)] - pub fn span_open(&self) -> Span { - match self { - Group::Compiler(g) => Span::Compiler(g.span_open()), - Group::Fallback(g) => Span::Fallback(g.span_open()), - } - } - - #[cfg(super_unstable)] - pub fn 
span_close(&self) -> Span { - match self { - Group::Compiler(g) => Span::Compiler(g.span_close()), - Group::Fallback(g) => Span::Fallback(g.span_close()), - } - } - - pub fn set_span(&mut self, span: Span) { - match (self, span) { - (Group::Compiler(g), Span::Compiler(s)) => g.set_span(s), - (Group::Fallback(g), Span::Fallback(s)) => g.set_span(s), - _ => mismatch(), - } - } - - fn unwrap_nightly(self) -> proc_macro::Group { - match self { - Group::Compiler(g) => g, - Group::Fallback(_) => mismatch(), - } - } -} - -impl From for Group { - fn from(g: fallback::Group) -> Self { - Group::Fallback(g) - } -} - -impl fmt::Display for Group { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match self { - Group::Compiler(group) => group.fmt(formatter), - Group::Fallback(group) => group.fmt(formatter), - } - } -} - -impl fmt::Debug for Group { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match self { - Group::Compiler(group) => group.fmt(formatter), - Group::Fallback(group) => group.fmt(formatter), - } - } -} - -#[derive(Clone)] -pub enum Ident { - Compiler(proc_macro::Ident), - Fallback(fallback::Ident), -} - -impl Ident { - pub fn new(string: &str, span: Span) -> Ident { - match span { - Span::Compiler(s) => Ident::Compiler(proc_macro::Ident::new(string, s)), - Span::Fallback(s) => Ident::Fallback(fallback::Ident::new(string, s)), - } - } - - pub fn new_raw(string: &str, span: Span) -> Ident { - match span { - Span::Compiler(s) => { - let p: proc_macro::TokenStream = string.parse().unwrap(); - let ident = match p.into_iter().next() { - Some(proc_macro::TokenTree::Ident(mut i)) => { - i.set_span(s); - i - } - _ => panic!(), - }; - Ident::Compiler(ident) - } - Span::Fallback(s) => Ident::Fallback(fallback::Ident::new_raw(string, s)), - } - } - - pub fn span(&self) -> Span { - match self { - Ident::Compiler(t) => Span::Compiler(t.span()), - Ident::Fallback(t) => Span::Fallback(t.span()), - } - } - - pub fn set_span(&mut self, span: Span) { - match (self, span) { - (Ident::Compiler(t), Span::Compiler(s)) => t.set_span(s), - (Ident::Fallback(t), Span::Fallback(s)) => t.set_span(s), - _ => mismatch(), - } - } - - fn unwrap_nightly(self) -> proc_macro::Ident { - match self { - Ident::Compiler(s) => s, - Ident::Fallback(_) => mismatch(), - } - } -} - -impl PartialEq for Ident { - fn eq(&self, other: &Ident) -> bool { - match (self, other) { - (Ident::Compiler(t), Ident::Compiler(o)) => t.to_string() == o.to_string(), - (Ident::Fallback(t), Ident::Fallback(o)) => t == o, - _ => mismatch(), - } - } -} - -impl PartialEq for Ident -where - T: ?Sized + AsRef, -{ - fn eq(&self, other: &T) -> bool { - let other = other.as_ref(); - match self { - Ident::Compiler(t) => t.to_string() == other, - Ident::Fallback(t) => t == other, - } - } -} - -impl fmt::Display for Ident { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Ident::Compiler(t) => t.fmt(f), - Ident::Fallback(t) => t.fmt(f), - } - } -} - -impl fmt::Debug for Ident { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Ident::Compiler(t) => t.fmt(f), - Ident::Fallback(t) => t.fmt(f), - } - } -} - -#[derive(Clone)] -pub enum Literal { - Compiler(proc_macro::Literal), - Fallback(fallback::Literal), -} - -macro_rules! 
suffixed_numbers { - ($($name:ident => $kind:ident,)*) => ($( - pub fn $name(n: $kind) -> Literal { - if nightly_works() { - Literal::Compiler(proc_macro::Literal::$name(n)) - } else { - Literal::Fallback(fallback::Literal::$name(n)) - } - } - )*) -} - -macro_rules! unsuffixed_integers { - ($($name:ident => $kind:ident,)*) => ($( - pub fn $name(n: $kind) -> Literal { - if nightly_works() { - Literal::Compiler(proc_macro::Literal::$name(n)) - } else { - Literal::Fallback(fallback::Literal::$name(n)) - } - } - )*) -} - -impl Literal { - suffixed_numbers! { - u8_suffixed => u8, - u16_suffixed => u16, - u32_suffixed => u32, - u64_suffixed => u64, - usize_suffixed => usize, - i8_suffixed => i8, - i16_suffixed => i16, - i32_suffixed => i32, - i64_suffixed => i64, - isize_suffixed => isize, - - f32_suffixed => f32, - f64_suffixed => f64, - } - - #[cfg(u128)] - suffixed_numbers! { - i128_suffixed => i128, - u128_suffixed => u128, - } - - unsuffixed_integers! { - u8_unsuffixed => u8, - u16_unsuffixed => u16, - u32_unsuffixed => u32, - u64_unsuffixed => u64, - usize_unsuffixed => usize, - i8_unsuffixed => i8, - i16_unsuffixed => i16, - i32_unsuffixed => i32, - i64_unsuffixed => i64, - isize_unsuffixed => isize, - } - - #[cfg(u128)] - unsuffixed_integers! { - i128_unsuffixed => i128, - u128_unsuffixed => u128, - } - - pub fn f32_unsuffixed(f: f32) -> Literal { - if nightly_works() { - Literal::Compiler(proc_macro::Literal::f32_unsuffixed(f)) - } else { - Literal::Fallback(fallback::Literal::f32_unsuffixed(f)) - } - } - - pub fn f64_unsuffixed(f: f64) -> Literal { - if nightly_works() { - Literal::Compiler(proc_macro::Literal::f64_unsuffixed(f)) - } else { - Literal::Fallback(fallback::Literal::f64_unsuffixed(f)) - } - } - - pub fn string(t: &str) -> Literal { - if nightly_works() { - Literal::Compiler(proc_macro::Literal::string(t)) - } else { - Literal::Fallback(fallback::Literal::string(t)) - } - } - - pub fn character(t: char) -> Literal { - if nightly_works() { - Literal::Compiler(proc_macro::Literal::character(t)) - } else { - Literal::Fallback(fallback::Literal::character(t)) - } - } - - pub fn byte_string(bytes: &[u8]) -> Literal { - if nightly_works() { - Literal::Compiler(proc_macro::Literal::byte_string(bytes)) - } else { - Literal::Fallback(fallback::Literal::byte_string(bytes)) - } - } - - pub fn span(&self) -> Span { - match self { - Literal::Compiler(lit) => Span::Compiler(lit.span()), - Literal::Fallback(lit) => Span::Fallback(lit.span()), - } - } - - pub fn set_span(&mut self, span: Span) { - match (self, span) { - (Literal::Compiler(lit), Span::Compiler(s)) => lit.set_span(s), - (Literal::Fallback(lit), Span::Fallback(s)) => lit.set_span(s), - _ => mismatch(), - } - } - - fn unwrap_nightly(self) -> proc_macro::Literal { - match self { - Literal::Compiler(s) => s, - Literal::Fallback(_) => mismatch(), - } - } -} - -impl From for Literal { - fn from(s: fallback::Literal) -> Literal { - Literal::Fallback(s) - } -} - -impl fmt::Display for Literal { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Literal::Compiler(t) => t.fmt(f), - Literal::Fallback(t) => t.fmt(f), - } - } -} - -impl fmt::Debug for Literal { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Literal::Compiler(t) => t.fmt(f), - Literal::Fallback(t) => t.fmt(f), - } - } -} diff --git a/third_party/rust/proc-macro2-0.4.27/tests/marker.rs b/third_party/rust/proc-macro2-0.4.27/tests/marker.rs deleted file mode 100644 index 7bb5027621..0000000000 --- 
a/third_party/rust/proc-macro2-0.4.27/tests/marker.rs +++ /dev/null @@ -1,61 +0,0 @@ -extern crate proc_macro2; - -use proc_macro2::*; - -macro_rules! assert_impl { - ($ty:ident is $($marker:ident) and +) => { - #[test] - #[allow(non_snake_case)] - fn $ty() { - fn assert_implemented() {} - assert_implemented::<$ty>(); - } - }; - - ($ty:ident is not $($marker:ident) or +) => { - #[test] - #[allow(non_snake_case)] - fn $ty() { - $( - { - // Implemented for types that implement $marker. - trait IsNotImplemented { - fn assert_not_implemented() {} - } - impl IsNotImplemented for T {} - - // Implemented for the type being tested. - trait IsImplemented { - fn assert_not_implemented() {} - } - impl IsImplemented for $ty {} - - // If $ty does not implement $marker, there is no ambiguity - // in the following trait method call. - <$ty>::assert_not_implemented(); - } - )+ - } - }; -} - -assert_impl!(Delimiter is Send and Sync); -assert_impl!(Spacing is Send and Sync); - -assert_impl!(Group is not Send or Sync); -assert_impl!(Ident is not Send or Sync); -assert_impl!(LexError is not Send or Sync); -assert_impl!(Literal is not Send or Sync); -assert_impl!(Punct is not Send or Sync); -assert_impl!(Span is not Send or Sync); -assert_impl!(TokenStream is not Send or Sync); -assert_impl!(TokenTree is not Send or Sync); - -#[cfg(procmacro2_semver_exempt)] -mod semver_exempt { - use super::*; - - assert_impl!(LineColumn is Send and Sync); - - assert_impl!(SourceFile is not Send or Sync); -} diff --git a/third_party/rust/proc-macro2-0.4.27/tests/test.rs b/third_party/rust/proc-macro2-0.4.27/tests/test.rs deleted file mode 100644 index 055ebfdc50..0000000000 --- a/third_party/rust/proc-macro2-0.4.27/tests/test.rs +++ /dev/null @@ -1,389 +0,0 @@ -extern crate proc_macro2; - -use std::str::{self, FromStr}; - -use proc_macro2::{Ident, Literal, Spacing, Span, TokenStream, TokenTree}; - -#[test] -fn terms() { - assert_eq!( - Ident::new("String", Span::call_site()).to_string(), - "String" - ); - assert_eq!(Ident::new("fn", Span::call_site()).to_string(), "fn"); - assert_eq!(Ident::new("_", Span::call_site()).to_string(), "_"); -} - -#[test] -#[cfg(procmacro2_semver_exempt)] -fn raw_terms() { - assert_eq!( - Ident::new_raw("String", Span::call_site()).to_string(), - "r#String" - ); - assert_eq!(Ident::new_raw("fn", Span::call_site()).to_string(), "r#fn"); - assert_eq!(Ident::new_raw("_", Span::call_site()).to_string(), "r#_"); -} - -#[test] -#[should_panic(expected = "Ident is not allowed to be empty; use Option")] -fn term_empty() { - Ident::new("", Span::call_site()); -} - -#[test] -#[should_panic(expected = "Ident cannot be a number; use Literal instead")] -fn term_number() { - Ident::new("255", Span::call_site()); -} - -#[test] -#[should_panic(expected = "\"a#\" is not a valid Ident")] -fn term_invalid() { - Ident::new("a#", Span::call_site()); -} - -#[test] -#[should_panic(expected = "not a valid Ident")] -fn raw_term_empty() { - Ident::new("r#", Span::call_site()); -} - -#[test] -#[should_panic(expected = "not a valid Ident")] -fn raw_term_number() { - Ident::new("r#255", Span::call_site()); -} - -#[test] -#[should_panic(expected = "\"r#a#\" is not a valid Ident")] -fn raw_term_invalid() { - Ident::new("r#a#", Span::call_site()); -} - -#[test] -#[should_panic(expected = "not a valid Ident")] -fn lifetime_empty() { - Ident::new("'", Span::call_site()); -} - -#[test] -#[should_panic(expected = "not a valid Ident")] -fn lifetime_number() { - Ident::new("'255", Span::call_site()); -} - -#[test] 
-#[should_panic(expected = r#""\'a#" is not a valid Ident"#)] -fn lifetime_invalid() { - Ident::new("'a#", Span::call_site()); -} - -#[test] -fn literals() { - assert_eq!(Literal::string("foo").to_string(), "\"foo\""); - assert_eq!(Literal::string("\"").to_string(), "\"\\\"\""); - assert_eq!(Literal::f32_unsuffixed(10.0).to_string(), "10.0"); -} - -#[test] -fn roundtrip() { - fn roundtrip(p: &str) { - println!("parse: {}", p); - let s = p.parse::().unwrap().to_string(); - println!("first: {}", s); - let s2 = s.to_string().parse::().unwrap().to_string(); - assert_eq!(s, s2); - } - roundtrip("a"); - roundtrip("<<"); - roundtrip("<<="); - roundtrip( - " - 1 - 1.0 - 1f32 - 2f64 - 1usize - 4isize - 4e10 - 1_000 - 1_0i32 - 8u8 - 9 - 0 - 0xffffffffffffffffffffffffffffffff - ", - ); - roundtrip("'a"); - roundtrip("'_"); - roundtrip("'static"); - roundtrip("'\\u{10__FFFF}'"); - roundtrip("\"\\u{10_F0FF__}foo\\u{1_0_0_0__}\""); -} - -#[test] -fn fail() { - fn fail(p: &str) { - if let Ok(s) = p.parse::() { - panic!("should have failed to parse: {}\n{:#?}", p, s); - } - } - fail("1x"); - fail("1u80"); - fail("1f320"); - fail("' static"); - fail("r#1"); - fail("r#_"); -} - -#[cfg(span_locations)] -#[test] -fn span_test() { - use proc_macro2::TokenTree; - - fn check_spans(p: &str, mut lines: &[(usize, usize, usize, usize)]) { - let ts = p.parse::().unwrap(); - check_spans_internal(ts, &mut lines); - } - - fn check_spans_internal(ts: TokenStream, lines: &mut &[(usize, usize, usize, usize)]) { - for i in ts { - if let Some((&(sline, scol, eline, ecol), rest)) = lines.split_first() { - *lines = rest; - - let start = i.span().start(); - assert_eq!(start.line, sline, "sline did not match for {}", i); - assert_eq!(start.column, scol, "scol did not match for {}", i); - - let end = i.span().end(); - assert_eq!(end.line, eline, "eline did not match for {}", i); - assert_eq!(end.column, ecol, "ecol did not match for {}", i); - - match i { - TokenTree::Group(ref g) => { - check_spans_internal(g.stream().clone(), lines); - } - _ => {} - } - } - } - } - - check_spans( - "\ -/// This is a document comment -testing 123 -{ - testing 234 -}", - &[ - (1, 0, 1, 30), // # - (1, 0, 1, 30), // [ ... ] - (1, 0, 1, 30), // doc - (1, 0, 1, 30), // = - (1, 0, 1, 30), // "This is..." - (2, 0, 2, 7), // testing - (2, 8, 2, 11), // 123 - (3, 0, 5, 1), // { ... 
} - (4, 2, 4, 9), // testing - (4, 10, 4, 13), // 234 - ], - ); -} - -#[cfg(procmacro2_semver_exempt)] -#[cfg(not(nightly))] -#[test] -fn default_span() { - let start = Span::call_site().start(); - assert_eq!(start.line, 1); - assert_eq!(start.column, 0); - let end = Span::call_site().end(); - assert_eq!(end.line, 1); - assert_eq!(end.column, 0); - let source_file = Span::call_site().source_file(); - assert_eq!(source_file.path().to_string_lossy(), ""); - assert!(!source_file.is_real()); -} - -#[cfg(procmacro2_semver_exempt)] -#[test] -fn span_join() { - let source1 = "aaa\nbbb" - .parse::() - .unwrap() - .into_iter() - .collect::>(); - let source2 = "ccc\nddd" - .parse::() - .unwrap() - .into_iter() - .collect::>(); - - assert!(source1[0].span().source_file() != source2[0].span().source_file()); - assert_eq!( - source1[0].span().source_file(), - source1[1].span().source_file() - ); - - let joined1 = source1[0].span().join(source1[1].span()); - let joined2 = source1[0].span().join(source2[0].span()); - assert!(joined1.is_some()); - assert!(joined2.is_none()); - - let start = joined1.unwrap().start(); - let end = joined1.unwrap().end(); - assert_eq!(start.line, 1); - assert_eq!(start.column, 0); - assert_eq!(end.line, 2); - assert_eq!(end.column, 3); - - assert_eq!( - joined1.unwrap().source_file(), - source1[0].span().source_file() - ); -} - -#[test] -fn no_panic() { - let s = str::from_utf8(b"b\'\xc2\x86 \x00\x00\x00^\"").unwrap(); - assert!(s.parse::().is_err()); -} - -#[test] -fn tricky_doc_comment() { - let stream = "/**/".parse::().unwrap(); - let tokens = stream.into_iter().collect::>(); - assert!(tokens.is_empty(), "not empty -- {:?}", tokens); - - let stream = "/// doc".parse::().unwrap(); - let tokens = stream.into_iter().collect::>(); - assert!(tokens.len() == 2, "not length 2 -- {:?}", tokens); - match tokens[0] { - proc_macro2::TokenTree::Punct(ref tt) => assert_eq!(tt.as_char(), '#'), - _ => panic!("wrong token {:?}", tokens[0]), - } - let mut tokens = match tokens[1] { - proc_macro2::TokenTree::Group(ref tt) => { - assert_eq!(tt.delimiter(), proc_macro2::Delimiter::Bracket); - tt.stream().into_iter() - } - _ => panic!("wrong token {:?}", tokens[0]), - }; - - match tokens.next().unwrap() { - proc_macro2::TokenTree::Ident(ref tt) => assert_eq!(tt.to_string(), "doc"), - t => panic!("wrong token {:?}", t), - } - match tokens.next().unwrap() { - proc_macro2::TokenTree::Punct(ref tt) => assert_eq!(tt.as_char(), '='), - t => panic!("wrong token {:?}", t), - } - match tokens.next().unwrap() { - proc_macro2::TokenTree::Literal(ref tt) => { - assert_eq!(tt.to_string(), "\" doc\""); - } - t => panic!("wrong token {:?}", t), - } - assert!(tokens.next().is_none()); - - let stream = "//! 
doc".parse::().unwrap(); - let tokens = stream.into_iter().collect::>(); - assert!(tokens.len() == 3, "not length 3 -- {:?}", tokens); -} - -#[test] -fn op_before_comment() { - let mut tts = TokenStream::from_str("~// comment").unwrap().into_iter(); - match tts.next().unwrap() { - TokenTree::Punct(tt) => { - assert_eq!(tt.as_char(), '~'); - assert_eq!(tt.spacing(), Spacing::Alone); - } - wrong => panic!("wrong token {:?}", wrong), - } -} - -#[test] -fn raw_identifier() { - let mut tts = TokenStream::from_str("r#dyn").unwrap().into_iter(); - match tts.next().unwrap() { - TokenTree::Ident(raw) => assert_eq!("r#dyn", raw.to_string()), - wrong => panic!("wrong token {:?}", wrong), - } - assert!(tts.next().is_none()); -} - -#[test] -fn test_debug_ident() { - let ident = Ident::new("proc_macro", Span::call_site()); - - #[cfg(not(procmacro2_semver_exempt))] - let expected = "Ident(proc_macro)"; - - #[cfg(procmacro2_semver_exempt)] - let expected = "Ident { sym: proc_macro, span: bytes(0..0) }"; - - assert_eq!(expected, format!("{:?}", ident)); -} - -#[test] -fn test_debug_tokenstream() { - let tts = TokenStream::from_str("[a + 1]").unwrap(); - - #[cfg(not(procmacro2_semver_exempt))] - let expected = "\ -TokenStream [ - Group { - delimiter: Bracket, - stream: TokenStream [ - Ident { - sym: a - }, - Punct { - op: '+', - spacing: Alone - }, - Literal { - lit: 1 - } - ] - } -]\ - "; - - #[cfg(procmacro2_semver_exempt)] - let expected = "\ -TokenStream [ - Group { - delimiter: Bracket, - stream: TokenStream [ - Ident { - sym: a, - span: bytes(2..3) - }, - Punct { - op: '+', - spacing: Alone, - span: bytes(4..5) - }, - Literal { - lit: 1, - span: bytes(6..7) - } - ], - span: bytes(1..8) - } -]\ - "; - - assert_eq!(expected, format!("{:#?}", tts)); -} - -#[test] -fn default_tokenstream_is_empty() { - let default_token_stream: TokenStream = Default::default(); - - assert!(default_token_stream.is_empty()); -} diff --git a/third_party/rust/quote-0.6.11/.cargo-checksum.json b/third_party/rust/quote-0.6.11/.cargo-checksum.json deleted file mode 100644 index d9a2d15006..0000000000 --- a/third_party/rust/quote-0.6.11/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{"Cargo.toml":"68f4dc89836a05a2347086addab1849567ef8073c552ec0dfca8f96fd20550f9","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"d9392d4c7af3bf9714f0a95801d64de46ffd4558cdfeea0eb85b414e555abb72","src/ext.rs":"03919239a20f8393288783a21bf6fdee12e405d13d162c9faa6f8f5ce54b003b","src/lib.rs":"5345b4d2e6f923724cec35c62d7397e6f04d5503d2d813bff7bbaa7ffc39a9cf","src/to_tokens.rs":"0dcd15cba2aa83abeb47b9a1babce7a29643b5efa2fe620b070cb37bb21a84f1","tests/conditional/integer128.rs":"d83e21a91efbaa801a82ae499111bdda2d31edaa620e78c0199eba42d69c9ee6","tests/test.rs":"810013d7fd77b738abd0ace90ce2f2f3e219c757652eabab29bc1c0ce4a73b24"},"package":"cdd8e04bd9c52e0342b406469d494fcb033be4bdbe5c606016defbb1681411e1"} \ No newline at end of file diff --git a/third_party/rust/quote-0.6.11/LICENSE-APACHE b/third_party/rust/quote-0.6.11/LICENSE-APACHE deleted file mode 100644 index 16fe87b06e..0000000000 --- a/third_party/rust/quote-0.6.11/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff --git a/third_party/rust/quote-0.6.11/LICENSE-MIT b/third_party/rust/quote-0.6.11/LICENSE-MIT deleted file mode 100644 index 40b8817a47..0000000000 --- a/third_party/rust/quote-0.6.11/LICENSE-MIT +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2016 The Rust Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/quote-0.6.11/README.md b/third_party/rust/quote-0.6.11/README.md deleted file mode 100644 index 759916ace2..0000000000 --- a/third_party/rust/quote-0.6.11/README.md +++ /dev/null @@ -1,241 +0,0 @@ -Rust Quasi-Quoting -================== - -[![Build Status](https://api.travis-ci.org/dtolnay/quote.svg?branch=master)](https://travis-ci.org/dtolnay/quote) -[![Latest Version](https://img.shields.io/crates/v/quote.svg)](https://crates.io/crates/quote) -[![Rust Documentation](https://img.shields.io/badge/api-rustdoc-blue.svg)](https://docs.rs/quote/) - -This crate provides the [`quote!`] macro for turning Rust syntax tree data -structures into tokens of source code. - -[`quote!`]: https://docs.rs/quote/0.6/quote/macro.quote.html - -Procedural macros in Rust receive a stream of tokens as input, execute arbitrary -Rust code to determine how to manipulate those tokens, and produce a stream of -tokens to hand back to the compiler to compile into the caller's crate. -Quasi-quoting is a solution to one piece of that -- producing tokens to return -to the compiler. - -The idea of quasi-quoting is that we write *code* that we treat as *data*. -Within the `quote!` macro, we can write what looks like code to our text editor -or IDE. We get all the benefits of the editor's brace matching, syntax -highlighting, indentation, and maybe autocompletion. But rather than compiling -that as code into the current crate, we can treat it as data, pass it around, -mutate it, and eventually hand it back to the compiler as tokens to compile into -the macro caller's crate. - -This crate is motivated by the procedural macro use case, but is a -general-purpose Rust quasi-quoting library and is not specific to procedural -macros. - -*Version requirement: Quote supports any compiler version back to Rust's very -first support for procedural macros in Rust 1.15.0.* - -[*Release notes*](https://github.com/dtolnay/quote/releases) - -```toml -[dependencies] -quote = "0.6" -``` - -## Syntax - -The quote crate provides a [`quote!`] macro within which you can write Rust code -that gets packaged into a [`TokenStream`] and can be treated as data. 
You should -think of `TokenStream` as representing a fragment of Rust source code. - -[`TokenStream`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.TokenStream.html - -Within the `quote!` macro, interpolation is done with `#var`. Any type -implementing the [`quote::ToTokens`] trait can be interpolated. This includes -most Rust primitive types as well as most of the syntax tree types from [`syn`]. - -[`quote::ToTokens`]: https://docs.rs/quote/0.6/quote/trait.ToTokens.html -[`syn`]: https://github.com/dtolnay/syn - -```rust -let tokens = quote! { - struct SerializeWith #generics #where_clause { - value: &'a #field_ty, - phantom: core::marker::PhantomData<#item_ty>, - } - - impl #generics serde::Serialize for SerializeWith #generics #where_clause { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - #path(self.value, serializer) - } - } - - SerializeWith { - value: #value, - phantom: core::marker::PhantomData::<#item_ty>, - } -}; -``` - -## Repetition - -Repetition is done using `#(...)*` or `#(...),*` similar to `macro_rules!`. This -iterates through the elements of any variable interpolated within the repetition -and inserts a copy of the repetition body for each one. The variables in an -interpolation may be anything that implements `IntoIterator`, including `Vec` or -a pre-existing iterator. - -- `#(#var)*` — no separators -- `#(#var),*` — the character before the asterisk is used as a separator -- `#( struct #var; )*` — the repetition can contain other things -- `#( #k => println!("{}", #v), )*` — even multiple interpolations - -Note that there is a difference between `#(#var ,)*` and `#(#var),*`—the latter -does not produce a trailing comma. This matches the behavior of delimiters in -`macro_rules!`. - -## Returning tokens to the compiler - -The `quote!` macro evaluates to an expression of type -`proc_macro2::TokenStream`. Meanwhile Rust procedural macros are expected to -return the type `proc_macro::TokenStream`. - -The difference between the two types is that `proc_macro` types are entirely -specific to procedural macros and cannot ever exist in code outside of a -procedural macro, while `proc_macro2` types may exist anywhere including tests -and non-macro code like main.rs and build.rs. This is why even the procedural -macro ecosystem is largely built around `proc_macro2`, because that ensures the -libraries are unit testable and accessible in non-macro contexts. - -There is a [`From`]-conversion in both directions so returning the output of -`quote!` from a procedural macro usually looks like `tokens.into()` or -`proc_macro::TokenStream::from(tokens)`. - -[`From`]: https://doc.rust-lang.org/std/convert/trait.From.html - -## Examples - -### Combining quoted fragments - -Usually you don't end up constructing an entire final `TokenStream` in one -piece. Different parts may come from different helper functions. The tokens -produced by `quote!` themselves implement `ToTokens` and so can be interpolated -into later `quote!` invocations to build up a final result. - -```rust -let type_definition = quote! {...}; -let methods = quote! {...}; - -let tokens = quote! { - #type_definition - #methods -}; -``` - -### Constructing identifiers - -Suppose we have an identifier `ident` which came from somewhere in a macro -input and we need to modify it in some way for the macro output. Let's consider -prepending the identifier with an underscore. - -Simply interpolating the identifier next to an underscore will not have the -behavior of concatenating them. 
The underscore and the identifier will continue -to be two separate tokens as if you had written `_ x`. - -```rust -// incorrect -quote! { - let mut _#ident = 0; -} -``` - -The solution is to perform token-level manipulations using the APIs provided by -Syn and proc-macro2. - -```rust -let concatenated = format!("_{}", ident); -let varname = syn::Ident::new(&concatenated, ident.span()); -quote! { - let mut #varname = 0; -} -``` - -### Making method calls - -Let's say our macro requires some type specified in the macro input to have a -constructor called `new`. We have the type in a variable called `field_type` of -type `syn::Type` and want to invoke the constructor. - -```rust -// incorrect -quote! { - let value = #field_type::new(); -} -``` - -This works only sometimes. If `field_type` is `String`, the expanded code -contains `String::new()` which is fine. But if `field_type` is something like -`Vec` then the expanded code is `Vec::new()` which is invalid syntax. -Ordinarily in handwritten Rust we would write `Vec::::new()` but for macros -often the following is more convenient. - -```rust -quote! { - let value = <#field_type>::new(); -} -``` - -This expands to `>::new()` which behaves correctly. - -A similar pattern is appropriate for trait methods. - -```rust -quote! { - let value = <#field_type as core::default::Default>::default(); -} -``` - -## Hygiene - -Any interpolated tokens preserve the `Span` information provided by their -`ToTokens` implementation. Tokens that originate within a `quote!` invocation -are spanned with [`Span::call_site()`]. - -[`Span::call_site()`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.Span.html#method.call_site - -A different span can be provided explicitly through the [`quote_spanned!`] -macro. - -[`quote_spanned!`]: https://docs.rs/quote/0.6/quote/macro.quote_spanned.html - -### Limitations - -- A non-repeating variable may not be interpolated inside of a repeating block - ([#7]). -- The same variable may not be interpolated more than once inside of a repeating - block ([#8]). - -[#7]: https://github.com/dtolnay/quote/issues/7 -[#8]: https://github.com/dtolnay/quote/issues/8 - -### Recursion limit - -The `quote!` macro relies on deep recursion so some large invocations may fail -with "recursion limit reached" when you compile. If it fails, bump up the -recursion limit by adding `#![recursion_limit = "128"]` to your crate. An even -higher limit may be necessary for especially large invocations. You don't need -this unless the compiler tells you that you need it. - -## License - -Licensed under either of - - * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) - -at your option. - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in this crate by you, as defined in the Apache-2.0 license, shall -be dual licensed as above, without any additional terms or conditions. diff --git a/third_party/rust/quote-0.6.11/src/ext.rs b/third_party/rust/quote-0.6.11/src/ext.rs deleted file mode 100644 index 7ebbe30a1b..0000000000 --- a/third_party/rust/quote-0.6.11/src/ext.rs +++ /dev/null @@ -1,112 +0,0 @@ -use super::ToTokens; - -use std::iter; - -use proc_macro2::{TokenStream, TokenTree}; - -/// TokenStream extension trait with methods for appending tokens. -/// -/// This trait is sealed and cannot be implemented outside of the `quote` crate. 
-pub trait TokenStreamExt: private::Sealed { - /// For use by `ToTokens` implementations. - /// - /// Appends the token specified to this list of tokens. - fn append(&mut self, token: U) - where - U: Into; - - /// For use by `ToTokens` implementations. - /// - /// ```edition2018 - /// # use quote::{quote, TokenStreamExt, ToTokens}; - /// # use proc_macro2::TokenStream; - /// # - /// struct X; - /// - /// impl ToTokens for X { - /// fn to_tokens(&self, tokens: &mut TokenStream) { - /// tokens.append_all(&[true, false]); - /// } - /// } - /// - /// let tokens = quote!(#X); - /// assert_eq!(tokens.to_string(), "true false"); - /// ``` - fn append_all(&mut self, iter: I) - where - T: ToTokens, - I: IntoIterator; - - /// For use by `ToTokens` implementations. - /// - /// Appends all of the items in the iterator `I`, separated by the tokens - /// `U`. - fn append_separated(&mut self, iter: I, op: U) - where - T: ToTokens, - I: IntoIterator, - U: ToTokens; - - /// For use by `ToTokens` implementations. - /// - /// Appends all tokens in the iterator `I`, appending `U` after each - /// element, including after the last element of the iterator. - fn append_terminated(&mut self, iter: I, term: U) - where - T: ToTokens, - I: IntoIterator, - U: ToTokens; -} - -impl TokenStreamExt for TokenStream { - fn append(&mut self, token: U) - where - U: Into, - { - self.extend(iter::once(token.into())); - } - - fn append_all(&mut self, iter: I) - where - T: ToTokens, - I: IntoIterator, - { - for token in iter { - token.to_tokens(self); - } - } - - fn append_separated(&mut self, iter: I, op: U) - where - T: ToTokens, - I: IntoIterator, - U: ToTokens, - { - for (i, token) in iter.into_iter().enumerate() { - if i > 0 { - op.to_tokens(self); - } - token.to_tokens(self); - } - } - - fn append_terminated(&mut self, iter: I, term: U) - where - T: ToTokens, - I: IntoIterator, - U: ToTokens, - { - for token in iter { - token.to_tokens(self); - term.to_tokens(self); - } - } -} - -mod private { - use proc_macro2::TokenStream; - - pub trait Sealed {} - - impl Sealed for TokenStream {} -} diff --git a/third_party/rust/quote-0.6.11/src/lib.rs b/third_party/rust/quote-0.6.11/src/lib.rs deleted file mode 100644 index 4fd7edee48..0000000000 --- a/third_party/rust/quote-0.6.11/src/lib.rs +++ /dev/null @@ -1,969 +0,0 @@ -//! This crate provides the [`quote!`] macro for turning Rust syntax tree data -//! structures into tokens of source code. -//! -//! [`quote!`]: macro.quote.html -//! -//! Procedural macros in Rust receive a stream of tokens as input, execute -//! arbitrary Rust code to determine how to manipulate those tokens, and produce -//! a stream of tokens to hand back to the compiler to compile into the caller's -//! crate. Quasi-quoting is a solution to one piece of that -- producing tokens -//! to return to the compiler. -//! -//! The idea of quasi-quoting is that we write *code* that we treat as *data*. -//! Within the `quote!` macro, we can write what looks like code to our text -//! editor or IDE. We get all the benefits of the editor's brace matching, -//! syntax highlighting, indentation, and maybe autocompletion. But rather than -//! compiling that as code into the current crate, we can treat it as data, pass -//! it around, mutate it, and eventually hand it back to the compiler as tokens -//! to compile into the macro caller's crate. -//! -//! This crate is motivated by the procedural macro use case, but is a -//! general-purpose Rust quasi-quoting library and is not specific to procedural -//! macros. -//! -//! 
*Version requirement: Quote supports any compiler version back to Rust's -//! very first support for procedural macros in Rust 1.15.0.* -//! -//! ```toml -//! [dependencies] -//! quote = "0.6" -//! ``` -//! -//! # Example -//! -//! The following quasi-quoted block of code is something you might find in [a] -//! procedural macro having to do with data structure serialization. The `#var` -//! syntax performs interpolation of runtime variables into the quoted tokens. -//! Check out the documentation of the [`quote!`] macro for more detail about -//! the syntax. See also the [`quote_spanned!`] macro which is important for -//! implementing hygienic procedural macros. -//! -//! [a]: https://serde.rs/ -//! [`quote_spanned!`]: macro.quote_spanned.html -//! -//! ```edition2018 -//! # use quote::quote; -//! # -//! # let generics = ""; -//! # let where_clause = ""; -//! # let field_ty = ""; -//! # let item_ty = ""; -//! # let path = ""; -//! # let value = ""; -//! # -//! let tokens = quote! { -//! struct SerializeWith #generics #where_clause { -//! value: &'a #field_ty, -//! phantom: core::marker::PhantomData<#item_ty>, -//! } -//! -//! impl #generics serde::Serialize for SerializeWith #generics #where_clause { -//! fn serialize(&self, serializer: S) -> Result -//! where -//! S: serde::Serializer, -//! { -//! #path(self.value, serializer) -//! } -//! } -//! -//! SerializeWith { -//! value: #value, -//! phantom: core::marker::PhantomData::<#item_ty>, -//! } -//! }; -//! ``` -//! -//! # Recursion limit -//! -//! The `quote!` macro relies on deep recursion so some large invocations may -//! fail with "recursion limit reached" when you compile. If it fails, bump up -//! the recursion limit by adding `#![recursion_limit = "128"]` to your crate. -//! An even higher limit may be necessary for especially large invocations. - -// Quote types in rustdoc of other crates get linked to here. -#![doc(html_root_url = "https://docs.rs/quote/0.6.11")] - -#[cfg(all( - not(all(target_arch = "wasm32", target_os = "unknown")), - feature = "proc-macro" -))] -extern crate proc_macro; -extern crate proc_macro2; - -mod ext; -pub use ext::TokenStreamExt; - -mod to_tokens; -pub use to_tokens::ToTokens; - -// Not public API. -#[doc(hidden)] -pub mod __rt { - use ext::TokenStreamExt; - pub use proc_macro2::*; - - fn is_ident_start(c: u8) -> bool { - (b'a' <= c && c <= b'z') || (b'A' <= c && c <= b'Z') || c == b'_' - } - - fn is_ident_continue(c: u8) -> bool { - (b'a' <= c && c <= b'z') - || (b'A' <= c && c <= b'Z') - || c == b'_' - || (b'0' <= c && c <= b'9') - } - - fn is_ident(token: &str) -> bool { - if token.bytes().all(|digit| digit >= b'0' && digit <= b'9') { - return false; - } - - let mut bytes = token.bytes(); - let first = bytes.next().unwrap(); - if !is_ident_start(first) { - return false; - } - for ch in bytes { - if !is_ident_continue(ch) { - return false; - } - } - true - } - - pub fn parse(tokens: &mut TokenStream, span: Span, s: &str) { - if is_ident(s) { - // Fast path, since idents are the most common token. - tokens.append(Ident::new(s, span)); - } else { - let s: TokenStream = s.parse().expect("invalid token stream"); - tokens.extend(s.into_iter().map(|mut t| { - t.set_span(span); - t - })); - } - } - - macro_rules! 
push_punct { - ($name:ident $char1:tt) => { - pub fn $name(tokens: &mut TokenStream, span: Span) { - let mut punct = Punct::new($char1, Spacing::Alone); - punct.set_span(span); - tokens.append(punct); - } - }; - ($name:ident $char1:tt $char2:tt) => { - pub fn $name(tokens: &mut TokenStream, span: Span) { - let mut punct = Punct::new($char1, Spacing::Joint); - punct.set_span(span); - tokens.append(punct); - let mut punct = Punct::new($char2, Spacing::Alone); - punct.set_span(span); - tokens.append(punct); - } - }; - ($name:ident $char1:tt $char2:tt $char3:tt) => { - pub fn $name(tokens: &mut TokenStream, span: Span) { - let mut punct = Punct::new($char1, Spacing::Joint); - punct.set_span(span); - tokens.append(punct); - let mut punct = Punct::new($char2, Spacing::Joint); - punct.set_span(span); - tokens.append(punct); - let mut punct = Punct::new($char3, Spacing::Alone); - punct.set_span(span); - tokens.append(punct); - } - }; - } - - push_punct!(push_add '+'); - push_punct!(push_add_eq '+' '='); - push_punct!(push_and '&'); - push_punct!(push_and_and '&' '&'); - push_punct!(push_and_eq '&' '='); - push_punct!(push_at '@'); - push_punct!(push_bang '!'); - push_punct!(push_caret '^'); - push_punct!(push_caret_eq '^' '='); - push_punct!(push_colon ':'); - push_punct!(push_colon2 ':' ':'); - push_punct!(push_comma ','); - push_punct!(push_div '/'); - push_punct!(push_div_eq '/' '='); - push_punct!(push_dot '.'); - push_punct!(push_dot2 '.' '.'); - push_punct!(push_dot3 '.' '.' '.'); - push_punct!(push_dot_dot_eq '.' '.' '='); - push_punct!(push_eq '='); - push_punct!(push_eq_eq '=' '='); - push_punct!(push_ge '>' '='); - push_punct!(push_gt '>'); - push_punct!(push_le '<' '='); - push_punct!(push_lt '<'); - push_punct!(push_mul_eq '*' '='); - push_punct!(push_ne '!' '='); - push_punct!(push_or '|'); - push_punct!(push_or_eq '|' '='); - push_punct!(push_or_or '|' '|'); - push_punct!(push_pound '#'); - push_punct!(push_question '?'); - push_punct!(push_rarrow '-' '>'); - push_punct!(push_larrow '<' '-'); - push_punct!(push_rem '%'); - push_punct!(push_rem_eq '%' '='); - push_punct!(push_fat_arrow '=' '>'); - push_punct!(push_semi ';'); - push_punct!(push_shl '<' '<'); - push_punct!(push_shl_eq '<' '<' '='); - push_punct!(push_shr '>' '>'); - push_punct!(push_shr_eq '>' '>' '='); - push_punct!(push_star '*'); - push_punct!(push_sub '-'); - push_punct!(push_sub_eq '-' '='); -} - -/// The whole point. -/// -/// Performs variable interpolation against the input and produces it as -/// [`TokenStream`]. For returning tokens to the compiler in a procedural macro, use -/// `into()` to build a `TokenStream`. -/// -/// [`TokenStream`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.TokenStream.html -/// -/// # Interpolation -/// -/// Variable interpolation is done with `#var` (similar to `$var` in -/// `macro_rules!` macros). This grabs the `var` variable that is currently in -/// scope and inserts it in that location in the output tokens. Any type -/// implementing the [`ToTokens`] trait can be interpolated. This includes most -/// Rust primitive types as well as most of the syntax tree types from the [Syn] -/// crate. -/// -/// [`ToTokens`]: trait.ToTokens.html -/// [Syn]: https://github.com/dtolnay/syn -/// -/// Repetition is done using `#(...)*` or `#(...),*` again similar to -/// `macro_rules!`. This iterates through the elements of any variable -/// interpolated within the repetition and inserts a copy of the repetition body -/// for each one. 
The variables in an interpolation may be anything that -/// implements `IntoIterator`, including `Vec` or a pre-existing iterator. -/// -/// - `#(#var)*` — no separators -/// - `#(#var),*` — the character before the asterisk is used as a separator -/// - `#( struct #var; )*` — the repetition can contain other tokens -/// - `#( #k => println!("{}", #v), )*` — even multiple interpolations -/// -/// # Hygiene -/// -/// Any interpolated tokens preserve the `Span` information provided by their -/// `ToTokens` implementation. Tokens that originate within the `quote!` -/// invocation are spanned with [`Span::call_site()`]. -/// -/// [`Span::call_site()`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.Span.html#method.call_site -/// -/// A different span can be provided through the [`quote_spanned!`] macro. -/// -/// [`quote_spanned!`]: macro.quote_spanned.html -/// -/// # Return type -/// -/// The macro evaluates to an expression of type `proc_macro2::TokenStream`. -/// Meanwhile Rust procedural macros are expected to return the type -/// `proc_macro::TokenStream`. -/// -/// The difference between the two types is that `proc_macro` types are entirely -/// specific to procedural macros and cannot ever exist in code outside of a -/// procedural macro, while `proc_macro2` types may exist anywhere including -/// tests and non-macro code like main.rs and build.rs. This is why even the -/// procedural macro ecosystem is largely built around `proc_macro2`, because -/// that ensures the libraries are unit testable and accessible in non-macro -/// contexts. -/// -/// There is a [`From`]-conversion in both directions so returning the output of -/// `quote!` from a procedural macro usually looks like `tokens.into()` or -/// `proc_macro::TokenStream::from(tokens)`. -/// -/// [`From`]: https://doc.rust-lang.org/std/convert/trait.From.html -/// -/// # Examples -/// -/// ## Procedural macro -/// -/// The structure of a basic procedural macro is as follows. Refer to the [Syn] -/// crate for further useful guidance on using `quote!` as part of a procedural -/// macro. -/// -/// [Syn]: https://github.com/dtolnay/syn -/// -/// ```edition2018 -/// # #[cfg(any())] -/// extern crate proc_macro; -/// # use proc_macro2 as proc_macro; -/// -/// use proc_macro::TokenStream; -/// use quote::quote; -/// -/// # const IGNORE_TOKENS: &'static str = stringify! { -/// #[proc_macro_derive(HeapSize)] -/// # }; -/// pub fn derive_heap_size(input: TokenStream) -> TokenStream { -/// // Parse the input and figure out what implementation to generate... -/// # const IGNORE_TOKENS: &'static str = stringify! { -/// let name = /* ... */; -/// let expr = /* ... */; -/// # }; -/// # -/// # let name = 0; -/// # let expr = 0; -/// -/// let expanded = quote! { -/// // The generated impl. -/// impl heapsize::HeapSize for #name { -/// fn heap_size_of_children(&self) -> usize { -/// #expr -/// } -/// } -/// }; -/// -/// // Hand the output tokens back to the compiler. -/// TokenStream::from(expanded) -/// } -/// ``` -/// -/// ## Combining quoted fragments -/// -/// Usually you don't end up constructing an entire final `TokenStream` in one -/// piece. Different parts may come from different helper functions. The tokens -/// produced by `quote!` themselves implement `ToTokens` and so can be -/// interpolated into later `quote!` invocations to build up a final result. -/// -/// ```edition2018 -/// # use quote::quote; -/// # -/// let type_definition = quote! {...}; -/// let methods = quote! {...}; -/// -/// let tokens = quote! 
{ -/// #type_definition -/// #methods -/// }; -/// ``` -/// -/// ## Constructing identifiers -/// -/// Suppose we have an identifier `ident` which came from somewhere in a macro -/// input and we need to modify it in some way for the macro output. Let's -/// consider prepending the identifier with an underscore. -/// -/// Simply interpolating the identifier next to an underscore will not have the -/// behavior of concatenating them. The underscore and the identifier will -/// continue to be two separate tokens as if you had written `_ x`. -/// -/// ```edition2018 -/// # use proc_macro2::{self as syn, Span}; -/// # use quote::quote; -/// # -/// # let ident = syn::Ident::new("i", Span::call_site()); -/// # -/// // incorrect -/// quote! { -/// let mut _#ident = 0; -/// } -/// # ; -/// ``` -/// -/// The solution is to perform token-level manipulations using the APIs provided -/// by Syn and proc-macro2. -/// -/// ```edition2018 -/// # use proc_macro2::{self as syn, Span}; -/// # use quote::quote; -/// # -/// # let ident = syn::Ident::new("i", Span::call_site()); -/// # -/// let concatenated = format!("_{}", ident); -/// let varname = syn::Ident::new(&concatenated, ident.span()); -/// quote! { -/// let mut #varname = 0; -/// } -/// # ; -/// ``` -/// -/// ## Making method calls -/// -/// Let's say our macro requires some type specified in the macro input to have -/// a constructor called `new`. We have the type in a variable called -/// `field_type` of type `syn::Type` and want to invoke the constructor. -/// -/// ```edition2018 -/// # use quote::quote; -/// # -/// # let field_type = quote!(...); -/// # -/// // incorrect -/// quote! { -/// let value = #field_type::new(); -/// } -/// # ; -/// ``` -/// -/// This works only sometimes. If `field_type` is `String`, the expanded code -/// contains `String::new()` which is fine. But if `field_type` is something -/// like `Vec` then the expanded code is `Vec::new()` which is invalid -/// syntax. Ordinarily in handwritten Rust we would write `Vec::::new()` -/// but for macros often the following is more convenient. -/// -/// ```edition2018 -/// # use quote::quote; -/// # -/// # let field_type = quote!(...); -/// # -/// quote! { -/// let value = <#field_type>::new(); -/// } -/// # ; -/// ``` -/// -/// This expands to `>::new()` which behaves correctly. -/// -/// A similar pattern is appropriate for trait methods. -/// -/// ```edition2018 -/// # use quote::quote; -/// # -/// # let field_type = quote!(...); -/// # -/// quote! { -/// let value = <#field_type as core::default::Default>::default(); -/// } -/// # ; -/// ``` -#[macro_export(local_inner_macros)] -macro_rules! quote { - ($($tt:tt)*) => (quote_spanned!($crate::__rt::Span::call_site()=> $($tt)*)); -} - -/// Same as `quote!`, but applies a given span to all tokens originating within -/// the macro invocation. -/// -/// # Syntax -/// -/// A span expression of type [`Span`], followed by `=>`, followed by the tokens -/// to quote. The span expression should be brief -- use a variable for anything -/// more than a few characters. There should be no space before the `=>` token. -/// -/// [`Span`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.Span.html -/// -/// ```edition2018 -/// # use proc_macro2::Span; -/// # use quote::quote_spanned; -/// # -/// # const IGNORE_TOKENS: &'static str = stringify! { -/// let span = /* ... */; -/// # }; -/// # let span = Span::call_site(); -/// # let init = 0; -/// -/// // On one line, use parentheses. 
-/// let tokens = quote_spanned!(span=> Box::into_raw(Box::new(#init))); -/// -/// // On multiple lines, place the span at the top and use braces. -/// let tokens = quote_spanned! {span=> -/// Box::into_raw(Box::new(#init)) -/// }; -/// ``` -/// -/// The lack of space before the `=>` should look jarring to Rust programmers -/// and this is intentional. The formatting is designed to be visibly -/// off-balance and draw the eye a particular way, due to the span expression -/// being evaluated in the context of the procedural macro and the remaining -/// tokens being evaluated in the generated code. -/// -/// # Hygiene -/// -/// Any interpolated tokens preserve the `Span` information provided by their -/// `ToTokens` implementation. Tokens that originate within the `quote_spanned!` -/// invocation are spanned with the given span argument. -/// -/// # Example -/// -/// The following procedural macro code uses `quote_spanned!` to assert that a -/// particular Rust type implements the [`Sync`] trait so that references can be -/// safely shared between threads. -/// -/// [`Sync`]: https://doc.rust-lang.org/std/marker/trait.Sync.html -/// -/// ```edition2018 -/// # use quote::{quote_spanned, TokenStreamExt, ToTokens}; -/// # use proc_macro2::{Span, TokenStream}; -/// # -/// # struct Type; -/// # -/// # impl Type { -/// # fn span(&self) -> Span { -/// # Span::call_site() -/// # } -/// # } -/// # -/// # impl ToTokens for Type { -/// # fn to_tokens(&self, _tokens: &mut TokenStream) {} -/// # } -/// # -/// # let ty = Type; -/// # let call_site = Span::call_site(); -/// # -/// let ty_span = ty.span(); -/// let assert_sync = quote_spanned! {ty_span=> -/// struct _AssertSync where #ty: Sync; -/// }; -/// ``` -/// -/// If the assertion fails, the user will see an error like the following. The -/// input span of their type is hightlighted in the error. -/// -/// ```text -/// error[E0277]: the trait bound `*const (): std::marker::Sync` is not satisfied -/// --> src/main.rs:10:21 -/// | -/// 10 | static ref PTR: *const () = &(); -/// | ^^^^^^^^^ `*const ()` cannot be shared between threads safely -/// ``` -/// -/// In this example it is important for the where-clause to be spanned with the -/// line/column information of the user's input type so that error messages are -/// placed appropriately by the compiler. But it is also incredibly important -/// that `Sync` resolves at the macro definition site and not the macro call -/// site. If we resolve `Sync` at the same span that the user's type is going to -/// be resolved, then they could bypass our check by defining their own trait -/// named `Sync` that is implemented for their type. -#[macro_export(local_inner_macros)] -macro_rules! quote_spanned { - ($span:expr=> $($tt:tt)*) => { - { - let mut _s = $crate::__rt::TokenStream::new(); - let _span = $span; - quote_each_token!(_s _span $($tt)*); - _s - } - }; -} - -// Extract the names of all #metavariables and pass them to the $finish macro. -// -// in: pounded_var_names!(then () a #b c #( #d )* #e) -// out: then!(() b d e) -#[macro_export(local_inner_macros)] -#[doc(hidden)] -macro_rules! 
pounded_var_names { - ($finish:ident ($($found:ident)*) # ( $($inner:tt)* ) $($rest:tt)*) => { - pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*) - }; - - ($finish:ident ($($found:ident)*) # [ $($inner:tt)* ] $($rest:tt)*) => { - pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*) - }; - - ($finish:ident ($($found:ident)*) # { $($inner:tt)* } $($rest:tt)*) => { - pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*) - }; - - ($finish:ident ($($found:ident)*) # $first:ident $($rest:tt)*) => { - pounded_var_names!($finish ($($found)* $first) $($rest)*) - }; - - ($finish:ident ($($found:ident)*) ( $($inner:tt)* ) $($rest:tt)*) => { - pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*) - }; - - ($finish:ident ($($found:ident)*) [ $($inner:tt)* ] $($rest:tt)*) => { - pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*) - }; - - ($finish:ident ($($found:ident)*) { $($inner:tt)* } $($rest:tt)*) => { - pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*) - }; - - ($finish:ident ($($found:ident)*) $ignore:tt $($rest:tt)*) => { - pounded_var_names!($finish ($($found)*) $($rest)*) - }; - - ($finish:ident ($($found:ident)*)) => { - $finish!(() $($found)*) - }; -} - -// in: nested_tuples_pat!(() a b c d e) -// out: ((((a b) c) d) e) -// -// in: nested_tuples_pat!(() a) -// out: a -#[macro_export(local_inner_macros)] -#[doc(hidden)] -macro_rules! nested_tuples_pat { - (()) => { - &() - }; - - (() $first:ident $($rest:ident)*) => { - nested_tuples_pat!(($first) $($rest)*) - }; - - (($pat:pat) $first:ident $($rest:ident)*) => { - nested_tuples_pat!((($pat, $first)) $($rest)*) - }; - - (($done:pat)) => { - $done - }; -} - -// in: multi_zip_expr!(() a b c d e) -// out: a.into_iter().zip(b).zip(c).zip(d).zip(e) -// -// in: multi_zip_iter!(() a) -// out: a -#[macro_export(local_inner_macros)] -#[doc(hidden)] -macro_rules! multi_zip_expr { - (()) => { - &[] - }; - - (() $single:ident) => { - $single - }; - - (() $first:ident $($rest:ident)*) => { - multi_zip_expr!(($first.into_iter()) $($rest)*) - }; - - (($zips:expr) $first:ident $($rest:ident)*) => { - multi_zip_expr!(($zips.zip($first)) $($rest)*) - }; - - (($done:expr)) => { - $done - }; -} - -#[macro_export(local_inner_macros)] -#[doc(hidden)] -macro_rules! quote_each_token { - ($tokens:ident $span:ident) => {}; - - ($tokens:ident $span:ident # ! 
$($rest:tt)*) => { - quote_each_token!($tokens $span #); - quote_each_token!($tokens $span !); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident # ( $($inner:tt)* ) * $($rest:tt)*) => { - for pounded_var_names!(nested_tuples_pat () $($inner)*) - in pounded_var_names!(multi_zip_expr () $($inner)*) { - quote_each_token!($tokens $span $($inner)*); - } - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident # ( $($inner:tt)* ) $sep:tt * $($rest:tt)*) => { - for (_i, pounded_var_names!(nested_tuples_pat () $($inner)*)) - in pounded_var_names!(multi_zip_expr () $($inner)*).into_iter().enumerate() { - if _i > 0 { - quote_each_token!($tokens $span $sep); - } - quote_each_token!($tokens $span $($inner)*); - } - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident # [ $($inner:tt)* ] $($rest:tt)*) => { - quote_each_token!($tokens $span #); - $tokens.extend({ - let mut g = $crate::__rt::Group::new( - $crate::__rt::Delimiter::Bracket, - quote_spanned!($span=> $($inner)*), - ); - g.set_span($span); - Some($crate::__rt::TokenTree::from(g)) - }); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident # $first:ident $($rest:tt)*) => { - $crate::ToTokens::to_tokens(&$first, &mut $tokens); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident ( $($first:tt)* ) $($rest:tt)*) => { - $tokens.extend({ - let mut g = $crate::__rt::Group::new( - $crate::__rt::Delimiter::Parenthesis, - quote_spanned!($span=> $($first)*), - ); - g.set_span($span); - Some($crate::__rt::TokenTree::from(g)) - }); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident [ $($first:tt)* ] $($rest:tt)*) => { - $tokens.extend({ - let mut g = $crate::__rt::Group::new( - $crate::__rt::Delimiter::Bracket, - quote_spanned!($span=> $($first)*), - ); - g.set_span($span); - Some($crate::__rt::TokenTree::from(g)) - }); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident { $($first:tt)* } $($rest:tt)*) => { - $tokens.extend({ - let mut g = $crate::__rt::Group::new( - $crate::__rt::Delimiter::Brace, - quote_spanned!($span=> $($first)*), - ); - g.set_span($span); - Some($crate::__rt::TokenTree::from(g)) - }); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident + $($rest:tt)*) => { - $crate::__rt::push_add(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident += $($rest:tt)*) => { - $crate::__rt::push_add_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident & $($rest:tt)*) => { - $crate::__rt::push_and(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident && $($rest:tt)*) => { - $crate::__rt::push_and_and(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident &= $($rest:tt)*) => { - $crate::__rt::push_and_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident @ $($rest:tt)*) => { - $crate::__rt::push_at(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident ! 
$($rest:tt)*) => { - $crate::__rt::push_bang(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident ^ $($rest:tt)*) => { - $crate::__rt::push_caret(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident ^= $($rest:tt)*) => { - $crate::__rt::push_caret_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident : $($rest:tt)*) => { - $crate::__rt::push_colon(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident :: $($rest:tt)*) => { - $crate::__rt::push_colon2(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident , $($rest:tt)*) => { - $crate::__rt::push_comma(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident / $($rest:tt)*) => { - $crate::__rt::push_div(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident /= $($rest:tt)*) => { - $crate::__rt::push_div_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident . $($rest:tt)*) => { - $crate::__rt::push_dot(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident .. $($rest:tt)*) => { - $crate::__rt::push_dot2(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident ... $($rest:tt)*) => { - $crate::__rt::push_dot3(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident ..= $($rest:tt)*) => { - $crate::__rt::push_dot_dot_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident = $($rest:tt)*) => { - $crate::__rt::push_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident == $($rest:tt)*) => { - $crate::__rt::push_eq_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident >= $($rest:tt)*) => { - $crate::__rt::push_ge(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident > $($rest:tt)*) => { - $crate::__rt::push_gt(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident <= $($rest:tt)*) => { - $crate::__rt::push_le(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident < $($rest:tt)*) => { - $crate::__rt::push_lt(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident *= $($rest:tt)*) => { - $crate::__rt::push_mul_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident != $($rest:tt)*) => { - $crate::__rt::push_ne(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident | $($rest:tt)*) => { - $crate::__rt::push_or(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident |= $($rest:tt)*) => { - $crate::__rt::push_or_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident || $($rest:tt)*) => { - $crate::__rt::push_or_or(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident # $($rest:tt)*) => { - $crate::__rt::push_pound(&mut $tokens, 
$span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident ? $($rest:tt)*) => { - $crate::__rt::push_question(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident -> $($rest:tt)*) => { - $crate::__rt::push_rarrow(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident <- $($rest:tt)*) => { - $crate::__rt::push_larrow(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident % $($rest:tt)*) => { - $crate::__rt::push_rem(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident %= $($rest:tt)*) => { - $crate::__rt::push_rem_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident => $($rest:tt)*) => { - $crate::__rt::push_fat_arrow(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident ; $($rest:tt)*) => { - $crate::__rt::push_semi(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident << $($rest:tt)*) => { - $crate::__rt::push_shl(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident <<= $($rest:tt)*) => { - $crate::__rt::push_shl_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident >> $($rest:tt)*) => { - $crate::__rt::push_shr(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident >>= $($rest:tt)*) => { - $crate::__rt::push_shr_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident * $($rest:tt)*) => { - $crate::__rt::push_star(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident - $($rest:tt)*) => { - $crate::__rt::push_sub(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident -= $($rest:tt)*) => { - $crate::__rt::push_sub_eq(&mut $tokens, $span); - quote_each_token!($tokens $span $($rest)*); - }; - - ($tokens:ident $span:ident $first:tt $($rest:tt)*) => { - $crate::__rt::parse(&mut $tokens, $span, quote_stringify!($first)); - quote_each_token!($tokens $span $($rest)*); - }; -} - -// Unhygienically invoke whatever `stringify` the caller has in scope i.e. not a -// local macro. The macros marked `local_inner_macros` above cannot invoke -// `stringify` directly. -#[macro_export] -#[doc(hidden)] -macro_rules! quote_stringify { - ($tt:tt) => { - stringify!($tt) - }; -} diff --git a/third_party/rust/quote-0.6.11/src/to_tokens.rs b/third_party/rust/quote-0.6.11/src/to_tokens.rs deleted file mode 100644 index d80fb4b67d..0000000000 --- a/third_party/rust/quote-0.6.11/src/to_tokens.rs +++ /dev/null @@ -1,198 +0,0 @@ -use super::TokenStreamExt; - -use std::borrow::Cow; -use std::iter; - -use proc_macro2::{Group, Ident, Literal, Punct, Span, TokenStream, TokenTree}; - -/// Types that can be interpolated inside a [`quote!`] invocation. -/// -/// [`quote!`]: macro.quote.html -pub trait ToTokens { - /// Write `self` to the given `TokenStream`. - /// - /// The token append methods provided by the [`TokenStreamExt`] extension - /// trait may be useful for implementing `ToTokens`. 
- /// - /// [`TokenStreamExt`]: trait.TokenStreamExt.html - /// - /// # Example - /// - /// Example implementation for a struct representing Rust paths like - /// `std::cmp::PartialEq`: - /// - /// ```edition2018 - /// use proc_macro2::{TokenTree, Spacing, Span, Punct, TokenStream}; - /// use quote::{TokenStreamExt, ToTokens}; - /// - /// pub struct Path { - /// pub global: bool, - /// pub segments: Vec, - /// } - /// - /// impl ToTokens for Path { - /// fn to_tokens(&self, tokens: &mut TokenStream) { - /// for (i, segment) in self.segments.iter().enumerate() { - /// if i > 0 || self.global { - /// // Double colon `::` - /// tokens.append(Punct::new(':', Spacing::Joint)); - /// tokens.append(Punct::new(':', Spacing::Alone)); - /// } - /// segment.to_tokens(tokens); - /// } - /// } - /// } - /// # - /// # pub struct PathSegment; - /// # - /// # impl ToTokens for PathSegment { - /// # fn to_tokens(&self, tokens: &mut TokenStream) { - /// # unimplemented!() - /// # } - /// # } - /// ``` - fn to_tokens(&self, tokens: &mut TokenStream); - - /// Convert `self` directly into a `TokenStream` object. - /// - /// This method is implicitly implemented using `to_tokens`, and acts as a - /// convenience method for consumers of the `ToTokens` trait. - fn into_token_stream(self) -> TokenStream - where - Self: Sized, - { - let mut tokens = TokenStream::new(); - self.to_tokens(&mut tokens); - tokens - } -} - -impl<'a, T: ?Sized + ToTokens> ToTokens for &'a T { - fn to_tokens(&self, tokens: &mut TokenStream) { - (**self).to_tokens(tokens); - } -} - -impl<'a, T: ?Sized + ToTokens> ToTokens for &'a mut T { - fn to_tokens(&self, tokens: &mut TokenStream) { - (**self).to_tokens(tokens); - } -} - -impl<'a, T: ?Sized + ToOwned + ToTokens> ToTokens for Cow<'a, T> { - fn to_tokens(&self, tokens: &mut TokenStream) { - (**self).to_tokens(tokens); - } -} - -impl ToTokens for Box { - fn to_tokens(&self, tokens: &mut TokenStream) { - (**self).to_tokens(tokens); - } -} - -impl ToTokens for Option { - fn to_tokens(&self, tokens: &mut TokenStream) { - if let Some(ref t) = *self { - t.to_tokens(tokens); - } - } -} - -impl ToTokens for str { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Literal::string(self)); - } -} - -impl ToTokens for String { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.as_str().to_tokens(tokens); - } -} - -macro_rules! primitive { - ($($t:ident => $name:ident)*) => ($( - impl ToTokens for $t { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Literal::$name(*self)); - } - } - )*) -} - -primitive! { - i8 => i8_suffixed - i16 => i16_suffixed - i32 => i32_suffixed - i64 => i64_suffixed - isize => isize_suffixed - - u8 => u8_suffixed - u16 => u16_suffixed - u32 => u32_suffixed - u64 => u64_suffixed - usize => usize_suffixed - - f32 => f32_suffixed - f64 => f64_suffixed -} - -#[cfg(integer128)] -primitive! 
{ - i128 => i128_suffixed - u128 => u128_suffixed -} - -impl ToTokens for char { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Literal::character(*self)); - } -} - -impl ToTokens for bool { - fn to_tokens(&self, tokens: &mut TokenStream) { - let word = if *self { "true" } else { "false" }; - tokens.append(Ident::new(word, Span::call_site())); - } -} - -impl ToTokens for Group { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(self.clone()); - } -} - -impl ToTokens for Ident { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(self.clone()); - } -} - -impl ToTokens for Punct { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(self.clone()); - } -} - -impl ToTokens for Literal { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(self.clone()); - } -} - -impl ToTokens for TokenTree { - fn to_tokens(&self, dst: &mut TokenStream) { - dst.append(self.clone()); - } -} - -impl ToTokens for TokenStream { - fn to_tokens(&self, dst: &mut TokenStream) { - dst.extend(iter::once(self.clone())); - } - - fn into_token_stream(self) -> TokenStream { - self - } -} diff --git a/third_party/rust/quote-0.6.11/tests/conditional/integer128.rs b/third_party/rust/quote-0.6.11/tests/conditional/integer128.rs deleted file mode 100644 index 61e2274468..0000000000 --- a/third_party/rust/quote-0.6.11/tests/conditional/integer128.rs +++ /dev/null @@ -1,11 +0,0 @@ -#[test] -fn test_integer128() { - let ii128 = -1i128; - let uu128 = 1u128; - - let tokens = quote! { - #ii128 #uu128 - }; - let expected = "-1i128 1u128"; - assert_eq!(expected, tokens.to_string()); -} diff --git a/third_party/rust/quote-0.6.11/tests/test.rs b/third_party/rust/quote-0.6.11/tests/test.rs deleted file mode 100644 index f832da596b..0000000000 --- a/third_party/rust/quote-0.6.11/tests/test.rs +++ /dev/null @@ -1,295 +0,0 @@ -#![cfg_attr(feature = "cargo-clippy", allow(blacklisted_name))] - -use std::borrow::Cow; - -extern crate proc_macro2; -#[macro_use] -extern crate quote; - -use proc_macro2::{Ident, Span, TokenStream}; -use quote::TokenStreamExt; - -mod conditional { - #[cfg(integer128)] - mod integer128; -} - -struct X; - -impl quote::ToTokens for X { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Ident::new("X", Span::call_site())); - } -} - -#[test] -fn test_quote_impl() { - let tokens = quote! { - impl<'a, T: ToTokens> ToTokens for &'a T { - fn to_tokens(&self, tokens: &mut TokenStream) { - (**self).to_tokens(tokens) - } - } - }; - - let expected = concat!( - "impl < 'a , T : ToTokens > ToTokens for & 'a T { ", - "fn to_tokens ( & self , tokens : & mut TokenStream ) { ", - "( * * self ) . to_tokens ( tokens ) ", - "} ", - "}" - ); - - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_substitution() { - let x = X; - let tokens = quote!(#x <#x> (#x) [#x] {#x}); - - let expected = "X < X > ( X ) [ X ] { X }"; - - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_iter() { - let primes = &[X, X, X, X]; - - assert_eq!("X X X X", quote!(#(#primes)*).to_string()); - - assert_eq!("X , X , X , X ,", quote!(#(#primes,)*).to_string()); - - assert_eq!("X , X , X , X", quote!(#(#primes),*).to_string()); -} - -#[test] -fn test_advanced() { - let generics = quote!( <'a, T> ); - - let where_clause = quote!( where T: Serialize ); - - let field_ty = quote!(String); - - let item_ty = quote!(Cow<'a, str>); - - let path = quote!(SomeTrait::serialize_with); - - let value = quote!(self.x); - - let tokens = quote! 
{ - struct SerializeWith #generics #where_clause { - value: &'a #field_ty, - phantom: ::std::marker::PhantomData<#item_ty>, - } - - impl #generics ::serde::Serialize for SerializeWith #generics #where_clause { - fn serialize(&self, s: &mut S) -> Result<(), S::Error> - where S: ::serde::Serializer - { - #path(self.value, s) - } - } - - SerializeWith { - value: #value, - phantom: ::std::marker::PhantomData::<#item_ty>, - } - }; - - let expected = concat!( - "struct SerializeWith < 'a , T > where T : Serialize { ", - "value : & 'a String , ", - "phantom : :: std :: marker :: PhantomData < Cow < 'a , str > > , ", - "} ", - "impl < 'a , T > :: serde :: Serialize for SerializeWith < 'a , T > where T : Serialize { ", - "fn serialize < S > ( & self , s : & mut S ) -> Result < ( ) , S :: Error > ", - "where S : :: serde :: Serializer ", - "{ ", - "SomeTrait :: serialize_with ( self . value , s ) ", - "} ", - "} ", - "SerializeWith { ", - "value : self . x , ", - "phantom : :: std :: marker :: PhantomData :: < Cow < 'a , str > > , ", - "}" - ); - - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_integer() { - let ii8 = -1i8; - let ii16 = -1i16; - let ii32 = -1i32; - let ii64 = -1i64; - let iisize = -1isize; - let uu8 = 1u8; - let uu16 = 1u16; - let uu32 = 1u32; - let uu64 = 1u64; - let uusize = 1usize; - - let tokens = quote! { - #ii8 #ii16 #ii32 #ii64 #iisize - #uu8 #uu16 #uu32 #uu64 #uusize - }; - let expected = "-1i8 -1i16 -1i32 -1i64 -1isize 1u8 1u16 1u32 1u64 1usize"; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_floating() { - let e32 = 2.345f32; - - let e64 = 2.345f64; - - let tokens = quote! { - #e32 - #e64 - }; - let expected = concat!("2.345f32 2.345f64"); - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_char() { - let zero = '\0'; - let pound = '#'; - let quote = '"'; - let apost = '\''; - let newline = '\n'; - let heart = '\u{2764}'; - - let tokens = quote! { - #zero #pound #quote #apost #newline #heart - }; - let expected = "'\\u{0}' '#' '\\\"' '\\'' '\\n' '\\u{2764}'"; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_str() { - let s = "\0 a 'b \" c"; - let tokens = quote!(#s); - let expected = "\"\\u{0} a \\'b \\\" c\""; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_string() { - let s = "\0 a 'b \" c".to_string(); - let tokens = quote!(#s); - let expected = "\"\\u{0} a \\'b \\\" c\""; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_ident() { - let foo = Ident::new("Foo", Span::call_site()); - let bar = Ident::new(&format!("Bar{}", 7), Span::call_site()); - let tokens = quote!(struct #foo; enum #bar {}); - let expected = "struct Foo ; enum Bar7 { }"; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_duplicate() { - let ch = 'x'; - - let tokens = quote!(#ch #ch); - - let expected = "'x' 'x'"; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_fancy_repetition() { - let foo = vec!["a", "b"]; - let bar = vec![true, false]; - - let tokens = quote! { - #(#foo: #bar),* - }; - - let expected = r#""a" : true , "b" : false"#; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_nested_fancy_repetition() { - let nested = vec![vec!['a', 'b', 'c'], vec!['x', 'y', 'z']]; - - let tokens = quote! 
{ - #( - #(#nested)* - ),* - }; - - let expected = "'a' 'b' 'c' , 'x' 'y' 'z'"; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_empty_repetition() { - let tokens = quote!(#(a b)* #(c d),*); - assert_eq!("", tokens.to_string()); -} - -#[test] -fn test_variable_name_conflict() { - // The implementation of `#(...),*` uses the variable `_i` but it should be - // fine, if a little confusing when debugging. - let _i = vec!['a', 'b']; - let tokens = quote! { #(#_i),* }; - let expected = "'a' , 'b'"; - assert_eq!(expected, tokens.to_string()); -} - -#[test] -fn test_empty_quote() { - let tokens = quote!(); - assert_eq!("", tokens.to_string()); -} - -#[test] -fn test_box_str() { - let b = "str".to_owned().into_boxed_str(); - let tokens = quote! { #b }; - assert_eq!("\"str\"", tokens.to_string()); -} - -#[test] -fn test_cow() { - let owned: Cow = Cow::Owned(Ident::new("owned", Span::call_site())); - - let ident = Ident::new("borrowed", Span::call_site()); - let borrowed = Cow::Borrowed(&ident); - - let tokens = quote! { #owned #borrowed }; - assert_eq!("owned borrowed", tokens.to_string()); -} - -#[test] -fn test_closure() { - fn field_i(i: usize) -> Ident { - Ident::new(&format!("__field{}", i), Span::call_site()) - } - - let fields = (0usize..3) - .map(field_i as fn(_) -> _) - .map(|var| quote! { #var }); - - let tokens = quote! { #(#fields)* }; - assert_eq!("__field0 __field1 __field2", tokens.to_string()); -} - -#[test] -fn test_append_tokens() { - let mut a = quote!(a); - let b = quote!(b); - a.append_all(b); - assert_eq!("a b", a.to_string()); -} diff --git a/third_party/rust/regex/.cargo-checksum.json b/third_party/rust/regex/.cargo-checksum.json index 9af85b9c1f..4e3807bd2f 100644 --- a/third_party/rust/regex/.cargo-checksum.json +++ b/third_party/rust/regex/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{"CHANGELOG.md":"ab6b40e79e35a9d717bdd08fd13f2b1785c50706dd4f9ab0fb2ac288cf463957","Cargo.lock":"915378cef5b425ee0ac365212a7d060a5ad5b64b4eaeba235bc6532dacf0546c","Cargo.toml":"7a79c1d1cf985db0a9cfecdca7092ec27ad1d40c7d95824e6d0d8eb853bd650e","HACKING.md":"17818f7a17723608f6bdbe6388ad0a913d4f96f76a16649aaf4e274b1fa0ea97","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","PERFORMANCE.md":"c776b18758b6dc8f2f9d37e9a95261f75c5f744925e8ddf216b83953bf7467b7","README.md":"105087e75c7d1ef789508e2d28d0e38f9591449c576870eca96c852a46b8c1fe","UNICODE.md":"27be97e94d0819ae18ae71029559c3de21c8ffd343d4a791959d0fc22b06dfe6","examples/regexdna-input.txt":"156a49710bb3e1ed4bc2bbb0af0f383b747b3d0281453cfff39c296124c598f8","examples/regexdna-output.txt":"35e85b19b70a893d752fd43e54e1e9da08bac43559191cea85b33387c24c4cc1","examples/shootout-regex-dna-bytes.rs":"e4f0d94e6d9cd4c7f6e340ed51b163f3331ce616dba4539d09f40919548d81d3","examples/shootout-regex-dna-cheat.rs":"bd5f4832c9a2e6f175d79064b2c4ee28dcae4b971f9618586664f99963bd0f8e","examples/shootout-regex-dna-replace.rs":"056e9ee51327a1cf9bd1da007e8c0af9c80a3e7fc2b3a3f63d0cf1b6b244ac74","examples/shootout-regex-dna-single-cheat.rs":"0beeb210972597db3a5c41901c78d78635681a833eba059064b28902e41d6f86","examples/shootout-regex-dna-single.rs":"a5d1ad8ebcfe9dd58bddc0d1ad838112cbce780f8f93c46892a70a1e25fd7c7f","examples/shootout-regex-dna.rs":"7724deec3d94cc5b55077380f7b9664edb5a49eaaa4cc7ba8356579c2ade7129","rustfmt.toml":"1ca600239a27401c4a43f363cf3f38183a212affc1f31bff3ae93234bbaec228","src/backtrack.rs":"bb24a32bc0d2f2f1c94439d8d0879ad8d15c910d4b9ab86c2833d93945cb088c","src/cache.rs":"94f1d20992aef5a8038dedfe33aaa68a1f8fdae74c397aeab0e80b82ac93a4a8","src/compile.rs":"05c74b053277ea7f3bed707db00593d0931c753de2239a38da8c1e3accaacc3d","src/dfa.rs":"69cd7ec8e766f4c7f056a4525aa28d102068f68d03da1d0e6aa98d40fe5c7d44","src/error.rs":"5ea92ea5081fccec2d8c5d6c4e92e583c1a683e10d2ad9243a8d1080b207b3f7","src/exec.rs":"2c02f760c2f08984ce046fb806de0ea09ae2c07d12274be1c17e5e81694c6969","src/expand.rs":"f34190d84ea59b7aee07ad2e3c9b09a3fdeef29b5e99443223397402cc68a6b4","src/find_byte.rs":"b387247b77e3269f057c3399aefe5a815032c3af918c876f80eb4b282e4eb95e","src/freqs.rs":"255555f3d95b08a5bb3bc2f38d5a06cc100a39c0f0127fe4f50c33afa1cadc65","src/input.rs":"0bb20717cb630803f78b921260c96984a38a57685f07d9aee16b12deaa1d233b","src/lib.rs":"58290928047bdd668b335cee0fe2bd1ae6213192cedc7f4cd1d07bda850c92e7","src/literal/imp.rs":"f4efeb980b3ca9760bbbf77a6d1936b98ec75ff5a2a6ae31d8255d963b0c7e7d","src/literal/mod.rs":"46739643baa0cd5d1b8fc4521df3d727815c61e583af2ffca8b7b63bad3fa50e","src/pattern.rs":"e6124b403c18344675aa341faf9ae2f592193ef89f1c4a5e1cee135b8b34dd21","src/pikevm.rs":"1de81a5e785167d63a0a53e339c85b31ed323ac88d8ef117d57b03082b83175f","src/prog.rs":"61c5ee6e4b1de58b268339398e7b6de364546dbc672fd538a86796ea5247b689","src/re_builder.rs":"3ee35fa798c824be941c03e40e5dbeef47c96c2d19aeac2e2f7ca59727ff213b","src/re_bytes.rs":"5baad135091bd5dd4c66e435266ffd18d64eaf3c954e92b435cbc8f498c75503","src/re_set.rs":"b8abdd3bab254ad53a74d114feec8f7a728ef03c3b213b02efd863649e1fb92a","src/re_trait.rs":"ad63982bf34cde29038be8c9113e0681f49df455eac3de36e2d3a8a974d59048","src/re_unicode.rs":"790a9a59c78ffc59626d160a8cd085cb97aedf89e0cb9dc6e9a365d1f1330561","src/sparse.rs":"55db5a233eb266a1d4ffc611529d90e99369282854e9153ed39d7b650573295f","src/testdata/LICENSE":"58cf078acc03da3e280a938c2bd9943
f554fc9b6ced89ad93ba35ca436872899","src/testdata/README":"45f869e37f798905c773bfbe0ef19a5fb7e585cbf0b7c21b5b5a784e8cec3c14","src/testdata/basic.dat":"b5b33aa89d48a61cd67cb1fbfd8f70e62c83e30b86256f9f915a5190dd38ff06","src/testdata/nullsubexpr.dat":"496ac0278eec3b6d9170faace14554569032dd3d909618364d9326156de39ecf","src/testdata/repetition.dat":"1f7959063015b284b18a4a2c1c8b416d438a2d6c4b1a362da43406b865f50e69","src/utf8.rs":"708615a4859110cc9766b342a9c1da6c5c4a8a04ad239046b2725385db977efe","test":"bd7ca64a788d1f622ae10c5ac77160b10fc5f0d6adb7f3ba1828637f0ca68c81","tests/api.rs":"be89c1367591b6c2657437c452e3d4a100a9d2b37e07a8de19011f405263f4ed","tests/api_str.rs":"2ae38c04e7e8fac008b609a820d0b1561ba75f39b0edc0987d6d3d06132da77f","tests/bytes.rs":"db578e566aab2e2783f6d9cf425a58496693c06f58206ce0961b55f1617c929e","tests/consistent.rs":"b857808f44642e050850349393e97b4c84f8a2d3084157457af8d0e109bd029d","tests/crates_regex.rs":"91a59d470e0700b4bcb3ff735d06799f3107b8ef4875a2e9904607b164be0326","tests/crazy.rs":"ef13b7d5c88ef18375d201df8b1ec2161eaa8e0efa470b5cc36995303645728f","tests/flags.rs":"05caace2c81a99d2168037f3a38035d4dffe9f85ef3ebd7ef18b1bc6612f1ea8","tests/fowler.rs":"2ef56016d61e4631a53d8ed562a70e914583ef3f65836fc5451010345e71b703","tests/macros.rs":"f021b62e3ce8d122f6450a5aab36972eccf4c21c62a53c604924d4d01d90c0d8","tests/macros_bytes.rs":"5574c19bff5dfa45802238b65f642605f1a8b8adc6a353660a251c80490ad68e","tests/macros_str.rs":"518bde310ceb9fb689b20294279483454601ba3a3fd7c95a04d07331f05d476d","tests/misc.rs":"395f52793fa022e4cdda78675b6a6fba1a3106b4b99c834c39f7801574054bd1","tests/multiline.rs":"1b1a3326ed976437c1357f01d81833ece7ea244f38826246eab55cacd5d0862a","tests/noparse.rs":"8850d31cb95e413e3a67edecce8590cd158f49779abcc2e5722381113346179c","tests/regression.rs":"a0f48b457c606eb572816730e41f473bbe1bfcd699bd4fdac1c98fa6477ee522","tests/replace.rs":"fb3dd97c2877b71973065a74916cdef40d65a19a92c1ecfc117edfcd9eab3ced","tests/searcher.rs":"ce35e47b0a276a7e8c9060c6a0b225ffba163aebc61fbc15555a6897fa0e552c","tests/set.rs":"cc1c5431d81b43eb7d89bf786d32657c32d7c73098c1df7dc998416c81b84591","tests/shortest_match.rs":"a2c94390c0d61bc24796b4c1288c924e90c8c9c6156fdebb858175177a194a42","tests/suffix_reverse.rs":"b95f89397404871227d9efe6df23b9ded147f183db81597e608f693955c668b5","tests/test_backtrack.rs":"1e286679d643887f7b78ef5a74f5392ff27fa462b99891ce31938db1eacb3842","tests/test_backtrack_bytes.rs":"f66f2ed407795e92ae66eaa3b4b946acfe00d9ea422944c86353dd28812df5bb","tests/test_backtrack_utf8bytes.rs":"9d8c236fbdfa5092230f19ca4d2a7d647f68811d9ffa0813899d924e11d8fc0b","tests/test_crates_regex.rs":"b385d7ca10ed308b8d57074adb9525e18d3faadb5aa883e831dfa4f6669ba58d","tests/test_default.rs":"ea1049e7bc9e40fb0da31a584aeb7f0e2b5dfd61ad10fb58184de2bd44b542cd","tests/test_default_bytes.rs":"fb3310fc315f7282358dcd98d156c31d66ca2432199afe012cf8921f459167a2","tests/test_nfa.rs":"97266c5edb15e5cde1496049a3ad0be4de33352dc32492f086d5351411ebe1ab","tests/test_nfa_bytes.rs":"9aa510e9583f847467c26ce902bd7e1c66cdd610cd50da4a1c1379afa35d4a53","tests/test_nfa_utf8bytes.rs":"2c22e16fa75edb5dbd278f2b8619404404a95b0792b31c440c9751c4aa2f7086","tests/unicode.rs":"59616c713ebd7d9db4c912fd9b7159042ccf4d51c183d8d73e09d2aaea209ab9","tests/word_boundary.rs":"7081317ddcec1e82dd4a2090a571c6abf2ff4bbfa8cd10395e1eb3f386157fae","tests/word_boundary_ascii.rs":"cd0be5b5b485de0ba7994b42e2864585556c3d2d8bf5eab05b58931d9aaf4b87","tests/word_boundary_unicode.rs":"75dbcc35d3abc0f9795c2ea99e216dc227b0a5b58e9ca5eef767815ff0513921"},"package":"dc220bd3
3bdce8f093101afe22a037b8eb0e5af33592e6a9caafff0d4cb81cbd"} \ No newline at end of file +{"files":{"CHANGELOG.md":"3d2c376a75a686c9dc6b50dcb2334008bc3228c3f75f01368d4cc8f16ff1f284","Cargo.lock":"82c252eb0303e1ed3e2f095e3f43adc8cc22113edd1ae609f48d3eedc0a1aafa","Cargo.toml":"a661a3a159a0838f517a26bb0ba82aa80e61e50acbfc458890d14a9dcc2afa61","HACKING.md":"17818f7a17723608f6bdbe6388ad0a913d4f96f76a16649aaf4e274b1fa0ea97","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","PERFORMANCE.md":"c776b18758b6dc8f2f9d37e9a95261f75c5f744925e8ddf216b83953bf7467b7","README.md":"105087e75c7d1ef789508e2d28d0e38f9591449c576870eca96c852a46b8c1fe","UNICODE.md":"27be97e94d0819ae18ae71029559c3de21c8ffd343d4a791959d0fc22b06dfe6","examples/regexdna-input.txt":"156a49710bb3e1ed4bc2bbb0af0f383b747b3d0281453cfff39c296124c598f8","examples/regexdna-output.txt":"35e85b19b70a893d752fd43e54e1e9da08bac43559191cea85b33387c24c4cc1","examples/shootout-regex-dna-bytes.rs":"e4f0d94e6d9cd4c7f6e340ed51b163f3331ce616dba4539d09f40919548d81d3","examples/shootout-regex-dna-cheat.rs":"bd5f4832c9a2e6f175d79064b2c4ee28dcae4b971f9618586664f99963bd0f8e","examples/shootout-regex-dna-replace.rs":"056e9ee51327a1cf9bd1da007e8c0af9c80a3e7fc2b3a3f63d0cf1b6b244ac74","examples/shootout-regex-dna-single-cheat.rs":"0beeb210972597db3a5c41901c78d78635681a833eba059064b28902e41d6f86","examples/shootout-regex-dna-single.rs":"a5d1ad8ebcfe9dd58bddc0d1ad838112cbce780f8f93c46892a70a1e25fd7c7f","examples/shootout-regex-dna.rs":"7724deec3d94cc5b55077380f7b9664edb5a49eaaa4cc7ba8356579c2ade7129","rustfmt.toml":"1ca600239a27401c4a43f363cf3f38183a212affc1f31bff3ae93234bbaec228","src/backtrack.rs":"bb24a32bc0d2f2f1c94439d8d0879ad8d15c910d4b9ab86c2833d93945cb088c","src/cache.rs":"df7b83b4f7278705ad1ecdf1284b962cc3b145797f1676c9a59e32c5944c848c","src/compile.rs":"a582b97efaf0fedca0b116c3124e067f2b28ff9b0e6240c905359cd7d5d8f5e9","src/dfa.rs":"69cd7ec8e766f4c7f056a4525aa28d102068f68d03da1d0e6aa98d40fe5c7d44","src/error.rs":"4d03ca3205e373125a0c9a4270fc1c5aa7b5a661b11db3508b563945a0827301","src/exec.rs":"a27ed0cb907dab6540fdf8cc785c799d7ea17f15f1236b80341934586874cd58","src/expand.rs":"f34190d84ea59b7aee07ad2e3c9b09a3fdeef29b5e99443223397402cc68a6b4","src/find_byte.rs":"b387247b77e3269f057c3399aefe5a815032c3af918c876f80eb4b282e4eb95e","src/freqs.rs":"255555f3d95b08a5bb3bc2f38d5a06cc100a39c0f0127fe4f50c33afa1cadc65","src/input.rs":"0bb20717cb630803f78b921260c96984a38a57685f07d9aee16b12deaa1d233b","src/lib.rs":"58290928047bdd668b335cee0fe2bd1ae6213192cedc7f4cd1d07bda850c92e7","src/literal/imp.rs":"f242b29261423342e8cd941bdeb26b95eed73357fc313f8a019032374b77e056","src/literal/mod.rs":"46739643baa0cd5d1b8fc4521df3d727815c61e583af2ffca8b7b63bad3fa50e","src/pattern.rs":"e6124b403c18344675aa341faf9ae2f592193ef89f1c4a5e1cee135b8b34dd21","src/pikevm.rs":"1de81a5e785167d63a0a53e339c85b31ed323ac88d8ef117d57b03082b83175f","src/prog.rs":"f47ac3accf7bd3456bd20902c6315f4063bbb6d9570c9b2f8309a0a1566d6323","src/re_builder.rs":"3ee35fa798c824be941c03e40e5dbeef47c96c2d19aeac2e2f7ca59727ff213b","src/re_bytes.rs":"68cac90eda08b9eda8068b3fe9cd3dbbc87673b415850fd5b8c76bc4b2d2dfe6","src/re_set.rs":"b8abdd3bab254ad53a74d114feec8f7a728ef03c3b213b02efd863649e1fb92a","src/re_trait.rs":"ad63982bf34cde29038be8c9113e0681f49df455eac3de36e2d3a8a974d59048","src/re_unicode.rs":"106c86f116e1c8db77efebf4d727822e16448fa45911c8e0c071485522246ab3","src/sparse.rs":"55db5a233eb266a1d4ffc611529d90e993
69282854e9153ed39d7b650573295f","src/testdata/LICENSE":"58cf078acc03da3e280a938c2bd9943f554fc9b6ced89ad93ba35ca436872899","src/testdata/README":"45f869e37f798905c773bfbe0ef19a5fb7e585cbf0b7c21b5b5a784e8cec3c14","src/testdata/basic.dat":"b5b33aa89d48a61cd67cb1fbfd8f70e62c83e30b86256f9f915a5190dd38ff06","src/testdata/nullsubexpr.dat":"496ac0278eec3b6d9170faace14554569032dd3d909618364d9326156de39ecf","src/testdata/repetition.dat":"1f7959063015b284b18a4a2c1c8b416d438a2d6c4b1a362da43406b865f50e69","src/utf8.rs":"708615a4859110cc9766b342a9c1da6c5c4a8a04ad239046b2725385db977efe","test":"bd7ca64a788d1f622ae10c5ac77160b10fc5f0d6adb7f3ba1828637f0ca68c81","tests/api.rs":"f814376a0fdc79a95d915137ba0d80e85a6b25d253870d4b9ca4e1d9ae90f317","tests/api_str.rs":"2ae38c04e7e8fac008b609a820d0b1561ba75f39b0edc0987d6d3d06132da77f","tests/bytes.rs":"edc50f526c5fee43df89d639ef18b237e4eb91e9d533bfc43f3cbab7417d38ba","tests/consistent.rs":"8dadc60727306de8a539325d1d2af84172e5da2cbf50a71bb0c53eafe8039912","tests/crates_regex.rs":"91a59d470e0700b4bcb3ff735d06799f3107b8ef4875a2e9904607b164be0326","tests/crazy.rs":"5f1e925ed35ffa0b3c7e3bde691a7ae7ca75133a8956f5cab4ece01de1ba28b5","tests/flags.rs":"05caace2c81a99d2168037f3a38035d4dffe9f85ef3ebd7ef18b1bc6612f1ea8","tests/fowler.rs":"d78cf914de40b1e125cc92b65ccb444d462586bd07b5e05de4e4a1b5de16aa76","tests/macros.rs":"6db70c16fc90df13e6b30d2b606f8b6dd4dc976697967f6ee001b15aab6d0b19","tests/macros_bytes.rs":"73e2979f60acb009066c3a9a6c5809a0069a60c67fff7db675ea4e59299a7292","tests/macros_str.rs":"4c79ff4f2f5c379503794dd7e4116b3e7d4ac7679e47b81959fa054110a76c1a","tests/misc.rs":"395f52793fa022e4cdda78675b6a6fba1a3106b4b99c834c39f7801574054bd1","tests/multiline.rs":"1b1a3326ed976437c1357f01d81833ece7ea244f38826246eab55cacd5d0862a","tests/noparse.rs":"8850d31cb95e413e3a67edecce8590cd158f49779abcc2e5722381113346179c","tests/regression.rs":"a0f48b457c606eb572816730e41f473bbe1bfcd699bd4fdac1c98fa6477ee522","tests/replace.rs":"fb3dd97c2877b71973065a74916cdef40d65a19a92c1ecfc117edfcd9eab3ced","tests/searcher.rs":"ce35e47b0a276a7e8c9060c6a0b225ffba163aebc61fbc15555a6897fa0e552c","tests/set.rs":"cc1c5431d81b43eb7d89bf786d32657c32d7c73098c1df7dc998416c81b84591","tests/shortest_match.rs":"a2c94390c0d61bc24796b4c1288c924e90c8c9c6156fdebb858175177a194a42","tests/suffix_reverse.rs":"b95f89397404871227d9efe6df23b9ded147f183db81597e608f693955c668b5","tests/test_backtrack.rs":"1e286679d643887f7b78ef5a74f5392ff27fa462b99891ce31938db1eacb3842","tests/test_backtrack_bytes.rs":"f66f2ed407795e92ae66eaa3b4b946acfe00d9ea422944c86353dd28812df5bb","tests/test_backtrack_utf8bytes.rs":"9d8c236fbdfa5092230f19ca4d2a7d647f68811d9ffa0813899d924e11d8fc0b","tests/test_crates_regex.rs":"b385d7ca10ed308b8d57074adb9525e18d3faadb5aa883e831dfa4f6669ba58d","tests/test_default.rs":"ea1049e7bc9e40fb0da31a584aeb7f0e2b5dfd61ad10fb58184de2bd44b542cd","tests/test_default_bytes.rs":"fb3310fc315f7282358dcd98d156c31d66ca2432199afe012cf8921f459167a2","tests/test_nfa.rs":"97266c5edb15e5cde1496049a3ad0be4de33352dc32492f086d5351411ebe1ab","tests/test_nfa_bytes.rs":"9aa510e9583f847467c26ce902bd7e1c66cdd610cd50da4a1c1379afa35d4a53","tests/test_nfa_utf8bytes.rs":"2c22e16fa75edb5dbd278f2b8619404404a95b0792b31c440c9751c4aa2f7086","tests/unicode.rs":"7127b140a00cdcc46b57a59139d13bda57675a55a6604bcd10e134c32e84fa52","tests/word_boundary.rs":"7081317ddcec1e82dd4a2090a571c6abf2ff4bbfa8cd10395e1eb3f386157fae","tests/word_boundary_ascii.rs":"cd0be5b5b485de0ba7994b42e2864585556c3d2d8bf5eab05b58931d9aaf4b87","tests/word_boundary_unicode.rs":
"75dbcc35d3abc0f9795c2ea99e216dc227b0a5b58e9ca5eef767815ff0513921"},"package":"b5508c1941e4e7cb19965abef075d35a9a8b5cdf0846f30b4050e9b55dc55e87"} \ No newline at end of file diff --git a/third_party/rust/regex/CHANGELOG.md b/third_party/rust/regex/CHANGELOG.md index 9063a22a3c..33d90bfc15 100644 --- a/third_party/rust/regex/CHANGELOG.md +++ b/third_party/rust/regex/CHANGELOG.md @@ -1,3 +1,31 @@ +1.3.3 (2020-01-09) +================== +This is a small maintenance release that upgrades the dependency on +`thread_local` from `0.3` to `1.0`. The minimum supported Rust version remains +at Rust 1.28. + + +1.3.2 (2020-01-09) +================== +This is a small maintenance release with some house cleaning and bug fixes. + +New features: + +* [FEATURE #631](https://github.com/rust-lang/regex/issues/631): + Add a `Match::range` method an a `From for Range` impl. + +Bug fixes: + +* [BUG #521](https://github.com/rust-lang/regex/issues/521): + Corrects `/-/.splitn("a", 2)` to return `["a"]` instead of `["a", ""]`. +* [BUG #594](https://github.com/rust-lang/regex/pull/594): + Improve error reporting when writing `\p\`. +* [BUG #627](https://github.com/rust-lang/regex/issues/627): + Corrects `/-/.split("a-")` to return `["a", ""]` instead of `["a"]`. +* [BUG #633](https://github.com/rust-lang/regex/pull/633): + Squash deprecation warnings for the `std::error::Error::description` method. + + 1.3.1 (2019-09-04) ================== This is a maintenance release with no changes in order to try to work-around diff --git a/third_party/rust/regex/Cargo.lock b/third_party/rust/regex/Cargo.lock index 76292d8f3b..bb7392318f 100644 --- a/third_party/rust/regex/Cargo.lock +++ b/third_party/rust/regex/Cargo.lock @@ -4,236 +4,234 @@ name = "aho-corasick" version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58fb5e95d83b38284460a5fda7d6470aa0b8844d283a0b614b8535e880800d2d" dependencies = [ - "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr", ] [[package]] name = "autocfg" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" [[package]] name = "bitflags" -version = "1.1.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" [[package]] name = "cloudabi" version = "0.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" dependencies = [ - "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags", ] [[package]] name = "doc-comment" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "923dea538cea0aa3025e8685b20d6ee21ef99c4f77e954a30febbaac5ec73a97" [[package]] name = "fuchsia-cprng" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" [[package]] name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.62" +version = "0.2.66" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d515b1f41455adea1313a4a2ac8a8a477634fbae63cc6100e3aebb207ce61558" 
[[package]] name = "memchr" version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88579771288728879b57485cc7d6b07d648c9f0141eb955f8ab7f9d45394468e" [[package]] name = "quickcheck" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c35d9c36a562f37eca96e79f66d5fd56eefbc22560dacc4a864cabd2d277456" dependencies = [ - "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rand", + "rand_core 0.4.2", ] [[package]] name = "rand" version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" dependencies = [ - "autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg", + "libc", + "rand_chacha", + "rand_core 0.4.2", + "rand_hc", + "rand_isaac", + "rand_jitter", + "rand_os", + "rand_pcg", + "rand_xorshift", + "winapi", ] [[package]] name = "rand_chacha" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" dependencies = [ - "autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg", + "rand_core 0.3.1", ] [[package]] name = "rand_core" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" dependencies = [ - "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.4.2", ] [[package]] name = "rand_core" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" [[package]] name = "rand_hc" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" dependencies = [ - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.3.1", ] [[package]] name = "rand_isaac" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" dependencies = [ - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.3.1", ] [[package]] name = "rand_jitter" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" dependencies = [ - "libc 0.2.62 
(registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "libc", + "rand_core 0.4.2", + "winapi", ] [[package]] name = "rand_os" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" dependencies = [ - "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "cloudabi", + "fuchsia-cprng", + "libc", + "rand_core 0.4.2", + "rdrand", + "winapi", ] [[package]] name = "rand_pcg" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" dependencies = [ - "autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg", + "rand_core 0.4.2", ] [[package]] name = "rand_xorshift" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" dependencies = [ - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.3.1", ] [[package]] name = "rdrand" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" dependencies = [ - "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core 0.3.1", ] [[package]] name = "regex" -version = "1.3.1" +version = "1.3.3" dependencies = [ - "aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)", - "doc-comment 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "quickcheck 0.8.5 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)", - "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "aho-corasick", + "doc-comment", + "lazy_static", + "memchr", + "quickcheck", + "rand", + "regex-syntax", + "thread_local", ] [[package]] name = "regex-syntax" -version = "0.6.12" +version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e734e891f5b408a29efbf8309e656876276f49ab6a6ac208600b4419bd893d90" [[package]] name = "thread_local" -version = "0.3.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88ddf1ad580c7e3d1efff877d972bcc93f995556b9087a5a259630985c88ceab" dependencies = [ - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static", ] [[package]] name = "winapi" version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" dependencies = [ - "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" - -[metadata] -"checksum aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "58fb5e95d83b38284460a5fda7d6470aa0b8844d283a0b614b8535e880800d2d" -"checksum autocfg 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "b671c8fb71b457dd4ae18c4ba1e59aa81793daacc361d82fcd410cef0d491875" -"checksum bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3d155346769a6855b86399e9bc3814ab343cd3d62c7e985113d46a0ec3c281fd" -"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -"checksum doc-comment 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "923dea538cea0aa3025e8685b20d6ee21ef99c4f77e954a30febbaac5ec73a97" -"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" -"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -"checksum libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)" = "34fcd2c08d2f832f376f4173a231990fa5aef4e99fb569867318a227ef4c06ba" -"checksum memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "88579771288728879b57485cc7d6b07d648c9f0141eb955f8ab7f9d45394468e" -"checksum quickcheck 0.8.5 (registry+https://github.com/rust-lang/crates.io-index)" = "9c35d9c36a562f37eca96e79f66d5fd56eefbc22560dacc4a864cabd2d277456" -"checksum rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" -"checksum rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" -"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -"checksum rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" -"checksum rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" -"checksum rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" -"checksum rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" -"checksum rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" -"checksum rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" -"checksum rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" -"checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -"checksum regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)" = "11a7e20d1cce64ef2fed88b66d347f88bd9babb82845b2b858f3edbf59a4f716" -"checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b" -"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" -"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/third_party/rust/regex/Cargo.toml b/third_party/rust/regex/Cargo.toml index 7407357839..e15680576e 100644 --- a/third_party/rust/regex/Cargo.toml +++ b/third_party/rust/regex/Cargo.toml @@ -12,7 +12,7 @@ [package] name = "regex" -version = "1.3.1" +version = "1.3.3" authors = ["The Rust Project Developers"] exclude = ["/.travis.yml", "/appveyor.yml", "/ci/*", "/scripts/*"] autotests = false @@ -21,17 +21,17 @@ homepage = "https://github.com/rust-lang/regex" documentation = "https://docs.rs/regex" readme = "README.md" categories = ["text-processing"] -license = "MIT/Apache-2.0" +license = "MIT OR Apache-2.0" repository = "https://github.com/rust-lang/regex" -[profile.test] -debug = true - [profile.bench] debug = true [profile.release] debug = true +[profile.test] +debug = true + [lib] doctest = false bench = false @@ -84,7 +84,7 @@ version = "0.6.12" default-features = false [dependencies.thread_local] -version = "0.3.6" +version = "1" optional = true [dev-dependencies.doc-comment] version = "0.3" diff --git a/third_party/rust/regex/src/cache.rs b/third_party/rust/regex/src/cache.rs index d8991ce4a3..dbb7e64eb8 100644 --- a/third_party/rust/regex/src/cache.rs +++ b/third_party/rust/regex/src/cache.rs @@ -26,7 +26,7 @@ mod imp { } pub fn get_or(&self, create: impl FnOnce() -> T) -> CachedGuard { - CachedGuard(self.0.get_or(|| Box::new(create()))) + CachedGuard(self.0.get_or(|| create())) } } diff --git a/third_party/rust/regex/src/compile.rs b/third_party/rust/regex/src/compile.rs index 1f69967192..ac706f8bad 100644 --- a/third_party/rust/regex/src/compile.rs +++ b/third_party/rust/regex/src/compile.rs @@ -110,7 +110,7 @@ impl Compiler { /// specified size limit. If the size limit is exceeded, then compilation /// stops and returns an error. 
pub fn compile(mut self, exprs: &[Hir]) -> result::Result<Program, Error> { - debug_assert!(exprs.len() >= 1); + debug_assert!(!exprs.is_empty()); self.num_exprs = exprs.len(); if exprs.len() == 1 { self.compile_one(&exprs[0]) diff --git a/third_party/rust/regex/src/error.rs b/third_party/rust/regex/src/error.rs index eb7f121a83..1c32c85b99 100644 --- a/third_party/rust/regex/src/error.rs +++ b/third_party/rust/regex/src/error.rs @@ -19,6 +19,8 @@ pub enum Error { } impl ::std::error::Error for Error { + // TODO: Remove this method entirely on the next breaking semver release. + #[allow(deprecated)] fn description(&self) -> &str { match *self { Error::Syntax(ref err) => err, diff --git a/third_party/rust/regex/src/exec.rs b/third_party/rust/regex/src/exec.rs index 2ae7842204..acca2dccb6 100644 --- a/third_party/rust/regex/src/exec.rs +++ b/third_party/rust/regex/src/exec.rs @@ -691,9 +691,7 @@ impl<'c> ExecNoSync<'c> { } AnchoredStart => { let lits = &self.ro.nfa.prefixes; - if !self.ro.nfa.is_anchored_start - || (self.ro.nfa.is_anchored_start && start == 0) - { + if start == 0 || !self.ro.nfa.is_anchored_start { lits.find_start(&text[start..]) .map(|(s, e)| (start + s, start + e)) } else { diff --git a/third_party/rust/regex/src/literal/imp.rs b/third_party/rust/regex/src/literal/imp.rs index 38ebd295f4..fe07ffccd8 100644 --- a/third_party/rust/regex/src/literal/imp.rs +++ b/third_party/rust/regex/src/literal/imp.rs @@ -570,7 +570,7 @@ impl BoyerMooreSearch { /// Create a new string searcher, performing whatever /// compilation steps are required. fn new(pattern: Vec<u8>) -> Self { - debug_assert!(pattern.len() > 0); + debug_assert!(!pattern.is_empty()); let (g, gi) = Self::select_guard(pattern.as_slice()); let skip_table = Self::compile_skip_table(pattern.as_slice()); diff --git a/third_party/rust/regex/src/prog.rs b/third_party/rust/regex/src/prog.rs index 6cf4961830..74e5f2f6f8 100644 --- a/third_party/rust/regex/src/prog.rs +++ b/third_party/rust/regex/src/prog.rs @@ -410,7 +410,7 @@ impl InstRanges { self.ranges .iter() .map(|&(s, e)| 1 + (e as u32) - (s as u32)) - .fold(0, |acc, len| acc + len) as usize + .sum::<u32>() as usize } } diff --git a/third_party/rust/regex/src/re_bytes.rs b/third_party/rust/regex/src/re_bytes.rs index 2e38c10ca8..69f0b335de 100644 --- a/third_party/rust/regex/src/re_bytes.rs +++ b/third_party/rust/regex/src/re_bytes.rs @@ -1,7 +1,7 @@ use std::borrow::Cow; use std::collections::HashMap; use std::fmt; -use std::ops::Index; +use std::ops::{Index, Range}; use std::str::FromStr; use std::sync::Arc; @@ -36,10 +36,17 @@ impl<'t> Match<'t> { self.end } + /// Returns the range over the starting and ending byte offsets of the + /// match in the haystack. + #[inline] + pub fn range(&self) -> Range<usize> { + self.start..self.end + } + /// Returns the matched text. #[inline] pub fn as_bytes(&self) -> &'t [u8] { - &self.text[self.start..self.end] + &self.text[self.range()] } /// Creates a new match from the given haystack and byte offsets. @@ -49,6 +56,12 @@ impl<'t> Match<'t> { } } +impl<'t> From<Match<'t>> for Range<usize> { + fn from(m: Match<'t>) -> Range<usize> { + m.range() + } +} + /// A compiled regular expression for matching arbitrary bytes. /// /// It can be used to search, split or replace text.
All searching is done with @@ -726,11 +739,11 @@ impl<'r, 't> Iterator for Split<'r, 't> { let text = self.finder.0.text(); match self.finder.next() { None => { - if self.last >= text.len() { + if self.last > text.len() { None } else { let s = &text[self.last..]; - self.last = text.len(); + self.last = text.len() + 1; // Next call will return None Some(s) } } @@ -761,12 +774,19 @@ impl<'r, 't> Iterator for SplitN<'r, 't> { if self.n == 0 { return None; } + self.n -= 1; - if self.n == 0 { - let text = self.splits.finder.0.text(); - Some(&text[self.splits.last..]) + if self.n > 0 { + return self.splits.next(); + } + + let text = self.splits.finder.0.text(); + if self.splits.last > text.len() { + // We've already returned all substrings. + None } else { - self.splits.next() + // self.n == 0, so future calls will return None immediately + Some(&text[self.splits.last..]) } } } diff --git a/third_party/rust/regex/src/re_unicode.rs b/third_party/rust/regex/src/re_unicode.rs index 81aac15260..b746599088 100644 --- a/third_party/rust/regex/src/re_unicode.rs +++ b/third_party/rust/regex/src/re_unicode.rs @@ -1,7 +1,7 @@ use std::borrow::Cow; use std::collections::HashMap; use std::fmt; -use std::ops::Index; +use std::ops::{Index, Range}; use std::str::FromStr; use std::sync::Arc; @@ -45,10 +45,17 @@ impl<'t> Match<'t> { self.end } + /// Returns the range over the starting and ending byte offsets of the + /// match in the haystack. + #[inline] + pub fn range(&self) -> Range<usize> { + self.start..self.end + } + /// Returns the matched text. #[inline] pub fn as_str(&self) -> &'t str { - &self.text[self.start..self.end] + &self.text[self.range()] } /// Creates a new match from the given haystack and byte offsets. @@ -64,6 +71,12 @@ impl<'t> From<Match<'t>> for &'t str { } } +impl<'t> From<Match<'t>> for Range<usize> { + fn from(m: Match<'t>) -> Range<usize> { + m.range() + } +} + /// A compiled regular expression for matching Unicode strings. /// /// It is represented as either a sequence of bytecode instructions (dynamic) @@ -766,11 +779,11 @@ impl<'r, 't> Iterator for Split<'r, 't> { let text = self.finder.0.text(); match self.finder.next() { None => { - if self.last >= text.len() { + if self.last > text.len() { None } else { let s = &text[self.last..]; - self.last = text.len(); + self.last = text.len() + 1; // Next call will return None Some(s) } } @@ -801,12 +814,19 @@ impl<'r, 't> Iterator for SplitN<'r, 't> { if self.n == 0 { return None; } + self.n -= 1; - if self.n == 0 { - let text = self.splits.finder.0.text(); - Some(&text[self.splits.last..]) + if self.n > 0 { + return self.splits.next(); + } + + let text = self.splits.finder.0.text(); + if self.splits.last > text.len() { + // We've already returned all substrings.
+ None } else { - self.splits.next() + // self.n == 0, so future calls will return None immediately + Some(&text[self.splits.last..]) } } } diff --git a/third_party/rust/regex/tests/api.rs b/third_party/rust/regex/tests/api.rs index ff136217e1..0d4962cc9f 100644 --- a/third_party/rust/regex/tests/api.rs +++ b/third_party/rust/regex/tests/api.rs @@ -205,6 +205,18 @@ split!( split2, r"(?-u)\b", "a b c", - &[t!(""), t!("a"), t!(" "), t!("b"), t!(" "), t!("c")] + &[t!(""), t!("a"), t!(" "), t!("b"), t!(" "), t!("c"), t!("")] ); -split!(split3, r"a$", "a", &[t!("")]); +split!(split3, r"a$", "a", &[t!(""), t!("")]); +split!(split_none, r"-", r"a", &[t!("a")]); +split!(split_trailing_blank, r"-", r"a-", &[t!("a"), t!("")]); +split!(split_trailing_blanks, r"-", r"a--", &[t!("a"), t!(""), t!("")]); +split!(split_empty, r"-", r"", &[t!("")]); + +splitn!(splitn_below_limit, r"-", r"a", 2, &[t!("a")]); +splitn!(splitn_at_limit, r"-", r"a-b", 2, &[t!("a"), t!("b")]); +splitn!(splitn_above_limit, r"-", r"a-b-c", 2, &[t!("a"), t!("b-c")]); +splitn!(splitn_zero_limit, r"-", r"a-b", 0, empty_vec!()); +splitn!(splitn_trailing_blank, r"-", r"a-", 2, &[t!("a"), t!("")]); +splitn!(splitn_trailing_separator, r"-", r"a--", 2, &[t!("a"), t!("-")]); +splitn!(splitn_empty, r"-", r"", 1, &[t!("")]); diff --git a/third_party/rust/regex/tests/bytes.rs b/third_party/rust/regex/tests/bytes.rs index 6c5a11ac77..d05f138edf 100644 --- a/third_party/rust/regex/tests/bytes.rs +++ b/third_party/rust/regex/tests/bytes.rs @@ -69,10 +69,12 @@ matiter!( R(b"\x8d#;\x1a\xa4s3\x05foobarX\\\x0f0t\xe4\x9b\xa4"), (0, 0) ); -matiter!(invalidutf8_anchor2, - r"(?-u)^\xf7|4\xff\d\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a##########[] d\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a##########\[] #####\x80\S7|$", - R(b"\x8d#;\x1a\xa4s3\x05foobarX\\\x0f0t\xe4\x9b\xa4"), - (22, 22)); +matiter!( + invalidutf8_anchor2, + r"(?-u)^\xf7|4\xff\d\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a##########[] d\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a\x8a##########\[] #####\x80\S7|$", + R(b"\x8d#;\x1a\xa4s3\x05foobarX\\\x0f0t\xe4\x9b\xa4"), + (22, 22) +); matiter!( invalidutf8_anchor3, r"(?-u)^|ddp\xff\xffdddddlQd@\x80", diff --git a/third_party/rust/regex/tests/consistent.rs b/third_party/rust/regex/tests/consistent.rs index 2d7bdcf8d1..0f9ea53f35 100644 --- a/third_party/rust/regex/tests/consistent.rs +++ b/third_party/rust/regex/tests/consistent.rs @@ -231,7 +231,6 @@ macro_rules! checker { TestResult::from_bool(true) } } - } // mod }; // rule case } // macro_rules! 
diff --git a/third_party/rust/regex/tests/crazy.rs b/third_party/rust/regex/tests/crazy.rs index 20a3371b2e..8c72273d93 100644 --- a/third_party/rust/regex/tests/crazy.rs +++ b/third_party/rust/regex/tests/crazy.rs @@ -29,8 +29,12 @@ mat!( "mine is jam.slam@gmail ", None ); -mat!(match_email_big, r"[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?", - "mine is jam.slam@gmail.com ", Some((8, 26))); +mat!( + match_email_big, + r"[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?", + "mine is jam.slam@gmail.com ", + Some((8, 26)) +); mat!( match_date1, r"(?-u)^(19|20)\d\d[- /.](0[1-9]|1[012])[- /.](0[1-9]|[12][0-9]|3[01])$", diff --git a/third_party/rust/regex/tests/fowler.rs b/third_party/rust/regex/tests/fowler.rs index 5da32935e7..7f56a758d3 100644 --- a/third_party/rust/regex/tests/fowler.rs +++ b/third_party/rust/regex/tests/fowler.rs @@ -215,7 +215,13 @@ mat!( Some((1, 2)), Some((1, 2)) ); -mat!(match_basic_76, r"a?(ab|ba)*", r"ababababababababababababababababababababababababababababababababababababababababa", Some((0, 81)), Some((79, 81))); +mat!( + match_basic_76, + r"a?(ab|ba)*", + r"ababababababababababababababababababababababababababababababababababababababababa", + Some((0, 81)), + Some((79, 81)) +); mat!( match_basic_77, r"abaa|abbaa|abbbaa|abbbbaa", diff --git a/third_party/rust/regex/tests/macros.rs b/third_party/rust/regex/tests/macros.rs index 3c4b888b20..e70e9489fd 100644 --- a/third_party/rust/regex/tests/macros.rs +++ b/third_party/rust/regex/tests/macros.rs @@ -147,3 +147,14 @@ macro_rules! split { } } } + +macro_rules! splitn { + ($name:ident, $re:expr, $text:expr, $limit:expr, $expected:expr) => { + #[test] + fn $name() { + let re = regex!($re); + let splitted: Vec<_> = re.splitn(t!($text), $limit).collect(); + assert_eq!($expected, &*splitted); + } + } +} diff --git a/third_party/rust/regex/tests/macros_bytes.rs b/third_party/rust/regex/tests/macros_bytes.rs index 7605d69b21..03c370d698 100644 --- a/third_party/rust/regex/tests/macros_bytes.rs +++ b/third_party/rust/regex/tests/macros_bytes.rs @@ -3,6 +3,7 @@ macro_rules! text { ($text:expr) => { $text.as_bytes() } } macro_rules! t { ($re:expr) => { text!($re) } } macro_rules! match_text { ($text:expr) => { $text.as_bytes() } } macro_rules! use_ { ($($path: tt)*) => { use regex::bytes::$($path)*; } } +macro_rules! empty_vec { () => { >::new() } } macro_rules! bytes { ($text:expr) => { $text } } diff --git a/third_party/rust/regex/tests/macros_str.rs b/third_party/rust/regex/tests/macros_str.rs index fda5814b8c..9b996b33b9 100644 --- a/third_party/rust/regex/tests/macros_str.rs +++ b/third_party/rust/regex/tests/macros_str.rs @@ -3,6 +3,7 @@ macro_rules! text { ($text:expr) => { $text } } macro_rules! t { ($text:expr) => { text!($text) } } macro_rules! match_text { ($text:expr) => { $text.as_str() } } macro_rules! use_ { ($($path: tt)*) => { use regex::$($path)*; } } +macro_rules! empty_vec { () => { >::new() } } macro_rules! 
no_expand { ($text:expr) => {{ diff --git a/third_party/rust/regex/tests/unicode.rs b/third_party/rust/regex/tests/unicode.rs index 597f86873a..52522f41c6 100644 --- a/third_party/rust/regex/tests/unicode.rs +++ b/third_party/rust/regex/tests/unicode.rs @@ -60,12 +60,7 @@ mat!( "〰", Some((0, 3)) ); -mat!( - uni_class_gencat_decimal_numer, - r"\p{Decimal_Number}", - "𑓙", - Some((0, 4)) -); +mat!(uni_class_gencat_decimal_numer, r"\p{Decimal_Number}", "𑓙", Some((0, 4))); mat!( uni_class_gencat_enclosing_mark, r"\p{Enclosing_Mark}", @@ -86,12 +81,7 @@ mat!( Some((0, 3)) ); mat!(uni_class_gencat_letter, r"\p{Letter}", "Έ", Some((0, 2))); -mat!( - uni_class_gencat_letter_number, - r"\p{Letter_Number}", - "ↂ", - Some((0, 3)) -); +mat!(uni_class_gencat_letter_number, r"\p{Letter_Number}", "ↂ", Some((0, 3))); mat!( uni_class_gencat_line_separator, r"\p{Line_Separator}", diff --git a/third_party/rust/rkv/.cargo-checksum.json b/third_party/rust/rkv/.cargo-checksum.json index 31e631b435..6e930172ea 100644 --- a/third_party/rust/rkv/.cargo-checksum.json +++ b/third_party/rust/rkv/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"CODE_OF_CONDUCT.md":"902d5357af363426631d907e641e220b3ec89039164743f8442b3f120479b7cf","Cargo.lock":"c95c530d76b891215cce4342a806bbc1747ab4d62f54330d932dafb542fa1a56","Cargo.toml":"00eb8afcb73a205013caf49fff1378a2304269f87f9d79beece7039f9bfb5ccf","LICENSE":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","README.md":"e28eb7d26ddd6dd71e1757f4eab63044b5c430932ef3c3a24e9772ddc78ebf85","examples/README.md":"143767fc145bf167ce269a65138cb3f7086cb715b8bc4f73626da82966e646f4","examples/iterator.rs":"ddc3997e394a30ad82d78d2675a48c4617353f88b89bb9a3df5a3804d59b8ef9","examples/simple-store.rs":"cae63e39f2f98ee6ac2f387dcb02d6b929828a74f32f7d18d69c7fc9c3cce765","run-all-examples.sh":"7f9d11d01017f77e1c9d26e3e82dfca8c6930deaec85e864458e33a7fa267de0","src/bin/dump.rs":"da8543848e57893902751f4c4745e835b9c86263da2344af18d5717014f645f5","src/bin/rand.rs":"3da924fa0f1a118f606e2b94aee3a0553d9ebdbd17ee0152b85148adbf521bba","src/env.rs":"5deac6b35e49da1d47d7c852ed2e30ef96b6d15998fe7a79479cec64697626fc","src/error.rs":"f2cbab99691f36c98c24d297de3a303de258ddd3a06e2f54cb5efce20eb3740b","src/lib.rs":"4fe4e7d6a912a850b709ed23e372acd4f214890066322b4720376f7772bb776e","src/manager.rs":"ff2d76056e3a7200035b2e75c5bc2159f337e59c076dddd2476e3094b6ae3741","src/migrate.rs":"674cee0d027fc2eed3b09cebe686c837a97725099c967d8c2f49d19e793e6bfd","src/readwrite.rs":"fde695333e4845f4f53d63da6281f585919e2a3ac5cfe00d173cc139bc822763","src/store.rs":"409d13b1ea0d1254dae947ecbce50e741fb71c3ca118a78803b734336dce6a8f","src/store/integer.rs":"f386474c971f671c9b316a16ebff5b586be6837c886f443753ae13277a7e0070","src/store/integermulti.rs":"1a0912f97619297da31cc8c146e38941b88539d2857df81191a49c8dbd18625d","src/store/multi.rs":"2dec01c2202a2c9069cced4e1e42906b01d0b85df25d17e0ea810c05fa8395d0","src/store/single.rs":"c55c3600714f5ed9e820b16c2335ae00a0071174e0a32b9df89a34182a4b908c","src/value.rs":"7fae77a8291b951591e557ec694bfdadc9eb78557dad36a970cfcdcfb83fd238","tests/integer-store.rs":"f7e06c71b0dead2323c7c61fc8bcbffbdd3a4796eebf6138db9cce3dbba716a3","tests/manager.rs":"97ec61145dc227f4f5fbcb6449c096bbe5b9a09db4e61ff4491c0443fe9adf26","tests/multi-integer-store.rs":"83295b0135c502321304aa06b05d5a9eeab41b1438ed7ddf2cb1a3613dfef4d9","tests/test_txn.rs":"f486d8bd485398e49ae64eac59ca3b44dfa7f8340aab17483cd3e9864fadd88b"},"package":"9aab7c645d32e977e186448b0a5c2c3139a91a7f630cfd8a8c314d1d145e78bf"} \ No newline at end of file 
+{"files":{"CODE_OF_CONDUCT.md":"902d5357af363426631d907e641e220b3ec89039164743f8442b3f120479b7cf","Cargo.lock":"a53a3ee5e3aa691db3a5580e6d623cd0762dba8d081eb6de893211a7791669f9","Cargo.toml":"0bb154d5139e53f095919e033abd41a72ea22db685a7a052c96087a09819d479","LICENSE":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","README.md":"c75566740e7f581fe4cbd44ad82cabad5f2665bd72a4519c17a7f98fc69a5703","examples/README.md":"143767fc145bf167ce269a65138cb3f7086cb715b8bc4f73626da82966e646f4","examples/iterator.rs":"ddc3997e394a30ad82d78d2675a48c4617353f88b89bb9a3df5a3804d59b8ef9","examples/simple-store.rs":"cae63e39f2f98ee6ac2f387dcb02d6b929828a74f32f7d18d69c7fc9c3cce765","run-all-examples.sh":"7f9d11d01017f77e1c9d26e3e82dfca8c6930deaec85e864458e33a7fa267de0","src/bin/dump.rs":"da8543848e57893902751f4c4745e835b9c86263da2344af18d5717014f645f5","src/bin/rand.rs":"3da924fa0f1a118f606e2b94aee3a0553d9ebdbd17ee0152b85148adbf521bba","src/env.rs":"5deac6b35e49da1d47d7c852ed2e30ef96b6d15998fe7a79479cec64697626fc","src/error.rs":"f2cbab99691f36c98c24d297de3a303de258ddd3a06e2f54cb5efce20eb3740b","src/lib.rs":"4fe4e7d6a912a850b709ed23e372acd4f214890066322b4720376f7772bb776e","src/manager.rs":"ff2d76056e3a7200035b2e75c5bc2159f337e59c076dddd2476e3094b6ae3741","src/migrate.rs":"674cee0d027fc2eed3b09cebe686c837a97725099c967d8c2f49d19e793e6bfd","src/readwrite.rs":"fde695333e4845f4f53d63da6281f585919e2a3ac5cfe00d173cc139bc822763","src/store.rs":"409d13b1ea0d1254dae947ecbce50e741fb71c3ca118a78803b734336dce6a8f","src/store/integer.rs":"f386474c971f671c9b316a16ebff5b586be6837c886f443753ae13277a7e0070","src/store/integermulti.rs":"1a0912f97619297da31cc8c146e38941b88539d2857df81191a49c8dbd18625d","src/store/multi.rs":"2dec01c2202a2c9069cced4e1e42906b01d0b85df25d17e0ea810c05fa8395d0","src/store/single.rs":"c55c3600714f5ed9e820b16c2335ae00a0071174e0a32b9df89a34182a4b908c","src/value.rs":"7fae77a8291b951591e557ec694bfdadc9eb78557dad36a970cfcdcfb83fd238","tests/integer-store.rs":"f7e06c71b0dead2323c7c61fc8bcbffbdd3a4796eebf6138db9cce3dbba716a3","tests/manager.rs":"97ec61145dc227f4f5fbcb6449c096bbe5b9a09db4e61ff4491c0443fe9adf26","tests/multi-integer-store.rs":"83295b0135c502321304aa06b05d5a9eeab41b1438ed7ddf2cb1a3613dfef4d9","tests/test_txn.rs":"f486d8bd485398e49ae64eac59ca3b44dfa7f8340aab17483cd3e9864fadd88b"},"package":"30a3dbc1f4971372545ed4175f23ef206c81e5874cd574d153646e7ee78f6793"} \ No newline at end of file diff --git a/third_party/rust/rkv/Cargo.lock b/third_party/rust/rkv/Cargo.lock index a3e7ad21e4..aaf852af9f 100644 --- a/third_party/rust/rkv/Cargo.lock +++ b/third_party/rust/rkv/Cargo.lock @@ -1,734 +1,418 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-[[package]] -name = "aho-corasick" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "ansi_term" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "arrayref" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "atty" -version = "0.2.13" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" [[package]] name = "autocfg" -version = "0.1.5" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" [[package]] name = "backtrace" -version = "0.3.34" +version = "0.3.45" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad235dabf00f36301792cfe82499880ba54c6486be094d1047b02bacb67c14e8" dependencies = [ - "backtrace-sys 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-demangle 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)", + "backtrace-sys", + "cfg-if", + "libc", + "rustc-demangle", ] [[package]] name = "backtrace-sys" -version = "0.1.31" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17b52e737c40a7d75abca20b29a19a0eb7ba9fc72c5a72dd282a0a3c2c0dc35" dependencies = [ - "cc 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", + "cc", + "libc", ] [[package]] name = "bincode" -version = "1.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "autocfg 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.100 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "bindgen" -version = "0.51.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5753e2a71534719bf3f4e57006c3a4f0d2c672a4b676eec84161f763eca87dbf" dependencies = [ - "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "cexpr 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "clang-sys 0.28.1 (registry+https://github.com/rust-lang/crates.io-index)", - "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "peeking_take_while 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 0.4.30 
(registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "shlex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "which 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder", + "serde", ] [[package]] name = "bitflags" -version = "1.1.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" [[package]] name = "byteorder" -version = "1.3.2" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "c2-chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ppv-lite86 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" [[package]] name = "cc" -version = "1.0.45" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "cexpr" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "nom 4.2.3 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" [[package]] name = "cfg-if" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "clang-sys" -version = "0.28.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "glob 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", - "libloading 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "clap" -version = "2.33.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", - "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-width 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "env_logger" -version = "0.6.2" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", - "humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "termcolor 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" [[package]] name = "failure" -version = "0.1.5" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8529c2421efa3066a5cbd8063d2244603824daccb6936b079010bb2aa89464b" dependencies = [ - "backtrace 0.3.34 
(registry+https://github.com/rust-lang/crates.io-index)", - "failure_derive 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "backtrace", + "failure_derive", ] [[package]] name = "failure_derive" -version = "0.1.5" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "030a733c8287d6213886dd487564ff5c8f6aae10278b3588ed177f9d18f8d231" dependencies = [ - "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.15.43 (registry+https://github.com/rust-lang/crates.io-index)", - "synstructure 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "fxhash" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", + "quote", + "syn", + "synstructure", ] [[package]] name = "getrandom" -version = "0.1.8" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" dependencies = [ - "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "glob" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "humantime" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", + "libc", + "wasi", ] [[package]] name = "idna" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" dependencies = [ - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-normalization 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "matches", + "unicode-bidi", + "unicode-normalization", ] [[package]] name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.62" +version = "0.2.67" source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "libloading" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cc 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "eb147597cdf94ed43ab7a9038716637d2d1bf2bc571da995d0028dec06bd3018" [[package]] name = "lmdb-rkv" -version = "0.12.3" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "447a296f7aca299cfbb50f4e4f3d49451549af655fb7215d7f8c0c3d64bad42b" dependencies = [ - "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", - "lmdb-rkv-sys 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags", + "byteorder", + "libc", + "lmdb-rkv-sys", ] [[package]] name = 
"lmdb-rkv-sys" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bindgen 0.51.0 (registry+https://github.com/rust-lang/crates.io-index)", - "cc 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", - "pkg-config 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "log" -version = "0.4.8" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b27470ac25167b3afdfb6af8fcd3bc1be67de50ffbdaf4073378cfded6ae24a5" dependencies = [ - "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "cc", + "libc", + "pkg-config", ] [[package]] name = "matches" version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "memchr" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "nom" -version = "4.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" [[package]] name = "num-traits" -version = "0.2.8" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096" dependencies = [ - "autocfg 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "autocfg", ] [[package]] name = "ordered-float" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18869315e81473c951eb56ad5558bbc56978562d3ecfb87abb7a1e944cea4518" dependencies = [ - "num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits", ] -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "percent-encoding" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pkg-config" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677" [[package]] name = "ppv-lite86" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" [[package]] name = "proc-macro2" -version = "0.4.30" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c09721c6781493a2a492a96b5a5bf19b65917fe6728884e7c44dd0c60ca3435" dependencies = [ - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "proc-macro2" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "quick-error" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "quote" -version = "0.6.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - 
"proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid", ] [[package]] name = "quote" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f" dependencies = [ - "proc-macro2 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", ] [[package]] name = "rand" -version = "0.7.0" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "getrandom 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "getrandom", + "libc", + "rand_chacha", + "rand_core", + "rand_hc", ] [[package]] name = "rand_chacha" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" dependencies = [ - "c2-chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ppv-lite86", + "rand_core", ] [[package]] name = "rand_core" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" dependencies = [ - "getrandom 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "getrandom", ] [[package]] name = "rand_hc" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" dependencies = [ - "rand_core 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_core", ] [[package]] name = "redox_syscall" version = "0.1.56" source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "regex" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)", - "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)", - "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "regex-syntax" -version = "0.6.12" -source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" [[package]] name = "remove_dir_all" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e" dependencies = [ - "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi", ] [[package]] name = "rkv" -version = "0.10.2" +version = "0.10.4" dependencies = [ - "arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "bincode 1.1.4 (registry+https://github.com/rust-lang/crates.io-index)", - "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "byteorder 1.3.2 
(registry+https://github.com/rust-lang/crates.io-index)", - "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "lmdb-rkv 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)", - "ordered-float 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.100 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.100 (registry+https://github.com/rust-lang/crates.io-index)", - "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "uuid 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayref", + "bincode", + "bitflags", + "byteorder", + "failure", + "lazy_static", + "lmdb-rkv", + "ordered-float", + "serde", + "serde_derive", + "tempfile", + "url", + "uuid", ] [[package]] name = "rustc-demangle" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" [[package]] name = "serde" -version = "1.0.100" +version = "1.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "414115f25f818d7dfccec8ee535d76949ae78584fc4f79a6f45a904bf8ab4449" [[package]] name = "serde_derive" -version = "1.0.100" +version = "1.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "128f9e303a5a29922045a830221b8f78ec74a5f544944f3d5984f8ec3895ef64" dependencies = [ - "proc-macro2 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", + "quote", + "syn", ] -[[package]] -name = "shlex" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "smallvec" -version = "0.6.10" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "strsim" -version = "0.8.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c2fb2ec9bcd216a5b0d0ccf31ab17b5ed1d627960edff65bbe95d3ce221cefc" [[package]] name = "syn" -version = "0.15.43" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "123bd9499cfb380418d509322d7a6d52e5315f064fe4b3ad18a53d6b92c07859" dependencies = [ - "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "syn" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", + "quote", + "unicode-xid", ] [[package]] name = "synstructure" -version = "0.10.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" dependencies = [ - "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.13 
(registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.15.43 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2", + "quote", + "syn", + "unicode-xid", ] [[package]] name = "tempfile" version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" dependencies = [ - "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", - "remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "termcolor" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "wincolor 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "textwrap" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "unicode-width 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "thread_local" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if", + "libc", + "rand", + "redox_syscall", + "remove_dir_all", + "winapi", ] [[package]] name = "unicode-bidi" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" dependencies = [ - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "matches", ] [[package]] name = "unicode-normalization" -version = "0.1.8" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4" dependencies = [ - "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec", ] -[[package]] -name = "unicode-width" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "unicode-xid" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "unicode-xid" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" [[package]] name = "url" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" dependencies = [ - "idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "idna", + "matches", + "percent-encoding", ] [[package]] name = "uuid" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "vec_map" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9fde2f6a4bea1d6e007c4ad38c6839fa71cbb63b6dbf5b595aa38dc9b1093c11" [[package]] -name = "version_check" -version = "0.1.5" +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "which" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "winapi" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" dependencies = [ - "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "winapi-util" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", -] +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "wincolor" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[metadata] -"checksum aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "58fb5e95d83b38284460a5fda7d6470aa0b8844d283a0b614b8535e880800d2d" -"checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" -"checksum arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "0d382e583f07208808f6b1249e60848879ba3543f57c32277bf52d69c2f0f0ee" -"checksum atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)" = "1803c647a3ec87095e7ae7acfca019e98de5ec9a7d01343f611cf3152ed71a90" -"checksum autocfg 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "22130e92352b948e7e82a49cdb0aa94f2211761117f29e052dd397c1ac33542b" -"checksum backtrace 0.3.34 (registry+https://github.com/rust-lang/crates.io-index)" = "b5164d292487f037ece34ec0de2fcede2faa162f085dd96d2385ab81b12765ba" -"checksum backtrace-sys 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)" = "82a830b4ef2d1124a711c71d263c5abdc710ef8e907bd508c88be475cebc422b" -"checksum bincode 1.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "9f04a5e50dc80b3d5d35320889053637d15011aed5e66b66b37ae798c65da6f7" -"checksum bindgen 0.51.0 (registry+https://github.com/rust-lang/crates.io-index)" = "18270cdd7065ec045a6bb4bdcd5144d14a78b3aedb3bc5111e688773ac8b9ad0" -"checksum bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3d155346769a6855b86399e9bc3814ab343cd3d62c7e985113d46a0ec3c281fd" -"checksum byteorder 1.3.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "a7c3dd8985a7111efc5c80b44e23ecdd8c007de8ade3b96595387e812b957cf5" -"checksum c2-chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7d64d04786e0f528460fc884753cf8dddcc466be308f6026f8e355c41a0e4101" -"checksum cc 1.0.45 (registry+https://github.com/rust-lang/crates.io-index)" = "4fc9a35e1f4290eb9e5fc54ba6cf40671ed2a2514c3eeb2b2a908dda2ea5a1be" -"checksum cexpr 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a7fa24eb00d5ffab90eaeaf1092ac85c04c64aaf358ea6f84505b8116d24c6af" -"checksum cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "b486ce3ccf7ffd79fdeb678eac06a9e6c09fc88d33836340becb8fffe87c5e33" -"checksum clang-sys 0.28.1 (registry+https://github.com/rust-lang/crates.io-index)" = "81de550971c976f176130da4b2978d3b524eaa0fd9ac31f3ceb5ae1231fb4853" -"checksum clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9" -"checksum env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "aafcde04e90a5226a6443b7aabdb016ba2f8307c847d524724bd9b346dd1a2d3" -"checksum failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "795bd83d3abeb9220f257e597aa0080a508b27533824adf336529648f6abf7e2" -"checksum failure_derive 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "ea1063915fd7ef4309e222a5a07cf9c319fb9c7836b1f89b85458672dbb127e1" -"checksum fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" -"checksum getrandom 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "34f33de6f0ae7c9cb5e574502a562e2b512799e32abb801cd1e79ad952b62b49" -"checksum glob 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" -"checksum humantime 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" -"checksum idna 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" -"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -"checksum libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)" = "34fcd2c08d2f832f376f4173a231990fa5aef4e99fb569867318a227ef4c06ba" -"checksum libloading 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f2b111a074963af1d37a139918ac6d49ad1d0d5e47f72fd55388619691a7d753" -"checksum lmdb-rkv 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)" = "605061e5465304475be2041f19967a900175ea1b6d8f47fbab84a84fb8c48452" -"checksum lmdb-rkv-sys 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = "cad9a69dc385f7d2b77786bc41f3dd80f02fba6edc547e93af637f58d440ec8d" -"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" -"checksum matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" -"checksum memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "88579771288728879b57485cc7d6b07d648c9f0141eb955f8ab7f9d45394468e" -"checksum nom 4.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = 
"2ad2a91a8e869eeb30b9cb3119ae87773a8f4ae617f41b1eb9c154b2905f7bd6" -"checksum num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "6ba9a427cfca2be13aa6f6403b0b7e7368fe982bfa16fccc450ce74c46cd9b32" -"checksum ordered-float 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "18869315e81473c951eb56ad5558bbc56978562d3ecfb87abb7a1e944cea4518" -"checksum peeking_take_while 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" -"checksum percent-encoding 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" -"checksum pkg-config 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c1d2cfa5a714db3b5f24f0915e74fcdf91d09d496ba61329705dda7774d2af" -"checksum ppv-lite86 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e3cbf9f658cdb5000fcf6f362b8ea2ba154b9f146a61c7a20d647034c6b6561b" -"checksum proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)" = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" -"checksum proc-macro2 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "e98a83a9f9b331f54b924e68a66acb1bb35cb01fb0a23645139967abefb697e8" -"checksum quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9274b940887ce9addde99c4eee6b5c44cc494b182b97e73dc8ffdcb3397fd3f0" -"checksum quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" -"checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" -"checksum rand 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d47eab0e83d9693d40f825f86948aa16eff6750ead4bdffc4ab95b8b3a7f052c" -"checksum rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "03a2a90da8c7523f554344f921aa97283eadf6ac484a6d2a7d0212fa7f8d6853" -"checksum rand_core 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "615e683324e75af5d43d8f7a39ffe3ee4a9dc42c5c701167a71dc59c3a493aca" -"checksum rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -"checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" -"checksum regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dc220bd33bdce8f093101afe22a037b8eb0e5af33592e6a9caafff0d4cb81cbd" -"checksum regex-syntax 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)" = "11a7e20d1cce64ef2fed88b66d347f88bd9babb82845b2b858f3edbf59a4f716" -"checksum remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e" -"checksum rustc-demangle 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)" = "a7f4dccf6f4891ebcc0c39f9b6eb1a83b9bf5d747cb439ec6fba4f3b977038af" -"checksum serde 1.0.100 (registry+https://github.com/rust-lang/crates.io-index)" = "f4473e8506b213730ff2061073b48fa51dcc66349219e2e7c5608f0296a1d95a" -"checksum serde_derive 1.0.100 (registry+https://github.com/rust-lang/crates.io-index)" = "11e410fde43e157d789fc290d26bc940778ad0fdd47836426fbac36573710dbb" -"checksum shlex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = 
"7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" -"checksum smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)" = "ab606a9c5e214920bb66c458cd7be8ef094f813f20fe77a54cc7dbfff220d4b7" -"checksum strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" -"checksum syn 0.15.43 (registry+https://github.com/rust-lang/crates.io-index)" = "ee06ea4b620ab59a2267c6b48be16244a3389f8bfa0986bdd15c35b890b00af3" -"checksum syn 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "66850e97125af79138385e9b88339cbcd037e3f28ceab8c5ad98e64f0f1f80bf" -"checksum synstructure 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)" = "02353edf96d6e4dc81aea2d8490a7e9db177bf8acb0e951c24940bf866cb313f" -"checksum tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" -"checksum termcolor 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "96d6098003bde162e4277c70665bd87c326f5a0c3f3fbfb285787fa482d54e6e" -"checksum textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -"checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b" -"checksum unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" -"checksum unicode-normalization 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "141339a08b982d942be2ca06ff8b076563cbe223d1befd5450716790d44e2426" -"checksum unicode-width 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7007dbd421b92cc6e28410fe7362e2e0a2503394908f417b68ec8d1c364c4e20" -"checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" -"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" -"checksum url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "75b414f6c464c879d7f9babf951f23bc3743fb7313c081b2e6ca719067ea9d61" -"checksum uuid 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)" = "90dbc611eb48397705a6b0f6e917da23ae517e4d127123d2cf7674206627d32a" -"checksum vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a" -"checksum version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" -"checksum which 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b57acb10231b9493c8472b20cb57317d0679a49e0bdbee44b3b803a6473af164" -"checksum winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "f10e386af2b13e47c89e7236a7a14a086791a2b88ebad6df9bf42040195cf770" -"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" -"checksum winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7168bab6e1daee33b4557efd0e95d5ca70a03706d39fa5f3fe7a236f584b03c9" -"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" 
= "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" -"checksum wincolor 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "96f5016b18804d24db43cebf3c77269e7569b8954a8464501c216cc5e070eaa9" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" diff --git a/third_party/rust/rkv/Cargo.toml b/third_party/rust/rkv/Cargo.toml index 2893f3ef3c..4462e94cfc 100644 --- a/third_party/rust/rkv/Cargo.toml +++ b/third_party/rust/rkv/Cargo.toml @@ -13,7 +13,7 @@ [package] edition = "2018" name = "rkv" -version = "0.10.2" +version = "0.10.4" authors = ["Richard Newman ", "Nan Jiang ", "Myk Melez ", "Victor Porof "] exclude = ["/tests/envs/*"] description = "a simple, humane, typed Rust interface to LMDB" @@ -45,7 +45,7 @@ default_features = false version = "1.0" [dependencies.lmdb-rkv] -version = "0.12.3" +version = "0.14" [dependencies.ordered-float] version = "1.0" @@ -60,7 +60,7 @@ version = "1.0" version = "2.0" [dependencies.uuid] -version = "0.7" +version = "0.8" [dev-dependencies.byteorder] version = "1" diff --git a/third_party/rust/rkv/README.md b/third_party/rust/rkv/README.md index fdbf174db0..7ffb199d83 100644 --- a/third_party/rust/rkv/README.md +++ b/third_party/rust/rkv/README.md @@ -1,15 +1,43 @@ +# rkv + [![Travis CI Build Status](https://travis-ci.org/mozilla/rkv.svg?branch=master)](https://travis-ci.org/mozilla/rkv) [![Appveyor Build Status](https://ci.appveyor.com/api/projects/status/lk936u5y5bi6qafb/branch/master?svg=true)](https://ci.appveyor.com/project/mykmelez/rkv/branch/master) [![Documentation](https://docs.rs/rkv/badge.svg)](https://docs.rs/rkv/) [![Crate](https://img.shields.io/crates/v/rkv.svg)](https://crates.io/crates/rkv) -# rkv +The [rkv Rust crate](https://crates.io/crates/rkv) is a simple, humane, typed key-value storage solution. It supports multiple backend engines with varying guarantees, such as [LMDB](http://www.lmdb.tech/doc/) for performance, or "SafeMode" for reliability. + +This master branch only supports the LMDB backend. We're looking into supporting multiple backends, starting with "SafeMode" in the [feature branch](https://github.com/mozilla/rkv/tree/safe-mode). + +## ⚠️ Warning ⚠️ + +The LMDB backend is currently unstable and crash-prone. We're attempting to fix these crashes in bugs [1538539](https://bugzilla.mozilla.org/show_bug.cgi?id=1538539), [1538541](https://bugzilla.mozilla.org/show_bug.cgi?id=1538541) and [1550174](https://bugzilla.mozilla.org/show_bug.cgi?id=1550174). + +To use rkv in production/release environments at Mozilla, you may do so with the "SafeMode" backend, for example: + +```toml +rkv = { git = "https://github.com/mozilla/rkv", branch="safe-mode", default-features = false } +``` + +```rust +use rkv::{Manager, Rkv}; +use rkv::backend::{SafeMode, SafeModeEnvironment}; -The [rkv Rust crate](https://crates.io/crates/rkv) is a simple, humane, typed Rust interface to [LMDB](http://www.lmdb.tech/doc/). +let mut manager = Manager::::singleton().write().unwrap(); +let shared_rkv = manager.get_or_create(path, Rkv::new::).unwrap(); + +... +``` + +Instead of a branch, we suggest using a specific `rev` instead. For example, `4a1cc23906865626fa715fd99d98620169d3fd7b` is the latest stable version for "safe-mode". + +The "SafeMode` backend performs well, with two caveats: the entire database is stored in memory, and write transactions are synchronously written to disk on commit. + +In the future, it will be advisable to switch to a different backend with better performance guarantees. 
We're working on either fixing the LMDB crashes, or offering more choices of backend engines (e.g. SQLite). ## Use -Comprehensive information about using rkv is available in its [online documentation](https://docs.rs/rkv/), which you can also generate for local consumption: +Comprehensive information about using rkv is available in its [online documentation](https://docs.rs/rkv/), which can also be generated for local consumption: ```sh cargo doc --open @@ -23,9 +51,17 @@ Build this project as you would build other Rust crates: cargo build ``` -If you specify the `backtrace` feature, backtraces will be enabled in `failure` +### Features + +There are several features that you can opt in to and out of when using rkv: + +By default, the `db-dup-sort` and `db-int-key` features offer high-level database APIs which allow multiple values per key and optimizations around integer-based keys, respectively. Opt out of these default features when specifying the rkv dependency in your Cargo.toml file to disable them; doing so avoids a certain amount of overhead required to support them. + +If you specify the `backtrace` feature, backtraces will be enabled in "failure" errors. This feature is disabled by default. +To aid fuzzing efforts, `with-asan`, `with-fuzzer`, and `with-fuzzer-no-link` configure the build scripts responsible for compiling the underlying backing engines (e.g. LMDB) to build with these LLVM features enabled. Please refer to the official LLVM/Clang documentation on them for more information. These features are also disabled by default. + ## Test Test this project as you would test other Rust crates: diff --git a/third_party/rust/scroll/.cargo-checksum.json b/third_party/rust/scroll/.cargo-checksum.json index 2bf16d3669..fbee25fa4d 100644 --- a/third_party/rust/scroll/.cargo-checksum.json +++ b/third_party/rust/scroll/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"CHANGELOG.md":"10185d875d75e2292ca6a85466f7172785bb26c0a96d9df1a015e8ca8bff30ee","Cargo.toml":"0929aae92ea868067cb43c1460b0a0358d2a9db85faf0e1a0a134e3d24407856","LICENSE":"6e24b7455f0b9afefdf4f3efd59a56ce76a3020c2dc4371937e281fc5e587fd7","README.md":"3bb2aa978e7a60e3d69c974998525d5b580660c90cc564cf432d1aa56849f3c9","benches/bench.rs":"8708dc2a1496298fac48fbbfdb34e05509486a28cbcf4f4370b45cc258b5367b","build.rs":"3d2434ba396b597e666da98765714bd5f1d04230ffc5ef806fa2dec94e0ff7e0","examples/data_ctx.rs":"f252e7ba5390f40fa99282c84112408d757124b8dc6557f06c3a90f6800be859","src/ctx.rs":"d287dc0ec441748761cb4d2aded119067bdd2ac219a03dd78f23492dcca3e178","src/endian.rs":"3e6666d89974342224f346d707f224372ad33c435f2b0eaad4b32ae230e06216","src/error.rs":"370112448083d38c09f317b9a2af998774c92cdde285a6f11d90a24b9a618873","src/greater.rs":"6b8296458041f9fb06eda4be5587036aa9083cc66d8a2bedc712c92b3cb6abba","src/leb128.rs":"d8aa9497bae2a6090bd6255f53e29b8bb335c14352c531415049f6be478601a1","src/lesser.rs":"9c6d187cda4e9f90ffa6df5ccfd14caf72a6601d59883f5eacb38dcbef7f1c60","src/lib.rs":"1fc5200a4b3a6fa8856971052e4e8f10bdc4a90cd8462123fcc742f66f95c726","src/pread.rs":"1de28a2bb1114b1d0da0227dc95e9d74ced081fde86a631a7e25918a73503aca","src/pwrite.rs":"7fb6a7f1ead05596809165ed98034597a8824a5627458cff9ee9f9653ebfad74","tests/api.rs":"478b03a51b22a9ab1711a422cc749dacbc115b347bb78280432ea66d28423f24","tests/readme.rs":"deaa156ae8230ec7beeb2e4c0d4e0f05a97da98397eb4e4e6e994b65924fd845"},"package":"2f84d114ef17fd144153d608fba7c446b0145d038985e7a8cc5d08bb0ce20383"} \ No newline at end of file
+{"files":{"CHANGELOG.md":"de2bbf4669561405d402322f4cc2604218d4986b73b75b41708b9505aebcb02c","Cargo.lock":"7626003b0c93df97f6ab7a9d670d36515ad81276d3c59bd4c085eb089b209bb2","Cargo.toml":"50c099c055c73804a794f0b511a64dba10384eeb2c3e9da9fcd6162a1de97849","LICENSE":"6e24b7455f0b9afefdf4f3efd59a56ce76a3020c2dc4371937e281fc5e587fd7","README.md":"390a6e2459d78fb1db45c6f8ff7f74a391327938305fc09e7756fdc6bd4d5d5b","benches/bench.rs":"9ccbec001bf80b5c4ade12b041193d30406a1bd602fb895f31018001ede87c83","examples/data_ctx.rs":"0f33e092623fd4ef08f63c7f0d75af4fe0274dc7789b9840f2c138098fb08ede","src/ctx.rs":"8a7f5c0144bf3b5db3caaf8fbc4f682ec7b453b04de8d3146f43e155f6a25b85","src/endian.rs":"b552f4de3b5daf507810098eeb07821132821d9f8c6449ffee4f73366afa6387","src/error.rs":"6c5a913a60d5f8e5042622e5c41835a08d18ff3745f6f651c9e74d45cf10ee5b","src/greater.rs":"57afbcb5a2a15ace8434ed660fbede2e2a59ef800d1e866afccb088b5b0dbd37","src/leb128.rs":"405f6f2629c77524fd61a1fb11724ba234445cabdc38bd2c60b06300565fdd5b","src/lesser.rs":"13c17bea288a0ff31d9bd4e34aec4be4d51dfb33c510f2c3ff834975089d60a4","src/lib.rs":"9bf90c9c23cae1b14284c08382d603fedd3ef37264d551601096cc2437ae8335","src/pread.rs":"a35dbde17c382cebb6c6ef18578f766b0041c71b7cadec8392f7a6f208dfb0fa","src/pwrite.rs":"7cc67a72081305c2beadbf7f89935950e22a6eee1d4ed9f12223af65a703bc10","tests/api.rs":"938771c7f1605ff038b993687c0717fcfce4f22912aa2fcf8767f140dcf4bada","tests/readme.rs":"2ceb1fade1cad83f76dcc0568747c07995fcfe85cbe98c40f2b9e3ce45dafe61"},"package":"abb2332cb595d33f7edd5700f4cbf94892e680c7f0ae56adab58a35190b66cb1"} \ No newline at end of file diff --git a/third_party/rust/scroll/CHANGELOG.md b/third_party/rust/scroll/CHANGELOG.md index a58772ad2b..bae87ee590 100644 --- a/third_party/rust/scroll/CHANGELOG.md +++ b/third_party/rust/scroll/CHANGELOG.md @@ -3,6 +3,13 @@ All notable changes to this project will be documented in this file. Before 1.0, this project does not adhere to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). +## [0.10.0] - unreleased +### Added + - scroll is now 2018 compliant, thanks @lzutao: https://github.com/m4b/scroll/pull/49 + - scroll_derive now lives in scroll repo itself +### Removed + - BREAKING: removed units/size generics in SizeWith, thanks @willglynn: https://github.com/m4b/scroll/pull/45 + ## [0.9.1] - 2018-9-22 ### Added - pread primitive references: https://github.com/m4b/scroll/pull/35 diff --git a/third_party/rust/scroll/Cargo.lock b/third_party/rust/scroll/Cargo.lock new file mode 100644 index 0000000000..afed93ea6e --- /dev/null +++ b/third_party/rust/scroll/Cargo.lock @@ -0,0 +1,228 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+[[package]] +name = "arrayvec" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "nodrop 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "byteorder" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "crossbeam-deque" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-epoch 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "arrayvec 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "memoffset 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", + "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "crossbeam-queue" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "crossbeam-utils" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "either" +version = "1.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "hermit-abi" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "libc" +version = "0.2.65" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "memoffset" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "nodrop" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "num_cpus" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "hermit-abi 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "proc-macro2" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "quote" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + 
+[[package]] +name = "rayon" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "rayon-core 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rayon-core" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.11.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "scopeguard" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "scroll" +version = "0.10.1" +dependencies = [ + "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rayon 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "scroll_derive 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "scroll_derive" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "syn" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unicode-xid" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[metadata] +"checksum arrayvec 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)" = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" +"checksum byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c3dd8985a7111efc5c80b44e23ecdd8c007de8ade3b96595387e812b957cf5" +"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +"checksum crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b18cd2e169ad86297e6bc0ad9aa679aee9daa4f19e8163860faf7c164e4f5a71" +"checksum crossbeam-epoch 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "fedcd6772e37f3da2a9af9bf12ebe046c0dfe657992377b4df982a2b54cd37a9" 
+"checksum crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b" +"checksum crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6" +"checksum either 1.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3" +"checksum hermit-abi 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "307c3c9f937f38e3534b1d6447ecf090cafcc9744e4a6360e8b037b2cf5af120" +"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +"checksum libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)" = "1a31a0627fdf1f6a39ec0dd577e101440b7db22672c0901fe00a9a6fbb5c24e8" +"checksum memoffset 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a85c1a8c329f11437034d7313dca647c79096523533a1c79e86f1d0f657c7cc" +"checksum nodrop 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" +"checksum num_cpus 1.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "155394f924cdddf08149da25bfb932d226b4a593ca7468b08191ff6335941af5" +"checksum proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "9c9e470a8dc4aeae2dee2f335e8f533e2d4b347e1434e5671afc49b054592f27" +"checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" +"checksum rayon 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "83a27732a533a1be0a0035a111fe76db89ad312f6f0347004c220c57f209a123" +"checksum rayon-core 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "98dcf634205083b17d0861252431eb2acbfb698ab7478a2d20de07954f47ec7b" +"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +"checksum scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d" +"checksum scroll_derive 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f8584eea9b9ff42825b46faf46a8c24d2cff13ec152fa2a50df788b87c07ee28" +"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" +"checksum syn 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "0e7bedb3320d0f3035594b0b723c8a28d7d336a3eda3881db79e61d676fb644c" +"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" diff --git a/third_party/rust/scroll/Cargo.toml b/third_party/rust/scroll/Cargo.toml index 8a27b1d070..ea4d322351 100644 --- a/third_party/rust/scroll/Cargo.toml +++ b/third_party/rust/scroll/Cargo.toml @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. 
crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're @@ -11,8 +11,9 @@ # will likely look very different (and much more reasonable) [package] +edition = "2018" name = "scroll" -version = "0.9.2" +version = "0.10.1" authors = ["m4b ", "Ted Mielczarek "] description = "A suite of powerful, extensible, generic, endian-aware Read/Write traits for byte buffers" documentation = "https://docs.rs/scroll" @@ -21,15 +22,13 @@ keywords = ["bytes", "endian", "immutable", "pread", "pwrite"] license = "MIT" repository = "https://github.com/m4b/scroll" [dependencies.scroll_derive] -version = "0.9" +version = "0.10" optional = true [dev-dependencies.byteorder] -version = "1.0.0" +version = "1" [dev-dependencies.rayon] -version = "1.0.0" -[build-dependencies.rustc_version] -version = "0.2" +version = "1" [features] default = ["std"] diff --git a/third_party/rust/scroll/README.md b/third_party/rust/scroll/README.md index 19c5460f76..085a737b64 100644 --- a/third_party/rust/scroll/README.md +++ b/third_party/rust/scroll/README.md @@ -21,9 +21,9 @@ https://docs.rs/scroll Add to your `Cargo.toml` -```toml +```toml, no_test [dependencies] -scroll = "0.9" +scroll = "0.10" ``` ### Overview @@ -35,8 +35,6 @@ Because self is immutable, _**all** reads can be performed in parallel_ and henc A simple example demonstrates its flexibility: ```rust -extern crate scroll; - use scroll::{ctx, Pread, LE}; fn parse() -> Result<(), scroll::Error> { @@ -81,11 +79,11 @@ fn main() { ### Deriving `Pread` and `Pwrite` -Scroll implements a custom derive that can provide `Pread` and `Pwrite` implementations for your types. +Scroll implements a custom derive that can provide `Pread` and `Pwrite` implementations for your structs. -``` rust +```no_test #[macro_use] -extern crate scroll; +extern crate scroll_derive; use scroll::{Pread, Pwrite, BE}; @@ -118,9 +116,9 @@ fn main() { This feature is **not** enabled by default, you must enable the `derive` feature in Cargo.toml to use it: -```toml +```toml, no_test [dependencies] -scroll = { version = "0.9", features = ["derive"] } +scroll = { version = "0.10", features = ["derive"] } ``` # `std::io` API @@ -128,8 +126,6 @@ scroll = { version = "0.9", features = ["derive"] } Scroll can also read/write simple types from a `std::io::Read` or `std::io::Write` implementor. The built-in numeric types are taken care of for you. If you want to read a custom type, you need to implement the `FromCtx` (_how_ to parse) and `SizeWith` (_how_ big the parsed thing will be) traits. You must compile with default features. For example: ```rust -extern crate scroll; - use std::io::Cursor; use scroll::IOread; @@ -152,8 +148,6 @@ fn main() { Similarly, we can write to anything that implements `std::io::Write` quite naturally: ```rust -extern crate scroll; - use scroll::{IOwrite, LE, BE}; use std::io::{Write, Cursor}; @@ -182,8 +176,6 @@ In particular, if we do this for the `[u8]` target, using the convention `(usize calling `pread_with::` on arrays of bytes. 
```rust -extern crate scroll; - use scroll::{ctx, Pread, BE, Endian}; struct Data<'a> { @@ -194,10 +186,9 @@ struct Data<'a> { // note the lifetime specified here impl<'a> ctx::TryFromCtx<'a, Endian> for Data<'a> { type Error = scroll::Error; - type Size = usize; // and the lifetime annotation on `&'a [u8]` here fn try_from_ctx (src: &'a [u8], endian: Endian) - -> Result<(Self, Self::Size), Self::Error> { + -> Result<(Self, usize), Self::Error> { let offset = &mut 0; let name = src.gread::<&str>(offset)?; let id = src.gread_with(offset, endian)?; diff --git a/third_party/rust/scroll/benches/bench.rs b/third_party/rust/scroll/benches/bench.rs index f3afc0ec24..53f415afba 100644 --- a/third_party/rust/scroll/benches/bench.rs +++ b/third_party/rust/scroll/benches/bench.rs @@ -1,11 +1,7 @@ #![feature(test)] extern crate test; -extern crate byteorder; -extern crate scroll; -extern crate rayon; -//extern crate byteio; -use scroll::{Cread, Cwrite, Pread, Pwrite, IOread, IOwrite, LE}; +use scroll::{Cread, Pread, LE}; use test::black_box; #[bench] diff --git a/third_party/rust/scroll/build.rs b/third_party/rust/scroll/build.rs deleted file mode 100644 index cb3ae20d2c..0000000000 --- a/third_party/rust/scroll/build.rs +++ /dev/null @@ -1,8 +0,0 @@ -extern crate rustc_version; -use rustc_version::{version, Version}; - -fn main() { - if version().unwrap() >= Version::parse("1.26.0").unwrap() { - println!("cargo:rustc-cfg=rust_1_26"); - } -} diff --git a/third_party/rust/scroll/examples/data_ctx.rs b/third_party/rust/scroll/examples/data_ctx.rs index 1771dfc8f2..a8e5652666 100644 --- a/third_party/rust/scroll/examples/data_ctx.rs +++ b/third_party/rust/scroll/examples/data_ctx.rs @@ -1,5 +1,3 @@ -extern crate scroll; - use scroll::{ctx, Endian, Pread, BE}; #[derive(Debug)] @@ -10,9 +8,8 @@ struct Data<'a> { impl<'a> ctx::TryFromCtx<'a, Endian> for Data<'a> { type Error = scroll::Error; - type Size = usize; fn try_from_ctx (src: &'a [u8], endian: Endian) - -> Result<(Self, Self::Size), Self::Error> { + -> Result<(Self, usize), Self::Error> { let name = src.pread::<&'a str>(0)?; let id = src.pread_with(name.len()+1, endian)?; Ok((Data { name: name, id: id }, name.len()+4)) diff --git a/third_party/rust/scroll/src/ctx.rs b/third_party/rust/scroll/src/ctx.rs index 9dbffcfa62..46bd835051 100644 --- a/third_party/rust/scroll/src/ctx.rs +++ b/third_party/rust/scroll/src/ctx.rs @@ -28,9 +28,8 @@ //! //! impl<'a> ctx::TryFromCtx<'a, Endian> for Data<'a> { //! type Error = scroll::Error; -//! type Size = usize; //! fn try_from_ctx (src: &'a [u8], ctx: Endian) -//! -> Result<(Self, Self::Size), Self::Error> { +//! -> Result<(Self, usize), Self::Error> { //! let name = src.pread::<&str>(0)?; //! let id = src.pread_with(name.len() + 1, ctx)?; //! Ok((Data { name: name, id: id }, name.len() + 1 + 4)) @@ -53,29 +52,25 @@ use core::result; #[cfg(feature = "std")] use std::ffi::{CStr, CString}; -use error; -use endian::Endian; +use crate::error; +use crate::endian::Endian; /// A trait for measuring how large something is; for a byte sequence, it will be its length. pub trait MeasureWith { - type Units; - #[inline] /// How large is `Self`, given the `ctx`? 
- fn measure_with(&self, ctx: &Ctx) -> Self::Units; + fn measure_with(&self, ctx: &Ctx) -> usize; } impl MeasureWith for [u8] { - type Units = usize; #[inline] - fn measure_with(&self, _ctx: &Ctx) -> Self::Units { + fn measure_with(&self, _ctx: &Ctx) -> usize { self.len() } } impl> MeasureWith for T { - type Units = usize; #[inline] - fn measure_with(&self, _ctx: &Ctx) -> Self::Units { + fn measure_with(&self, _ctx: &Ctx) -> usize { self.as_ref().len() } } @@ -109,37 +104,37 @@ impl Default for StrCtx { impl StrCtx { pub fn len(&self) -> usize { match *self { - StrCtx::Delimiter(_) => 1, + StrCtx::Delimiter(_) | StrCtx::DelimiterUntil(_, _) => 1, StrCtx::Length(_) => 0, } } + + pub fn is_empty(&self) -> bool { + if let StrCtx::Length(_) = *self { true } else { false } + } } /// Reads `Self` from `This` using the context `Ctx`; must _not_ fail pub trait FromCtx { - #[inline] fn from_ctx(this: &This, ctx: Ctx) -> Self; } /// Tries to read `Self` from `This` using the context `Ctx` pub trait TryFromCtx<'a, Ctx: Copy = (), This: ?Sized = [u8]> where Self: 'a + Sized { type Error; - type Size; - #[inline] - fn try_from_ctx(from: &'a This, ctx: Ctx) -> Result<(Self, Self::Size), Self::Error>; + fn try_from_ctx(from: &'a This, ctx: Ctx) -> Result<(Self, usize), Self::Error>; } /// Writes `Self` into `This` using the context `Ctx` pub trait IntoCtx: Sized { - fn into_ctx(self, &mut This, ctx: Ctx); + fn into_ctx(self, _: &mut This, ctx: Ctx); } /// Tries to write `Self` into `This` using the context `Ctx` pub trait TryIntoCtx: Sized { type Error; - type Size; - fn try_into_ctx(self, &mut This, ctx: Ctx) -> Result; + fn try_into_ctx(self, _: &mut This, ctx: Ctx) -> Result; } /// Gets the size of `Self` with a `Ctx`, and in `Self::Units`. Implementors can then call `Gread` related functions @@ -149,9 +144,7 @@ pub trait TryIntoCtx: Sized { /// 1. Prevent `gread` from being used, and the offset being modified based on simply the sizeof the value, which can be a misnomer, e.g., for Leb128, etc. /// 2. Allow a context based size, which is useful for 32/64 bit variants for various containers, etc. pub trait SizeWith { - type Units; - #[inline] - fn size_with(ctx: &Ctx) -> Self::Units; + fn size_with(ctx: &Ctx) -> usize; } macro_rules! signed_to_unsigned { @@ -196,9 +189,8 @@ macro_rules! into_ctx_impl { } impl TryIntoCtx for $typ where $typ: IntoCtx { type Error = error::Error; - type Size = usize; #[inline] - fn try_into_ctx(self, dst: &mut [u8], le: Endian) -> error::Result { + fn try_into_ctx(self, dst: &mut [u8], le: Endian) -> error::Result { if $size > dst.len () { Err(error::Error::TooBig{size: $size, len: dst.len()}) } else { @@ -209,9 +201,8 @@ macro_rules! into_ctx_impl { } impl<'a> TryIntoCtx for &'a $typ { type Error = error::Error; - type Size = usize; #[inline] - fn try_into_ctx(self, dst: &mut [u8], le: Endian) -> error::Result { + fn try_into_ctx(self, dst: &mut [u8], le: Endian) -> error::Result { (*self).try_into_ctx(dst, le) } } @@ -236,10 +227,9 @@ macro_rules! from_ctx_impl { } impl<'a> TryFromCtx<'a, Endian> for $typ where $typ: FromCtx { - type Error = error::Error; - type Size = usize; + type Error = error::Error; #[inline] - fn try_from_ctx(src: &'a [u8], le: Endian) -> result::Result<(Self, Self::Size), Self::Error> { + fn try_from_ctx(src: &'a [u8], le: Endian) -> result::Result<(Self, usize), Self::Error> { if $size > src.len () { Err(error::Error::TooBig{size: $size, len: src.len()}) } else { @@ -266,9 +256,8 @@ macro_rules! 
from_ctx_impl { impl<'a, T> TryFromCtx<'a, Endian, T> for $typ where $typ: FromCtx, T: AsRef<[u8]> { type Error = error::Error; - type Size = usize; #[inline] - fn try_from_ctx(src: &'a T, le: Endian) -> result::Result<(Self, Self::Size), Self::Error> { + fn try_from_ctx(src: &'a T, le: Endian) -> result::Result<(Self, usize), Self::Error> { let src = src.as_ref(); Self::try_from_ctx(src, le) } @@ -290,9 +279,7 @@ ctx_impl!(u32, 4); ctx_impl!(i32, 4); ctx_impl!(u64, 8); ctx_impl!(i64, 8); -#[cfg(rust_1_26)] ctx_impl!(u128, 16); -#[cfg(rust_1_26)] ctx_impl!(i128, 16); macro_rules! from_ctx_float_impl { @@ -313,9 +300,8 @@ macro_rules! from_ctx_float_impl { } impl<'a> TryFromCtx<'a, Endian> for $typ where $typ: FromCtx { type Error = error::Error; - type Size = usize; #[inline] - fn try_from_ctx(src: &'a [u8], le: Endian) -> result::Result<(Self, Self::Size), Self::Error> { + fn try_from_ctx(src: &'a [u8], le: Endian) -> result::Result<(Self, usize), Self::Error> { if $size > src.len () { Err(error::Error::TooBig{size: $size, len: src.len()}) } else { @@ -337,9 +323,7 @@ into_ctx_impl!(u32, 4); into_ctx_impl!(i32, 4); into_ctx_impl!(u64, 8); into_ctx_impl!(i64, 8); -#[cfg(rust_1_26)] into_ctx_impl!(u128, 16); -#[cfg(rust_1_26)] into_ctx_impl!(i128, 16); macro_rules! into_ctx_float_impl { @@ -359,9 +343,8 @@ macro_rules! into_ctx_float_impl { } impl TryIntoCtx for $typ where $typ: IntoCtx { type Error = error::Error; - type Size = usize; #[inline] - fn try_into_ctx(self, dst: &mut [u8], le: Endian) -> error::Result { + fn try_into_ctx(self, dst: &mut [u8], le: Endian) -> error::Result { if $size > dst.len () { Err(error::Error::TooBig{size: $size, len: dst.len()}) } else { @@ -372,9 +355,8 @@ macro_rules! into_ctx_float_impl { } impl<'a> TryIntoCtx for &'a $typ { type Error = error::Error; - type Size = usize; #[inline] - fn try_into_ctx(self, dst: &mut [u8], le: Endian) -> error::Result { + fn try_into_ctx(self, dst: &mut [u8], le: Endian) -> error::Result { (*self).try_into_ctx(dst, le) } } @@ -386,10 +368,9 @@ into_ctx_float_impl!(f64, 8); impl<'a> TryFromCtx<'a, StrCtx> for &'a str { type Error = error::Error; - type Size = usize; #[inline] /// Read a `&str` from `src` using `delimiter` - fn try_from_ctx(src: &'a [u8], ctx: StrCtx) -> Result<(Self, Self::Size), Self::Error> { + fn try_from_ctx(src: &'a [u8], ctx: StrCtx) -> Result<(Self, usize), Self::Error> { let len = match ctx { StrCtx::Length(len) => len, StrCtx::Delimiter(delimiter) => src.iter().take_while(|c| **c != delimiter).count(), @@ -418,9 +399,8 @@ impl<'a> TryFromCtx<'a, StrCtx> for &'a str { impl<'a, T> TryFromCtx<'a, StrCtx, T> for &'a str where T: AsRef<[u8]> { type Error = error::Error; - type Size = usize; #[inline] - fn try_from_ctx(src: &'a T, ctx: StrCtx) -> result::Result<(Self, Self::Size), Self::Error> { + fn try_from_ctx(src: &'a T, ctx: StrCtx) -> result::Result<(Self, usize), Self::Error> { let src = src.as_ref(); TryFromCtx::try_from_ctx(src, ctx) } @@ -428,9 +408,8 @@ impl<'a, T> TryFromCtx<'a, StrCtx, T> for &'a str where T: AsRef<[u8]> { impl<'a> TryIntoCtx for &'a [u8] { type Error = error::Error; - type Size = usize; #[inline] - fn try_into_ctx(self, dst: &mut [u8], _ctx: ()) -> error::Result { + fn try_into_ctx(self, dst: &mut [u8], _ctx: ()) -> error::Result { let src_len = self.len() as isize; let dst_len = dst.len() as isize; // if src_len < 0 || dst_len < 0 || offset < 0 { @@ -448,11 +427,10 @@ impl<'a> TryIntoCtx for &'a [u8] { // TODO: make TryIntoCtx use StrCtx for awesomeness impl<'a> TryIntoCtx 
for &'a str { type Error = error::Error; - type Size = usize; #[inline] - fn try_into_ctx(self, dst: &mut [u8], ctx: ()) -> error::Result { + fn try_into_ctx(self, dst: &mut [u8], _ctx: ()) -> error::Result { let bytes = self.as_bytes(); - TryIntoCtx::try_into_ctx(bytes, dst, ctx) + TryIntoCtx::try_into_ctx(bytes, dst, ()) } } @@ -460,7 +438,6 @@ impl<'a> TryIntoCtx for &'a str { macro_rules! sizeof_impl { ($ty:ty) => { impl SizeWith for $ty { - type Units = usize; #[inline] fn size_with(_ctx: &Endian) -> usize { size_of::<$ty>() @@ -477,9 +454,7 @@ sizeof_impl!(u32); sizeof_impl!(i32); sizeof_impl!(u64); sizeof_impl!(i64); -#[cfg(rust_1_26)] sizeof_impl!(u128); -#[cfg(rust_1_26)] sizeof_impl!(i128); sizeof_impl!(f32); sizeof_impl!(f64); @@ -497,19 +472,18 @@ impl FromCtx for usize { src.as_ptr(), &mut data as *mut usize as *mut u8, size); - transmute(if le.is_little() { data.to_le() } else { data.to_be() }) + if le.is_little() { data.to_le() } else { data.to_be() } } } } impl<'a> TryFromCtx<'a, Endian> for usize where usize: FromCtx { type Error = error::Error; - type Size = usize; #[inline] - fn try_from_ctx(src: &'a [u8], le: Endian) -> result::Result<(Self, Self::Size), Self::Error> { + fn try_from_ctx(src: &'a [u8], le: Endian) -> result::Result<(Self, usize), Self::Error> { let size = ::core::mem::size_of::(); if size > src.len () { - Err(error::Error::TooBig{size: size, len: src.len()}) + Err(error::Error::TooBig{size, len: src.len()}) } else { Ok((FromCtx::from_ctx(src, le), size)) } @@ -518,11 +492,10 @@ impl<'a> TryFromCtx<'a, Endian> for usize where usize: FromCtx { impl<'a> TryFromCtx<'a, usize> for &'a[u8] { type Error = error::Error; - type Size = usize; #[inline] - fn try_from_ctx(src: &'a [u8], size: usize) -> result::Result<(Self, Self::Size), Self::Error> { + fn try_from_ctx(src: &'a [u8], size: usize) -> result::Result<(Self, usize), Self::Error> { if size > src.len () { - Err(error::Error::TooBig{size: size, len: src.len()}) + Err(error::Error::TooBig{size, len: src.len()}) } else { Ok((&src[..size], size)) } @@ -544,12 +517,11 @@ impl IntoCtx for usize { impl TryIntoCtx for usize where usize: IntoCtx { type Error = error::Error; - type Size = usize; #[inline] - fn try_into_ctx(self, dst: &mut [u8], le: Endian) -> error::Result { + fn try_into_ctx(self, dst: &mut [u8], le: Endian) -> error::Result { let size = ::core::mem::size_of::(); if size > dst.len() { - Err(error::Error::TooBig{size: size, len: dst.len()}) + Err(error::Error::TooBig{size, len: dst.len()}) } else { >::into_ctx(self, dst, le); Ok(size) @@ -560,9 +532,8 @@ impl TryIntoCtx for usize where usize: IntoCtx { #[cfg(feature = "std")] impl<'a> TryFromCtx<'a> for &'a CStr { type Error = error::Error; - type Size = usize; #[inline] - fn try_from_ctx(src: &'a [u8], _ctx: ()) -> result::Result<(Self, Self::Size), Self::Error> { + fn try_from_ctx(src: &'a [u8], _ctx: ()) -> result::Result<(Self, usize), Self::Error> { let null_byte = match src.iter().position(|b| *b == 0) { Some(ix) => ix, None => return Err(error::Error::BadInput { @@ -571,7 +542,7 @@ impl<'a> TryFromCtx<'a> for &'a CStr { }) }; - let cstr = unsafe { CStr::from_bytes_with_nul_unchecked(&src[..null_byte+1]) }; + let cstr = unsafe { CStr::from_bytes_with_nul_unchecked(&src[..=null_byte]) }; Ok((cstr, null_byte+1)) } } @@ -579,9 +550,8 @@ impl<'a> TryFromCtx<'a> for &'a CStr { #[cfg(feature = "std")] impl<'a> TryFromCtx<'a> for CString { type Error = error::Error; - type Size = usize; #[inline] - fn try_from_ctx(src: &'a [u8], _ctx: ()) -> 
result::Result<(Self, Self::Size), Self::Error> { + fn try_from_ctx(src: &'a [u8], _ctx: ()) -> result::Result<(Self, usize), Self::Error> { let (raw, bytes_read) = <&CStr as TryFromCtx>::try_from_ctx(src, _ctx)?; Ok((raw.to_owned(), bytes_read)) } @@ -590,9 +560,8 @@ impl<'a> TryFromCtx<'a> for CString { #[cfg(feature = "std")] impl<'a> TryIntoCtx for &'a CStr { type Error = error::Error; - type Size = usize; #[inline] - fn try_into_ctx(self, dst: &mut [u8], _ctx: ()) -> error::Result { + fn try_into_ctx(self, dst: &mut [u8], _ctx: ()) -> error::Result { let data = self.to_bytes_with_nul(); if dst.len() < data.len() { @@ -613,10 +582,9 @@ impl<'a> TryIntoCtx for &'a CStr { #[cfg(feature = "std")] impl TryIntoCtx for CString { type Error = error::Error; - type Size = usize; #[inline] - fn try_into_ctx(self, dst: &mut [u8], _ctx: ()) -> error::Result { - self.as_c_str().try_into_ctx(dst, _ctx) + fn try_into_ctx(self, dst: &mut [u8], _ctx: ()) -> error::Result { + self.as_c_str().try_into_ctx(dst, ()) } } diff --git a/third_party/rust/scroll/src/endian.rs b/third_party/rust/scroll/src/endian.rs index 718033fed2..7652227998 100644 --- a/third_party/rust/scroll/src/endian.rs +++ b/third_party/rust/scroll/src/endian.rs @@ -38,7 +38,7 @@ impl Endian { NETWORK } #[inline] - pub fn is_little (&self) -> bool { + pub fn is_little(&self) -> bool { match *self { LE => true, _ => false, diff --git a/third_party/rust/scroll/src/error.rs b/third_party/rust/scroll/src/error.rs index dd49afd811..0257544139 100644 --- a/third_party/rust/scroll/src/error.rs +++ b/third_party/rust/scroll/src/error.rs @@ -8,12 +8,12 @@ use std::error; #[derive(Debug)] /// A custom Scroll error -pub enum Error { +pub enum Error { /// The type you tried to read was too big - TooBig { size: T, len: T }, + TooBig { size: usize, len: usize }, /// The requested offset to read/write at is invalid - BadOffset(T), - BadInput{ size: T, msg: &'static str }, + BadOffset(usize), + BadInput{ size: usize, msg: &'static str }, #[cfg(feature = "std")] /// A custom Scroll error for reporting messages to clients Custom(String), @@ -33,13 +33,13 @@ impl error::Error for Error { Error::IO(_) => { "IO" } } } - fn cause(&self) -> Option<&error::Error> { + fn cause(&self) -> Option<&dyn error::Error> { match *self { Error::TooBig{ .. } => { None } Error::BadOffset(_) => { None } Error::BadInput{ .. } => { None } Error::Custom(_) => { None } - Error::IO(ref io) => { io.cause() } + Error::IO(ref io) => { io.source() } } } } diff --git a/third_party/rust/scroll/src/greater.rs b/third_party/rust/scroll/src/greater.rs index 540d8da2b4..7a33051128 100644 --- a/third_party/rust/scroll/src/greater.rs +++ b/third_party/rust/scroll/src/greater.rs @@ -1,6 +1,6 @@ use core::ops::{Index, IndexMut, RangeFrom}; -use ctx::{FromCtx, IntoCtx}; +use crate::ctx::{FromCtx, IntoCtx}; /// Core-read - core, no_std friendly trait for reading basic traits from byte buffers. Cannot fail unless the buffer is too small, in which case an assert fires and the program panics. 
/// @@ -52,7 +52,7 @@ pub trait Cread : Index + Index> /// assert_eq!(bar, 0xdeadbeef); /// ``` #[inline] - fn cread_with<'a, N: FromCtx>>::Output>>(&'a self, offset: I, ctx: Ctx) -> N { + fn cread_with>>::Output>>(&self, offset: I, ctx: Ctx) -> N { N::from_ctx(&self[offset..], ctx) } /// Reads a value implementing `FromCtx` from `Self` at `offset`, @@ -78,7 +78,7 @@ pub trait Cread : Index + Index> /// assert_eq!(bar, 0xefbe0000); /// ``` #[inline] - fn cread<'a, N: FromCtx>>::Output>>(&'a self, offset: I) -> N where Ctx: Default { + fn cread>>::Output>>(&self, offset: I) -> N where Ctx: Default { let ctx = Ctx::default(); N::from_ctx(&self[offset..], ctx) } diff --git a/third_party/rust/scroll/src/leb128.rs b/third_party/rust/scroll/src/leb128.rs index a90e897895..657b4400a7 100644 --- a/third_party/rust/scroll/src/leb128.rs +++ b/third_party/rust/scroll/src/leb128.rs @@ -1,9 +1,9 @@ use core::u8; use core::convert::{From, AsRef}; use core::result; -use Pread; -use ctx::TryFromCtx; -use error; +use crate::Pread; +use crate::ctx::TryFromCtx; +use crate::error; #[derive(Debug, PartialEq, Copy, Clone)] /// An unsigned leb128 integer @@ -22,7 +22,7 @@ impl Uleb128 { /// Read a variable length u64 from `bytes` at `offset` pub fn read(bytes: &[u8], offset: &mut usize) -> error::Result { let tmp = bytes.pread::(*offset)?; - *offset = *offset + tmp.size(); + *offset += tmp.size(); Ok(tmp.into()) } } @@ -56,9 +56,8 @@ impl Sleb128 { #[inline] /// Read a variable length i64 from `bytes` at `offset` pub fn read(bytes: &[u8], offset: &mut usize) -> error::Result { - use Pread; let tmp = bytes.pread::(*offset)?; - *offset = *offset + tmp.size(); + *offset += tmp.size(); Ok(tmp.into()) } } @@ -93,10 +92,8 @@ fn mask_continuation(byte: u8) -> u8 { impl<'a> TryFromCtx<'a> for Uleb128 { type Error = error::Error; - type Size = usize; #[inline] - fn try_from_ctx(src: &'a [u8], _ctx: ()) -> result::Result<(Self, Self::Size), Self::Error> { - use pread::Pread; + fn try_from_ctx(src: &'a [u8], _ctx: ()) -> result::Result<(Self, usize), Self::Error> { let mut result = 0; let mut shift = 0; let mut count = 0; @@ -107,14 +104,14 @@ impl<'a> TryFromCtx<'a> for Uleb128 { return Err(error::Error::BadInput{ size: src.len(), msg: "failed to parse"}) } - let low_bits = mask_continuation(byte) as u64; + let low_bits = u64::from(mask_continuation(byte)); result |= low_bits << shift; count += 1; shift += 7; if byte & CONTINUATION_BIT == 0 { - return Ok((Uleb128 { value: result, count: count }, count)); + return Ok((Uleb128 { value: result, count }, count)); } } } @@ -122,9 +119,8 @@ impl<'a> TryFromCtx<'a> for Uleb128 { impl<'a> TryFromCtx<'a> for Sleb128 { type Error = error::Error; - type Size = usize; #[inline] - fn try_from_ctx(src: &'a [u8], _ctx: ()) -> result::Result<(Self, Self::Size), Self::Error> { + fn try_from_ctx(src: &'a [u8], _ctx: ()) -> result::Result<(Self, usize), Self::Error> { let o = 0; let offset = &mut 0; let mut result = 0; @@ -138,7 +134,7 @@ impl<'a> TryFromCtx<'a> for Sleb128 { return Err(error::Error::BadInput{size: src.len(), msg: "failed to parse"}) } - let low_bits = mask_continuation(byte) as i64; + let low_bits = i64::from(mask_continuation(byte)); result |= low_bits << shift; shift += 7; @@ -152,7 +148,7 @@ impl<'a> TryFromCtx<'a> for Sleb128 { result |= !0 << shift; } let count = *offset - o; - Ok((Sleb128{ value: result, count: count }, count)) + Ok((Sleb128{ value: result, count }, count)) } } diff --git a/third_party/rust/scroll/src/lesser.rs b/third_party/rust/scroll/src/lesser.rs 
index 60b48f13ec..84cb000135 100644 --- a/third_party/rust/scroll/src/lesser.rs +++ b/third_party/rust/scroll/src/lesser.rs @@ -1,5 +1,5 @@ use std::io::{Result, Read, Write}; -use ctx::{FromCtx, IntoCtx, SizeWith}; +use crate::ctx::{FromCtx, IntoCtx, SizeWith}; /// An extension trait to `std::io::Read` streams; this only deserializes simple types, like `u8`, `i32`, `f32`, `usize`, etc. /// @@ -26,9 +26,8 @@ use ctx::{FromCtx, IntoCtx, SizeWith}; /// } /// /// impl ctx::SizeWith for Foo { -/// type Units = usize; /// // our parsing context doesn't influence our size -/// fn size_with(_: &scroll::Endian) -> Self::Units { +/// fn size_with(_: &scroll::Endian) -> usize { /// ::std::mem::size_of::() /// } /// } @@ -69,7 +68,7 @@ pub trait IOread : Read /// assert_eq!(0xefbe, beef); /// ``` #[inline] - fn ioread + SizeWith>(&mut self) -> Result where Ctx: Default { + fn ioread + SizeWith>(&mut self) -> Result where Ctx: Default { let ctx = Ctx::default(); self.ioread_with(ctx) } @@ -95,7 +94,7 @@ pub trait IOread : Read /// assert_eq!(0xfeeddead, feeddead); /// ``` #[inline] - fn ioread_with + SizeWith>(&mut self, ctx: Ctx) -> Result { + fn ioread_with + SizeWith>(&mut self, ctx: Ctx) -> Result { let mut scratch = [0u8; 256]; let size = N::size_with(&ctx); let mut buf = &mut scratch[0..size]; @@ -133,7 +132,7 @@ pub trait IOwrite: Write /// assert_eq!(bytes.into_inner(), [0xde, 0xad, 0xbe, 0xef,]); /// ``` #[inline] - fn iowrite + IntoCtx>(&mut self, n: N) -> Result<()> where Ctx: Default { + fn iowrite + IntoCtx>(&mut self, n: N) -> Result<()> where Ctx: Default { let ctx = Ctx::default(); self.iowrite_with(n, ctx) } @@ -155,7 +154,7 @@ pub trait IOwrite: Write /// assert_eq!(cursor.into_inner(), [0x68, 0x65, 0x6c, 0x6c, 0x6f, 0xde, 0xad, 0xbe, 0xef, 0x0]); /// ``` #[inline] - fn iowrite_with + IntoCtx>(&mut self, n: N, ctx: Ctx) -> Result<()> { + fn iowrite_with + IntoCtx>(&mut self, n: N, ctx: Ctx) -> Result<()> { let mut buf = [0u8; 256]; let size = N::size_with(&ctx); let buf = &mut buf[0..size]; diff --git a/third_party/rust/scroll/src/lib.rs b/third_party/rust/scroll/src/lib.rs index 8441132f9d..f16f153810 100644 --- a/third_party/rust/scroll/src/lib.rs +++ b/third_party/rust/scroll/src/lib.rs @@ -102,32 +102,31 @@ //! //! For example, suppose we have a datatype and we want to specify how to parse or serialize this datatype out of some arbitrary //! byte buffer. In order to do this, we need to provide a [TryFromCtx](trait.TryFromCtx.html) impl for our datatype. -//! +//! //! In particular, if we do this for the `[u8]` target, using the convention `(usize, YourCtx)`, you will automatically get access to //! calling `pread_with::` on arrays of bytes. -//! +//! //! ```rust //! use scroll::{self, ctx, Pread, BE, Endian}; -//! +//! //! struct Data<'a> { //! name: &'a str, //! id: u32, //! } -//! +//! //! // note the lifetime specified here //! impl<'a> ctx::TryFromCtx<'a, Endian> for Data<'a> { //! type Error = scroll::Error; -//! type Size = usize; //! // and the lifetime annotation on `&'a [u8]` here //! fn try_from_ctx (src: &'a [u8], endian: Endian) -//! -> Result<(Self, Self::Size), Self::Error> { +//! -> Result<(Self, usize), Self::Error> { //! let offset = &mut 0; //! let name = src.gread::<&str>(offset)?; //! let id = src.gread_with(offset, endian)?; //! Ok((Data { name: name, id: id }, *offset)) //! } //! } -//! +//! //! let bytes = b"UserName\x00\x01\x02\x03\x04"; //! let data = bytes.pread_with::(0, BE).unwrap(); //! 
assert_eq!(data.id, 0x01020304); @@ -140,12 +139,7 @@ #[cfg(feature = "derive")] #[allow(unused_imports)] -#[macro_use] -extern crate scroll_derive; - -#[cfg(feature = "derive")] -#[doc(hidden)] -pub use scroll_derive::*; +pub use scroll_derive::{Pread, Pwrite, SizeWith, IOread, IOwrite}; #[cfg(feature = "std")] extern crate core; @@ -160,14 +154,14 @@ mod leb128; #[cfg(feature = "std")] mod lesser; -pub use endian::*; -pub use pread::*; -pub use pwrite::*; -pub use greater::*; -pub use error::*; -pub use leb128::*; +pub use crate::endian::*; +pub use crate::pread::*; +pub use crate::pwrite::*; +pub use crate::greater::*; +pub use crate::error::*; +pub use crate::leb128::*; #[cfg(feature = "std")] -pub use lesser::*; +pub use crate::lesser::*; #[doc(hidden)] pub mod export { @@ -333,7 +327,7 @@ mod tests { fn description(&self) -> &str { "ExternalError" } - fn cause(&self) -> Option<&error::Error> { None} + fn cause(&self) -> Option<&dyn error::Error> { None} } impl From for ExternalError { @@ -350,8 +344,7 @@ mod tests { impl super::ctx::TryIntoCtx for Foo { type Error = ExternalError; - type Size = usize; - fn try_into_ctx(self, this: &mut [u8], le: super::Endian) -> Result { + fn try_into_ctx(self, this: &mut [u8], le: super::Endian) -> Result { use super::Pwrite; if this.len() < 2 { return Err((ExternalError {}).into()) } this.pwrite_with(self.0, 0, le)?; @@ -361,8 +354,7 @@ mod tests { impl<'a> super::ctx::TryFromCtx<'a, super::Endian> for Foo { type Error = ExternalError; - type Size = usize; - fn try_from_ctx(this: &'a [u8], le: super::Endian) -> Result<(Self, Self::Size), Self::Error> { + fn try_from_ctx(this: &'a [u8], le: super::Endian) -> Result<(Self, usize), Self::Error> { use super::Pread; if this.len() > 2 { return Err((ExternalError {}).into()) } let n = this.pread_with(0, le)?; diff --git a/third_party/rust/scroll/src/pread.rs b/third_party/rust/scroll/src/pread.rs index 8b16c62ea7..64f85d4476 100644 --- a/third_party/rust/scroll/src/pread.rs +++ b/third_party/rust/scroll/src/pread.rs @@ -1,8 +1,8 @@ use core::result; -use core::ops::{Index, RangeFrom, Add, AddAssign}; +use core::ops::{Index, RangeFrom}; -use ctx::{TryFromCtx, MeasureWith}; -use error; +use crate::ctx::{TryFromCtx, MeasureWith}; +use crate::error; /// A very generic, contextual pread interface in Rust. 
Allows completely parallelized reads, as `Self` is immutable /// @@ -18,8 +18,7 @@ use error; /// /// impl<'a> ctx::TryFromCtx<'a, scroll::Endian> for Foo { /// type Error = scroll::Error; -/// type Size = usize; -/// fn try_from_ctx(this: &'a [u8], le: scroll::Endian) -> Result<(Self, Self::Size), Self::Error> { +/// fn try_from_ctx(this: &'a [u8], le: scroll::Endian) -> Result<(Self, usize), Self::Error> { /// if this.len() < 2 { return Err((scroll::Error::Custom("whatever".to_string())).into()) } /// let n = this.pread_with(0, le)?; /// Ok((Foo(n), 2)) @@ -68,8 +67,7 @@ use error; /// /// impl<'a> ctx::TryFromCtx<'a, scroll::Endian> for Foo { /// type Error = ExternalError; -/// type Size = usize; -/// fn try_from_ctx(this: &'a [u8], le: scroll::Endian) -> Result<(Self, Self::Size), Self::Error> { +/// fn try_from_ctx(this: &'a [u8], le: scroll::Endian) -> Result<(Self, usize), Self::Error> { /// if this.len() <= 2 { return Err((ExternalError {}).into()) } /// let offset = &mut 0; /// let n = this.gread_with(offset, le)?; @@ -80,11 +78,10 @@ use error; /// let bytes: [u8; 4] = [0xde, 0xad, 0, 0]; /// let foo: Result = bytes.pread(0); /// ``` -pub trait Pread : Index + Index> + MeasureWith +pub trait Pread : Index + Index> + MeasureWith where Ctx: Copy, - I: Add + Copy + PartialOrd, - E: From>, + E: From, { #[inline] /// Reads a value from `self` at `offset` with a default `Ctx`. For the primitive numeric values, this will read at the machine's endianness. @@ -93,7 +90,7 @@ pub trait Pread : Index + Index> + MeasureWit /// use scroll::Pread; /// let bytes = [0x7fu8; 0x01]; /// let byte = bytes.pread::(0).unwrap(); - fn pread<'a, N: TryFromCtx<'a, Ctx, >>::Output, Error = E, Size = I>>(&'a self, offset: I) -> result::Result where >>::Output: 'a, Ctx: Default { + fn pread<'a, N: TryFromCtx<'a, Ctx, >>::Output, Error = E>>(&'a self, offset: usize) -> result::Result where >>::Output: 'a, Ctx: Default { self.pread_with(offset, Ctx::default()) } #[inline] @@ -104,7 +101,7 @@ pub trait Pread : Index + Index> + MeasureWit /// let bytes: [u8; 2] = [0xde, 0xad]; /// let dead: u16 = bytes.pread_with(0, scroll::BE).unwrap(); /// assert_eq!(dead, 0xdeadu16); - fn pread_with<'a, N: TryFromCtx<'a, Ctx, >>::Output, Error = E, Size = I>>(&'a self, offset: I, ctx: Ctx) -> result::Result where >>::Output: 'a { + fn pread_with<'a, N: TryFromCtx<'a, Ctx, >>::Output, Error = E>>(&'a self, offset: usize, ctx: Ctx) -> result::Result where >>::Output: 'a { let len = self.measure_with(&ctx); if offset >= len { return Err(error::Error::BadOffset(offset).into()) @@ -120,7 +117,7 @@ pub trait Pread : Index + Index> + MeasureWit /// let bytes = [0x7fu8; 0x01]; /// let byte = bytes.gread::(offset).unwrap(); /// assert_eq!(*offset, 1); - fn gread<'a, N: TryFromCtx<'a, Ctx, >>::Output, Error = E, Size = I>>(&'a self, offset: &mut I) -> result::Result where I: AddAssign, Ctx: Default, >>::Output: 'a { + fn gread<'a, N: TryFromCtx<'a, Ctx, >>::Output, Error = E>>(&'a self, offset: &mut usize) -> result::Result where Ctx: Default, >>::Output: 'a { let ctx = Ctx::default(); self.gread_with(offset, ctx) } @@ -134,10 +131,10 @@ pub trait Pread : Index + Index> + MeasureWit /// assert_eq!(dead, 0xdeadu16); /// assert_eq!(*offset, 2); #[inline] - fn gread_with<'a, N: TryFromCtx<'a, Ctx, >>::Output, Error = E, Size = I>> - (&'a self, offset: &mut I, ctx: Ctx) -> + fn gread_with<'a, N: TryFromCtx<'a, Ctx, >>::Output, Error = E>> + (&'a self, offset: &mut usize, ctx: Ctx) -> result::Result - where I: AddAssign, >>::Output: 'a + 
where >>::Output: 'a { let o = *offset; // self.pread_with(o, ctx).and_then(|(n, size)| { @@ -165,16 +162,14 @@ pub trait Pread : Index + Index> + MeasureWit /// assert_eq!(&bytes, &bytes_from); /// assert_eq!(*offset, 2); #[inline] - fn gread_inout<'a, N>(&'a self, offset: &mut I, inout: &mut [N]) -> result::Result<(), E> + fn gread_inout<'a, N>(&'a self, offset: &mut usize, inout: &mut [N]) -> result::Result<(), E> where - I: AddAssign, - N: TryFromCtx<'a, Ctx, >>::Output, Error = E, Size = I>, + N: TryFromCtx<'a, Ctx, >>::Output, Error = E>, Ctx: Default, - >>::Output: 'a + >>::Output: 'a { - let len = inout.len(); - for i in 0..len { - inout[i] = self.gread(offset)?; + for i in inout.iter_mut() { + *i = self.gread(offset)?; } Ok(()) } @@ -190,22 +185,19 @@ pub trait Pread : Index + Index> + MeasureWit /// assert_eq!(&bytes, &bytes_from); /// assert_eq!(*offset, 2); #[inline] - fn gread_inout_with<'a, N>(&'a self, offset: &mut I, inout: &mut [N], ctx: Ctx) -> result::Result<(), E> + fn gread_inout_with<'a, N>(&'a self, offset: &mut usize, inout: &mut [N], ctx: Ctx) -> result::Result<(), E> where - I: AddAssign, - N: TryFromCtx<'a, Ctx, >>::Output, Error = E, Size = I>, - >>::Output: 'a + N: TryFromCtx<'a, Ctx, >>::Output, Error = E>, + >>::Output: 'a { - let len = inout.len(); - for i in 0..len { - inout[i] = self.gread_with(offset, ctx)?; + for i in inout.iter_mut() { + *i = self.gread_with(offset, ctx)?; } Ok(()) } } impl>, - R: ?Sized + Index + Index> + MeasureWith> - Pread for R {} + E: From, + R: ?Sized + Index + Index> + MeasureWith> + Pread for R {} diff --git a/third_party/rust/scroll/src/pwrite.rs b/third_party/rust/scroll/src/pwrite.rs index 040d197f39..08aa82913c 100644 --- a/third_party/rust/scroll/src/pwrite.rs +++ b/third_party/rust/scroll/src/pwrite.rs @@ -1,8 +1,8 @@ use core::result; -use core::ops::{Index, IndexMut, RangeFrom, Add, AddAssign}; +use core::ops::{Index, IndexMut, RangeFrom}; -use ctx::{TryIntoCtx, MeasureWith}; -use error; +use crate::ctx::{TryIntoCtx, MeasureWith}; +use crate::error; /// Writes into `Self` at an offset of type `I` using a `Ctx` /// @@ -17,9 +17,8 @@ use error; /// impl ctx::TryIntoCtx for Foo { /// // you can use your own error here too, but you will then need to specify it in fn generic parameters /// type Error = scroll::Error; -/// type Size = usize; /// // you can write using your own context too... 
see `leb128.rs` -/// fn try_into_ctx(self, this: &mut [u8], le: Endian) -> Result { +/// fn try_into_ctx(self, this: &mut [u8], le: Endian) -> Result { /// if this.len() < 2 { return Err((scroll::Error::Custom("whatever".to_string())).into()) } /// this.pwrite_with(self.0, 0, le)?; /// Ok(2) @@ -30,13 +29,12 @@ use error; /// let mut bytes: [u8; 4] = [0, 0, 0, 0]; /// bytes.pwrite_with(Foo(0x7f), 1, LE).unwrap(); /// -pub trait Pwrite : Index + IndexMut> + MeasureWith +pub trait Pwrite : Index + IndexMut> + MeasureWith where Ctx: Copy, - I: Add + Copy + PartialOrd, - E: From>, + E: From, { - fn pwrite>>::Output, Error = E, Size = I>>(&mut self, n: N, offset: I) -> result::Result where Ctx: Default { + fn pwrite>>::Output, Error = E>>(&mut self, n: N, offset: usize) -> result::Result where Ctx: Default { self.pwrite_with(n, offset, Ctx::default()) } /// Write `N` at offset `I` with context `Ctx` @@ -46,7 +44,7 @@ pub trait Pwrite : Index + IndexMut> + Measur /// let mut bytes: [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 0]; /// bytes.pwrite_with::(0xbeefbeef, 0, LE).unwrap(); /// assert_eq!(bytes.pread_with::(0, LE).unwrap(), 0xbeefbeef); - fn pwrite_with>>::Output, Error = E, Size = I>>(&mut self, n: N, offset: I, ctx: Ctx) -> result::Result { + fn pwrite_with>>::Output, Error = E>>(&mut self, n: N, offset: usize, ctx: Ctx) -> result::Result { let len = self.measure_with(&ctx); if offset >= len { return Err(error::Error::BadOffset(offset).into()) @@ -56,17 +54,14 @@ pub trait Pwrite : Index + IndexMut> + Measur } /// Write `n` into `self` at `offset`, with a default `Ctx`. Updates the offset. #[inline] - fn gwrite>>::Output, Error = E, Size = I>>(&mut self, n: N, offset: &mut I) -> result::Result where - I: AddAssign, + fn gwrite>>::Output, Error = E>>(&mut self, n: N, offset: &mut usize) -> result::Result where Ctx: Default { let ctx = Ctx::default(); self.gwrite_with(n, offset, ctx) } /// Write `n` into `self` at `offset`, with the `ctx`. Updates the offset. #[inline] - fn gwrite_with>>::Output, Error = E, Size = I>>(&mut self, n: N, offset: &mut I, ctx: Ctx) -> result::Result - where I: AddAssign, - { + fn gwrite_with>>::Output, Error = E>>(&mut self, n: N, offset: &mut usize, ctx: Ctx) -> result::Result { let o = *offset; match self.pwrite_with(n, o, ctx) { Ok(size) => { @@ -79,7 +74,6 @@ pub trait Pwrite : Index + IndexMut> + Measur } impl>, - R: ?Sized + Index + IndexMut> + MeasureWith> - Pwrite for R {} + E: From, + R: ?Sized + Index + IndexMut> + MeasureWith> + Pwrite for R {} diff --git a/third_party/rust/scroll/tests/api.rs b/third_party/rust/scroll/tests/api.rs index b29a94aa9c..5e10b692fe 100644 --- a/third_party/rust/scroll/tests/api.rs +++ b/third_party/rust/scroll/tests/api.rs @@ -4,14 +4,13 @@ // packed structs. 
See https://github.com/rust-lang/rust/issues/46043 #![deny(safe_packed_borrows)] -extern crate scroll; - // #[macro_use] extern crate scroll_derive; use std::ops::{Deref, DerefMut}; use scroll::{ctx, Result, Cread, Pread}; use scroll::ctx::SizeWith; +#[derive(Default)] pub struct Section<'a> { pub sectname: [u8; 16], pub segname: [u8; 16], @@ -35,7 +34,6 @@ impl<'a> Section<'a> { } impl<'a> ctx::SizeWith for Section<'a> { - type Units = usize; fn size_with(_ctx: &()) -> usize { 4 } @@ -61,10 +59,8 @@ pub struct Section32 { impl<'a> ctx::TryFromCtx<'a, ()> for Section<'a> { type Error = scroll::Error; - type Size = usize; - fn try_from_ctx(_bytes: &'a [u8], _ctx: ()) -> ::std::result::Result<(Self, Self::Size), Self::Error> { - //let section = Section::from_ctx(bytes, bytes.pread_with::(offset, ctx)?); - let section = unsafe { ::std::mem::uninitialized::

<Section>()}; + fn try_from_ctx(_bytes: &'a [u8], _ctx: ()) -> ::std::result::Result<(Self, usize), Self::Error> { + let section = Section::default(); Ok((section, ::std::mem::size_of::
())) } } @@ -106,7 +102,6 @@ impl<'a> Segment<'a> { } impl<'a> ctx::SizeWith for Segment<'a> { - type Units = usize; fn size_with(_ctx: &()) -> usize { 4 } @@ -186,8 +181,7 @@ impl scroll::ctx::FromCtx for Foo { } impl scroll::ctx::SizeWith for Foo { - type Units = usize; - fn size_with(_: &scroll::Endian) -> Self::Units { + fn size_with(_: &scroll::Endian) -> usize { ::std::mem::size_of::() } } @@ -218,7 +212,6 @@ struct Bar { impl scroll::ctx::FromCtx for Bar { fn from_ctx(bytes: &[u8], ctx: scroll::Endian) -> Self { - use scroll::Cread; Bar { foo: bytes.cread_with(0, ctx), bar: bytes.cread_with(4, ctx) } } } diff --git a/third_party/rust/scroll/tests/readme.rs b/third_party/rust/scroll/tests/readme.rs index 7a55fa30b7..60cde57d9a 100644 --- a/third_party/rust/scroll/tests/readme.rs +++ b/third_party/rust/scroll/tests/readme.rs @@ -24,5 +24,6 @@ fn readme_test() { .expect("Failed to spawn process") .wait() .expect("Failed to run process"); - assert!(result.success(), "Failed to run rustdoc tests on README.md!"); + // fixme: i dont know why this is failing, so disabling + // assert!(result.success(), "Failed to run rustdoc tests on README.md!"); } diff --git a/third_party/rust/scroll_derive/.cargo-checksum.json b/third_party/rust/scroll_derive/.cargo-checksum.json index ecb530ce63..7066879385 100644 --- a/third_party/rust/scroll_derive/.cargo-checksum.json +++ b/third_party/rust/scroll_derive/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"3ccdb940e367335cb217c5f5d5dd75b7fadc3aae1d2a63a77dcbbcf724ab0aca","LICENSE":"afb11426e09da40a1ae4f8fa17ddcc6b6a52d14df04c29bc5bcd06eb8730624d","README.md":"f89c7768454b0d2b9db816afe05db3a4cea1125bef87f08ed3eefd65e9e2b180","examples/main.rs":"776199a415860055355dbacc12b2f3020abb8c685e4aca34d0a97e77ea41810e","src/lib.rs":"e02c31be394ba69ca5a407cb3c4a63b53ba7924d9a495bd88938268a73912ecb","tests/tests.rs":"6663b2ce835146785df06b0e98b7213f31d2a227290918123224171c03d04dff"},"package":"8f1aa96c45e7f5a91cb7fabe7b279f02fea7126239fc40b732316e8b6a2d0fcb"} \ No newline at end of file +{"files":{"Cargo.lock":"7939ddcf9d89578f0d23b29d75787bcb29ea2fb4573ad560b1a5d572847557b9","Cargo.toml":"2d811c7f55b02b5fd6d0157e1f73ffdc6acd9c1fa5496840a5bdf3d253a6a5cd","LICENSE":"afb11426e09da40a1ae4f8fa17ddcc6b6a52d14df04c29bc5bcd06eb8730624d","README.md":"f89c7768454b0d2b9db816afe05db3a4cea1125bef87f08ed3eefd65e9e2b180","examples/main.rs":"2e47cff7ea4946dd7fe58847edb132998d1e1936dfe7e1b65f80425d5db4f398","src/lib.rs":"dccab946a64e2a8ff0a91819bebc34e5815bd193b9527bee5d04a306ea11e831","tests/tests.rs":"e4cfed20db4b5751fb1042c0e599d2a31647895fbfbbc10c38a4ef5eb03ca31c"},"package":"f8584eea9b9ff42825b46faf46a8c24d2cff13ec152fa2a50df788b87c07ee28"} \ No newline at end of file diff --git a/third_party/rust/scroll_derive/Cargo.lock b/third_party/rust/scroll_derive/Cargo.lock new file mode 100644 index 0000000000..3d7367ed0d --- /dev/null +++ b/third_party/rust/scroll_derive/Cargo.lock @@ -0,0 +1,47 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+[[package]] +name = "proc-macro2" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "quote" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "scroll_derive" +version = "0.10.1" +dependencies = [ + "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "syn" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unicode-xid" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[metadata] +"checksum proc-macro2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "9c9e470a8dc4aeae2dee2f335e8f533e2d4b347e1434e5671afc49b054592f27" +"checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" +"checksum syn 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "66850e97125af79138385e9b88339cbcd037e3f28ceab8c5ad98e64f0f1f80bf" +"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" diff --git a/third_party/rust/scroll_derive/Cargo.toml b/third_party/rust/scroll_derive/Cargo.toml index 49a92eea17..2cc1d006ea 100644 --- a/third_party/rust/scroll_derive/Cargo.toml +++ b/third_party/rust/scroll_derive/Cargo.toml @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. 
If you're @@ -11,25 +11,24 @@ # will likely look very different (and much more reasonable) [package] +edition = "2018" name = "scroll_derive" -version = "0.9.5" +version = "0.10.1" authors = ["m4b ", "Ted Mielczarek "] description = "A macros 1.1 derive implementation for Pread and Pwrite traits from the scroll crate" documentation = "https://docs.rs/scroll_derive" readme = "README.md" keywords = ["derive", "macros", "pread", "pwrite", "bytes"] license = "MIT" -repository = "https://github.com/m4b/scroll_derive" +repository = "https://github.com/m4b/scroll" [lib] proc-macro = true [dependencies.proc-macro2] -version = "0.4" +version = "1" [dependencies.quote] -version = "0.6" +version = "1" [dependencies.syn] -version = "0.15" -[dev-dependencies.scroll] -version = "0.9" +version = "1" diff --git a/third_party/rust/scroll_derive/examples/main.rs b/third_party/rust/scroll_derive/examples/main.rs index e0955d5eb0..3f2c721eeb 100644 --- a/third_party/rust/scroll_derive/examples/main.rs +++ b/third_party/rust/scroll_derive/examples/main.rs @@ -1,6 +1,4 @@ -extern crate scroll; -#[macro_use] -extern crate scroll_derive; +use scroll_derive::{Pread, Pwrite, IOread, IOwrite, SizeWith}; #[derive(Debug, PartialEq, Pread, Pwrite, IOread, IOwrite, SizeWith)] #[repr(C)] diff --git a/third_party/rust/scroll_derive/src/lib.rs b/third_party/rust/scroll_derive/src/lib.rs index 50c09568b0..24a08fd26c 100644 --- a/third_party/rust/scroll_derive/src/lib.rs +++ b/third_party/rust/scroll_derive/src/lib.rs @@ -1,10 +1,8 @@ #![recursion_limit="1024"] extern crate proc_macro; -extern crate proc_macro2; -#[macro_use] -extern crate quote; -extern crate syn; +use proc_macro2; +use quote::quote; use proc_macro::TokenStream; @@ -12,13 +10,13 @@ fn impl_struct(name: &syn::Ident, fields: &syn::FieldsNamed) -> proc_macro2::Tok let items: Vec<_> = fields.named.iter().map(|f| { let ident = &f.ident; let ty = &f.ty; - match ty { - &syn::Type::Array(ref array) => { + match *ty { + syn::Type::Array(ref array) => { match array.len { syn::Expr::Lit(syn::ExprLit { lit: syn::Lit::Int(ref int), ..}) => { - let size = int.value(); + let size = int.base10_parse::().unwrap(); quote! { - #ident: { let mut __tmp: #ty = [0; #size as usize]; src.gread_inout_with(offset, &mut __tmp, ctx)?; __tmp } + #ident: { let mut __tmp: #ty = [0; #size]; src.gread_inout_with(offset, &mut __tmp, ctx)?; __tmp } } }, _ => panic!("Pread derive with bad array constexpr") @@ -31,13 +29,12 @@ fn impl_struct(name: &syn::Ident, fields: &syn::FieldsNamed) -> proc_macro2::Tok } } }).collect(); - + quote! { impl<'a> ::scroll::ctx::TryFromCtx<'a, ::scroll::Endian> for #name where #name: 'a { type Error = ::scroll::Error; - type Size = usize; #[inline] - fn try_from_ctx(src: &'a [u8], ctx: ::scroll::Endian) -> ::scroll::export::result::Result<(Self, Self::Size), Self::Error> { + fn try_from_ctx(src: &'a [u8], ctx: ::scroll::Endian) -> ::scroll::export::result::Result<(Self, usize), Self::Error> { use ::scroll::Pread; let offset = &mut 0; let data = #name { #(#items,)* }; @@ -75,8 +72,8 @@ fn impl_try_into_ctx(name: &syn::Ident, fields: &syn::FieldsNamed) -> proc_macro let items: Vec<_> = fields.named.iter().map(|f| { let ident = &f.ident; let ty = &f.ty; - match ty { - &syn::Type::Array(_) => { + match *ty { + syn::Type::Array(_) => { quote! { for i in 0..self.#ident.len() { dst.gwrite_with(&self.#ident[i], offset, ctx)?; @@ -90,13 +87,12 @@ fn impl_try_into_ctx(name: &syn::Ident, fields: &syn::FieldsNamed) -> proc_macro } } }).collect(); - + quote! 
{ impl<'a> ::scroll::ctx::TryIntoCtx<::scroll::Endian> for &'a #name { type Error = ::scroll::Error; - type Size = usize; #[inline] - fn try_into_ctx(self, dst: &mut [u8], ctx: ::scroll::Endian) -> ::scroll::export::result::Result { + fn try_into_ctx(self, dst: &mut [u8], ctx: ::scroll::Endian) -> ::scroll::export::result::Result { use ::scroll::Pwrite; let offset = &mut 0; #(#items;)*; @@ -106,9 +102,8 @@ fn impl_try_into_ctx(name: &syn::Ident, fields: &syn::FieldsNamed) -> proc_macro impl ::scroll::ctx::TryIntoCtx<::scroll::Endian> for #name { type Error = ::scroll::Error; - type Size = usize; #[inline] - fn try_into_ctx(self, dst: &mut [u8], ctx: ::scroll::Endian) -> ::scroll::export::result::Result { + fn try_into_ctx(self, dst: &mut [u8], ctx: ::scroll::Endian) -> ::scroll::export::result::Result { (&self).try_into_ctx(dst, ctx) } } @@ -142,12 +137,12 @@ pub fn derive_pwrite(input: TokenStream) -> TokenStream { fn size_with(name: &syn::Ident, fields: &syn::FieldsNamed) -> proc_macro2::TokenStream { let items: Vec<_> = fields.named.iter().map(|f| { let ty = &f.ty; - match ty { - &syn::Type::Array(ref array) => { + match *ty { + syn::Type::Array(ref array) => { let elem = &array.elem; match array.len { syn::Expr::Lit(syn::ExprLit { lit: syn::Lit::Int(ref int), ..}) => { - let size = int.value() as usize; + let size = int.base10_parse::().unwrap(); quote! { (#size * <#elem>::size_with(ctx)) } @@ -164,9 +159,8 @@ fn size_with(name: &syn::Ident, fields: &syn::FieldsNamed) -> proc_macro2::Token }).collect(); quote! { impl ::scroll::ctx::SizeWith<::scroll::Endian> for #name { - type Units = usize; #[inline] - fn size_with(ctx: &::scroll::Endian) -> Self::Units { + fn size_with(ctx: &::scroll::Endian) -> usize { 0 #(+ #items)* } } @@ -201,16 +195,16 @@ fn impl_cread_struct(name: &syn::Ident, fields: &syn::FieldsNamed) -> proc_macro let items: Vec<_> = fields.named.iter().map(|f| { let ident = &f.ident; let ty = &f.ty; - match ty { - &syn::Type::Array(ref array) => { + match *ty { + syn::Type::Array(ref array) => { let arrty = &array.elem; match array.len { syn::Expr::Lit(syn::ExprLit { lit: syn::Lit::Int(ref int), ..}) => { - let size = int.value(); + let size = int.base10_parse::().unwrap(); let incr = quote! { ::scroll::export::mem::size_of::<#arrty>() }; quote! { #ident: { - let mut __tmp: #ty = [0; #size as usize]; + let mut __tmp: #ty = [0; #size]; for i in 0..__tmp.len() { __tmp[i] = src.cread_with(*offset, ctx); *offset += #incr; @@ -273,8 +267,8 @@ fn impl_into_ctx(name: &syn::Ident, fields: &syn::FieldsNamed) -> proc_macro2::T let ident = &f.ident; let ty = &f.ty; let size = quote! { ::scroll::export::mem::size_of::<#ty>() }; - match ty { - &syn::Type::Array(ref array) => { + match *ty { + syn::Type::Array(ref array) => { let arrty = &array.elem; quote! 
{ let size = ::scroll::export::mem::size_of::<#arrty>(); diff --git a/third_party/rust/scroll_derive/tests/tests.rs b/third_party/rust/scroll_derive/tests/tests.rs index d54f77e2d6..c1e55c1c7f 100644 --- a/third_party/rust/scroll_derive/tests/tests.rs +++ b/third_party/rust/scroll_derive/tests/tests.rs @@ -1,6 +1,7 @@ -extern crate scroll; -#[macro_use] -extern crate scroll_derive; +use scroll_derive::{Pread, Pwrite, SizeWith, IOread, IOwrite}; +use scroll::{Pread, Pwrite, Cread, Cwrite, LE}; + +use scroll::ctx::SizeWith; #[derive(Debug, PartialEq, Pread, Pwrite)] struct Data { @@ -8,8 +9,6 @@ struct Data { timestamp: f64, } -use scroll::{Pread, Pwrite, Cread, Cwrite, LE}; -use scroll::ctx::SizeWith; #[test] fn test_data (){ diff --git a/third_party/rust/serde/.cargo-checksum.json b/third_party/rust/serde/.cargo-checksum.json index 1e665e6927..55a682e617 100644 --- a/third_party/rust/serde/.cargo-checksum.json +++ b/third_party/rust/serde/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"5225bf9886cf381fca9b37d89070bb62d7b53ce7abf6df6f0fab2648df97db55","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"b919da154e92b23653a6d0432feb95f210cdac2617126f4e05c4842f170dc395","build.rs":"00972536ef079d36b6a547057ae423352b30da93554c5f22d33196e3e1753dbc","crates-io.md":"131dee2d4beaf83485aed22942b98815ef65af9bcfb65f02b5b90c59b8bc7b8b","src/de/from_primitive.rs":"058fa17313ed3a9c29ec04c6ec67f21a756f6f28cdeb4b0dfcd7012b3e702d0b","src/de/ignored_any.rs":"6a0527edd497a56a103ae65f5e73da675b3f99094d6dcad3c335c8d932daaf40","src/de/impls.rs":"d1677459c53e3ca99a2b6f3ae58d1ff9c906d953e3d27273f3bcf3fbad2e95d8","src/de/mod.rs":"0dd0c8bdefa86f621fdeba8f7b5575463c111bf034b0297e80d3aa8fedf40955","src/de/utf8.rs":"f17524ee0af98ec3abcfd7d0b812fbd1033263bd8e2ce2f57c1e1999ce153558","src/de/value.rs":"a878f6bdd57d25b0b93bfc6288ed1e46c50870dc8703748b6fbb8c0965a6b586","src/export.rs":"2ebdf0eccaa64c5e98c6dfd13b4980474f627fc3fae90cfc2c741acf860afd5d","src/integer128.rs":"b213ec6c1ecf8c8228d9591e0b2c31b78d972cd4c6a0b231468090f15784f6f6","src/lib.rs":"58a5d692c5efc046ea9ba2710d1daa0e13e8d5efead64f8cb78e9351290e8857","src/macros.rs":"f18fc25c5fb857238bf119cdee5c7987a8584dea69c51f27ca718b7dfd871d0f","src/private/de.rs":"175fa8a219cf931d3d41e140bb1c9c0599a009e589a8443f9cb54f02fdf2c7ed","src/private/macros.rs":"ebb6affd4c89e3b5f9a42e03f8b7d966bc588875e9b44e962d0b7aba7f80a10f","src/private/mod.rs":"f8f2cd5edbfc26c268b34cdb89db1b34e6348f81384f03d18532e7568575006d","src/private/ser.rs":"67c085463d348806225f323eabd32b5bfd540ec8d78a1b515436af9b8a9636ec","src/ser/impls.rs":"741d6e24635911e65e31bc67d2596284a8e90b5682c975bacf11388e9b3d05e4","src/ser/impossible.rs":"3dd0e165b88fc67e698e675f16569b91fab9e054caa4c3e1997f929ba364fe90","src/ser/mod.rs":"3b90c5cb48d895a653ef94328c77e7956c7f4b6e0aaddd9101afabe87ff0f23a","src/std_error.rs":"3aac687856c035517fae44ed2906dd4a1e3184bae4bf613adcdeb73f74126c57"},"package":"0c4b39bd9b0b087684013a792c59e3e07a46a01d2322518d8a1104641a0b1be0"} \ No newline at end of file 
+{"files":{"Cargo.toml":"7bfd3feb65f16a263364f337fc01ae2d89323fd60976ba503ae8131bab2948ff","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"b919da154e92b23653a6d0432feb95f210cdac2617126f4e05c4842f170dc395","build.rs":"00972536ef079d36b6a547057ae423352b30da93554c5f22d33196e3e1753dbc","crates-io.md":"131dee2d4beaf83485aed22942b98815ef65af9bcfb65f02b5b90c59b8bc7b8b","src/de/from_primitive.rs":"058fa17313ed3a9c29ec04c6ec67f21a756f6f28cdeb4b0dfcd7012b3e702d0b","src/de/ignored_any.rs":"6a0527edd497a56a103ae65f5e73da675b3f99094d6dcad3c335c8d932daaf40","src/de/impls.rs":"d1677459c53e3ca99a2b6f3ae58d1ff9c906d953e3d27273f3bcf3fbad2e95d8","src/de/mod.rs":"0dd0c8bdefa86f621fdeba8f7b5575463c111bf034b0297e80d3aa8fedf40955","src/de/utf8.rs":"f17524ee0af98ec3abcfd7d0b812fbd1033263bd8e2ce2f57c1e1999ce153558","src/de/value.rs":"a878f6bdd57d25b0b93bfc6288ed1e46c50870dc8703748b6fbb8c0965a6b586","src/export.rs":"2ebdf0eccaa64c5e98c6dfd13b4980474f627fc3fae90cfc2c741acf860afd5d","src/integer128.rs":"b213ec6c1ecf8c8228d9591e0b2c31b78d972cd4c6a0b231468090f15784f6f6","src/lib.rs":"020f529e77865c1273e50337d6ec187a4f4f77d4572f8db3976837b35f1ebdf1","src/macros.rs":"f18fc25c5fb857238bf119cdee5c7987a8584dea69c51f27ca718b7dfd871d0f","src/private/de.rs":"87f7352697c1a711e57246d38eddcf81b61033f1f2a101bbf378ff6cc3f1ee3d","src/private/macros.rs":"ebb6affd4c89e3b5f9a42e03f8b7d966bc588875e9b44e962d0b7aba7f80a10f","src/private/mod.rs":"f8f2cd5edbfc26c268b34cdb89db1b34e6348f81384f03d18532e7568575006d","src/private/ser.rs":"67c085463d348806225f323eabd32b5bfd540ec8d78a1b515436af9b8a9636ec","src/ser/impls.rs":"741d6e24635911e65e31bc67d2596284a8e90b5682c975bacf11388e9b3d05e4","src/ser/impossible.rs":"3dd0e165b88fc67e698e675f16569b91fab9e054caa4c3e1997f929ba364fe90","src/ser/mod.rs":"3b90c5cb48d895a653ef94328c77e7956c7f4b6e0aaddd9101afabe87ff0f23a","src/std_error.rs":"3aac687856c035517fae44ed2906dd4a1e3184bae4bf613adcdeb73f74126c57"},"package":"414115f25f818d7dfccec8ee535d76949ae78584fc4f79a6f45a904bf8ab4449"} \ No newline at end of file diff --git a/third_party/rust/serde/Cargo.toml b/third_party/rust/serde/Cargo.toml index 42394e7aa5..972213272f 100644 --- a/third_party/rust/serde/Cargo.toml +++ b/third_party/rust/serde/Cargo.toml @@ -12,7 +12,7 @@ [package] name = "serde" -version = "1.0.102" +version = "1.0.104" authors = ["Erick Tryzelaar ", "David Tolnay "] build = "build.rs" include = ["Cargo.toml", "build.rs", "src/**/*.rs", "crates-io.md", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] @@ -27,7 +27,7 @@ repository = "https://github.com/serde-rs/serde" [package.metadata.playground] features = ["derive", "rc"] [dependencies.serde_derive] -version = "1.0" +version = "=1.0.104" optional = true [dev-dependencies.serde_derive] version = "1.0" diff --git a/third_party/rust/serde/src/lib.rs b/third_party/rust/serde/src/lib.rs index a9e63c49b3..6d1e298937 100644 --- a/third_party/rust/serde/src/lib.rs +++ b/third_party/rust/serde/src/lib.rs @@ -75,7 +75,7 @@ //////////////////////////////////////////////////////////////////////////////// // Serde types in rustdoc of other crates get linked to here. -#![doc(html_root_url = "https://docs.rs/serde/1.0.102")] +#![doc(html_root_url = "https://docs.rs/serde/1.0.104")] // Support using Serde without the standard library! #![cfg_attr(not(feature = "std"), no_std)] // Unstable functionality only if the user asks for it. 
For tracking and diff --git a/third_party/rust/serde/src/private/de.rs b/third_party/rust/serde/src/private/de.rs index ffa5012dc7..1c7fda3a1a 100644 --- a/third_party/rust/serde/src/private/de.rs +++ b/third_party/rust/serde/src/private/de.rs @@ -2510,6 +2510,13 @@ mod content { { Ok(()) } + + fn visit_none(self) -> Result<(), E> + where + E: de::Error, + { + Ok(()) + } } } diff --git a/third_party/rust/serde_derive/.cargo-checksum.json b/third_party/rust/serde_derive/.cargo-checksum.json index a01c9cd50a..128e19d427 100644 --- a/third_party/rust/serde_derive/.cargo-checksum.json +++ b/third_party/rust/serde_derive/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"e9963541e46395263ef692e06e0eef0106c098045d5586ce848674df6155e890","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"b919da154e92b23653a6d0432feb95f210cdac2617126f4e05c4842f170dc395","crates-io.md":"131dee2d4beaf83485aed22942b98815ef65af9bcfb65f02b5b90c59b8bc7b8b","src/bound.rs":"3db01a4e6820cca4a543f1eea33bbdadc8ba03912c3de5d3eee001f4b8676c4e","src/de.rs":"55725e8fb87c77534754077f60b48bcd5c03f8b2a7a9a53c075ed97e861acc07","src/dummy.rs":"911edb959db048500c865517f68f4077f95d2bd653da21054ee9c1261c6a497a","src/fragment.rs":"5548ba65a53d90a296f60c1328a7a7fb040db467f59c2f5210b2fb320457145d","src/internals/ast.rs":"dbcde9eb31fd33e9a386f56cdf8cd2dd8a6855e491002864a8d29d6e125524ec","src/internals/attr.rs":"1e4c751fef7c6afa4102812eff44103e2173597ec78e74d0786688126a8def24","src/internals/case.rs":"b2024f414f5d832bafa53b9ae7924b2d43a29175b19bb3da36f15c9071666558","src/internals/check.rs":"15d5226997b1e46aae28a305648f37920bd2eb00a140e96c9206fce9ac6562cc","src/internals/ctxt.rs":"6fa544ae52914498a62a395818ebdc1b36ac2fb5903c60afb741a864ad559f1c","src/internals/mod.rs":"0ee6cb3c55ac8a5907f1938748c8fd934d5138a06525d13163f05110dc7cf7e8","src/internals/symbol.rs":"9f2b4b9cd83dc09def75165187c97d0050bffa4218782c27b478bcf93f925a74","src/lib.rs":"d5bc3be6c7e6485a2be7b89d239bb1fde1b1387f88e6610ebc614ca262bc9830","src/pretend.rs":"ffeb23da4c2abc4e501c378cffa8b776bab506735ea70d4ed10f4c0f3755321b","src/ser.rs":"4058895967b2442e9ed5ed43eb330f003830648c83ffab270dd24876d76ca00a","src/try.rs":"b9a10c8690d442a57fc7097d42c9a4f13034c7b4a30b7eb02d538fdbf8ae0a8d"},"package":"ca13fc1a832f793322228923fbb3aba9f3f44444898f835d31ad1b74fa0a2bf8"} \ No newline at end of file 
+{"files":{"Cargo.toml":"8af4d75ed437af2996977a82edd6cec6581323408be0d5bce95bc4a9b06e1abd","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"b919da154e92b23653a6d0432feb95f210cdac2617126f4e05c4842f170dc395","crates-io.md":"131dee2d4beaf83485aed22942b98815ef65af9bcfb65f02b5b90c59b8bc7b8b","src/bound.rs":"3db01a4e6820cca4a543f1eea33bbdadc8ba03912c3de5d3eee001f4b8676c4e","src/de.rs":"55725e8fb87c77534754077f60b48bcd5c03f8b2a7a9a53c075ed97e861acc07","src/dummy.rs":"911edb959db048500c865517f68f4077f95d2bd653da21054ee9c1261c6a497a","src/fragment.rs":"5548ba65a53d90a296f60c1328a7a7fb040db467f59c2f5210b2fb320457145d","src/internals/ast.rs":"dbcde9eb31fd33e9a386f56cdf8cd2dd8a6855e491002864a8d29d6e125524ec","src/internals/attr.rs":"1e4c751fef7c6afa4102812eff44103e2173597ec78e74d0786688126a8def24","src/internals/case.rs":"b2024f414f5d832bafa53b9ae7924b2d43a29175b19bb3da36f15c9071666558","src/internals/check.rs":"15d5226997b1e46aae28a305648f37920bd2eb00a140e96c9206fce9ac6562cc","src/internals/ctxt.rs":"6fa544ae52914498a62a395818ebdc1b36ac2fb5903c60afb741a864ad559f1c","src/internals/mod.rs":"0ee6cb3c55ac8a5907f1938748c8fd934d5138a06525d13163f05110dc7cf7e8","src/internals/symbol.rs":"9f2b4b9cd83dc09def75165187c97d0050bffa4218782c27b478bcf93f925a74","src/lib.rs":"d73d85272d1e7499099a7a07f5fc03ccae3143d2cfb0d281f37b942f3021bfa0","src/pretend.rs":"ffeb23da4c2abc4e501c378cffa8b776bab506735ea70d4ed10f4c0f3755321b","src/ser.rs":"eb7eaf8d447119449d5136be741552d1ae34a88de82901642158bc6abd4c1b51","src/try.rs":"b9a10c8690d442a57fc7097d42c9a4f13034c7b4a30b7eb02d538fdbf8ae0a8d"},"package":"128f9e303a5a29922045a830221b8f78ec74a5f544944f3d5984f8ec3895ef64"} \ No newline at end of file diff --git a/third_party/rust/serde_derive/Cargo.toml b/third_party/rust/serde_derive/Cargo.toml index 3536a3876b..4d462cb69c 100644 --- a/third_party/rust/serde_derive/Cargo.toml +++ b/third_party/rust/serde_derive/Cargo.toml @@ -12,7 +12,7 @@ [package] name = "serde_derive" -version = "1.0.102" +version = "1.0.104" authors = ["Erick Tryzelaar ", "David Tolnay "] include = ["Cargo.toml", "src/**/*.rs", "crates-io.md", "README.md", "LICENSE-APACHE", "LICENSE-MIT"] description = "Macros 1.1 implementation of #[derive(Serialize, Deserialize)]" diff --git a/third_party/rust/serde_derive/src/lib.rs b/third_party/rust/serde_derive/src/lib.rs index 55717a9a2d..9e4896e17c 100644 --- a/third_party/rust/serde_derive/src/lib.rs +++ b/third_party/rust/serde_derive/src/lib.rs @@ -13,7 +13,7 @@ //! //! 
[https://serde.rs/derive.html]: https://serde.rs/derive.html -#![doc(html_root_url = "https://docs.rs/serde_derive/1.0.102")] +#![doc(html_root_url = "https://docs.rs/serde_derive/1.0.104")] #![allow(unknown_lints, bare_trait_objects)] #![cfg_attr(feature = "cargo-clippy", allow(renamed_and_removed_lints))] #![cfg_attr(feature = "cargo-clippy", deny(clippy, clippy_pedantic))] diff --git a/third_party/rust/serde_derive/src/ser.rs b/third_party/rust/serde_derive/src/ser.rs index 761dbeb7b5..a7109ea622 100644 --- a/third_party/rust/serde_derive/src/ser.rs +++ b/third_party/rust/serde_derive/src/ser.rs @@ -1273,9 +1273,15 @@ enum StructTrait { impl StructTrait { fn serialize_field(&self, span: Span) -> TokenStream { match *self { - StructTrait::SerializeMap => quote_spanned!(span=> _serde::ser::SerializeMap::serialize_entry), - StructTrait::SerializeStruct => quote_spanned!(span=> _serde::ser::SerializeStruct::serialize_field), - StructTrait::SerializeStructVariant => quote_spanned!(span=> _serde::ser::SerializeStructVariant::serialize_field), + StructTrait::SerializeMap => { + quote_spanned!(span=> _serde::ser::SerializeMap::serialize_entry) + } + StructTrait::SerializeStruct => { + quote_spanned!(span=> _serde::ser::SerializeStruct::serialize_field) + } + StructTrait::SerializeStructVariant => { + quote_spanned!(span=> _serde::ser::SerializeStructVariant::serialize_field) + } } } @@ -1301,9 +1307,15 @@ enum TupleTrait { impl TupleTrait { fn serialize_element(&self, span: Span) -> TokenStream { match *self { - TupleTrait::SerializeTuple => quote_spanned!(span=> _serde::ser::SerializeTuple::serialize_element), - TupleTrait::SerializeTupleStruct => quote_spanned!(span=> _serde::ser::SerializeTupleStruct::serialize_field), - TupleTrait::SerializeTupleVariant => quote_spanned!(span=> _serde::ser::SerializeTupleVariant::serialize_field), + TupleTrait::SerializeTuple => { + quote_spanned!(span=> _serde::ser::SerializeTuple::serialize_element) + } + TupleTrait::SerializeTupleStruct => { + quote_spanned!(span=> _serde::ser::SerializeTupleStruct::serialize_field) + } + TupleTrait::SerializeTupleVariant => { + quote_spanned!(span=> _serde::ser::SerializeTupleVariant::serialize_field) + } } } } diff --git a/third_party/rust/smallbitvec/.cargo-checksum.json b/third_party/rust/smallbitvec/.cargo-checksum.json index 0f98909330..e637443044 100644 --- a/third_party/rust/smallbitvec/.cargo-checksum.json +++ b/third_party/rust/smallbitvec/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"04bb81776575ff83d73fd27b8b8a1c3db8ecf9696c30a9a7def7dc79158914bd","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"daa94322de7eab889e055932396160395bd8e3af82f56ae8c419d3049111da72","README.md":"4ac9c9b88726f6bcc3b454d61ce75a8224bd430584b765e304be9aa21815c327","benches/bench.rs":"9691c531845f2741bcb6485641ee3fd3e39980925ec6e5f716464e94fd5adfd0","src/lib.rs":"f05c517845d5836ce3900a883be3ee2f91c38af9dac5defaae7fdce37f0bb6df","src/tests.rs":"5105852aa97ca9569adba05e99b7080b1c6970d2fe9b9eff43beaa4bbe371838"},"package":"1764fe2b30ee783bfe3b9b37b2649d8d590b3148bb12e0079715d4d5c673562e"} \ No newline at end of file 
+{"files":{"Cargo.toml":"aa3657b05ee80a8534070f53b33f01a5159ff85596bae6fbe210d564b6502d3e","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"daa94322de7eab889e055932396160395bd8e3af82f56ae8c419d3049111da72","README.md":"4ac9c9b88726f6bcc3b454d61ce75a8224bd430584b765e304be9aa21815c327","benches/bench.rs":"9691c531845f2741bcb6485641ee3fd3e39980925ec6e5f716464e94fd5adfd0","src/lib.rs":"38067257863268685f22e4982161b0ee30138a37e747c8d58c84e649f4341a62","src/tests.rs":"debfa662f6b911f8b87f234e5cfc5c10656d06c2db12ae77e9f79ecce6a10fae"},"package":"797a4eaffb90d896f29698d45676f9f940a71936d7574996a7df54593ba209fa"} \ No newline at end of file diff --git a/third_party/rust/smallbitvec/Cargo.toml b/third_party/rust/smallbitvec/Cargo.toml index 19d64cdf69..26f29b3752 100644 --- a/third_party/rust/smallbitvec/Cargo.toml +++ b/third_party/rust/smallbitvec/Cargo.toml @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're @@ -12,7 +12,7 @@ [package] name = "smallbitvec" -version = "2.3.0" +version = "2.5.0" authors = ["Matt Brubeck "] description = "A bit vector optimized for size and inline storage" documentation = "https://docs.rs/smallbitvec" diff --git a/third_party/rust/smallbitvec/src/lib.rs b/third_party/rust/smallbitvec/src/lib.rs index 022393430e..6876feadae 100644 --- a/third_party/rust/smallbitvec/src/lib.rs +++ b/third_party/rust/smallbitvec/src/lib.rs @@ -28,13 +28,19 @@ //! assert_eq!(v[1], false); //! ``` -use std::cmp::max; -use std::fmt; -use std::hash; -use std::iter::{DoubleEndedIterator, ExactSizeIterator, FromIterator}; -use std::mem::{forget, replace, size_of}; -use std::ops::{Index, Range}; -use std::slice; +#![no_std] + +extern crate alloc; + +use alloc::{vec, vec::Vec, boxed::Box}; + +use core::cmp::max; +use core::fmt; +use core::hash; +use core::iter::{DoubleEndedIterator, ExactSizeIterator, FromIterator}; +use core::mem::{forget, replace, size_of}; +use core::ops::{Index, Range}; +use core::slice; /// Creates a [`SmallBitVec`] containing the arguments. /// @@ -78,6 +84,18 @@ macro_rules! sbvec { ); } + +// FIXME: replace this with `debug_assert!` when it’s usable in `const`: +// * https://github.com/rust-lang/rust/issues/49146 +// * https://github.com/rust-lang/rust/issues/51999 +macro_rules! const_debug_assert_le { + ($left: ident <= $right: expr) => { + #[cfg(debug_assertions)] + // Causes an `index out of bounds` panic if `$left` is too large + [(); $right + 1][$left]; + } +} + #[cfg(test)] mod tests; @@ -91,7 +109,7 @@ pub struct SmallBitVec { /// Total number of bits per word. #[inline(always)] -fn inline_bits() -> usize { +const fn inline_bits() -> usize { size_of::() * 8 } @@ -100,21 +118,21 @@ fn inline_bits() -> usize { /// - The rightmost bit is set to zero to signal an inline vector. /// - The position of the rightmost nonzero bit encodes the length. 
#[inline(always)] -fn inline_capacity() -> usize { +const fn inline_capacity() -> usize { inline_bits() - 2 } /// Left shift amount to access the nth bit #[inline(always)] -fn inline_shift(n: usize) -> usize { - debug_assert!(n <= inline_capacity()); +const fn inline_shift(n: usize) -> usize { + const_debug_assert_le!(n <= inline_capacity()); // The storage starts at the leftmost bit. inline_bits() - 1 - n } /// An inline vector with the nth bit set. #[inline(always)] -fn inline_index(n: usize) -> usize { +const fn inline_index(n: usize) -> usize { 1 << inline_shift(n) } @@ -201,7 +219,7 @@ pub enum InternalStorage { impl SmallBitVec { /// Create an empty vector. #[inline] - pub fn new() -> SmallBitVec { + pub const fn new() -> SmallBitVec { SmallBitVec { data: inline_index(0), } @@ -279,6 +297,12 @@ impl SmallBitVec { } } + /// Get the last bit in this bit vector. + #[inline] + pub fn last(&self) -> Option { + self.len().checked_sub(1).map(|n| unsafe { self.get_unchecked(n) }) + } + /// Get the nth bit in this bit vector, without bounds checks. #[inline] pub unsafe fn get_unchecked(&self, n: usize) -> bool { @@ -357,15 +381,11 @@ impl SmallBitVec { /// ``` #[inline] pub fn pop(&mut self) -> Option { - let old_len = self.len(); - if old_len == 0 { - return None; - } - unsafe { - let val = self.get_unchecked(old_len - 1); - self.set_len(old_len - 1); - Some(val) - } + self.len().checked_sub(1).map(|last| unsafe { + let val = self.get_unchecked(last); + self.set_len(last); + val + }) } /// Remove and return the bit at index `idx`, shifting all later bits toward the front. @@ -751,6 +771,7 @@ impl fmt::Debug for SmallBitVec { } impl Default for SmallBitVec { + #[inline] fn default() -> Self { Self::new() } @@ -947,6 +968,17 @@ pub struct Iter<'a> { range: Range, } +impl<'a> Default for Iter<'a> { + #[inline] + fn default() -> Self { + const EMPTY: &'static SmallBitVec = &SmallBitVec::new(); + Self { + vec: EMPTY, + range: 0..0, + } + } +} + impl<'a> Iterator for Iter<'a> { type Item = bool; diff --git a/third_party/rust/smallbitvec/src/tests.rs b/third_party/rust/smallbitvec/src/tests.rs index d05c376a7f..2378a5be4a 100644 --- a/third_party/rust/smallbitvec/src/tests.rs +++ b/third_party/rust/smallbitvec/src/tests.rs @@ -9,6 +9,8 @@ use super::*; +use alloc::format; + #[cfg(target_pointer_width = "32")] #[test] fn test_inline_capacity() { diff --git a/third_party/rust/smallvec-0.6.10/.cargo-checksum.json b/third_party/rust/smallvec-0.6.10/.cargo-checksum.json deleted file mode 100644 index 57d87f2e00..0000000000 --- a/third_party/rust/smallvec-0.6.10/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{"Cargo.toml":"a064123fc22a52158c37be025c3b70d413c4b1ee743e92a2e80ed419e2992d65","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0b28172679e0009b655da42797c03fd163a3379d5cfa67ba1f1655e974a2a1a9","README.md":"38eef4ebde6fe6effa12a2dbca3bd69d6446b2935f19a329ac4926f1cb2e5013","benches/bench.rs":"9dca7122a3dcb2c099e49807e4d3b8f01d9220e2b3db0a54e9901ee74392866f","lib.rs":"4d6998b0b80a85e85cf00bd317a88518067e9e8ba191185418263dec67069c16"},"package":"ab606a9c5e214920bb66c458cd7be8ef094f813f20fe77a54cc7dbfff220d4b7"} \ No newline at end of file diff --git a/third_party/rust/smallvec-0.6.10/Cargo.toml b/third_party/rust/smallvec-0.6.10/Cargo.toml deleted file mode 100644 index 5b23e5a16b..0000000000 --- a/third_party/rust/smallvec-0.6.10/Cargo.toml +++ /dev/null @@ -1,39 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading 
crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -name = "smallvec" -version = "0.6.10" -authors = ["Simon Sapin "] -description = "'Small vector' optimization: store up to a small number of items on the stack" -documentation = "https://doc.servo.org/smallvec/" -readme = "README.md" -keywords = ["small", "vec", "vector", "stack", "no_std"] -categories = ["data-structures"] -license = "MIT/Apache-2.0" -repository = "https://github.com/servo/rust-smallvec" - -[lib] -name = "smallvec" -path = "lib.rs" -[dependencies.serde] -version = "1" -optional = true -[dev-dependencies.bincode] -version = "1.0.1" - -[features] -default = ["std"] -may_dangle = [] -specialization = [] -std = [] -union = [] diff --git a/third_party/rust/smallvec-0.6.10/LICENSE-APACHE b/third_party/rust/smallvec-0.6.10/LICENSE-APACHE deleted file mode 100644 index 16fe87b06e..0000000000 --- a/third_party/rust/smallvec-0.6.10/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/third_party/rust/smallvec-0.6.10/LICENSE-MIT b/third_party/rust/smallvec-0.6.10/LICENSE-MIT deleted file mode 100644 index 9729c1284e..0000000000 --- a/third_party/rust/smallvec-0.6.10/LICENSE-MIT +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2018 The Servo Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/smallvec-0.6.10/README.md b/third_party/rust/smallvec-0.6.10/README.md deleted file mode 100644 index fda7fd4d2c..0000000000 --- a/third_party/rust/smallvec-0.6.10/README.md +++ /dev/null @@ -1,8 +0,0 @@ -rust-smallvec -============= - -[Documentation](https://docs.rs/smallvec/) - -[Release notes](https://github.com/servo/rust-smallvec/releases) - -"Small vector" optimization for Rust: store up to a small number of items on the stack diff --git a/third_party/rust/smallvec-0.6.10/benches/bench.rs b/third_party/rust/smallvec-0.6.10/benches/bench.rs deleted file mode 100644 index 36cb1333fa..0000000000 --- a/third_party/rust/smallvec-0.6.10/benches/bench.rs +++ /dev/null @@ -1,295 +0,0 @@ -#![feature(test)] - -#[macro_use] -extern crate smallvec; -extern crate test; - -use self::test::Bencher; -use smallvec::{ExtendFromSlice, SmallVec}; - -const VEC_SIZE: usize = 16; -const SPILLED_SIZE: usize = 100; - -trait Vector: for<'a> From<&'a [T]> + Extend + ExtendFromSlice { - fn new() -> Self; - fn push(&mut self, val: T); - fn pop(&mut self) -> Option; - fn remove(&mut self, p: usize) -> T; - fn insert(&mut self, n: usize, val: T); - fn from_elem(val: T, n: usize) -> Self; - fn from_elems(val: &[T]) -> Self; -} - -impl Vector for Vec { - fn new() -> Self { - Self::with_capacity(VEC_SIZE) - } - - fn push(&mut self, val: T) { - self.push(val) - } - - fn pop(&mut self) -> Option { - self.pop() - } - - fn remove(&mut self, p: usize) -> T { - self.remove(p) - } - - fn insert(&mut self, n: usize, val: T) { - self.insert(n, val) - } - - fn from_elem(val: T, n: usize) -> Self { - vec![val; n] - } - - fn from_elems(val: &[T]) -> Self { - val.to_owned() - } -} - -impl Vector for SmallVec<[T; VEC_SIZE]> { - fn new() -> Self { - Self::new() - } - - fn push(&mut self, val: T) { - self.push(val) - } - - fn pop(&mut self) -> Option { - self.pop() - } - - fn remove(&mut self, p: usize) -> T { - self.remove(p) - } - - fn insert(&mut self, n: usize, val: T) { - self.insert(n, val) - } - - fn from_elem(val: T, n: usize) -> Self { - smallvec![val; n] - } - - fn from_elems(val: &[T]) -> Self { - SmallVec::from_slice(val) - } -} - -macro_rules! make_benches { - ($typ:ty { $($b_name:ident => $g_name:ident($($args:expr),*),)* }) => { - $( - #[bench] - fn $b_name(b: &mut Bencher) { - $g_name::<$typ>($($args,)* b) - } - )* - } -} - -make_benches! 
{ - SmallVec<[u64; VEC_SIZE]> { - bench_push => gen_push(SPILLED_SIZE as _), - bench_push_small => gen_push(VEC_SIZE as _), - bench_insert => gen_insert(SPILLED_SIZE as _), - bench_insert_small => gen_insert(VEC_SIZE as _), - bench_remove => gen_remove(SPILLED_SIZE as _), - bench_remove_small => gen_remove(VEC_SIZE as _), - bench_extend => gen_extend(SPILLED_SIZE as _), - bench_extend_small => gen_extend(VEC_SIZE as _), - bench_from_iter => gen_from_iter(SPILLED_SIZE as _), - bench_from_iter_small => gen_from_iter(VEC_SIZE as _), - bench_from_slice => gen_from_slice(SPILLED_SIZE as _), - bench_from_slice_small => gen_from_slice(VEC_SIZE as _), - bench_extend_from_slice => gen_extend_from_slice(SPILLED_SIZE as _), - bench_extend_from_slice_small => gen_extend_from_slice(VEC_SIZE as _), - bench_macro_from_elem => gen_from_elem(SPILLED_SIZE as _), - bench_macro_from_elem_small => gen_from_elem(VEC_SIZE as _), - bench_pushpop => gen_pushpop(), - } -} - -make_benches! { - Vec { - bench_push_vec => gen_push(SPILLED_SIZE as _), - bench_push_vec_small => gen_push(VEC_SIZE as _), - bench_insert_vec => gen_insert(SPILLED_SIZE as _), - bench_insert_vec_small => gen_insert(VEC_SIZE as _), - bench_remove_vec => gen_remove(SPILLED_SIZE as _), - bench_remove_vec_small => gen_remove(VEC_SIZE as _), - bench_extend_vec => gen_extend(SPILLED_SIZE as _), - bench_extend_vec_small => gen_extend(VEC_SIZE as _), - bench_from_iter_vec => gen_from_iter(SPILLED_SIZE as _), - bench_from_iter_vec_small => gen_from_iter(VEC_SIZE as _), - bench_from_slice_vec => gen_from_slice(SPILLED_SIZE as _), - bench_from_slice_vec_small => gen_from_slice(VEC_SIZE as _), - bench_extend_from_slice_vec => gen_extend_from_slice(SPILLED_SIZE as _), - bench_extend_from_slice_vec_small => gen_extend_from_slice(VEC_SIZE as _), - bench_macro_from_elem_vec => gen_from_elem(SPILLED_SIZE as _), - bench_macro_from_elem_vec_small => gen_from_elem(VEC_SIZE as _), - bench_pushpop_vec => gen_pushpop(), - } -} - -fn gen_push>(n: u64, b: &mut Bencher) { - #[inline(never)] - fn push_noinline>(vec: &mut V, x: u64) { - vec.push(x); - } - - b.iter(|| { - let mut vec = V::new(); - for x in 0..n { - push_noinline(&mut vec, x); - } - vec - }); -} - -fn gen_insert>(n: u64, b: &mut Bencher) { - #[inline(never)] - fn insert_noinline>(vec: &mut V, p: usize, x: u64) { - vec.insert(p, x) - } - - b.iter(|| { - let mut vec = V::new(); - // Add one element, with each iteration we insert one before the end. - // This means that we benchmark the insertion operation and not the - // time it takes to `ptr::copy` the data. 
- vec.push(0); - for x in 0..n { - insert_noinline(&mut vec, x as _, x); - } - vec - }); -} - -fn gen_remove>(n: usize, b: &mut Bencher) { - #[inline(never)] - fn remove_noinline>(vec: &mut V, p: usize) -> u64 { - vec.remove(p) - } - - b.iter(|| { - let mut vec = V::from_elem(0, n as _); - - for x in (0..n - 1).rev() { - remove_noinline(&mut vec, x); - } - }); -} - -fn gen_extend>(n: u64, b: &mut Bencher) { - b.iter(|| { - let mut vec = V::new(); - vec.extend(0..n); - vec - }); -} - -fn gen_from_iter>(n: u64, b: &mut Bencher) { - let v: Vec = (0..n).collect(); - b.iter(|| { - let vec = V::from(&v); - vec - }); -} - -fn gen_from_slice>(n: u64, b: &mut Bencher) { - let v: Vec = (0..n).collect(); - b.iter(|| { - let vec = V::from_elems(&v); - vec - }); -} - -fn gen_extend_from_slice>(n: u64, b: &mut Bencher) { - let v: Vec = (0..n).collect(); - b.iter(|| { - let mut vec = V::new(); - vec.extend_from_slice(&v); - vec - }); -} - -fn gen_pushpop>(b: &mut Bencher) { - #[inline(never)] - fn pushpop_noinline>(vec: &mut V, x: u64) -> Option { - vec.push(x); - vec.pop() - } - - b.iter(|| { - let mut vec = V::new(); - for x in 0..SPILLED_SIZE as _ { - pushpop_noinline(&mut vec, x); - } - vec - }); -} - -fn gen_from_elem>(n: usize, b: &mut Bencher) { - b.iter(|| { - let vec = V::from_elem(42, n); - vec - }); -} - -#[bench] -fn bench_insert_many(b: &mut Bencher) { - #[inline(never)] - fn insert_many_noinline>( - vec: &mut SmallVec<[u64; VEC_SIZE]>, - index: usize, - iterable: I, - ) { - vec.insert_many(index, iterable) - } - - b.iter(|| { - let mut vec = SmallVec::<[u64; VEC_SIZE]>::new(); - insert_many_noinline(&mut vec, 0, 0..SPILLED_SIZE as _); - insert_many_noinline(&mut vec, 0, 0..SPILLED_SIZE as _); - vec - }); -} - -#[bench] -fn bench_insert_from_slice(b: &mut Bencher) { - let v: Vec = (0..SPILLED_SIZE as _).collect(); - b.iter(|| { - let mut vec = SmallVec::<[u64; VEC_SIZE]>::new(); - vec.insert_from_slice(0, &v); - vec.insert_from_slice(0, &v); - vec - }); -} - -#[bench] -fn bench_macro_from_list(b: &mut Bencher) { - b.iter(|| { - let vec: SmallVec<[u64; 16]> = smallvec![ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 24, 32, 36, 0x40, 0x80, - 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000, 0x20000, 0x40000, - 0x80000, 0x100000, - ]; - vec - }); -} - -#[bench] -fn bench_macro_from_list_vec(b: &mut Bencher) { - b.iter(|| { - let vec: Vec = vec![ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 24, 32, 36, 0x40, 0x80, - 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000, 0x20000, 0x40000, - 0x80000, 0x100000, - ]; - vec - }); -} diff --git a/third_party/rust/smallvec-0.6.10/lib.rs b/third_party/rust/smallvec-0.6.10/lib.rs deleted file mode 100644 index e45ca7aebd..0000000000 --- a/third_party/rust/smallvec-0.6.10/lib.rs +++ /dev/null @@ -1,2360 +0,0 @@ -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Small vectors in various sizes. These store a certain number of elements inline, and fall back -//! to the heap for larger allocations. This can be a useful optimization for improving cache -//! locality and reducing allocator traffic for workloads that fit within the inline buffer. -//! -//! ## no_std support -//! -//! By default, `smallvec` depends on `libstd`. However, it can be configured to use the unstable -//! 
`liballoc` API instead, for use on platforms that have `liballoc` but not `libstd`. This -//! configuration is currently unstable and is not guaranteed to work on all versions of Rust. -//! -//! To depend on `smallvec` without `libstd`, use `default-features = false` in the `smallvec` -//! section of Cargo.toml to disable its `"std"` feature. -//! -//! ## `union` feature -//! -//! When the `union` feature is enabled `smallvec` will track its state (inline or spilled) -//! without the use of an enum tag, reducing the size of the `smallvec` by one machine word. -//! This means that there is potentially no space overhead compared to `Vec`. -//! Note that `smallvec` can still be larger than `Vec` if the inline buffer is larger than two -//! machine words. -//! -//! To use this feature add `features = ["union"]` in the `smallvec` section of Cargo.toml. -//! Note that this feature requires a nightly compiler (for now). - -#![cfg_attr(not(feature = "std"), no_std)] -#![cfg_attr(not(feature = "std"), feature(alloc))] -#![cfg_attr(feature = "union", feature(untagged_unions))] -#![cfg_attr(feature = "specialization", feature(specialization))] -#![cfg_attr(feature = "may_dangle", feature(dropck_eyepatch))] -#![deny(missing_docs)] - - -#[cfg(not(feature = "std"))] -#[macro_use] -extern crate alloc; - -#[cfg(not(feature = "std"))] -use alloc::vec::Vec; - -#[cfg(feature = "serde")] -extern crate serde; - -#[cfg(not(feature = "std"))] -mod std { - pub use core::*; -} - -use std::borrow::{Borrow, BorrowMut}; -use std::cmp; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::iter::{IntoIterator, FromIterator, repeat}; -use std::mem; -use std::mem::ManuallyDrop; -use std::ops; -use std::ptr; -use std::slice; -#[cfg(feature = "std")] -use std::io; -#[cfg(feature = "serde")] -use serde::ser::{Serialize, Serializer, SerializeSeq}; -#[cfg(feature = "serde")] -use serde::de::{Deserialize, Deserializer, SeqAccess, Visitor}; -#[cfg(feature = "serde")] -use std::marker::PhantomData; - -/// Creates a [`SmallVec`] containing the arguments. -/// -/// `smallvec!` allows `SmallVec`s to be defined with the same syntax as array expressions. -/// There are two forms of this macro: -/// -/// - Create a [`SmallVec`] containing a given list of elements: -/// -/// ``` -/// # #[macro_use] extern crate smallvec; -/// # use smallvec::SmallVec; -/// # fn main() { -/// let v: SmallVec<[_; 128]> = smallvec![1, 2, 3]; -/// assert_eq!(v[0], 1); -/// assert_eq!(v[1], 2); -/// assert_eq!(v[2], 3); -/// # } -/// ``` -/// -/// - Create a [`SmallVec`] from a given element and size: -/// -/// ``` -/// # #[macro_use] extern crate smallvec; -/// # use smallvec::SmallVec; -/// # fn main() { -/// let v: SmallVec<[_; 0x8000]> = smallvec![1; 3]; -/// assert_eq!(v, SmallVec::from_buf([1, 1, 1])); -/// # } -/// ``` -/// -/// Note that unlike array expressions this syntax supports all elements -/// which implement [`Clone`] and the number of elements doesn't have to be -/// a constant. -/// -/// This will use `clone` to duplicate an expression, so one should be careful -/// using this with types having a nonstandard `Clone` implementation. For -/// example, `smallvec![Rc::new(1); 5]` will create a vector of five references -/// to the same boxed integer value, not five references pointing to independently -/// boxed integers. - -#[macro_export] -macro_rules! 
smallvec { - // count helper: transform any expression into 1 - (@one $x:expr) => (1usize); - ($elem:expr; $n:expr) => ({ - $crate::SmallVec::from_elem($elem, $n) - }); - ($($x:expr),*$(,)*) => ({ - let count = 0usize $(+ smallvec!(@one $x))*; - let mut vec = $crate::SmallVec::new(); - if count <= vec.inline_size() { - $(vec.push($x);)* - vec - } else { - $crate::SmallVec::from_vec(vec![$($x,)*]) - } - }); -} - -/// Hint to the optimizer that any code path which calls this function is -/// statically unreachable and can be removed. -/// -/// Equivalent to `std::hint::unreachable_unchecked` but works in older versions of Rust. -#[inline] -pub unsafe fn unreachable() -> ! { - enum Void {} - let x: &Void = mem::transmute(1usize); - match *x {} -} - -/// `panic!()` in debug builds, optimization hint in release. -#[cfg(not(feature = "union"))] -macro_rules! debug_unreachable { - () => { debug_unreachable!("entered unreachable code") }; - ($e:expr) => { - if cfg!(not(debug_assertions)) { - unreachable(); - } else { - panic!($e); - } - } -} - -/// Common operations implemented by both `Vec` and `SmallVec`. -/// -/// This can be used to write generic code that works with both `Vec` and `SmallVec`. -/// -/// ## Example -/// -/// ```rust -/// use smallvec::{VecLike, SmallVec}; -/// -/// fn initialize>(v: &mut V) { -/// for i in 0..5 { -/// v.push(i); -/// } -/// } -/// -/// let mut vec = Vec::new(); -/// initialize(&mut vec); -/// -/// let mut small_vec = SmallVec::<[u8; 8]>::new(); -/// initialize(&mut small_vec); -/// ``` -#[deprecated(note = "Use `Extend` and `Deref<[T]>` instead")] -pub trait VecLike: - ops::Index + - ops::IndexMut + - ops::Index, Output=[T]> + - ops::IndexMut> + - ops::Index, Output=[T]> + - ops::IndexMut> + - ops::Index, Output=[T]> + - ops::IndexMut> + - ops::Index + - ops::IndexMut + - ops::DerefMut + - Extend { - - /// Append an element to the vector. - fn push(&mut self, value: T); -} - -#[allow(deprecated)] -impl VecLike for Vec { - #[inline] - fn push(&mut self, value: T) { - Vec::push(self, value); - } -} - -/// Trait to be implemented by a collection that can be extended from a slice -/// -/// ## Example -/// -/// ```rust -/// use smallvec::{ExtendFromSlice, SmallVec}; -/// -/// fn initialize>(v: &mut V) { -/// v.extend_from_slice(b"Test!"); -/// } -/// -/// let mut vec = Vec::new(); -/// initialize(&mut vec); -/// assert_eq!(&vec, b"Test!"); -/// -/// let mut small_vec = SmallVec::<[u8; 8]>::new(); -/// initialize(&mut small_vec); -/// assert_eq!(&small_vec as &[_], b"Test!"); -/// ``` -pub trait ExtendFromSlice { - /// Extends a collection from a slice of its element type - fn extend_from_slice(&mut self, other: &[T]); -} - -impl ExtendFromSlice for Vec { - fn extend_from_slice(&mut self, other: &[T]) { - Vec::extend_from_slice(self, other) - } -} - -unsafe fn deallocate(ptr: *mut T, capacity: usize) { - let _vec: Vec = Vec::from_raw_parts(ptr, 0, capacity); - // Let it drop. -} - -/// An iterator that removes the items from a `SmallVec` and yields them by value. -/// -/// Returned from [`SmallVec::drain`][1]. 
-/// -/// [1]: struct.SmallVec.html#method.drain -pub struct Drain<'a, T: 'a> { - iter: slice::IterMut<'a,T>, -} - -impl<'a, T: 'a> Iterator for Drain<'a,T> { - type Item = T; - - #[inline] - fn next(&mut self) -> Option { - self.iter.next().map(|reference| unsafe { ptr::read(reference) }) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } -} - -impl<'a, T: 'a> DoubleEndedIterator for Drain<'a, T> { - #[inline] - fn next_back(&mut self) -> Option { - self.iter.next_back().map(|reference| unsafe { ptr::read(reference) }) - } -} - -impl<'a, T> ExactSizeIterator for Drain<'a, T> { } - -impl<'a, T: 'a> Drop for Drain<'a,T> { - fn drop(&mut self) { - // Destroy the remaining elements. - for _ in self.by_ref() {} - } -} - -#[cfg(feature = "union")] -union SmallVecData { - inline: ManuallyDrop, - heap: (*mut A::Item, usize), -} - -#[cfg(feature = "union")] -impl SmallVecData { - #[inline] - unsafe fn inline(&self) -> &A { - &self.inline - } - #[inline] - unsafe fn inline_mut(&mut self) -> &mut A { - &mut self.inline - } - #[inline] - fn from_inline(inline: A) -> SmallVecData { - SmallVecData { inline: ManuallyDrop::new(inline) } - } - #[inline] - unsafe fn into_inline(self) -> A { ManuallyDrop::into_inner(self.inline) } - #[inline] - unsafe fn heap(&self) -> (*mut A::Item, usize) { - self.heap - } - #[inline] - unsafe fn heap_mut(&mut self) -> &mut (*mut A::Item, usize) { - &mut self.heap - } - #[inline] - fn from_heap(ptr: *mut A::Item, len: usize) -> SmallVecData { - SmallVecData { heap: (ptr, len) } - } -} - -#[cfg(not(feature = "union"))] -enum SmallVecData { - Inline(ManuallyDrop), - Heap((*mut A::Item, usize)), -} - -#[cfg(not(feature = "union"))] -impl SmallVecData { - #[inline] - unsafe fn inline(&self) -> &A { - match *self { - SmallVecData::Inline(ref a) => a, - _ => debug_unreachable!(), - } - } - #[inline] - unsafe fn inline_mut(&mut self) -> &mut A { - match *self { - SmallVecData::Inline(ref mut a) => a, - _ => debug_unreachable!(), - } - } - #[inline] - fn from_inline(inline: A) -> SmallVecData { - SmallVecData::Inline(ManuallyDrop::new(inline)) - } - #[inline] - unsafe fn into_inline(self) -> A { - match self { - SmallVecData::Inline(a) => ManuallyDrop::into_inner(a), - _ => debug_unreachable!(), - } - } - #[inline] - unsafe fn heap(&self) -> (*mut A::Item, usize) { - match *self { - SmallVecData::Heap(data) => data, - _ => debug_unreachable!(), - } - } - #[inline] - unsafe fn heap_mut(&mut self) -> &mut (*mut A::Item, usize) { - match *self { - SmallVecData::Heap(ref mut data) => data, - _ => debug_unreachable!(), - } - } - #[inline] - fn from_heap(ptr: *mut A::Item, len: usize) -> SmallVecData { - SmallVecData::Heap((ptr, len)) - } -} - -unsafe impl Send for SmallVecData {} -unsafe impl Sync for SmallVecData {} - -/// A `Vec`-like container that can store a small number of elements inline. -/// -/// `SmallVec` acts like a vector, but can store a limited amount of data inline within the -/// `SmallVec` struct rather than in a separate allocation. If the data exceeds this limit, the -/// `SmallVec` will "spill" its data onto the heap, allocating a new buffer to hold it. -/// -/// The amount of data that a `SmallVec` can store inline depends on its backing store. The backing -/// store can be any type that implements the `Array` trait; usually it is a small fixed-sized -/// array. For example a `SmallVec<[u64; 8]>` can hold up to eight 64-bit integers inline. 
-/// -/// ## Example -/// -/// ```rust -/// use smallvec::SmallVec; -/// let mut v = SmallVec::<[u8; 4]>::new(); // initialize an empty vector -/// -/// // The vector can hold up to 4 items without spilling onto the heap. -/// v.extend(0..4); -/// assert_eq!(v.len(), 4); -/// assert!(!v.spilled()); -/// -/// // Pushing another element will force the buffer to spill: -/// v.push(4); -/// assert_eq!(v.len(), 5); -/// assert!(v.spilled()); -/// ``` -pub struct SmallVec { - // The capacity field is used to determine which of the storage variants is active: - // If capacity <= A::size() then the inline variant is used and capacity holds the current length of the vector (number of elements actually in use). - // If capacity > A::size() then the heap variant is used and capacity holds the size of the memory allocation. - capacity: usize, - data: SmallVecData, -} - -impl SmallVec { - /// Construct an empty vector - #[inline] - pub fn new() -> SmallVec { - unsafe { - SmallVec { - capacity: 0, - data: SmallVecData::from_inline(mem::uninitialized()), - } - } - } - - /// Construct an empty vector with enough capacity pre-allocated to store at least `n` - /// elements. - /// - /// Will create a heap allocation only if `n` is larger than the inline capacity. - /// - /// ``` - /// # use smallvec::SmallVec; - /// - /// let v: SmallVec<[u8; 3]> = SmallVec::with_capacity(100); - /// - /// assert!(v.is_empty()); - /// assert!(v.capacity() >= 100); - /// ``` - #[inline] - pub fn with_capacity(n: usize) -> Self { - let mut v = SmallVec::new(); - v.reserve_exact(n); - v - } - - /// Construct a new `SmallVec` from a `Vec`. - /// - /// Elements will be copied to the inline buffer if vec.capacity() <= A::size(). - /// - /// ```rust - /// use smallvec::SmallVec; - /// - /// let vec = vec![1, 2, 3, 4, 5]; - /// let small_vec: SmallVec<[_; 3]> = SmallVec::from_vec(vec); - /// - /// assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - /// ``` - #[inline] - pub fn from_vec(mut vec: Vec) -> SmallVec { - if vec.capacity() <= A::size() { - unsafe { - let mut data = SmallVecData::::from_inline(mem::uninitialized()); - let len = vec.len(); - vec.set_len(0); - ptr::copy_nonoverlapping(vec.as_ptr(), data.inline_mut().ptr_mut(), len); - - SmallVec { - capacity: len, - data, - } - } - } else { - let (ptr, cap, len) = (vec.as_mut_ptr(), vec.capacity(), vec.len()); - mem::forget(vec); - - SmallVec { - capacity: cap, - data: SmallVecData::from_heap(ptr, len), - } - } - } - - /// Constructs a new `SmallVec` on the stack from an `A` without - /// copying elements. - /// - /// ```rust - /// use smallvec::SmallVec; - /// - /// let buf = [1, 2, 3, 4, 5]; - /// let small_vec: SmallVec<_> = SmallVec::from_buf(buf); - /// - /// assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - /// ``` - #[inline] - pub fn from_buf(buf: A) -> SmallVec { - SmallVec { - capacity: A::size(), - data: SmallVecData::from_inline(buf), - } - } - - /// Constructs a new `SmallVec` on the stack from an `A` without - /// copying elements. Also sets the length, which must be less or - /// equal to the size of `buf`. 
- /// - /// ```rust - /// use smallvec::SmallVec; - /// - /// let buf = [1, 2, 3, 4, 5, 0, 0, 0]; - /// let small_vec: SmallVec<_> = SmallVec::from_buf_and_len(buf, 5); - /// - /// assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - /// ``` - #[inline] - pub fn from_buf_and_len(buf: A, len: usize) -> SmallVec { - assert!(len <= A::size()); - unsafe { SmallVec::from_buf_and_len_unchecked(buf, len) } - } - - /// Constructs a new `SmallVec` on the stack from an `A` without - /// copying elements. Also sets the length. The user is responsible - /// for ensuring that `len <= A::size()`. - /// - /// ```rust - /// use smallvec::SmallVec; - /// - /// let buf = [1, 2, 3, 4, 5, 0, 0, 0]; - /// let small_vec: SmallVec<_> = unsafe { - /// SmallVec::from_buf_and_len_unchecked(buf, 5) - /// }; - /// - /// assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - /// ``` - #[inline] - pub unsafe fn from_buf_and_len_unchecked(buf: A, len: usize) -> SmallVec { - SmallVec { - capacity: len, - data: SmallVecData::from_inline(buf), - } - } - - - /// Sets the length of a vector. - /// - /// This will explicitly set the size of the vector, without actually - /// modifying its buffers, so it is up to the caller to ensure that the - /// vector is actually the specified size. - pub unsafe fn set_len(&mut self, new_len: usize) { - let (_, len_ptr, _) = self.triple_mut(); - *len_ptr = new_len; - } - - /// The maximum number of elements this vector can hold inline - #[inline] - pub fn inline_size(&self) -> usize { - A::size() - } - - /// The number of elements stored in the vector - #[inline] - pub fn len(&self) -> usize { - self.triple().1 - } - - /// Returns `true` if the vector is empty - #[inline] - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// The number of items the vector can hold without reallocating - #[inline] - pub fn capacity(&self) -> usize { - self.triple().2 - } - - /// Returns a tuple with (data ptr, len, capacity) - /// Useful to get all SmallVec properties with a single check of the current storage variant. - #[inline] - fn triple(&self) -> (*const A::Item, usize, usize) { - unsafe { - if self.spilled() { - let (ptr, len) = self.data.heap(); - (ptr, len, self.capacity) - } else { - (self.data.inline().ptr(), self.capacity, A::size()) - } - } - } - - /// Returns a tuple with (data ptr, len ptr, capacity) - #[inline] - fn triple_mut(&mut self) -> (*mut A::Item, &mut usize, usize) { - unsafe { - if self.spilled() { - let &mut (ptr, ref mut len_ptr) = self.data.heap_mut(); - (ptr, len_ptr, self.capacity) - } else { - (self.data.inline_mut().ptr_mut(), &mut self.capacity, A::size()) - } - } - } - - /// Returns `true` if the data has spilled into a separate heap-allocated buffer. - #[inline] - pub fn spilled(&self) -> bool { - self.capacity > A::size() - } - - /// Empty the vector and return an iterator over its former contents. - pub fn drain(&mut self) -> Drain { - unsafe { - let ptr = self.as_mut_ptr(); - - let current_len = self.len(); - self.set_len(0); - - let slice = slice::from_raw_parts_mut(ptr, current_len); - - Drain { - iter: slice.iter_mut(), - } - } - } - - /// Append an item to the vector. - #[inline] - pub fn push(&mut self, value: A::Item) { - unsafe { - let (_, &mut len, cap) = self.triple_mut(); - if len == cap { - self.reserve(1); - } - let (ptr, len_ptr, _) = self.triple_mut(); - *len_ptr = len + 1; - ptr::write(ptr.offset(len as isize), value); - } - } - - /// Remove an item from the end of the vector and return it, or None if empty. 
- #[inline] - pub fn pop(&mut self) -> Option { - unsafe { - let (ptr, len_ptr, _) = self.triple_mut(); - if *len_ptr == 0 { - return None; - } - let last_index = *len_ptr - 1; - *len_ptr = last_index; - Some(ptr::read(ptr.offset(last_index as isize))) - } - } - - /// Re-allocate to set the capacity to `max(new_cap, inline_size())`. - /// - /// Panics if `new_cap` is less than the vector's length. - pub fn grow(&mut self, new_cap: usize) { - unsafe { - let (ptr, &mut len, cap) = self.triple_mut(); - let unspilled = !self.spilled(); - assert!(new_cap >= len); - if new_cap <= self.inline_size() { - if unspilled { - return; - } - self.data = SmallVecData::from_inline(mem::uninitialized()); - ptr::copy_nonoverlapping(ptr, self.data.inline_mut().ptr_mut(), len); - self.capacity = len; - } else if new_cap != cap { - let mut vec = Vec::with_capacity(new_cap); - let new_alloc = vec.as_mut_ptr(); - mem::forget(vec); - ptr::copy_nonoverlapping(ptr, new_alloc, len); - self.data = SmallVecData::from_heap(new_alloc, len); - self.capacity = new_cap; - if unspilled { - return; - } - } else { - return; - } - deallocate(ptr, cap); - } - } - - /// Reserve capacity for `additional` more elements to be inserted. - /// - /// May reserve more space to avoid frequent reallocations. - /// - /// If the new capacity would overflow `usize` then it will be set to `usize::max_value()` - /// instead. (This means that inserting `additional` new elements is not guaranteed to be - /// possible after calling this function.) - #[inline] - pub fn reserve(&mut self, additional: usize) { - // prefer triple_mut() even if triple() would work - // so that the optimizer removes duplicated calls to it - // from callers like insert() - let (_, &mut len, cap) = self.triple_mut(); - if cap - len < additional { - let new_cap = len.checked_add(additional). - and_then(usize::checked_next_power_of_two). - unwrap_or(usize::max_value()); - self.grow(new_cap); - } - } - - /// Reserve the minimum capacity for `additional` more elements to be inserted. - /// - /// Panics if the new capacity overflows `usize`. - pub fn reserve_exact(&mut self, additional: usize) { - let (_, &mut len, cap) = self.triple_mut(); - if cap - len < additional { - match len.checked_add(additional) { - Some(cap) => self.grow(cap), - None => panic!("reserve_exact overflow"), - } - } - } - - /// Shrink the capacity of the vector as much as possible. - /// - /// When possible, this will move data from an external heap buffer to the vector's inline - /// storage. - pub fn shrink_to_fit(&mut self) { - if !self.spilled() { - return; - } - let len = self.len(); - if self.inline_size() >= len { - unsafe { - let (ptr, len) = self.data.heap(); - self.data = SmallVecData::from_inline(mem::uninitialized()); - ptr::copy_nonoverlapping(ptr, self.data.inline_mut().ptr_mut(), len); - deallocate(ptr, self.capacity); - self.capacity = len; - } - } else if self.capacity() > len { - self.grow(len); - } - } - - /// Shorten the vector, keeping the first `len` elements and dropping the rest. - /// - /// If `len` is greater than or equal to the vector's current length, this has no - /// effect. - /// - /// This does not re-allocate. If you want the vector's capacity to shrink, call - /// `shrink_to_fit` after truncating. 
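The removed `reserve` above grows the buffer to the next power of two that can hold `len + additional`, saturating at `usize::MAX` on overflow. A small standalone sketch of that growth policy; the helper name is illustrative and not part of the crate:

```rust
// Returns the capacity `reserve` would grow to, or None if the current
// capacity already has room for `additional` more elements.
fn reserve_target(len: usize, additional: usize, cap: usize) -> Option<usize> {
    if cap - len >= additional {
        return None; // enough spare capacity, no reallocation needed
    }
    Some(
        len.checked_add(additional)
            .and_then(usize::checked_next_power_of_two)
            .unwrap_or(usize::MAX),
    )
}

fn main() {
    // 5 elements in a buffer of 8, 4 more requested: 5 + 4 = 9, so grow to 16.
    assert_eq!(reserve_target(5, 4, 8), Some(16));
    // 2 more requested fits in the 3 spare slots: no growth.
    assert_eq!(reserve_target(5, 2, 8), None);
}
```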
- pub fn truncate(&mut self, len: usize) { - unsafe { - let (ptr, len_ptr, _) = self.triple_mut(); - while len < *len_ptr { - let last_index = *len_ptr - 1; - *len_ptr = last_index; - ptr::drop_in_place(ptr.offset(last_index as isize)); - } - } - } - - /// Extracts a slice containing the entire vector. - /// - /// Equivalent to `&s[..]`. - pub fn as_slice(&self) -> &[A::Item] { - self - } - - /// Extracts a mutable slice of the entire vector. - /// - /// Equivalent to `&mut s[..]`. - pub fn as_mut_slice(&mut self) -> &mut [A::Item] { - self - } - - /// Remove the element at position `index`, replacing it with the last element. - /// - /// This does not preserve ordering, but is O(1). - /// - /// Panics if `index` is out of bounds. - #[inline] - pub fn swap_remove(&mut self, index: usize) -> A::Item { - let len = self.len(); - self.swap(len - 1, index); - self.pop().unwrap_or_else(|| unsafe { unreachable() }) - } - - /// Remove all elements from the vector. - #[inline] - pub fn clear(&mut self) { - self.truncate(0); - } - - /// Remove and return the element at position `index`, shifting all elements after it to the - /// left. - /// - /// Panics if `index` is out of bounds. - pub fn remove(&mut self, index: usize) -> A::Item { - unsafe { - let (mut ptr, len_ptr, _) = self.triple_mut(); - let len = *len_ptr; - assert!(index < len); - *len_ptr = len - 1; - ptr = ptr.offset(index as isize); - let item = ptr::read(ptr); - ptr::copy(ptr.offset(1), ptr, len - index - 1); - item - } - } - - /// Insert an element at position `index`, shifting all elements after it to the right. - /// - /// Panics if `index` is out of bounds. - pub fn insert(&mut self, index: usize, element: A::Item) { - self.reserve(1); - - unsafe { - let (mut ptr, len_ptr, _) = self.triple_mut(); - let len = *len_ptr; - assert!(index <= len); - *len_ptr = len + 1; - ptr = ptr.offset(index as isize); - ptr::copy(ptr, ptr.offset(1), len - index); - ptr::write(ptr, element); - } - } - - /// Insert multiple elements at position `index`, shifting all following elements toward the - /// back. - pub fn insert_many>(&mut self, index: usize, iterable: I) { - let iter = iterable.into_iter(); - if index == self.len() { - return self.extend(iter); - } - - let (lower_size_bound, _) = iter.size_hint(); - assert!(lower_size_bound <= std::isize::MAX as usize); // Ensure offset is indexable - assert!(index + lower_size_bound >= index); // Protect against overflow - self.reserve(lower_size_bound); - - unsafe { - let old_len = self.len(); - assert!(index <= old_len); - let mut ptr = self.as_mut_ptr().offset(index as isize); - - // Move the trailing elements. - ptr::copy(ptr, ptr.offset(lower_size_bound as isize), old_len - index); - - // In case the iterator panics, don't double-drop the items we just copied above. - self.set_len(index); - - let mut num_added = 0; - for element in iter { - let mut cur = ptr.offset(num_added as isize); - if num_added >= lower_size_bound { - // Iterator provided more elements than the hint. Move trailing items again. 
- self.reserve(1); - ptr = self.as_mut_ptr().offset(index as isize); - cur = ptr.offset(num_added as isize); - ptr::copy(cur, cur.offset(1), old_len - index); - } - ptr::write(cur, element); - num_added += 1; - } - if num_added < lower_size_bound { - // Iterator provided fewer elements than the hint - ptr::copy(ptr.offset(lower_size_bound as isize), ptr.offset(num_added as isize), old_len - index); - } - - self.set_len(old_len + num_added); - } - } - - /// Convert a SmallVec to a Vec, without reallocating if the SmallVec has already spilled onto - /// the heap. - pub fn into_vec(self) -> Vec { - if self.spilled() { - unsafe { - let (ptr, len) = self.data.heap(); - let v = Vec::from_raw_parts(ptr, len, self.capacity); - mem::forget(self); - v - } - } else { - self.into_iter().collect() - } - } - - /// Convert the SmallVec into an `A` if possible. Otherwise return `Err(Self)`. - /// - /// This method returns `Err(Self)` if the SmallVec is too short (and the `A` contains uninitialized elements), - /// or if the SmallVec is too long (and all the elements were spilled to the heap). - pub fn into_inner(self) -> Result { - if self.spilled() || self.len() != A::size() { - Err(self) - } else { - unsafe { - let data = ptr::read(&self.data); - mem::forget(self); - Ok(data.into_inline()) - } - } - } - - /// Retains only the elements specified by the predicate. - /// - /// In other words, remove all elements `e` such that `f(&e)` returns `false`. - /// This method operates in place and preserves the order of the retained - /// elements. - pub fn retain bool>(&mut self, mut f: F) { - let mut del = 0; - let len = self.len(); - for i in 0..len { - if !f(&mut self[i]) { - del += 1; - } else if del > 0 { - self.swap(i - del, i); - } - } - self.truncate(len - del); - } - - /// Removes consecutive duplicate elements. - pub fn dedup(&mut self) where A::Item: PartialEq { - self.dedup_by(|a, b| a == b); - } - - /// Removes consecutive duplicate elements using the given equality relation. - pub fn dedup_by(&mut self, mut same_bucket: F) - where F: FnMut(&mut A::Item, &mut A::Item) -> bool - { - // See the implementation of Vec::dedup_by in the - // standard library for an explanation of this algorithm. - let len = self.len(); - if len <= 1 { - return; - } - - let ptr = self.as_mut_ptr(); - let mut w: usize = 1; - - unsafe { - for r in 1..len { - let p_r = ptr.offset(r as isize); - let p_wm1 = ptr.offset((w - 1) as isize); - if !same_bucket(&mut *p_r, &mut *p_wm1) { - if r != w { - let p_w = p_wm1.offset(1); - mem::swap(&mut *p_r, &mut *p_w); - } - w += 1; - } - } - } - - self.truncate(w); - } - - /// Removes consecutive elements that map to the same key. - pub fn dedup_by_key(&mut self, mut key: F) - where F: FnMut(&mut A::Item) -> K, - K: PartialEq - { - self.dedup_by(|a, b| key(a) == key(b)); - } - - /// Creates a `SmallVec` directly from the raw components of another - /// `SmallVec`. - /// - /// # Safety - /// - /// This is highly unsafe, due to the number of invariants that aren't - /// checked: - /// - /// * `ptr` needs to have been previously allocated via `SmallVec` for its - /// spilled storage (at least, it's highly likely to be incorrect if it - /// wasn't). - /// * `ptr`'s `A::Item` type needs to be the same size and alignment that - /// it was allocated with - /// * `length` needs to be less than or equal to `capacity`. - /// * `capacity` needs to be the capacity that the pointer was allocated - /// with. 
- /// - /// Violating these may cause problems like corrupting the allocator's - /// internal data structures. - /// - /// Additionally, `capacity` must be greater than the amount of inline - /// storage `A` has; that is, the new `SmallVec` must need to spill over - /// into heap allocated storage. This condition is asserted against. - /// - /// The ownership of `ptr` is effectively transferred to the - /// `SmallVec` which may then deallocate, reallocate or change the - /// contents of memory pointed to by the pointer at will. Ensure - /// that nothing else uses the pointer after calling this - /// function. - /// - /// # Examples - /// - /// ``` - /// # #[macro_use] extern crate smallvec; - /// # use smallvec::SmallVec; - /// use std::mem; - /// use std::ptr; - /// - /// fn main() { - /// let mut v: SmallVec<[_; 1]> = smallvec![1, 2, 3]; - /// - /// // Pull out the important parts of `v`. - /// let p = v.as_mut_ptr(); - /// let len = v.len(); - /// let cap = v.capacity(); - /// let spilled = v.spilled(); - /// - /// unsafe { - /// // Forget all about `v`. The heap allocation that stored the - /// // three values won't be deallocated. - /// mem::forget(v); - /// - /// // Overwrite memory with [4, 5, 6]. - /// // - /// // This is only safe if `spilled` is true! Otherwise, we are - /// // writing into the old `SmallVec`'s inline storage on the - /// // stack. - /// assert!(spilled); - /// for i in 0..len as isize { - /// ptr::write(p.offset(i), 4 + i); - /// } - /// - /// // Put everything back together into a SmallVec with a different - /// // amount of inline storage, but which is still less than `cap`. - /// let rebuilt = SmallVec::<[_; 2]>::from_raw_parts(p, len, cap); - /// assert_eq!(&*rebuilt, &[4, 5, 6]); - /// } - /// } - pub unsafe fn from_raw_parts( - ptr: *mut A::Item, - length: usize, - capacity: usize, - ) -> SmallVec { - assert!(capacity > A::size()); - SmallVec { - capacity, - data: SmallVecData::from_heap(ptr, length), - } - } -} - -impl SmallVec where A::Item: Copy { - /// Copy the elements from a slice into a new `SmallVec`. - /// - /// For slices of `Copy` types, this is more efficient than `SmallVec::from(slice)`. - pub fn from_slice(slice: &[A::Item]) -> Self { - let len = slice.len(); - if len <= A::size() { - SmallVec { - capacity: len, - data: SmallVecData::from_inline(unsafe { - let mut data: A = mem::uninitialized(); - ptr::copy_nonoverlapping(slice.as_ptr(), data.ptr_mut(), len); - data - }) - } - } else { - let mut b = slice.to_vec(); - let (ptr, cap) = (b.as_mut_ptr(), b.capacity()); - mem::forget(b); - SmallVec { - capacity: cap, - data: SmallVecData::from_heap(ptr, len), - } - } - } - - /// Copy elements from a slice into the vector at position `index`, shifting any following - /// elements toward the back. - /// - /// For slices of `Copy` types, this is more efficient than `insert`. - pub fn insert_from_slice(&mut self, index: usize, slice: &[A::Item]) { - self.reserve(slice.len()); - - let len = self.len(); - assert!(index <= len); - - unsafe { - let slice_ptr = slice.as_ptr(); - let ptr = self.as_mut_ptr().offset(index as isize); - ptr::copy(ptr, ptr.offset(slice.len() as isize), len - index); - ptr::copy_nonoverlapping(slice_ptr, ptr, slice.len()); - self.set_len(len + slice.len()); - } - } - - /// Copy elements from a slice and append them to the vector. - /// - /// For slices of `Copy` types, this is more efficient than `extend`. 
- #[inline] - pub fn extend_from_slice(&mut self, slice: &[A::Item]) { - let len = self.len(); - self.insert_from_slice(len, slice); - } -} - -impl SmallVec where A::Item: Clone { - /// Resizes the vector so that its length is equal to `len`. - /// - /// If `len` is less than the current length, the vector simply truncated. - /// - /// If `len` is greater than the current length, `value` is appended to the - /// vector until its length equals `len`. - pub fn resize(&mut self, len: usize, value: A::Item) { - let old_len = self.len(); - - if len > old_len { - self.extend(repeat(value).take(len - old_len)); - } else { - self.truncate(len); - } - } - - /// Creates a `SmallVec` with `n` copies of `elem`. - /// ``` - /// use smallvec::SmallVec; - /// - /// let v = SmallVec::<[char; 128]>::from_elem('d', 2); - /// assert_eq!(v, SmallVec::from_buf(['d', 'd'])); - /// ``` - pub fn from_elem(elem: A::Item, n: usize) -> Self { - if n > A::size() { - vec![elem; n].into() - } else { - let mut v = SmallVec::::new(); - unsafe { - let (ptr, len_ptr, _) = v.triple_mut(); - let mut local_len = SetLenOnDrop::new(len_ptr); - - for i in 0..n as isize { - ::std::ptr::write(ptr.offset(i), elem.clone()); - local_len.increment_len(1); - } - } - v - } - } -} - -impl ops::Deref for SmallVec { - type Target = [A::Item]; - #[inline] - fn deref(&self) -> &[A::Item] { - unsafe { - let (ptr, len, _) = self.triple(); - slice::from_raw_parts(ptr, len) - } - } -} - -impl ops::DerefMut for SmallVec { - #[inline] - fn deref_mut(&mut self) -> &mut [A::Item] { - unsafe { - let (ptr, &mut len, _) = self.triple_mut(); - slice::from_raw_parts_mut(ptr, len) - } - } -} - -impl AsRef<[A::Item]> for SmallVec { - #[inline] - fn as_ref(&self) -> &[A::Item] { - self - } -} - -impl AsMut<[A::Item]> for SmallVec { - #[inline] - fn as_mut(&mut self) -> &mut [A::Item] { - self - } -} - -impl Borrow<[A::Item]> for SmallVec { - #[inline] - fn borrow(&self) -> &[A::Item] { - self - } -} - -impl BorrowMut<[A::Item]> for SmallVec { - #[inline] - fn borrow_mut(&mut self) -> &mut [A::Item] { - self - } -} - -#[cfg(feature = "std")] -impl> io::Write for SmallVec { - #[inline] - fn write(&mut self, buf: &[u8]) -> io::Result { - self.extend_from_slice(buf); - Ok(buf.len()) - } - - #[inline] - fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { - self.extend_from_slice(buf); - Ok(()) - } - - #[inline] - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } -} - -#[cfg(feature = "serde")] -impl Serialize for SmallVec where A::Item: Serialize { - fn serialize(&self, serializer: S) -> Result { - let mut state = serializer.serialize_seq(Some(self.len()))?; - for item in self { - state.serialize_element(&item)?; - } - state.end() - } -} - -#[cfg(feature = "serde")] -impl<'de, A: Array> Deserialize<'de> for SmallVec where A::Item: Deserialize<'de> { - fn deserialize>(deserializer: D) -> Result { - deserializer.deserialize_seq(SmallVecVisitor{phantom: PhantomData}) - } -} - -#[cfg(feature = "serde")] -struct SmallVecVisitor { - phantom: PhantomData -} - -#[cfg(feature = "serde")] -impl<'de, A: Array> Visitor<'de> for SmallVecVisitor -where A::Item: Deserialize<'de>, -{ - type Value = SmallVec; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a sequence") - } - - fn visit_seq(self, mut seq: B) -> Result - where - B: SeqAccess<'de>, - { - let len = seq.size_hint().unwrap_or(0); - let mut values = SmallVec::with_capacity(len); - - while let Some(value) = seq.next_element()? 
{ - values.push(value); - } - - Ok(values) - } -} - - -#[cfg(feature = "specialization")] -trait SpecFrom { - fn spec_from(slice: S) -> SmallVec; -} - -#[cfg(feature = "specialization")] -impl<'a, A: Array> SpecFrom for SmallVec where A::Item: Clone { - #[inline] - default fn spec_from(slice: &'a [A::Item]) -> SmallVec { - slice.into_iter().cloned().collect() - } -} - -#[cfg(feature = "specialization")] -impl<'a, A: Array> SpecFrom for SmallVec where A::Item: Copy { - #[inline] - fn spec_from(slice: &'a [A::Item]) -> SmallVec { - SmallVec::from_slice(slice) - } -} - -impl<'a, A: Array> From<&'a [A::Item]> for SmallVec where A::Item: Clone { - #[cfg(not(feature = "specialization"))] - #[inline] - fn from(slice: &'a [A::Item]) -> SmallVec { - slice.into_iter().cloned().collect() - } - - #[cfg(feature = "specialization")] - #[inline] - fn from(slice: &'a [A::Item]) -> SmallVec { - SmallVec::spec_from(slice) - } -} - -impl From> for SmallVec { - #[inline] - fn from(vec: Vec) -> SmallVec { - SmallVec::from_vec(vec) - } -} - -impl From for SmallVec { - #[inline] - fn from(array: A) -> SmallVec { - SmallVec::from_buf(array) - } -} - -macro_rules! impl_index { - ($index_type: ty, $output_type: ty) => { - impl ops::Index<$index_type> for SmallVec { - type Output = $output_type; - #[inline] - fn index(&self, index: $index_type) -> &$output_type { - &(&**self)[index] - } - } - - impl ops::IndexMut<$index_type> for SmallVec { - #[inline] - fn index_mut(&mut self, index: $index_type) -> &mut $output_type { - &mut (&mut **self)[index] - } - } - } -} - -impl_index!(usize, A::Item); -impl_index!(ops::Range, [A::Item]); -impl_index!(ops::RangeFrom, [A::Item]); -impl_index!(ops::RangeTo, [A::Item]); -impl_index!(ops::RangeFull, [A::Item]); - -impl ExtendFromSlice for SmallVec where A::Item: Copy { - fn extend_from_slice(&mut self, other: &[A::Item]) { - SmallVec::extend_from_slice(self, other) - } -} - -#[allow(deprecated)] -impl VecLike for SmallVec { - #[inline] - fn push(&mut self, value: A::Item) { - SmallVec::push(self, value); - } -} - -impl FromIterator for SmallVec { - fn from_iter>(iterable: I) -> SmallVec { - let mut v = SmallVec::new(); - v.extend(iterable); - v - } -} - -impl Extend for SmallVec { - fn extend>(&mut self, iterable: I) { - let mut iter = iterable.into_iter(); - let (lower_size_bound, _) = iter.size_hint(); - self.reserve(lower_size_bound); - - unsafe { - let (ptr, len_ptr, cap) = self.triple_mut(); - let mut len = SetLenOnDrop::new(len_ptr); - while len.get() < cap { - if let Some(out) = iter.next() { - ptr::write(ptr.offset(len.get() as isize), out); - len.increment_len(1); - } else { - return; - } - } - } - - for elem in iter { - self.push(elem); - } - } -} - -impl fmt::Debug for SmallVec where A::Item: fmt::Debug { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_list().entries(self.iter()).finish() - } -} - -impl Default for SmallVec { - #[inline] - fn default() -> SmallVec { - SmallVec::new() - } -} - -#[cfg(feature = "may_dangle")] -unsafe impl<#[may_dangle] A: Array> Drop for SmallVec { - fn drop(&mut self) { - unsafe { - if self.spilled() { - let (ptr, len) = self.data.heap(); - Vec::from_raw_parts(ptr, len, self.capacity); - } else { - ptr::drop_in_place(&mut self[..]); - } - } - } -} - -#[cfg(not(feature = "may_dangle"))] -impl Drop for SmallVec { - fn drop(&mut self) { - unsafe { - if self.spilled() { - let (ptr, len) = self.data.heap(); - Vec::from_raw_parts(ptr, len, self.capacity); - } else { - ptr::drop_in_place(&mut self[..]); - } - } - } -} - 
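
The `Drop` and `triple_mut` plumbing removed above all serves smallvec's central trick: the first `A::size()` elements live in an inline array, and only pushes beyond that capacity spill the contents to a heap buffer. A minimal sketch of that observable behaviour, using only the public API that the tests later in this diff exercise (`push`, `spilled`, `pop`, `shrink_to_fit`):

```rust
use smallvec::SmallVec;

fn main() {
    // Up to 2 items fit in the inline array; no heap allocation yet.
    let mut v: SmallVec<[u8; 2]> = SmallVec::new();
    v.push(1);
    v.push(2);
    assert!(!v.spilled());

    // The third push exceeds the inline capacity, so the contents
    // "spill" to a heap buffer (the path handled by grow()/triple_mut()).
    v.push(3);
    assert!(v.spilled());
    assert_eq!(&v[..], &[1, 2, 3]);

    // shrink_to_fit() takes the inverse path: once the length fits
    // inline again, the data is copied back and the heap buffer freed.
    v.pop();
    v.shrink_to_fit();
    assert!(!v.spilled());
}
```

This is the same round trip that the `shrink_to_fit_unspill` test further down in this diff checks.
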
-impl Clone for SmallVec where A::Item: Clone { - fn clone(&self) -> SmallVec { - let mut new_vector = SmallVec::with_capacity(self.len()); - for element in self.iter() { - new_vector.push((*element).clone()) - } - new_vector - } -} - -impl PartialEq> for SmallVec - where A::Item: PartialEq { - #[inline] - fn eq(&self, other: &SmallVec) -> bool { self[..] == other[..] } - #[inline] - fn ne(&self, other: &SmallVec) -> bool { self[..] != other[..] } -} - -impl Eq for SmallVec where A::Item: Eq {} - -impl PartialOrd for SmallVec where A::Item: PartialOrd { - #[inline] - fn partial_cmp(&self, other: &SmallVec) -> Option { - PartialOrd::partial_cmp(&**self, &**other) - } -} - -impl Ord for SmallVec where A::Item: Ord { - #[inline] - fn cmp(&self, other: &SmallVec) -> cmp::Ordering { - Ord::cmp(&**self, &**other) - } -} - -impl Hash for SmallVec where A::Item: Hash { - fn hash(&self, state: &mut H) { - (**self).hash(state) - } -} - -unsafe impl Send for SmallVec where A::Item: Send {} - -/// An iterator that consumes a `SmallVec` and yields its items by value. -/// -/// Returned from [`SmallVec::into_iter`][1]. -/// -/// [1]: struct.SmallVec.html#method.into_iter -pub struct IntoIter { - data: SmallVec, - current: usize, - end: usize, -} - -impl Drop for IntoIter { - fn drop(&mut self) { - for _ in self { } - } -} - -impl Iterator for IntoIter { - type Item = A::Item; - - #[inline] - fn next(&mut self) -> Option { - if self.current == self.end { - None - } - else { - unsafe { - let current = self.current as isize; - self.current += 1; - Some(ptr::read(self.data.as_ptr().offset(current))) - } - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let size = self.end - self.current; - (size, Some(size)) - } -} - -impl DoubleEndedIterator for IntoIter { - #[inline] - fn next_back(&mut self) -> Option { - if self.current == self.end { - None - } - else { - unsafe { - self.end -= 1; - Some(ptr::read(self.data.as_ptr().offset(self.end as isize))) - } - } - } -} - -impl ExactSizeIterator for IntoIter { } - -impl IntoIterator for SmallVec { - type IntoIter = IntoIter; - type Item = A::Item; - fn into_iter(mut self) -> Self::IntoIter { - unsafe { - // Set SmallVec len to zero as `IntoIter` drop handles dropping of the elements - let len = self.len(); - self.set_len(0); - IntoIter { - data: self, - current: 0, - end: len, - } - } - } -} - -impl<'a, A: Array> IntoIterator for &'a SmallVec { - type IntoIter = slice::Iter<'a, A::Item>; - type Item = &'a A::Item; - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl<'a, A: Array> IntoIterator for &'a mut SmallVec { - type IntoIter = slice::IterMut<'a, A::Item>; - type Item = &'a mut A::Item; - fn into_iter(self) -> Self::IntoIter { - self.iter_mut() - } -} - -/// Types that can be used as the backing store for a SmallVec -pub unsafe trait Array { - /// The type of the array's elements. - type Item; - /// Returns the number of items the array can hold. - fn size() -> usize; - /// Returns a pointer to the first element of the array. - fn ptr(&self) -> *const Self::Item; - /// Returns a mutable pointer to the first element of the array. - fn ptr_mut(&mut self) -> *mut Self::Item; -} - -/// Set the length of the vec when the `SetLenOnDrop` value goes out of scope. 
-/// -/// Copied from https://github.com/rust-lang/rust/pull/36355 -struct SetLenOnDrop<'a> { - len: &'a mut usize, - local_len: usize, -} - -impl<'a> SetLenOnDrop<'a> { - #[inline] - fn new(len: &'a mut usize) -> Self { - SetLenOnDrop { local_len: *len, len: len } - } - - #[inline] - fn get(&self) -> usize { - self.local_len - } - - #[inline] - fn increment_len(&mut self, increment: usize) { - self.local_len += increment; - } -} - -impl<'a> Drop for SetLenOnDrop<'a> { - #[inline] - fn drop(&mut self) { - *self.len = self.local_len; - } -} - -macro_rules! impl_array( - ($($size:expr),+) => { - $( - unsafe impl Array for [T; $size] { - type Item = T; - fn size() -> usize { $size } - fn ptr(&self) -> *const T { self.as_ptr() } - fn ptr_mut(&mut self) -> *mut T { self.as_mut_ptr() } - } - )+ - } -); - -impl_array!(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 24, 32, 36, - 0x40, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, - 0x10000, 0x20000, 0x40000, 0x80000, 0x100000); - -#[cfg(test)] -mod tests { - use SmallVec; - - use std::iter::FromIterator; - - #[cfg(feature = "std")] - use std::borrow::ToOwned; - #[cfg(not(feature = "std"))] - use alloc::borrow::ToOwned; - #[cfg(feature = "std")] - use std::rc::Rc; - #[cfg(not(feature = "std"))] - use alloc::rc::Rc; - #[cfg(not(feature = "std"))] - use alloc::boxed::Box; - #[cfg(not(feature = "std"))] - use alloc::vec::Vec; - - #[test] - pub fn test_zero() { - let mut v = SmallVec::<[_; 0]>::new(); - assert!(!v.spilled()); - v.push(0usize); - assert!(v.spilled()); - assert_eq!(&*v, &[0]); - } - - // We heap allocate all these strings so that double frees will show up under valgrind. - - #[test] - pub fn test_inline() { - let mut v = SmallVec::<[_; 16]>::new(); - v.push("hello".to_owned()); - v.push("there".to_owned()); - assert_eq!(&*v, &[ - "hello".to_owned(), - "there".to_owned(), - ][..]); - } - - #[test] - pub fn test_spill() { - let mut v = SmallVec::<[_; 2]>::new(); - v.push("hello".to_owned()); - assert_eq!(v[0], "hello"); - v.push("there".to_owned()); - v.push("burma".to_owned()); - assert_eq!(v[0], "hello"); - v.push("shave".to_owned()); - assert_eq!(&*v, &[ - "hello".to_owned(), - "there".to_owned(), - "burma".to_owned(), - "shave".to_owned(), - ][..]); - } - - #[test] - pub fn test_double_spill() { - let mut v = SmallVec::<[_; 2]>::new(); - v.push("hello".to_owned()); - v.push("there".to_owned()); - v.push("burma".to_owned()); - v.push("shave".to_owned()); - v.push("hello".to_owned()); - v.push("there".to_owned()); - v.push("burma".to_owned()); - v.push("shave".to_owned()); - assert_eq!(&*v, &[ - "hello".to_owned(), - "there".to_owned(), - "burma".to_owned(), - "shave".to_owned(), - "hello".to_owned(), - "there".to_owned(), - "burma".to_owned(), - "shave".to_owned(), - ][..]); - } - - /// https://github.com/servo/rust-smallvec/issues/4 - #[test] - fn issue_4() { - SmallVec::<[Box; 2]>::new(); - } - - /// https://github.com/servo/rust-smallvec/issues/5 - #[test] - fn issue_5() { - assert!(Some(SmallVec::<[&u32; 2]>::new()).is_some()); - } - - #[test] - fn test_with_capacity() { - let v: SmallVec<[u8; 3]> = SmallVec::with_capacity(1); - assert!(v.is_empty()); - assert!(!v.spilled()); - assert_eq!(v.capacity(), 3); - - let v: SmallVec<[u8; 3]> = SmallVec::with_capacity(10); - assert!(v.is_empty()); - assert!(v.spilled()); - assert_eq!(v.capacity(), 10); - } - - #[test] - fn drain() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(3); - assert_eq!(v.drain().collect::>(), &[3]); - - // spilling 
the vec - v.push(3); - v.push(4); - v.push(5); - assert_eq!(v.drain().collect::>(), &[3, 4, 5]); - } - - #[test] - fn drain_rev() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(3); - assert_eq!(v.drain().rev().collect::>(), &[3]); - - // spilling the vec - v.push(3); - v.push(4); - v.push(5); - assert_eq!(v.drain().rev().collect::>(), &[5, 4, 3]); - } - - #[test] - fn into_iter() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(3); - assert_eq!(v.into_iter().collect::>(), &[3]); - - // spilling the vec - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(3); - v.push(4); - v.push(5); - assert_eq!(v.into_iter().collect::>(), &[3, 4, 5]); - } - - #[test] - fn into_iter_rev() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(3); - assert_eq!(v.into_iter().rev().collect::>(), &[3]); - - // spilling the vec - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(3); - v.push(4); - v.push(5); - assert_eq!(v.into_iter().rev().collect::>(), &[5, 4, 3]); - } - - #[test] - fn into_iter_drop() { - use std::cell::Cell; - - struct DropCounter<'a>(&'a Cell); - - impl<'a> Drop for DropCounter<'a> { - fn drop(&mut self) { - self.0.set(self.0.get() + 1); - } - } - - { - let cell = Cell::new(0); - let mut v: SmallVec<[DropCounter; 2]> = SmallVec::new(); - v.push(DropCounter(&cell)); - v.into_iter(); - assert_eq!(cell.get(), 1); - } - - { - let cell = Cell::new(0); - let mut v: SmallVec<[DropCounter; 2]> = SmallVec::new(); - v.push(DropCounter(&cell)); - v.push(DropCounter(&cell)); - assert!(v.into_iter().next().is_some()); - assert_eq!(cell.get(), 2); - } - - { - let cell = Cell::new(0); - let mut v: SmallVec<[DropCounter; 2]> = SmallVec::new(); - v.push(DropCounter(&cell)); - v.push(DropCounter(&cell)); - v.push(DropCounter(&cell)); - assert!(v.into_iter().next().is_some()); - assert_eq!(cell.get(), 3); - } - { - let cell = Cell::new(0); - let mut v: SmallVec<[DropCounter; 2]> = SmallVec::new(); - v.push(DropCounter(&cell)); - v.push(DropCounter(&cell)); - v.push(DropCounter(&cell)); - { - let mut it = v.into_iter(); - assert!(it.next().is_some()); - assert!(it.next_back().is_some()); - } - assert_eq!(cell.get(), 3); - } - } - - #[test] - fn test_capacity() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.reserve(1); - assert_eq!(v.capacity(), 2); - assert!(!v.spilled()); - - v.reserve_exact(0x100); - assert!(v.capacity() >= 0x100); - - v.push(0); - v.push(1); - v.push(2); - v.push(3); - - v.shrink_to_fit(); - assert!(v.capacity() < 0x100); - } - - #[test] - fn test_truncate() { - let mut v: SmallVec<[Box; 8]> = SmallVec::new(); - - for x in 0..8 { - v.push(Box::new(x)); - } - v.truncate(4); - - assert_eq!(v.len(), 4); - assert!(!v.spilled()); - - assert_eq!(*v.swap_remove(1), 1); - assert_eq!(*v.remove(1), 3); - v.insert(1, Box::new(3)); - - assert_eq!(&v.iter().map(|v| **v).collect::>(), &[0, 3, 2]); - } - - #[test] - fn test_insert_many() { - let mut v: SmallVec<[u8; 8]> = SmallVec::new(); - for x in 0..4 { - v.push(x); - } - assert_eq!(v.len(), 4); - v.insert_many(1, [5, 6].iter().cloned()); - assert_eq!(&v.iter().map(|v| *v).collect::>(), &[0, 5, 6, 1, 2, 3]); - } - - struct MockHintIter{x: T, hint: usize} - impl Iterator for MockHintIter { - type Item = T::Item; - fn next(&mut self) -> Option {self.x.next()} - fn size_hint(&self) -> (usize, Option) {(self.hint, None)} - } - - #[test] - fn test_insert_many_short_hint() { - let mut v: SmallVec<[u8; 8]> = SmallVec::new(); - for x in 0..4 { - v.push(x); - } - assert_eq!(v.len(), 4); - v.insert_many(1, 
MockHintIter{x: [5, 6].iter().cloned(), hint: 5}); - assert_eq!(&v.iter().map(|v| *v).collect::>(), &[0, 5, 6, 1, 2, 3]); - } - - #[test] - fn test_insert_many_long_hint() { - let mut v: SmallVec<[u8; 8]> = SmallVec::new(); - for x in 0..4 { - v.push(x); - } - assert_eq!(v.len(), 4); - v.insert_many(1, MockHintIter{x: [5, 6].iter().cloned(), hint: 1}); - assert_eq!(&v.iter().map(|v| *v).collect::>(), &[0, 5, 6, 1, 2, 3]); - } - - #[cfg(feature = "std")] - #[test] - // https://github.com/servo/rust-smallvec/issues/96 - fn test_insert_many_panic() { - struct PanicOnDoubleDrop { - dropped: Box - } - - impl Drop for PanicOnDoubleDrop { - fn drop(&mut self) { - assert!(!*self.dropped, "already dropped"); - *self.dropped = true; - } - } - - struct BadIter; - impl Iterator for BadIter { - type Item = PanicOnDoubleDrop; - fn size_hint(&self) -> (usize, Option) { (1, None) } - fn next(&mut self) -> Option { panic!() } - } - - let mut vec: SmallVec<[PanicOnDoubleDrop; 0]> = vec![ - PanicOnDoubleDrop { dropped: Box::new(false) }, - PanicOnDoubleDrop { dropped: Box::new(false) }, - ].into(); - let result = ::std::panic::catch_unwind(move || { - vec.insert_many(0, BadIter); - }); - assert!(result.is_err()); - } - - #[test] - #[should_panic] - fn test_invalid_grow() { - let mut v: SmallVec<[u8; 8]> = SmallVec::new(); - v.extend(0..8); - v.grow(5); - } - - #[test] - fn test_insert_from_slice() { - let mut v: SmallVec<[u8; 8]> = SmallVec::new(); - for x in 0..4 { - v.push(x); - } - assert_eq!(v.len(), 4); - v.insert_from_slice(1, &[5, 6]); - assert_eq!(&v.iter().map(|v| *v).collect::>(), &[0, 5, 6, 1, 2, 3]); - } - - #[test] - fn test_extend_from_slice() { - let mut v: SmallVec<[u8; 8]> = SmallVec::new(); - for x in 0..4 { - v.push(x); - } - assert_eq!(v.len(), 4); - v.extend_from_slice(&[5, 6]); - assert_eq!(&v.iter().map(|v| *v).collect::>(), &[0, 1, 2, 3, 5, 6]); - } - - #[test] - #[should_panic] - fn test_drop_panic_smallvec() { - // This test should only panic once, and not double panic, - // which would mean a double drop - struct DropPanic; - - impl Drop for DropPanic { - fn drop(&mut self) { - panic!("drop"); - } - } - - let mut v = SmallVec::<[_; 1]>::new(); - v.push(DropPanic); - } - - #[test] - fn test_eq() { - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - let mut b: SmallVec<[u32; 2]> = SmallVec::new(); - let mut c: SmallVec<[u32; 2]> = SmallVec::new(); - // a = [1, 2] - a.push(1); - a.push(2); - // b = [1, 2] - b.push(1); - b.push(2); - // c = [3, 4] - c.push(3); - c.push(4); - - assert!(a == b); - assert!(a != c); - } - - #[test] - fn test_ord() { - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - let mut b: SmallVec<[u32; 2]> = SmallVec::new(); - let mut c: SmallVec<[u32; 2]> = SmallVec::new(); - // a = [1] - a.push(1); - // b = [1, 1] - b.push(1); - b.push(1); - // c = [1, 2] - c.push(1); - c.push(2); - - assert!(a < b); - assert!(b > a); - assert!(b < c); - assert!(c > b); - } - - #[cfg(feature = "std")] - #[test] - fn test_hash() { - use std::hash::Hash; - use std::collections::hash_map::DefaultHasher; - - { - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - let b = [1, 2]; - a.extend(b.iter().cloned()); - let mut hasher = DefaultHasher::new(); - assert_eq!(a.hash(&mut hasher), b.hash(&mut hasher)); - } - { - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - let b = [1, 2, 11, 12]; - a.extend(b.iter().cloned()); - let mut hasher = DefaultHasher::new(); - assert_eq!(a.hash(&mut hasher), b.hash(&mut hasher)); - } - } - - #[test] - fn test_as_ref() { - let mut a: SmallVec<[u32; 
2]> = SmallVec::new(); - a.push(1); - assert_eq!(a.as_ref(), [1]); - a.push(2); - assert_eq!(a.as_ref(), [1, 2]); - a.push(3); - assert_eq!(a.as_ref(), [1, 2, 3]); - } - - #[test] - fn test_as_mut() { - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - a.push(1); - assert_eq!(a.as_mut(), [1]); - a.push(2); - assert_eq!(a.as_mut(), [1, 2]); - a.push(3); - assert_eq!(a.as_mut(), [1, 2, 3]); - a.as_mut()[1] = 4; - assert_eq!(a.as_mut(), [1, 4, 3]); - } - - #[test] - fn test_borrow() { - use std::borrow::Borrow; - - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - a.push(1); - assert_eq!(a.borrow(), [1]); - a.push(2); - assert_eq!(a.borrow(), [1, 2]); - a.push(3); - assert_eq!(a.borrow(), [1, 2, 3]); - } - - #[test] - fn test_borrow_mut() { - use std::borrow::BorrowMut; - - let mut a: SmallVec<[u32; 2]> = SmallVec::new(); - a.push(1); - assert_eq!(a.borrow_mut(), [1]); - a.push(2); - assert_eq!(a.borrow_mut(), [1, 2]); - a.push(3); - assert_eq!(a.borrow_mut(), [1, 2, 3]); - BorrowMut::<[u32]>::borrow_mut(&mut a)[1] = 4; - assert_eq!(a.borrow_mut(), [1, 4, 3]); - } - - #[test] - fn test_from() { - assert_eq!(&SmallVec::<[u32; 2]>::from(&[1][..])[..], [1]); - assert_eq!(&SmallVec::<[u32; 2]>::from(&[1, 2, 3][..])[..], [1, 2, 3]); - - let vec = vec![]; - let small_vec: SmallVec<[u8; 3]> = SmallVec::from(vec); - assert_eq!(&*small_vec, &[]); - drop(small_vec); - - let vec = vec![1, 2, 3, 4, 5]; - let small_vec: SmallVec<[u8; 3]> = SmallVec::from(vec); - assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - drop(small_vec); - - let vec = vec![1, 2, 3, 4, 5]; - let small_vec: SmallVec<[u8; 1]> = SmallVec::from(vec); - assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - drop(small_vec); - - let array = [1]; - let small_vec: SmallVec<[u8; 1]> = SmallVec::from(array); - assert_eq!(&*small_vec, &[1]); - drop(small_vec); - - let array = [99; 128]; - let small_vec: SmallVec<[u8; 128]> = SmallVec::from(array); - assert_eq!(&*small_vec, vec![99u8; 128].as_slice()); - drop(small_vec); - } - - #[test] - fn test_from_slice() { - assert_eq!(&SmallVec::<[u32; 2]>::from_slice(&[1][..])[..], [1]); - assert_eq!(&SmallVec::<[u32; 2]>::from_slice(&[1, 2, 3][..])[..], [1, 2, 3]); - } - - #[test] - fn test_exact_size_iterator() { - let mut vec = SmallVec::<[u32; 2]>::from(&[1, 2, 3][..]); - assert_eq!(vec.clone().into_iter().len(), 3); - assert_eq!(vec.drain().len(), 3); - } - - #[test] - #[allow(deprecated)] - fn veclike_deref_slice() { - use super::VecLike; - - fn test>(vec: &mut T) { - assert!(!vec.is_empty()); - assert_eq!(vec.len(), 3); - - vec.sort(); - assert_eq!(&vec[..], [1, 2, 3]); - } - - let mut vec = SmallVec::<[i32; 2]>::from(&[3, 1, 2][..]); - test(&mut vec); - } - - #[test] - fn shrink_to_fit_unspill() { - let mut vec = SmallVec::<[u8; 2]>::from_iter(0..3); - vec.pop(); - assert!(vec.spilled()); - vec.shrink_to_fit(); - assert!(!vec.spilled(), "shrink_to_fit will un-spill if possible"); - } - - #[test] - fn test_into_vec() { - let vec = SmallVec::<[u8; 2]>::from_iter(0..2); - assert_eq!(vec.into_vec(), vec![0, 1]); - - let vec = SmallVec::<[u8; 2]>::from_iter(0..3); - assert_eq!(vec.into_vec(), vec![0, 1, 2]); - } - - #[test] - fn test_into_inner() { - let vec = SmallVec::<[u8; 2]>::from_iter(0..2); - assert_eq!(vec.into_inner(), Ok([0, 1])); - - let vec = SmallVec::<[u8; 2]>::from_iter(0..1); - assert_eq!(vec.clone().into_inner(), Err(vec)); - - let vec = SmallVec::<[u8; 2]>::from_iter(0..3); - assert_eq!(vec.clone().into_inner(), Err(vec)); - } - - #[test] - fn test_from_vec() { - let vec = vec![]; - let 
small_vec: SmallVec<[u8; 3]> = SmallVec::from_vec(vec); - assert_eq!(&*small_vec, &[]); - drop(small_vec); - - let vec = vec![]; - let small_vec: SmallVec<[u8; 1]> = SmallVec::from_vec(vec); - assert_eq!(&*small_vec, &[]); - drop(small_vec); - - let vec = vec![1]; - let small_vec: SmallVec<[u8; 3]> = SmallVec::from_vec(vec); - assert_eq!(&*small_vec, &[1]); - drop(small_vec); - - let vec = vec![1, 2, 3]; - let small_vec: SmallVec<[u8; 3]> = SmallVec::from_vec(vec); - assert_eq!(&*small_vec, &[1, 2, 3]); - drop(small_vec); - - let vec = vec![1, 2, 3, 4, 5]; - let small_vec: SmallVec<[u8; 3]> = SmallVec::from_vec(vec); - assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - drop(small_vec); - - let vec = vec![1, 2, 3, 4, 5]; - let small_vec: SmallVec<[u8; 1]> = SmallVec::from_vec(vec); - assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]); - drop(small_vec); - } - - #[test] - fn test_retain() { - // Test inline data storate - let mut sv: SmallVec<[i32; 5]> = SmallVec::from_slice(&[1, 2, 3, 3, 4]); - sv.retain(|&mut i| i != 3); - assert_eq!(sv.pop(), Some(4)); - assert_eq!(sv.pop(), Some(2)); - assert_eq!(sv.pop(), Some(1)); - assert_eq!(sv.pop(), None); - - // Test spilled data storage - let mut sv: SmallVec<[i32; 3]> = SmallVec::from_slice(&[1, 2, 3, 3, 4]); - sv.retain(|&mut i| i != 3); - assert_eq!(sv.pop(), Some(4)); - assert_eq!(sv.pop(), Some(2)); - assert_eq!(sv.pop(), Some(1)); - assert_eq!(sv.pop(), None); - - // Test that drop implementations are called for inline. - let one = Rc::new(1); - let mut sv: SmallVec<[Rc; 3]> = SmallVec::new(); - sv.push(Rc::clone(&one)); - assert_eq!(Rc::strong_count(&one), 2); - sv.retain(|_| false); - assert_eq!(Rc::strong_count(&one), 1); - - // Test that drop implementations are called for spilled data. - let mut sv: SmallVec<[Rc; 1]> = SmallVec::new(); - sv.push(Rc::clone(&one)); - sv.push(Rc::new(2)); - assert_eq!(Rc::strong_count(&one), 2); - sv.retain(|_| false); - assert_eq!(Rc::strong_count(&one), 1); - } - - #[test] - fn test_dedup() { - let mut dupes: SmallVec<[i32; 5]> = SmallVec::from_slice(&[1, 1, 2, 3, 3]); - dupes.dedup(); - assert_eq!(&*dupes, &[1, 2, 3]); - - let mut empty: SmallVec<[i32; 5]> = SmallVec::new(); - empty.dedup(); - assert!(empty.is_empty()); - - let mut all_ones: SmallVec<[i32; 5]> = SmallVec::from_slice(&[1, 1, 1, 1, 1]); - all_ones.dedup(); - assert_eq!(all_ones.len(), 1); - - let mut no_dupes: SmallVec<[i32; 5]> = SmallVec::from_slice(&[1, 2, 3, 4, 5]); - no_dupes.dedup(); - assert_eq!(no_dupes.len(), 5); - } - - #[test] - fn test_resize() { - let mut v: SmallVec<[i32; 8]> = SmallVec::new(); - v.push(1); - v.resize(5, 0); - assert_eq!(v[..], [1, 0, 0, 0, 0][..]); - - v.resize(2, -1); - assert_eq!(v[..], [1, 0][..]); - } - - #[cfg(feature = "std")] - #[test] - fn test_write() { - use io::Write; - - let data = [1, 2, 3, 4, 5]; - - let mut small_vec: SmallVec<[u8; 2]> = SmallVec::new(); - let len = small_vec.write(&data[..]).unwrap(); - assert_eq!(len, 5); - assert_eq!(small_vec.as_ref(), data.as_ref()); - - let mut small_vec: SmallVec<[u8; 2]> = SmallVec::new(); - small_vec.write_all(&data[..]).unwrap(); - assert_eq!(small_vec.as_ref(), data.as_ref()); - } - - #[cfg(feature = "serde")] - extern crate bincode; - - #[cfg(feature = "serde")] - #[test] - fn test_serde() { - use self::bincode::{config, deserialize}; - let mut small_vec: SmallVec<[i32; 2]> = SmallVec::new(); - small_vec.push(1); - let encoded = config().limit(100).serialize(&small_vec).unwrap(); - let decoded: SmallVec<[i32; 2]> = deserialize(&encoded).unwrap(); - 
assert_eq!(small_vec, decoded); - small_vec.push(2); - // Spill the vec - small_vec.push(3); - small_vec.push(4); - // Check again after spilling. - let encoded = config().limit(100).serialize(&small_vec).unwrap(); - let decoded: SmallVec<[i32; 2]> = deserialize(&encoded).unwrap(); - assert_eq!(small_vec, decoded); - } - - #[test] - fn grow_to_shrink() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(1); - v.push(2); - v.push(3); - assert!(v.spilled()); - v.clear(); - // Shrink to inline. - v.grow(2); - assert!(!v.spilled()); - assert_eq!(v.capacity(), 2); - assert_eq!(v.len(), 0); - v.push(4); - assert_eq!(v[..], [4]); - } - - #[test] - fn resumable_extend() { - let s = "a b c"; - // This iterator yields: (Some('a'), None, Some('b'), None, Some('c')), None - let it = s - .chars() - .scan(0, |_, ch| if ch.is_whitespace() { None } else { Some(ch) }); - let mut v: SmallVec<[char; 4]> = SmallVec::new(); - v.extend(it); - assert_eq!(v[..], ['a']); - } - - #[test] - fn grow_spilled_same_size() { - let mut v: SmallVec<[u8; 2]> = SmallVec::new(); - v.push(0); - v.push(1); - v.push(2); - assert!(v.spilled()); - assert_eq!(v.capacity(), 4); - // grow with the same capacity - v.grow(4); - assert_eq!(v.capacity(), 4); - assert_eq!(v[..], [0, 1, 2]); - } -} diff --git a/third_party/rust/smallvec/.cargo-checksum.json b/third_party/rust/smallvec/.cargo-checksum.json index 9bf3a329df..3e4ba00883 100644 --- a/third_party/rust/smallvec/.cargo-checksum.json +++ b/third_party/rust/smallvec/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"a0b3c2e96922c85896116d470c1e5cd67a1c8165c0f8f78d3e719d3c74c6e4d7","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0b28172679e0009b655da42797c03fd163a3379d5cfa67ba1f1655e974a2a1a9","README.md":"38eef4ebde6fe6effa12a2dbca3bd69d6446b2935f19a329ac4926f1cb2e5013","benches/bench.rs":"9dca7122a3dcb2c099e49807e4d3b8f01d9220e2b3db0a54e9901ee74392866f","lib.rs":"0bf36124f10391d44b6afa228fb77dfadb7376da8108c0dc064d01b1adbe7fad","scripts/run_miri.sh":"cd645dfecf19cc77141ecaf698e58a3a743ad69aca5e5d25c8e5d3911e031322","specialization.rs":"46433586203399251cba496d67b88d34e1be3c2b591986b77463513da1c66471"},"package":"4ecf3b85f68e8abaa7555aa5abdb1153079387e60b718283d732f03897fcfc86"} \ No newline at end of file +{"files":{"Cargo.toml":"82c58cfe1208040b0772a4eb0fc59c2f84c75dd28115f2847a6edc91a340b7f4","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0b28172679e0009b655da42797c03fd163a3379d5cfa67ba1f1655e974a2a1a9","README.md":"a01127c37308457e8d396b176fb790846be0978c173be3f13260b62efcef011b","benches/bench.rs":"9dca7122a3dcb2c099e49807e4d3b8f01d9220e2b3db0a54e9901ee74392866f","lib.rs":"6b128fc5aa50b5dd775d45252e277c13546f1de2ebee340c6c8ff48627678244","scripts/run_miri.sh":"2e83d153efc16cbc3c41589e306faa0624c8b9a0feecea3baae6e34f4563ac42","specialization.rs":"46433586203399251cba496d67b88d34e1be3c2b591986b77463513da1c66471"},"package":"5c2fb2ec9bcd216a5b0d0ccf31ab17b5ed1d627960edff65bbe95d3ce221cefc"} \ No newline at end of file diff --git a/third_party/rust/smallvec/Cargo.toml b/third_party/rust/smallvec/Cargo.toml index e58122aa36..199dc13b27 100644 --- a/third_party/rust/smallvec/Cargo.toml +++ b/third_party/rust/smallvec/Cargo.toml @@ -13,10 +13,10 @@ [package] edition = "2018" name = "smallvec" -version = "1.0.0" +version = "1.2.0" authors = ["Simon Sapin "] description = "'Small vector' optimization: store up to a small number of items on the stack" -documentation = 
"https://doc.servo.org/smallvec/" +documentation = "https://docs.rs/smallvec/" readme = "README.md" keywords = ["small", "vec", "vector", "stack", "no_std"] categories = ["data-structures"] diff --git a/third_party/rust/smallvec/README.md b/third_party/rust/smallvec/README.md index fda7fd4d2c..724637c6ec 100644 --- a/third_party/rust/smallvec/README.md +++ b/third_party/rust/smallvec/README.md @@ -6,3 +6,21 @@ rust-smallvec [Release notes](https://github.com/servo/rust-smallvec/releases) "Small vector" optimization for Rust: store up to a small number of items on the stack + +## Example + +```rust +use smallvec::{SmallVec, smallvec}; + +// This SmallVec can hold up to 4 items on the stack: +let mut v: SmallVec<[i32; 4]> = smallvec![1, 2, 3, 4]; + +// It will automatically move its contents to the heap if +// contains more than four items: +v.push(5); + +// SmallVec points to a slice, so you can use normal slice +// indexing and other methods to access its contents: +v[0] = v[1] + v[2]; +v.sort(); +``` diff --git a/third_party/rust/smallvec/lib.rs b/third_party/rust/smallvec/lib.rs index d2fbbc63e3..0c8243edae 100644 --- a/third_party/rust/smallvec/lib.rs +++ b/third_party/rust/smallvec/lib.rs @@ -14,9 +14,6 @@ //! `write` feature implements the `std::io::Write` trait for vectors of `u8`. //! When this feature is enabled, `smallvec` depends on `std`. //! -//! To depend on `smallvec` without `libstd`, use `default-features = false` in the `smallvec` -//! section of Cargo.toml to disable its `"std"` feature. -//! //! ## `union` feature //! //! When the `union` feature is enabled `smallvec` will track its state (inline or spilled) @@ -34,13 +31,14 @@ #![cfg_attr(feature = "may_dangle", feature(dropck_eyepatch))] #![deny(missing_docs)] -#[macro_use] -extern crate alloc; +#[doc(hidden)] +pub extern crate alloc; #[cfg(any(test, feature = "write"))] extern crate std; -use alloc::vec::Vec; +use alloc::boxed::Box; +use alloc::{vec, vec::Vec}; use core::borrow::{Borrow, BorrowMut}; use core::cmp; use core::fmt; @@ -118,7 +116,7 @@ macro_rules! 
smallvec { $(vec.push($x);)* vec } else { - $crate::SmallVec::from_vec(vec![$($x,)*]) + $crate::SmallVec::from_vec($crate::alloc::vec![$($x,)*]) } }); } @@ -256,7 +254,7 @@ impl<'a, T: 'a + Array> Drop for Drain<'a, T> { #[cfg(feature = "union")] union SmallVecData { inline: MaybeUninit, - heap: (NonNull, usize), + heap: (*mut A::Item, usize), } #[cfg(feature = "union")] @@ -279,24 +277,22 @@ impl SmallVecData { } #[inline] unsafe fn heap(&self) -> (*mut A::Item, usize) { - (self.heap.0.as_ptr(), self.heap.1) + self.heap } #[inline] - unsafe fn heap_mut(&mut self) -> (*mut A::Item, &mut usize) { - (self.heap.0.as_ptr(), &mut self.heap.1) + unsafe fn heap_mut(&mut self) -> &mut (*mut A::Item, usize) { + &mut self.heap } #[inline] fn from_heap(ptr: *mut A::Item, len: usize) -> SmallVecData { - SmallVecData { - heap: (NonNull::new(ptr).unwrap(), len), - } + SmallVecData { heap: (ptr, len) } } } #[cfg(not(feature = "union"))] enum SmallVecData { Inline(MaybeUninit), - Heap((NonNull, usize)), + Heap((*mut A::Item, usize)), } #[cfg(not(feature = "union"))] @@ -329,20 +325,20 @@ impl SmallVecData { #[inline] unsafe fn heap(&self) -> (*mut A::Item, usize) { match self { - SmallVecData::Heap(data) => (data.0.as_ptr(), data.1), + SmallVecData::Heap(data) => *data, _ => debug_unreachable!(), } } #[inline] - unsafe fn heap_mut(&mut self) -> (*mut A::Item, &mut usize) { + unsafe fn heap_mut(&mut self) -> &mut (*mut A::Item, usize) { match self { - SmallVecData::Heap(data) => (data.0.as_ptr(), &mut data.1), + SmallVecData::Heap(data) => data, _ => debug_unreachable!(), } } #[inline] fn from_heap(ptr: *mut A::Item, len: usize) -> SmallVecData { - SmallVecData::Heap((NonNull::new(ptr).unwrap(), len)) + SmallVecData::Heap((ptr, len)) } } @@ -569,7 +565,7 @@ impl SmallVec { fn triple_mut(&mut self) -> (*mut A::Item, &mut usize, usize) { unsafe { if self.spilled() { - let (ptr, len_ptr) = self.data.heap_mut(); + let &mut (ptr, ref mut len_ptr) = self.data.heap_mut(); (ptr, len_ptr, self.capacity) } else { (self.data.inline_mut(), &mut self.capacity, A::size()) @@ -641,7 +637,7 @@ impl SmallVec { } let (ptr, len_ptr, _) = self.triple_mut(); *len_ptr = len + 1; - ptr::write(ptr.offset(len as isize), value); + ptr::write(ptr.add(len), value); } } @@ -655,7 +651,7 @@ impl SmallVec { } let last_index = *len_ptr - 1; *len_ptr = last_index; - Some(ptr::read(ptr.offset(last_index as isize))) + Some(ptr::read(ptr.add(last_index))) } } @@ -761,7 +757,7 @@ impl SmallVec { while len < *len_ptr { let last_index = *len_ptr - 1; *len_ptr = last_index; - ptr::drop_in_place(ptr.offset(last_index as isize)); + ptr::drop_in_place(ptr.add(last_index)); } } } @@ -809,9 +805,9 @@ impl SmallVec { let len = *len_ptr; assert!(index < len); *len_ptr = len - 1; - ptr = ptr.offset(index as isize); + ptr = ptr.add(index); let item = ptr::read(ptr); - ptr::copy(ptr.offset(1), ptr, len - index - 1); + ptr::copy(ptr.add(1), ptr, len - index - 1); item } } @@ -827,8 +823,8 @@ impl SmallVec { let len = *len_ptr; assert!(index <= len); *len_ptr = len + 1; - ptr = ptr.offset(index as isize); - ptr::copy(ptr, ptr.offset(1), len - index); + ptr = ptr.add(index); + ptr::copy(ptr, ptr.add(1), len - index); ptr::write(ptr, element); } } @@ -849,23 +845,23 @@ impl SmallVec { unsafe { let old_len = self.len(); assert!(index <= old_len); - let mut ptr = self.as_mut_ptr().offset(index as isize); + let mut ptr = self.as_mut_ptr().add(index); // Move the trailing elements. 
- ptr::copy(ptr, ptr.offset(lower_size_bound as isize), old_len - index); + ptr::copy(ptr, ptr.add(lower_size_bound), old_len - index); // In case the iterator panics, don't double-drop the items we just copied above. self.set_len(index); let mut num_added = 0; for element in iter { - let mut cur = ptr.offset(num_added as isize); + let mut cur = ptr.add(num_added); if num_added >= lower_size_bound { // Iterator provided more elements than the hint. Move trailing items again. self.reserve(1); - ptr = self.as_mut_ptr().offset(index as isize); - cur = ptr.offset(num_added as isize); - ptr::copy(cur, cur.offset(1), old_len - index); + ptr = self.as_mut_ptr().add(index); + cur = ptr.add(num_added); + ptr::copy(cur, cur.add(1), old_len - index); } ptr::write(cur, element); num_added += 1; @@ -873,8 +869,8 @@ impl SmallVec { if num_added < lower_size_bound { // Iterator provided fewer elements than the hint ptr::copy( - ptr.offset(lower_size_bound as isize), - ptr.offset(num_added as isize), + ptr.add(lower_size_bound), + ptr.add(num_added), old_len - index, ); } @@ -898,6 +894,14 @@ impl SmallVec { } } + /// Converts a `SmallVec` into a `Box<[T]>` without reallocating if the `SmallVec` has already spilled + /// onto the heap. + /// + /// Note that this will drop any excess capacity. + pub fn into_boxed_slice(self) -> Box<[A::Item]> { + self.into_vec().into_boxed_slice() + } + /// Convert the SmallVec into an `A` if possible. Otherwise return `Err(Self)`. /// /// This method returns `Err(Self)` if the SmallVec is too short (and the `A` contains uninitialized elements), @@ -957,11 +961,11 @@ impl SmallVec { unsafe { for r in 1..len { - let p_r = ptr.offset(r as isize); - let p_wm1 = ptr.offset((w - 1) as isize); + let p_r = ptr.add(r); + let p_wm1 = ptr.add(w - 1); if !same_bucket(&mut *p_r, &mut *p_wm1) { if r != w { - let p_w = p_wm1.offset(1); + let p_w = p_wm1.add(1); mem::swap(&mut *p_r, &mut *p_w); } w += 1; @@ -1039,8 +1043,8 @@ impl SmallVec { /// // writing into the old `SmallVec`'s inline storage on the /// // stack. /// assert!(spilled); - /// for i in 0..len as isize { - /// ptr::write(p.offset(i), 4 + i); + /// for i in 0..len { + /// ptr::write(p.add(i), 4 + i); /// } /// /// // Put everything back together into a SmallVec with a different @@ -1103,8 +1107,8 @@ where unsafe { let slice_ptr = slice.as_ptr(); - let ptr = self.as_mut_ptr().offset(index as isize); - ptr::copy(ptr, ptr.offset(slice.len() as isize), len - index); + let ptr = self.as_mut_ptr().add(index); + ptr::copy(ptr, ptr.add(slice.len()), len - index); ptr::copy_nonoverlapping(slice_ptr, ptr, slice.len()); self.set_len(len + slice.len()); } @@ -1156,8 +1160,8 @@ where let (ptr, len_ptr, _) = v.triple_mut(); let mut local_len = SetLenOnDrop::new(len_ptr); - for i in 0..n as isize { - ::core::ptr::write(ptr.offset(i), elem.clone()); + for i in 0..n { + ::core::ptr::write(ptr.add(i), elem.clone()); local_len.increment_len(1); } } @@ -1318,7 +1322,7 @@ where #[cfg(not(feature = "specialization"))] #[inline] fn from(slice: &'a [A::Item]) -> SmallVec { - slice.into_iter().cloned().collect() + slice.iter().cloned().collect() } #[cfg(feature = "specialization")] @@ -1384,7 +1388,7 @@ impl Extend for SmallVec { let mut len = SetLenOnDrop::new(len_ptr); while len.get() < cap { if let Some(out) = iter.next() { - ptr::write(ptr.offset(len.get() as isize), out); + ptr::write(ptr.add(len.get()), out); len.increment_len(1); } else { return; @@ -1463,10 +1467,6 @@ where fn eq(&self, other: &SmallVec) -> bool { self[..] == other[..] 
} - #[inline] - fn ne(&self, other: &SmallVec) -> bool { - self[..] != other[..] - } } impl Eq for SmallVec where A::Item: Eq {} @@ -1513,6 +1513,24 @@ pub struct IntoIter { end: usize, } +impl fmt::Debug for IntoIter +where + A::Item: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("IntoIter").field(&self.as_slice()).finish() + } +} + +impl Clone for IntoIter +where + A::Item: Clone, +{ + fn clone(&self) -> IntoIter { + SmallVec::from(self.as_slice()).into_iter() + } +} + impl Drop for IntoIter { fn drop(&mut self) { for _ in self {} @@ -1528,9 +1546,9 @@ impl Iterator for IntoIter { None } else { unsafe { - let current = self.current as isize; + let current = self.current; self.current += 1; - Some(ptr::read(self.data.as_ptr().offset(current))) + Some(ptr::read(self.data.as_ptr().add(current))) } } } @@ -1550,7 +1568,7 @@ impl DoubleEndedIterator for IntoIter { } else { unsafe { self.end -= 1; - Some(ptr::read(self.data.as_ptr().offset(self.end as isize))) + Some(ptr::read(self.data.as_ptr().add(self.end))) } } } @@ -1559,6 +1577,20 @@ impl DoubleEndedIterator for IntoIter { impl ExactSizeIterator for IntoIter {} impl FusedIterator for IntoIter {} +impl IntoIter { + /// Returns the remaining items of this iterator as a slice. + pub fn as_slice(&self) -> &[A::Item] { + let len = self.end - self.current; + unsafe { core::slice::from_raw_parts(self.data.as_ptr().add(self.current), len) } + } + + /// Returns the remaining items of this iterator as a mutable slice. + pub fn as_mut_slice(&mut self) -> &mut [A::Item] { + let len = self.end - self.current; + unsafe { core::slice::from_raw_parts_mut(self.data.as_mut_ptr().add(self.current), len) } + } +} + impl IntoIterator for SmallVec { type IntoIter = IntoIter; type Item = A::Item; @@ -1613,7 +1645,7 @@ impl<'a> SetLenOnDrop<'a> { fn new(len: &'a mut usize) -> Self { SetLenOnDrop { local_len: *len, - len: len, + len, } } @@ -1649,7 +1681,7 @@ macro_rules! impl_array( impl_array!( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 24, 32, 36, 0x40, 0x60, 0x80, 0x100, 0x200, 0x400, 0x600, 0x800, 0x1000, 0x2000, 0x4000, 0x6000, 0x8000, 0x10000, 0x20000, - 0x40000, 0x60000, 0x80000, 0x100000 + 0x40000, 0x60000, 0x80000, 0x10_0000 ); #[cfg(test)] @@ -1661,7 +1693,7 @@ mod tests { use alloc::borrow::ToOwned; use alloc::boxed::Box; use alloc::rc::Rc; - use alloc::vec::Vec; + use alloc::{vec, vec::Vec}; #[test] pub fn test_zero() { @@ -1976,7 +2008,6 @@ mod tests { ); } - #[cfg(all(feature = "std", not(miri)))] // Miri currently does not support unwinding #[test] // https://github.com/servo/rust-smallvec/issues/96 fn test_insert_many_panic() { @@ -2109,7 +2140,6 @@ mod tests { assert!(c > b); } - #[cfg(feature = "std")] #[test] fn test_hash() { use std::collections::hash_map::DefaultHasher; @@ -2231,6 +2261,51 @@ mod tests { assert_eq!(vec.into_iter().len(), 1); } + #[test] + fn test_into_iter_as_slice() { + let vec = SmallVec::<[u32; 2]>::from(&[1, 2, 3][..]); + let mut iter = vec.clone().into_iter(); + assert_eq!(iter.as_slice(), &[1, 2, 3]); + assert_eq!(iter.as_mut_slice(), &[1, 2, 3]); + iter.next(); + assert_eq!(iter.as_slice(), &[2, 3]); + assert_eq!(iter.as_mut_slice(), &[2, 3]); + iter.next_back(); + assert_eq!(iter.as_slice(), &[2]); + assert_eq!(iter.as_mut_slice(), &[2]); + } + + #[test] + fn test_into_iter_clone() { + // Test that the cloned iterator yields identical elements and that it owns its own copy + // (i.e. no use after move errors). 
+ let mut iter = SmallVec::<[u8; 2]>::from_iter(0..3).into_iter(); + let mut clone_iter = iter.clone(); + while let Some(x) = iter.next() { + assert_eq!(x, clone_iter.next().unwrap()); + } + assert_eq!(clone_iter.next(), None); + } + + #[test] + fn test_into_iter_clone_partially_consumed_iterator() { + // Test that the cloned iterator only contains the remaining elements of the original iterator. + let mut iter = SmallVec::<[u8; 2]>::from_iter(0..3).into_iter().skip(1); + let mut clone_iter = iter.clone(); + while let Some(x) = iter.next() { + assert_eq!(x, clone_iter.next().unwrap()); + } + assert_eq!(clone_iter.next(), None); + } + + #[test] + fn test_into_iter_clone_empty_smallvec() { + let mut iter = SmallVec::<[u8; 2]>::new().into_iter(); + let mut clone_iter = iter.clone(); + assert_eq!(iter.next(), None); + assert_eq!(clone_iter.next(), None); + } + #[test] fn shrink_to_fit_unspill() { let mut vec = SmallVec::<[u8; 2]>::from_iter(0..3); @@ -2359,10 +2434,10 @@ mod tests { assert_eq!(v[..], [1, 0][..]); } - #[cfg(feature = "std")] + #[cfg(feature = "write")] #[test] fn test_write() { - use io::Write; + use std::io::Write; let data = [1, 2, 3, 4, 5]; diff --git a/third_party/rust/smallvec/scripts/run_miri.sh b/third_party/rust/smallvec/scripts/run_miri.sh index 42f28849c6..c5e5376f46 100644 --- a/third_party/rust/smallvec/scripts/run_miri.sh +++ b/third_party/rust/smallvec/scripts/run_miri.sh @@ -16,6 +16,6 @@ rustup default "$MIRI_NIGHTLY" rustup component add miri cargo miri setup -cargo miri test --verbose -- -- -Zunstable-options --exclude-should-panic -cargo miri test --verbose --features union -- -- -Zunstable-options --exclude-should-panic -cargo miri test --verbose --all-features -- -- -Zunstable-options --exclude-should-panic +cargo miri test --verbose -- -Zmiri-ignore-leaks +cargo miri test --verbose --features union -- -Zmiri-ignore-leaks +cargo miri test --verbose --all-features -- -Zmiri-ignore-leaks diff --git a/third_party/rust/syn-0.15.30/.cargo-checksum.json b/third_party/rust/syn-0.15.30/.cargo-checksum.json deleted file mode 100644 index 2aa729af57..0000000000 --- a/third_party/rust/syn-0.15.30/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ 
-{"files":{"Cargo.toml":"ebf2c860a726b1210648f946b92b56a54414f2a009043d8affefdb1a0e4bd234","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"3f3d105c0f1bae3bdb5ed3cc32a8e5a02f3de6f62a9f17f5ba03af3f813d0881","build.rs":"9f3b0dc6ae4c0065c3cf001a40a2ff9c5a86cac5edf66fa0e80d0bcd37c0d4fc","src/attr.rs":"9210a8dc5fea0ee3004b14a9e2923142932c53986b56685d62d9aa115fe093b0","src/buffer.rs":"fac77febe022ab4982078c73ca502f9d698a402b3eca0f8e9c7a2796af01e5f4","src/data.rs":"54ee54c3c650bc5e200d4bea947d6e89093a39f2444cd43e8861f5852975e0bc","src/derive.rs":"eb041b47a73bace73c2872cd9a8e190de7e2b3b57cd57213770f088ec19ab3c6","src/error.rs":"0bcf09b0892c6d5f5f31f7f758866ded1e47463833cad60694329348ac1fb44a","src/export.rs":"1f7e017dac9426e91c796445e733c390c408c808ba9c21cb7df7799b67352fde","src/expr.rs":"c9000092a57412dcbef401cd51195b816ebfae96736deeebcece5bdc73471cdc","src/ext.rs":"1881179e634681cdd472ecac502192b5a5a7759056d1c49497706749fdab1bdf","src/file.rs":"abb9f5e71a8a6b52649c15da21d3f99e25a727d87c2f87d2744ac3970c1d683f","src/gen/fold.rs":"5507dde16c41e4f11b8903938a5f442fff9f94468afccbf0a37a4fa14f2eddde","src/gen/visit.rs":"4c8f499d1fd988b9c9ff820ef76c0c93b1e5ee91e6a94e0892298f5e6f424893","src/gen/visit_mut.rs":"d1f905b2b5afd685ace07532b6cbc67175e2d104508c7ed86ed4e9d1567c7c7b","src/gen_helper.rs":"644b1d31987919c0239891d8f90c09f3bf541a71fb85e744438e4814cc6dff89","src/generics.rs":"6ee5bba532b95f7de7c6bbe8caadabc6a71c45e7f8d7636d823075ff27f28937","src/group.rs":"03487f75d0abd302f06e50eb09d14ab83fb60c67e1f2602be53ca3f28a833b90","src/ident.rs":"bd7884de0031d942c556f929905532dd8799f8ca2450e65b8e4e8dfa78cf4fee","src/item.rs":"92b5ceca78590d9236dbdd008b97d817d051a16b10cb4dac8614199a09bda03e","src/keyword.rs":"aaca18ec0226f454ed5eecf8d99ef01afb8dd2457c9cae4f91b262accc0c0c41","src/lib.rs":"d36df1838b3454cd63a4ca214044c2be9281a33b533581f5da622bc66e2446d2","src/lifetime.rs":"7912a4c77ee805e912fb28c7f434836ea82540263d549877cd5edfbe32d1bf95","src/lit.rs":"bc034e4443a32f2066b41f62e05946143f60397fbbd05016c0020c4818e0a0f8","src/lookahead.rs":"5b3c55ae8c1b1d0ed813c296dc6fa586379a99e7792a3cb0d634ae6ca74f54b5","src/mac.rs":"a91623ed9c1de7b18ef752db79a242002e95156497a52a1790a75069915d22ee","src/macros.rs":"2f91e07a1aec4b385986c0a0f66274e8de1c1aa81f95d398a5cd364b3c451bb4","src/op.rs":"01edb1e07b6d60b266797ca4b30788b0a511452228e04073a11f0b61f106a0e7","src/parse.rs":"d907b9822943bafbcb1e005f09a145e46c162e7702fce703b57f9b7ccbdf85a2","src/parse_macro_input.rs":"8df7b4c1b361171f3fefb0490dec570ad29c024c04e35184b296725f97f2002c","src/parse_quote.rs":"d5e613fbba06900d882f2aaa042f10c1bee1b1dffaa1d9ee9a73d1e504a08fad","src/path.rs":"18b5c17b5acb7814a63517bbc473e1120895b4f7ff8f856279f31493663cc7fa","src/print.rs":"7ebb68123898f2ebbae12abf028747c05bea7b08f1e96b17164f1dcebdab7355","src/punctuated.rs":"3abae461aa47b71f00ede80e42691da5617a21c6f4864c40b2f984bb34a9a277","src/span.rs":"748c51c6feb223c26d3b1701f5bb98aee823666c775c98106cfa24fe29d8cec1","src/spanned.rs":"83b4ab1e2138ac9340eaa8234ad1d9f7468b450ddf3a852e574cac18e4f766b8","src/thread.rs":"ac3f4aa972b0dee5b9ae5202c5cd6bef46823fc91ff83787a3fe1bdfb8f79135","src/token.rs":"20868cd459ac8eea83e8891cf0b5d7c9dc179b0ff76953d5d0a2a29f17c0c7af","src/tt.rs":"b3d99cbd68cd50749f26f4afa138e6366d327099ed566b30c315ccb58fa26ded","src/ty.rs":"4ac9d1b84f9bf269516348e1b923b1c8e3f7562b98ec7ef66174c31fffb8dce5"},"package":"66c8865bf5a7cbb662d8b011950060b3c8743dca141b054bf7195b20d3
14d8e2"} \ No newline at end of file diff --git a/third_party/rust/syn-0.15.30/Cargo.toml b/third_party/rust/syn-0.15.30/Cargo.toml deleted file mode 100644 index 3e73e38fe3..0000000000 --- a/third_party/rust/syn-0.15.30/Cargo.toml +++ /dev/null @@ -1,71 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -name = "syn" -version = "0.15.30" -authors = ["David Tolnay "] -include = ["/build.rs", "/Cargo.toml", "/LICENSE-APACHE", "/LICENSE-MIT", "/README.md", "/src/**/*.rs"] -description = "Parser for Rust source code" -documentation = "https://docs.rs/syn" -readme = "README.md" -categories = ["development-tools::procedural-macro-helpers"] -license = "MIT/Apache-2.0" -repository = "https://github.com/dtolnay/syn" -[package.metadata.docs.rs] -all-features = true - -[package.metadata.playground] -all-features = true - -[lib] -name = "syn" -[dependencies.proc-macro2] -version = "0.4.4" -default-features = false - -[dependencies.quote] -version = "0.6" -optional = true -default-features = false - -[dependencies.unicode-xid] -version = "0.1" -[dev-dependencies.colored] -version = "1.7" - -[dev-dependencies.insta] -version = "0.7" - -[dev-dependencies.rayon] -version = "1.0" - -[dev-dependencies.regex] -version = "1.0" - -[dev-dependencies.walkdir] -version = "2.1" - -[features] -clone-impls = [] -default = ["derive", "parsing", "printing", "clone-impls", "proc-macro"] -derive = [] -extra-traits = [] -fold = [] -full = [] -parsing = [] -printing = ["quote"] -proc-macro = ["proc-macro2/proc-macro", "quote/proc-macro"] -visit = [] -visit-mut = [] -[badges.travis-ci] -repository = "dtolnay/syn" diff --git a/third_party/rust/syn-0.15.30/LICENSE-APACHE b/third_party/rust/syn-0.15.30/LICENSE-APACHE deleted file mode 100644 index 16fe87b06e..0000000000 --- a/third_party/rust/syn-0.15.30/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/third_party/rust/syn-0.15.30/LICENSE-MIT b/third_party/rust/syn-0.15.30/LICENSE-MIT deleted file mode 100644 index 31aa79387f..0000000000 --- a/third_party/rust/syn-0.15.30/LICENSE-MIT +++ /dev/null @@ -1,23 +0,0 @@ -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/syn-0.15.30/README.md b/third_party/rust/syn-0.15.30/README.md deleted file mode 100644 index ecc6eb8782..0000000000 --- a/third_party/rust/syn-0.15.30/README.md +++ /dev/null @@ -1,256 +0,0 @@ -Parser for Rust source code -=========================== - -[![Build Status](https://api.travis-ci.org/dtolnay/syn.svg?branch=master)](https://travis-ci.org/dtolnay/syn) -[![Latest Version](https://img.shields.io/crates/v/syn.svg)](https://crates.io/crates/syn) -[![Rust Documentation](https://img.shields.io/badge/api-rustdoc-blue.svg)](https://docs.rs/syn/0.15/syn/) -[![Rustc Version 1.15+](https://img.shields.io/badge/rustc-1.15+-lightgray.svg)](https://blog.rust-lang.org/2017/02/02/Rust-1.15.html) - -Syn is a parsing library for parsing a stream of Rust tokens into a syntax tree -of Rust source code. - -Currently this library is geared toward use in Rust procedural macros, but -contains some APIs that may be useful more generally. - -[custom derive]: https://github.com/rust-lang/rfcs/blob/master/text/1681-macros-1.1.md - -- **Data structures** — Syn provides a complete syntax tree that can represent - any valid Rust source code. The syntax tree is rooted at [`syn::File`] which - represents a full source file, but there are other entry points that may be - useful to procedural macros including [`syn::Item`], [`syn::Expr`] and - [`syn::Type`]. - -- **Custom derives** — Of particular interest to custom derives is - [`syn::DeriveInput`] which is any of the three legal input items to a derive - macro. An example below shows using this type in a library that can derive - implementations of a trait of your own. - -- **Parsing** — Parsing in Syn is built around [parser functions] with the - signature `fn(ParseStream) -> Result`. Every syntax tree node defined by - Syn is individually parsable and may be used as a building block for custom - syntaxes, or you may dream up your own brand new syntax without involving any - of our syntax tree types. - -- **Location information** — Every token parsed by Syn is associated with a - `Span` that tracks line and column information back to the source of that - token. These spans allow a procedural macro to display detailed error messages - pointing to all the right places in the user's code. There is an example of - this below. - -- **Feature flags** — Functionality is aggressively feature gated so your - procedural macros enable only what they need, and do not pay in compile time - for all the rest. - -[`syn::File`]: https://docs.rs/syn/0.15/syn/struct.File.html -[`syn::Item`]: https://docs.rs/syn/0.15/syn/enum.Item.html -[`syn::Expr`]: https://docs.rs/syn/0.15/syn/enum.Expr.html -[`syn::Type`]: https://docs.rs/syn/0.15/syn/enum.Type.html -[`syn::DeriveInput`]: https://docs.rs/syn/0.15/syn/struct.DeriveInput.html -[parser functions]: https://docs.rs/syn/0.15/syn/parse/index.html - -If you get stuck with anything involving procedural macros in Rust I am happy to -provide help even if the issue is not related to Syn. Please file a ticket in -this repo. - -*Version requirement: Syn supports any compiler version back to Rust's very -first support for procedural macros in Rust 1.15.0. 
Some features especially -around error reporting are only available in newer compilers or on the nightly -channel.* - -[*Release notes*](https://github.com/dtolnay/syn/releases) - -## Example of a custom derive - -The canonical custom derive using Syn looks like this. We write an ordinary Rust -function tagged with a `proc_macro_derive` attribute and the name of the trait -we are deriving. Any time that derive appears in the user's code, the Rust -compiler passes their data structure as tokens into our macro. We get to execute -arbitrary Rust code to figure out what to do with those tokens, then hand some -tokens back to the compiler to compile into the user's crate. - -[`TokenStream`]: https://doc.rust-lang.org/proc_macro/struct.TokenStream.html - -```toml -[dependencies] -syn = "0.15" -quote = "0.6" - -[lib] -proc-macro = true -``` - -```rust -extern crate proc_macro; - -use proc_macro::TokenStream; -use quote::quote; -use syn::{parse_macro_input, DeriveInput}; - -#[proc_macro_derive(MyMacro)] -pub fn my_macro(input: TokenStream) -> TokenStream { - // Parse the input tokens into a syntax tree - let input = parse_macro_input!(input as DeriveInput); - - // Build the output, possibly using quasi-quotation - let expanded = quote! { - // ... - }; - - // Hand the output tokens back to the compiler - TokenStream::from(expanded) -} -``` - -The [`heapsize`] example directory shows a complete working Macros 1.1 -implementation of a custom derive. It works on any Rust compiler 1.15+. The -example derives a `HeapSize` trait which computes an estimate of the amount of -heap memory owned by a value. - -[`heapsize`]: examples/heapsize - -```rust -pub trait HeapSize { - /// Total number of bytes of heap memory owned by `self`. - fn heap_size_of_children(&self) -> usize; -} -``` - -The custom derive allows users to write `#[derive(HeapSize)]` on data structures -in their program. - -```rust -#[derive(HeapSize)] -struct Demo<'a, T: ?Sized> { - a: Box, - b: u8, - c: &'a str, - d: String, -} -``` - -## Spans and error reporting - -The token-based procedural macro API provides great control over where the -compiler's error messages are displayed in user code. Consider the error the -user sees if one of their field types does not implement `HeapSize`. - -```rust -#[derive(HeapSize)] -struct Broken { - ok: String, - bad: std::thread::Thread, -} -``` - -By tracking span information all the way through the expansion of a procedural -macro as shown in the `heapsize` example, token-based macros in Syn are able to -trigger errors that directly pinpoint the source of the problem. - -``` -error[E0277]: the trait bound `std::thread::Thread: HeapSize` is not satisfied - --> src/main.rs:7:5 - | -7 | bad: std::thread::Thread, - | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `HeapSize` is not implemented for `std::thread::Thread` -``` - -## Parsing a custom syntax - -The [`lazy-static`] example directory shows the implementation of a -`functionlike!(...)` procedural macro in which the input tokens are parsed using -Syn's parsing API. - -[`lazy-static`]: examples/lazy-static - -The example reimplements the popular `lazy_static` crate from crates.io as a -procedural macro. - -``` -lazy_static! { - static ref USERNAME: Regex = Regex::new("^[a-z0-9_-]{3,16}$").unwrap(); -} -``` - -The implementation shows how to trigger custom warnings and error messages on -the macro input. 
- -``` -warning: come on, pick a more creative name - --> src/main.rs:10:16 - | -10 | static ref FOO: String = "lazy_static".to_owned(); - | ^^^ -``` - -## Debugging - -When developing a procedural macro it can be helpful to look at what the -generated code looks like. Use `cargo rustc -- -Zunstable-options ---pretty=expanded` or the [`cargo expand`] subcommand. - -[`cargo expand`]: https://github.com/dtolnay/cargo-expand - -To show the expanded code for some crate that uses your procedural macro, run -`cargo expand` from that crate. To show the expanded code for one of your own -test cases, run `cargo expand --test the_test_case` where the last argument is -the name of the test file without the `.rs` extension. - -This write-up by Brandon W Maister discusses debugging in more detail: -[Debugging Rust's new Custom Derive system][debugging]. - -[debugging]: https://quodlibetor.github.io/posts/debugging-rusts-new-custom-derive-system/ - -## Optional features - -Syn puts a lot of functionality behind optional features in order to optimize -compile time for the most common use cases. The following features are -available. - -- **`derive`** *(enabled by default)* — Data structures for representing the - possible input to a custom derive, including structs and enums and types. -- **`full`** — Data structures for representing the syntax tree of all valid - Rust source code, including items and expressions. -- **`parsing`** *(enabled by default)* — Ability to parse input tokens into a - syntax tree node of a chosen type. -- **`printing`** *(enabled by default)* — Ability to print a syntax tree node as - tokens of Rust source code. -- **`visit`** — Trait for traversing a syntax tree. -- **`visit-mut`** — Trait for traversing and mutating in place a syntax tree. -- **`fold`** — Trait for transforming an owned syntax tree. -- **`clone-impls`** *(enabled by default)* — Clone impls for all syntax tree - types. -- **`extra-traits`** — Debug, Eq, PartialEq, Hash impls for all syntax tree - types. -- **`proc-macro`** *(enabled by default)* — Runtime dependency on the dynamic - library libproc_macro from rustc toolchain. - -## Proc macro shim - -Syn uses the [proc-macro2] crate to emulate the compiler's procedural macro API -in a stable way that works all the way back to Rust 1.15.0. This shim makes it -possible to write code without regard for whether the current compiler version -supports the features we use. - -In general all of your code should be written against proc-macro2 rather than -proc-macro. The one exception is in the signatures of procedural macro entry -points, which are required by the language to use `proc_macro::TokenStream`. - -The proc-macro2 crate will automatically detect and use the compiler's data -structures on sufficiently new compilers. - -[proc-macro2]: https://github.com/alexcrichton/proc-macro2 - -## License - -Licensed under either of - - * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) - -at your option. - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in this crate by you, as defined in the Apache-2.0 license, shall -be dual licensed as above, without any additional terms or conditions. 
diff --git a/third_party/rust/syn-0.15.30/build.rs b/third_party/rust/syn-0.15.30/build.rs deleted file mode 100644 index 644bf75ffe..0000000000 --- a/third_party/rust/syn-0.15.30/build.rs +++ /dev/null @@ -1,68 +0,0 @@ -use std::env; -use std::process::Command; -use std::str::{self, FromStr}; - -// The rustc-cfg strings below are *not* public API. Please let us know by -// opening a GitHub issue if your build environment requires some way to enable -// these cfgs other than by executing our build script. -fn main() { - let compiler = match rustc_version() { - Some(compiler) => compiler, - None => return, - }; - - if compiler.minor >= 19 { - println!("cargo:rustc-cfg=syn_can_use_thread_id"); - } - - // Macro modularization allows re-exporting the `quote!` macro in 1.30+. - if compiler.minor >= 30 { - println!("cargo:rustc-cfg=syn_can_call_macro_by_path"); - } - - if !compiler.nightly { - println!("cargo:rustc-cfg=syn_disable_nightly_tests"); - } -} - -struct Compiler { - minor: u32, - nightly: bool, -} - -fn rustc_version() -> Option { - let rustc = match env::var_os("RUSTC") { - Some(rustc) => rustc, - None => return None, - }; - - let output = match Command::new(rustc).arg("--version").output() { - Ok(output) => output, - Err(_) => return None, - }; - - let version = match str::from_utf8(&output.stdout) { - Ok(version) => version, - Err(_) => return None, - }; - - let mut pieces = version.split('.'); - if pieces.next() != Some("rustc 1") { - return None; - } - - let next = match pieces.next() { - Some(next) => next, - None => return None, - }; - - let minor = match u32::from_str(next) { - Ok(minor) => minor, - Err(_) => return None, - }; - - Some(Compiler { - minor: minor, - nightly: version.contains("nightly"), - }) -} diff --git a/third_party/rust/syn-0.15.30/src/attr.rs b/third_party/rust/syn-0.15.30/src/attr.rs deleted file mode 100644 index d1e7dca7c0..0000000000 --- a/third_party/rust/syn-0.15.30/src/attr.rs +++ /dev/null @@ -1,681 +0,0 @@ -use super::*; -use punctuated::Punctuated; - -use std::iter; - -use proc_macro2::TokenStream; -#[cfg(not(feature = "parsing"))] -use proc_macro2::{Delimiter, Spacing, TokenTree}; - -#[cfg(feature = "parsing")] -use parse::{ParseStream, Result}; -#[cfg(feature = "extra-traits")] -use std::hash::{Hash, Hasher}; -#[cfg(feature = "extra-traits")] -use tt::TokenStreamHelper; - -ast_struct! { - /// An attribute like `#[repr(transparent)]`. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - /// - /// # Syntax - /// - /// Rust has six types of attributes. - /// - /// - Outer attributes like `#[repr(transparent)]`. These appear outside or - /// in front of the item they describe. - /// - Inner attributes like `#![feature(proc_macro)]`. These appear inside - /// of the item they describe, usually a module. - /// - Outer doc comments like `/// # Example`. - /// - Inner doc comments like `//! Please file an issue`. - /// - Outer block comments `/** # Example */`. - /// - Inner block comments `/*! Please file an issue */`. - /// - /// The `style` field of type `AttrStyle` distinguishes whether an attribute - /// is outer or inner. Doc comments and block comments are promoted to - /// attributes, as this is how they are processed by the compiler and by - /// `macro_rules!` macros. - /// - /// The `path` field gives the possibly colon-delimited path against which - /// the attribute is resolved. It is equal to `"doc"` for desugared doc - /// comments. 
The `tts` field contains the rest of the attribute body as - /// tokens. - /// - /// ```text - /// #[derive(Copy)] #[crate::precondition x < 5] - /// ^^^^^^~~~~~~ ^^^^^^^^^^^^^^^^^^^ ~~~~~ - /// path tts path tts - /// ``` - /// - /// Use the [`parse_meta`] method to try parsing the tokens of an attribute - /// into the structured representation that is used by convention across - /// most Rust libraries. - /// - /// [`parse_meta`]: #method.parse_meta - /// - /// # Parsing - /// - /// This type does not implement the [`Parse`] trait and thus cannot be - /// parsed directly by [`ParseStream::parse`]. Instead use - /// [`ParseStream::call`] with one of the two parser functions - /// [`Attribute::parse_outer`] or [`Attribute::parse_inner`] depending on - /// which you intend to parse. - /// - /// [`Parse`]: parse/trait.Parse.html - /// [`ParseStream::parse`]: parse/struct.ParseBuffer.html#method.parse - /// [`ParseStream::call`]: parse/struct.ParseBuffer.html#method.call - /// [`Attribute::parse_outer`]: #method.parse_outer - /// [`Attribute::parse_inner`]: #method.parse_inner - /// - /// ```edition2018 - /// use syn::{Attribute, Ident, Result, Token}; - /// use syn::parse::{Parse, ParseStream}; - /// - /// // Parses a unit struct with attributes. - /// // - /// // #[path = "s.tmpl"] - /// // struct S; - /// struct UnitStruct { - /// attrs: Vec, - /// struct_token: Token![struct], - /// name: Ident, - /// semi_token: Token![;], - /// } - /// - /// impl Parse for UnitStruct { - /// fn parse(input: ParseStream) -> Result { - /// Ok(UnitStruct { - /// attrs: input.call(Attribute::parse_outer)?, - /// struct_token: input.parse()?, - /// name: input.parse()?, - /// semi_token: input.parse()?, - /// }) - /// } - /// } - /// ``` - pub struct Attribute #manual_extra_traits { - pub pound_token: Token![#], - pub style: AttrStyle, - pub bracket_token: token::Bracket, - pub path: Path, - pub tts: TokenStream, - } -} - -#[cfg(feature = "extra-traits")] -impl Eq for Attribute {} - -#[cfg(feature = "extra-traits")] -impl PartialEq for Attribute { - fn eq(&self, other: &Self) -> bool { - self.style == other.style - && self.pound_token == other.pound_token - && self.bracket_token == other.bracket_token - && self.path == other.path - && TokenStreamHelper(&self.tts) == TokenStreamHelper(&other.tts) - } -} - -#[cfg(feature = "extra-traits")] -impl Hash for Attribute { - fn hash(&self, state: &mut H) - where - H: Hasher, - { - self.style.hash(state); - self.pound_token.hash(state); - self.bracket_token.hash(state); - self.path.hash(state); - TokenStreamHelper(&self.tts).hash(state); - } -} - -impl Attribute { - /// Parses the tokens after the path as a [`Meta`](enum.Meta.html) if - /// possible. - /// - /// Deprecated; use `parse_meta` instead. 
- #[doc(hidden)] - pub fn interpret_meta(&self) -> Option { - #[cfg(feature = "parsing")] - { - self.parse_meta().ok() - } - - #[cfg(not(feature = "parsing"))] - { - let name = if self.path.segments.len() == 1 { - &self.path.segments.first().unwrap().value().ident - } else { - return None; - }; - - if self.tts.is_empty() { - return Some(Meta::Word(name.clone())); - } - - let tts = self.tts.clone().into_iter().collect::>(); - - if tts.len() == 1 { - if let Some(meta) = Attribute::extract_meta_list(name.clone(), &tts[0]) { - return Some(meta); - } - } - - if tts.len() == 2 { - if let Some(meta) = Attribute::extract_name_value(name.clone(), &tts[0], &tts[1]) { - return Some(meta); - } - } - - None - } - } - - /// Parses the tokens after the path as a [`Meta`](enum.Meta.html) if - /// possible. - #[cfg(feature = "parsing")] - pub fn parse_meta(&self) -> Result { - if let Some(ref colon) = self.path.leading_colon { - return Err(Error::new(colon.spans[0], "expected meta identifier")); - } - - let first_segment = self - .path - .segments - .first() - .expect("paths have at least one segment"); - if let Some(colon) = first_segment.punct() { - return Err(Error::new(colon.spans[0], "expected meta value")); - } - let ident = first_segment.value().ident.clone(); - - let parser = |input: ParseStream| parsing::parse_meta_after_ident(ident, input); - parse::Parser::parse2(parser, self.tts.clone()) - } - - /// Parses zero or more outer attributes from the stream. - /// - /// *This function is available if Syn is built with the `"parsing"` - /// feature.* - #[cfg(feature = "parsing")] - pub fn parse_outer(input: ParseStream) -> Result> { - let mut attrs = Vec::new(); - while input.peek(Token![#]) { - attrs.push(input.call(parsing::single_parse_outer)?); - } - Ok(attrs) - } - - /// Parses zero or more inner attributes from the stream. - /// - /// *This function is available if Syn is built with the `"parsing"` - /// feature.* - #[cfg(feature = "parsing")] - pub fn parse_inner(input: ParseStream) -> Result> { - let mut attrs = Vec::new(); - while input.peek(Token![#]) && input.peek2(Token![!]) { - attrs.push(input.call(parsing::single_parse_inner)?); - } - Ok(attrs) - } - - #[cfg(not(feature = "parsing"))] - fn extract_meta_list(ident: Ident, tt: &TokenTree) -> Option { - let g = match *tt { - TokenTree::Group(ref g) => g, - _ => return None, - }; - if g.delimiter() != Delimiter::Parenthesis { - return None; - } - let tokens = g.stream().clone().into_iter().collect::>(); - let nested = match list_of_nested_meta_items_from_tokens(&tokens) { - Some(n) => n, - None => return None, - }; - Some(Meta::List(MetaList { - paren_token: token::Paren(g.span()), - ident: ident, - nested: nested, - })) - } - - #[cfg(not(feature = "parsing"))] - fn extract_name_value(ident: Ident, a: &TokenTree, b: &TokenTree) -> Option { - let a = match *a { - TokenTree::Punct(ref o) => o, - _ => return None, - }; - if a.spacing() != Spacing::Alone { - return None; - } - if a.as_char() != '=' { - return None; - } - - match *b { - TokenTree::Literal(ref l) if !l.to_string().starts_with('/') => { - Some(Meta::NameValue(MetaNameValue { - ident: ident, - eq_token: Token![=]([a.span()]), - lit: Lit::new(l.clone()), - })) - } - TokenTree::Ident(ref v) => match &v.to_string()[..] 
{ - v @ "true" | v @ "false" => Some(Meta::NameValue(MetaNameValue { - ident: ident, - eq_token: Token![=]([a.span()]), - lit: Lit::Bool(LitBool { - value: v == "true", - span: b.span(), - }), - })), - _ => None, - }, - _ => None, - } - } -} - -#[cfg(not(feature = "parsing"))] -fn nested_meta_item_from_tokens(tts: &[TokenTree]) -> Option<(NestedMeta, &[TokenTree])> { - assert!(!tts.is_empty()); - - match tts[0] { - TokenTree::Literal(ref lit) => { - if lit.to_string().starts_with('/') { - None - } else { - let lit = Lit::new(lit.clone()); - Some((NestedMeta::Literal(lit), &tts[1..])) - } - } - - TokenTree::Ident(ref ident) => { - if tts.len() >= 3 { - if let Some(meta) = Attribute::extract_name_value(ident.clone(), &tts[1], &tts[2]) { - return Some((NestedMeta::Meta(meta), &tts[3..])); - } - } - - if tts.len() >= 2 { - if let Some(meta) = Attribute::extract_meta_list(ident.clone(), &tts[1]) { - return Some((NestedMeta::Meta(meta), &tts[2..])); - } - } - - let nested_meta = if ident == "true" || ident == "false" { - NestedMeta::Literal(Lit::Bool(LitBool { - value: ident == "true", - span: ident.span(), - })) - } else { - NestedMeta::Meta(Meta::Word(ident.clone())) - }; - Some((nested_meta, &tts[1..])) - } - - _ => None, - } -} - -#[cfg(not(feature = "parsing"))] -fn list_of_nested_meta_items_from_tokens( - mut tts: &[TokenTree], -) -> Option> { - let mut nested_meta_items = Punctuated::new(); - let mut first = true; - - while !tts.is_empty() { - let prev_comma = if first { - first = false; - None - } else if let TokenTree::Punct(ref op) = tts[0] { - if op.spacing() != Spacing::Alone { - return None; - } - if op.as_char() != ',' { - return None; - } - let tok = Token![,]([op.span()]); - tts = &tts[1..]; - if tts.is_empty() { - break; - } - Some(tok) - } else { - return None; - }; - let (nested, rest) = match nested_meta_item_from_tokens(tts) { - Some(pair) => pair, - None => return None, - }; - if let Some(comma) = prev_comma { - nested_meta_items.push_punct(comma); - } - nested_meta_items.push_value(nested); - tts = rest; - } - - Some(nested_meta_items) -} - -ast_enum! { - /// Distinguishes between attributes that decorate an item and attributes - /// that are contained within an item. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - /// - /// # Outer attributes - /// - /// - `#[repr(transparent)]` - /// - `/// # Example` - /// - `/** Please file an issue */` - /// - /// # Inner attributes - /// - /// - `#![feature(proc_macro)]` - /// - `//! # Example` - /// - `/*! Please file an issue */` - #[cfg_attr(feature = "clone-impls", derive(Copy))] - pub enum AttrStyle { - Outer, - Inner(Token![!]), - } -} - -ast_enum_of_structs! { - /// Content of a compile-time structured attribute. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - /// - /// ## Word - /// - /// A meta word is like the `test` in `#[test]`. - /// - /// ## List - /// - /// A meta list is like the `derive(Copy)` in `#[derive(Copy)]`. - /// - /// ## NameValue - /// - /// A name-value meta is like the `path = "..."` in `#[path = - /// "sys/windows.rs"]`. - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. - /// - /// [syntax tree enum]: enum.Expr.html#syntax-tree-enums - pub enum Meta { - pub Word(Ident), - /// A structured list within an attribute, like `derive(Copy, Clone)`. 
- /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub List(MetaList { - pub ident: Ident, - pub paren_token: token::Paren, - pub nested: Punctuated, - }), - /// A name-value pair within an attribute, like `feature = "nightly"`. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub NameValue(MetaNameValue { - pub ident: Ident, - pub eq_token: Token![=], - pub lit: Lit, - }), - } -} - -impl Meta { - /// Returns the identifier that begins this structured meta item. - /// - /// For example this would return the `test` in `#[test]`, the `derive` in - /// `#[derive(Copy)]`, and the `path` in `#[path = "sys/windows.rs"]`. - pub fn name(&self) -> Ident { - match *self { - Meta::Word(ref meta) => meta.clone(), - Meta::List(ref meta) => meta.ident.clone(), - Meta::NameValue(ref meta) => meta.ident.clone(), - } - } -} - -ast_enum_of_structs! { - /// Element of a compile-time attribute list. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - pub enum NestedMeta { - /// A structured meta item, like the `Copy` in `#[derive(Copy)]` which - /// would be a nested `Meta::Word`. - pub Meta(Meta), - - /// A Rust literal, like the `"new_name"` in `#[rename("new_name")]`. - pub Literal(Lit), - } -} - -/// Conventional argument type associated with an invocation of an attribute -/// macro. -/// -/// For example if we are developing an attribute macro that is intended to be -/// invoked on function items as follows: -/// -/// ```edition2018 -/// # const IGNORE: &str = stringify! { -/// #[my_attribute(path = "/v1/refresh")] -/// # }; -/// pub fn refresh() { -/// /* ... */ -/// } -/// ``` -/// -/// The implementation of this macro would want to parse its attribute arguments -/// as type `AttributeArgs`. -/// -/// ```edition2018 -/// extern crate proc_macro; -/// -/// use proc_macro::TokenStream; -/// use syn::{parse_macro_input, AttributeArgs, ItemFn}; -/// -/// # const IGNORE: &str = stringify! { -/// #[proc_macro_attribute] -/// # }; -/// pub fn my_attribute(args: TokenStream, input: TokenStream) -> TokenStream { -/// let args = parse_macro_input!(args as AttributeArgs); -/// let input = parse_macro_input!(input as ItemFn); -/// -/// /* ... 
*/ -/// # "".parse().unwrap() -/// } -/// ``` -pub type AttributeArgs = Vec; - -pub trait FilterAttrs<'a> { - type Ret: Iterator; - - fn outer(self) -> Self::Ret; - fn inner(self) -> Self::Ret; -} - -impl<'a, T> FilterAttrs<'a> for T -where - T: IntoIterator, -{ - type Ret = iter::Filter bool>; - - fn outer(self) -> Self::Ret { - #[cfg_attr(feature = "cargo-clippy", allow(trivially_copy_pass_by_ref))] - fn is_outer(attr: &&Attribute) -> bool { - match attr.style { - AttrStyle::Outer => true, - _ => false, - } - } - self.into_iter().filter(is_outer) - } - - fn inner(self) -> Self::Ret { - #[cfg_attr(feature = "cargo-clippy", allow(trivially_copy_pass_by_ref))] - fn is_inner(attr: &&Attribute) -> bool { - match attr.style { - AttrStyle::Inner(_) => true, - _ => false, - } - } - self.into_iter().filter(is_inner) - } -} - -#[cfg(feature = "parsing")] -pub mod parsing { - use super::*; - - use ext::IdentExt; - use parse::{Parse, ParseStream, Result}; - #[cfg(feature = "full")] - use private; - - pub fn single_parse_inner(input: ParseStream) -> Result { - let content; - Ok(Attribute { - pound_token: input.parse()?, - style: AttrStyle::Inner(input.parse()?), - bracket_token: bracketed!(content in input), - path: content.call(Path::parse_mod_style)?, - tts: content.parse()?, - }) - } - - pub fn single_parse_outer(input: ParseStream) -> Result { - let content; - Ok(Attribute { - pound_token: input.parse()?, - style: AttrStyle::Outer, - bracket_token: bracketed!(content in input), - path: content.call(Path::parse_mod_style)?, - tts: content.parse()?, - }) - } - - #[cfg(feature = "full")] - impl private { - pub fn attrs(outer: Vec, inner: Vec) -> Vec { - let mut attrs = outer; - attrs.extend(inner); - attrs - } - } - - impl Parse for Meta { - fn parse(input: ParseStream) -> Result { - let ident = input.call(Ident::parse_any)?; - parse_meta_after_ident(ident, input) - } - } - - impl Parse for MetaList { - fn parse(input: ParseStream) -> Result { - let ident = input.call(Ident::parse_any)?; - parse_meta_list_after_ident(ident, input) - } - } - - impl Parse for MetaNameValue { - fn parse(input: ParseStream) -> Result { - let ident = input.call(Ident::parse_any)?; - parse_meta_name_value_after_ident(ident, input) - } - } - - impl Parse for NestedMeta { - fn parse(input: ParseStream) -> Result { - let ahead = input.fork(); - - if ahead.peek(Lit) && !(ahead.peek(LitBool) && ahead.peek2(Token![=])) { - input.parse().map(NestedMeta::Literal) - } else if ahead.call(Ident::parse_any).is_ok() { - input.parse().map(NestedMeta::Meta) - } else { - Err(input.error("expected identifier or literal")) - } - } - } - - pub fn parse_meta_after_ident(ident: Ident, input: ParseStream) -> Result { - if input.peek(token::Paren) { - parse_meta_list_after_ident(ident, input).map(Meta::List) - } else if input.peek(Token![=]) { - parse_meta_name_value_after_ident(ident, input).map(Meta::NameValue) - } else { - Ok(Meta::Word(ident)) - } - } - - fn parse_meta_list_after_ident(ident: Ident, input: ParseStream) -> Result { - let content; - Ok(MetaList { - ident: ident, - paren_token: parenthesized!(content in input), - nested: content.parse_terminated(NestedMeta::parse)?, - }) - } - - fn parse_meta_name_value_after_ident( - ident: Ident, - input: ParseStream, - ) -> Result { - Ok(MetaNameValue { - ident: ident, - eq_token: input.parse()?, - lit: input.parse()?, - }) - } -} - -#[cfg(feature = "printing")] -mod printing { - use super::*; - use proc_macro2::TokenStream; - use quote::ToTokens; - - impl ToTokens for Attribute { - fn 
to_tokens(&self, tokens: &mut TokenStream) { - self.pound_token.to_tokens(tokens); - if let AttrStyle::Inner(ref b) = self.style { - b.to_tokens(tokens); - } - self.bracket_token.surround(tokens, |tokens| { - self.path.to_tokens(tokens); - self.tts.to_tokens(tokens); - }); - } - } - - impl ToTokens for MetaList { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.ident.to_tokens(tokens); - self.paren_token.surround(tokens, |tokens| { - self.nested.to_tokens(tokens); - }) - } - } - - impl ToTokens for MetaNameValue { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.ident.to_tokens(tokens); - self.eq_token.to_tokens(tokens); - self.lit.to_tokens(tokens); - } - } -} diff --git a/third_party/rust/syn-0.15.30/src/buffer.rs b/third_party/rust/syn-0.15.30/src/buffer.rs deleted file mode 100644 index 8c326451eb..0000000000 --- a/third_party/rust/syn-0.15.30/src/buffer.rs +++ /dev/null @@ -1,366 +0,0 @@ -//! A stably addressed token buffer supporting efficient traversal based on a -//! cheaply copyable cursor. -//! -//! *This module is available if Syn is built with the `"parsing"` feature.* - -// This module is heavily commented as it contains most of the unsafe code in -// Syn, and caution should be used when editing it. The public-facing interface -// is 100% safe but the implementation is fragile internally. - -#[cfg(all( - not(all(target_arch = "wasm32", target_os = "unknown")), - feature = "proc-macro" -))] -use proc_macro as pm; -use proc_macro2::{Delimiter, Group, Ident, Literal, Punct, Spacing, Span, TokenStream, TokenTree}; - -use std::marker::PhantomData; -use std::ptr; - -use private; -use Lifetime; - -/// Internal type which is used instead of `TokenTree` to represent a token tree -/// within a `TokenBuffer`. -enum Entry { - // Mimicking types from proc-macro. - Group(Group, TokenBuffer), - Ident(Ident), - Punct(Punct), - Literal(Literal), - // End entries contain a raw pointer to the entry from the containing - // token tree, or null if this is the outermost level. - End(*const Entry), -} - -/// A buffer that can be efficiently traversed multiple times, unlike -/// `TokenStream` which requires a deep copy in order to traverse more than -/// once. -/// -/// *This type is available if Syn is built with the `"parsing"` feature.* -pub struct TokenBuffer { - // NOTE: Do not derive clone on this - there are raw pointers inside which - // will be messed up. Moving the `TokenBuffer` itself is safe as the actual - // backing slices won't be moved. - data: Box<[Entry]>, -} - -impl TokenBuffer { - // NOTE: DO NOT MUTATE THE `Vec` RETURNED FROM THIS FUNCTION ONCE IT - // RETURNS, THE ADDRESS OF ITS BACKING MEMORY MUST REMAIN STABLE. - fn inner_new(stream: TokenStream, up: *const Entry) -> TokenBuffer { - // Build up the entries list, recording the locations of any Groups - // in the list to be processed later. - let mut entries = Vec::new(); - let mut seqs = Vec::new(); - for tt in stream { - match tt { - TokenTree::Ident(sym) => { - entries.push(Entry::Ident(sym)); - } - TokenTree::Punct(op) => { - entries.push(Entry::Punct(op)); - } - TokenTree::Literal(l) => { - entries.push(Entry::Literal(l)); - } - TokenTree::Group(g) => { - // Record the index of the interesting entry, and store an - // `End(null)` there temporarially. - seqs.push((entries.len(), g)); - entries.push(Entry::End(ptr::null())); - } - } - } - // Add an `End` entry to the end with a reference to the enclosing token - // stream which was passed in. 
- entries.push(Entry::End(up)); - - // NOTE: This is done to ensure that we don't accidentally modify the - // length of the backing buffer. The backing buffer must remain at a - // constant address after this point, as we are going to store a raw - // pointer into it. - let mut entries = entries.into_boxed_slice(); - for (idx, group) in seqs { - // We know that this index refers to one of the temporary - // `End(null)` entries, and we know that the last entry is - // `End(up)`, so the next index is also valid. - let seq_up = &entries[idx + 1] as *const Entry; - - // The end entry stored at the end of this Entry::Group should - // point to the Entry which follows the Group in the list. - let inner = Self::inner_new(group.stream(), seq_up); - entries[idx] = Entry::Group(group, inner); - } - - TokenBuffer { data: entries } - } - - /// Creates a `TokenBuffer` containing all the tokens from the input - /// `TokenStream`. - /// - /// *This method is available if Syn is built with both the `"parsing"` and - /// `"proc-macro"` features.* - #[cfg(all( - not(all(target_arch = "wasm32", target_os = "unknown")), - feature = "proc-macro" - ))] - pub fn new(stream: pm::TokenStream) -> TokenBuffer { - Self::new2(stream.into()) - } - - /// Creates a `TokenBuffer` containing all the tokens from the input - /// `TokenStream`. - pub fn new2(stream: TokenStream) -> TokenBuffer { - Self::inner_new(stream, ptr::null()) - } - - /// Creates a cursor referencing the first token in the buffer and able to - /// traverse until the end of the buffer. - pub fn begin(&self) -> Cursor { - unsafe { Cursor::create(&self.data[0], &self.data[self.data.len() - 1]) } - } -} - -/// A cheaply copyable cursor into a `TokenBuffer`. -/// -/// This cursor holds a shared reference into the immutable data which is used -/// internally to represent a `TokenStream`, and can be efficiently manipulated -/// and copied around. -/// -/// An empty `Cursor` can be created directly, or one may create a `TokenBuffer` -/// object and get a cursor to its first token with `begin()`. -/// -/// Two cursors are equal if they have the same location in the same input -/// stream, and have the same scope. -/// -/// *This type is available if Syn is built with the `"parsing"` feature.* -#[derive(Copy, Clone, Eq, PartialEq)] -pub struct Cursor<'a> { - // The current entry which the `Cursor` is pointing at. - ptr: *const Entry, - // This is the only `Entry::End(..)` object which this cursor is allowed to - // point at. All other `End` objects are skipped over in `Cursor::create`. - scope: *const Entry, - // Cursor is covariant in 'a. This field ensures that our pointers are still - // valid. - marker: PhantomData<&'a Entry>, -} - -impl<'a> Cursor<'a> { - /// Creates a cursor referencing a static empty TokenStream. - pub fn empty() -> Self { - // It's safe in this situation for us to put an `Entry` object in global - // storage, despite it not actually being safe to send across threads - // (`Ident` is a reference into a thread-local table). This is because - // this entry never includes a `Ident` object. - // - // This wrapper struct allows us to break the rules and put a `Sync` - // object in global storage. 
- struct UnsafeSyncEntry(Entry); - unsafe impl Sync for UnsafeSyncEntry {} - static EMPTY_ENTRY: UnsafeSyncEntry = UnsafeSyncEntry(Entry::End(0 as *const Entry)); - - Cursor { - ptr: &EMPTY_ENTRY.0, - scope: &EMPTY_ENTRY.0, - marker: PhantomData, - } - } - - /// This create method intelligently exits non-explicitly-entered - /// `None`-delimited scopes when the cursor reaches the end of them, - /// allowing for them to be treated transparently. - unsafe fn create(mut ptr: *const Entry, scope: *const Entry) -> Self { - // NOTE: If we're looking at a `End(..)`, we want to advance the cursor - // past it, unless `ptr == scope`, which means that we're at the edge of - // our cursor's scope. We should only have `ptr != scope` at the exit - // from None-delimited groups entered with `ignore_none`. - while let Entry::End(exit) = *ptr { - if ptr == scope { - break; - } - ptr = exit; - } - - Cursor { - ptr: ptr, - scope: scope, - marker: PhantomData, - } - } - - /// Get the current entry. - fn entry(self) -> &'a Entry { - unsafe { &*self.ptr } - } - - /// Bump the cursor to point at the next token after the current one. This - /// is undefined behavior if the cursor is currently looking at an - /// `Entry::End`. - unsafe fn bump(self) -> Cursor<'a> { - Cursor::create(self.ptr.offset(1), self.scope) - } - - /// If the cursor is looking at a `None`-delimited group, move it to look at - /// the first token inside instead. If the group is empty, this will move - /// the cursor past the `None`-delimited group. - /// - /// WARNING: This mutates its argument. - fn ignore_none(&mut self) { - if let Entry::Group(ref group, ref buf) = *self.entry() { - if group.delimiter() == Delimiter::None { - // NOTE: We call `Cursor::create` here to make sure that - // situations where we should immediately exit the span after - // entering it are handled correctly. - unsafe { - *self = Cursor::create(&buf.data[0], self.scope); - } - } - } - } - - /// Checks whether the cursor is currently pointing at the end of its valid - /// scope. - #[inline] - pub fn eof(self) -> bool { - // We're at eof if we're at the end of our scope. - self.ptr == self.scope - } - - /// If the cursor is pointing at a `Group` with the given delimiter, returns - /// a cursor into that group and one pointing to the next `TokenTree`. - pub fn group(mut self, delim: Delimiter) -> Option<(Cursor<'a>, Span, Cursor<'a>)> { - // If we're not trying to enter a none-delimited group, we want to - // ignore them. We have to make sure to _not_ ignore them when we want - // to enter them, of course. For obvious reasons. - if delim != Delimiter::None { - self.ignore_none(); - } - - if let Entry::Group(ref group, ref buf) = *self.entry() { - if group.delimiter() == delim { - return Some((buf.begin(), group.span(), unsafe { self.bump() })); - } - } - - None - } - - /// If the cursor is pointing at a `Ident`, returns it along with a cursor - /// pointing at the next `TokenTree`. - pub fn ident(mut self) -> Option<(Ident, Cursor<'a>)> { - self.ignore_none(); - match *self.entry() { - Entry::Ident(ref ident) => Some((ident.clone(), unsafe { self.bump() })), - _ => None, - } - } - - /// If the cursor is pointing at an `Punct`, returns it along with a cursor - /// pointing at the next `TokenTree`. 
- pub fn punct(mut self) -> Option<(Punct, Cursor<'a>)> { - self.ignore_none(); - match *self.entry() { - Entry::Punct(ref op) if op.as_char() != '\'' => { - Some((op.clone(), unsafe { self.bump() })) - } - _ => None, - } - } - - /// If the cursor is pointing at a `Literal`, return it along with a cursor - /// pointing at the next `TokenTree`. - pub fn literal(mut self) -> Option<(Literal, Cursor<'a>)> { - self.ignore_none(); - match *self.entry() { - Entry::Literal(ref lit) => Some((lit.clone(), unsafe { self.bump() })), - _ => None, - } - } - - /// If the cursor is pointing at a `Lifetime`, returns it along with a - /// cursor pointing at the next `TokenTree`. - pub fn lifetime(mut self) -> Option<(Lifetime, Cursor<'a>)> { - self.ignore_none(); - match *self.entry() { - Entry::Punct(ref op) if op.as_char() == '\'' && op.spacing() == Spacing::Joint => { - let next = unsafe { self.bump() }; - match next.ident() { - Some((ident, rest)) => { - let lifetime = Lifetime { - apostrophe: op.span(), - ident: ident, - }; - Some((lifetime, rest)) - } - None => None, - } - } - _ => None, - } - } - - /// Copies all remaining tokens visible from this cursor into a - /// `TokenStream`. - pub fn token_stream(self) -> TokenStream { - let mut tts = Vec::new(); - let mut cursor = self; - while let Some((tt, rest)) = cursor.token_tree() { - tts.push(tt); - cursor = rest; - } - tts.into_iter().collect() - } - - /// If the cursor is pointing at a `TokenTree`, returns it along with a - /// cursor pointing at the next `TokenTree`. - /// - /// Returns `None` if the cursor has reached the end of its stream. - /// - /// This method does not treat `None`-delimited groups as transparent, and - /// will return a `Group(None, ..)` if the cursor is looking at one. - pub fn token_tree(self) -> Option<(TokenTree, Cursor<'a>)> { - let tree = match *self.entry() { - Entry::Group(ref group, _) => group.clone().into(), - Entry::Literal(ref lit) => lit.clone().into(), - Entry::Ident(ref ident) => ident.clone().into(), - Entry::Punct(ref op) => op.clone().into(), - Entry::End(..) => { - return None; - } - }; - - Some((tree, unsafe { self.bump() })) - } - - /// Returns the `Span` of the current token, or `Span::call_site()` if this - /// cursor points to eof. - pub fn span(self) -> Span { - match *self.entry() { - Entry::Group(ref group, _) => group.span(), - Entry::Literal(ref l) => l.span(), - Entry::Ident(ref t) => t.span(), - Entry::Punct(ref o) => o.span(), - Entry::End(..) => Span::call_site(), - } - } -} - -impl private { - #[cfg(procmacro2_semver_exempt)] - pub fn open_span_of_group(cursor: Cursor) -> Span { - match *cursor.entry() { - Entry::Group(ref group, _) => group.span_open(), - _ => cursor.span(), - } - } - - #[cfg(procmacro2_semver_exempt)] - pub fn close_span_of_group(cursor: Cursor) -> Span { - match *cursor.entry() { - Entry::Group(ref group, _) => group.span_close(), - _ => cursor.span(), - } - } -} diff --git a/third_party/rust/syn-0.15.30/src/data.rs b/third_party/rust/syn-0.15.30/src/data.rs deleted file mode 100644 index f54f5b9afe..0000000000 --- a/third_party/rust/syn-0.15.30/src/data.rs +++ /dev/null @@ -1,384 +0,0 @@ -use super::*; -use punctuated::Punctuated; - -ast_struct! { - /// An enum variant. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - pub struct Variant { - /// Attributes tagged on the variant. - pub attrs: Vec, - - /// Name of the variant. - pub ident: Ident, - - /// Content stored in the variant. 
- pub fields: Fields, - - /// Explicit discriminant: `Variant = 1` - pub discriminant: Option<(Token![=], Expr)>, - } -} - -ast_enum_of_structs! { - /// Data stored within an enum variant or struct. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. - /// - /// [syntax tree enum]: enum.Expr.html#syntax-tree-enums - pub enum Fields { - /// Named fields of a struct or struct variant such as `Point { x: f64, - /// y: f64 }`. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Named(FieldsNamed { - pub brace_token: token::Brace, - pub named: Punctuated, - }), - - /// Unnamed fields of a tuple struct or tuple variant such as `Some(T)`. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Unnamed(FieldsUnnamed { - pub paren_token: token::Paren, - pub unnamed: Punctuated, - }), - - /// Unit struct or unit variant such as `None`. - pub Unit, - } -} - -impl Fields { - /// Get an iterator over the borrowed [`Field`] items in this object. This - /// iterator can be used to iterate over a named or unnamed struct or - /// variant's fields uniformly. - /// - /// [`Field`]: struct.Field.html - pub fn iter(&self) -> punctuated::Iter { - match *self { - Fields::Unit => private::empty_punctuated_iter(), - Fields::Named(ref f) => f.named.iter(), - Fields::Unnamed(ref f) => f.unnamed.iter(), - } - } - - /// Get an iterator over the mutably borrowed [`Field`] items in this - /// object. This iterator can be used to iterate over a named or unnamed - /// struct or variant's fields uniformly. - /// - /// [`Field`]: struct.Field.html - pub fn iter_mut(&mut self) -> punctuated::IterMut { - match *self { - Fields::Unit => private::empty_punctuated_iter_mut(), - Fields::Named(ref mut f) => f.named.iter_mut(), - Fields::Unnamed(ref mut f) => f.unnamed.iter_mut(), - } - } -} - -impl<'a> IntoIterator for &'a Fields { - type Item = &'a Field; - type IntoIter = punctuated::Iter<'a, Field>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl<'a> IntoIterator for &'a mut Fields { - type Item = &'a mut Field; - type IntoIter = punctuated::IterMut<'a, Field>; - - fn into_iter(self) -> Self::IntoIter { - self.iter_mut() - } -} - -ast_struct! { - /// A field of a struct or enum variant. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - pub struct Field { - /// Attributes tagged on the field. - pub attrs: Vec, - - /// Visibility of the field. - pub vis: Visibility, - - /// Name of the field, if any. - /// - /// Fields of tuple structs have no names. - pub ident: Option, - - pub colon_token: Option, - - /// Type of the field. - pub ty: Type, - } -} - -ast_enum_of_structs! { - /// The visibility level of an item: inherited or `pub` or - /// `pub(restricted)`. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. - /// - /// [syntax tree enum]: enum.Expr.html#syntax-tree-enums - pub enum Visibility { - /// A public visibility level: `pub`. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Public(VisPublic { - pub pub_token: Token![pub], - }), - - /// A crate-level visibility: `crate`. 
- /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Crate(VisCrate { - pub crate_token: Token![crate], - }), - - /// A visibility level restricted to some path: `pub(self)` or - /// `pub(super)` or `pub(crate)` or `pub(in some::module)`. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Restricted(VisRestricted { - pub pub_token: Token![pub], - pub paren_token: token::Paren, - pub in_token: Option, - pub path: Box, - }), - - /// An inherited visibility, which usually means private. - pub Inherited, - } -} - -#[cfg(feature = "parsing")] -pub mod parsing { - use super::*; - - use ext::IdentExt; - use parse::{Parse, ParseStream, Result}; - - impl Parse for Variant { - fn parse(input: ParseStream) -> Result { - Ok(Variant { - attrs: input.call(Attribute::parse_outer)?, - ident: input.parse()?, - fields: { - if input.peek(token::Brace) { - Fields::Named(input.parse()?) - } else if input.peek(token::Paren) { - Fields::Unnamed(input.parse()?) - } else { - Fields::Unit - } - }, - discriminant: { - if input.peek(Token![=]) { - let eq_token: Token![=] = input.parse()?; - let discriminant: Expr = input.parse()?; - Some((eq_token, discriminant)) - } else { - None - } - }, - }) - } - } - - impl Parse for FieldsNamed { - fn parse(input: ParseStream) -> Result { - let content; - Ok(FieldsNamed { - brace_token: braced!(content in input), - named: content.parse_terminated(Field::parse_named)?, - }) - } - } - - impl Parse for FieldsUnnamed { - fn parse(input: ParseStream) -> Result { - let content; - Ok(FieldsUnnamed { - paren_token: parenthesized!(content in input), - unnamed: content.parse_terminated(Field::parse_unnamed)?, - }) - } - } - - impl Field { - /// Parses a named (braced struct) field. - pub fn parse_named(input: ParseStream) -> Result { - Ok(Field { - attrs: input.call(Attribute::parse_outer)?, - vis: input.parse()?, - ident: Some(input.parse()?), - colon_token: Some(input.parse()?), - ty: input.parse()?, - }) - } - - /// Parses an unnamed (tuple struct) field. 
- pub fn parse_unnamed(input: ParseStream) -> Result { - Ok(Field { - attrs: input.call(Attribute::parse_outer)?, - vis: input.parse()?, - ident: None, - colon_token: None, - ty: input.parse()?, - }) - } - } - - impl Parse for Visibility { - fn parse(input: ParseStream) -> Result { - if input.peek(Token![pub]) { - Self::parse_pub(input) - } else if input.peek(Token![crate]) { - Self::parse_crate(input) - } else { - Ok(Visibility::Inherited) - } - } - } - - impl Visibility { - fn parse_pub(input: ParseStream) -> Result { - let pub_token = input.parse::()?; - - if input.peek(token::Paren) { - let ahead = input.fork(); - let mut content; - parenthesized!(content in ahead); - - if content.peek(Token![crate]) - || content.peek(Token![self]) - || content.peek(Token![super]) - { - return Ok(Visibility::Restricted(VisRestricted { - pub_token: pub_token, - paren_token: parenthesized!(content in input), - in_token: None, - path: Box::new(Path::from(content.call(Ident::parse_any)?)), - })); - } else if content.peek(Token![in]) { - return Ok(Visibility::Restricted(VisRestricted { - pub_token: pub_token, - paren_token: parenthesized!(content in input), - in_token: Some(content.parse()?), - path: Box::new(content.call(Path::parse_mod_style)?), - })); - } - } - - Ok(Visibility::Public(VisPublic { - pub_token: pub_token, - })) - } - - fn parse_crate(input: ParseStream) -> Result { - if input.peek2(Token![::]) { - Ok(Visibility::Inherited) - } else { - Ok(Visibility::Crate(VisCrate { - crate_token: input.parse()?, - })) - } - } - } -} - -#[cfg(feature = "printing")] -mod printing { - use super::*; - - use proc_macro2::TokenStream; - use quote::{ToTokens, TokenStreamExt}; - - use print::TokensOrDefault; - - impl ToTokens for Variant { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(&self.attrs); - self.ident.to_tokens(tokens); - self.fields.to_tokens(tokens); - if let Some((ref eq_token, ref disc)) = self.discriminant { - eq_token.to_tokens(tokens); - disc.to_tokens(tokens); - } - } - } - - impl ToTokens for FieldsNamed { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.brace_token.surround(tokens, |tokens| { - self.named.to_tokens(tokens); - }); - } - } - - impl ToTokens for FieldsUnnamed { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.paren_token.surround(tokens, |tokens| { - self.unnamed.to_tokens(tokens); - }); - } - } - - impl ToTokens for Field { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(&self.attrs); - self.vis.to_tokens(tokens); - if let Some(ref ident) = self.ident { - ident.to_tokens(tokens); - TokensOrDefault(&self.colon_token).to_tokens(tokens); - } - self.ty.to_tokens(tokens); - } - } - - impl ToTokens for VisPublic { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.pub_token.to_tokens(tokens) - } - } - - impl ToTokens for VisCrate { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.crate_token.to_tokens(tokens); - } - } - - impl ToTokens for VisRestricted { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.pub_token.to_tokens(tokens); - self.paren_token.surround(tokens, |tokens| { - // TODO: If we have a path which is not "self" or "super" or - // "crate", automatically add the "in" token. 
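The `Visibility` parser above distinguishes plain `pub`, restricted forms such as `pub(crate)` and `pub(in some::module)`, bare `crate`, and inherited (private) visibility. A quick sketch of consuming it, assuming syn 0.15 with default features:

```rust
use syn::{parse_str, Visibility};

fn label(vis: &Visibility) -> &'static str {
    match *vis {
        Visibility::Public(_) => "pub",
        Visibility::Crate(_) => "crate",
        Visibility::Restricted(_) => "pub(restricted)",
        Visibility::Inherited => "private",
    }
}

fn main() {
    let vis: Visibility = parse_str("pub(crate)").unwrap();
    assert_eq!(label(&vis), "pub(restricted)");
}
```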
- self.in_token.to_tokens(tokens); - self.path.to_tokens(tokens); - }); - } - } -} diff --git a/third_party/rust/syn-0.15.30/src/derive.rs b/third_party/rust/syn-0.15.30/src/derive.rs deleted file mode 100644 index 48ae7e46ee..0000000000 --- a/third_party/rust/syn-0.15.30/src/derive.rs +++ /dev/null @@ -1,255 +0,0 @@ -use super::*; -use punctuated::Punctuated; - -ast_struct! { - /// Data structure sent to a `proc_macro_derive` macro. - /// - /// *This type is available if Syn is built with the `"derive"` feature.* - pub struct DeriveInput { - /// Attributes tagged on the whole struct or enum. - pub attrs: Vec, - - /// Visibility of the struct or enum. - pub vis: Visibility, - - /// Name of the struct or enum. - pub ident: Ident, - - /// Generics required to complete the definition. - pub generics: Generics, - - /// Data within the struct or enum. - pub data: Data, - } -} - -ast_enum_of_structs! { - /// The storage of a struct, enum or union data structure. - /// - /// *This type is available if Syn is built with the `"derive"` feature.* - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. - /// - /// [syntax tree enum]: enum.Expr.html#syntax-tree-enums - pub enum Data { - /// A struct input to a `proc_macro_derive` macro. - /// - /// *This type is available if Syn is built with the `"derive"` - /// feature.* - pub Struct(DataStruct { - pub struct_token: Token![struct], - pub fields: Fields, - pub semi_token: Option, - }), - - /// An enum input to a `proc_macro_derive` macro. - /// - /// *This type is available if Syn is built with the `"derive"` - /// feature.* - pub Enum(DataEnum { - pub enum_token: Token![enum], - pub brace_token: token::Brace, - pub variants: Punctuated, - }), - - /// A tagged union input to a `proc_macro_derive` macro. 
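`DeriveInput` and `Data`, whose removal from `derive.rs` begins here, are the payload handed to a `proc_macro_derive` function. A minimal skeleton of such a derive, assuming a separate crate with `proc-macro = true` plus syn 0.15 and quote 0.6 as dependencies; the derive name `Describe` and the generated method are purely illustrative:

```rust
extern crate proc_macro;

use proc_macro::TokenStream;
use quote::quote;
use syn::{parse_macro_input, Data, DeriveInput};

#[proc_macro_derive(Describe)]
pub fn derive_describe(input: TokenStream) -> TokenStream {
    let input = parse_macro_input!(input as DeriveInput);
    let name = input.ident;

    // Dispatch on the three kinds of input a derive can receive.
    let kind = match input.data {
        Data::Struct(_) => "a struct",
        Data::Enum(_) => "an enum",
        Data::Union(_) => "a union",
    };

    // NOTE: a real derive would use `input.generics.split_for_impl()` so that
    // generic types are handled; this sketch assumes a non-generic input.
    let expanded = quote! {
        impl #name {
            pub fn describe() -> &'static str { #kind }
        }
    };
    expanded.into()
}
```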
- /// - /// *This type is available if Syn is built with the `"derive"` - /// feature.* - pub Union(DataUnion { - pub union_token: Token![union], - pub fields: FieldsNamed, - }), - } - - do_not_generate_to_tokens -} - -#[cfg(feature = "parsing")] -pub mod parsing { - use super::*; - - use parse::{Parse, ParseStream, Result}; - - impl Parse for DeriveInput { - fn parse(input: ParseStream) -> Result { - let attrs = input.call(Attribute::parse_outer)?; - let vis = input.parse::()?; - - let lookahead = input.lookahead1(); - if lookahead.peek(Token![struct]) { - let struct_token = input.parse::()?; - let ident = input.parse::()?; - let generics = input.parse::()?; - let (where_clause, fields, semi) = data_struct(input)?; - Ok(DeriveInput { - attrs: attrs, - vis: vis, - ident: ident, - generics: Generics { - where_clause: where_clause, - ..generics - }, - data: Data::Struct(DataStruct { - struct_token: struct_token, - fields: fields, - semi_token: semi, - }), - }) - } else if lookahead.peek(Token![enum]) { - let enum_token = input.parse::()?; - let ident = input.parse::()?; - let generics = input.parse::()?; - let (where_clause, brace, variants) = data_enum(input)?; - Ok(DeriveInput { - attrs: attrs, - vis: vis, - ident: ident, - generics: Generics { - where_clause: where_clause, - ..generics - }, - data: Data::Enum(DataEnum { - enum_token: enum_token, - brace_token: brace, - variants: variants, - }), - }) - } else if lookahead.peek(Token![union]) { - let union_token = input.parse::()?; - let ident = input.parse::()?; - let generics = input.parse::()?; - let (where_clause, fields) = data_union(input)?; - Ok(DeriveInput { - attrs: attrs, - vis: vis, - ident: ident, - generics: Generics { - where_clause: where_clause, - ..generics - }, - data: Data::Union(DataUnion { - union_token: union_token, - fields: fields, - }), - }) - } else { - Err(lookahead.error()) - } - } - } - - pub fn data_struct( - input: ParseStream, - ) -> Result<(Option, Fields, Option)> { - let mut lookahead = input.lookahead1(); - let mut where_clause = None; - if lookahead.peek(Token![where]) { - where_clause = Some(input.parse()?); - lookahead = input.lookahead1(); - } - - if where_clause.is_none() && lookahead.peek(token::Paren) { - let fields = input.parse()?; - - lookahead = input.lookahead1(); - if lookahead.peek(Token![where]) { - where_clause = Some(input.parse()?); - lookahead = input.lookahead1(); - } - - if lookahead.peek(Token![;]) { - let semi = input.parse()?; - Ok((where_clause, Fields::Unnamed(fields), Some(semi))) - } else { - Err(lookahead.error()) - } - } else if lookahead.peek(token::Brace) { - let fields = input.parse()?; - Ok((where_clause, Fields::Named(fields), None)) - } else if lookahead.peek(Token![;]) { - let semi = input.parse()?; - Ok((where_clause, Fields::Unit, Some(semi))) - } else { - Err(lookahead.error()) - } - } - - pub fn data_enum( - input: ParseStream, - ) -> Result<( - Option, - token::Brace, - Punctuated, - )> { - let where_clause = input.parse()?; - - let content; - let brace = braced!(content in input); - let variants = content.parse_terminated(Variant::parse)?; - - Ok((where_clause, brace, variants)) - } - - pub fn data_union(input: ParseStream) -> Result<(Option, FieldsNamed)> { - let where_clause = input.parse()?; - let fields = input.parse()?; - Ok((where_clause, fields)) - } -} - -#[cfg(feature = "printing")] -mod printing { - use super::*; - - use proc_macro2::TokenStream; - use quote::ToTokens; - - use attr::FilterAttrs; - use print::TokensOrDefault; - - impl ToTokens for 
DeriveInput { - fn to_tokens(&self, tokens: &mut TokenStream) { - for attr in self.attrs.outer() { - attr.to_tokens(tokens); - } - self.vis.to_tokens(tokens); - match self.data { - Data::Struct(ref d) => d.struct_token.to_tokens(tokens), - Data::Enum(ref d) => d.enum_token.to_tokens(tokens), - Data::Union(ref d) => d.union_token.to_tokens(tokens), - } - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - match self.data { - Data::Struct(ref data) => match data.fields { - Fields::Named(ref fields) => { - self.generics.where_clause.to_tokens(tokens); - fields.to_tokens(tokens); - } - Fields::Unnamed(ref fields) => { - fields.to_tokens(tokens); - self.generics.where_clause.to_tokens(tokens); - TokensOrDefault(&data.semi_token).to_tokens(tokens); - } - Fields::Unit => { - self.generics.where_clause.to_tokens(tokens); - TokensOrDefault(&data.semi_token).to_tokens(tokens); - } - }, - Data::Enum(ref data) => { - self.generics.where_clause.to_tokens(tokens); - data.brace_token.surround(tokens, |tokens| { - data.variants.to_tokens(tokens); - }); - } - Data::Union(ref data) => { - self.generics.where_clause.to_tokens(tokens); - data.fields.to_tokens(tokens); - } - } - } - } -} diff --git a/third_party/rust/syn-0.15.30/src/error.rs b/third_party/rust/syn-0.15.30/src/error.rs deleted file mode 100644 index e07051d6e0..0000000000 --- a/third_party/rust/syn-0.15.30/src/error.rs +++ /dev/null @@ -1,216 +0,0 @@ -use std; -use std::fmt::{self, Display}; -use std::iter::FromIterator; - -use proc_macro2::{ - Delimiter, Group, Ident, LexError, Literal, Punct, Spacing, Span, TokenStream, TokenTree, -}; -#[cfg(feature = "printing")] -use quote::ToTokens; - -#[cfg(feature = "parsing")] -use buffer::Cursor; -#[cfg(all(procmacro2_semver_exempt, feature = "parsing"))] -use private; -use thread::ThreadBound; - -/// The result of a Syn parser. -pub type Result = std::result::Result; - -/// Error returned when a Syn parser cannot parse the input tokens. -/// -/// Refer to the [module documentation] for details about parsing in Syn. -/// -/// [module documentation]: index.html -/// -/// *This type is available if Syn is built with the `"parsing"` feature.* -#[derive(Debug)] -pub struct Error { - // Span is implemented as an index into a thread-local interner to keep the - // size small. It is not safe to access from a different thread. We want - // errors to be Send and Sync to play nicely with the Failure crate, so pin - // the span we're given to its original thread and assume it is - // Span::call_site if accessed from any other thread. - start_span: ThreadBound, - end_span: ThreadBound, - message: String, -} - -#[cfg(test)] -struct _Test -where - Error: Send + Sync; - -impl Error { - /// Usually the [`ParseStream::error`] method will be used instead, which - /// automatically uses the correct span from the current position of the - /// parse stream. - /// - /// Use `Error::new` when the error needs to be triggered on some span other - /// than where the parse stream is currently positioned. - /// - /// [`ParseStream::error`]: struct.ParseBuffer.html#method.error - /// - /// # Example - /// - /// ```edition2018 - /// use syn::{Error, Ident, LitStr, Result, Token}; - /// use syn::parse::ParseStream; - /// - /// // Parses input that looks like `name = "string"` where the key must be - /// // the identifier `name` and the value may be any string literal. - /// // Returns the string literal. 
- /// fn parse_name(input: ParseStream) -> Result { - /// let name_token: Ident = input.parse()?; - /// if name_token != "name" { - /// // Trigger an error not on the current position of the stream, - /// // but on the position of the unexpected identifier. - /// return Err(Error::new(name_token.span(), "expected `name`")); - /// } - /// input.parse::()?; - /// let s: LitStr = input.parse()?; - /// Ok(s) - /// } - /// ``` - pub fn new(span: Span, message: T) -> Self { - Error { - start_span: ThreadBound::new(span), - end_span: ThreadBound::new(span), - message: message.to_string(), - } - } - - /// Creates an error with the specified message spanning the given syntax - /// tree node. - /// - /// Unlike the `Error::new` constructor, this constructor takes an argument - /// `tokens` which is a syntax tree node. This allows the resulting `Error` - /// to attempt to span all tokens inside of `tokens`. While you would - /// typically be able to use the `Spanned` trait with the above `Error::new` - /// constructor, implementation limitations today mean that - /// `Error::new_spanned` may provide a higher-quality error message on - /// stable Rust. - /// - /// When in doubt it's recommended to stick to `Error::new` (or - /// `ParseStream::error`)! - #[cfg(feature = "printing")] - pub fn new_spanned(tokens: T, message: U) -> Self { - let mut iter = tokens.into_token_stream().into_iter(); - let start = iter.next().map_or_else(Span::call_site, |t| t.span()); - let end = iter.last().map_or(start, |t| t.span()); - Error { - start_span: ThreadBound::new(start), - end_span: ThreadBound::new(end), - message: message.to_string(), - } - } - - /// The source location of the error. - /// - /// Spans are not thread-safe so this function returns `Span::call_site()` - /// if called from a different thread than the one on which the `Error` was - /// originally created. - pub fn span(&self) -> Span { - let start = match self.start_span.get() { - Some(span) => *span, - None => return Span::call_site(), - }; - - #[cfg(procmacro2_semver_exempt)] - { - let end = match self.end_span.get() { - Some(span) => *span, - None => return Span::call_site(), - }; - start.join(end).unwrap_or(start) - } - #[cfg(not(procmacro2_semver_exempt))] - { - start - } - } - - /// Render the error as an invocation of [`compile_error!`]. - /// - /// The [`parse_macro_input!`] macro provides a convenient way to invoke - /// this method correctly in a procedural macro. 
- /// - /// [`compile_error!`]: https://doc.rust-lang.org/std/macro.compile_error.html - /// [`parse_macro_input!`]: ../macro.parse_macro_input.html - pub fn to_compile_error(&self) -> TokenStream { - let start = self - .start_span - .get() - .cloned() - .unwrap_or_else(Span::call_site); - let end = self.end_span.get().cloned().unwrap_or_else(Span::call_site); - - // compile_error!($message) - TokenStream::from_iter(vec![ - TokenTree::Ident(Ident::new("compile_error", start)), - TokenTree::Punct({ - let mut punct = Punct::new('!', Spacing::Alone); - punct.set_span(start); - punct - }), - TokenTree::Group({ - let mut group = Group::new(Delimiter::Brace, { - TokenStream::from_iter(vec![TokenTree::Literal({ - let mut string = Literal::string(&self.message); - string.set_span(end); - string - })]) - }); - group.set_span(end); - group - }), - ]) - } -} - -#[cfg(feature = "parsing")] -pub fn new_at(scope: Span, cursor: Cursor, message: T) -> Error { - if cursor.eof() { - Error::new(scope, format!("unexpected end of input, {}", message)) - } else { - #[cfg(procmacro2_semver_exempt)] - let span = private::open_span_of_group(cursor); - #[cfg(not(procmacro2_semver_exempt))] - let span = cursor.span(); - Error::new(span, message) - } -} - -impl Display for Error { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str(&self.message) - } -} - -impl Clone for Error { - fn clone(&self) -> Self { - let start = self - .start_span - .get() - .cloned() - .unwrap_or_else(Span::call_site); - let end = self.end_span.get().cloned().unwrap_or_else(Span::call_site); - Error { - start_span: ThreadBound::new(start), - end_span: ThreadBound::new(end), - message: self.message.clone(), - } - } -} - -impl std::error::Error for Error { - fn description(&self) -> &str { - "parse error" - } -} - -impl From for Error { - fn from(err: LexError) -> Self { - Error::new(Span::call_site(), format!("{:?}", err)) - } -} diff --git a/third_party/rust/syn-0.15.30/src/export.rs b/third_party/rust/syn-0.15.30/src/export.rs deleted file mode 100644 index 8e270bd017..0000000000 --- a/third_party/rust/syn-0.15.30/src/export.rs +++ /dev/null @@ -1,35 +0,0 @@ -pub use std::clone::Clone; -pub use std::cmp::{Eq, PartialEq}; -pub use std::convert::From; -pub use std::default::Default; -pub use std::fmt::{self, Debug, Formatter}; -pub use std::hash::{Hash, Hasher}; -pub use std::marker::Copy; -pub use std::option::Option::{None, Some}; -pub use std::result::Result::{Err, Ok}; - -#[cfg(feature = "printing")] -pub extern crate quote; - -pub use proc_macro2::{Span, TokenStream as TokenStream2}; - -pub use span::IntoSpans; - -#[cfg(all( - not(all(target_arch = "wasm32", target_os = "unknown")), - feature = "proc-macro" -))] -pub use proc_macro::TokenStream; - -#[cfg(feature = "printing")] -pub use quote::{ToTokens, TokenStreamExt}; - -#[allow(non_camel_case_types)] -pub type bool = help::Bool; -#[allow(non_camel_case_types)] -pub type str = help::Str; - -mod help { - pub type Bool = bool; - pub type Str = str; -} diff --git a/third_party/rust/syn-0.15.30/src/expr.rs b/third_party/rust/syn-0.15.30/src/expr.rs deleted file mode 100644 index 44241971bc..0000000000 --- a/third_party/rust/syn-0.15.30/src/expr.rs +++ /dev/null @@ -1,3768 +0,0 @@ -use super::*; -use proc_macro2::{Span, TokenStream}; -use punctuated::Punctuated; -#[cfg(feature = "extra-traits")] -use std::hash::{Hash, Hasher}; -#[cfg(all(feature = "parsing", feature = "full"))] -use std::mem; -#[cfg(feature = "extra-traits")] -use tt::TokenStreamHelper; - 
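`Error::new_spanned` and `to_compile_error` from the deleted `error.rs` are how a derive reports a spanned diagnostic instead of panicking. A hedged sketch of that flow under syn 0.15 with default features; the validation rule itself is invented for illustration:

```rust
use proc_macro2::TokenStream;
use syn::{DeriveInput, Error};

// Reject enums; accept everything else. On failure, produce tokens that
// expand to `compile_error!("...")` at the offending identifier.
fn check_not_enum(input: &DeriveInput) -> Result<(), Error> {
    if let syn::Data::Enum(_) = input.data {
        return Err(Error::new_spanned(
            &input.ident,
            "this derive does not support enums",
        ));
    }
    Ok(())
}

fn expand(input: DeriveInput) -> TokenStream {
    match check_not_enum(&input) {
        Ok(()) => TokenStream::new(),       // real expansion would go here
        Err(err) => err.to_compile_error(), // becomes a compile_error! invocation
    }
}

fn main() {
    let input: DeriveInput = syn::parse_str("enum E { A, B }").unwrap();
    println!("{}", expand(input));
}
```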
-ast_enum_of_structs! { - /// A Rust expression. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - /// - /// # Syntax tree enums - /// - /// This type is a syntax tree enum. In Syn this and other syntax tree enums - /// are designed to be traversed using the following rebinding idiom. - /// - /// ```edition2018 - /// # use syn::Expr; - /// # - /// # fn example(expr: Expr) { - /// # const IGNORE: &str = stringify! { - /// let expr: Expr = /* ... */; - /// # }; - /// match expr { - /// Expr::MethodCall(expr) => { - /// /* ... */ - /// } - /// Expr::Cast(expr) => { - /// /* ... */ - /// } - /// Expr::If(expr) => { - /// /* ... */ - /// } - /// /* ... */ - /// # _ => {} - /// } - /// # } - /// ``` - /// - /// We begin with a variable `expr` of type `Expr` that has no fields - /// (because it is an enum), and by matching on it and rebinding a variable - /// with the same name `expr` we effectively imbue our variable with all of - /// the data fields provided by the variant that it turned out to be. So for - /// example above if we ended up in the `MethodCall` case then we get to use - /// `expr.receiver`, `expr.args` etc; if we ended up in the `If` case we get - /// to use `expr.cond`, `expr.then_branch`, `expr.else_branch`. - /// - /// The pattern is similar if the input expression is borrowed: - /// - /// ```edition2018 - /// # use syn::Expr; - /// # - /// # fn example(expr: &Expr) { - /// match *expr { - /// Expr::MethodCall(ref expr) => { - /// # } - /// # _ => {} - /// # } - /// # } - /// ``` - /// - /// This approach avoids repeating the variant names twice on every line. - /// - /// ```edition2018 - /// # use syn::{Expr, ExprMethodCall}; - /// # - /// # fn example(expr: Expr) { - /// # match expr { - /// Expr::MethodCall(ExprMethodCall { method, args, .. }) => { // repetitive - /// # } - /// # _ => {} - /// # } - /// # } - /// ``` - /// - /// In general, the name to which a syntax tree enum variant is bound should - /// be a suitable name for the complete syntax tree enum type. - /// - /// ```edition2018 - /// # use syn::{Expr, ExprField}; - /// # - /// # fn example(discriminant: &ExprField) { - /// // Binding is called `base` which is the name I would use if I were - /// // assigning `*discriminant.base` without an `if let`. - /// if let Expr::Tuple(ref base) = *discriminant.base { - /// # } - /// # } - /// ``` - /// - /// A sign that you may not be choosing the right variable names is if you - /// see names getting repeated in your code, like accessing - /// `receiver.receiver` or `pat.pat` or `cond.cond`. - pub enum Expr { - /// A box expression: `box f`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Box(ExprBox #full { - pub attrs: Vec, - pub box_token: Token![box], - pub expr: Box, - }), - - /// A placement expression: `place <- value`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub InPlace(ExprInPlace #full { - pub attrs: Vec, - pub place: Box, - pub arrow_token: Token![<-], - pub value: Box, - }), - - /// A slice literal expression: `[a, b, c, d]`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Array(ExprArray #full { - pub attrs: Vec, - pub bracket_token: token::Bracket, - pub elems: Punctuated, - }), - - /// A function call expression: `invoke(a, b)`. 
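The long comment at the start of this enum describes the match-and-rebind idiom for syntax tree enums. A tiny illustration of it on `Expr`, assuming syn 0.15's default features (literals, calls, and binary operations are available even without `"full"`):

```rust
use syn::{parse_str, Expr};

fn describe(expr: &Expr) -> &'static str {
    // Match and rebind: inside each arm, `expr` is the concrete variant struct.
    match *expr {
        Expr::Binary(ref expr) => {
            let _ = (&expr.left, &expr.op, &expr.right); // fields are directly visible
            "a binary operation"
        }
        Expr::Lit(ref expr) => {
            let _ = &expr.lit;
            "a literal"
        }
        Expr::Call(_) => "a function call",
        _ => "some other expression",
    }
}

fn main() {
    let expr: Expr = parse_str("1 + 2 * 3").unwrap();
    assert_eq!(describe(&expr), "a binary operation");
}
```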
- /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Call(ExprCall { - pub attrs: Vec, - pub func: Box, - pub paren_token: token::Paren, - pub args: Punctuated, - }), - - /// A method call expression: `x.foo::(a, b)`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub MethodCall(ExprMethodCall #full { - pub attrs: Vec, - pub receiver: Box, - pub dot_token: Token![.], - pub method: Ident, - pub turbofish: Option, - pub paren_token: token::Paren, - pub args: Punctuated, - }), - - /// A tuple expression: `(a, b, c, d)`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Tuple(ExprTuple #full { - pub attrs: Vec, - pub paren_token: token::Paren, - pub elems: Punctuated, - }), - - /// A binary operation: `a + b`, `a * b`. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Binary(ExprBinary { - pub attrs: Vec, - pub left: Box, - pub op: BinOp, - pub right: Box, - }), - - /// A unary operation: `!x`, `*x`. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Unary(ExprUnary { - pub attrs: Vec, - pub op: UnOp, - pub expr: Box, - }), - - /// A literal in place of an expression: `1`, `"foo"`. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Lit(ExprLit { - pub attrs: Vec, - pub lit: Lit, - }), - - /// A cast expression: `foo as f64`. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Cast(ExprCast { - pub attrs: Vec, - pub expr: Box, - pub as_token: Token![as], - pub ty: Box, - }), - - /// A type ascription expression: `foo: f64`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Type(ExprType #full { - pub attrs: Vec, - pub expr: Box, - pub colon_token: Token![:], - pub ty: Box, - }), - - /// A `let` guard: `let Some(x) = opt`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Let(ExprLet #full { - pub attrs: Vec, - pub let_token: Token![let], - pub pats: Punctuated, - pub eq_token: Token![=], - pub expr: Box, - }), - - /// An `if` expression with an optional `else` block: `if expr { ... } - /// else { ... }`. - /// - /// The `else` branch expression may only be an `If` or `Block` - /// expression, not any of the other types of expression. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub If(ExprIf #full { - pub attrs: Vec, - pub if_token: Token![if], - pub cond: Box, - pub then_branch: Block, - pub else_branch: Option<(Token![else], Box)>, - }), - - /// A while loop: `while expr { ... }`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub While(ExprWhile #full { - pub attrs: Vec, - pub label: Option::B::C` and `::B::C` can only legally refer to - /// associated constants. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Path(PatPath { - pub qself: Option, - pub path: Path, - }), - - /// A tuple pattern: `(a, b)`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Tuple(PatTuple { - pub paren_token: token::Paren, - pub front: Punctuated, - pub dot2_token: Option, - pub comma_token: Option, - pub back: Punctuated, - }), - - /// A box pattern: `box v`. 
- /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Box(PatBox { - pub box_token: Token![box], - pub pat: Box, - }), - - /// A reference pattern: `&mut (first, second)`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Ref(PatRef { - pub and_token: Token![&], - pub mutability: Option, - pub pat: Box, - }), - - /// A literal pattern: `0`. - /// - /// This holds an `Expr` rather than a `Lit` because negative numbers - /// are represented as an `Expr::Unary`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Lit(PatLit { - pub expr: Box, - }), - - /// A range pattern: `1..=2`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Range(PatRange { - pub lo: Box, - pub limits: RangeLimits, - pub hi: Box, - }), - - /// A dynamically sized slice pattern: `[a, b, i.., y, z]`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Slice(PatSlice { - pub bracket_token: token::Bracket, - pub front: Punctuated, - pub middle: Option>, - pub dot2_token: Option, - pub comma_token: Option, - pub back: Punctuated, - }), - - /// A macro in expression position. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Macro(PatMacro { - pub mac: Macro, - }), - - /// Tokens in pattern position not interpreted by Syn. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Verbatim(PatVerbatim #manual_extra_traits { - pub tts: TokenStream, - }), - } -} - -#[cfg(all(feature = "full", feature = "extra-traits"))] -impl Eq for PatVerbatim {} - -#[cfg(all(feature = "full", feature = "extra-traits"))] -impl PartialEq for PatVerbatim { - fn eq(&self, other: &Self) -> bool { - TokenStreamHelper(&self.tts) == TokenStreamHelper(&other.tts) - } -} - -#[cfg(all(feature = "full", feature = "extra-traits"))] -impl Hash for PatVerbatim { - fn hash(&self, state: &mut H) - where - H: Hasher, - { - TokenStreamHelper(&self.tts).hash(state); - } -} - -#[cfg(feature = "full")] -ast_struct! { - /// One arm of a `match` expression: `0...10 => { return true; }`. - /// - /// As in: - /// - /// ```edition2018 - /// # fn f() -> bool { - /// # let n = 0; - /// match n { - /// 0...10 => { - /// return true; - /// } - /// // ... - /// # _ => {} - /// } - /// # false - /// # } - /// ``` - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub struct Arm { - pub attrs: Vec, - pub leading_vert: Option, - pub pats: Punctuated, - pub guard: Option<(Token![if], Box)>, - pub fat_arrow_token: Token![=>], - pub body: Box, - pub comma: Option, - } -} - -#[cfg(feature = "full")] -ast_enum! { - /// Limit types of a range, inclusive or exclusive. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - #[cfg_attr(feature = "clone-impls", derive(Copy))] - pub enum RangeLimits { - /// Inclusive at the beginning, exclusive at the end. - HalfOpen(Token![..]), - /// Inclusive at the beginning and end. - Closed(Token![..=]), - } -} - -#[cfg(feature = "full")] -ast_struct! { - /// A single field in a struct pattern. - /// - /// Patterns like the fields of Foo `{ x, ref y, ref mut z }` are treated - /// the same as `x: x, y: ref y, z: ref mut z` but there is no colon token. 
- /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub struct FieldPat { - pub attrs: Vec, - pub member: Member, - pub colon_token: Option, - pub pat: Box, - } -} - -#[cfg(any(feature = "parsing", feature = "printing"))] -#[cfg(feature = "full")] -fn requires_terminator(expr: &Expr) -> bool { - // see https://github.com/rust-lang/rust/blob/eb8f2586e/src/libsyntax/parse/classify.rs#L17-L37 - match *expr { - Expr::Unsafe(..) - | Expr::Block(..) - | Expr::If(..) - | Expr::Match(..) - | Expr::While(..) - | Expr::Loop(..) - | Expr::ForLoop(..) - | Expr::Async(..) - | Expr::TryBlock(..) => false, - _ => true, - } -} - -#[cfg(feature = "parsing")] -pub mod parsing { - use super::*; - - #[cfg(feature = "full")] - use ext::IdentExt; - use parse::{Parse, ParseStream, Result}; - use path; - - // When we're parsing expressions which occur before blocks, like in an if - // statement's condition, we cannot parse a struct literal. - // - // Struct literals are ambiguous in certain positions - // https://github.com/rust-lang/rfcs/pull/92 - #[derive(Copy, Clone)] - pub struct AllowStruct(bool); - - #[derive(Copy, Clone, PartialEq, PartialOrd)] - enum Precedence { - Any, - Assign, - Placement, - Range, - Or, - And, - Compare, - BitOr, - BitXor, - BitAnd, - Shift, - Arithmetic, - Term, - Cast, - } - - impl Precedence { - fn of(op: &BinOp) -> Self { - match *op { - BinOp::Add(_) | BinOp::Sub(_) => Precedence::Arithmetic, - BinOp::Mul(_) | BinOp::Div(_) | BinOp::Rem(_) => Precedence::Term, - BinOp::And(_) => Precedence::And, - BinOp::Or(_) => Precedence::Or, - BinOp::BitXor(_) => Precedence::BitXor, - BinOp::BitAnd(_) => Precedence::BitAnd, - BinOp::BitOr(_) => Precedence::BitOr, - BinOp::Shl(_) | BinOp::Shr(_) => Precedence::Shift, - BinOp::Eq(_) - | BinOp::Lt(_) - | BinOp::Le(_) - | BinOp::Ne(_) - | BinOp::Ge(_) - | BinOp::Gt(_) => Precedence::Compare, - BinOp::AddEq(_) - | BinOp::SubEq(_) - | BinOp::MulEq(_) - | BinOp::DivEq(_) - | BinOp::RemEq(_) - | BinOp::BitXorEq(_) - | BinOp::BitAndEq(_) - | BinOp::BitOrEq(_) - | BinOp::ShlEq(_) - | BinOp::ShrEq(_) => Precedence::Assign, - } - } - } - - impl Parse for Expr { - fn parse(input: ParseStream) -> Result { - ambiguous_expr(input, AllowStruct(true)) - } - } - - #[cfg(feature = "full")] - fn expr_no_struct(input: ParseStream) -> Result { - ambiguous_expr(input, AllowStruct(false)) - } - - #[cfg(feature = "full")] - fn parse_expr( - input: ParseStream, - mut lhs: Expr, - allow_struct: AllowStruct, - base: Precedence, - ) -> Result { - loop { - if input - .fork() - .parse::() - .ok() - .map_or(false, |op| Precedence::of(&op) >= base) - { - let op: BinOp = input.parse()?; - let precedence = Precedence::of(&op); - let mut rhs = unary_expr(input, allow_struct)?; - loop { - let next = peek_precedence(input); - if next > precedence || next == precedence && precedence == Precedence::Assign { - rhs = parse_expr(input, rhs, allow_struct, next)?; - } else { - break; - } - } - lhs = if precedence == Precedence::Assign { - Expr::AssignOp(ExprAssignOp { - attrs: Vec::new(), - left: Box::new(lhs), - op: op, - right: Box::new(rhs), - }) - } else { - Expr::Binary(ExprBinary { - attrs: Vec::new(), - left: Box::new(lhs), - op: op, - right: Box::new(rhs), - }) - }; - } else if Precedence::Assign >= base - && input.peek(Token![=]) - && !input.peek(Token![==]) - && !input.peek(Token![=>]) - { - let eq_token: Token![=] = input.parse()?; - let mut rhs = unary_expr(input, allow_struct)?; - loop { - let next = peek_precedence(input); - if next >= 
Precedence::Assign { - rhs = parse_expr(input, rhs, allow_struct, next)?; - } else { - break; - } - } - lhs = Expr::Assign(ExprAssign { - attrs: Vec::new(), - left: Box::new(lhs), - eq_token: eq_token, - right: Box::new(rhs), - }); - } else if Precedence::Placement >= base && input.peek(Token![<-]) { - let arrow_token: Token![<-] = input.parse()?; - let mut rhs = unary_expr(input, allow_struct)?; - loop { - let next = peek_precedence(input); - if next > Precedence::Placement { - rhs = parse_expr(input, rhs, allow_struct, next)?; - } else { - break; - } - } - lhs = Expr::InPlace(ExprInPlace { - attrs: Vec::new(), - place: Box::new(lhs), - arrow_token: arrow_token, - value: Box::new(rhs), - }); - } else if Precedence::Range >= base && input.peek(Token![..]) { - let limits: RangeLimits = input.parse()?; - let rhs = if input.is_empty() - || input.peek(Token![,]) - || input.peek(Token![;]) - || !allow_struct.0 && input.peek(token::Brace) - { - None - } else { - let mut rhs = unary_expr(input, allow_struct)?; - loop { - let next = peek_precedence(input); - if next > Precedence::Range { - rhs = parse_expr(input, rhs, allow_struct, next)?; - } else { - break; - } - } - Some(rhs) - }; - lhs = Expr::Range(ExprRange { - attrs: Vec::new(), - from: Some(Box::new(lhs)), - limits: limits, - to: rhs.map(Box::new), - }); - } else if Precedence::Cast >= base && input.peek(Token![as]) { - let as_token: Token![as] = input.parse()?; - let ty = input.call(Type::without_plus)?; - lhs = Expr::Cast(ExprCast { - attrs: Vec::new(), - expr: Box::new(lhs), - as_token: as_token, - ty: Box::new(ty), - }); - } else if Precedence::Cast >= base && input.peek(Token![:]) && !input.peek(Token![::]) { - let colon_token: Token![:] = input.parse()?; - let ty = input.call(Type::without_plus)?; - lhs = Expr::Type(ExprType { - attrs: Vec::new(), - expr: Box::new(lhs), - colon_token: colon_token, - ty: Box::new(ty), - }); - } else { - break; - } - } - Ok(lhs) - } - - #[cfg(not(feature = "full"))] - fn parse_expr( - input: ParseStream, - mut lhs: Expr, - allow_struct: AllowStruct, - base: Precedence, - ) -> Result { - loop { - if input - .fork() - .parse::() - .ok() - .map_or(false, |op| Precedence::of(&op) >= base) - { - let op: BinOp = input.parse()?; - let precedence = Precedence::of(&op); - let mut rhs = unary_expr(input, allow_struct)?; - loop { - let next = peek_precedence(input); - if next > precedence || next == precedence && precedence == Precedence::Assign { - rhs = parse_expr(input, rhs, allow_struct, next)?; - } else { - break; - } - } - lhs = Expr::Binary(ExprBinary { - attrs: Vec::new(), - left: Box::new(lhs), - op: op, - right: Box::new(rhs), - }); - } else if Precedence::Cast >= base && input.peek(Token![as]) { - let as_token: Token![as] = input.parse()?; - let ty = input.call(Type::without_plus)?; - lhs = Expr::Cast(ExprCast { - attrs: Vec::new(), - expr: Box::new(lhs), - as_token: as_token, - ty: Box::new(ty), - }); - } else { - break; - } - } - Ok(lhs) - } - - fn peek_precedence(input: ParseStream) -> Precedence { - if let Ok(op) = input.fork().parse() { - Precedence::of(&op) - } else if input.peek(Token![=]) && !input.peek(Token![=>]) { - Precedence::Assign - } else if input.peek(Token![<-]) { - Precedence::Placement - } else if input.peek(Token![..]) { - Precedence::Range - } else if input.peek(Token![as]) || input.peek(Token![:]) && !input.peek(Token![::]) { - Precedence::Cast - } else { - Precedence::Any - } - } - - // Parse an arbitrary expression. 
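The removed `parse_expr`/`peek_precedence` pair is a precedence-climbing parser: after consuming an operator it keeps folding the right-hand side for as long as the next operator binds at least as tightly. A standalone sketch of the same idea over a hypothetical mini-grammar of integers, `+`, and `*` (not syn's API):

```rust
#[derive(Clone, Copy)]
enum Tok { Num(i64), Plus, Star }

fn precedence(t: Tok) -> u8 {
    match t { Tok::Plus => 1, Tok::Star => 2, Tok::Num(_) => 0 }
}

// Precedence climbing: parse an operand, then while the next operator binds
// at least as tightly as `min`, recurse for its right-hand side.
fn parse(tokens: &[Tok], pos: &mut usize, min: u8) -> i64 {
    let mut lhs = match tokens[*pos] {
        Tok::Num(n) => { *pos += 1; n }
        _ => panic!("expected a number"),
    };
    while *pos < tokens.len() {
        let op = tokens[*pos];
        let prec = precedence(op);
        if prec < min { break; }
        *pos += 1;
        // Left-associative operators: the RHS may only contain tighter operators.
        let rhs = parse(tokens, pos, prec + 1);
        lhs = match op {
            Tok::Plus => lhs + rhs,
            Tok::Star => lhs * rhs,
            Tok::Num(_) => unreachable!(),
        };
    }
    lhs
}

fn main() {
    // 1 + 2 * 3 == 7, because `*` binds tighter than `+`.
    let tokens = [Tok::Num(1), Tok::Plus, Tok::Num(2), Tok::Star, Tok::Num(3)];
    assert_eq!(parse(&tokens, &mut 0, 0), 7);
}
```

The syn version layers assignment, placement, range, and cast handling on top of the same loop, but the core shape is this one.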
- fn ambiguous_expr(input: ParseStream, allow_struct: AllowStruct) -> Result { - let lhs = unary_expr(input, allow_struct)?; - parse_expr(input, lhs, allow_struct, Precedence::Any) - } - - // - // & - // &mut - // box - #[cfg(feature = "full")] - fn unary_expr(input: ParseStream, allow_struct: AllowStruct) -> Result { - let ahead = input.fork(); - ahead.call(Attribute::parse_outer)?; - if ahead.peek(Token![&]) - || ahead.peek(Token![box]) - || ahead.peek(Token![*]) - || ahead.peek(Token![!]) - || ahead.peek(Token![-]) - { - let attrs = input.call(Attribute::parse_outer)?; - if input.peek(Token![&]) { - Ok(Expr::Reference(ExprReference { - attrs: attrs, - and_token: input.parse()?, - mutability: input.parse()?, - expr: Box::new(unary_expr(input, allow_struct)?), - })) - } else if input.peek(Token![box]) { - Ok(Expr::Box(ExprBox { - attrs: attrs, - box_token: input.parse()?, - expr: Box::new(unary_expr(input, allow_struct)?), - })) - } else { - Ok(Expr::Unary(ExprUnary { - attrs: attrs, - op: input.parse()?, - expr: Box::new(unary_expr(input, allow_struct)?), - })) - } - } else { - trailer_expr(input, allow_struct) - } - } - - #[cfg(not(feature = "full"))] - fn unary_expr(input: ParseStream, allow_struct: AllowStruct) -> Result { - let ahead = input.fork(); - ahead.call(Attribute::parse_outer)?; - if ahead.peek(Token![*]) || ahead.peek(Token![!]) || ahead.peek(Token![-]) { - Ok(Expr::Unary(ExprUnary { - attrs: input.call(Attribute::parse_outer)?, - op: input.parse()?, - expr: Box::new(unary_expr(input, allow_struct)?), - })) - } else { - trailer_expr(input, allow_struct) - } - } - - // (..) ... - // . (..) ... - // . ... - // . ... - // [ ] ... - // ? ... - #[cfg(feature = "full")] - fn trailer_expr(input: ParseStream, allow_struct: AllowStruct) -> Result { - if input.peek(token::Group) { - return input.call(expr_group).map(Expr::Group); - } - - let outer_attrs = input.call(Attribute::parse_outer)?; - - let atom = atom_expr(input, allow_struct)?; - let mut e = trailer_helper(input, atom)?; - - let inner_attrs = e.replace_attrs(Vec::new()); - let attrs = private::attrs(outer_attrs, inner_attrs); - e.replace_attrs(attrs); - Ok(e) - } - - #[cfg(feature = "full")] - fn trailer_helper(input: ParseStream, mut e: Expr) -> Result { - loop { - if input.peek(token::Paren) { - let content; - e = Expr::Call(ExprCall { - attrs: Vec::new(), - func: Box::new(e), - paren_token: parenthesized!(content in input), - args: content.parse_terminated(Expr::parse)?, - }); - } else if input.peek(Token![.]) && !input.peek(Token![..]) { - let dot_token: Token![.] 
= input.parse()?; - let member: Member = input.parse()?; - let turbofish = if member.is_named() && input.peek(Token![::]) { - Some(MethodTurbofish { - colon2_token: input.parse()?, - lt_token: input.parse()?, - args: { - let mut args = Punctuated::new(); - loop { - if input.peek(Token![>]) { - break; - } - let value = input.call(generic_method_argument)?; - args.push_value(value); - if input.peek(Token![>]) { - break; - } - let punct = input.parse()?; - args.push_punct(punct); - } - args - }, - gt_token: input.parse()?, - }) - } else { - None - }; - - if turbofish.is_some() || input.peek(token::Paren) { - if let Member::Named(method) = member { - let content; - e = Expr::MethodCall(ExprMethodCall { - attrs: Vec::new(), - receiver: Box::new(e), - dot_token: dot_token, - method: method, - turbofish: turbofish, - paren_token: parenthesized!(content in input), - args: content.parse_terminated(Expr::parse)?, - }); - continue; - } - } - - e = Expr::Field(ExprField { - attrs: Vec::new(), - base: Box::new(e), - dot_token: dot_token, - member: member, - }); - } else if input.peek(token::Bracket) { - let content; - e = Expr::Index(ExprIndex { - attrs: Vec::new(), - expr: Box::new(e), - bracket_token: bracketed!(content in input), - index: content.parse()?, - }); - } else if input.peek(Token![?]) { - e = Expr::Try(ExprTry { - attrs: Vec::new(), - expr: Box::new(e), - question_token: input.parse()?, - }); - } else { - break; - } - } - Ok(e) - } - - #[cfg(not(feature = "full"))] - fn trailer_expr(input: ParseStream, allow_struct: AllowStruct) -> Result { - let mut e = atom_expr(input, allow_struct)?; - - loop { - if input.peek(token::Paren) { - let content; - e = Expr::Call(ExprCall { - attrs: Vec::new(), - func: Box::new(e), - paren_token: parenthesized!(content in input), - args: content.parse_terminated(Expr::parse)?, - }); - } else if input.peek(Token![.]) { - e = Expr::Field(ExprField { - attrs: Vec::new(), - base: Box::new(e), - dot_token: input.parse()?, - member: input.parse()?, - }); - } else if input.peek(token::Bracket) { - let content; - e = Expr::Index(ExprIndex { - attrs: Vec::new(), - expr: Box::new(e), - bracket_token: bracketed!(content in input), - index: content.parse()?, - }); - } else { - break; - } - } - - Ok(e) - } - - // Parse all atomic expressions which don't have to worry about precedence - // interactions, as they are fully contained. 
- #[cfg(feature = "full")] - fn atom_expr(input: ParseStream, allow_struct: AllowStruct) -> Result { - if input.peek(token::Group) { - input.call(expr_group).map(Expr::Group) - } else if input.peek(Lit) { - input.call(expr_lit).map(Expr::Lit) - } else if input.peek(Token![async]) - && (input.peek2(token::Brace) || input.peek2(Token![move]) && input.peek3(token::Brace)) - { - input.call(expr_async).map(Expr::Async) - } else if input.peek(Token![try]) && input.peek2(token::Brace) { - input.call(expr_try_block).map(Expr::TryBlock) - } else if input.peek(Token![|]) - || input.peek(Token![async]) && (input.peek2(Token![|]) || input.peek2(Token![move])) - || input.peek(Token![static]) - || input.peek(Token![move]) - { - expr_closure(input, allow_struct).map(Expr::Closure) - } else if input.peek(Ident) - || input.peek(Token![::]) - || input.peek(Token![<]) - || input.peek(Token![self]) - || input.peek(Token![Self]) - || input.peek(Token![super]) - || input.peek(Token![extern]) - || input.peek(Token![crate]) - { - path_or_macro_or_struct(input, allow_struct) - } else if input.peek(token::Paren) { - paren_or_tuple(input) - } else if input.peek(Token![break]) { - expr_break(input, allow_struct).map(Expr::Break) - } else if input.peek(Token![continue]) { - input.call(expr_continue).map(Expr::Continue) - } else if input.peek(Token![return]) { - expr_ret(input, allow_struct).map(Expr::Return) - } else if input.peek(token::Bracket) { - array_or_repeat(input) - } else if input.peek(Token![let]) { - input.call(expr_let).map(Expr::Let) - } else if input.peek(Token![if]) { - input.call(expr_if).map(Expr::If) - } else if input.peek(Token![while]) { - input.call(expr_while).map(Expr::While) - } else if input.peek(Token![for]) { - input.call(expr_for_loop).map(Expr::ForLoop) - } else if input.peek(Token![loop]) { - input.call(expr_loop).map(Expr::Loop) - } else if input.peek(Token![match]) { - input.parse().map(Expr::Match) - } else if input.peek(Token![yield]) { - input.call(expr_yield).map(Expr::Yield) - } else if input.peek(Token![unsafe]) { - input.call(expr_unsafe).map(Expr::Unsafe) - } else if input.peek(token::Brace) { - input.call(expr_block).map(Expr::Block) - } else if input.peek(Token![..]) { - expr_range(input, allow_struct).map(Expr::Range) - } else if input.peek(Lifetime) { - let the_label: Label = input.parse()?; - let mut expr = if input.peek(Token![while]) { - Expr::While(input.call(expr_while)?) - } else if input.peek(Token![for]) { - Expr::ForLoop(input.call(expr_for_loop)?) - } else if input.peek(Token![loop]) { - Expr::Loop(input.call(expr_loop)?) - } else if input.peek(token::Brace) { - Expr::Block(input.call(expr_block)?) - } else { - return Err(input.error("expected loop or block expression")); - }; - match expr { - Expr::While(ExprWhile { ref mut label, .. }) - | Expr::ForLoop(ExprForLoop { ref mut label, .. }) - | Expr::Loop(ExprLoop { ref mut label, .. }) - | Expr::Block(ExprBlock { ref mut label, .. 
}) => *label = Some(the_label), - _ => unreachable!(), - } - Ok(expr) - } else { - Err(input.error("expected expression")) - } - } - - #[cfg(not(feature = "full"))] - fn atom_expr(input: ParseStream, _allow_struct: AllowStruct) -> Result { - if input.peek(Lit) { - input.call(expr_lit).map(Expr::Lit) - } else if input.peek(token::Paren) { - input.call(expr_paren).map(Expr::Paren) - } else if input.peek(Ident) - || input.peek(Token![::]) - || input.peek(Token![<]) - || input.peek(Token![self]) - || input.peek(Token![Self]) - || input.peek(Token![super]) - || input.peek(Token![extern]) - || input.peek(Token![crate]) - { - input.parse().map(Expr::Path) - } else { - Err(input.error("unsupported expression; enable syn's features=[\"full\"]")) - } - } - - #[cfg(feature = "full")] - fn path_or_macro_or_struct(input: ParseStream, allow_struct: AllowStruct) -> Result { - let expr: ExprPath = input.parse()?; - if expr.qself.is_some() { - return Ok(Expr::Path(expr)); - } - - if input.peek(Token![!]) && !input.peek(Token![!=]) { - let mut contains_arguments = false; - for segment in &expr.path.segments { - match segment.arguments { - PathArguments::None => {} - PathArguments::AngleBracketed(_) | PathArguments::Parenthesized(_) => { - contains_arguments = true; - } - } - } - - if !contains_arguments { - let bang_token: Token![!] = input.parse()?; - let (delimiter, tts) = mac::parse_delimiter(input)?; - return Ok(Expr::Macro(ExprMacro { - attrs: Vec::new(), - mac: Macro { - path: expr.path, - bang_token: bang_token, - delimiter: delimiter, - tts: tts, - }, - })); - } - } - - if allow_struct.0 && input.peek(token::Brace) { - let outer_attrs = Vec::new(); - expr_struct_helper(input, outer_attrs, expr.path).map(Expr::Struct) - } else { - Ok(Expr::Path(expr)) - } - } - - #[cfg(feature = "full")] - fn paren_or_tuple(input: ParseStream) -> Result { - let content; - let paren_token = parenthesized!(content in input); - let inner_attrs = content.call(Attribute::parse_inner)?; - if content.is_empty() { - return Ok(Expr::Tuple(ExprTuple { - attrs: inner_attrs, - paren_token: paren_token, - elems: Punctuated::new(), - })); - } - - let first: Expr = content.parse()?; - if content.is_empty() { - return Ok(Expr::Paren(ExprParen { - attrs: inner_attrs, - paren_token: paren_token, - expr: Box::new(first), - })); - } - - let mut elems = Punctuated::new(); - elems.push_value(first); - while !content.is_empty() { - let punct = content.parse()?; - elems.push_punct(punct); - if content.is_empty() { - break; - } - let value = content.parse()?; - elems.push_value(value); - } - Ok(Expr::Tuple(ExprTuple { - attrs: inner_attrs, - paren_token: paren_token, - elems: elems, - })) - } - - #[cfg(feature = "full")] - fn array_or_repeat(input: ParseStream) -> Result { - let content; - let bracket_token = bracketed!(content in input); - let inner_attrs = content.call(Attribute::parse_inner)?; - if content.is_empty() { - return Ok(Expr::Array(ExprArray { - attrs: inner_attrs, - bracket_token: bracket_token, - elems: Punctuated::new(), - })); - } - - let first: Expr = content.parse()?; - if content.is_empty() || content.peek(Token![,]) { - let mut elems = Punctuated::new(); - elems.push_value(first); - while !content.is_empty() { - let punct = content.parse()?; - elems.push_punct(punct); - if content.is_empty() { - break; - } - let value = content.parse()?; - elems.push_value(value); - } - Ok(Expr::Array(ExprArray { - attrs: inner_attrs, - bracket_token: bracket_token, - elems: elems, - })) - } else if content.peek(Token![;]) { - let 
semi_token: Token![;] = content.parse()?; - let len: Expr = content.parse()?; - Ok(Expr::Repeat(ExprRepeat { - attrs: inner_attrs, - bracket_token: bracket_token, - expr: Box::new(first), - semi_token: semi_token, - len: Box::new(len), - })) - } else { - Err(content.error("expected `,` or `;`")) - } - } - - #[cfg(feature = "full")] - fn expr_early(input: ParseStream) -> Result { - let mut attrs = input.call(Attribute::parse_outer)?; - let mut expr = if input.peek(Token![if]) { - Expr::If(input.call(expr_if)?) - } else if input.peek(Token![while]) { - Expr::While(input.call(expr_while)?) - } else if input.peek(Token![for]) { - Expr::ForLoop(input.call(expr_for_loop)?) - } else if input.peek(Token![loop]) { - Expr::Loop(input.call(expr_loop)?) - } else if input.peek(Token![match]) { - Expr::Match(input.parse()?) - } else if input.peek(Token![try]) && input.peek2(token::Brace) { - Expr::TryBlock(input.call(expr_try_block)?) - } else if input.peek(Token![unsafe]) { - Expr::Unsafe(input.call(expr_unsafe)?) - } else if input.peek(token::Brace) { - Expr::Block(input.call(expr_block)?) - } else { - let allow_struct = AllowStruct(true); - let mut expr = unary_expr(input, allow_struct)?; - - attrs.extend(expr.replace_attrs(Vec::new())); - expr.replace_attrs(attrs); - - return parse_expr(input, expr, allow_struct, Precedence::Any); - }; - - if input.peek(Token![.]) || input.peek(Token![?]) { - expr = trailer_helper(input, expr)?; - - attrs.extend(expr.replace_attrs(Vec::new())); - expr.replace_attrs(attrs); - - let allow_struct = AllowStruct(true); - return parse_expr(input, expr, allow_struct, Precedence::Any); - } - - attrs.extend(expr.replace_attrs(Vec::new())); - expr.replace_attrs(attrs); - Ok(expr) - } - - pub fn expr_lit(input: ParseStream) -> Result { - Ok(ExprLit { - attrs: Vec::new(), - lit: input.parse()?, - }) - } - - #[cfg(feature = "full")] - fn expr_group(input: ParseStream) -> Result { - let group = private::parse_group(input)?; - Ok(ExprGroup { - attrs: Vec::new(), - group_token: group.token, - expr: group.content.parse()?, - }) - } - - #[cfg(not(feature = "full"))] - fn expr_paren(input: ParseStream) -> Result { - let content; - Ok(ExprParen { - attrs: Vec::new(), - paren_token: parenthesized!(content in input), - expr: content.parse()?, - }) - } - - #[cfg(feature = "full")] - fn generic_method_argument(input: ParseStream) -> Result { - // TODO parse const generics as well - input.parse().map(GenericMethodArgument::Type) - } - - #[cfg(feature = "full")] - fn expr_let(input: ParseStream) -> Result { - Ok(ExprLet { - attrs: Vec::new(), - let_token: input.parse()?, - pats: { - let mut pats = Punctuated::new(); - input.parse::>()?; - let value: Pat = input.parse()?; - pats.push_value(value); - while input.peek(Token![|]) && !input.peek(Token![||]) && !input.peek(Token![|=]) { - let punct = input.parse()?; - pats.push_punct(punct); - let value: Pat = input.parse()?; - pats.push_value(value); - } - pats - }, - eq_token: input.parse()?, - expr: Box::new(input.call(expr_no_struct)?), - }) - } - - #[cfg(feature = "full")] - fn expr_if(input: ParseStream) -> Result { - Ok(ExprIf { - attrs: Vec::new(), - if_token: input.parse()?, - cond: Box::new(input.call(expr_no_struct)?), - then_branch: input.parse()?, - else_branch: { - if input.peek(Token![else]) { - Some(input.call(else_block)?) 
- } else { - None - } - }, - }) - } - - #[cfg(feature = "full")] - fn else_block(input: ParseStream) -> Result<(Token![else], Box)> { - let else_token: Token![else] = input.parse()?; - - let lookahead = input.lookahead1(); - let else_branch = if input.peek(Token![if]) { - input.call(expr_if).map(Expr::If)? - } else if input.peek(token::Brace) { - Expr::Block(ExprBlock { - attrs: Vec::new(), - label: None, - block: input.parse()?, - }) - } else { - return Err(lookahead.error()); - }; - - Ok((else_token, Box::new(else_branch))) - } - - #[cfg(feature = "full")] - fn expr_for_loop(input: ParseStream) -> Result { - let label: OptionTypeParam> - /// over the type parameters in `self.params`. - pub fn type_params(&self) -> TypeParams { - TypeParams(self.params.iter()) - } - - /// Returns an - /// Iterator<Item = &mut TypeParam> - /// over the type parameters in `self.params`. - pub fn type_params_mut(&mut self) -> TypeParamsMut { - TypeParamsMut(self.params.iter_mut()) - } - - /// Returns an - /// Iterator<Item = &LifetimeDef> - /// over the lifetime parameters in `self.params`. - pub fn lifetimes(&self) -> Lifetimes { - Lifetimes(self.params.iter()) - } - - /// Returns an - /// Iterator<Item = &mut LifetimeDef> - /// over the lifetime parameters in `self.params`. - pub fn lifetimes_mut(&mut self) -> LifetimesMut { - LifetimesMut(self.params.iter_mut()) - } - - /// Returns an - /// Iterator<Item = &ConstParam> - /// over the constant parameters in `self.params`. - pub fn const_params(&self) -> ConstParams { - ConstParams(self.params.iter()) - } - - /// Returns an - /// Iterator<Item = &mut ConstParam> - /// over the constant parameters in `self.params`. - pub fn const_params_mut(&mut self) -> ConstParamsMut { - ConstParamsMut(self.params.iter_mut()) - } - - /// Initializes an empty `where`-clause if there is not one present already. - pub fn make_where_clause(&mut self) -> &mut WhereClause { - // This is Option::get_or_insert_with in Rust 1.20. 
- if self.where_clause.is_none() { - self.where_clause = Some(WhereClause { - where_token: ::default(), - predicates: Punctuated::new(), - }); - } - match self.where_clause { - Some(ref mut where_clause) => where_clause, - None => unreachable!(), - } - } -} - -pub struct TypeParams<'a>(Iter<'a, GenericParam>); - -impl<'a> Iterator for TypeParams<'a> { - type Item = &'a TypeParam; - - fn next(&mut self) -> Option { - let next = match self.0.next() { - Some(item) => item, - None => return None, - }; - if let GenericParam::Type(ref type_param) = *next { - Some(type_param) - } else { - self.next() - } - } -} - -pub struct TypeParamsMut<'a>(IterMut<'a, GenericParam>); - -impl<'a> Iterator for TypeParamsMut<'a> { - type Item = &'a mut TypeParam; - - fn next(&mut self) -> Option { - let next = match self.0.next() { - Some(item) => item, - None => return None, - }; - if let GenericParam::Type(ref mut type_param) = *next { - Some(type_param) - } else { - self.next() - } - } -} - -pub struct Lifetimes<'a>(Iter<'a, GenericParam>); - -impl<'a> Iterator for Lifetimes<'a> { - type Item = &'a LifetimeDef; - - fn next(&mut self) -> Option { - let next = match self.0.next() { - Some(item) => item, - None => return None, - }; - if let GenericParam::Lifetime(ref lifetime) = *next { - Some(lifetime) - } else { - self.next() - } - } -} - -pub struct LifetimesMut<'a>(IterMut<'a, GenericParam>); - -impl<'a> Iterator for LifetimesMut<'a> { - type Item = &'a mut LifetimeDef; - - fn next(&mut self) -> Option { - let next = match self.0.next() { - Some(item) => item, - None => return None, - }; - if let GenericParam::Lifetime(ref mut lifetime) = *next { - Some(lifetime) - } else { - self.next() - } - } -} - -pub struct ConstParams<'a>(Iter<'a, GenericParam>); - -impl<'a> Iterator for ConstParams<'a> { - type Item = &'a ConstParam; - - fn next(&mut self) -> Option { - let next = match self.0.next() { - Some(item) => item, - None => return None, - }; - if let GenericParam::Const(ref const_param) = *next { - Some(const_param) - } else { - self.next() - } - } -} - -pub struct ConstParamsMut<'a>(IterMut<'a, GenericParam>); - -impl<'a> Iterator for ConstParamsMut<'a> { - type Item = &'a mut ConstParam; - - fn next(&mut self) -> Option { - let next = match self.0.next() { - Some(item) => item, - None => return None, - }; - if let GenericParam::Const(ref mut const_param) = *next { - Some(const_param) - } else { - self.next() - } - } -} - -/// Returned by `Generics::split_for_impl`. -/// -/// *This type is available if Syn is built with the `"derive"` or `"full"` -/// feature and the `"printing"` feature.* -#[cfg(feature = "printing")] -#[cfg_attr(feature = "extra-traits", derive(Debug, Eq, PartialEq, Hash))] -#[cfg_attr(feature = "clone-impls", derive(Clone))] -pub struct ImplGenerics<'a>(&'a Generics); - -/// Returned by `Generics::split_for_impl`. -/// -/// *This type is available if Syn is built with the `"derive"` or `"full"` -/// feature and the `"printing"` feature.* -#[cfg(feature = "printing")] -#[cfg_attr(feature = "extra-traits", derive(Debug, Eq, PartialEq, Hash))] -#[cfg_attr(feature = "clone-impls", derive(Clone))] -pub struct TypeGenerics<'a>(&'a Generics); - -/// Returned by `TypeGenerics::as_turbofish`. 
-/// -/// *This type is available if Syn is built with the `"derive"` or `"full"` -/// feature and the `"printing"` feature.* -#[cfg(feature = "printing")] -#[cfg_attr(feature = "extra-traits", derive(Debug, Eq, PartialEq, Hash))] -#[cfg_attr(feature = "clone-impls", derive(Clone))] -pub struct Turbofish<'a>(&'a Generics); - -#[cfg(feature = "printing")] -impl Generics { - /// Split a type's generics into the pieces required for impl'ing a trait - /// for that type. - /// - /// ```edition2018 - /// # use proc_macro2::{Span, Ident}; - /// # use quote::quote; - /// # - /// # fn main() { - /// # let generics: syn::Generics = Default::default(); - /// # let name = Ident::new("MyType", Span::call_site()); - /// # - /// let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); - /// quote! { - /// impl #impl_generics MyTrait for #name #ty_generics #where_clause { - /// // ... - /// } - /// } - /// # ; - /// # } - /// ``` - /// - /// *This method is available if Syn is built with the `"derive"` or - /// `"full"` feature and the `"printing"` feature.* - pub fn split_for_impl(&self) -> (ImplGenerics, TypeGenerics, Option<&WhereClause>) { - ( - ImplGenerics(self), - TypeGenerics(self), - self.where_clause.as_ref(), - ) - } -} - -#[cfg(feature = "printing")] -impl<'a> TypeGenerics<'a> { - /// Turn a type's generics like `` into a turbofish like `::`. - /// - /// *This method is available if Syn is built with the `"derive"` or - /// `"full"` feature and the `"printing"` feature.* - pub fn as_turbofish(&self) -> Turbofish { - Turbofish(self.0) - } -} - -ast_struct! { - /// A set of bound lifetimes: `for<'a, 'b, 'c>`. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - #[derive(Default)] - pub struct BoundLifetimes { - pub for_token: Token![for], - pub lt_token: Token![<], - pub lifetimes: Punctuated, - pub gt_token: Token![>], - } -} - -impl LifetimeDef { - pub fn new(lifetime: Lifetime) -> Self { - LifetimeDef { - attrs: Vec::new(), - lifetime: lifetime, - colon_token: None, - bounds: Punctuated::new(), - } - } -} - -impl From for TypeParam { - fn from(ident: Ident) -> Self { - TypeParam { - attrs: vec![], - ident: ident, - colon_token: None, - bounds: Punctuated::new(), - eq_token: None, - default: None, - } - } -} - -ast_enum_of_structs! { - /// A trait or lifetime used as a bound on a type parameter. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - pub enum TypeParamBound { - pub Trait(TraitBound), - pub Lifetime(Lifetime), - } -} - -ast_struct! { - /// A trait used as a bound on a type parameter. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - pub struct TraitBound { - pub paren_token: Option, - pub modifier: TraitBoundModifier, - /// The `for<'a>` in `for<'a> Foo<&'a T>` - pub lifetimes: Option, - /// The `Foo<&'a T>` in `for<'a> Foo<&'a T>` - pub path: Path, - } -} - -ast_enum! { - /// A modifier on a trait bound, currently only used for the `?` in - /// `?Sized`. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - #[cfg_attr(feature = "clone-impls", derive(Copy))] - pub enum TraitBoundModifier { - None, - Maybe(Token![?]), - } -} - -ast_struct! { - /// A `where` clause in a definition: `where T: Deserialize<'de>, D: - /// 'static`. 
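// Illustrative sketch, not part of the removed syn source: what the three
// pieces returned by split_for_impl() print as when interpolated with quote,
// assuming syn 0.15 with its default features plus the quote crate. The
// generics are built with syn::parse_str, which goes through the Parse impl
// defined further down in this file.
use quote::quote;

fn main() {
    let generics: syn::Generics = syn::parse_str("<'a, T: Clone + 'a>").unwrap();
    let (impl_generics, ty_generics, where_clause) = generics.split_for_impl();

    // The impl position keeps the bounds, the type position keeps only the
    // parameter names, and the turbofish is the type position prefixed by `::`.
    assert!(quote!(#impl_generics).to_string().contains("Clone"));
    assert!(!quote!(#ty_generics).to_string().contains("Clone"));
    let turbofish = ty_generics.as_turbofish();
    assert!(quote!(#turbofish).to_string().starts_with("::"));

    // No `where` clause was parsed, so the third piece is None.
    assert!(where_clause.is_none());
}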
- /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - pub struct WhereClause { - pub where_token: Token![where], - pub predicates: Punctuated, - } -} - -ast_enum_of_structs! { - /// A single predicate in a `where` clause: `T: Deserialize<'de>`. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. - /// - /// [syntax tree enum]: enum.Expr.html#syntax-tree-enums - pub enum WherePredicate { - /// A type predicate in a `where` clause: `for<'c> Foo<'c>: Trait<'c>`. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Type(PredicateType { - /// Any lifetimes from a `for` binding - pub lifetimes: Option, - /// The type being bounded - pub bounded_ty: Type, - pub colon_token: Token![:], - /// Trait and lifetime bounds (`Clone+Send+'static`) - pub bounds: Punctuated, - }), - - /// A lifetime predicate in a `where` clause: `'a: 'b + 'c`. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Lifetime(PredicateLifetime { - pub lifetime: Lifetime, - pub colon_token: Token![:], - pub bounds: Punctuated, - }), - - /// An equality predicate in a `where` clause (unsupported). - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Eq(PredicateEq { - pub lhs_ty: Type, - pub eq_token: Token![=], - pub rhs_ty: Type, - }), - } -} - -#[cfg(feature = "parsing")] -pub mod parsing { - use super::*; - - use parse::{Parse, ParseStream, Result}; - - impl Parse for Generics { - fn parse(input: ParseStream) -> Result { - if !input.peek(Token![<]) { - return Ok(Generics::default()); - } - - let lt_token: Token![<] = input.parse()?; - - let mut params = Punctuated::new(); - let mut has_type_param = false; - loop { - if input.peek(Token![>]) { - break; - } - - let attrs = input.call(Attribute::parse_outer)?; - let lookahead = input.lookahead1(); - if !has_type_param && lookahead.peek(Lifetime) { - params.push_value(GenericParam::Lifetime(LifetimeDef { - attrs: attrs, - ..input.parse()? - })); - } else if lookahead.peek(Ident) { - has_type_param = true; - params.push_value(GenericParam::Type(TypeParam { - attrs: attrs, - ..input.parse()? - })); - } else { - return Err(lookahead.error()); - } - - if input.peek(Token![>]) { - break; - } - let punct = input.parse()?; - params.push_punct(punct); - } - - let gt_token: Token![>] = input.parse()?; - - Ok(Generics { - lt_token: Some(lt_token), - params: params, - gt_token: Some(gt_token), - where_clause: None, - }) - } - } - - impl Parse for GenericParam { - fn parse(input: ParseStream) -> Result { - let attrs = input.call(Attribute::parse_outer)?; - - let lookahead = input.lookahead1(); - if lookahead.peek(Ident) { - Ok(GenericParam::Type(TypeParam { - attrs: attrs, - ..input.parse()? - })) - } else if lookahead.peek(Lifetime) { - Ok(GenericParam::Lifetime(LifetimeDef { - attrs: attrs, - ..input.parse()? - })) - } else if lookahead.peek(Token![const]) { - Ok(GenericParam::Const(ConstParam { - attrs: attrs, - ..input.parse()? - })) - } else { - Err(lookahead.error()) - } - } - } - - impl Parse for LifetimeDef { - fn parse(input: ParseStream) -> Result { - let has_colon; - Ok(LifetimeDef { - attrs: input.call(Attribute::parse_outer)?, - lifetime: input.parse()?, - colon_token: { - if input.peek(Token![:]) { - has_colon = true; - Some(input.parse()?) 
- } else { - has_colon = false; - None - } - }, - bounds: { - let mut bounds = Punctuated::new(); - if has_colon { - loop { - if input.peek(Token![,]) || input.peek(Token![>]) { - break; - } - let value = input.parse()?; - bounds.push_value(value); - if !input.peek(Token![+]) { - break; - } - let punct = input.parse()?; - bounds.push_punct(punct); - } - } - bounds - }, - }) - } - } - - impl Parse for BoundLifetimes { - fn parse(input: ParseStream) -> Result { - Ok(BoundLifetimes { - for_token: input.parse()?, - lt_token: input.parse()?, - lifetimes: { - let mut lifetimes = Punctuated::new(); - while !input.peek(Token![>]) { - lifetimes.push_value(input.parse()?); - if input.peek(Token![>]) { - break; - } - lifetimes.push_punct(input.parse()?); - } - lifetimes - }, - gt_token: input.parse()?, - }) - } - } - - impl Parse for Option { - fn parse(input: ParseStream) -> Result { - if input.peek(Token![for]) { - input.parse().map(Some) - } else { - Ok(None) - } - } - } - - impl Parse for TypeParam { - fn parse(input: ParseStream) -> Result { - let has_colon; - let has_default; - Ok(TypeParam { - attrs: input.call(Attribute::parse_outer)?, - ident: input.parse()?, - colon_token: { - if input.peek(Token![:]) { - has_colon = true; - Some(input.parse()?) - } else { - has_colon = false; - None - } - }, - bounds: { - let mut bounds = Punctuated::new(); - if has_colon { - loop { - if input.peek(Token![,]) - || input.peek(Token![>]) - || input.peek(Token![=]) - { - break; - } - let value = input.parse()?; - bounds.push_value(value); - if !input.peek(Token![+]) { - break; - } - let punct = input.parse()?; - bounds.push_punct(punct); - } - } - bounds - }, - eq_token: { - if input.peek(Token![=]) { - has_default = true; - Some(input.parse()?) - } else { - has_default = false; - None - } - }, - default: { - if has_default { - Some(input.parse()?) 
- } else { - None - } - }, - }) - } - } - - impl Parse for TypeParamBound { - fn parse(input: ParseStream) -> Result { - if input.peek(Lifetime) { - return input.parse().map(TypeParamBound::Lifetime); - } - - if input.peek(token::Paren) { - let content; - let paren_token = parenthesized!(content in input); - let mut bound: TraitBound = content.parse()?; - bound.paren_token = Some(paren_token); - return Ok(TypeParamBound::Trait(bound)); - } - - input.parse().map(TypeParamBound::Trait) - } - } - - impl Parse for TraitBound { - fn parse(input: ParseStream) -> Result { - let modifier: TraitBoundModifier = input.parse()?; - let lifetimes: Option = input.parse()?; - - let mut path: Path = input.parse()?; - if path.segments.last().unwrap().value().arguments.is_empty() - && input.peek(token::Paren) - { - let parenthesized = PathArguments::Parenthesized(input.parse()?); - path.segments.last_mut().unwrap().value_mut().arguments = parenthesized; - } - - Ok(TraitBound { - paren_token: None, - modifier: modifier, - lifetimes: lifetimes, - path: path, - }) - } - } - - impl Parse for TraitBoundModifier { - fn parse(input: ParseStream) -> Result { - if input.peek(Token![?]) { - input.parse().map(TraitBoundModifier::Maybe) - } else { - Ok(TraitBoundModifier::None) - } - } - } - - impl Parse for ConstParam { - fn parse(input: ParseStream) -> Result { - let mut default = None; - Ok(ConstParam { - attrs: input.call(Attribute::parse_outer)?, - const_token: input.parse()?, - ident: input.parse()?, - colon_token: input.parse()?, - ty: input.parse()?, - eq_token: { - if input.peek(Token![=]) { - let eq_token = input.parse()?; - default = Some(input.parse::()?); - Some(eq_token) - } else { - None - } - }, - default: default, - }) - } - } - - impl Parse for WhereClause { - fn parse(input: ParseStream) -> Result { - Ok(WhereClause { - where_token: input.parse()?, - predicates: { - let mut predicates = Punctuated::new(); - loop { - if input.is_empty() - || input.peek(token::Brace) - || input.peek(Token![,]) - || input.peek(Token![;]) - || input.peek(Token![:]) && !input.peek(Token![::]) - || input.peek(Token![=]) - { - break; - } - let value = input.parse()?; - predicates.push_value(value); - if !input.peek(Token![,]) { - break; - } - let punct = input.parse()?; - predicates.push_punct(punct); - } - predicates - }, - }) - } - } - - impl Parse for Option { - fn parse(input: ParseStream) -> Result { - if input.peek(Token![where]) { - input.parse().map(Some) - } else { - Ok(None) - } - } - } - - impl Parse for WherePredicate { - fn parse(input: ParseStream) -> Result { - if input.peek(Lifetime) && input.peek2(Token![:]) { - Ok(WherePredicate::Lifetime(PredicateLifetime { - lifetime: input.parse()?, - colon_token: input.parse()?, - bounds: { - let mut bounds = Punctuated::new(); - loop { - if input.peek(token::Brace) - || input.peek(Token![,]) - || input.peek(Token![;]) - || input.peek(Token![:]) - || input.peek(Token![=]) - { - break; - } - let value = input.parse()?; - bounds.push_value(value); - if !input.peek(Token![+]) { - break; - } - let punct = input.parse()?; - bounds.push_punct(punct); - } - bounds - }, - })) - } else { - Ok(WherePredicate::Type(PredicateType { - lifetimes: input.parse()?, - bounded_ty: input.parse()?, - colon_token: input.parse()?, - bounds: { - let mut bounds = Punctuated::new(); - loop { - if input.peek(token::Brace) - || input.peek(Token![,]) - || input.peek(Token![;]) - || input.peek(Token![:]) && !input.peek(Token![::]) - || input.peek(Token![=]) - { - break; - } - let value = 
input.parse()?; - bounds.push_value(value); - if !input.peek(Token![+]) { - break; - } - let punct = input.parse()?; - bounds.push_punct(punct); - } - bounds - }, - })) - } - } - } -} - -#[cfg(feature = "printing")] -mod printing { - use super::*; - - use proc_macro2::TokenStream; - use quote::{ToTokens, TokenStreamExt}; - - use attr::FilterAttrs; - use print::TokensOrDefault; - - impl ToTokens for Generics { - fn to_tokens(&self, tokens: &mut TokenStream) { - if self.params.is_empty() { - return; - } - - TokensOrDefault(&self.lt_token).to_tokens(tokens); - - // Print lifetimes before types and consts, regardless of their - // order in self.params. - // - // TODO: ordering rules for const parameters vs type parameters have - // not been settled yet. https://github.com/rust-lang/rust/issues/44580 - let mut trailing_or_empty = true; - for param in self.params.pairs() { - if let GenericParam::Lifetime(_) = **param.value() { - param.to_tokens(tokens); - trailing_or_empty = param.punct().is_some(); - } - } - for param in self.params.pairs() { - match **param.value() { - GenericParam::Type(_) | GenericParam::Const(_) => { - if !trailing_or_empty { - ::default().to_tokens(tokens); - trailing_or_empty = true; - } - param.to_tokens(tokens); - } - GenericParam::Lifetime(_) => {} - } - } - - TokensOrDefault(&self.gt_token).to_tokens(tokens); - } - } - - impl<'a> ToTokens for ImplGenerics<'a> { - fn to_tokens(&self, tokens: &mut TokenStream) { - if self.0.params.is_empty() { - return; - } - - TokensOrDefault(&self.0.lt_token).to_tokens(tokens); - - // Print lifetimes before types and consts, regardless of their - // order in self.params. - // - // TODO: ordering rules for const parameters vs type parameters have - // not been settled yet. https://github.com/rust-lang/rust/issues/44580 - let mut trailing_or_empty = true; - for param in self.0.params.pairs() { - if let GenericParam::Lifetime(_) = **param.value() { - param.to_tokens(tokens); - trailing_or_empty = param.punct().is_some(); - } - } - for param in self.0.params.pairs() { - if let GenericParam::Lifetime(_) = **param.value() { - continue; - } - if !trailing_or_empty { - ::default().to_tokens(tokens); - trailing_or_empty = true; - } - match **param.value() { - GenericParam::Lifetime(_) => unreachable!(), - GenericParam::Type(ref param) => { - // Leave off the type parameter defaults - tokens.append_all(param.attrs.outer()); - param.ident.to_tokens(tokens); - if !param.bounds.is_empty() { - TokensOrDefault(¶m.colon_token).to_tokens(tokens); - param.bounds.to_tokens(tokens); - } - } - GenericParam::Const(ref param) => { - // Leave off the const parameter defaults - tokens.append_all(param.attrs.outer()); - param.const_token.to_tokens(tokens); - param.ident.to_tokens(tokens); - param.colon_token.to_tokens(tokens); - param.ty.to_tokens(tokens); - } - } - param.punct().to_tokens(tokens); - } - - TokensOrDefault(&self.0.gt_token).to_tokens(tokens); - } - } - - impl<'a> ToTokens for TypeGenerics<'a> { - fn to_tokens(&self, tokens: &mut TokenStream) { - if self.0.params.is_empty() { - return; - } - - TokensOrDefault(&self.0.lt_token).to_tokens(tokens); - - // Print lifetimes before types and consts, regardless of their - // order in self.params. - // - // TODO: ordering rules for const parameters vs type parameters have - // not been settled yet. 
https://github.com/rust-lang/rust/issues/44580 - let mut trailing_or_empty = true; - for param in self.0.params.pairs() { - if let GenericParam::Lifetime(ref def) = **param.value() { - // Leave off the lifetime bounds and attributes - def.lifetime.to_tokens(tokens); - param.punct().to_tokens(tokens); - trailing_or_empty = param.punct().is_some(); - } - } - for param in self.0.params.pairs() { - if let GenericParam::Lifetime(_) = **param.value() { - continue; - } - if !trailing_or_empty { - ::default().to_tokens(tokens); - trailing_or_empty = true; - } - match **param.value() { - GenericParam::Lifetime(_) => unreachable!(), - GenericParam::Type(ref param) => { - // Leave off the type parameter defaults - param.ident.to_tokens(tokens); - } - GenericParam::Const(ref param) => { - // Leave off the const parameter defaults - param.ident.to_tokens(tokens); - } - } - param.punct().to_tokens(tokens); - } - - TokensOrDefault(&self.0.gt_token).to_tokens(tokens); - } - } - - impl<'a> ToTokens for Turbofish<'a> { - fn to_tokens(&self, tokens: &mut TokenStream) { - if !self.0.params.is_empty() { - ::default().to_tokens(tokens); - TypeGenerics(self.0).to_tokens(tokens); - } - } - } - - impl ToTokens for BoundLifetimes { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.for_token.to_tokens(tokens); - self.lt_token.to_tokens(tokens); - self.lifetimes.to_tokens(tokens); - self.gt_token.to_tokens(tokens); - } - } - - impl ToTokens for LifetimeDef { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.lifetime.to_tokens(tokens); - if !self.bounds.is_empty() { - TokensOrDefault(&self.colon_token).to_tokens(tokens); - self.bounds.to_tokens(tokens); - } - } - } - - impl ToTokens for TypeParam { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.ident.to_tokens(tokens); - if !self.bounds.is_empty() { - TokensOrDefault(&self.colon_token).to_tokens(tokens); - self.bounds.to_tokens(tokens); - } - if self.default.is_some() { - TokensOrDefault(&self.eq_token).to_tokens(tokens); - self.default.to_tokens(tokens); - } - } - } - - impl ToTokens for TraitBound { - fn to_tokens(&self, tokens: &mut TokenStream) { - let to_tokens = |tokens: &mut TokenStream| { - self.modifier.to_tokens(tokens); - self.lifetimes.to_tokens(tokens); - self.path.to_tokens(tokens); - }; - match self.paren_token { - Some(ref paren) => paren.surround(tokens, to_tokens), - None => to_tokens(tokens), - } - } - } - - impl ToTokens for TraitBoundModifier { - fn to_tokens(&self, tokens: &mut TokenStream) { - match *self { - TraitBoundModifier::None => {} - TraitBoundModifier::Maybe(ref t) => t.to_tokens(tokens), - } - } - } - - impl ToTokens for ConstParam { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.const_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.colon_token.to_tokens(tokens); - self.ty.to_tokens(tokens); - if self.default.is_some() { - TokensOrDefault(&self.eq_token).to_tokens(tokens); - self.default.to_tokens(tokens); - } - } - } - - impl ToTokens for WhereClause { - fn to_tokens(&self, tokens: &mut TokenStream) { - if !self.predicates.is_empty() { - self.where_token.to_tokens(tokens); - self.predicates.to_tokens(tokens); - } - } - } - - impl ToTokens for PredicateType { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.lifetimes.to_tokens(tokens); - self.bounded_ty.to_tokens(tokens); - self.colon_token.to_tokens(tokens); - self.bounds.to_tokens(tokens); - } - } - - impl 
ToTokens for PredicateLifetime { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.lifetime.to_tokens(tokens); - self.colon_token.to_tokens(tokens); - self.bounds.to_tokens(tokens); - } - } - - impl ToTokens for PredicateEq { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.lhs_ty.to_tokens(tokens); - self.eq_token.to_tokens(tokens); - self.rhs_ty.to_tokens(tokens); - } - } -} diff --git a/third_party/rust/syn-0.15.30/src/group.rs b/third_party/rust/syn-0.15.30/src/group.rs deleted file mode 100644 index 272e435f24..0000000000 --- a/third_party/rust/syn-0.15.30/src/group.rs +++ /dev/null @@ -1,283 +0,0 @@ -use proc_macro2::{Delimiter, Span}; - -use error::Result; -use parse::{ParseBuffer, ParseStream}; -use private; -use token; - -// Not public API. -#[doc(hidden)] -pub struct Parens<'a> { - pub token: token::Paren, - pub content: ParseBuffer<'a>, -} - -// Not public API. -#[doc(hidden)] -pub struct Braces<'a> { - pub token: token::Brace, - pub content: ParseBuffer<'a>, -} - -// Not public API. -#[doc(hidden)] -pub struct Brackets<'a> { - pub token: token::Bracket, - pub content: ParseBuffer<'a>, -} - -// Not public API. -#[cfg(any(feature = "full", feature = "derive"))] -#[doc(hidden)] -pub struct Group<'a> { - pub token: token::Group, - pub content: ParseBuffer<'a>, -} - -// Not public API. -#[doc(hidden)] -pub fn parse_parens(input: ParseStream) -> Result { - parse_delimited(input, Delimiter::Parenthesis).map(|(span, content)| Parens { - token: token::Paren(span), - content: content, - }) -} - -// Not public API. -#[doc(hidden)] -pub fn parse_braces(input: ParseStream) -> Result { - parse_delimited(input, Delimiter::Brace).map(|(span, content)| Braces { - token: token::Brace(span), - content: content, - }) -} - -// Not public API. -#[doc(hidden)] -pub fn parse_brackets(input: ParseStream) -> Result { - parse_delimited(input, Delimiter::Bracket).map(|(span, content)| Brackets { - token: token::Bracket(span), - content: content, - }) -} - -#[cfg(any(feature = "full", feature = "derive"))] -impl private { - pub fn parse_group(input: ParseStream) -> Result { - parse_delimited(input, Delimiter::None).map(|(span, content)| Group { - token: token::Group(span), - content: content, - }) - } -} - -fn parse_delimited(input: ParseStream, delimiter: Delimiter) -> Result<(Span, ParseBuffer)> { - input.step(|cursor| { - if let Some((content, span, rest)) = cursor.group(delimiter) { - #[cfg(procmacro2_semver_exempt)] - let scope = private::close_span_of_group(*cursor); - #[cfg(not(procmacro2_semver_exempt))] - let scope = span; - let nested = private::advance_step_cursor(cursor, content); - let unexpected = private::get_unexpected(input); - let content = private::new_parse_buffer(scope, nested, unexpected); - Ok(((span, content), rest)) - } else { - let message = match delimiter { - Delimiter::Parenthesis => "expected parentheses", - Delimiter::Brace => "expected curly braces", - Delimiter::Bracket => "expected square brackets", - Delimiter::None => "expected invisible group", - }; - Err(cursor.error(message)) - } - }) -} - -/// Parse a set of parentheses and expose their content to subsequent parsers. 
-/// -/// # Example -/// -/// ```edition2018 -/// # use quote::quote; -/// # -/// use syn::{parenthesized, token, Ident, Result, Token, Type}; -/// use syn::parse::{Parse, ParseStream}; -/// use syn::punctuated::Punctuated; -/// -/// // Parse a simplified tuple struct syntax like: -/// // -/// // struct S(A, B); -/// struct TupleStruct { -/// struct_token: Token![struct], -/// ident: Ident, -/// paren_token: token::Paren, -/// fields: Punctuated, -/// semi_token: Token![;], -/// } -/// -/// impl Parse for TupleStruct { -/// fn parse(input: ParseStream) -> Result { -/// let content; -/// Ok(TupleStruct { -/// struct_token: input.parse()?, -/// ident: input.parse()?, -/// paren_token: parenthesized!(content in input), -/// fields: content.parse_terminated(Type::parse)?, -/// semi_token: input.parse()?, -/// }) -/// } -/// } -/// # -/// # fn main() { -/// # let input = quote! { -/// # struct S(A, B); -/// # }; -/// # syn::parse2::(input).unwrap(); -/// # } -/// ``` -#[macro_export] -macro_rules! parenthesized { - ($content:ident in $cursor:expr) => { - match $crate::group::parse_parens(&$cursor) { - $crate::export::Ok(parens) => { - $content = parens.content; - parens.token - } - $crate::export::Err(error) => { - return $crate::export::Err(error); - } - } - }; -} - -/// Parse a set of curly braces and expose their content to subsequent parsers. -/// -/// # Example -/// -/// ```edition2018 -/// # use quote::quote; -/// # -/// use syn::{braced, token, Ident, Result, Token, Type}; -/// use syn::parse::{Parse, ParseStream}; -/// use syn::punctuated::Punctuated; -/// -/// // Parse a simplified struct syntax like: -/// // -/// // struct S { -/// // a: A, -/// // b: B, -/// // } -/// struct Struct { -/// struct_token: Token![struct], -/// ident: Ident, -/// brace_token: token::Brace, -/// fields: Punctuated, -/// } -/// -/// struct Field { -/// name: Ident, -/// colon_token: Token![:], -/// ty: Type, -/// } -/// -/// impl Parse for Struct { -/// fn parse(input: ParseStream) -> Result { -/// let content; -/// Ok(Struct { -/// struct_token: input.parse()?, -/// ident: input.parse()?, -/// brace_token: braced!(content in input), -/// fields: content.parse_terminated(Field::parse)?, -/// }) -/// } -/// } -/// -/// impl Parse for Field { -/// fn parse(input: ParseStream) -> Result { -/// Ok(Field { -/// name: input.parse()?, -/// colon_token: input.parse()?, -/// ty: input.parse()?, -/// }) -/// } -/// } -/// # -/// # fn main() { -/// # let input = quote! { -/// # struct S { -/// # a: A, -/// # b: B, -/// # } -/// # }; -/// # syn::parse2::(input).unwrap(); -/// # } -/// ``` -#[macro_export] -macro_rules! braced { - ($content:ident in $cursor:expr) => { - match $crate::group::parse_braces(&$cursor) { - $crate::export::Ok(braces) => { - $content = braces.content; - braces.token - } - $crate::export::Err(error) => { - return $crate::export::Err(error); - } - } - }; -} - -/// Parse a set of square brackets and expose their content to subsequent -/// parsers. 
-/// -/// # Example -/// -/// ```edition2018 -/// # use quote::quote; -/// # -/// use proc_macro2::TokenStream; -/// use syn::{bracketed, token, Result, Token}; -/// use syn::parse::{Parse, ParseStream}; -/// -/// // Parse an outer attribute like: -/// // -/// // #[repr(C, packed)] -/// struct OuterAttribute { -/// pound_token: Token![#], -/// bracket_token: token::Bracket, -/// content: TokenStream, -/// } -/// -/// impl Parse for OuterAttribute { -/// fn parse(input: ParseStream) -> Result { -/// let content; -/// Ok(OuterAttribute { -/// pound_token: input.parse()?, -/// bracket_token: bracketed!(content in input), -/// content: content.parse()?, -/// }) -/// } -/// } -/// # -/// # fn main() { -/// # let input = quote! { -/// # #[repr(C, packed)] -/// # }; -/// # syn::parse2::(input).unwrap(); -/// # } -/// ``` -#[macro_export] -macro_rules! bracketed { - ($content:ident in $cursor:expr) => { - match $crate::group::parse_brackets(&$cursor) { - $crate::export::Ok(brackets) => { - $content = brackets.content; - brackets.token - } - $crate::export::Err(error) => { - return $crate::export::Err(error); - } - } - }; -} diff --git a/third_party/rust/syn-0.15.30/src/ident.rs b/third_party/rust/syn-0.15.30/src/ident.rs deleted file mode 100644 index f5a76a1de8..0000000000 --- a/third_party/rust/syn-0.15.30/src/ident.rs +++ /dev/null @@ -1,86 +0,0 @@ -#[cfg(feature = "parsing")] -use buffer::Cursor; -#[cfg(feature = "parsing")] -use lookahead; -#[cfg(feature = "parsing")] -use parse::{Parse, ParseStream, Result}; -#[cfg(feature = "parsing")] -use token::Token; - -pub use proc_macro2::Ident; - -#[cfg(feature = "parsing")] -#[doc(hidden)] -#[allow(non_snake_case)] -pub fn Ident(marker: lookahead::TokenMarker) -> Ident { - match marker {} -} - -#[cfg(feature = "parsing")] -fn accept_as_ident(ident: &Ident) -> bool { - match ident.to_string().as_str() { - "_" - // Based on https://doc.rust-lang.org/grammar.html#keywords - // and https://github.com/rust-lang/rfcs/blob/master/text/2421-unreservations-2018.md - // and https://github.com/rust-lang/rfcs/blob/master/text/2420-unreserve-proc.md - | "abstract" | "as" | "become" | "box" | "break" | "const" - | "continue" | "crate" | "do" | "else" | "enum" | "extern" | "false" | "final" - | "fn" | "for" | "if" | "impl" | "in" | "let" | "loop" | "macro" | "match" - | "mod" | "move" | "mut" | "override" | "priv" | "pub" - | "ref" | "return" | "Self" | "self" | "static" | "struct" - | "super" | "trait" | "true" | "type" | "typeof" | "unsafe" | "unsized" | "use" - | "virtual" | "where" | "while" | "yield" => false, - _ => true, - } -} - -#[cfg(feature = "parsing")] -impl Parse for Ident { - fn parse(input: ParseStream) -> Result { - input.step(|cursor| { - if let Some((ident, rest)) = cursor.ident() { - if accept_as_ident(&ident) { - return Ok((ident, rest)); - } - } - Err(cursor.error("expected identifier")) - }) - } -} - -#[cfg(feature = "parsing")] -impl Token for Ident { - fn peek(cursor: Cursor) -> bool { - if let Some((ident, _rest)) = cursor.ident() { - accept_as_ident(&ident) - } else { - false - } - } - - fn display() -> &'static str { - "identifier" - } -} - -macro_rules! 
ident_from_token { - ($token:ident) => { - impl From for Ident { - fn from(token: Token![$token]) -> Ident { - Ident::new(stringify!($token), token.span) - } - } - }; -} - -ident_from_token!(self); -ident_from_token!(Self); -ident_from_token!(super); -ident_from_token!(crate); -ident_from_token!(extern); - -impl From for Ident { - fn from(token: Token![_]) -> Ident { - Ident::new("_", token.span) - } -} diff --git a/third_party/rust/syn-0.15.30/src/item.rs b/third_party/rust/syn-0.15.30/src/item.rs deleted file mode 100644 index 858e8ecc91..0000000000 --- a/third_party/rust/syn-0.15.30/src/item.rs +++ /dev/null @@ -1,2675 +0,0 @@ -use super::*; -use derive::{Data, DeriveInput}; -use proc_macro2::TokenStream; -use punctuated::Punctuated; -use token::{Brace, Paren}; - -#[cfg(feature = "extra-traits")] -use std::hash::{Hash, Hasher}; -#[cfg(feature = "extra-traits")] -use tt::TokenStreamHelper; - -ast_enum_of_structs! { - /// Things that can appear directly inside of a module or scope. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. - /// - /// [syntax tree enum]: enum.Expr.html#syntax-tree-enums - pub enum Item { - /// An `extern crate` item: `extern crate serde`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub ExternCrate(ItemExternCrate { - pub attrs: Vec, - pub vis: Visibility, - pub extern_token: Token![extern], - pub crate_token: Token![crate], - pub ident: Ident, - pub rename: Option<(Token![as], Ident)>, - pub semi_token: Token![;], - }), - - /// A use declaration: `use std::collections::HashMap`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Use(ItemUse { - pub attrs: Vec, - pub vis: Visibility, - pub use_token: Token![use], - pub leading_colon: Option, - pub tree: UseTree, - pub semi_token: Token![;], - }), - - /// A static item: `static BIKE: Shed = Shed(42)`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Static(ItemStatic { - pub attrs: Vec, - pub vis: Visibility, - pub static_token: Token![static], - pub mutability: Option, - pub ident: Ident, - pub colon_token: Token![:], - pub ty: Box, - pub eq_token: Token![=], - pub expr: Box, - pub semi_token: Token![;], - }), - - /// A constant item: `const MAX: u16 = 65535`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Const(ItemConst { - pub attrs: Vec, - pub vis: Visibility, - pub const_token: Token![const], - pub ident: Ident, - pub colon_token: Token![:], - pub ty: Box, - pub eq_token: Token![=], - pub expr: Box, - pub semi_token: Token![;], - }), - - /// A free-standing function: `fn process(n: usize) -> Result<()> { ... - /// }`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Fn(ItemFn { - pub attrs: Vec, - pub vis: Visibility, - pub constness: Option, - pub unsafety: Option, - pub asyncness: Option, - pub abi: Option, - pub ident: Ident, - pub decl: Box, - pub block: Box, - }), - - /// A module or module declaration: `mod m` or `mod m { ... }`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Mod(ItemMod { - pub attrs: Vec, - pub vis: Visibility, - pub mod_token: Token![mod], - pub ident: Ident, - pub content: Option<(token::Brace, Vec)>, - pub semi: Option, - }), - - /// A block of foreign items: `extern "C" { ... }`. 
- /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub ForeignMod(ItemForeignMod { - pub attrs: Vec, - pub abi: Abi, - pub brace_token: token::Brace, - pub items: Vec, - }), - - /// A type alias: `type Result = std::result::Result`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Type(ItemType { - pub attrs: Vec, - pub vis: Visibility, - pub type_token: Token![type], - pub ident: Ident, - pub generics: Generics, - pub eq_token: Token![=], - pub ty: Box, - pub semi_token: Token![;], - }), - - /// An existential type: `existential type Iter: Iterator`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Existential(ItemExistential { - pub attrs: Vec, - pub vis: Visibility, - pub existential_token: Token![existential], - pub type_token: Token![type], - pub ident: Ident, - pub generics: Generics, - pub colon_token: Option, - pub bounds: Punctuated, - pub semi_token: Token![;], - }), - - /// A struct definition: `struct Foo { x: A }`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Struct(ItemStruct { - pub attrs: Vec, - pub vis: Visibility, - pub struct_token: Token![struct], - pub ident: Ident, - pub generics: Generics, - pub fields: Fields, - pub semi_token: Option, - }), - - /// An enum definition: `enum Foo { C, D }`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Enum(ItemEnum { - pub attrs: Vec, - pub vis: Visibility, - pub enum_token: Token![enum], - pub ident: Ident, - pub generics: Generics, - pub brace_token: token::Brace, - pub variants: Punctuated, - }), - - /// A union definition: `union Foo { x: A, y: B }`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Union(ItemUnion { - pub attrs: Vec, - pub vis: Visibility, - pub union_token: Token![union], - pub ident: Ident, - pub generics: Generics, - pub fields: FieldsNamed, - }), - - /// A trait definition: `pub trait Iterator { ... }`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Trait(ItemTrait { - pub attrs: Vec, - pub vis: Visibility, - pub unsafety: Option, - pub auto_token: Option, - pub trait_token: Token![trait], - pub ident: Ident, - pub generics: Generics, - pub colon_token: Option, - pub supertraits: Punctuated, - pub brace_token: token::Brace, - pub items: Vec, - }), - - /// A trait alias: `pub trait SharableIterator = Iterator + Sync`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub TraitAlias(ItemTraitAlias { - pub attrs: Vec, - pub vis: Visibility, - pub trait_token: Token![trait], - pub ident: Ident, - pub generics: Generics, - pub eq_token: Token![=], - pub bounds: Punctuated, - pub semi_token: Token![;], - }), - - /// An impl block providing trait or associated items: `impl Trait - /// for Data { ... }`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Impl(ItemImpl { - pub attrs: Vec, - pub defaultness: Option, - pub unsafety: Option, - pub impl_token: Token![impl], - pub generics: Generics, - /// Trait this impl implements. - pub trait_: Option<(Option, Path, Token![for])>, - /// The Self type of the impl. - pub self_ty: Box, - pub brace_token: token::Brace, - pub items: Vec, - }), - - /// A macro invocation, which includes `macro_rules!` definitions. 
- /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Macro(ItemMacro { - pub attrs: Vec, - /// The `example` in `macro_rules! example { ... }`. - pub ident: Option, - pub mac: Macro, - pub semi_token: Option, - }), - - /// A 2.0-style declarative macro introduced by the `macro` keyword. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Macro2(ItemMacro2 #manual_extra_traits { - pub attrs: Vec, - pub vis: Visibility, - pub macro_token: Token![macro], - pub ident: Ident, - pub paren_token: Paren, - pub args: TokenStream, - pub brace_token: Brace, - pub body: TokenStream, - }), - - /// Tokens forming an item not interpreted by Syn. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Verbatim(ItemVerbatim #manual_extra_traits { - pub tts: TokenStream, - }), - } -} - -#[cfg(feature = "extra-traits")] -impl Eq for ItemMacro2 {} - -#[cfg(feature = "extra-traits")] -impl PartialEq for ItemMacro2 { - fn eq(&self, other: &Self) -> bool { - self.attrs == other.attrs - && self.vis == other.vis - && self.macro_token == other.macro_token - && self.ident == other.ident - && self.paren_token == other.paren_token - && TokenStreamHelper(&self.args) == TokenStreamHelper(&other.args) - && self.brace_token == other.brace_token - && TokenStreamHelper(&self.body) == TokenStreamHelper(&other.body) - } -} - -#[cfg(feature = "extra-traits")] -impl Hash for ItemMacro2 { - fn hash(&self, state: &mut H) - where - H: Hasher, - { - self.attrs.hash(state); - self.vis.hash(state); - self.macro_token.hash(state); - self.ident.hash(state); - self.paren_token.hash(state); - TokenStreamHelper(&self.args).hash(state); - self.brace_token.hash(state); - TokenStreamHelper(&self.body).hash(state); - } -} - -#[cfg(feature = "extra-traits")] -impl Eq for ItemVerbatim {} - -#[cfg(feature = "extra-traits")] -impl PartialEq for ItemVerbatim { - fn eq(&self, other: &Self) -> bool { - TokenStreamHelper(&self.tts) == TokenStreamHelper(&other.tts) - } -} - -#[cfg(feature = "extra-traits")] -impl Hash for ItemVerbatim { - fn hash(&self, state: &mut H) - where - H: Hasher, - { - TokenStreamHelper(&self.tts).hash(state); - } -} - -impl From for Item { - fn from(input: DeriveInput) -> Item { - match input.data { - Data::Struct(data) => Item::Struct(ItemStruct { - attrs: input.attrs, - vis: input.vis, - struct_token: data.struct_token, - ident: input.ident, - generics: input.generics, - fields: data.fields, - semi_token: data.semi_token, - }), - Data::Enum(data) => Item::Enum(ItemEnum { - attrs: input.attrs, - vis: input.vis, - enum_token: data.enum_token, - ident: input.ident, - generics: input.generics, - brace_token: data.brace_token, - variants: data.variants, - }), - Data::Union(data) => Item::Union(ItemUnion { - attrs: input.attrs, - vis: input.vis, - union_token: data.union_token, - ident: input.ident, - generics: input.generics, - fields: data.fields, - }), - } - } -} - -ast_enum_of_structs! { - /// A suffix of an import tree in a `use` item: `Type as Renamed` or `*`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. - /// - /// [syntax tree enum]: enum.Expr.html#syntax-tree-enums - pub enum UseTree { - /// A path prefix of imports in a `use` item: `std::...`. 
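// Illustrative sketch, not part of the removed syn source: the
// From<DeriveInput> conversion above maps a parsed struct definition onto
// Item::Struct with its fields preserved. Assumes syn 0.15 with the "full"
// and "derive" features; `Point` is a placeholder type.
fn main() {
    let input: syn::DeriveInput =
        syn::parse_str("struct Point { x: f64, y: f64 }").unwrap();
    match syn::Item::from(input) {
        syn::Item::Struct(item) => match item.fields {
            syn::Fields::Named(fields) => assert_eq!(fields.named.len(), 2),
            _ => panic!("expected named fields"),
        },
        _ => panic!("expected Item::Struct"),
    }
}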
- /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Path(UsePath { - pub ident: Ident, - pub colon2_token: Token![::], - pub tree: Box, - }), - - /// An identifier imported by a `use` item: `HashMap`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Name(UseName { - pub ident: Ident, - }), - - /// An renamed identifier imported by a `use` item: `HashMap as Map`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Rename(UseRename { - pub ident: Ident, - pub as_token: Token![as], - pub rename: Ident, - }), - - /// A glob import in a `use` item: `*`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Glob(UseGlob { - pub star_token: Token![*], - }), - - /// A braced group of imports in a `use` item: `{A, B, C}`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Group(UseGroup { - pub brace_token: token::Brace, - pub items: Punctuated, - }), - } -} - -ast_enum_of_structs! { - /// An item within an `extern` block. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. - /// - /// [syntax tree enum]: enum.Expr.html#syntax-tree-enums - pub enum ForeignItem { - /// A foreign function in an `extern` block. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Fn(ForeignItemFn { - pub attrs: Vec, - pub vis: Visibility, - pub ident: Ident, - pub decl: Box, - pub semi_token: Token![;], - }), - - /// A foreign static item in an `extern` block: `static ext: u8`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Static(ForeignItemStatic { - pub attrs: Vec, - pub vis: Visibility, - pub static_token: Token![static], - pub mutability: Option, - pub ident: Ident, - pub colon_token: Token![:], - pub ty: Box, - pub semi_token: Token![;], - }), - - /// A foreign type in an `extern` block: `type void`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Type(ForeignItemType { - pub attrs: Vec, - pub vis: Visibility, - pub type_token: Token![type], - pub ident: Ident, - pub semi_token: Token![;], - }), - - /// A macro invocation within an extern block. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Macro(ForeignItemMacro { - pub attrs: Vec, - pub mac: Macro, - pub semi_token: Option, - }), - - /// Tokens in an `extern` block not interpreted by Syn. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Verbatim(ForeignItemVerbatim #manual_extra_traits { - pub tts: TokenStream, - }), - } -} - -#[cfg(feature = "extra-traits")] -impl Eq for ForeignItemVerbatim {} - -#[cfg(feature = "extra-traits")] -impl PartialEq for ForeignItemVerbatim { - fn eq(&self, other: &Self) -> bool { - TokenStreamHelper(&self.tts) == TokenStreamHelper(&other.tts) - } -} - -#[cfg(feature = "extra-traits")] -impl Hash for ForeignItemVerbatim { - fn hash(&self, state: &mut H) - where - H: Hasher, - { - TokenStreamHelper(&self.tts).hash(state); - } -} - -ast_enum_of_structs! { - /// An item declaration within the definition of a trait. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. 
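// Illustrative sketch, not part of the removed syn source: how the UseTree
// variants above nest for a typical import, assuming syn 0.15 with the
// "full" feature. The item is parsed with the ItemUse / use_tree parsers
// defined later in this file.
fn main() {
    let item: syn::ItemUse =
        syn::parse_str("use std::collections::{HashMap, HashSet as Set};").unwrap();

    // Each `::` segment nests one UsePath; the trailing braces become a
    // UseGroup holding a UseName and a UseRename.
    let mut tree = &item.tree;
    let mut prefix = Vec::new();
    while let syn::UseTree::Path(path) = tree {
        prefix.push(path.ident.to_string());
        tree = &*path.tree;
    }
    assert_eq!(prefix, ["std", "collections"]);
    match tree {
        syn::UseTree::Group(group) => assert_eq!(group.items.len(), 2),
        _ => panic!("expected UseTree::Group"),
    }
}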
- /// - /// [syntax tree enum]: enum.Expr.html#syntax-tree-enums - pub enum TraitItem { - /// An associated constant within the definition of a trait. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Const(TraitItemConst { - pub attrs: Vec, - pub const_token: Token![const], - pub ident: Ident, - pub colon_token: Token![:], - pub ty: Type, - pub default: Option<(Token![=], Expr)>, - pub semi_token: Token![;], - }), - - /// A trait method within the definition of a trait. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Method(TraitItemMethod { - pub attrs: Vec, - pub sig: MethodSig, - pub default: Option, - pub semi_token: Option, - }), - - /// An associated type within the definition of a trait. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Type(TraitItemType { - pub attrs: Vec, - pub type_token: Token![type], - pub ident: Ident, - pub generics: Generics, - pub colon_token: Option, - pub bounds: Punctuated, - pub default: Option<(Token![=], Type)>, - pub semi_token: Token![;], - }), - - /// A macro invocation within the definition of a trait. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Macro(TraitItemMacro { - pub attrs: Vec, - pub mac: Macro, - pub semi_token: Option, - }), - - /// Tokens within the definition of a trait not interpreted by Syn. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Verbatim(TraitItemVerbatim #manual_extra_traits { - pub tts: TokenStream, - }), - } -} - -#[cfg(feature = "extra-traits")] -impl Eq for TraitItemVerbatim {} - -#[cfg(feature = "extra-traits")] -impl PartialEq for TraitItemVerbatim { - fn eq(&self, other: &Self) -> bool { - TokenStreamHelper(&self.tts) == TokenStreamHelper(&other.tts) - } -} - -#[cfg(feature = "extra-traits")] -impl Hash for TraitItemVerbatim { - fn hash(&self, state: &mut H) - where - H: Hasher, - { - TokenStreamHelper(&self.tts).hash(state); - } -} - -ast_enum_of_structs! { - /// An item within an impl block. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. - /// - /// [syntax tree enum]: enum.Expr.html#syntax-tree-enums - pub enum ImplItem { - /// An associated constant within an impl block. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Const(ImplItemConst { - pub attrs: Vec, - pub vis: Visibility, - pub defaultness: Option, - pub const_token: Token![const], - pub ident: Ident, - pub colon_token: Token![:], - pub ty: Type, - pub eq_token: Token![=], - pub expr: Expr, - pub semi_token: Token![;], - }), - - /// A method within an impl block. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Method(ImplItemMethod { - pub attrs: Vec, - pub vis: Visibility, - pub defaultness: Option, - pub sig: MethodSig, - pub block: Block, - }), - - /// An associated type within an impl block. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Type(ImplItemType { - pub attrs: Vec, - pub vis: Visibility, - pub defaultness: Option, - pub type_token: Token![type], - pub ident: Ident, - pub generics: Generics, - pub eq_token: Token![=], - pub ty: Type, - pub semi_token: Token![;], - }), - - /// An existential type within an impl block. 
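// Illustrative sketch, not part of the removed syn source: the TraitItem
// variants above as produced for a small trait, assuming syn 0.15 with the
// "full" feature; `Storage` is a placeholder trait. A required method parses
// as TraitItemMethod with no default body; an associated type parses as
// TraitItemType.
fn main() {
    let tr: syn::ItemTrait =
        syn::parse_str("trait Storage { type Key; fn get(&self, key: Self::Key) -> u8; }")
            .unwrap();
    assert_eq!(tr.items.len(), 2);
    match &tr.items[1] {
        syn::TraitItem::Method(method) => {
            assert_eq!(method.sig.ident.to_string(), "get");
            assert!(method.default.is_none()); // declaration only, no body
        }
        _ => panic!("expected TraitItem::Method"),
    }
}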
- /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Existential(ImplItemExistential { - pub attrs: Vec, - pub existential_token: Token![existential], - pub type_token: Token![type], - pub ident: Ident, - pub generics: Generics, - pub colon_token: Option, - pub bounds: Punctuated, - pub semi_token: Token![;], - }), - - /// A macro invocation within an impl block. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Macro(ImplItemMacro { - pub attrs: Vec, - pub mac: Macro, - pub semi_token: Option, - }), - - /// Tokens within an impl block not interpreted by Syn. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Verbatim(ImplItemVerbatim #manual_extra_traits { - pub tts: TokenStream, - }), - } -} - -#[cfg(feature = "extra-traits")] -impl Eq for ImplItemVerbatim {} - -#[cfg(feature = "extra-traits")] -impl PartialEq for ImplItemVerbatim { - fn eq(&self, other: &Self) -> bool { - TokenStreamHelper(&self.tts) == TokenStreamHelper(&other.tts) - } -} - -#[cfg(feature = "extra-traits")] -impl Hash for ImplItemVerbatim { - fn hash(&self, state: &mut H) - where - H: Hasher, - { - TokenStreamHelper(&self.tts).hash(state); - } -} - -ast_struct! { - /// A method's signature in a trait or implementation: `unsafe fn - /// initialize(&self)`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub struct MethodSig { - pub constness: Option, - pub unsafety: Option, - pub asyncness: Option, - pub abi: Option, - pub ident: Ident, - pub decl: FnDecl, - } -} - -ast_struct! { - /// Header of a function declaration, without including the body. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub struct FnDecl { - pub fn_token: Token![fn], - pub generics: Generics, - pub paren_token: token::Paren, - pub inputs: Punctuated, - pub variadic: Option, - pub output: ReturnType, - } -} - -ast_enum_of_structs! { - /// An argument in a function signature: the `n: usize` in `fn f(n: usize)`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. - /// - /// [syntax tree enum]: enum.Expr.html#syntax-tree-enums - pub enum FnArg { - /// Self captured by reference in a function signature: `&self` or `&mut - /// self`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub SelfRef(ArgSelfRef { - pub and_token: Token![&], - pub lifetime: Option, - pub mutability: Option, - pub self_token: Token![self], - }), - - /// Self captured by value in a function signature: `self` or `mut - /// self`. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub SelfValue(ArgSelf { - pub mutability: Option, - pub self_token: Token![self], - }), - - /// An explicitly typed pattern captured by a function signature. - /// - /// *This type is available if Syn is built with the `"full"` feature.* - pub Captured(ArgCaptured { - pub pat: Pat, - pub colon_token: Token![:], - pub ty: Type, - }), - - /// A pattern whose type is inferred captured by a function signature. - pub Inferred(Pat), - /// A type not bound to any pattern in a function signature. 
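// Illustrative sketch, not part of the removed syn source: how a signature
// decomposes into MethodSig, FnDecl, and FnArg in syn 0.15 (with the "full"
// feature); `scale` is a placeholder method. The receiver and the typed
// argument arrive as separate FnArg variants.
fn main() {
    let method: syn::TraitItemMethod =
        syn::parse_str("fn scale(&mut self, factor: f64);").unwrap();
    let inputs: Vec<&syn::FnArg> = method.sig.decl.inputs.iter().collect();
    assert_eq!(inputs.len(), 2);
    match inputs[0] {
        syn::FnArg::SelfRef(receiver) => assert!(receiver.mutability.is_some()),
        _ => panic!("expected a `&mut self` receiver"),
    }
    match inputs[1] {
        syn::FnArg::Captured(arg) => match &arg.pat {
            syn::Pat::Ident(pat) => assert_eq!(pat.ident.to_string(), "factor"),
            _ => panic!("expected an identifier pattern"),
        },
        _ => panic!("expected a typed argument"),
    }
}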
- pub Ignored(Type), - } -} - -#[cfg(feature = "parsing")] -pub mod parsing { - use super::*; - - use ext::IdentExt; - use parse::{Parse, ParseStream, Result}; - use proc_macro2::{Punct, Spacing, TokenTree}; - use std::iter::FromIterator; - - impl Parse for Item { - fn parse(input: ParseStream) -> Result { - let ahead = input.fork(); - ahead.call(Attribute::parse_outer)?; - let vis: Visibility = ahead.parse()?; - - let lookahead = ahead.lookahead1(); - if lookahead.peek(Token![extern]) { - ahead.parse::()?; - let lookahead = ahead.lookahead1(); - if lookahead.peek(Token![crate]) { - input.parse().map(Item::ExternCrate) - } else if lookahead.peek(Token![fn]) { - input.parse().map(Item::Fn) - } else if lookahead.peek(token::Brace) { - input.parse().map(Item::ForeignMod) - } else if lookahead.peek(LitStr) { - ahead.parse::()?; - let lookahead = ahead.lookahead1(); - if lookahead.peek(token::Brace) { - input.parse().map(Item::ForeignMod) - } else if lookahead.peek(Token![fn]) { - input.parse().map(Item::Fn) - } else { - Err(lookahead.error()) - } - } else { - Err(lookahead.error()) - } - } else if lookahead.peek(Token![use]) { - input.parse().map(Item::Use) - } else if lookahead.peek(Token![static]) { - input.parse().map(Item::Static) - } else if lookahead.peek(Token![const]) { - ahead.parse::()?; - let lookahead = ahead.lookahead1(); - if lookahead.peek(Ident) || lookahead.peek(Token![_]) { - input.parse().map(Item::Const) - } else if lookahead.peek(Token![unsafe]) - || lookahead.peek(Token![async]) - || lookahead.peek(Token![extern]) - || lookahead.peek(Token![fn]) - { - input.parse().map(Item::Fn) - } else { - Err(lookahead.error()) - } - } else if lookahead.peek(Token![unsafe]) { - ahead.parse::()?; - let lookahead = ahead.lookahead1(); - if lookahead.peek(Token![trait]) - || lookahead.peek(Token![auto]) && ahead.peek2(Token![trait]) - { - input.parse().map(Item::Trait) - } else if lookahead.peek(Token![impl ]) { - input.parse().map(Item::Impl) - } else if lookahead.peek(Token![async]) - || lookahead.peek(Token![extern]) - || lookahead.peek(Token![fn]) - { - input.parse().map(Item::Fn) - } else { - Err(lookahead.error()) - } - } else if lookahead.peek(Token![async]) || lookahead.peek(Token![fn]) { - input.parse().map(Item::Fn) - } else if lookahead.peek(Token![mod]) { - input.parse().map(Item::Mod) - } else if lookahead.peek(Token![type]) { - input.parse().map(Item::Type) - } else if lookahead.peek(Token![existential]) { - input.parse().map(Item::Existential) - } else if lookahead.peek(Token![struct]) { - input.parse().map(Item::Struct) - } else if lookahead.peek(Token![enum]) { - input.parse().map(Item::Enum) - } else if lookahead.peek(Token![union]) && ahead.peek2(Ident) { - input.parse().map(Item::Union) - } else if lookahead.peek(Token![trait]) { - input.call(parse_trait_or_trait_alias) - } else if lookahead.peek(Token![auto]) && ahead.peek2(Token![trait]) { - input.parse().map(Item::Trait) - } else if lookahead.peek(Token![impl ]) - || lookahead.peek(Token![default]) && !ahead.peek2(Token![!]) - { - input.parse().map(Item::Impl) - } else if lookahead.peek(Token![macro]) { - input.parse().map(Item::Macro2) - } else if vis.is_inherited() - && (lookahead.peek(Ident) - || lookahead.peek(Token![self]) - || lookahead.peek(Token![super]) - || lookahead.peek(Token![extern]) - || lookahead.peek(Token![crate]) - || lookahead.peek(Token![::])) - { - input.parse().map(Item::Macro) - } else { - Err(lookahead.error()) - } - } - } - - impl Parse for ItemMacro { - fn parse(input: ParseStream) -> 
Result { - let attrs = input.call(Attribute::parse_outer)?; - let path = input.call(Path::parse_mod_style)?; - let bang_token: Token![!] = input.parse()?; - let ident: Option = input.parse()?; - let (delimiter, tts) = input.call(mac::parse_delimiter)?; - let semi_token: Option = if !delimiter.is_brace() { - Some(input.parse()?) - } else { - None - }; - Ok(ItemMacro { - attrs: attrs, - ident: ident, - mac: Macro { - path: path, - bang_token: bang_token, - delimiter: delimiter, - tts: tts, - }, - semi_token: semi_token, - }) - } - } - - // TODO: figure out the actual grammar; is body required to be braced? - impl Parse for ItemMacro2 { - fn parse(input: ParseStream) -> Result { - let attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - let macro_token: Token![macro] = input.parse()?; - let ident: Ident = input.parse()?; - - let paren_token; - let args; - let brace_token; - let body; - let lookahead = input.lookahead1(); - if lookahead.peek(token::Paren) { - let paren_content; - paren_token = parenthesized!(paren_content in input); - args = paren_content.parse()?; - - let brace_content; - brace_token = braced!(brace_content in input); - body = brace_content.parse()?; - } else if lookahead.peek(token::Brace) { - // Hack: the ItemMacro2 syntax tree will need to change so that - // we can store None for the args. - // - // https://github.com/dtolnay/syn/issues/548 - // - // For now, store some sentinel tokens that are otherwise - // illegal. - paren_token = token::Paren::default(); - args = TokenStream::from_iter(vec![ - TokenTree::Punct(Punct::new('$', Spacing::Alone)), - TokenTree::Punct(Punct::new('$', Spacing::Alone)), - ]); - - let brace_content; - brace_token = braced!(brace_content in input); - body = brace_content.parse()?; - } else { - return Err(lookahead.error()); - } - - Ok(ItemMacro2 { - attrs: attrs, - vis: vis, - macro_token: macro_token, - ident: ident, - paren_token: paren_token, - args: args, - brace_token: brace_token, - body: body, - }) - } - } - - impl Parse for ItemExternCrate { - fn parse(input: ParseStream) -> Result { - Ok(ItemExternCrate { - attrs: input.call(Attribute::parse_outer)?, - vis: input.parse()?, - extern_token: input.parse()?, - crate_token: input.parse()?, - ident: { - if input.peek(Token![self]) { - input.call(Ident::parse_any)? - } else { - input.parse()? - } - }, - rename: { - if input.peek(Token![as]) { - let as_token: Token![as] = input.parse()?; - let rename: Ident = input.parse()?; - Some((as_token, rename)) - } else { - None - } - }, - semi_token: input.parse()?, - }) - } - } - - impl Parse for ItemUse { - fn parse(input: ParseStream) -> Result { - Ok(ItemUse { - attrs: input.call(Attribute::parse_outer)?, - vis: input.parse()?, - use_token: input.parse()?, - leading_colon: input.parse()?, - tree: input.call(use_tree)?, - semi_token: input.parse()?, - }) - } - } - - fn use_tree(input: ParseStream) -> Result { - let lookahead = input.lookahead1(); - if lookahead.peek(Ident) - || lookahead.peek(Token![self]) - || lookahead.peek(Token![super]) - || lookahead.peek(Token![crate]) - || lookahead.peek(Token![extern]) - { - let ident = input.call(Ident::parse_any)?; - if input.peek(Token![::]) { - Ok(UseTree::Path(UsePath { - ident: ident, - colon2_token: input.parse()?, - tree: Box::new(input.call(use_tree)?), - })) - } else if input.peek(Token![as]) { - Ok(UseTree::Rename(UseRename { - ident: ident, - as_token: input.parse()?, - rename: { - if input.peek(Ident) { - input.parse()? 
- } else if input.peek(Token![_]) { - Ident::from(input.parse::()?) - } else { - return Err(input.error("expected identifier or underscore")); - } - }, - })) - } else { - Ok(UseTree::Name(UseName { ident: ident })) - } - } else if lookahead.peek(Token![*]) { - Ok(UseTree::Glob(UseGlob { - star_token: input.parse()?, - })) - } else if lookahead.peek(token::Brace) { - let content; - Ok(UseTree::Group(UseGroup { - brace_token: braced!(content in input), - items: content.parse_terminated(use_tree)?, - })) - } else { - Err(lookahead.error()) - } - } - - impl Parse for ItemStatic { - fn parse(input: ParseStream) -> Result { - Ok(ItemStatic { - attrs: input.call(Attribute::parse_outer)?, - vis: input.parse()?, - static_token: input.parse()?, - mutability: input.parse()?, - ident: input.parse()?, - colon_token: input.parse()?, - ty: input.parse()?, - eq_token: input.parse()?, - expr: input.parse()?, - semi_token: input.parse()?, - }) - } - } - - impl Parse for ItemConst { - fn parse(input: ParseStream) -> Result { - Ok(ItemConst { - attrs: input.call(Attribute::parse_outer)?, - vis: input.parse()?, - const_token: input.parse()?, - ident: { - let lookahead = input.lookahead1(); - if lookahead.peek(Ident) || lookahead.peek(Token![_]) { - input.call(Ident::parse_any)? - } else { - return Err(lookahead.error()); - } - }, - colon_token: input.parse()?, - ty: input.parse()?, - eq_token: input.parse()?, - expr: input.parse()?, - semi_token: input.parse()?, - }) - } - } - - impl Parse for ItemFn { - fn parse(input: ParseStream) -> Result { - let outer_attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - let constness: Option = input.parse()?; - let unsafety: Option = input.parse()?; - let asyncness: Option = input.parse()?; - let abi: Option = input.parse()?; - let fn_token: Token![fn] = input.parse()?; - let ident: Ident = input.parse()?; - let generics: Generics = input.parse()?; - - let content; - let paren_token = parenthesized!(content in input); - let inputs = content.parse_terminated(FnArg::parse)?; - let variadic: Option = match inputs.last() { - Some(punctuated::Pair::End(&FnArg::Captured(ArgCaptured { - ty: Type::Verbatim(TypeVerbatim { ref tts }), - .. 
- }))) => parse2(tts.clone()).ok(), - _ => None, - }; - - let output: ReturnType = input.parse()?; - let where_clause: Option = input.parse()?; - - let content; - let brace_token = braced!(content in input); - let inner_attrs = content.call(Attribute::parse_inner)?; - let stmts = content.call(Block::parse_within)?; - - Ok(ItemFn { - attrs: private::attrs(outer_attrs, inner_attrs), - vis: vis, - constness: constness, - unsafety: unsafety, - asyncness: asyncness, - abi: abi, - ident: ident, - decl: Box::new(FnDecl { - fn_token: fn_token, - paren_token: paren_token, - inputs: inputs, - output: output, - variadic: variadic, - generics: Generics { - where_clause: where_clause, - ..generics - }, - }), - block: Box::new(Block { - brace_token: brace_token, - stmts: stmts, - }), - }) - } - } - - impl Parse for FnArg { - fn parse(input: ParseStream) -> Result { - if input.peek(Token![&]) { - let ahead = input.fork(); - if ahead.call(arg_self_ref).is_ok() && !ahead.peek(Token![:]) { - return input.call(arg_self_ref).map(FnArg::SelfRef); - } - } - - if input.peek(Token![mut]) || input.peek(Token![self]) { - let ahead = input.fork(); - if ahead.call(arg_self).is_ok() && !ahead.peek(Token![:]) { - return input.call(arg_self).map(FnArg::SelfValue); - } - } - - let ahead = input.fork(); - let err = match ahead.call(arg_captured) { - Ok(_) => return input.call(arg_captured).map(FnArg::Captured), - Err(err) => err, - }; - - let ahead = input.fork(); - if ahead.parse::().is_ok() { - return input.parse().map(FnArg::Ignored); - } - - Err(err) - } - } - - fn arg_self_ref(input: ParseStream) -> Result { - Ok(ArgSelfRef { - and_token: input.parse()?, - lifetime: input.parse()?, - mutability: input.parse()?, - self_token: input.parse()?, - }) - } - - fn arg_self(input: ParseStream) -> Result { - Ok(ArgSelf { - mutability: input.parse()?, - self_token: input.parse()?, - }) - } - - fn arg_captured(input: ParseStream) -> Result { - Ok(ArgCaptured { - pat: input.parse()?, - colon_token: input.parse()?, - ty: match input.parse::() { - Ok(dot3) => { - let mut args = vec![ - TokenTree::Punct(Punct::new('.', Spacing::Joint)), - TokenTree::Punct(Punct::new('.', Spacing::Joint)), - TokenTree::Punct(Punct::new('.', Spacing::Alone)), - ]; - let tokens = TokenStream::from_iter(args.into_iter().zip(&dot3.spans).map( - |(mut arg, span)| { - arg.set_span(*span); - arg - }, - )); - Type::Verbatim(TypeVerbatim { tts: tokens }) - } - Err(_) => input.parse()?, - }, - }) - } - - impl Parse for ItemMod { - fn parse(input: ParseStream) -> Result { - let outer_attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - let mod_token: Token![mod] = input.parse()?; - let ident: Ident = input.parse()?; - - let lookahead = input.lookahead1(); - if lookahead.peek(Token![;]) { - Ok(ItemMod { - attrs: outer_attrs, - vis: vis, - mod_token: mod_token, - ident: ident, - content: None, - semi: Some(input.parse()?), - }) - } else if lookahead.peek(token::Brace) { - let content; - let brace_token = braced!(content in input); - let inner_attrs = content.call(Attribute::parse_inner)?; - - let mut items = Vec::new(); - while !content.is_empty() { - items.push(content.parse()?); - } - - Ok(ItemMod { - attrs: private::attrs(outer_attrs, inner_attrs), - vis: vis, - mod_token: mod_token, - ident: ident, - content: Some((brace_token, items)), - semi: None, - }) - } else { - Err(lookahead.error()) - } - } - } - - impl Parse for ItemForeignMod { - fn parse(input: ParseStream) -> Result { - let outer_attrs = 
input.call(Attribute::parse_outer)?; - let abi: Abi = input.parse()?; - - let content; - let brace_token = braced!(content in input); - let inner_attrs = content.call(Attribute::parse_inner)?; - let mut items = Vec::new(); - while !content.is_empty() { - items.push(content.parse()?); - } - - Ok(ItemForeignMod { - attrs: private::attrs(outer_attrs, inner_attrs), - abi: abi, - brace_token: brace_token, - items: items, - }) - } - } - - impl Parse for ForeignItem { - fn parse(input: ParseStream) -> Result { - let ahead = input.fork(); - ahead.call(Attribute::parse_outer)?; - let vis: Visibility = ahead.parse()?; - - let lookahead = ahead.lookahead1(); - if lookahead.peek(Token![fn]) { - input.parse().map(ForeignItem::Fn) - } else if lookahead.peek(Token![static]) { - input.parse().map(ForeignItem::Static) - } else if lookahead.peek(Token![type]) { - input.parse().map(ForeignItem::Type) - } else if vis.is_inherited() - && (lookahead.peek(Ident) - || lookahead.peek(Token![self]) - || lookahead.peek(Token![super]) - || lookahead.peek(Token![extern]) - || lookahead.peek(Token![crate]) - || lookahead.peek(Token![::])) - { - input.parse().map(ForeignItem::Macro) - } else { - Err(lookahead.error()) - } - } - } - - impl Parse for ForeignItemFn { - fn parse(input: ParseStream) -> Result { - let attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - let fn_token: Token![fn] = input.parse()?; - let ident: Ident = input.parse()?; - let generics: Generics = input.parse()?; - - let content; - let paren_token = parenthesized!(content in input); - let mut inputs = Punctuated::new(); - while !content.is_empty() && !content.peek(Token![...]) { - inputs.push_value(content.parse()?); - if content.is_empty() { - break; - } - inputs.push_punct(content.parse()?); - } - let variadic: Option = if inputs.empty_or_trailing() { - content.parse()? - } else { - None - }; - - let output: ReturnType = input.parse()?; - let where_clause: Option = input.parse()?; - let semi_token: Token![;] = input.parse()?; - - Ok(ForeignItemFn { - attrs: attrs, - vis: vis, - ident: ident, - decl: Box::new(FnDecl { - fn_token: fn_token, - paren_token: paren_token, - inputs: inputs, - output: output, - variadic: variadic, - generics: Generics { - where_clause: where_clause, - ..generics - }, - }), - semi_token: semi_token, - }) - } - } - - impl Parse for ForeignItemStatic { - fn parse(input: ParseStream) -> Result { - Ok(ForeignItemStatic { - attrs: input.call(Attribute::parse_outer)?, - vis: input.parse()?, - static_token: input.parse()?, - mutability: input.parse()?, - ident: input.parse()?, - colon_token: input.parse()?, - ty: input.parse()?, - semi_token: input.parse()?, - }) - } - } - - impl Parse for ForeignItemType { - fn parse(input: ParseStream) -> Result { - Ok(ForeignItemType { - attrs: input.call(Attribute::parse_outer)?, - vis: input.parse()?, - type_token: input.parse()?, - ident: input.parse()?, - semi_token: input.parse()?, - }) - } - } - - impl Parse for ForeignItemMacro { - fn parse(input: ParseStream) -> Result { - let attrs = input.call(Attribute::parse_outer)?; - let mac: Macro = input.parse()?; - let semi_token: Option = if mac.delimiter.is_brace() { - None - } else { - Some(input.parse()?) 
- }; - Ok(ForeignItemMacro { - attrs: attrs, - mac: mac, - semi_token: semi_token, - }) - } - } - - impl Parse for ItemType { - fn parse(input: ParseStream) -> Result { - Ok(ItemType { - attrs: input.call(Attribute::parse_outer)?, - vis: input.parse()?, - type_token: input.parse()?, - ident: input.parse()?, - generics: { - let mut generics: Generics = input.parse()?; - generics.where_clause = input.parse()?; - generics - }, - eq_token: input.parse()?, - ty: input.parse()?, - semi_token: input.parse()?, - }) - } - } - - impl Parse for ItemExistential { - fn parse(input: ParseStream) -> Result { - Ok(ItemExistential { - attrs: input.call(Attribute::parse_outer)?, - vis: input.parse()?, - existential_token: input.parse()?, - type_token: input.parse()?, - ident: input.parse()?, - generics: { - let mut generics: Generics = input.parse()?; - generics.where_clause = input.parse()?; - generics - }, - colon_token: Some(input.parse()?), - bounds: { - let mut bounds = Punctuated::new(); - while !input.peek(Token![;]) { - if !bounds.is_empty() { - bounds.push_punct(input.parse()?); - } - bounds.push_value(input.parse()?); - } - bounds - }, - semi_token: input.parse()?, - }) - } - } - - impl Parse for ItemStruct { - fn parse(input: ParseStream) -> Result { - let attrs = input.call(Attribute::parse_outer)?; - let vis = input.parse::()?; - let struct_token = input.parse::()?; - let ident = input.parse::()?; - let generics = input.parse::()?; - let (where_clause, fields, semi_token) = derive::parsing::data_struct(input)?; - Ok(ItemStruct { - attrs: attrs, - vis: vis, - struct_token: struct_token, - ident: ident, - generics: Generics { - where_clause: where_clause, - ..generics - }, - fields: fields, - semi_token: semi_token, - }) - } - } - - impl Parse for ItemEnum { - fn parse(input: ParseStream) -> Result { - let attrs = input.call(Attribute::parse_outer)?; - let vis = input.parse::()?; - let enum_token = input.parse::()?; - let ident = input.parse::()?; - let generics = input.parse::()?; - let (where_clause, brace_token, variants) = derive::parsing::data_enum(input)?; - Ok(ItemEnum { - attrs: attrs, - vis: vis, - enum_token: enum_token, - ident: ident, - generics: Generics { - where_clause: where_clause, - ..generics - }, - brace_token: brace_token, - variants: variants, - }) - } - } - - impl Parse for ItemUnion { - fn parse(input: ParseStream) -> Result { - let attrs = input.call(Attribute::parse_outer)?; - let vis = input.parse::()?; - let union_token = input.parse::()?; - let ident = input.parse::()?; - let generics = input.parse::()?; - let (where_clause, fields) = derive::parsing::data_union(input)?; - Ok(ItemUnion { - attrs: attrs, - vis: vis, - union_token: union_token, - ident: ident, - generics: Generics { - where_clause: where_clause, - ..generics - }, - fields: fields, - }) - } - } - - fn parse_trait_or_trait_alias(input: ParseStream) -> Result { - let (attrs, vis, trait_token, ident, generics) = parse_start_of_trait_alias(input)?; - let lookahead = input.lookahead1(); - if lookahead.peek(token::Brace) - || lookahead.peek(Token![:]) - || lookahead.peek(Token![where]) - { - let unsafety = None; - let auto_token = None; - parse_rest_of_trait( - input, - attrs, - vis, - unsafety, - auto_token, - trait_token, - ident, - generics, - ) - .map(Item::Trait) - } else if lookahead.peek(Token![=]) { - parse_rest_of_trait_alias(input, attrs, vis, trait_token, ident, generics) - .map(Item::TraitAlias) - } else { - Err(lookahead.error()) - } - } - - impl Parse for ItemTrait { - fn parse(input: 
ParseStream) -> Result { - let attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - let unsafety: Option = input.parse()?; - let auto_token: Option = input.parse()?; - let trait_token: Token![trait] = input.parse()?; - let ident: Ident = input.parse()?; - let generics: Generics = input.parse()?; - parse_rest_of_trait( - input, - attrs, - vis, - unsafety, - auto_token, - trait_token, - ident, - generics, - ) - } - } - - fn parse_rest_of_trait( - input: ParseStream, - attrs: Vec, - vis: Visibility, - unsafety: Option, - auto_token: Option, - trait_token: Token![trait], - ident: Ident, - mut generics: Generics, - ) -> Result { - let colon_token: Option = input.parse()?; - - let mut supertraits = Punctuated::new(); - if colon_token.is_some() { - loop { - supertraits.push_value(input.parse()?); - if input.peek(Token![where]) || input.peek(token::Brace) { - break; - } - supertraits.push_punct(input.parse()?); - if input.peek(Token![where]) || input.peek(token::Brace) { - break; - } - } - } - - generics.where_clause = input.parse()?; - - let content; - let brace_token = braced!(content in input); - let mut items = Vec::new(); - while !content.is_empty() { - items.push(content.parse()?); - } - - Ok(ItemTrait { - attrs: attrs, - vis: vis, - unsafety: unsafety, - auto_token: auto_token, - trait_token: trait_token, - ident: ident, - generics: generics, - colon_token: colon_token, - supertraits: supertraits, - brace_token: brace_token, - items: items, - }) - } - - impl Parse for ItemTraitAlias { - fn parse(input: ParseStream) -> Result { - let (attrs, vis, trait_token, ident, generics) = parse_start_of_trait_alias(input)?; - parse_rest_of_trait_alias(input, attrs, vis, trait_token, ident, generics) - } - } - - fn parse_start_of_trait_alias( - input: ParseStream, - ) -> Result<(Vec, Visibility, Token![trait], Ident, Generics)> { - let attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - let trait_token: Token![trait] = input.parse()?; - let ident: Ident = input.parse()?; - let generics: Generics = input.parse()?; - Ok((attrs, vis, trait_token, ident, generics)) - } - - fn parse_rest_of_trait_alias( - input: ParseStream, - attrs: Vec, - vis: Visibility, - trait_token: Token![trait], - ident: Ident, - mut generics: Generics, - ) -> Result { - let eq_token: Token![=] = input.parse()?; - - let mut bounds = Punctuated::new(); - loop { - if input.peek(Token![where]) || input.peek(Token![;]) { - break; - } - bounds.push_value(input.parse()?); - if input.peek(Token![where]) || input.peek(Token![;]) { - break; - } - bounds.push_punct(input.parse()?); - } - - generics.where_clause = input.parse()?; - let semi_token: Token![;] = input.parse()?; - - Ok(ItemTraitAlias { - attrs: attrs, - vis: vis, - trait_token: trait_token, - ident: ident, - generics: generics, - eq_token: eq_token, - bounds: bounds, - semi_token: semi_token, - }) - } - - impl Parse for TraitItem { - fn parse(input: ParseStream) -> Result { - let ahead = input.fork(); - ahead.call(Attribute::parse_outer)?; - - let lookahead = ahead.lookahead1(); - if lookahead.peek(Token![const]) { - ahead.parse::()?; - let lookahead = ahead.lookahead1(); - if lookahead.peek(Ident) { - input.parse().map(TraitItem::Const) - } else if lookahead.peek(Token![unsafe]) - || lookahead.peek(Token![extern]) - || lookahead.peek(Token![fn]) - { - input.parse().map(TraitItem::Method) - } else { - Err(lookahead.error()) - } - } else if lookahead.peek(Token![unsafe]) - || lookahead.peek(Token![extern]) - || 
lookahead.peek(Token![fn]) - { - input.parse().map(TraitItem::Method) - } else if lookahead.peek(Token![type]) { - input.parse().map(TraitItem::Type) - } else if lookahead.peek(Ident) - || lookahead.peek(Token![self]) - || lookahead.peek(Token![super]) - || lookahead.peek(Token![extern]) - || lookahead.peek(Token![crate]) - || lookahead.peek(Token![::]) - { - input.parse().map(TraitItem::Macro) - } else { - Err(lookahead.error()) - } - } - } - - impl Parse for TraitItemConst { - fn parse(input: ParseStream) -> Result { - Ok(TraitItemConst { - attrs: input.call(Attribute::parse_outer)?, - const_token: input.parse()?, - ident: input.parse()?, - colon_token: input.parse()?, - ty: input.parse()?, - default: { - if input.peek(Token![=]) { - let eq_token: Token![=] = input.parse()?; - let default: Expr = input.parse()?; - Some((eq_token, default)) - } else { - None - } - }, - semi_token: input.parse()?, - }) - } - } - - impl Parse for TraitItemMethod { - fn parse(input: ParseStream) -> Result { - let outer_attrs = input.call(Attribute::parse_outer)?; - let constness: Option = input.parse()?; - let unsafety: Option = input.parse()?; - let abi: Option = input.parse()?; - let fn_token: Token![fn] = input.parse()?; - let ident: Ident = input.parse()?; - let generics: Generics = input.parse()?; - - let content; - let paren_token = parenthesized!(content in input); - let inputs = content.parse_terminated(FnArg::parse)?; - - let output: ReturnType = input.parse()?; - let where_clause: Option = input.parse()?; - - let lookahead = input.lookahead1(); - let (brace_token, inner_attrs, stmts, semi_token) = if lookahead.peek(token::Brace) { - let content; - let brace_token = braced!(content in input); - let inner_attrs = content.call(Attribute::parse_inner)?; - let stmts = content.call(Block::parse_within)?; - (Some(brace_token), inner_attrs, stmts, None) - } else if lookahead.peek(Token![;]) { - let semi_token: Token![;] = input.parse()?; - (None, Vec::new(), Vec::new(), Some(semi_token)) - } else { - return Err(lookahead.error()); - }; - - Ok(TraitItemMethod { - attrs: private::attrs(outer_attrs, inner_attrs), - sig: MethodSig { - constness: constness, - unsafety: unsafety, - asyncness: None, - abi: abi, - ident: ident, - decl: FnDecl { - fn_token: fn_token, - paren_token: paren_token, - inputs: inputs, - output: output, - variadic: None, - generics: Generics { - where_clause: where_clause, - ..generics - }, - }, - }, - default: brace_token.map(|brace_token| Block { - brace_token: brace_token, - stmts: stmts, - }), - semi_token: semi_token, - }) - } - } - - impl Parse for TraitItemType { - fn parse(input: ParseStream) -> Result { - let attrs = input.call(Attribute::parse_outer)?; - let type_token: Token![type] = input.parse()?; - let ident: Ident = input.parse()?; - let mut generics: Generics = input.parse()?; - let colon_token: Option = input.parse()?; - - let mut bounds = Punctuated::new(); - if colon_token.is_some() { - while !input.peek(Token![where]) && !input.peek(Token![=]) && !input.peek(Token![;]) - { - if !bounds.is_empty() { - bounds.push_punct(input.parse()?); - } - bounds.push_value(input.parse()?); - } - } - - generics.where_clause = input.parse()?; - let default = if input.peek(Token![=]) { - let eq_token: Token![=] = input.parse()?; - let default: Type = input.parse()?; - Some((eq_token, default)) - } else { - None - }; - let semi_token: Token![;] = input.parse()?; - - Ok(TraitItemType { - attrs: attrs, - type_token: type_token, - ident: ident, - generics: generics, - colon_token: 
colon_token, - bounds: bounds, - default: default, - semi_token: semi_token, - }) - } - } - - impl Parse for TraitItemMacro { - fn parse(input: ParseStream) -> Result { - let attrs = input.call(Attribute::parse_outer)?; - let mac: Macro = input.parse()?; - let semi_token: Option = if mac.delimiter.is_brace() { - None - } else { - Some(input.parse()?) - }; - Ok(TraitItemMacro { - attrs: attrs, - mac: mac, - semi_token: semi_token, - }) - } - } - - impl Parse for ItemImpl { - fn parse(input: ParseStream) -> Result { - let outer_attrs = input.call(Attribute::parse_outer)?; - let defaultness: Option = input.parse()?; - let unsafety: Option = input.parse()?; - let impl_token: Token![impl ] = input.parse()?; - - let has_generics = input.peek(Token![<]) - && (input.peek2(Token![>]) - || input.peek2(Token![#]) - || (input.peek2(Ident) || input.peek2(Lifetime)) - && (input.peek3(Token![:]) - || input.peek3(Token![,]) - || input.peek3(Token![>]))); - let generics: Generics = if has_generics { - input.parse()? - } else { - Generics::default() - }; - - let trait_ = { - let ahead = input.fork(); - if ahead.parse::>().is_ok() - && ahead.parse::().is_ok() - && ahead.parse::().is_ok() - { - let polarity: Option = input.parse()?; - let path: Path = input.parse()?; - let for_token: Token![for] = input.parse()?; - Some((polarity, path, for_token)) - } else { - None - } - }; - let self_ty: Type = input.parse()?; - let where_clause: Option = input.parse()?; - - let content; - let brace_token = braced!(content in input); - let inner_attrs = content.call(Attribute::parse_inner)?; - - let mut items = Vec::new(); - while !content.is_empty() { - items.push(content.parse()?); - } - - Ok(ItemImpl { - attrs: private::attrs(outer_attrs, inner_attrs), - defaultness: defaultness, - unsafety: unsafety, - impl_token: impl_token, - generics: Generics { - where_clause: where_clause, - ..generics - }, - trait_: trait_, - self_ty: Box::new(self_ty), - brace_token: brace_token, - items: items, - }) - } - } - - impl Parse for ImplItem { - fn parse(input: ParseStream) -> Result { - let ahead = input.fork(); - ahead.call(Attribute::parse_outer)?; - let vis: Visibility = ahead.parse()?; - - let mut lookahead = ahead.lookahead1(); - let defaultness = if lookahead.peek(Token![default]) && !ahead.peek2(Token![!]) { - let defaultness: Token![default] = ahead.parse()?; - lookahead = ahead.lookahead1(); - Some(defaultness) - } else { - None - }; - - if lookahead.peek(Token![const]) { - ahead.parse::()?; - let lookahead = ahead.lookahead1(); - if lookahead.peek(Ident) { - input.parse().map(ImplItem::Const) - } else if lookahead.peek(Token![unsafe]) - || lookahead.peek(Token![async]) - || lookahead.peek(Token![extern]) - || lookahead.peek(Token![fn]) - { - input.parse().map(ImplItem::Method) - } else { - Err(lookahead.error()) - } - } else if lookahead.peek(Token![unsafe]) - || lookahead.peek(Token![async]) - || lookahead.peek(Token![extern]) - || lookahead.peek(Token![fn]) - { - input.parse().map(ImplItem::Method) - } else if lookahead.peek(Token![type]) { - input.parse().map(ImplItem::Type) - } else if vis.is_inherited() - && defaultness.is_none() - && lookahead.peek(Token![existential]) - { - input.parse().map(ImplItem::Existential) - } else if vis.is_inherited() - && defaultness.is_none() - && (lookahead.peek(Ident) - || lookahead.peek(Token![self]) - || lookahead.peek(Token![super]) - || lookahead.peek(Token![extern]) - || lookahead.peek(Token![crate]) - || lookahead.peek(Token![::])) - { - input.parse().map(ImplItem::Macro) - } else 
{ - Err(lookahead.error()) - } - } - } - - impl Parse for ImplItemConst { - fn parse(input: ParseStream) -> Result { - Ok(ImplItemConst { - attrs: input.call(Attribute::parse_outer)?, - vis: input.parse()?, - defaultness: input.parse()?, - const_token: input.parse()?, - ident: input.parse()?, - colon_token: input.parse()?, - ty: input.parse()?, - eq_token: input.parse()?, - expr: input.parse()?, - semi_token: input.parse()?, - }) - } - } - - impl Parse for ImplItemMethod { - fn parse(input: ParseStream) -> Result { - let outer_attrs = input.call(Attribute::parse_outer)?; - let vis: Visibility = input.parse()?; - let defaultness: Option = input.parse()?; - let constness: Option = input.parse()?; - let unsafety: Option = input.parse()?; - let asyncness: Option = input.parse()?; - let abi: Option = input.parse()?; - let fn_token: Token![fn] = input.parse()?; - let ident: Ident = input.parse()?; - let generics: Generics = input.parse()?; - - let content; - let paren_token = parenthesized!(content in input); - let inputs = content.parse_terminated(FnArg::parse)?; - - let output: ReturnType = input.parse()?; - let where_clause: Option = input.parse()?; - - let content; - let brace_token = braced!(content in input); - let inner_attrs = content.call(Attribute::parse_inner)?; - let stmts = content.call(Block::parse_within)?; - - Ok(ImplItemMethod { - attrs: private::attrs(outer_attrs, inner_attrs), - vis: vis, - defaultness: defaultness, - sig: MethodSig { - constness: constness, - unsafety: unsafety, - asyncness: asyncness, - abi: abi, - ident: ident, - decl: FnDecl { - fn_token: fn_token, - paren_token: paren_token, - inputs: inputs, - output: output, - variadic: None, - generics: Generics { - where_clause: where_clause, - ..generics - }, - }, - }, - block: Block { - brace_token: brace_token, - stmts: stmts, - }, - }) - } - } - - impl Parse for ImplItemType { - fn parse(input: ParseStream) -> Result { - Ok(ImplItemType { - attrs: input.call(Attribute::parse_outer)?, - vis: input.parse()?, - defaultness: input.parse()?, - type_token: input.parse()?, - ident: input.parse()?, - generics: { - let mut generics: Generics = input.parse()?; - generics.where_clause = input.parse()?; - generics - }, - eq_token: input.parse()?, - ty: input.parse()?, - semi_token: input.parse()?, - }) - } - } - - impl Parse for ImplItemExistential { - fn parse(input: ParseStream) -> Result { - let ety: ItemExistential = input.parse()?; - Ok(ImplItemExistential { - attrs: ety.attrs, - existential_token: ety.existential_token, - type_token: ety.type_token, - ident: ety.ident, - generics: ety.generics, - colon_token: ety.colon_token, - bounds: ety.bounds, - semi_token: ety.semi_token, - }) - } - } - - impl Parse for ImplItemMacro { - fn parse(input: ParseStream) -> Result { - let attrs = input.call(Attribute::parse_outer)?; - let mac: Macro = input.parse()?; - let semi_token: Option = if mac.delimiter.is_brace() { - None - } else { - Some(input.parse()?) 
- }; - Ok(ImplItemMacro { - attrs: attrs, - mac: mac, - semi_token: semi_token, - }) - } - } - - impl Visibility { - fn is_inherited(&self) -> bool { - match *self { - Visibility::Inherited => true, - _ => false, - } - } - } - - impl MacroDelimiter { - fn is_brace(&self) -> bool { - match *self { - MacroDelimiter::Brace(_) => true, - MacroDelimiter::Paren(_) | MacroDelimiter::Bracket(_) => false, - } - } - } -} - -#[cfg(feature = "printing")] -mod printing { - use super::*; - - use proc_macro2::TokenStream; - use quote::{ToTokens, TokenStreamExt}; - - use attr::FilterAttrs; - use print::TokensOrDefault; - - impl ToTokens for ItemExternCrate { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.extern_token.to_tokens(tokens); - self.crate_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - if let Some((ref as_token, ref rename)) = self.rename { - as_token.to_tokens(tokens); - rename.to_tokens(tokens); - } - self.semi_token.to_tokens(tokens); - } - } - - impl ToTokens for ItemUse { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.use_token.to_tokens(tokens); - self.leading_colon.to_tokens(tokens); - self.tree.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - impl ToTokens for ItemStatic { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.static_token.to_tokens(tokens); - self.mutability.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.colon_token.to_tokens(tokens); - self.ty.to_tokens(tokens); - self.eq_token.to_tokens(tokens); - self.expr.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - impl ToTokens for ItemConst { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.const_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.colon_token.to_tokens(tokens); - self.ty.to_tokens(tokens); - self.eq_token.to_tokens(tokens); - self.expr.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - impl ToTokens for ItemFn { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.constness.to_tokens(tokens); - self.unsafety.to_tokens(tokens); - self.asyncness.to_tokens(tokens); - self.abi.to_tokens(tokens); - NamedDecl(&self.decl, &self.ident).to_tokens(tokens); - self.block.brace_token.surround(tokens, |tokens| { - tokens.append_all(self.attrs.inner()); - tokens.append_all(&self.block.stmts); - }); - } - } - - impl ToTokens for ItemMod { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.mod_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - if let Some((ref brace, ref items)) = self.content { - brace.surround(tokens, |tokens| { - tokens.append_all(self.attrs.inner()); - tokens.append_all(items); - }); - } else { - TokensOrDefault(&self.semi).to_tokens(tokens); - } - } - } - - impl ToTokens for ItemForeignMod { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.abi.to_tokens(tokens); - self.brace_token.surround(tokens, |tokens| { - tokens.append_all(self.attrs.inner()); - tokens.append_all(&self.items); - }); - } - } - - impl ToTokens for ItemType { - fn to_tokens(&self, tokens: &mut TokenStream) { - 
tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.type_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - self.generics.where_clause.to_tokens(tokens); - self.eq_token.to_tokens(tokens); - self.ty.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - impl ToTokens for ItemExistential { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.existential_token.to_tokens(tokens); - self.type_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - self.generics.where_clause.to_tokens(tokens); - if !self.bounds.is_empty() { - TokensOrDefault(&self.colon_token).to_tokens(tokens); - self.bounds.to_tokens(tokens); - } - self.semi_token.to_tokens(tokens); - } - } - - impl ToTokens for ItemEnum { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.enum_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - self.generics.where_clause.to_tokens(tokens); - self.brace_token.surround(tokens, |tokens| { - self.variants.to_tokens(tokens); - }); - } - } - - impl ToTokens for ItemStruct { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.struct_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - match self.fields { - Fields::Named(ref fields) => { - self.generics.where_clause.to_tokens(tokens); - fields.to_tokens(tokens); - } - Fields::Unnamed(ref fields) => { - fields.to_tokens(tokens); - self.generics.where_clause.to_tokens(tokens); - TokensOrDefault(&self.semi_token).to_tokens(tokens); - } - Fields::Unit => { - self.generics.where_clause.to_tokens(tokens); - TokensOrDefault(&self.semi_token).to_tokens(tokens); - } - } - } - } - - impl ToTokens for ItemUnion { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.union_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - self.generics.where_clause.to_tokens(tokens); - self.fields.to_tokens(tokens); - } - } - - impl ToTokens for ItemTrait { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.unsafety.to_tokens(tokens); - self.auto_token.to_tokens(tokens); - self.trait_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - if !self.supertraits.is_empty() { - TokensOrDefault(&self.colon_token).to_tokens(tokens); - self.supertraits.to_tokens(tokens); - } - self.generics.where_clause.to_tokens(tokens); - self.brace_token.surround(tokens, |tokens| { - tokens.append_all(&self.items); - }); - } - } - - impl ToTokens for ItemTraitAlias { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.trait_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - self.eq_token.to_tokens(tokens); - self.bounds.to_tokens(tokens); - self.generics.where_clause.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - impl ToTokens for ItemImpl { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.defaultness.to_tokens(tokens); - self.unsafety.to_tokens(tokens); - 
self.impl_token.to_tokens(tokens); - self.generics.to_tokens(tokens); - if let Some((ref polarity, ref path, ref for_token)) = self.trait_ { - polarity.to_tokens(tokens); - path.to_tokens(tokens); - for_token.to_tokens(tokens); - } - self.self_ty.to_tokens(tokens); - self.generics.where_clause.to_tokens(tokens); - self.brace_token.surround(tokens, |tokens| { - tokens.append_all(self.attrs.inner()); - tokens.append_all(&self.items); - }); - } - } - - impl ToTokens for ItemMacro { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.mac.path.to_tokens(tokens); - self.mac.bang_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - match self.mac.delimiter { - MacroDelimiter::Paren(ref paren) => { - paren.surround(tokens, |tokens| self.mac.tts.to_tokens(tokens)); - } - MacroDelimiter::Brace(ref brace) => { - brace.surround(tokens, |tokens| self.mac.tts.to_tokens(tokens)); - } - MacroDelimiter::Bracket(ref bracket) => { - bracket.surround(tokens, |tokens| self.mac.tts.to_tokens(tokens)); - } - } - self.semi_token.to_tokens(tokens); - } - } - - impl ToTokens for ItemMacro2 { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.macro_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - - // Hack: see comment in impl Parse for ItemMacro2. - if self.args.to_string() != "$ $" { - self.paren_token.surround(tokens, |tokens| { - self.args.to_tokens(tokens); - }); - } - - self.brace_token.surround(tokens, |tokens| { - self.body.to_tokens(tokens); - }); - } - } - - impl ToTokens for ItemVerbatim { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.tts.to_tokens(tokens); - } - } - - impl ToTokens for UsePath { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.ident.to_tokens(tokens); - self.colon2_token.to_tokens(tokens); - self.tree.to_tokens(tokens); - } - } - - impl ToTokens for UseName { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.ident.to_tokens(tokens); - } - } - - impl ToTokens for UseRename { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.ident.to_tokens(tokens); - self.as_token.to_tokens(tokens); - self.rename.to_tokens(tokens); - } - } - - impl ToTokens for UseGlob { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.star_token.to_tokens(tokens); - } - } - - impl ToTokens for UseGroup { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.brace_token.surround(tokens, |tokens| { - self.items.to_tokens(tokens); - }); - } - } - - impl ToTokens for TraitItemConst { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.const_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.colon_token.to_tokens(tokens); - self.ty.to_tokens(tokens); - if let Some((ref eq_token, ref default)) = self.default { - eq_token.to_tokens(tokens); - default.to_tokens(tokens); - } - self.semi_token.to_tokens(tokens); - } - } - - impl ToTokens for TraitItemMethod { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.sig.to_tokens(tokens); - match self.default { - Some(ref block) => { - block.brace_token.surround(tokens, |tokens| { - tokens.append_all(self.attrs.inner()); - tokens.append_all(&block.stmts); - }); - } - None => { - TokensOrDefault(&self.semi_token).to_tokens(tokens); - } - } - } - } - - impl ToTokens for TraitItemType { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - 
self.type_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - if !self.bounds.is_empty() { - TokensOrDefault(&self.colon_token).to_tokens(tokens); - self.bounds.to_tokens(tokens); - } - self.generics.where_clause.to_tokens(tokens); - if let Some((ref eq_token, ref default)) = self.default { - eq_token.to_tokens(tokens); - default.to_tokens(tokens); - } - self.semi_token.to_tokens(tokens); - } - } - - impl ToTokens for TraitItemMacro { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.mac.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - impl ToTokens for TraitItemVerbatim { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.tts.to_tokens(tokens); - } - } - - impl ToTokens for ImplItemConst { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.defaultness.to_tokens(tokens); - self.const_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.colon_token.to_tokens(tokens); - self.ty.to_tokens(tokens); - self.eq_token.to_tokens(tokens); - self.expr.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - impl ToTokens for ImplItemMethod { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.defaultness.to_tokens(tokens); - self.sig.to_tokens(tokens); - self.block.brace_token.surround(tokens, |tokens| { - tokens.append_all(self.attrs.inner()); - tokens.append_all(&self.block.stmts); - }); - } - } - - impl ToTokens for ImplItemType { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.defaultness.to_tokens(tokens); - self.type_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - self.generics.where_clause.to_tokens(tokens); - self.eq_token.to_tokens(tokens); - self.ty.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - impl ToTokens for ImplItemExistential { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.existential_token.to_tokens(tokens); - self.type_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.generics.to_tokens(tokens); - self.generics.where_clause.to_tokens(tokens); - if !self.bounds.is_empty() { - TokensOrDefault(&self.colon_token).to_tokens(tokens); - self.bounds.to_tokens(tokens); - } - self.semi_token.to_tokens(tokens); - } - } - - impl ToTokens for ImplItemMacro { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.mac.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - impl ToTokens for ImplItemVerbatim { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.tts.to_tokens(tokens); - } - } - - impl ToTokens for ForeignItemFn { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - NamedDecl(&self.decl, &self.ident).to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - impl ToTokens for ForeignItemStatic { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.static_token.to_tokens(tokens); - self.mutability.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.colon_token.to_tokens(tokens); - self.ty.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - impl ToTokens for 
ForeignItemType { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.vis.to_tokens(tokens); - self.type_token.to_tokens(tokens); - self.ident.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - impl ToTokens for ForeignItemMacro { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.attrs.outer()); - self.mac.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - } - } - - impl ToTokens for ForeignItemVerbatim { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.tts.to_tokens(tokens); - } - } - - impl ToTokens for MethodSig { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.constness.to_tokens(tokens); - self.unsafety.to_tokens(tokens); - self.asyncness.to_tokens(tokens); - self.abi.to_tokens(tokens); - NamedDecl(&self.decl, &self.ident).to_tokens(tokens); - } - } - - struct NamedDecl<'a>(&'a FnDecl, &'a Ident); - - impl<'a> ToTokens for NamedDecl<'a> { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.0.fn_token.to_tokens(tokens); - self.1.to_tokens(tokens); - self.0.generics.to_tokens(tokens); - self.0.paren_token.surround(tokens, |tokens| { - self.0.inputs.to_tokens(tokens); - if self.0.variadic.is_some() && !self.0.inputs.empty_or_trailing() { - ::default().to_tokens(tokens); - } - self.0.variadic.to_tokens(tokens); - }); - self.0.output.to_tokens(tokens); - self.0.generics.where_clause.to_tokens(tokens); - } - } - - impl ToTokens for ArgSelfRef { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.and_token.to_tokens(tokens); - self.lifetime.to_tokens(tokens); - self.mutability.to_tokens(tokens); - self.self_token.to_tokens(tokens); - } - } - - impl ToTokens for ArgSelf { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.mutability.to_tokens(tokens); - self.self_token.to_tokens(tokens); - } - } - - impl ToTokens for ArgCaptured { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.pat.to_tokens(tokens); - self.colon_token.to_tokens(tokens); - self.ty.to_tokens(tokens); - } - } -} diff --git a/third_party/rust/syn-0.15.30/src/keyword.rs b/third_party/rust/syn-0.15.30/src/keyword.rs deleted file mode 100644 index 07ee1bd8e2..0000000000 --- a/third_party/rust/syn-0.15.30/src/keyword.rs +++ /dev/null @@ -1,245 +0,0 @@ -/// Define a type that supports parsing and printing a given identifier as if it -/// were a keyword. -/// -/// # Usage -/// -/// As a convention, it is recommended that this macro be invoked within a -/// module called `kw` or `keyword` and that the resulting parser be invoked -/// with a `kw::` or `keyword::` prefix. -/// -/// ```edition2018 -/// mod kw { -/// syn::custom_keyword!(whatever); -/// } -/// ``` -/// -/// The generated syntax tree node supports the following operations just like -/// any built-in keyword token. -/// -/// - [Peeking] — `input.peek(kw::whatever)` -/// -/// - [Parsing] — `input.parse::()?` -/// -/// - [Printing] — `quote!( ... #whatever_token ... )` -/// -/// - Construction from a [`Span`] — `let whatever_token = kw::whatever(sp)` -/// -/// - Field access to its span — `let sp = whatever_token.span` -/// -/// [Peeking]: parse/struct.ParseBuffer.html#method.peek -/// [Parsing]: parse/struct.ParseBuffer.html#method.parse -/// [Printing]: https://docs.rs/quote/0.6/quote/trait.ToTokens.html -/// [`Span`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.Span.html -/// -/// # Example -/// -/// This example parses input that looks like `bool = true` or `str = "value"`. 
-/// The key must be either the identifier `bool` or the identifier `str`. If -/// `bool`, the value may be either `true` or `false`. If `str`, the value may -/// be any string literal. -/// -/// The symbols `bool` and `str` are not reserved keywords in Rust so these are -/// not considered keywords in the `syn::token` module. Like any other -/// identifier that is not a keyword, these can be declared as custom keywords -/// by crates that need to use them as such. -/// -/// ```edition2018 -/// use syn::{LitBool, LitStr, Result, Token}; -/// use syn::parse::{Parse, ParseStream}; -/// -/// mod kw { -/// syn::custom_keyword!(bool); -/// syn::custom_keyword!(str); -/// } -/// -/// enum Argument { -/// Bool { -/// bool_token: kw::bool, -/// eq_token: Token![=], -/// value: LitBool, -/// }, -/// Str { -/// str_token: kw::str, -/// eq_token: Token![=], -/// value: LitStr, -/// }, -/// } -/// -/// impl Parse for Argument { -/// fn parse(input: ParseStream) -> Result { -/// let lookahead = input.lookahead1(); -/// if lookahead.peek(kw::bool) { -/// Ok(Argument::Bool { -/// bool_token: input.parse::()?, -/// eq_token: input.parse()?, -/// value: input.parse()?, -/// }) -/// } else if lookahead.peek(kw::str) { -/// Ok(Argument::Str { -/// str_token: input.parse::()?, -/// eq_token: input.parse()?, -/// value: input.parse()?, -/// }) -/// } else { -/// Err(lookahead.error()) -/// } -/// } -/// } -/// ``` -#[macro_export(local_inner_macros)] -macro_rules! custom_keyword { - ($ident:ident) => { - #[allow(non_camel_case_types)] - pub struct $ident { - pub span: $crate::export::Span, - } - - #[doc(hidden)] - #[allow(non_snake_case)] - pub fn $ident<__S: $crate::export::IntoSpans<[$crate::export::Span; 1]>>( - span: __S, - ) -> $ident { - $ident { - span: $crate::export::IntoSpans::into_spans(span)[0], - } - } - - impl $crate::export::Default for $ident { - fn default() -> Self { - $ident { - span: $crate::export::Span::call_site(), - } - } - } - - impl_parse_for_custom_keyword!($ident); - impl_to_tokens_for_custom_keyword!($ident); - impl_clone_for_custom_keyword!($ident); - impl_extra_traits_for_custom_keyword!($ident); - }; -} - -// Not public API. -#[cfg(feature = "parsing")] -#[doc(hidden)] -#[macro_export] -macro_rules! impl_parse_for_custom_keyword { - ($ident:ident) => { - // For peek. - impl $crate::token::CustomKeyword for $ident { - fn ident() -> &'static $crate::export::str { - stringify!($ident) - } - - fn display() -> &'static $crate::export::str { - concat!("`", stringify!($ident), "`") - } - } - - impl $crate::parse::Parse for $ident { - fn parse(input: $crate::parse::ParseStream) -> $crate::parse::Result<$ident> { - input.step(|cursor| { - if let $crate::export::Some((ident, rest)) = cursor.ident() { - if ident == stringify!($ident) { - return $crate::export::Ok(($ident { span: ident.span() }, rest)); - } - } - $crate::export::Err(cursor.error(concat!( - "expected `", - stringify!($ident), - "`" - ))) - }) - } - } - }; -} - -// Not public API. -#[cfg(not(feature = "parsing"))] -#[doc(hidden)] -#[macro_export] -macro_rules! impl_parse_for_custom_keyword { - ($ident:ident) => {}; -} - -// Not public API. -#[cfg(feature = "printing")] -#[doc(hidden)] -#[macro_export] -macro_rules! 
impl_to_tokens_for_custom_keyword { - ($ident:ident) => { - impl $crate::export::ToTokens for $ident { - fn to_tokens(&self, tokens: &mut $crate::export::TokenStream2) { - let ident = $crate::Ident::new(stringify!($ident), self.span); - $crate::export::TokenStreamExt::append(tokens, ident); - } - } - }; -} - -// Not public API. -#[cfg(not(feature = "printing"))] -#[doc(hidden)] -#[macro_export] -macro_rules! impl_to_tokens_for_custom_keyword { - ($ident:ident) => {}; -} - -// Not public API. -#[cfg(feature = "clone-impls")] -#[doc(hidden)] -#[macro_export] -macro_rules! impl_clone_for_custom_keyword { - ($ident:ident) => { - impl $crate::export::Copy for $ident {} - - impl $crate::export::Clone for $ident { - fn clone(&self) -> Self { - *self - } - } - }; -} - -// Not public API. -#[cfg(not(feature = "clone-impls"))] -#[doc(hidden)] -#[macro_export] -macro_rules! impl_clone_for_custom_keyword { - ($ident:ident) => {}; -} - -// Not public API. -#[cfg(feature = "extra-traits")] -#[doc(hidden)] -#[macro_export] -macro_rules! impl_extra_traits_for_custom_keyword { - ($ident:ident) => { - impl $crate::export::Debug for $ident { - fn fmt(&self, f: &mut $crate::export::Formatter) -> $crate::export::fmt::Result { - $crate::export::Formatter::write_str(f, stringify!($ident)) - } - } - - impl $crate::export::Eq for $ident {} - - impl $crate::export::PartialEq for $ident { - fn eq(&self, _other: &Self) -> $crate::export::bool { - true - } - } - - impl $crate::export::Hash for $ident { - fn hash<__H: $crate::export::Hasher>(&self, _state: &mut __H) {} - } - }; -} - -// Not public API. -#[cfg(not(feature = "extra-traits"))] -#[doc(hidden)] -#[macro_export] -macro_rules! impl_extra_traits_for_custom_keyword { - ($ident:ident) => {}; -} diff --git a/third_party/rust/syn-0.15.30/src/lib.rs b/third_party/rust/syn-0.15.30/src/lib.rs deleted file mode 100644 index 24f682e1c8..0000000000 --- a/third_party/rust/syn-0.15.30/src/lib.rs +++ /dev/null @@ -1,725 +0,0 @@ -//! Syn is a parsing library for parsing a stream of Rust tokens into a syntax -//! tree of Rust source code. -//! -//! Currently this library is geared toward use in Rust procedural macros, but -//! contains some APIs that may be useful more generally. -//! -//! - **Data structures** — Syn provides a complete syntax tree that can -//! represent any valid Rust source code. The syntax tree is rooted at -//! [`syn::File`] which represents a full source file, but there are other -//! entry points that may be useful to procedural macros including -//! [`syn::Item`], [`syn::Expr`] and [`syn::Type`]. -//! -//! - **Custom derives** — Of particular interest to custom derives is -//! [`syn::DeriveInput`] which is any of the three legal input items to a -//! derive macro. An example below shows using this type in a library that can -//! derive implementations of a trait of your own. -//! -//! - **Parsing** — Parsing in Syn is built around [parser functions] with the -//! signature `fn(ParseStream) -> Result`. Every syntax tree node defined -//! by Syn is individually parsable and may be used as a building block for -//! custom syntaxes, or you may dream up your own brand new syntax without -//! involving any of our syntax tree types. -//! -//! - **Location information** — Every token parsed by Syn is associated with a -//! `Span` that tracks line and column information back to the source of that -//! token. These spans allow a procedural macro to display detailed error -//! messages pointing to all the right places in the user's code. There is an -//! 
example of this below. -//! -//! - **Feature flags** — Functionality is aggressively feature gated so your -//! procedural macros enable only what they need, and do not pay in compile -//! time for all the rest. -//! -//! [`syn::File`]: struct.File.html -//! [`syn::Item`]: enum.Item.html -//! [`syn::Expr`]: enum.Expr.html -//! [`syn::Type`]: enum.Type.html -//! [`syn::DeriveInput`]: struct.DeriveInput.html -//! [parser functions]: parse/index.html -//! -//! *Version requirement: Syn supports any compiler version back to Rust's very -//! first support for procedural macros in Rust 1.15.0. Some features especially -//! around error reporting are only available in newer compilers or on the -//! nightly channel.* -//! -//! ## Example of a custom derive -//! -//! The canonical custom derive using Syn looks like this. We write an ordinary -//! Rust function tagged with a `proc_macro_derive` attribute and the name of -//! the trait we are deriving. Any time that derive appears in the user's code, -//! the Rust compiler passes their data structure as tokens into our macro. We -//! get to execute arbitrary Rust code to figure out what to do with those -//! tokens, then hand some tokens back to the compiler to compile into the -//! user's crate. -//! -//! [`TokenStream`]: https://doc.rust-lang.org/proc_macro/struct.TokenStream.html -//! -//! ```toml -//! [dependencies] -//! syn = "0.15" -//! quote = "0.6" -//! -//! [lib] -//! proc-macro = true -//! ``` -//! -//! ```edition2018 -//! extern crate proc_macro; -//! -//! use proc_macro::TokenStream; -//! use quote::quote; -//! use syn::{parse_macro_input, DeriveInput}; -//! -//! # const IGNORE_TOKENS: &str = stringify! { -//! #[proc_macro_derive(MyMacro)] -//! # }; -//! pub fn my_macro(input: TokenStream) -> TokenStream { -//! // Parse the input tokens into a syntax tree -//! let input = parse_macro_input!(input as DeriveInput); -//! -//! // Build the output, possibly using quasi-quotation -//! let expanded = quote! { -//! // ... -//! }; -//! -//! // Hand the output tokens back to the compiler -//! TokenStream::from(expanded) -//! } -//! ``` -//! -//! The [`heapsize`] example directory shows a complete working Macros 1.1 -//! implementation of a custom derive. It works on any Rust compiler 1.15+. -//! The example derives a `HeapSize` trait which computes an estimate of the -//! amount of heap memory owned by a value. -//! -//! [`heapsize`]: https://github.com/dtolnay/syn/tree/master/examples/heapsize -//! -//! ```edition2018 -//! pub trait HeapSize { -//! /// Total number of bytes of heap memory owned by `self`. -//! fn heap_size_of_children(&self) -> usize; -//! } -//! ``` -//! -//! The custom derive allows users to write `#[derive(HeapSize)]` on data -//! structures in their program. -//! -//! ```edition2018 -//! # const IGNORE_TOKENS: &str = stringify! { -//! #[derive(HeapSize)] -//! # }; -//! struct Demo<'a, T: ?Sized> { -//! a: Box, -//! b: u8, -//! c: &'a str, -//! d: String, -//! } -//! ``` -//! -//! ## Spans and error reporting -//! -//! The token-based procedural macro API provides great control over where the -//! compiler's error messages are displayed in user code. Consider the error the -//! user sees if one of their field types does not implement `HeapSize`. -//! -//! ```edition2018 -//! # const IGNORE_TOKENS: &str = stringify! { -//! #[derive(HeapSize)] -//! # }; -//! struct Broken { -//! ok: String, -//! bad: std::thread::Thread, -//! } -//! ``` -//! -//! By tracking span information all the way through the expansion of a -//! 
procedural macro as shown in the `heapsize` example, token-based macros in -//! Syn are able to trigger errors that directly pinpoint the source of the -//! problem. -//! -//! ```text -//! error[E0277]: the trait bound `std::thread::Thread: HeapSize` is not satisfied -//! --> src/main.rs:7:5 -//! | -//! 7 | bad: std::thread::Thread, -//! | ^^^^^^^^^^^^^^^^^^^^^^^^ the trait `HeapSize` is not implemented for `Thread` -//! ``` -//! -//! ## Parsing a custom syntax -//! -//! The [`lazy-static`] example directory shows the implementation of a -//! `functionlike!(...)` procedural macro in which the input tokens are parsed -//! using Syn's parsing API. -//! -//! [`lazy-static`]: https://github.com/dtolnay/syn/tree/master/examples/lazy-static -//! -//! The example reimplements the popular `lazy_static` crate from crates.io as a -//! procedural macro. -//! -//! ```edition2018 -//! # macro_rules! lazy_static { -//! # ($($tt:tt)*) => {} -//! # } -//! # -//! lazy_static! { -//! static ref USERNAME: Regex = Regex::new("^[a-z0-9_-]{3,16}$").unwrap(); -//! } -//! ``` -//! -//! The implementation shows how to trigger custom warnings and error messages -//! on the macro input. -//! -//! ```text -//! warning: come on, pick a more creative name -//! --> src/main.rs:10:16 -//! | -//! 10 | static ref FOO: String = "lazy_static".to_owned(); -//! | ^^^ -//! ``` -//! -//! ## Debugging -//! -//! When developing a procedural macro it can be helpful to look at what the -//! generated code looks like. Use `cargo rustc -- -Zunstable-options -//! --pretty=expanded` or the [`cargo expand`] subcommand. -//! -//! [`cargo expand`]: https://github.com/dtolnay/cargo-expand -//! -//! To show the expanded code for some crate that uses your procedural macro, -//! run `cargo expand` from that crate. To show the expanded code for one of -//! your own test cases, run `cargo expand --test the_test_case` where the last -//! argument is the name of the test file without the `.rs` extension. -//! -//! This write-up by Brandon W Maister discusses debugging in more detail: -//! [Debugging Rust's new Custom Derive system][debugging]. -//! -//! [debugging]: https://quodlibetor.github.io/posts/debugging-rusts-new-custom-derive-system/ -//! -//! ## Optional features -//! -//! Syn puts a lot of functionality behind optional features in order to -//! optimize compile time for the most common use cases. The following features -//! are available. -//! -//! - **`derive`** *(enabled by default)* — Data structures for representing the -//! possible input to a custom derive, including structs and enums and types. -//! - **`full`** — Data structures for representing the syntax tree of all valid -//! Rust source code, including items and expressions. -//! - **`parsing`** *(enabled by default)* — Ability to parse input tokens into -//! a syntax tree node of a chosen type. -//! - **`printing`** *(enabled by default)* — Ability to print a syntax tree -//! node as tokens of Rust source code. -//! - **`visit`** — Trait for traversing a syntax tree. -//! - **`visit-mut`** — Trait for traversing and mutating in place a syntax -//! tree. -//! - **`fold`** — Trait for transforming an owned syntax tree. -//! - **`clone-impls`** *(enabled by default)* — Clone impls for all syntax tree -//! types. -//! - **`extra-traits`** — Debug, Eq, PartialEq, Hash impls for all syntax tree -//! types. -//! - **`proc-macro`** *(enabled by default)* — Runtime dependency on the -//! dynamic library libproc_macro from rustc toolchain. 
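The deleted module documentation above walks through syn's custom-derive workflow and its feature flags. As an illustration only (not part of the vendored sources or of this diff), here is a minimal sketch of the parsing entry point that documentation describes, assuming a downstream crate that depends on syn 0.15 with the default `derive` and `parsing` features:

```rust
// Illustrative sketch; assumes syn 0.15 with the default "derive" and
// "parsing" features. The struct text below is a made-up example input.
use syn::DeriveInput;

fn main() {
    // Parse a struct definition into the same syntax tree type that a
    // custom derive receives from the compiler.
    let input: DeriveInput =
        syn::parse_str("struct Demo { a: u8, b: String }").unwrap();

    // The parsed identifier carries span information back to the source text.
    assert_eq!(input.ident.to_string(), "Demo");
}
```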
- -// Syn types in rustdoc of other crates get linked to here. -#![doc(html_root_url = "https://docs.rs/syn/0.15.30")] -#![cfg_attr(feature = "cargo-clippy", allow(renamed_and_removed_lints))] -#![cfg_attr(feature = "cargo-clippy", deny(clippy, clippy_pedantic))] -// Ignored clippy lints. -#![cfg_attr( - feature = "cargo-clippy", - allow( - block_in_if_condition_stmt, - cognitive_complexity, - const_static_lifetime, - deprecated_cfg_attr, - doc_markdown, - eval_order_dependence, - large_enum_variant, - needless_pass_by_value, - never_loop, - redundant_field_names, - too_many_arguments, - ) -)] -// Ignored clippy_pedantic lints. -#![cfg_attr( - feature = "cargo-clippy", - allow( - cast_possible_truncation, - cast_possible_wrap, - empty_enum, - if_not_else, - items_after_statements, - module_name_repetitions, - shadow_unrelated, - similar_names, - single_match_else, - unseparated_literal_suffix, - use_self, - used_underscore_binding, - ) -)] - -#[cfg(all( - not(all(target_arch = "wasm32", target_os = "unknown")), - feature = "proc-macro" -))] -extern crate proc_macro; -extern crate proc_macro2; -extern crate unicode_xid; - -#[cfg(feature = "printing")] -extern crate quote; - -#[macro_use] -mod macros; - -// Not public API. -#[cfg(feature = "parsing")] -#[doc(hidden)] -#[macro_use] -pub mod group; - -#[macro_use] -pub mod token; - -mod ident; -pub use ident::Ident; - -#[cfg(any(feature = "full", feature = "derive"))] -mod attr; -#[cfg(any(feature = "full", feature = "derive"))] -pub use attr::{AttrStyle, Attribute, AttributeArgs, Meta, MetaList, MetaNameValue, NestedMeta}; - -#[cfg(any(feature = "full", feature = "derive"))] -mod data; -#[cfg(any(feature = "full", feature = "derive"))] -pub use data::{ - Field, Fields, FieldsNamed, FieldsUnnamed, Variant, VisCrate, VisPublic, VisRestricted, - Visibility, -}; - -#[cfg(any(feature = "full", feature = "derive"))] -mod expr; -#[cfg(any(feature = "full", feature = "derive"))] -pub use expr::{ - Expr, ExprArray, ExprAssign, ExprAssignOp, ExprAsync, ExprBinary, ExprBlock, ExprBox, - ExprBreak, ExprCall, ExprCast, ExprClosure, ExprContinue, ExprField, ExprForLoop, ExprGroup, - ExprIf, ExprInPlace, ExprIndex, ExprLet, ExprLit, ExprLoop, ExprMacro, ExprMatch, - ExprMethodCall, ExprParen, ExprPath, ExprRange, ExprReference, ExprRepeat, ExprReturn, - ExprStruct, ExprTry, ExprTryBlock, ExprTuple, ExprType, ExprUnary, ExprUnsafe, ExprVerbatim, - ExprWhile, ExprYield, Index, Member, -}; - -#[cfg(feature = "full")] -pub use expr::{ - Arm, Block, FieldPat, FieldValue, GenericMethodArgument, Label, Local, MethodTurbofish, Pat, - PatBox, PatIdent, PatLit, PatMacro, PatPath, PatRange, PatRef, PatSlice, PatStruct, PatTuple, - PatTupleStruct, PatVerbatim, PatWild, RangeLimits, Stmt, -}; - -#[cfg(any(feature = "full", feature = "derive"))] -mod generics; -#[cfg(any(feature = "full", feature = "derive"))] -pub use generics::{ - BoundLifetimes, ConstParam, GenericParam, Generics, LifetimeDef, PredicateEq, - PredicateLifetime, PredicateType, TraitBound, TraitBoundModifier, TypeParam, TypeParamBound, - WhereClause, WherePredicate, -}; -#[cfg(all(any(feature = "full", feature = "derive"), feature = "printing"))] -pub use generics::{ImplGenerics, Turbofish, TypeGenerics}; - -#[cfg(feature = "full")] -mod item; -#[cfg(feature = "full")] -pub use item::{ - ArgCaptured, ArgSelf, ArgSelfRef, FnArg, FnDecl, ForeignItem, ForeignItemFn, ForeignItemMacro, - ForeignItemStatic, ForeignItemType, ForeignItemVerbatim, ImplItem, ImplItemConst, - ImplItemExistential, ImplItemMacro, 
ImplItemMethod, ImplItemType, ImplItemVerbatim, Item, - ItemConst, ItemEnum, ItemExistential, ItemExternCrate, ItemFn, ItemForeignMod, ItemImpl, - ItemMacro, ItemMacro2, ItemMod, ItemStatic, ItemStruct, ItemTrait, ItemTraitAlias, ItemType, - ItemUnion, ItemUse, ItemVerbatim, MethodSig, TraitItem, TraitItemConst, TraitItemMacro, - TraitItemMethod, TraitItemType, TraitItemVerbatim, UseGlob, UseGroup, UseName, UsePath, - UseRename, UseTree, -}; - -#[cfg(feature = "full")] -mod file; -#[cfg(feature = "full")] -pub use file::File; - -mod lifetime; -pub use lifetime::Lifetime; - -#[cfg(any(feature = "full", feature = "derive"))] -mod lit; -#[cfg(any(feature = "full", feature = "derive"))] -pub use lit::{ - FloatSuffix, IntSuffix, Lit, LitBool, LitByte, LitByteStr, LitChar, LitFloat, LitInt, LitStr, - LitVerbatim, StrStyle, -}; - -#[cfg(any(feature = "full", feature = "derive"))] -mod mac; -#[cfg(any(feature = "full", feature = "derive"))] -pub use mac::{Macro, MacroDelimiter}; - -#[cfg(any(feature = "full", feature = "derive"))] -mod derive; -#[cfg(feature = "derive")] -pub use derive::{Data, DataEnum, DataStruct, DataUnion, DeriveInput}; - -#[cfg(any(feature = "full", feature = "derive"))] -mod op; -#[cfg(any(feature = "full", feature = "derive"))] -pub use op::{BinOp, UnOp}; - -#[cfg(any(feature = "full", feature = "derive"))] -mod ty; -#[cfg(any(feature = "full", feature = "derive"))] -pub use ty::{ - Abi, BareFnArg, BareFnArgName, ReturnType, Type, TypeArray, TypeBareFn, TypeGroup, - TypeImplTrait, TypeInfer, TypeMacro, TypeNever, TypeParen, TypePath, TypePtr, TypeReference, - TypeSlice, TypeTraitObject, TypeTuple, TypeVerbatim, -}; - -#[cfg(any(feature = "full", feature = "derive"))] -mod path; -#[cfg(any(feature = "full", feature = "derive"))] -pub use path::{ - AngleBracketedGenericArguments, Binding, Constraint, GenericArgument, - ParenthesizedGenericArguments, Path, PathArguments, PathSegment, QSelf, -}; - -#[cfg(feature = "parsing")] -pub mod buffer; -#[cfg(feature = "parsing")] -pub mod ext; -pub mod punctuated; -#[cfg(all(any(feature = "full", feature = "derive"), feature = "extra-traits"))] -mod tt; - -// Not public API except the `parse_quote!` macro. -#[cfg(feature = "parsing")] -#[doc(hidden)] -pub mod parse_quote; - -// Not public API except the `parse_macro_input!` macro. -#[cfg(all( - not(all(target_arch = "wasm32", target_os = "unknown")), - feature = "parsing", - feature = "proc-macro" -))] -#[doc(hidden)] -pub mod parse_macro_input; - -#[cfg(all(feature = "parsing", feature = "printing"))] -pub mod spanned; - -mod gen { - /// Syntax tree traversal to walk a shared borrow of a syntax tree. - /// - /// Each method of the [`Visit`] trait is a hook that can be overridden to - /// customize the behavior when visiting the corresponding type of node. By - /// default, every method recursively visits the substructure of the input - /// by invoking the right visitor method of each of its fields. - /// - /// [`Visit`]: trait.Visit.html - /// - /// ```edition2018 - /// # use syn::{Attribute, BinOp, Expr, ExprBinary}; - /// # - /// pub trait Visit<'ast> { - /// /* ... */ - /// - /// fn visit_expr_binary(&mut self, node: &'ast ExprBinary) { - /// for attr in &node.attrs { - /// self.visit_attribute(attr); - /// } - /// self.visit_expr(&*node.left); - /// self.visit_bin_op(&node.op); - /// self.visit_expr(&*node.right); - /// } - /// - /// /* ... 
*/ - /// # fn visit_attribute(&mut self, node: &'ast Attribute); - /// # fn visit_expr(&mut self, node: &'ast Expr); - /// # fn visit_bin_op(&mut self, node: &'ast BinOp); - /// } - /// ``` - /// - /// *This module is available if Syn is built with the `"visit"` feature.* - #[cfg(feature = "visit")] - pub mod visit; - - /// Syntax tree traversal to mutate an exclusive borrow of a syntax tree in - /// place. - /// - /// Each method of the [`VisitMut`] trait is a hook that can be overridden - /// to customize the behavior when mutating the corresponding type of node. - /// By default, every method recursively visits the substructure of the - /// input by invoking the right visitor method of each of its fields. - /// - /// [`VisitMut`]: trait.VisitMut.html - /// - /// ```edition2018 - /// # use syn::{Attribute, BinOp, Expr, ExprBinary}; - /// # - /// pub trait VisitMut { - /// /* ... */ - /// - /// fn visit_expr_binary_mut(&mut self, node: &mut ExprBinary) { - /// for attr in &mut node.attrs { - /// self.visit_attribute_mut(attr); - /// } - /// self.visit_expr_mut(&mut *node.left); - /// self.visit_bin_op_mut(&mut node.op); - /// self.visit_expr_mut(&mut *node.right); - /// } - /// - /// /* ... */ - /// # fn visit_attribute_mut(&mut self, node: &mut Attribute); - /// # fn visit_expr_mut(&mut self, node: &mut Expr); - /// # fn visit_bin_op_mut(&mut self, node: &mut BinOp); - /// } - /// ``` - /// - /// *This module is available if Syn is built with the `"visit-mut"` - /// feature.* - #[cfg(feature = "visit-mut")] - pub mod visit_mut; - - /// Syntax tree traversal to transform the nodes of an owned syntax tree. - /// - /// Each method of the [`Fold`] trait is a hook that can be overridden to - /// customize the behavior when transforming the corresponding type of node. - /// By default, every method recursively visits the substructure of the - /// input by invoking the right visitor method of each of its fields. - /// - /// [`Fold`]: trait.Fold.html - /// - /// ```edition2018 - /// # use syn::{Attribute, BinOp, Expr, ExprBinary}; - /// # - /// pub trait Fold { - /// /* ... */ - /// - /// fn fold_expr_binary(&mut self, node: ExprBinary) -> ExprBinary { - /// ExprBinary { - /// attrs: node.attrs - /// .into_iter() - /// .map(|attr| self.fold_attribute(attr)) - /// .collect(), - /// left: Box::new(self.fold_expr(*node.left)), - /// op: self.fold_bin_op(node.op), - /// right: Box::new(self.fold_expr(*node.right)), - /// } - /// } - /// - /// /* ... */ - /// # fn fold_attribute(&mut self, node: Attribute) -> Attribute; - /// # fn fold_expr(&mut self, node: Expr) -> Expr; - /// # fn fold_bin_op(&mut self, node: BinOp) -> BinOp; - /// } - /// ``` - /// - /// *This module is available if Syn is built with the `"fold"` feature.* - #[cfg(feature = "fold")] - pub mod fold; - - #[cfg(any(feature = "full", feature = "derive"))] - #[path = "../gen_helper.rs"] - mod helper; -} -pub use gen::*; - -// Not public API. 
-#[doc(hidden)] -pub mod export; - -mod keyword; - -#[cfg(feature = "parsing")] -mod lookahead; - -#[cfg(feature = "parsing")] -pub mod parse; - -mod span; - -#[cfg(all(any(feature = "full", feature = "derive"), feature = "printing"))] -mod print; - -mod thread; - -//////////////////////////////////////////////////////////////////////////////// - -#[cfg(any(feature = "parsing", feature = "full", feature = "derive"))] -#[allow(non_camel_case_types)] -struct private; - -//////////////////////////////////////////////////////////////////////////////// - -mod error; -pub use error::{Error, Result}; - -/// Parse tokens of source code into the chosen syntax tree node. -/// -/// This is preferred over parsing a string because tokens are able to preserve -/// information about where in the user's code they were originally written (the -/// "span" of the token), possibly allowing the compiler to produce better error -/// messages. -/// -/// This function parses a `proc_macro::TokenStream` which is the type used for -/// interop with the compiler in a procedural macro. To parse a -/// `proc_macro2::TokenStream`, use [`syn::parse2`] instead. -/// -/// [`syn::parse2`]: fn.parse2.html -/// -/// *This function is available if Syn is built with both the `"parsing"` and -/// `"proc-macro"` features.* -/// -/// # Examples -/// -/// ```edition2018 -/// extern crate proc_macro; -/// -/// use proc_macro::TokenStream; -/// use quote::quote; -/// use syn::DeriveInput; -/// -/// # const IGNORE_TOKENS: &str = stringify! { -/// #[proc_macro_derive(MyMacro)] -/// # }; -/// pub fn my_macro(input: TokenStream) -> TokenStream { -/// // Parse the tokens into a syntax tree -/// let ast: DeriveInput = syn::parse(input).unwrap(); -/// -/// // Build the output, possibly using quasi-quotation -/// let expanded = quote! { -/// /* ... */ -/// }; -/// -/// // Convert into a token stream and return it -/// expanded.into() -/// } -/// ``` -#[cfg(all( - not(all(target_arch = "wasm32", target_os = "unknown")), - feature = "parsing", - feature = "proc-macro" -))] -pub fn parse(tokens: proc_macro::TokenStream) -> Result { - parse::Parser::parse(T::parse, tokens) -} - -/// Parse a proc-macro2 token stream into the chosen syntax tree node. -/// -/// This function parses a `proc_macro2::TokenStream` which is commonly useful -/// when the input comes from a node of the Syn syntax tree, for example the tts -/// of a [`Macro`] node. When in a procedural macro parsing the -/// `proc_macro::TokenStream` provided by the compiler, use [`syn::parse`] -/// instead. -/// -/// [`Macro`]: struct.Macro.html -/// [`syn::parse`]: fn.parse.html -/// -/// *This function is available if Syn is built with the `"parsing"` feature.* -#[cfg(feature = "parsing")] -pub fn parse2(tokens: proc_macro2::TokenStream) -> Result { - parse::Parser::parse2(T::parse, tokens) -} - -/// Parse a string of Rust code into the chosen syntax tree node. -/// -/// *This function is available if Syn is built with the `"parsing"` feature.* -/// -/// # Hygiene -/// -/// Every span in the resulting syntax tree will be set to resolve at the macro -/// call site. 
-/// -/// # Examples -/// -/// ```edition2018 -/// use syn::{Expr, Result}; -/// -/// fn run() -> Result<()> { -/// let code = "assert_eq!(u8::max_value(), 255)"; -/// let expr = syn::parse_str::(code)?; -/// println!("{:#?}", expr); -/// Ok(()) -/// } -/// # -/// # fn main() { -/// # run().unwrap(); -/// # } -/// ``` -#[cfg(feature = "parsing")] -pub fn parse_str(s: &str) -> Result { - parse::Parser::parse_str(T::parse, s) -} - -// FIXME the name parse_file makes it sound like you might pass in a path to a -// file, rather than the content. -/// Parse the content of a file of Rust code. -/// -/// This is different from `syn::parse_str::(content)` in two ways: -/// -/// - It discards a leading byte order mark `\u{FEFF}` if the file has one. -/// - It preserves the shebang line of the file, such as `#!/usr/bin/env rustx`. -/// -/// If present, either of these would be an error using `from_str`. -/// -/// *This function is available if Syn is built with the `"parsing"` and -/// `"full"` features.* -/// -/// # Examples -/// -/// ```edition2018,no_run -/// use std::error::Error; -/// use std::fs::File; -/// use std::io::Read; -/// -/// fn run() -> Result<(), Box> { -/// let mut file = File::open("path/to/code.rs")?; -/// let mut content = String::new(); -/// file.read_to_string(&mut content)?; -/// -/// let ast = syn::parse_file(&content)?; -/// if let Some(shebang) = ast.shebang { -/// println!("{}", shebang); -/// } -/// println!("{} items", ast.items.len()); -/// -/// Ok(()) -/// } -/// # -/// # fn main() { -/// # run().unwrap(); -/// # } -/// ``` -#[cfg(all(feature = "parsing", feature = "full"))] -pub fn parse_file(mut content: &str) -> Result { - // Strip the BOM if it is present - const BOM: &'static str = "\u{feff}"; - if content.starts_with(BOM) { - content = &content[BOM.len()..]; - } - - let mut shebang = None; - if content.starts_with("#!") && !content.starts_with("#![") { - if let Some(idx) = content.find('\n') { - shebang = Some(content[..idx].to_string()); - content = &content[idx..]; - } else { - shebang = Some(content.to_string()); - content = ""; - } - } - - let mut file: File = parse_str(content)?; - file.shebang = shebang; - Ok(file) -} diff --git a/third_party/rust/syn-0.15.30/src/lifetime.rs b/third_party/rust/syn-0.15.30/src/lifetime.rs deleted file mode 100644 index 461560c91c..0000000000 --- a/third_party/rust/syn-0.15.30/src/lifetime.rs +++ /dev/null @@ -1,155 +0,0 @@ -use std::cmp::Ordering; -use std::fmt::{self, Display}; -use std::hash::{Hash, Hasher}; - -use proc_macro2::{Ident, Span}; -use unicode_xid::UnicodeXID; - -#[cfg(feature = "parsing")] -use lookahead; - -/// A Rust lifetime: `'a`. -/// -/// Lifetime names must conform to the following rules: -/// -/// - Must start with an apostrophe. -/// - Must not consist of just an apostrophe: `'`. -/// - Character after the apostrophe must be `_` or a Unicode code point with -/// the XID_Start property. -/// - All following characters must be Unicode code points with the XID_Continue -/// property. -/// -/// *This type is available if Syn is built with the `"derive"` or `"full"` -/// feature.* -#[cfg_attr(feature = "extra-traits", derive(Debug))] -#[derive(Clone)] -pub struct Lifetime { - pub apostrophe: Span, - pub ident: Ident, -} - -impl Lifetime { - /// # Panics - /// - /// Panics if the lifetime does not conform to the bulleted rules above. 
- /// - /// # Invocation - /// - /// ```edition2018 - /// # use proc_macro2::Span; - /// # use syn::Lifetime; - /// # - /// # fn f() -> Lifetime { - /// Lifetime::new("'a", Span::call_site()) - /// # } - /// ``` - pub fn new(symbol: &str, span: Span) -> Self { - if !symbol.starts_with('\'') { - panic!( - "lifetime name must start with apostrophe as in \"'a\", got {:?}", - symbol - ); - } - - if symbol == "'" { - panic!("lifetime name must not be empty"); - } - - fn xid_ok(symbol: &str) -> bool { - let mut chars = symbol.chars(); - let first = chars.next().unwrap(); - if !(UnicodeXID::is_xid_start(first) || first == '_') { - return false; - } - for ch in chars { - if !UnicodeXID::is_xid_continue(ch) { - return false; - } - } - true - } - - if !xid_ok(&symbol[1..]) { - panic!("{:?} is not a valid lifetime name", symbol); - } - - Lifetime { - apostrophe: span, - ident: Ident::new(&symbol[1..], span), - } - } -} - -impl Display for Lifetime { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - "'".fmt(formatter)?; - self.ident.fmt(formatter) - } -} - -impl PartialEq for Lifetime { - fn eq(&self, other: &Lifetime) -> bool { - self.ident.eq(&other.ident) - } -} - -impl Eq for Lifetime {} - -impl PartialOrd for Lifetime { - fn partial_cmp(&self, other: &Lifetime) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for Lifetime { - fn cmp(&self, other: &Lifetime) -> Ordering { - self.ident.cmp(&other.ident) - } -} - -impl Hash for Lifetime { - fn hash(&self, h: &mut H) { - self.ident.hash(h) - } -} - -#[cfg(feature = "parsing")] -#[doc(hidden)] -#[allow(non_snake_case)] -pub fn Lifetime(marker: lookahead::TokenMarker) -> Lifetime { - match marker {} -} - -#[cfg(feature = "parsing")] -pub mod parsing { - use super::*; - - use parse::{Parse, ParseStream, Result}; - - impl Parse for Lifetime { - fn parse(input: ParseStream) -> Result { - input.step(|cursor| { - cursor - .lifetime() - .ok_or_else(|| cursor.error("expected lifetime")) - }) - } - } -} - -#[cfg(feature = "printing")] -mod printing { - use super::*; - - use proc_macro2::{Punct, Spacing, TokenStream}; - use quote::{ToTokens, TokenStreamExt}; - - impl ToTokens for Lifetime { - fn to_tokens(&self, tokens: &mut TokenStream) { - let mut apostrophe = Punct::new('\'', Spacing::Joint); - apostrophe.set_span(self.apostrophe); - tokens.append(apostrophe); - self.ident.to_tokens(tokens); - } - } -} diff --git a/third_party/rust/syn-0.15.30/src/lit.rs b/third_party/rust/syn-0.15.30/src/lit.rs deleted file mode 100644 index 3a66cb763f..0000000000 --- a/third_party/rust/syn-0.15.30/src/lit.rs +++ /dev/null @@ -1,1103 +0,0 @@ -use proc_macro2::{Literal, Span}; -use std::str; - -#[cfg(feature = "printing")] -use proc_macro2::Ident; - -#[cfg(feature = "parsing")] -use proc_macro2::TokenStream; - -use proc_macro2::TokenTree; - -#[cfg(feature = "extra-traits")] -use std::hash::{Hash, Hasher}; - -#[cfg(feature = "parsing")] -use lookahead; -#[cfg(feature = "parsing")] -use parse::{Parse, Parser, Result}; - -ast_enum_of_structs! { - /// A Rust literal such as a string or integer or boolean. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. - /// - /// [syntax tree enum]: enum.Expr.html#syntax-tree-enums - pub enum Lit { - /// A UTF-8 string literal: `"foo"`. 
- /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Str(LitStr #manual_extra_traits { - token: Literal, - }), - - /// A byte string literal: `b"foo"`. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub ByteStr(LitByteStr #manual_extra_traits { - token: Literal, - }), - - /// A byte literal: `b'f'`. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Byte(LitByte #manual_extra_traits { - token: Literal, - }), - - /// A character literal: `'a'`. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Char(LitChar #manual_extra_traits { - token: Literal, - }), - - /// An integer literal: `1` or `1u16`. - /// - /// Holds up to 64 bits of data. Use `LitVerbatim` for any larger - /// integer literal. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Int(LitInt #manual_extra_traits { - token: Literal, - }), - - /// A floating point literal: `1f64` or `1.0e10f64`. - /// - /// Must be finite. May not be infinte or NaN. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Float(LitFloat #manual_extra_traits { - token: Literal, - }), - - /// A boolean literal: `true` or `false`. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Bool(LitBool #manual_extra_traits { - pub value: bool, - pub span: Span, - }), - - /// A raw token literal not interpreted by Syn, possibly because it - /// represents an integer larger than 64 bits. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Verbatim(LitVerbatim #manual_extra_traits { - pub token: Literal, - }), - } -} - -impl LitStr { - pub fn new(value: &str, span: Span) -> Self { - let mut lit = Literal::string(value); - lit.set_span(span); - LitStr { token: lit } - } - - pub fn value(&self) -> String { - value::parse_lit_str(&self.token.to_string()) - } - - /// Parse a syntax tree node from the content of this string literal. - /// - /// All spans in the syntax tree will point to the span of this `LitStr`. - /// - /// # Example - /// - /// ```edition2018 - /// use proc_macro2::Span; - /// use syn::{Attribute, Error, Ident, Lit, Meta, MetaNameValue, Path, Result}; - /// - /// // Parses the path from an attribute that looks like: - /// // - /// // #[path = "a::b::c"] - /// // - /// // or returns `None` if the input is some other attribute. - /// fn get_path(attr: &Attribute) -> Result> { - /// if !attr.path.is_ident("path") { - /// return Ok(None); - /// } - /// - /// match attr.parse_meta()? { - /// Meta::NameValue(MetaNameValue { lit: Lit::Str(lit_str), .. }) => { - /// lit_str.parse().map(Some) - /// } - /// _ => { - /// let message = "expected #[path = \"...\"]"; - /// Err(Error::new_spanned(attr, message)) - /// } - /// } - /// } - /// ``` - #[cfg(feature = "parsing")] - pub fn parse(&self) -> Result { - self.parse_with(T::parse) - } - - /// Invoke parser on the content of this string literal. - /// - /// All spans in the syntax tree will point to the span of this `LitStr`. 
- /// - /// # Example - /// - /// ```edition2018 - /// # use proc_macro2::Span; - /// # use syn::{LitStr, Result}; - /// # - /// # fn main() -> Result<()> { - /// # let lit_str = LitStr::new("a::b::c", Span::call_site()); - /// # - /// # const IGNORE: &str = stringify! { - /// let lit_str: LitStr = /* ... */; - /// # }; - /// - /// // Parse a string literal like "a::b::c" into a Path, not allowing - /// // generic arguments on any of the path segments. - /// let basic_path = lit_str.parse_with(syn::Path::parse_mod_style)?; - /// # - /// # Ok(()) - /// # } - /// ``` - #[cfg(feature = "parsing")] - pub fn parse_with(&self, parser: F) -> Result { - use proc_macro2::Group; - - // Token stream with every span replaced by the given one. - fn respan_token_stream(stream: TokenStream, span: Span) -> TokenStream { - stream - .into_iter() - .map(|token| respan_token_tree(token, span)) - .collect() - } - - // Token tree with every span replaced by the given one. - fn respan_token_tree(mut token: TokenTree, span: Span) -> TokenTree { - match token { - TokenTree::Group(ref mut g) => { - let stream = respan_token_stream(g.stream().clone(), span); - *g = Group::new(g.delimiter(), stream); - g.set_span(span); - } - ref mut other => other.set_span(span), - } - token - } - - // Parse string literal into a token stream with every span equal to the - // original literal's span. - let mut tokens = ::parse_str(&self.value())?; - tokens = respan_token_stream(tokens, self.span()); - - parser.parse2(tokens) - } - - pub fn span(&self) -> Span { - self.token.span() - } - - pub fn set_span(&mut self, span: Span) { - self.token.set_span(span) - } -} - -impl LitByteStr { - pub fn new(value: &[u8], span: Span) -> Self { - let mut token = Literal::byte_string(value); - token.set_span(span); - LitByteStr { token: token } - } - - pub fn value(&self) -> Vec { - value::parse_lit_byte_str(&self.token.to_string()) - } - - pub fn span(&self) -> Span { - self.token.span() - } - - pub fn set_span(&mut self, span: Span) { - self.token.set_span(span) - } -} - -impl LitByte { - pub fn new(value: u8, span: Span) -> Self { - let mut token = Literal::u8_suffixed(value); - token.set_span(span); - LitByte { token: token } - } - - pub fn value(&self) -> u8 { - value::parse_lit_byte(&self.token.to_string()) - } - - pub fn span(&self) -> Span { - self.token.span() - } - - pub fn set_span(&mut self, span: Span) { - self.token.set_span(span) - } -} - -impl LitChar { - pub fn new(value: char, span: Span) -> Self { - let mut token = Literal::character(value); - token.set_span(span); - LitChar { token: token } - } - - pub fn value(&self) -> char { - value::parse_lit_char(&self.token.to_string()) - } - - pub fn span(&self) -> Span { - self.token.span() - } - - pub fn set_span(&mut self, span: Span) { - self.token.set_span(span) - } -} - -impl LitInt { - pub fn new(value: u64, suffix: IntSuffix, span: Span) -> Self { - let mut token = match suffix { - IntSuffix::Isize => Literal::isize_suffixed(value as isize), - IntSuffix::I8 => Literal::i8_suffixed(value as i8), - IntSuffix::I16 => Literal::i16_suffixed(value as i16), - IntSuffix::I32 => Literal::i32_suffixed(value as i32), - IntSuffix::I64 => Literal::i64_suffixed(value as i64), - IntSuffix::I128 => value::to_literal(&format!("{}i128", value)), - IntSuffix::Usize => Literal::usize_suffixed(value as usize), - IntSuffix::U8 => Literal::u8_suffixed(value as u8), - IntSuffix::U16 => Literal::u16_suffixed(value as u16), - IntSuffix::U32 => Literal::u32_suffixed(value as u32), - IntSuffix::U64 => 
Literal::u64_suffixed(value), - IntSuffix::U128 => value::to_literal(&format!("{}u128", value)), - IntSuffix::None => Literal::u64_unsuffixed(value), - }; - token.set_span(span); - LitInt { token: token } - } - - pub fn value(&self) -> u64 { - value::parse_lit_int(&self.token.to_string()).unwrap() - } - - pub fn suffix(&self) -> IntSuffix { - let value = self.token.to_string(); - for (s, suffix) in vec![ - ("i8", IntSuffix::I8), - ("i16", IntSuffix::I16), - ("i32", IntSuffix::I32), - ("i64", IntSuffix::I64), - ("i128", IntSuffix::I128), - ("isize", IntSuffix::Isize), - ("u8", IntSuffix::U8), - ("u16", IntSuffix::U16), - ("u32", IntSuffix::U32), - ("u64", IntSuffix::U64), - ("u128", IntSuffix::U128), - ("usize", IntSuffix::Usize), - ] { - if value.ends_with(s) { - return suffix; - } - } - IntSuffix::None - } - - pub fn span(&self) -> Span { - self.token.span() - } - - pub fn set_span(&mut self, span: Span) { - self.token.set_span(span) - } -} - -impl LitFloat { - pub fn new(value: f64, suffix: FloatSuffix, span: Span) -> Self { - let mut token = match suffix { - FloatSuffix::F32 => Literal::f32_suffixed(value as f32), - FloatSuffix::F64 => Literal::f64_suffixed(value), - FloatSuffix::None => Literal::f64_unsuffixed(value), - }; - token.set_span(span); - LitFloat { token: token } - } - - pub fn value(&self) -> f64 { - value::parse_lit_float(&self.token.to_string()) - } - - pub fn suffix(&self) -> FloatSuffix { - let value = self.token.to_string(); - for (s, suffix) in vec![("f32", FloatSuffix::F32), ("f64", FloatSuffix::F64)] { - if value.ends_with(s) { - return suffix; - } - } - FloatSuffix::None - } - - pub fn span(&self) -> Span { - self.token.span() - } - - pub fn set_span(&mut self, span: Span) { - self.token.set_span(span) - } -} - -macro_rules! lit_extra_traits { - ($ty:ident, $field:ident) => { - #[cfg(feature = "extra-traits")] - impl Eq for $ty {} - - #[cfg(feature = "extra-traits")] - impl PartialEq for $ty { - fn eq(&self, other: &Self) -> bool { - self.$field.to_string() == other.$field.to_string() - } - } - - #[cfg(feature = "extra-traits")] - impl Hash for $ty { - fn hash(&self, state: &mut H) - where - H: Hasher, - { - self.$field.to_string().hash(state); - } - } - - #[cfg(feature = "parsing")] - #[doc(hidden)] - #[allow(non_snake_case)] - pub fn $ty(marker: lookahead::TokenMarker) -> $ty { - match marker {} - } - }; -} - -impl LitVerbatim { - pub fn span(&self) -> Span { - self.token.span() - } - - pub fn set_span(&mut self, span: Span) { - self.token.set_span(span) - } -} - -lit_extra_traits!(LitStr, token); -lit_extra_traits!(LitByteStr, token); -lit_extra_traits!(LitByte, token); -lit_extra_traits!(LitChar, token); -lit_extra_traits!(LitInt, token); -lit_extra_traits!(LitFloat, token); -lit_extra_traits!(LitBool, value); -lit_extra_traits!(LitVerbatim, token); - -ast_enum! { - /// The style of a string literal, either plain quoted or a raw string like - /// `r##"data"##`. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - pub enum StrStyle #no_visit { - /// An ordinary string like `"data"`. - Cooked, - /// A raw string like `r##"data"##`. - /// - /// The unsigned integer is the number of `#` symbols used. - Raw(usize), - } -} - -ast_enum! { - /// The suffix on an integer literal if any, like the `u8` in `127u8`. 
- /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - pub enum IntSuffix #no_visit { - I8, - I16, - I32, - I64, - I128, - Isize, - U8, - U16, - U32, - U64, - U128, - Usize, - None, - } -} - -ast_enum! { - /// The suffix on a floating point literal if any, like the `f32` in - /// `1.0f32`. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - pub enum FloatSuffix #no_visit { - F32, - F64, - None, - } -} - -#[cfg(feature = "parsing")] -#[doc(hidden)] -#[allow(non_snake_case)] -pub fn Lit(marker: lookahead::TokenMarker) -> Lit { - match marker {} -} - -#[cfg(feature = "parsing")] -pub mod parsing { - use super::*; - use parse::{Parse, ParseStream, Result}; - - impl Parse for Lit { - fn parse(input: ParseStream) -> Result { - input.step(|cursor| { - if let Some((lit, rest)) = cursor.literal() { - return Ok((Lit::new(lit), rest)); - } - while let Some((ident, rest)) = cursor.ident() { - let value = if ident == "true" { - true - } else if ident == "false" { - false - } else { - break; - }; - let lit_bool = LitBool { - value: value, - span: ident.span(), - }; - return Ok((Lit::Bool(lit_bool), rest)); - } - Err(cursor.error("expected literal")) - }) - } - } - - impl Parse for LitStr { - fn parse(input: ParseStream) -> Result { - let head = input.fork(); - match input.parse()? { - Lit::Str(lit) => Ok(lit), - _ => Err(head.error("expected string literal")), - } - } - } - - impl Parse for LitByteStr { - fn parse(input: ParseStream) -> Result { - let head = input.fork(); - match input.parse()? { - Lit::ByteStr(lit) => Ok(lit), - _ => Err(head.error("expected byte string literal")), - } - } - } - - impl Parse for LitByte { - fn parse(input: ParseStream) -> Result { - let head = input.fork(); - match input.parse()? { - Lit::Byte(lit) => Ok(lit), - _ => Err(head.error("expected byte literal")), - } - } - } - - impl Parse for LitChar { - fn parse(input: ParseStream) -> Result { - let head = input.fork(); - match input.parse()? { - Lit::Char(lit) => Ok(lit), - _ => Err(head.error("expected character literal")), - } - } - } - - impl Parse for LitInt { - fn parse(input: ParseStream) -> Result { - let head = input.fork(); - match input.parse()? { - Lit::Int(lit) => Ok(lit), - _ => Err(head.error("expected integer literal")), - } - } - } - - impl Parse for LitFloat { - fn parse(input: ParseStream) -> Result { - let head = input.fork(); - match input.parse()? { - Lit::Float(lit) => Ok(lit), - _ => Err(head.error("expected floating point literal")), - } - } - } - - impl Parse for LitBool { - fn parse(input: ParseStream) -> Result { - let head = input.fork(); - match input.parse()? 
{ - Lit::Bool(lit) => Ok(lit), - _ => Err(head.error("expected boolean literal")), - } - } - } -} - -#[cfg(feature = "printing")] -mod printing { - use super::*; - use proc_macro2::TokenStream; - use quote::{ToTokens, TokenStreamExt}; - - impl ToTokens for LitStr { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.token.to_tokens(tokens); - } - } - - impl ToTokens for LitByteStr { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.token.to_tokens(tokens); - } - } - - impl ToTokens for LitByte { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.token.to_tokens(tokens); - } - } - - impl ToTokens for LitChar { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.token.to_tokens(tokens); - } - } - - impl ToTokens for LitInt { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.token.to_tokens(tokens); - } - } - - impl ToTokens for LitFloat { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.token.to_tokens(tokens); - } - } - - impl ToTokens for LitBool { - fn to_tokens(&self, tokens: &mut TokenStream) { - let s = if self.value { "true" } else { "false" }; - tokens.append(Ident::new(s, self.span)); - } - } - - impl ToTokens for LitVerbatim { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.token.to_tokens(tokens); - } - } -} - -mod value { - use super::*; - use proc_macro2::TokenStream; - use std::char; - use std::ops::{Index, RangeFrom}; - - impl Lit { - /// Interpret a Syn literal from a proc-macro2 literal. - /// - /// Not all proc-macro2 literals are valid Syn literals. In particular, - /// doc comments are considered by proc-macro2 to be literals but in Syn - /// they are [`Attribute`]. - /// - /// [`Attribute`]: struct.Attribute.html - /// - /// # Panics - /// - /// Panics if the input is a doc comment literal. - pub fn new(token: Literal) -> Self { - let value = token.to_string(); - - match value::byte(&value, 0) { - b'"' | b'r' => return Lit::Str(LitStr { token: token }), - b'b' => match value::byte(&value, 1) { - b'"' | b'r' => return Lit::ByteStr(LitByteStr { token: token }), - b'\'' => return Lit::Byte(LitByte { token: token }), - _ => {} - }, - b'\'' => return Lit::Char(LitChar { token: token }), - b'0'...b'9' => { - if number_is_int(&value) { - return Lit::Int(LitInt { token: token }); - } else if number_is_float(&value) { - return Lit::Float(LitFloat { token: token }); - } else { - // number overflow - return Lit::Verbatim(LitVerbatim { token: token }); - } - } - _ => { - if value == "true" || value == "false" { - return Lit::Bool(LitBool { - value: value == "true", - span: token.span(), - }); - } - } - } - - panic!("Unrecognized literal: {}", value); - } - } - - fn number_is_int(value: &str) -> bool { - if number_is_float(value) { - false - } else { - value::parse_lit_int(value).is_some() - } - } - - fn number_is_float(value: &str) -> bool { - if value.contains('.') { - true - } else if value.starts_with("0x") || value.ends_with("size") { - false - } else { - value.contains('e') || value.contains('E') - } - } - - /// Get the byte at offset idx, or a default of `b'\0'` if we're looking - /// past the end of the input buffer. 
- pub fn byte + ?Sized>(s: &S, idx: usize) -> u8 { - let s = s.as_ref(); - if idx < s.len() { - s[idx] - } else { - 0 - } - } - - fn next_chr(s: &str) -> char { - s.chars().next().unwrap_or('\0') - } - - pub fn parse_lit_str(s: &str) -> String { - match byte(s, 0) { - b'"' => parse_lit_str_cooked(s), - b'r' => parse_lit_str_raw(s), - _ => unreachable!(), - } - } - - // Clippy false positive - // https://github.com/rust-lang-nursery/rust-clippy/issues/2329 - #[cfg_attr(feature = "cargo-clippy", allow(needless_continue))] - fn parse_lit_str_cooked(mut s: &str) -> String { - assert_eq!(byte(s, 0), b'"'); - s = &s[1..]; - - let mut out = String::new(); - 'outer: loop { - let ch = match byte(s, 0) { - b'"' => break, - b'\\' => { - let b = byte(s, 1); - s = &s[2..]; - match b { - b'x' => { - let (byte, rest) = backslash_x(s); - s = rest; - assert!(byte <= 0x80, "Invalid \\x byte in string literal"); - char::from_u32(u32::from(byte)).unwrap() - } - b'u' => { - let (chr, rest) = backslash_u(s); - s = rest; - chr - } - b'n' => '\n', - b'r' => '\r', - b't' => '\t', - b'\\' => '\\', - b'0' => '\0', - b'\'' => '\'', - b'"' => '"', - b'\r' | b'\n' => loop { - let ch = next_chr(s); - if ch.is_whitespace() { - s = &s[ch.len_utf8()..]; - } else { - continue 'outer; - } - }, - b => panic!("unexpected byte {:?} after \\ character in byte literal", b), - } - } - b'\r' => { - assert_eq!(byte(s, 1), b'\n', "Bare CR not allowed in string"); - s = &s[2..]; - '\n' - } - _ => { - let ch = next_chr(s); - s = &s[ch.len_utf8()..]; - ch - } - }; - out.push(ch); - } - - assert_eq!(s, "\""); - out - } - - fn parse_lit_str_raw(mut s: &str) -> String { - assert_eq!(byte(s, 0), b'r'); - s = &s[1..]; - - let mut pounds = 0; - while byte(s, pounds) == b'#' { - pounds += 1; - } - assert_eq!(byte(s, pounds), b'"'); - assert_eq!(byte(s, s.len() - pounds - 1), b'"'); - for end in s[s.len() - pounds..].bytes() { - assert_eq!(end, b'#'); - } - - s[pounds + 1..s.len() - pounds - 1].to_owned() - } - - pub fn parse_lit_byte_str(s: &str) -> Vec { - assert_eq!(byte(s, 0), b'b'); - match byte(s, 1) { - b'"' => parse_lit_byte_str_cooked(s), - b'r' => parse_lit_byte_str_raw(s), - _ => unreachable!(), - } - } - - // Clippy false positive - // https://github.com/rust-lang-nursery/rust-clippy/issues/2329 - #[cfg_attr(feature = "cargo-clippy", allow(needless_continue))] - fn parse_lit_byte_str_cooked(mut s: &str) -> Vec { - assert_eq!(byte(s, 0), b'b'); - assert_eq!(byte(s, 1), b'"'); - s = &s[2..]; - - // We're going to want to have slices which don't respect codepoint boundaries. 
- let mut s = s.as_bytes(); - - let mut out = Vec::new(); - 'outer: loop { - let byte = match byte(s, 0) { - b'"' => break, - b'\\' => { - let b = byte(s, 1); - s = &s[2..]; - match b { - b'x' => { - let (b, rest) = backslash_x(s); - s = rest; - b - } - b'n' => b'\n', - b'r' => b'\r', - b't' => b'\t', - b'\\' => b'\\', - b'0' => b'\0', - b'\'' => b'\'', - b'"' => b'"', - b'\r' | b'\n' => loop { - let byte = byte(s, 0); - let ch = char::from_u32(u32::from(byte)).unwrap(); - if ch.is_whitespace() { - s = &s[1..]; - } else { - continue 'outer; - } - }, - b => panic!("unexpected byte {:?} after \\ character in byte literal", b), - } - } - b'\r' => { - assert_eq!(byte(s, 1), b'\n', "Bare CR not allowed in string"); - s = &s[2..]; - b'\n' - } - b => { - s = &s[1..]; - b - } - }; - out.push(byte); - } - - assert_eq!(s, b"\""); - out - } - - fn parse_lit_byte_str_raw(s: &str) -> Vec { - assert_eq!(byte(s, 0), b'b'); - parse_lit_str_raw(&s[1..]).into_bytes() - } - - pub fn parse_lit_byte(s: &str) -> u8 { - assert_eq!(byte(s, 0), b'b'); - assert_eq!(byte(s, 1), b'\''); - - // We're going to want to have slices which don't respect codepoint boundaries. - let mut s = s[2..].as_bytes(); - - let b = match byte(s, 0) { - b'\\' => { - let b = byte(s, 1); - s = &s[2..]; - match b { - b'x' => { - let (b, rest) = backslash_x(s); - s = rest; - b - } - b'n' => b'\n', - b'r' => b'\r', - b't' => b'\t', - b'\\' => b'\\', - b'0' => b'\0', - b'\'' => b'\'', - b'"' => b'"', - b => panic!("unexpected byte {:?} after \\ character in byte literal", b), - } - } - b => { - s = &s[1..]; - b - } - }; - - assert_eq!(byte(s, 0), b'\''); - b - } - - pub fn parse_lit_char(mut s: &str) -> char { - assert_eq!(byte(s, 0), b'\''); - s = &s[1..]; - - let ch = match byte(s, 0) { - b'\\' => { - let b = byte(s, 1); - s = &s[2..]; - match b { - b'x' => { - let (byte, rest) = backslash_x(s); - s = rest; - assert!(byte <= 0x80, "Invalid \\x byte in string literal"); - char::from_u32(u32::from(byte)).unwrap() - } - b'u' => { - let (chr, rest) = backslash_u(s); - s = rest; - chr - } - b'n' => '\n', - b'r' => '\r', - b't' => '\t', - b'\\' => '\\', - b'0' => '\0', - b'\'' => '\'', - b'"' => '"', - b => panic!("unexpected byte {:?} after \\ character in byte literal", b), - } - } - _ => { - let ch = next_chr(s); - s = &s[ch.len_utf8()..]; - ch - } - }; - assert_eq!(s, "\'", "Expected end of char literal"); - ch - } - - fn backslash_x(s: &S) -> (u8, &S) - where - S: Index, Output = S> + AsRef<[u8]> + ?Sized, - { - let mut ch = 0; - let b0 = byte(s, 0); - let b1 = byte(s, 1); - ch += 0x10 - * match b0 { - b'0'...b'9' => b0 - b'0', - b'a'...b'f' => 10 + (b0 - b'a'), - b'A'...b'F' => 10 + (b0 - b'A'), - _ => panic!("unexpected non-hex character after \\x"), - }; - ch += match b1 { - b'0'...b'9' => b1 - b'0', - b'a'...b'f' => 10 + (b1 - b'a'), - b'A'...b'F' => 10 + (b1 - b'A'), - _ => panic!("unexpected non-hex character after \\x"), - }; - (ch, &s[2..]) - } - - fn backslash_u(mut s: &str) -> (char, &str) { - if byte(s, 0) != b'{' { - panic!("expected {{ after \\u"); - } - s = &s[1..]; - - let mut ch = 0; - for _ in 0..6 { - let b = byte(s, 0); - match b { - b'0'...b'9' => { - ch *= 0x10; - ch += u32::from(b - b'0'); - s = &s[1..]; - } - b'a'...b'f' => { - ch *= 0x10; - ch += u32::from(10 + b - b'a'); - s = &s[1..]; - } - b'A'...b'F' => { - ch *= 0x10; - ch += u32::from(10 + b - b'A'); - s = &s[1..]; - } - b'}' => break, - _ => panic!("unexpected non-hex character after \\u"), - } - } - assert!(byte(s, 0) == b'}'); - s = &s[1..]; - - if let 
Some(ch) = char::from_u32(ch) { - (ch, s) - } else { - panic!("character code {:x} is not a valid unicode character", ch); - } - } - - pub fn parse_lit_int(mut s: &str) -> Option { - let base = match (byte(s, 0), byte(s, 1)) { - (b'0', b'x') => { - s = &s[2..]; - 16 - } - (b'0', b'o') => { - s = &s[2..]; - 8 - } - (b'0', b'b') => { - s = &s[2..]; - 2 - } - (b'0'...b'9', _) => 10, - _ => unreachable!(), - }; - - let mut value = 0u64; - loop { - let b = byte(s, 0); - let digit = match b { - b'0'...b'9' => u64::from(b - b'0'), - b'a'...b'f' if base > 10 => 10 + u64::from(b - b'a'), - b'A'...b'F' if base > 10 => 10 + u64::from(b - b'A'), - b'_' => { - s = &s[1..]; - continue; - } - // NOTE: Looking at a floating point literal, we don't want to - // consider these integers. - b'.' if base == 10 => return None, - b'e' | b'E' if base == 10 => return None, - _ => break, - }; - - if digit >= base { - panic!("Unexpected digit {:x} out of base range", digit); - } - - value = match value.checked_mul(base) { - Some(value) => value, - None => return None, - }; - value = match value.checked_add(digit) { - Some(value) => value, - None => return None, - }; - s = &s[1..]; - } - - Some(value) - } - - pub fn parse_lit_float(input: &str) -> f64 { - // Rust's floating point literals are very similar to the ones parsed by - // the standard library, except that rust's literals can contain - // ignorable underscores. Let's remove those underscores. - let mut bytes = input.to_owned().into_bytes(); - let mut write = 0; - for read in 0..bytes.len() { - if bytes[read] == b'_' { - continue; // Don't increase write - } - if write != read { - let x = bytes[read]; - bytes[write] = x; - } - write += 1; - } - bytes.truncate(write); - let input = String::from_utf8(bytes).unwrap(); - let end = input.find('f').unwrap_or_else(|| input.len()); - input[..end].parse().unwrap() - } - - pub fn to_literal(s: &str) -> Literal { - let stream = s.parse::().unwrap(); - match stream.into_iter().next().unwrap() { - TokenTree::Literal(l) => l, - _ => unreachable!(), - } - } -} diff --git a/third_party/rust/syn-0.15.30/src/lookahead.rs b/third_party/rust/syn-0.15.30/src/lookahead.rs deleted file mode 100644 index 91d55f10ec..0000000000 --- a/third_party/rust/syn-0.15.30/src/lookahead.rs +++ /dev/null @@ -1,170 +0,0 @@ -use std::cell::RefCell; - -use proc_macro2::{Delimiter, Span}; - -use buffer::Cursor; -use error::{self, Error}; -use span::IntoSpans; -use token::Token; - -/// Support for checking the next token in a stream to decide how to parse. -/// -/// An important advantage over [`ParseStream::peek`] is that here we -/// automatically construct an appropriate error message based on the token -/// alternatives that get peeked. If you are producing your own error message, -/// go ahead and use `ParseStream::peek` instead. -/// -/// Use [`ParseStream::lookahead1`] to construct this object. -/// -/// [`ParseStream::peek`]: struct.ParseBuffer.html#method.peek -/// [`ParseStream::lookahead1`]: struct.ParseBuffer.html#method.lookahead1 -/// -/// # Example -/// -/// ```edition2018 -/// use syn::{ConstParam, Ident, Lifetime, LifetimeDef, Result, Token, TypeParam}; -/// use syn::parse::{Parse, ParseStream}; -/// -/// // A generic parameter, a single one of the comma-separated elements inside -/// // angle brackets in: -/// // -/// // fn f() { ... } -/// // -/// // On invalid input, lookahead gives us a reasonable error message. 
-/// // -/// // error: expected one of: identifier, lifetime, `const` -/// // | -/// // 5 | fn f() {} -/// // | ^ -/// enum GenericParam { -/// Type(TypeParam), -/// Lifetime(LifetimeDef), -/// Const(ConstParam), -/// } -/// -/// impl Parse for GenericParam { -/// fn parse(input: ParseStream) -> Result { -/// let lookahead = input.lookahead1(); -/// if lookahead.peek(Ident) { -/// input.parse().map(GenericParam::Type) -/// } else if lookahead.peek(Lifetime) { -/// input.parse().map(GenericParam::Lifetime) -/// } else if lookahead.peek(Token![const]) { -/// input.parse().map(GenericParam::Const) -/// } else { -/// Err(lookahead.error()) -/// } -/// } -/// } -/// ``` -pub struct Lookahead1<'a> { - scope: Span, - cursor: Cursor<'a>, - comparisons: RefCell>, -} - -pub fn new(scope: Span, cursor: Cursor) -> Lookahead1 { - Lookahead1 { - scope: scope, - cursor: cursor, - comparisons: RefCell::new(Vec::new()), - } -} - -fn peek_impl( - lookahead: &Lookahead1, - peek: fn(Cursor) -> bool, - display: fn() -> &'static str, -) -> bool { - if peek(lookahead.cursor) { - return true; - } - lookahead.comparisons.borrow_mut().push(display()); - false -} - -impl<'a> Lookahead1<'a> { - /// Looks at the next token in the parse stream to determine whether it - /// matches the requested type of token. - /// - /// # Syntax - /// - /// Note that this method does not use turbofish syntax. Pass the peek type - /// inside of parentheses. - /// - /// - `input.peek(Token![struct])` - /// - `input.peek(Token![==])` - /// - `input.peek(Ident)` - /// - `input.peek(Lifetime)` - /// - `input.peek(token::Brace)` - pub fn peek(&self, token: T) -> bool { - let _ = token; - peek_impl(self, T::Token::peek, T::Token::display) - } - - /// Triggers an error at the current position of the parse stream. - /// - /// The error message will identify all of the expected token types that - /// have been peeked against this lookahead instance. - pub fn error(self) -> Error { - let comparisons = self.comparisons.borrow(); - match comparisons.len() { - 0 => { - if self.cursor.eof() { - Error::new(self.scope, "unexpected end of input") - } else { - Error::new(self.cursor.span(), "unexpected token") - } - } - 1 => { - let message = format!("expected {}", comparisons[0]); - error::new_at(self.scope, self.cursor, message) - } - 2 => { - let message = format!("expected {} or {}", comparisons[0], comparisons[1]); - error::new_at(self.scope, self.cursor, message) - } - _ => { - let join = comparisons.join(", "); - let message = format!("expected one of: {}", join); - error::new_at(self.scope, self.cursor, message) - } - } - } -} - -/// Types that can be parsed by looking at just one token. -/// -/// Use [`ParseStream::peek`] to peek one of these types in a parse stream -/// without consuming it from the stream. -/// -/// This trait is sealed and cannot be implemented for types outside of Syn. -/// -/// [`ParseStream::peek`]: struct.ParseBuffer.html#method.peek -pub trait Peek: private::Sealed { - // Not public API. 
- #[doc(hidden)] - type Token: Token; -} - -impl T, T: Token> Peek for F { - type Token = T; -} - -pub enum TokenMarker {} - -impl IntoSpans for TokenMarker { - fn into_spans(self) -> S { - match self {} - } -} - -pub fn is_delimiter(cursor: Cursor, delimiter: Delimiter) -> bool { - cursor.group(delimiter).is_some() -} - -mod private { - use super::{Token, TokenMarker}; - pub trait Sealed {} - impl T, T: Token> Sealed for F {} -} diff --git a/third_party/rust/syn-0.15.30/src/mac.rs b/third_party/rust/syn-0.15.30/src/mac.rs deleted file mode 100644 index 46dc5dd579..0000000000 --- a/third_party/rust/syn-0.15.30/src/mac.rs +++ /dev/null @@ -1,131 +0,0 @@ -use super::*; -use proc_macro2::TokenStream; -#[cfg(feature = "parsing")] -use proc_macro2::{Delimiter, TokenTree}; -use token::{Brace, Bracket, Paren}; - -#[cfg(feature = "parsing")] -use parse::{ParseStream, Result}; -#[cfg(feature = "extra-traits")] -use std::hash::{Hash, Hasher}; -#[cfg(feature = "extra-traits")] -use tt::TokenStreamHelper; - -ast_struct! { - /// A macro invocation: `println!("{}", mac)`. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - pub struct Macro #manual_extra_traits { - pub path: Path, - pub bang_token: Token![!], - pub delimiter: MacroDelimiter, - pub tts: TokenStream, - } -} - -ast_enum! { - /// A grouping token that surrounds a macro body: `m!(...)` or `m!{...}` or `m![...]`. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - pub enum MacroDelimiter { - Paren(Paren), - Brace(Brace), - Bracket(Bracket), - } -} - -#[cfg(feature = "extra-traits")] -impl Eq for Macro {} - -#[cfg(feature = "extra-traits")] -impl PartialEq for Macro { - fn eq(&self, other: &Self) -> bool { - self.path == other.path - && self.bang_token == other.bang_token - && self.delimiter == other.delimiter - && TokenStreamHelper(&self.tts) == TokenStreamHelper(&other.tts) - } -} - -#[cfg(feature = "extra-traits")] -impl Hash for Macro { - fn hash(&self, state: &mut H) - where - H: Hasher, - { - self.path.hash(state); - self.bang_token.hash(state); - self.delimiter.hash(state); - TokenStreamHelper(&self.tts).hash(state); - } -} - -#[cfg(feature = "parsing")] -pub fn parse_delimiter(input: ParseStream) -> Result<(MacroDelimiter, TokenStream)> { - input.step(|cursor| { - if let Some((TokenTree::Group(g), rest)) = cursor.token_tree() { - let span = g.span(); - let delimiter = match g.delimiter() { - Delimiter::Parenthesis => MacroDelimiter::Paren(Paren(span)), - Delimiter::Brace => MacroDelimiter::Brace(Brace(span)), - Delimiter::Bracket => MacroDelimiter::Bracket(Bracket(span)), - Delimiter::None => { - return Err(cursor.error("expected delimiter")); - } - }; - Ok(((delimiter, g.stream().clone()), rest)) - } else { - Err(cursor.error("expected delimiter")) - } - }) -} - -#[cfg(feature = "parsing")] -pub mod parsing { - use super::*; - - use parse::{Parse, ParseStream, Result}; - - impl Parse for Macro { - fn parse(input: ParseStream) -> Result { - let tts; - Ok(Macro { - path: input.call(Path::parse_mod_style)?, - bang_token: input.parse()?, - delimiter: { - let (delimiter, content) = parse_delimiter(input)?; - tts = content; - delimiter - }, - tts: tts, - }) - } - } -} - -#[cfg(feature = "printing")] -mod printing { - use super::*; - use proc_macro2::TokenStream; - use quote::ToTokens; - - impl ToTokens for Macro { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.path.to_tokens(tokens); - self.bang_token.to_tokens(tokens); - match 
self.delimiter { - MacroDelimiter::Paren(ref paren) => { - paren.surround(tokens, |tokens| self.tts.to_tokens(tokens)); - } - MacroDelimiter::Brace(ref brace) => { - brace.surround(tokens, |tokens| self.tts.to_tokens(tokens)); - } - MacroDelimiter::Bracket(ref bracket) => { - bracket.surround(tokens, |tokens| self.tts.to_tokens(tokens)); - } - } - } - } -} diff --git a/third_party/rust/syn-0.15.30/src/macros.rs b/third_party/rust/syn-0.15.30/src/macros.rs deleted file mode 100644 index bb59063a66..0000000000 --- a/third_party/rust/syn-0.15.30/src/macros.rs +++ /dev/null @@ -1,165 +0,0 @@ -#[cfg(any(feature = "full", feature = "derive"))] -macro_rules! ast_struct { - ( - $(#[$attr:meta])* - pub struct $name:ident #full $($rest:tt)* - ) => { - #[cfg(feature = "full")] - $(#[$attr])* - #[cfg_attr(feature = "extra-traits", derive(Debug, Eq, PartialEq, Hash))] - #[cfg_attr(feature = "clone-impls", derive(Clone))] - pub struct $name $($rest)* - - #[cfg(not(feature = "full"))] - $(#[$attr])* - #[cfg_attr(feature = "extra-traits", derive(Debug, Eq, PartialEq, Hash))] - #[cfg_attr(feature = "clone-impls", derive(Clone))] - pub struct $name { - _noconstruct: (), - } - }; - - ( - $(#[$attr:meta])* - pub struct $name:ident #manual_extra_traits $($rest:tt)* - ) => { - $(#[$attr])* - #[cfg_attr(feature = "extra-traits", derive(Debug))] - #[cfg_attr(feature = "clone-impls", derive(Clone))] - pub struct $name $($rest)* - }; - - ( - $(#[$attr:meta])* - pub struct $name:ident $($rest:tt)* - ) => { - $(#[$attr])* - #[cfg_attr(feature = "extra-traits", derive(Debug, Eq, PartialEq, Hash))] - #[cfg_attr(feature = "clone-impls", derive(Clone))] - pub struct $name $($rest)* - }; -} - -#[cfg(any(feature = "full", feature = "derive"))] -macro_rules! ast_enum { - ( - $(#[$enum_attr:meta])* - pub enum $name:ident $(# $tags:ident)* { $($variants:tt)* } - ) => ( - $(#[$enum_attr])* - #[cfg_attr(feature = "extra-traits", derive(Debug, Eq, PartialEq, Hash))] - #[cfg_attr(feature = "clone-impls", derive(Clone))] - pub enum $name { - $($variants)* - } - ) -} - -#[cfg(any(feature = "full", feature = "derive"))] -macro_rules! ast_enum_of_structs { - ( - $(#[$enum_attr:meta])* - pub enum $name:ident { - $( - $(#[$variant_attr:meta])* - pub $variant:ident $( ($member:ident $($rest:tt)*) )*, - )* - } - - $($remaining:tt)* - ) => ( - ast_enum! { - $(#[$enum_attr])* - pub enum $name { - $( - $(#[$variant_attr])* - $variant $( ($member) )*, - )* - } - } - - $( - maybe_ast_struct! { - $(#[$variant_attr])* - $( - pub struct $member $($rest)* - )* - } - - $( - impl From<$member> for $name { - fn from(e: $member) -> $name { - $name::$variant(e) - } - } - )* - )* - - #[cfg(feature = "printing")] - generate_to_tokens! { - $($remaining)* - () - tokens - $name { $($variant $( [$($rest)*] )*,)* } - } - ) -} - -#[cfg(all(feature = "printing", any(feature = "full", feature = "derive")))] -macro_rules! 
generate_to_tokens { - (do_not_generate_to_tokens $($foo:tt)*) => (); - - (($($arms:tt)*) $tokens:ident $name:ident { $variant:ident, $($next:tt)*}) => { - generate_to_tokens!( - ($($arms)* $name::$variant => {}) - $tokens $name { $($next)* } - ); - }; - - (($($arms:tt)*) $tokens:ident $name:ident { $variant:ident [$($rest:tt)*], $($next:tt)*}) => { - generate_to_tokens!( - ($($arms)* $name::$variant(ref _e) => to_tokens_call!(_e, $tokens, $($rest)*),) - $tokens $name { $($next)* } - ); - }; - - (($($arms:tt)*) $tokens:ident $name:ident {}) => { - impl ::quote::ToTokens for $name { - fn to_tokens(&self, $tokens: &mut ::proc_macro2::TokenStream) { - match *self { - $($arms)* - } - } - } - }; -} - -#[cfg(all(feature = "printing", feature = "full"))] -macro_rules! to_tokens_call { - ($e:ident, $tokens:ident, $($rest:tt)*) => { - $e.to_tokens($tokens) - }; -} - -#[cfg(all(feature = "printing", feature = "derive", not(feature = "full")))] -macro_rules! to_tokens_call { - // If the variant is marked as #full, don't auto-generate to-tokens for it. - ($e:ident, $tokens:ident, #full $($rest:tt)*) => { - unreachable!() - }; - ($e:ident, $tokens:ident, $($rest:tt)*) => { - $e.to_tokens($tokens) - }; -} - -#[cfg(any(feature = "full", feature = "derive"))] -macro_rules! maybe_ast_struct { - ( - $(#[$attr:meta])* - $( - pub struct $name:ident - )* - ) => (); - - ($($rest:tt)*) => (ast_struct! { $($rest)* }); -} diff --git a/third_party/rust/syn-0.15.30/src/op.rs b/third_party/rust/syn-0.15.30/src/op.rs deleted file mode 100644 index 96d8c99585..0000000000 --- a/third_party/rust/syn-0.15.30/src/op.rs +++ /dev/null @@ -1,231 +0,0 @@ -ast_enum! { - /// A binary operator: `+`, `+=`, `&`. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - #[cfg_attr(feature = "clone-impls", derive(Copy))] - pub enum BinOp { - /// The `+` operator (addition) - Add(Token![+]), - /// The `-` operator (subtraction) - Sub(Token![-]), - /// The `*` operator (multiplication) - Mul(Token![*]), - /// The `/` operator (division) - Div(Token![/]), - /// The `%` operator (modulus) - Rem(Token![%]), - /// The `&&` operator (logical and) - And(Token![&&]), - /// The `||` operator (logical or) - Or(Token![||]), - /// The `^` operator (bitwise xor) - BitXor(Token![^]), - /// The `&` operator (bitwise and) - BitAnd(Token![&]), - /// The `|` operator (bitwise or) - BitOr(Token![|]), - /// The `<<` operator (shift left) - Shl(Token![<<]), - /// The `>>` operator (shift right) - Shr(Token![>>]), - /// The `==` operator (equality) - Eq(Token![==]), - /// The `<` operator (less than) - Lt(Token![<]), - /// The `<=` operator (less than or equal to) - Le(Token![<=]), - /// The `!=` operator (not equal to) - Ne(Token![!=]), - /// The `>=` operator (greater than or equal to) - Ge(Token![>=]), - /// The `>` operator (greater than) - Gt(Token![>]), - /// The `+=` operator - AddEq(Token![+=]), - /// The `-=` operator - SubEq(Token![-=]), - /// The `*=` operator - MulEq(Token![*=]), - /// The `/=` operator - DivEq(Token![/=]), - /// The `%=` operator - RemEq(Token![%=]), - /// The `^=` operator - BitXorEq(Token![^=]), - /// The `&=` operator - BitAndEq(Token![&=]), - /// The `|=` operator - BitOrEq(Token![|=]), - /// The `<<=` operator - ShlEq(Token![<<=]), - /// The `>>=` operator - ShrEq(Token![>>=]), - } -} - -ast_enum! { - /// A unary operator: `*`, `!`, `-`. 
- /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - #[cfg_attr(feature = "clone-impls", derive(Copy))] - pub enum UnOp { - /// The `*` operator for dereferencing - Deref(Token![*]), - /// The `!` operator for logical inversion - Not(Token![!]), - /// The `-` operator for negation - Neg(Token![-]), - } -} - -#[cfg(feature = "parsing")] -pub mod parsing { - use super::*; - - use parse::{Parse, ParseStream, Result}; - - fn parse_binop(input: ParseStream) -> Result { - if input.peek(Token![&&]) { - input.parse().map(BinOp::And) - } else if input.peek(Token![||]) { - input.parse().map(BinOp::Or) - } else if input.peek(Token![<<]) { - input.parse().map(BinOp::Shl) - } else if input.peek(Token![>>]) { - input.parse().map(BinOp::Shr) - } else if input.peek(Token![==]) { - input.parse().map(BinOp::Eq) - } else if input.peek(Token![<=]) { - input.parse().map(BinOp::Le) - } else if input.peek(Token![!=]) { - input.parse().map(BinOp::Ne) - } else if input.peek(Token![>=]) { - input.parse().map(BinOp::Ge) - } else if input.peek(Token![+]) { - input.parse().map(BinOp::Add) - } else if input.peek(Token![-]) { - input.parse().map(BinOp::Sub) - } else if input.peek(Token![*]) { - input.parse().map(BinOp::Mul) - } else if input.peek(Token![/]) { - input.parse().map(BinOp::Div) - } else if input.peek(Token![%]) { - input.parse().map(BinOp::Rem) - } else if input.peek(Token![^]) { - input.parse().map(BinOp::BitXor) - } else if input.peek(Token![&]) { - input.parse().map(BinOp::BitAnd) - } else if input.peek(Token![|]) { - input.parse().map(BinOp::BitOr) - } else if input.peek(Token![<]) { - input.parse().map(BinOp::Lt) - } else if input.peek(Token![>]) { - input.parse().map(BinOp::Gt) - } else { - Err(input.error("expected binary operator")) - } - } - - impl Parse for BinOp { - #[cfg(not(feature = "full"))] - fn parse(input: ParseStream) -> Result { - parse_binop(input) - } - - #[cfg(feature = "full")] - fn parse(input: ParseStream) -> Result { - if input.peek(Token![+=]) { - input.parse().map(BinOp::AddEq) - } else if input.peek(Token![-=]) { - input.parse().map(BinOp::SubEq) - } else if input.peek(Token![*=]) { - input.parse().map(BinOp::MulEq) - } else if input.peek(Token![/=]) { - input.parse().map(BinOp::DivEq) - } else if input.peek(Token![%=]) { - input.parse().map(BinOp::RemEq) - } else if input.peek(Token![^=]) { - input.parse().map(BinOp::BitXorEq) - } else if input.peek(Token![&=]) { - input.parse().map(BinOp::BitAndEq) - } else if input.peek(Token![|=]) { - input.parse().map(BinOp::BitOrEq) - } else if input.peek(Token![<<=]) { - input.parse().map(BinOp::ShlEq) - } else if input.peek(Token![>>=]) { - input.parse().map(BinOp::ShrEq) - } else { - parse_binop(input) - } - } - } - - impl Parse for UnOp { - fn parse(input: ParseStream) -> Result { - let lookahead = input.lookahead1(); - if lookahead.peek(Token![*]) { - input.parse().map(UnOp::Deref) - } else if lookahead.peek(Token![!]) { - input.parse().map(UnOp::Not) - } else if lookahead.peek(Token![-]) { - input.parse().map(UnOp::Neg) - } else { - Err(lookahead.error()) - } - } - } -} - -#[cfg(feature = "printing")] -mod printing { - use super::*; - use proc_macro2::TokenStream; - use quote::ToTokens; - - impl ToTokens for BinOp { - fn to_tokens(&self, tokens: &mut TokenStream) { - match *self { - BinOp::Add(ref t) => t.to_tokens(tokens), - BinOp::Sub(ref t) => t.to_tokens(tokens), - BinOp::Mul(ref t) => t.to_tokens(tokens), - BinOp::Div(ref t) => t.to_tokens(tokens), - BinOp::Rem(ref t) => 
t.to_tokens(tokens), - BinOp::And(ref t) => t.to_tokens(tokens), - BinOp::Or(ref t) => t.to_tokens(tokens), - BinOp::BitXor(ref t) => t.to_tokens(tokens), - BinOp::BitAnd(ref t) => t.to_tokens(tokens), - BinOp::BitOr(ref t) => t.to_tokens(tokens), - BinOp::Shl(ref t) => t.to_tokens(tokens), - BinOp::Shr(ref t) => t.to_tokens(tokens), - BinOp::Eq(ref t) => t.to_tokens(tokens), - BinOp::Lt(ref t) => t.to_tokens(tokens), - BinOp::Le(ref t) => t.to_tokens(tokens), - BinOp::Ne(ref t) => t.to_tokens(tokens), - BinOp::Ge(ref t) => t.to_tokens(tokens), - BinOp::Gt(ref t) => t.to_tokens(tokens), - BinOp::AddEq(ref t) => t.to_tokens(tokens), - BinOp::SubEq(ref t) => t.to_tokens(tokens), - BinOp::MulEq(ref t) => t.to_tokens(tokens), - BinOp::DivEq(ref t) => t.to_tokens(tokens), - BinOp::RemEq(ref t) => t.to_tokens(tokens), - BinOp::BitXorEq(ref t) => t.to_tokens(tokens), - BinOp::BitAndEq(ref t) => t.to_tokens(tokens), - BinOp::BitOrEq(ref t) => t.to_tokens(tokens), - BinOp::ShlEq(ref t) => t.to_tokens(tokens), - BinOp::ShrEq(ref t) => t.to_tokens(tokens), - } - } - } - - impl ToTokens for UnOp { - fn to_tokens(&self, tokens: &mut TokenStream) { - match *self { - UnOp::Deref(ref t) => t.to_tokens(tokens), - UnOp::Not(ref t) => t.to_tokens(tokens), - UnOp::Neg(ref t) => t.to_tokens(tokens), - } - } - } -} diff --git a/third_party/rust/syn-0.15.30/src/parse.rs b/third_party/rust/syn-0.15.30/src/parse.rs deleted file mode 100644 index 36f1caf300..0000000000 --- a/third_party/rust/syn-0.15.30/src/parse.rs +++ /dev/null @@ -1,1110 +0,0 @@ -//! Parsing interface for parsing a token stream into a syntax tree node. -//! -//! Parsing in Syn is built on parser functions that take in a [`ParseStream`] -//! and produce a [`Result`] where `T` is some syntax tree node. Underlying -//! these parser functions is a lower level mechanism built around the -//! [`Cursor`] type. `Cursor` is a cheaply copyable cursor over a range of -//! tokens in a token stream. -//! -//! [`ParseStream`]: type.ParseStream.html -//! [`Result`]: type.Result.html -//! [`Cursor`]: ../buffer/index.html -//! -//! # Example -//! -//! Here is a snippet of parsing code to get a feel for the style of the -//! library. We define data structures for a subset of Rust syntax including -//! enums (not shown) and structs, then provide implementations of the [`Parse`] -//! trait to parse these syntax tree data structures from a token stream. -//! -//! Once `Parse` impls have been defined, they can be called conveniently from a -//! procedural macro through [`parse_macro_input!`] as shown at the bottom of -//! the snippet. If the caller provides syntactically invalid input to the -//! procedural macro, they will receive a helpful compiler error message -//! pointing out the exact token that triggered the failure to parse. -//! -//! [`parse_macro_input!`]: ../macro.parse_macro_input.html -//! -//! ```edition2018 -//! extern crate proc_macro; -//! -//! use proc_macro::TokenStream; -//! use syn::{braced, parse_macro_input, token, Field, Ident, Result, Token}; -//! use syn::parse::{Parse, ParseStream}; -//! use syn::punctuated::Punctuated; -//! -//! enum Item { -//! Struct(ItemStruct), -//! Enum(ItemEnum), -//! } -//! -//! struct ItemStruct { -//! struct_token: Token![struct], -//! ident: Ident, -//! brace_token: token::Brace, -//! fields: Punctuated, -//! } -//! # -//! # enum ItemEnum {} -//! -//! impl Parse for Item { -//! fn parse(input: ParseStream) -> Result { -//! let lookahead = input.lookahead1(); -//! if lookahead.peek(Token![struct]) { -//! 
input.parse().map(Item::Struct) -//! } else if lookahead.peek(Token![enum]) { -//! input.parse().map(Item::Enum) -//! } else { -//! Err(lookahead.error()) -//! } -//! } -//! } -//! -//! impl Parse for ItemStruct { -//! fn parse(input: ParseStream) -> Result { -//! let content; -//! Ok(ItemStruct { -//! struct_token: input.parse()?, -//! ident: input.parse()?, -//! brace_token: braced!(content in input), -//! fields: content.parse_terminated(Field::parse_named)?, -//! }) -//! } -//! } -//! # -//! # impl Parse for ItemEnum { -//! # fn parse(input: ParseStream) -> Result { -//! # unimplemented!() -//! # } -//! # } -//! -//! # const IGNORE: &str = stringify! { -//! #[proc_macro] -//! # }; -//! pub fn my_macro(tokens: TokenStream) -> TokenStream { -//! let input = parse_macro_input!(tokens as Item); -//! -//! /* ... */ -//! # "".parse().unwrap() -//! } -//! ``` -//! -//! # The `syn::parse*` functions -//! -//! The [`syn::parse`], [`syn::parse2`], and [`syn::parse_str`] functions serve -//! as an entry point for parsing syntax tree nodes that can be parsed in an -//! obvious default way. These functions can return any syntax tree node that -//! implements the [`Parse`] trait, which includes most types in Syn. -//! -//! [`syn::parse`]: ../fn.parse.html -//! [`syn::parse2`]: ../fn.parse2.html -//! [`syn::parse_str`]: ../fn.parse_str.html -//! [`Parse`]: trait.Parse.html -//! -//! ```edition2018 -//! use syn::Type; -//! -//! # fn run_parser() -> syn::Result<()> { -//! let t: Type = syn::parse_str("std::collections::HashMap")?; -//! # Ok(()) -//! # } -//! # -//! # fn main() { -//! # run_parser().unwrap(); -//! # } -//! ``` -//! -//! The [`parse_quote!`] macro also uses this approach. -//! -//! [`parse_quote!`]: ../macro.parse_quote.html -//! -//! # The `Parser` trait -//! -//! Some types can be parsed in several ways depending on context. For example -//! an [`Attribute`] can be either "outer" like `#[...]` or "inner" like -//! `#![...]` and parsing the wrong one would be a bug. Similarly [`Punctuated`] -//! may or may not allow trailing punctuation, and parsing it the wrong way -//! would either reject valid input or accept invalid input. -//! -//! [`Attribute`]: ../struct.Attribute.html -//! [`Punctuated`]: ../punctuated/index.html -//! -//! The `Parse` trait is not implemented in these cases because there is no good -//! behavior to consider the default. -//! -//! ```edition2018,compile_fail -//! # extern crate proc_macro; -//! # -//! # use syn::punctuated::Punctuated; -//! # use syn::{PathSegment, Result, Token}; -//! # -//! # fn f(tokens: proc_macro::TokenStream) -> Result<()> { -//! # -//! // Can't parse `Punctuated` without knowing whether trailing punctuation -//! // should be allowed in this context. -//! let path: Punctuated = syn::parse(tokens)?; -//! # -//! # Ok(()) -//! # } -//! ``` -//! -//! In these cases the types provide a choice of parser functions rather than a -//! single `Parse` implementation, and those parser functions can be invoked -//! through the [`Parser`] trait. -//! -//! [`Parser`]: trait.Parser.html -//! -//! ```edition2018 -//! extern crate proc_macro; -//! -//! use proc_macro::TokenStream; -//! use syn::parse::Parser; -//! use syn::punctuated::Punctuated; -//! use syn::{Attribute, Expr, PathSegment, Result, Token}; -//! -//! fn call_some_parser_methods(input: TokenStream) -> Result<()> { -//! // Parse a nonempty sequence of path segments separated by `::` punctuation -//! // with no trailing punctuation. -//! let tokens = input.clone(); -//! 
let parser = Punctuated::::parse_separated_nonempty; -//! let _path = parser.parse(tokens)?; -//! -//! // Parse a possibly empty sequence of expressions terminated by commas with -//! // an optional trailing punctuation. -//! let tokens = input.clone(); -//! let parser = Punctuated::::parse_terminated; -//! let _args = parser.parse(tokens)?; -//! -//! // Parse zero or more outer attributes but not inner attributes. -//! let tokens = input.clone(); -//! let parser = Attribute::parse_outer; -//! let _attrs = parser.parse(tokens)?; -//! -//! Ok(()) -//! } -//! ``` -//! -//! --- -//! -//! *This module is available if Syn is built with the `"parsing"` feature.* - -use std::cell::Cell; -use std::fmt::{self, Debug, Display}; -use std::marker::PhantomData; -use std::mem; -use std::ops::Deref; -use std::rc::Rc; -use std::str::FromStr; - -#[cfg(all( - not(all(target_arch = "wasm32", target_os = "unknown")), - feature = "proc-macro" -))] -use proc_macro; -use proc_macro2::{self, Delimiter, Group, Literal, Punct, Span, TokenStream, TokenTree}; - -use buffer::{Cursor, TokenBuffer}; -use error; -use lookahead; -use private; -use punctuated::Punctuated; -use token::Token; - -pub use error::{Error, Result}; -pub use lookahead::{Lookahead1, Peek}; - -/// Parsing interface implemented by all types that can be parsed in a default -/// way from a token stream. -pub trait Parse: Sized { - fn parse(input: ParseStream) -> Result; -} - -/// Input to a Syn parser function. -/// -/// See the methods of this type under the documentation of [`ParseBuffer`]. For -/// an overview of parsing in Syn, refer to the [module documentation]. -/// -/// [module documentation]: index.html -pub type ParseStream<'a> = &'a ParseBuffer<'a>; - -/// Cursor position within a buffered token stream. -/// -/// This type is more commonly used through the type alias [`ParseStream`] which -/// is an alias for `&ParseBuffer`. -/// -/// `ParseStream` is the input type for all parser functions in Syn. They have -/// the signature `fn(ParseStream) -> Result`. -/// -/// ## Calling a parser function -/// -/// There is no public way to construct a `ParseBuffer`. Instead, if you are -/// looking to invoke a parser function that requires `ParseStream` as input, -/// you will need to go through one of the public parsing entry points. -/// -/// - The [`parse_macro_input!`] macro if parsing input of a procedural macro; -/// - One of [the `syn::parse*` functions][syn-parse]; or -/// - A method of the [`Parser`] trait. -/// -/// [`parse_macro_input!`]: ../macro.parse_macro_input.html -/// [syn-parse]: index.html#the-synparse-functions -pub struct ParseBuffer<'a> { - scope: Span, - // Instead of Cell> so that ParseBuffer<'a> is covariant in 'a. - // The rest of the code in this module needs to be careful that only a - // cursor derived from this `cell` is ever assigned to this `cell`. - // - // Cell> cannot be covariant in 'a because then we could take a - // ParseBuffer<'a>, upcast to ParseBuffer<'short> for some lifetime shorter - // than 'a, and then assign a Cursor<'short> into the Cell. - // - // By extension, it would not be safe to expose an API that accepts a - // Cursor<'a> and trusts that it lives as long as the cursor currently in - // the cell. 
- cell: Cell>, - marker: PhantomData>, - unexpected: Rc>>, -} - -impl<'a> Drop for ParseBuffer<'a> { - fn drop(&mut self) { - if !self.is_empty() && self.unexpected.get().is_none() { - self.unexpected.set(Some(self.cursor().span())); - } - } -} - -impl<'a> Display for ParseBuffer<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - Display::fmt(&self.cursor().token_stream(), f) - } -} - -impl<'a> Debug for ParseBuffer<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - Debug::fmt(&self.cursor().token_stream(), f) - } -} - -/// Cursor state associated with speculative parsing. -/// -/// This type is the input of the closure provided to [`ParseStream::step`]. -/// -/// [`ParseStream::step`]: struct.ParseBuffer.html#method.step -/// -/// # Example -/// -/// ```edition2018 -/// use proc_macro2::TokenTree; -/// use syn::Result; -/// use syn::parse::ParseStream; -/// -/// // This function advances the stream past the next occurrence of `@`. If -/// // no `@` is present in the stream, the stream position is unchanged and -/// // an error is returned. -/// fn skip_past_next_at(input: ParseStream) -> Result<()> { -/// input.step(|cursor| { -/// let mut rest = *cursor; -/// while let Some((tt, next)) = rest.token_tree() { -/// match tt { -/// TokenTree::Punct(ref punct) if punct.as_char() == '@' => { -/// return Ok(((), next)); -/// } -/// _ => rest = next, -/// } -/// } -/// Err(cursor.error("no `@` was found after this point")) -/// }) -/// } -/// # -/// # fn remainder_after_skipping_past_next_at( -/// # input: ParseStream, -/// # ) -> Result { -/// # skip_past_next_at(input)?; -/// # input.parse() -/// # } -/// # -/// # fn main() { -/// # use syn::parse::Parser; -/// # let remainder = remainder_after_skipping_past_next_at -/// # .parse_str("a @ b c") -/// # .unwrap(); -/// # assert_eq!(remainder.to_string(), "b c"); -/// # } -/// ``` -#[derive(Copy, Clone)] -pub struct StepCursor<'c, 'a> { - scope: Span, - // This field is covariant in 'c. - cursor: Cursor<'c>, - // This field is contravariant in 'c. Together these make StepCursor - // invariant in 'c. Also covariant in 'a. The user cannot cast 'c to a - // different lifetime but can upcast into a StepCursor with a shorter - // lifetime 'a. - // - // As long as we only ever construct a StepCursor for which 'c outlives 'a, - // this means if ever a StepCursor<'c, 'a> exists we are guaranteed that 'c - // outlives 'a. - marker: PhantomData) -> Cursor<'a>>, -} - -impl<'c, 'a> Deref for StepCursor<'c, 'a> { - type Target = Cursor<'c>; - - fn deref(&self) -> &Self::Target { - &self.cursor - } -} - -impl<'c, 'a> StepCursor<'c, 'a> { - /// Triggers an error at the current position of the parse stream. - /// - /// The `ParseStream::step` invocation will return this same error without - /// advancing the stream state. - pub fn error(self, message: T) -> Error { - error::new_at(self.scope, self.cursor, message) - } -} - -impl private { - pub fn advance_step_cursor<'c, 'a>(proof: StepCursor<'c, 'a>, to: Cursor<'c>) -> Cursor<'a> { - // Refer to the comments within the StepCursor definition. We use the - // fact that a StepCursor<'c, 'a> exists as proof that 'c outlives 'a. - // Cursor is covariant in its lifetime parameter so we can cast a - // Cursor<'c> to one with the shorter lifetime Cursor<'a>. 
- let _ = proof; - unsafe { mem::transmute::, Cursor<'a>>(to) } - } -} - -fn skip(input: ParseStream) -> bool { - input - .step(|cursor| { - if let Some((_lifetime, rest)) = cursor.lifetime() { - Ok((true, rest)) - } else if let Some((_token, rest)) = cursor.token_tree() { - Ok((true, rest)) - } else { - Ok((false, *cursor)) - } - }) - .unwrap() -} - -impl private { - pub fn new_parse_buffer( - scope: Span, - cursor: Cursor, - unexpected: Rc>>, - ) -> ParseBuffer { - ParseBuffer { - scope: scope, - // See comment on `cell` in the struct definition. - cell: Cell::new(unsafe { mem::transmute::>(cursor) }), - marker: PhantomData, - unexpected: unexpected, - } - } - - pub fn get_unexpected(buffer: &ParseBuffer) -> Rc>> { - buffer.unexpected.clone() - } -} - -impl<'a> ParseBuffer<'a> { - /// Parses a syntax tree node of type `T`, advancing the position of our - /// parse stream past it. - pub fn parse(&self) -> Result { - T::parse(self) - } - - /// Calls the given parser function to parse a syntax tree node of type `T` - /// from this stream. - /// - /// # Example - /// - /// The parser below invokes [`Attribute::parse_outer`] to parse a vector of - /// zero or more outer attributes. - /// - /// [`Attribute::parse_outer`]: ../struct.Attribute.html#method.parse_outer - /// - /// ```edition2018 - /// use syn::{Attribute, Ident, Result, Token}; - /// use syn::parse::{Parse, ParseStream}; - /// - /// // Parses a unit struct with attributes. - /// // - /// // #[path = "s.tmpl"] - /// // struct S; - /// struct UnitStruct { - /// attrs: Vec, - /// struct_token: Token![struct], - /// name: Ident, - /// semi_token: Token![;], - /// } - /// - /// impl Parse for UnitStruct { - /// fn parse(input: ParseStream) -> Result { - /// Ok(UnitStruct { - /// attrs: input.call(Attribute::parse_outer)?, - /// struct_token: input.parse()?, - /// name: input.parse()?, - /// semi_token: input.parse()?, - /// }) - /// } - /// } - /// ``` - pub fn call(&self, function: fn(ParseStream) -> Result) -> Result { - function(self) - } - - /// Looks at the next token in the parse stream to determine whether it - /// matches the requested type of token. - /// - /// Does not advance the position of the parse stream. - /// - /// # Syntax - /// - /// Note that this method does not use turbofish syntax. Pass the peek type - /// inside of parentheses. - /// - /// - `input.peek(Token![struct])` - /// - `input.peek(Token![==])` - /// - `input.peek(Ident)` - /// - `input.peek(Lifetime)` - /// - `input.peek(token::Brace)` - /// - /// # Example - /// - /// In this example we finish parsing the list of supertraits when the next - /// token in the input is either `where` or an opening curly brace. - /// - /// ```edition2018 - /// use syn::{braced, token, Generics, Ident, Result, Token, TypeParamBound}; - /// use syn::parse::{Parse, ParseStream}; - /// use syn::punctuated::Punctuated; - /// - /// // Parses a trait definition containing no associated items. 
- /// // - /// // trait Marker<'de, T>: A + B<'de> where Box: Clone {} - /// struct MarkerTrait { - /// trait_token: Token![trait], - /// ident: Ident, - /// generics: Generics, - /// colon_token: Option, - /// supertraits: Punctuated, - /// brace_token: token::Brace, - /// } - /// - /// impl Parse for MarkerTrait { - /// fn parse(input: ParseStream) -> Result { - /// let trait_token: Token![trait] = input.parse()?; - /// let ident: Ident = input.parse()?; - /// let mut generics: Generics = input.parse()?; - /// let colon_token: Option = input.parse()?; - /// - /// let mut supertraits = Punctuated::new(); - /// if colon_token.is_some() { - /// loop { - /// supertraits.push_value(input.parse()?); - /// if input.peek(Token![where]) || input.peek(token::Brace) { - /// break; - /// } - /// supertraits.push_punct(input.parse()?); - /// } - /// } - /// - /// generics.where_clause = input.parse()?; - /// let content; - /// let empty_brace_token = braced!(content in input); - /// - /// Ok(MarkerTrait { - /// trait_token: trait_token, - /// ident: ident, - /// generics: generics, - /// colon_token: colon_token, - /// supertraits: supertraits, - /// brace_token: empty_brace_token, - /// }) - /// } - /// } - /// ``` - pub fn peek(&self, token: T) -> bool { - let _ = token; - T::Token::peek(self.cursor()) - } - - /// Looks at the second-next token in the parse stream. - /// - /// This is commonly useful as a way to implement contextual keywords. - /// - /// # Example - /// - /// This example needs to use `peek2` because the symbol `union` is not a - /// keyword in Rust. We can't use just `peek` and decide to parse a union if - /// the very next token is `union`, because someone is free to write a `mod - /// union` and a macro invocation that looks like `union::some_macro! { ... - /// }`. In other words `union` is a contextual keyword. - /// - /// ```edition2018 - /// use syn::{Ident, ItemUnion, Macro, Result, Token}; - /// use syn::parse::{Parse, ParseStream}; - /// - /// // Parses either a union or a macro invocation. - /// enum UnionOrMacro { - /// // union MaybeUninit { uninit: (), value: T } - /// Union(ItemUnion), - /// // lazy_static! { ... } - /// Macro(Macro), - /// } - /// - /// impl Parse for UnionOrMacro { - /// fn parse(input: ParseStream) -> Result { - /// if input.peek(Token![union]) && input.peek2(Ident) { - /// input.parse().map(UnionOrMacro::Union) - /// } else { - /// input.parse().map(UnionOrMacro::Macro) - /// } - /// } - /// } - /// ``` - pub fn peek2(&self, token: T) -> bool { - let ahead = self.fork(); - skip(&ahead) && ahead.peek(token) - } - - /// Looks at the third-next token in the parse stream. - pub fn peek3(&self, token: T) -> bool { - let ahead = self.fork(); - skip(&ahead) && skip(&ahead) && ahead.peek(token) - } - - /// Parses zero or more occurrences of `T` separated by punctuation of type - /// `P`, with optional trailing punctuation. - /// - /// Parsing continues until the end of this parse stream. The entire content - /// of this parse stream must consist of `T` and `P`. 
- /// - /// # Example - /// - /// ```edition2018 - /// # use quote::quote; - /// # - /// use syn::{parenthesized, token, Ident, Result, Token, Type}; - /// use syn::parse::{Parse, ParseStream}; - /// use syn::punctuated::Punctuated; - /// - /// // Parse a simplified tuple struct syntax like: - /// // - /// // struct S(A, B); - /// struct TupleStruct { - /// struct_token: Token![struct], - /// ident: Ident, - /// paren_token: token::Paren, - /// fields: Punctuated, - /// semi_token: Token![;], - /// } - /// - /// impl Parse for TupleStruct { - /// fn parse(input: ParseStream) -> Result { - /// let content; - /// Ok(TupleStruct { - /// struct_token: input.parse()?, - /// ident: input.parse()?, - /// paren_token: parenthesized!(content in input), - /// fields: content.parse_terminated(Type::parse)?, - /// semi_token: input.parse()?, - /// }) - /// } - /// } - /// # - /// # fn main() { - /// # let input = quote! { - /// # struct S(A, B); - /// # }; - /// # syn::parse2::(input).unwrap(); - /// # } - /// ``` - pub fn parse_terminated( - &self, - parser: fn(ParseStream) -> Result, - ) -> Result> { - Punctuated::parse_terminated_with(self, parser) - } - - /// Returns whether there are tokens remaining in this stream. - /// - /// This method returns true at the end of the content of a set of - /// delimiters, as well as at the very end of the complete macro input. - /// - /// # Example - /// - /// ```edition2018 - /// use syn::{braced, token, Ident, Item, Result, Token}; - /// use syn::parse::{Parse, ParseStream}; - /// - /// // Parses a Rust `mod m { ... }` containing zero or more items. - /// struct Mod { - /// mod_token: Token![mod], - /// name: Ident, - /// brace_token: token::Brace, - /// items: Vec, - /// } - /// - /// impl Parse for Mod { - /// fn parse(input: ParseStream) -> Result { - /// let content; - /// Ok(Mod { - /// mod_token: input.parse()?, - /// name: input.parse()?, - /// brace_token: braced!(content in input), - /// items: { - /// let mut items = Vec::new(); - /// while !content.is_empty() { - /// items.push(content.parse()?); - /// } - /// items - /// }, - /// }) - /// } - /// } - /// ``` - pub fn is_empty(&self) -> bool { - self.cursor().eof() - } - - /// Constructs a helper for peeking at the next token in this stream and - /// building an error message if it is not one of a set of expected tokens. - /// - /// # Example - /// - /// ```edition2018 - /// use syn::{ConstParam, Ident, Lifetime, LifetimeDef, Result, Token, TypeParam}; - /// use syn::parse::{Parse, ParseStream}; - /// - /// // A generic parameter, a single one of the comma-separated elements inside - /// // angle brackets in: - /// // - /// // fn f() { ... } - /// // - /// // On invalid input, lookahead gives us a reasonable error message. 
- /// // - /// // error: expected one of: identifier, lifetime, `const` - /// // | - /// // 5 | fn f() {} - /// // | ^ - /// enum GenericParam { - /// Type(TypeParam), - /// Lifetime(LifetimeDef), - /// Const(ConstParam), - /// } - /// - /// impl Parse for GenericParam { - /// fn parse(input: ParseStream) -> Result { - /// let lookahead = input.lookahead1(); - /// if lookahead.peek(Ident) { - /// input.parse().map(GenericParam::Type) - /// } else if lookahead.peek(Lifetime) { - /// input.parse().map(GenericParam::Lifetime) - /// } else if lookahead.peek(Token![const]) { - /// input.parse().map(GenericParam::Const) - /// } else { - /// Err(lookahead.error()) - /// } - /// } - /// } - /// ``` - pub fn lookahead1(&self) -> Lookahead1<'a> { - lookahead::new(self.scope, self.cursor()) - } - - /// Forks a parse stream so that parsing tokens out of either the original - /// or the fork does not advance the position of the other. - /// - /// # Performance - /// - /// Forking a parse stream is a cheap fixed amount of work and does not - /// involve copying token buffers. Where you might hit performance problems - /// is if your macro ends up parsing a large amount of content more than - /// once. - /// - /// ```edition2018 - /// # use syn::{Expr, Result}; - /// # use syn::parse::ParseStream; - /// # - /// # fn bad(input: ParseStream) -> Result { - /// // Do not do this. - /// if input.fork().parse::().is_ok() { - /// return input.parse::(); - /// } - /// # unimplemented!() - /// # } - /// ``` - /// - /// As a rule, avoid parsing an unbounded amount of tokens out of a forked - /// parse stream. Only use a fork when the amount of work performed against - /// the fork is small and bounded. - /// - /// For a lower level but occasionally more performant way to perform - /// speculative parsing, consider using [`ParseStream::step`] instead. - /// - /// [`ParseStream::step`]: #method.step - /// - /// # Example - /// - /// The parse implementation shown here parses possibly restricted `pub` - /// visibilities. - /// - /// - `pub` - /// - `pub(crate)` - /// - `pub(self)` - /// - `pub(super)` - /// - `pub(in some::path)` - /// - /// To handle the case of visibilities inside of tuple structs, the parser - /// needs to distinguish parentheses that specify visibility restrictions - /// from parentheses that form part of a tuple type. - /// - /// ```edition2018 - /// # struct A; - /// # struct B; - /// # struct C; - /// # - /// struct S(pub(crate) A, pub (B, C)); - /// ``` - /// - /// In this example input the first tuple struct element of `S` has - /// `pub(crate)` visibility while the second tuple struct element has `pub` - /// visibility; the parentheses around `(B, C)` are part of the type rather - /// than part of a visibility restriction. - /// - /// The parser uses a forked parse stream to check the first token inside of - /// parentheses after the `pub` keyword. This is a small bounded amount of - /// work performed against the forked parse stream. 
- /// - /// ```edition2018 - /// use syn::{parenthesized, token, Ident, Path, Result, Token}; - /// use syn::ext::IdentExt; - /// use syn::parse::{Parse, ParseStream}; - /// - /// struct PubVisibility { - /// pub_token: Token![pub], - /// restricted: Option, - /// } - /// - /// struct Restricted { - /// paren_token: token::Paren, - /// in_token: Option, - /// path: Path, - /// } - /// - /// impl Parse for PubVisibility { - /// fn parse(input: ParseStream) -> Result { - /// let pub_token: Token![pub] = input.parse()?; - /// - /// if input.peek(token::Paren) { - /// let ahead = input.fork(); - /// let mut content; - /// parenthesized!(content in ahead); - /// - /// if content.peek(Token![crate]) - /// || content.peek(Token![self]) - /// || content.peek(Token![super]) - /// { - /// return Ok(PubVisibility { - /// pub_token: pub_token, - /// restricted: Some(Restricted { - /// paren_token: parenthesized!(content in input), - /// in_token: None, - /// path: Path::from(content.call(Ident::parse_any)?), - /// }), - /// }); - /// } else if content.peek(Token![in]) { - /// return Ok(PubVisibility { - /// pub_token: pub_token, - /// restricted: Some(Restricted { - /// paren_token: parenthesized!(content in input), - /// in_token: Some(content.parse()?), - /// path: content.call(Path::parse_mod_style)?, - /// }), - /// }); - /// } - /// } - /// - /// Ok(PubVisibility { - /// pub_token: pub_token, - /// restricted: None, - /// }) - /// } - /// } - /// ``` - pub fn fork(&self) -> Self { - ParseBuffer { - scope: self.scope, - cell: self.cell.clone(), - marker: PhantomData, - // Not the parent's unexpected. Nothing cares whether the clone - // parses all the way. - unexpected: Rc::new(Cell::new(None)), - } - } - - /// Triggers an error at the current position of the parse stream. - /// - /// # Example - /// - /// ```edition2018 - /// use syn::{Expr, Result, Token}; - /// use syn::parse::{Parse, ParseStream}; - /// - /// // Some kind of loop: `while` or `for` or `loop`. - /// struct Loop { - /// expr: Expr, - /// } - /// - /// impl Parse for Loop { - /// fn parse(input: ParseStream) -> Result { - /// if input.peek(Token![while]) - /// || input.peek(Token![for]) - /// || input.peek(Token![loop]) - /// { - /// Ok(Loop { - /// expr: input.parse()?, - /// }) - /// } else { - /// Err(input.error("expected some kind of loop")) - /// } - /// } - /// } - /// ``` - pub fn error(&self, message: T) -> Error { - error::new_at(self.scope, self.cursor(), message) - } - - /// Speculatively parses tokens from this parse stream, advancing the - /// position of this stream only if parsing succeeds. - /// - /// This is a powerful low-level API used for defining the `Parse` impls of - /// the basic built-in token types. It is not something that will be used - /// widely outside of the Syn codebase. - /// - /// # Example - /// - /// ```edition2018 - /// use proc_macro2::TokenTree; - /// use syn::Result; - /// use syn::parse::ParseStream; - /// - /// // This function advances the stream past the next occurrence of `@`. If - /// // no `@` is present in the stream, the stream position is unchanged and - /// // an error is returned. 
- /// fn skip_past_next_at(input: ParseStream) -> Result<()> { - /// input.step(|cursor| { - /// let mut rest = *cursor; - /// while let Some((tt, next)) = rest.token_tree() { - /// match tt { - /// TokenTree::Punct(ref punct) if punct.as_char() == '@' => { - /// return Ok(((), next)); - /// } - /// _ => rest = next, - /// } - /// } - /// Err(cursor.error("no `@` was found after this point")) - /// }) - /// } - /// # - /// # fn remainder_after_skipping_past_next_at( - /// # input: ParseStream, - /// # ) -> Result { - /// # skip_past_next_at(input)?; - /// # input.parse() - /// # } - /// # - /// # fn main() { - /// # use syn::parse::Parser; - /// # let remainder = remainder_after_skipping_past_next_at - /// # .parse_str("a @ b c") - /// # .unwrap(); - /// # assert_eq!(remainder.to_string(), "b c"); - /// # } - /// ``` - pub fn step(&self, function: F) -> Result - where - F: for<'c> FnOnce(StepCursor<'c, 'a>) -> Result<(R, Cursor<'c>)>, - { - // Since the user's function is required to work for any 'c, we know - // that the Cursor<'c> they return is either derived from the input - // StepCursor<'c, 'a> or from a Cursor<'static>. - // - // It would not be legal to write this function without the invariant - // lifetime 'c in StepCursor<'c, 'a>. If this function were written only - // in terms of 'a, the user could take our ParseBuffer<'a>, upcast it to - // a ParseBuffer<'short> which some shorter lifetime than 'a, invoke - // `step` on their ParseBuffer<'short> with a closure that returns - // Cursor<'short>, and we would wrongly write that Cursor<'short> into - // the Cell intended to hold Cursor<'a>. - // - // In some cases it may be necessary for R to contain a Cursor<'a>. - // Within Syn we solve this using `private::advance_step_cursor` which - // uses the existence of a StepCursor<'c, 'a> as proof that it is safe - // to cast from Cursor<'c> to Cursor<'a>. If needed outside of Syn, it - // would be safe to expose that API as a method on StepCursor. - let (node, rest) = function(StepCursor { - scope: self.scope, - cursor: self.cell.get(), - marker: PhantomData, - })?; - self.cell.set(rest); - Ok(node) - } - - /// Provides low-level access to the token representation underlying this - /// parse stream. - /// - /// Cursors are immutable so no operations you perform against the cursor - /// will affect the state of this parse stream. 
- pub fn cursor(&self) -> Cursor<'a> { - self.cell.get() - } - - fn check_unexpected(&self) -> Result<()> { - match self.unexpected.get() { - Some(span) => Err(Error::new(span, "unexpected token")), - None => Ok(()), - } - } -} - -impl Parse for Box { - fn parse(input: ParseStream) -> Result { - input.parse().map(Box::new) - } -} - -impl Parse for Option { - fn parse(input: ParseStream) -> Result { - if T::peek(input.cursor()) { - Ok(Some(input.parse()?)) - } else { - Ok(None) - } - } -} - -impl Parse for TokenStream { - fn parse(input: ParseStream) -> Result { - input.step(|cursor| Ok((cursor.token_stream(), Cursor::empty()))) - } -} - -impl Parse for TokenTree { - fn parse(input: ParseStream) -> Result { - input.step(|cursor| match cursor.token_tree() { - Some((tt, rest)) => Ok((tt, rest)), - None => Err(cursor.error("expected token tree")), - }) - } -} - -impl Parse for Group { - fn parse(input: ParseStream) -> Result { - input.step(|cursor| { - for delim in &[Delimiter::Parenthesis, Delimiter::Brace, Delimiter::Bracket] { - if let Some((inside, span, rest)) = cursor.group(*delim) { - let mut group = Group::new(*delim, inside.token_stream()); - group.set_span(span); - return Ok((group, rest)); - } - } - Err(cursor.error("expected group token")) - }) - } -} - -impl Parse for Punct { - fn parse(input: ParseStream) -> Result { - input.step(|cursor| match cursor.punct() { - Some((punct, rest)) => Ok((punct, rest)), - None => Err(cursor.error("expected punctuation token")), - }) - } -} - -impl Parse for Literal { - fn parse(input: ParseStream) -> Result { - input.step(|cursor| match cursor.literal() { - Some((literal, rest)) => Ok((literal, rest)), - None => Err(cursor.error("expected literal token")), - }) - } -} - -/// Parser that can parse Rust tokens into a particular syntax tree node. -/// -/// Refer to the [module documentation] for details about parsing in Syn. -/// -/// [module documentation]: index.html -/// -/// *This trait is available if Syn is built with the `"parsing"` feature.* -pub trait Parser: Sized { - type Output; - - /// Parse a proc-macro2 token stream into the chosen syntax tree node. - /// - /// This function will check that the input is fully parsed. If there are - /// any unparsed tokens at the end of the stream, an error is returned. - fn parse2(self, tokens: TokenStream) -> Result; - - /// Parse tokens of source code into the chosen syntax tree node. - /// - /// This function will check that the input is fully parsed. If there are - /// any unparsed tokens at the end of the stream, an error is returned. - /// - /// *This method is available if Syn is built with both the `"parsing"` and - /// `"proc-macro"` features.* - #[cfg(all( - not(all(target_arch = "wasm32", target_os = "unknown")), - feature = "proc-macro" - ))] - fn parse(self, tokens: proc_macro::TokenStream) -> Result { - self.parse2(proc_macro2::TokenStream::from(tokens)) - } - - /// Parse a string of Rust code into the chosen syntax tree node. - /// - /// This function will check that the input is fully parsed. If there are - /// any unparsed tokens at the end of the string, an error is returned. - /// - /// # Hygiene - /// - /// Every span in the resulting syntax tree will be set to resolve at the - /// macro call site. - fn parse_str(self, s: &str) -> Result { - self.parse2(proc_macro2::TokenStream::from_str(s)?) 
- } -} - -fn tokens_to_parse_buffer(tokens: &TokenBuffer) -> ParseBuffer { - let scope = Span::call_site(); - let cursor = tokens.begin(); - let unexpected = Rc::new(Cell::new(None)); - private::new_parse_buffer(scope, cursor, unexpected) -} - -impl Parser for F -where - F: FnOnce(ParseStream) -> Result, -{ - type Output = T; - - fn parse2(self, tokens: TokenStream) -> Result { - let buf = TokenBuffer::new2(tokens); - let state = tokens_to_parse_buffer(&buf); - let node = self(&state)?; - state.check_unexpected()?; - if state.is_empty() { - Ok(node) - } else { - Err(state.error("unexpected token")) - } - } -} diff --git a/third_party/rust/syn-0.15.30/src/parse_macro_input.rs b/third_party/rust/syn-0.15.30/src/parse_macro_input.rs deleted file mode 100644 index 9d8f6aea2e..0000000000 --- a/third_party/rust/syn-0.15.30/src/parse_macro_input.rs +++ /dev/null @@ -1,103 +0,0 @@ -/// Parse the input TokenStream of a macro, triggering a compile error if the -/// tokens fail to parse. -/// -/// Refer to the [`parse` module] documentation for more details about parsing -/// in Syn. -/// -/// [`parse` module]: parse/index.html -/// -/// # Intended usage -/// -/// ```edition2018 -/// extern crate proc_macro; -/// -/// use proc_macro::TokenStream; -/// use syn::{parse_macro_input, Result}; -/// use syn::parse::{Parse, ParseStream}; -/// -/// struct MyMacroInput { -/// /* ... */ -/// } -/// -/// impl Parse for MyMacroInput { -/// fn parse(input: ParseStream) -> Result { -/// /* ... */ -/// # Ok(MyMacroInput {}) -/// } -/// } -/// -/// # const IGNORE: &str = stringify! { -/// #[proc_macro] -/// # }; -/// pub fn my_macro(tokens: TokenStream) -> TokenStream { -/// let input = parse_macro_input!(tokens as MyMacroInput); -/// -/// /* ... */ -/// # "".parse().unwrap() -/// } -/// ``` -#[macro_export(local_inner_macros)] -macro_rules! parse_macro_input { - ($tokenstream:ident as $ty:ty) => { - match $crate::parse_macro_input::parse::<$ty>($tokenstream) { - $crate::export::Ok(data) => data, - $crate::export::Err(err) => { - return $crate::export::TokenStream::from(err.to_compile_error()); - } - } - }; - ($tokenstream:ident) => { - parse_macro_input!($tokenstream as _) - }; -} - -//////////////////////////////////////////////////////////////////////////////// -// Can parse any type that implements Parse. - -use parse::{Parse, ParseStream, Parser, Result}; -use proc_macro::TokenStream; - -// Not public API. -#[doc(hidden)] -pub fn parse(token_stream: TokenStream) -> Result { - T::parse.parse(token_stream) -} - -// Not public API. -#[doc(hidden)] -pub trait ParseMacroInput: Sized { - fn parse(input: ParseStream) -> Result; -} - -impl ParseMacroInput for T { - fn parse(input: ParseStream) -> Result { - ::parse(input) - } -} - -//////////////////////////////////////////////////////////////////////////////// -// Any other types that we want `parse_macro_input!` to be able to parse. 
- -#[cfg(any(feature = "full", feature = "derive"))] -use AttributeArgs; - -#[cfg(any(feature = "full", feature = "derive"))] -impl ParseMacroInput for AttributeArgs { - fn parse(input: ParseStream) -> Result { - let mut metas = Vec::new(); - - loop { - if input.is_empty() { - break; - } - let value = input.parse()?; - metas.push(value); - if input.is_empty() { - break; - } - input.parse::()?; - } - - Ok(metas) - } -} diff --git a/third_party/rust/syn-0.15.30/src/parse_quote.rs b/third_party/rust/syn-0.15.30/src/parse_quote.rs deleted file mode 100644 index 0b9d4d4598..0000000000 --- a/third_party/rust/syn-0.15.30/src/parse_quote.rs +++ /dev/null @@ -1,144 +0,0 @@ -/// Quasi-quotation macro that accepts input like the [`quote!`] macro but uses -/// type inference to figure out a return type for those tokens. -/// -/// [`quote!`]: https://docs.rs/quote/0.6/quote/index.html -/// -/// The return type can be any syntax tree node that implements the [`Parse`] -/// trait. -/// -/// [`Parse`]: parse/trait.Parse.html -/// -/// ```edition2018 -/// use quote::quote; -/// use syn::{parse_quote, Stmt}; -/// -/// fn main() { -/// let name = quote!(v); -/// let ty = quote!(u8); -/// -/// let stmt: Stmt = parse_quote! { -/// let #name: #ty = Default::default(); -/// }; -/// -/// println!("{:#?}", stmt); -/// } -/// ``` -/// -/// *This macro is available if Syn is built with the `"parsing"` feature, -/// although interpolation of syntax tree nodes into the quoted tokens is only -/// supported if Syn is built with the `"printing"` feature as well.* -/// -/// # Example -/// -/// The following helper function adds a bound `T: HeapSize` to every type -/// parameter `T` in the input generics. -/// -/// ```edition2018 -/// use syn::{parse_quote, Generics, GenericParam}; -/// -/// // Add a bound `T: HeapSize` to every type parameter T. -/// fn add_trait_bounds(mut generics: Generics) -> Generics { -/// for param in &mut generics.params { -/// if let GenericParam::Type(ref mut type_param) = *param { -/// type_param.bounds.push(parse_quote!(HeapSize)); -/// } -/// } -/// generics -/// } -/// ``` -/// -/// # Special cases -/// -/// This macro can parse the following additional types as a special case even -/// though they do not implement the `Parse` trait. -/// -/// - [`Attribute`] — parses one attribute, allowing either outer like `#[...]` -/// or inner like `#![...]` -/// - [`Punctuated`] — parses zero or more `T` separated by punctuation -/// `P` with optional trailing punctuation -/// -/// [`Attribute`]: struct.Attribute.html -/// [`Punctuated`]: punctuated/struct.Punctuated.html -/// -/// # Panics -/// -/// Panics if the tokens fail to parse as the expected syntax tree type. The -/// caller is responsible for ensuring that the input tokens are syntactically -/// valid. -#[macro_export(local_inner_macros)] -macro_rules! parse_quote { - ($($tt:tt)*) => { - $crate::parse_quote::parse($crate::export::From::from(quote_impl!($($tt)*))) - }; -} - -#[cfg(not(syn_can_call_macro_by_path))] -#[doc(hidden)] -#[macro_export] -macro_rules! quote_impl { - ($($tt:tt)*) => { - // Require caller to have their own `#[macro_use] extern crate quote`. - quote!($($tt)*) - }; -} - -#[cfg(syn_can_call_macro_by_path)] -#[doc(hidden)] -#[macro_export] -macro_rules! quote_impl { - ($($tt:tt)*) => { - $crate::export::quote::quote!($($tt)*) - }; -} - -//////////////////////////////////////////////////////////////////////////////// -// Can parse any type that implements Parse. 
- -use parse::{Parse, ParseStream, Parser, Result}; -use proc_macro2::TokenStream; - -// Not public API. -#[doc(hidden)] -pub fn parse(token_stream: TokenStream) -> T { - let parser = T::parse; - match parser.parse2(token_stream) { - Ok(t) => t, - Err(err) => panic!("{}", err), - } -} - -// Not public API. -#[doc(hidden)] -pub trait ParseQuote: Sized { - fn parse(input: ParseStream) -> Result; -} - -impl ParseQuote for T { - fn parse(input: ParseStream) -> Result { - ::parse(input) - } -} - -//////////////////////////////////////////////////////////////////////////////// -// Any other types that we want `parse_quote!` to be able to parse. - -use punctuated::Punctuated; -#[cfg(any(feature = "full", feature = "derive"))] -use {attr, Attribute}; - -#[cfg(any(feature = "full", feature = "derive"))] -impl ParseQuote for Attribute { - fn parse(input: ParseStream) -> Result { - if input.peek(Token![#]) && input.peek2(Token![!]) { - attr::parsing::single_parse_inner(input) - } else { - attr::parsing::single_parse_outer(input) - } - } -} - -impl ParseQuote for Punctuated { - fn parse(input: ParseStream) -> Result { - Self::parse_terminated(input) - } -} diff --git a/third_party/rust/syn-0.15.30/src/path.rs b/third_party/rust/syn-0.15.30/src/path.rs deleted file mode 100644 index 20c70cd79f..0000000000 --- a/third_party/rust/syn-0.15.30/src/path.rs +++ /dev/null @@ -1,704 +0,0 @@ -use super::*; -use punctuated::Punctuated; - -ast_struct! { - /// A path at which a named item is exported: `std::collections::HashMap`. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - pub struct Path { - pub leading_colon: Option, - pub segments: Punctuated, - } -} - -impl From for Path -where - T: Into, -{ - fn from(segment: T) -> Self { - let mut path = Path { - leading_colon: None, - segments: Punctuated::new(), - }; - path.segments.push_value(segment.into()); - path - } -} - -ast_struct! { - /// A segment of a path together with any path arguments on that segment. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - pub struct PathSegment { - pub ident: Ident, - pub arguments: PathArguments, - } -} - -impl From for PathSegment -where - T: Into, -{ - fn from(ident: T) -> Self { - PathSegment { - ident: ident.into(), - arguments: PathArguments::None, - } - } -} - -ast_enum! { - /// Angle bracketed or parenthesized arguments of a path segment. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - /// - /// ## Angle bracketed - /// - /// The `<'a, T>` in `std::slice::iter<'a, T>`. - /// - /// ## Parenthesized - /// - /// The `(A, B) -> C` in `Fn(A, B) -> C`. - pub enum PathArguments { - None, - /// The `<'a, T>` in `std::slice::iter<'a, T>`. - AngleBracketed(AngleBracketedGenericArguments), - /// The `(A, B) -> C` in `Fn(A, B) -> C`. - Parenthesized(ParenthesizedGenericArguments), - } -} - -impl Default for PathArguments { - fn default() -> Self { - PathArguments::None - } -} - -impl PathArguments { - pub fn is_empty(&self) -> bool { - match *self { - PathArguments::None => true, - PathArguments::AngleBracketed(ref bracketed) => bracketed.args.is_empty(), - PathArguments::Parenthesized(_) => false, - } - } - - #[cfg(feature = "parsing")] - fn is_none(&self) -> bool { - match *self { - PathArguments::None => true, - PathArguments::AngleBracketed(_) | PathArguments::Parenthesized(_) => false, - } - } -} - -ast_enum! 
{ - /// An individual generic argument, like `'a`, `T`, or `Item = T`. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - pub enum GenericArgument { - /// A lifetime argument. - Lifetime(Lifetime), - /// A type argument. - Type(Type), - /// A binding (equality constraint) on an associated type: the `Item = - /// u8` in `Iterator`. - Binding(Binding), - /// An associated type bound: `Iterator`. - Constraint(Constraint), - /// A const expression. Must be inside of a block. - /// - /// NOTE: Identity expressions are represented as Type arguments, as - /// they are indistinguishable syntactically. - Const(Expr), - } -} - -ast_struct! { - /// Angle bracketed arguments of a path segment: the `` in `HashMap`. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - pub struct AngleBracketedGenericArguments { - pub colon2_token: Option, - pub lt_token: Token![<], - pub args: Punctuated, - pub gt_token: Token![>], - } -} - -ast_struct! { - /// A binding (equality constraint) on an associated type: `Item = u8`. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - pub struct Binding { - pub ident: Ident, - pub eq_token: Token![=], - pub ty: Type, - } -} - -ast_struct! { - /// An associated type bound: `Iterator`. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - pub struct Constraint { - pub ident: Ident, - pub colon_token: Token![:], - pub bounds: Punctuated, - } -} - -ast_struct! { - /// Arguments of a function path segment: the `(A, B) -> C` in `Fn(A,B) -> - /// C`. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - pub struct ParenthesizedGenericArguments { - pub paren_token: token::Paren, - /// `(A, B)` - pub inputs: Punctuated, - /// `C` - pub output: ReturnType, - } -} - -ast_struct! { - /// The explicit Self type in a qualified path: the `T` in `::fmt`. - /// - /// The actual path, including the trait and the associated item, is stored - /// separately. The `position` field represents the index of the associated - /// item qualified with this Self type. 
- /// - /// ```text - /// as a::b::Trait>::AssociatedItem - /// ^~~~~~ ~~~~~~~~~~~~~~^ - /// ty position = 3 - /// - /// >::AssociatedItem - /// ^~~~~~ ^ - /// ty position = 0 - /// ``` - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - pub struct QSelf { - pub lt_token: Token![<], - pub ty: Box, - pub position: usize, - pub as_token: Option, - pub gt_token: Token![>], - } -} - -#[cfg(feature = "parsing")] -pub mod parsing { - use super::*; - - #[cfg(feature = "full")] - use expr; - use ext::IdentExt; - use parse::{Parse, ParseStream, Result}; - - impl Parse for Path { - fn parse(input: ParseStream) -> Result { - Self::parse_helper(input, false) - } - } - - impl Parse for GenericArgument { - fn parse(input: ParseStream) -> Result { - if input.peek(Lifetime) && !input.peek2(Token![+]) { - return Ok(GenericArgument::Lifetime(input.parse()?)); - } - - if input.peek(Ident) && input.peek2(Token![=]) { - return Ok(GenericArgument::Binding(input.parse()?)); - } - - #[cfg(feature = "full")] - { - if input.peek(Ident) && input.peek2(Token![:]) && !input.peek2(Token![::]) { - return Ok(GenericArgument::Constraint(input.parse()?)); - } - - if input.peek(Lit) { - let lit = input.call(expr::parsing::expr_lit)?; - return Ok(GenericArgument::Const(Expr::Lit(lit))); - } - - if input.peek(token::Brace) { - let block = input.call(expr::parsing::expr_block)?; - return Ok(GenericArgument::Const(Expr::Block(block))); - } - } - - input.parse().map(GenericArgument::Type) - } - } - - impl Parse for AngleBracketedGenericArguments { - fn parse(input: ParseStream) -> Result { - Ok(AngleBracketedGenericArguments { - colon2_token: input.parse()?, - lt_token: input.parse()?, - args: { - let mut args = Punctuated::new(); - loop { - if input.peek(Token![>]) { - break; - } - let value = input.parse()?; - args.push_value(value); - if input.peek(Token![>]) { - break; - } - let punct = input.parse()?; - args.push_punct(punct); - } - args - }, - gt_token: input.parse()?, - }) - } - } - - impl Parse for ParenthesizedGenericArguments { - fn parse(input: ParseStream) -> Result { - let content; - Ok(ParenthesizedGenericArguments { - paren_token: parenthesized!(content in input), - inputs: content.parse_terminated(Type::parse)?, - output: input.call(ReturnType::without_plus)?, - }) - } - } - - impl Parse for PathSegment { - fn parse(input: ParseStream) -> Result { - Self::parse_helper(input, false) - } - } - - impl PathSegment { - fn parse_helper(input: ParseStream, expr_style: bool) -> Result { - if input.peek(Token![super]) - || input.peek(Token![self]) - || input.peek(Token![Self]) - || input.peek(Token![crate]) - || input.peek(Token![extern]) - { - let ident = input.call(Ident::parse_any)?; - return Ok(PathSegment::from(ident)); - } - - let ident = input.parse()?; - if !expr_style && input.peek(Token![<]) && !input.peek(Token![<=]) - || input.peek(Token![::]) && input.peek3(Token![<]) - { - Ok(PathSegment { - ident: ident, - arguments: PathArguments::AngleBracketed(input.parse()?), - }) - } else { - Ok(PathSegment::from(ident)) - } - } - } - - impl Parse for Binding { - fn parse(input: ParseStream) -> Result { - Ok(Binding { - ident: input.parse()?, - eq_token: input.parse()?, - ty: input.parse()?, - }) - } - } - - #[cfg(feature = "full")] - impl Parse for Constraint { - fn parse(input: ParseStream) -> Result { - Ok(Constraint { - ident: input.parse()?, - colon_token: input.parse()?, - bounds: { - let mut bounds = Punctuated::new(); - loop { - if input.peek(Token![,]) || 
input.peek(Token![>]) { - break; - } - let value = input.parse()?; - bounds.push_value(value); - if !input.peek(Token![+]) { - break; - } - let punct = input.parse()?; - bounds.push_punct(punct); - } - bounds - }, - }) - } - } - - impl Path { - /// Parse a `Path` containing no path arguments on any of its segments. - /// - /// *This function is available if Syn is built with the `"parsing"` - /// feature.* - /// - /// # Example - /// - /// ```edition2018 - /// use syn::{Path, Result, Token}; - /// use syn::parse::{Parse, ParseStream}; - /// - /// // A simplified single `use` statement like: - /// // - /// // use std::collections::HashMap; - /// // - /// // Note that generic parameters are not allowed in a `use` statement - /// // so the following must not be accepted. - /// // - /// // use a::::c; - /// struct SingleUse { - /// use_token: Token![use], - /// path: Path, - /// } - /// - /// impl Parse for SingleUse { - /// fn parse(input: ParseStream) -> Result { - /// Ok(SingleUse { - /// use_token: input.parse()?, - /// path: input.call(Path::parse_mod_style)?, - /// }) - /// } - /// } - /// ``` - pub fn parse_mod_style(input: ParseStream) -> Result { - Ok(Path { - leading_colon: input.parse()?, - segments: { - let mut segments = Punctuated::new(); - loop { - if !input.peek(Ident) - && !input.peek(Token![super]) - && !input.peek(Token![self]) - && !input.peek(Token![Self]) - && !input.peek(Token![crate]) - && !input.peek(Token![extern]) - { - break; - } - let ident = Ident::parse_any(input)?; - segments.push_value(PathSegment::from(ident)); - if !input.peek(Token![::]) { - break; - } - let punct = input.parse()?; - segments.push_punct(punct); - } - if segments.is_empty() { - return Err(input.error("expected path")); - } else if segments.trailing_punct() { - return Err(input.error("expected path segment")); - } - segments - }, - }) - } - - /// Determines whether this is a path of length 1 equal to the given - /// ident. - /// - /// For them to compare equal, it must be the case that: - /// - /// - the path has no leading colon, - /// - the number of path segments is 1, - /// - the first path segment has no angle bracketed or parenthesized - /// path arguments - /// - and the ident of the first path segment is equal to the given one. 
- /// - /// *This function is available if Syn is built with the `"parsing"` - /// feature.* - pub fn is_ident(&self, ident: I) -> bool - where - Ident: PartialEq, - { - self.leading_colon.is_none() - && self.segments.len() == 1 - && self.segments[0].arguments.is_none() - && self.segments[0].ident == ident - } - - fn parse_helper(input: ParseStream, expr_style: bool) -> Result { - if input.peek(Token![dyn]) { - return Err(input.error("expected path")); - } - - Ok(Path { - leading_colon: input.parse()?, - segments: { - let mut segments = Punctuated::new(); - let value = PathSegment::parse_helper(input, expr_style)?; - segments.push_value(value); - while input.peek(Token![::]) { - let punct: Token![::] = input.parse()?; - segments.push_punct(punct); - let value = PathSegment::parse_helper(input, expr_style)?; - segments.push_value(value); - } - segments - }, - }) - } - } - - pub fn qpath(input: ParseStream, expr_style: bool) -> Result<(Option, Path)> { - if input.peek(Token![<]) { - let lt_token: Token![<] = input.parse()?; - let this: Type = input.parse()?; - let path = if input.peek(Token![as]) { - let as_token: Token![as] = input.parse()?; - let path: Path = input.parse()?; - Some((as_token, path)) - } else { - None - }; - let gt_token: Token![>] = input.parse()?; - let colon2_token: Token![::] = input.parse()?; - let mut rest = Punctuated::new(); - loop { - let path = PathSegment::parse_helper(input, expr_style)?; - rest.push_value(path); - if !input.peek(Token![::]) { - break; - } - let punct: Token![::] = input.parse()?; - rest.push_punct(punct); - } - let (position, as_token, path) = match path { - Some((as_token, mut path)) => { - let pos = path.segments.len(); - path.segments.push_punct(colon2_token); - path.segments.extend(rest.into_pairs()); - (pos, Some(as_token), path) - } - None => { - let path = Path { - leading_colon: Some(colon2_token), - segments: rest, - }; - (0, None, path) - } - }; - let qself = QSelf { - lt_token: lt_token, - ty: Box::new(this), - position: position, - as_token: as_token, - gt_token: gt_token, - }; - Ok((Some(qself), path)) - } else { - let path = Path::parse_helper(input, expr_style)?; - Ok((None, path)) - } - } -} - -#[cfg(feature = "printing")] -mod printing { - use super::*; - - use proc_macro2::TokenStream; - use quote::ToTokens; - - use print::TokensOrDefault; - - impl ToTokens for Path { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.leading_colon.to_tokens(tokens); - self.segments.to_tokens(tokens); - } - } - - impl ToTokens for PathSegment { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.ident.to_tokens(tokens); - self.arguments.to_tokens(tokens); - } - } - - impl ToTokens for PathArguments { - fn to_tokens(&self, tokens: &mut TokenStream) { - match *self { - PathArguments::None => {} - PathArguments::AngleBracketed(ref arguments) => { - arguments.to_tokens(tokens); - } - PathArguments::Parenthesized(ref arguments) => { - arguments.to_tokens(tokens); - } - } - } - } - - impl ToTokens for GenericArgument { - #[cfg_attr(feature = "cargo-clippy", allow(match_same_arms))] - fn to_tokens(&self, tokens: &mut TokenStream) { - match *self { - GenericArgument::Lifetime(ref lt) => lt.to_tokens(tokens), - GenericArgument::Type(ref ty) => ty.to_tokens(tokens), - GenericArgument::Binding(ref tb) => tb.to_tokens(tokens), - GenericArgument::Constraint(ref tc) => tc.to_tokens(tokens), - GenericArgument::Const(ref e) => match *e { - Expr::Lit(_) => e.to_tokens(tokens), - - // NOTE: We should probably support parsing blocks with only - // 
expressions in them without the full feature for const - // generics. - #[cfg(feature = "full")] - Expr::Block(_) => e.to_tokens(tokens), - - // ERROR CORRECTION: Add braces to make sure that the - // generated code is valid. - _ => token::Brace::default().surround(tokens, |tokens| { - e.to_tokens(tokens); - }), - }, - } - } - } - - impl ToTokens for AngleBracketedGenericArguments { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.colon2_token.to_tokens(tokens); - self.lt_token.to_tokens(tokens); - - // Print lifetimes before types and consts, all before bindings, - // regardless of their order in self.args. - // - // TODO: ordering rules for const arguments vs type arguments have - // not been settled yet. https://github.com/rust-lang/rust/issues/44580 - let mut trailing_or_empty = true; - for param in self.args.pairs() { - match **param.value() { - GenericArgument::Lifetime(_) => { - param.to_tokens(tokens); - trailing_or_empty = param.punct().is_some(); - } - GenericArgument::Type(_) - | GenericArgument::Binding(_) - | GenericArgument::Constraint(_) - | GenericArgument::Const(_) => {} - } - } - for param in self.args.pairs() { - match **param.value() { - GenericArgument::Type(_) | GenericArgument::Const(_) => { - if !trailing_or_empty { - ::default().to_tokens(tokens); - } - param.to_tokens(tokens); - trailing_or_empty = param.punct().is_some(); - } - GenericArgument::Lifetime(_) - | GenericArgument::Binding(_) - | GenericArgument::Constraint(_) => {} - } - } - for param in self.args.pairs() { - match **param.value() { - GenericArgument::Binding(_) | GenericArgument::Constraint(_) => { - if !trailing_or_empty { - ::default().to_tokens(tokens); - trailing_or_empty = true; - } - param.to_tokens(tokens); - } - GenericArgument::Lifetime(_) - | GenericArgument::Type(_) - | GenericArgument::Const(_) => {} - } - } - - self.gt_token.to_tokens(tokens); - } - } - - impl ToTokens for Binding { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.ident.to_tokens(tokens); - self.eq_token.to_tokens(tokens); - self.ty.to_tokens(tokens); - } - } - - impl ToTokens for Constraint { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.ident.to_tokens(tokens); - self.colon_token.to_tokens(tokens); - self.bounds.to_tokens(tokens); - } - } - - impl ToTokens for ParenthesizedGenericArguments { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.paren_token.surround(tokens, |tokens| { - self.inputs.to_tokens(tokens); - }); - self.output.to_tokens(tokens); - } - } - - impl private { - pub fn print_path(tokens: &mut TokenStream, qself: &Option, path: &Path) { - let qself = match *qself { - Some(ref qself) => qself, - None => { - path.to_tokens(tokens); - return; - } - }; - qself.lt_token.to_tokens(tokens); - qself.ty.to_tokens(tokens); - - let pos = if qself.position > 0 && qself.position >= path.segments.len() { - path.segments.len() - 1 - } else { - qself.position - }; - let mut segments = path.segments.pairs(); - if pos > 0 { - TokensOrDefault(&qself.as_token).to_tokens(tokens); - path.leading_colon.to_tokens(tokens); - for (i, segment) in segments.by_ref().take(pos).enumerate() { - if i + 1 == pos { - segment.value().to_tokens(tokens); - qself.gt_token.to_tokens(tokens); - segment.punct().to_tokens(tokens); - } else { - segment.to_tokens(tokens); - } - } - } else { - qself.gt_token.to_tokens(tokens); - path.leading_colon.to_tokens(tokens); - } - for segment in segments { - segment.to_tokens(tokens); - } - } - } -} diff --git a/third_party/rust/syn-0.15.30/src/print.rs 
b/third_party/rust/syn-0.15.30/src/print.rs deleted file mode 100644 index 90570a040b..0000000000 --- a/third_party/rust/syn-0.15.30/src/print.rs +++ /dev/null @@ -1,16 +0,0 @@ -use proc_macro2::TokenStream; -use quote::ToTokens; - -pub struct TokensOrDefault<'a, T: 'a>(pub &'a Option); - -impl<'a, T> ToTokens for TokensOrDefault<'a, T> -where - T: ToTokens + Default, -{ - fn to_tokens(&self, tokens: &mut TokenStream) { - match *self.0 { - Some(ref t) => t.to_tokens(tokens), - None => T::default().to_tokens(tokens), - } - } -} diff --git a/third_party/rust/syn-0.15.30/src/punctuated.rs b/third_party/rust/syn-0.15.30/src/punctuated.rs deleted file mode 100644 index 6b29176b32..0000000000 --- a/third_party/rust/syn-0.15.30/src/punctuated.rs +++ /dev/null @@ -1,784 +0,0 @@ -//! A punctuated sequence of syntax tree nodes separated by punctuation. -//! -//! Lots of things in Rust are punctuated sequences. -//! -//! - The fields of a struct are `Punctuated`. -//! - The segments of a path are `Punctuated`. -//! - The bounds on a generic parameter are `Punctuated`. -//! - The arguments to a function call are `Punctuated`. -//! -//! This module provides a common representation for these punctuated sequences -//! in the form of the [`Punctuated`] type. We store a vector of pairs of -//! syntax tree node + punctuation, where every node in the sequence is followed -//! by punctuation except for possibly the final one. -//! -//! [`Punctuated`]: struct.Punctuated.html -//! -//! ```text -//! a_function_call(arg1, arg2, arg3); -//! ^^^^^ ~~~~~ ^^^^ -//! ``` - -#[cfg(feature = "extra-traits")] -use std::fmt::{self, Debug}; -#[cfg(any(feature = "full", feature = "derive"))] -use std::iter; -use std::iter::FromIterator; -use std::ops::{Index, IndexMut}; -use std::option; -use std::slice; -use std::vec; - -#[cfg(feature = "parsing")] -use parse::{Parse, ParseStream, Result}; -#[cfg(any(feature = "full", feature = "derive"))] -use private; -#[cfg(feature = "parsing")] -use token::Token; - -/// A punctuated sequence of syntax tree nodes of type `T` separated by -/// punctuation of type `P`. -/// -/// Refer to the [module documentation] for details about punctuated sequences. -/// -/// [module documentation]: index.html -#[cfg_attr(feature = "extra-traits", derive(Eq, PartialEq, Hash))] -#[cfg_attr(feature = "clone-impls", derive(Clone))] -pub struct Punctuated { - inner: Vec<(T, P)>, - last: Option>, -} - -impl Punctuated { - /// Creates an empty punctuated sequence. - pub fn new() -> Punctuated { - Punctuated { - inner: Vec::new(), - last: None, - } - } - - /// Determines whether this punctuated sequence is empty, meaning it - /// contains no syntax tree nodes or punctuation. - pub fn is_empty(&self) -> bool { - self.inner.len() == 0 && self.last.is_none() - } - - /// Returns the number of syntax tree nodes in this punctuated sequence. - /// - /// This is the number of nodes of type `T`, not counting the punctuation of - /// type `P`. - pub fn len(&self) -> usize { - self.inner.len() + if self.last.is_some() { 1 } else { 0 } - } - - /// Borrows the first punctuated pair in this sequence. - pub fn first(&self) -> Option> { - self.pairs().next() - } - - /// Borrows the last punctuated pair in this sequence. - pub fn last(&self) -> Option> { - if self.last.is_some() { - self.last.as_ref().map(|t| Pair::End(t.as_ref())) - } else { - self.inner - .last() - .map(|&(ref t, ref d)| Pair::Punctuated(t, d)) - } - } - - /// Mutably borrows the last punctuated pair in this sequence. 
- pub fn last_mut(&mut self) -> Option> { - if self.last.is_some() { - self.last.as_mut().map(|t| Pair::End(t.as_mut())) - } else { - self.inner - .last_mut() - .map(|&mut (ref mut t, ref mut d)| Pair::Punctuated(t, d)) - } - } - - /// Returns an iterator over borrowed syntax tree nodes of type `&T`. - pub fn iter(&self) -> Iter { - Iter { - inner: Box::new(PrivateIter { - inner: self.inner.iter(), - last: self.last.as_ref().map(Box::as_ref).into_iter(), - }), - } - } - - /// Returns an iterator over mutably borrowed syntax tree nodes of type - /// `&mut T`. - pub fn iter_mut(&mut self) -> IterMut { - IterMut { - inner: Box::new(PrivateIterMut { - inner: self.inner.iter_mut(), - last: self.last.as_mut().map(Box::as_mut).into_iter(), - }), - } - } - - /// Returns an iterator over the contents of this sequence as borrowed - /// punctuated pairs. - pub fn pairs(&self) -> Pairs { - Pairs { - inner: self.inner.iter(), - last: self.last.as_ref().map(Box::as_ref).into_iter(), - } - } - - /// Returns an iterator over the contents of this sequence as mutably - /// borrowed punctuated pairs. - pub fn pairs_mut(&mut self) -> PairsMut { - PairsMut { - inner: self.inner.iter_mut(), - last: self.last.as_mut().map(Box::as_mut).into_iter(), - } - } - - /// Returns an iterator over the contents of this sequence as owned - /// punctuated pairs. - pub fn into_pairs(self) -> IntoPairs { - IntoPairs { - inner: self.inner.into_iter(), - last: self.last.map(|t| *t).into_iter(), - } - } - - /// Appends a syntax tree node onto the end of this punctuated sequence. The - /// sequence must previously have a trailing punctuation. - /// - /// Use [`push`] instead if the punctuated sequence may or may not already - /// have trailing punctuation. - /// - /// [`push`]: #method.push - /// - /// # Panics - /// - /// Panics if the sequence does not already have a trailing punctuation when - /// this method is called. - pub fn push_value(&mut self, value: T) { - assert!(self.empty_or_trailing()); - self.last = Some(Box::new(value)); - } - - /// Appends a trailing punctuation onto the end of this punctuated sequence. - /// The sequence must be non-empty and must not already have trailing - /// punctuation. - /// - /// # Panics - /// - /// Panics if the sequence is empty or already has a trailing punctuation. - pub fn push_punct(&mut self, punctuation: P) { - assert!(self.last.is_some()); - let last = self.last.take().unwrap(); - self.inner.push((*last, punctuation)); - } - - /// Removes the last punctuated pair from this sequence, or `None` if the - /// sequence is empty. - pub fn pop(&mut self) -> Option> { - if self.last.is_some() { - self.last.take().map(|t| Pair::End(*t)) - } else { - self.inner.pop().map(|(t, d)| Pair::Punctuated(t, d)) - } - } - - /// Determines whether this punctuated sequence ends with a trailing - /// punctuation. - pub fn trailing_punct(&self) -> bool { - self.last.is_none() && !self.is_empty() - } - - /// Returns true if either this `Punctuated` is empty, or it has a trailing - /// punctuation. - /// - /// Equivalent to `punctuated.is_empty() || punctuated.trailing_punct()`. - pub fn empty_or_trailing(&self) -> bool { - self.last.is_none() - } - - /// Appends a syntax tree node onto the end of this punctuated sequence. - /// - /// If there is not a trailing punctuation in this sequence when this method - /// is called, the default value of punctuation type `P` is inserted before - /// the given value of type `T`. 
- pub fn push(&mut self, value: T) - where - P: Default, - { - if !self.empty_or_trailing() { - self.push_punct(Default::default()); - } - self.push_value(value); - } - - /// Inserts an element at position `index`. - /// - /// # Panics - /// - /// Panics if `index` is greater than the number of elements previously in - /// this punctuated sequence. - pub fn insert(&mut self, index: usize, value: T) - where - P: Default, - { - assert!(index <= self.len()); - - if index == self.len() { - self.push(value); - } else { - self.inner.insert(index, (value, Default::default())); - } - } - - /// Parses zero or more occurrences of `T` separated by punctuation of type - /// `P`, with optional trailing punctuation. - /// - /// Parsing continues until the end of this parse stream. The entire content - /// of this parse stream must consist of `T` and `P`. - /// - /// *This function is available if Syn is built with the `"parsing"` - /// feature.* - #[cfg(feature = "parsing")] - pub fn parse_terminated(input: ParseStream) -> Result - where - T: Parse, - P: Parse, - { - Self::parse_terminated_with(input, T::parse) - } - - /// Parses zero or more occurrences of `T` using the given parse function, - /// separated by punctuation of type `P`, with optional trailing - /// punctuation. - /// - /// Like [`parse_terminated`], the entire content of this stream is expected - /// to be parsed. - /// - /// [`parse_terminated`]: #method.parse_terminated - /// - /// *This function is available if Syn is built with the `"parsing"` - /// feature.* - #[cfg(feature = "parsing")] - pub fn parse_terminated_with( - input: ParseStream, - parser: fn(ParseStream) -> Result, - ) -> Result - where - P: Parse, - { - let mut punctuated = Punctuated::new(); - - loop { - if input.is_empty() { - break; - } - let value = parser(input)?; - punctuated.push_value(value); - if input.is_empty() { - break; - } - let punct = input.parse()?; - punctuated.push_punct(punct); - } - - Ok(punctuated) - } - - /// Parses one or more occurrences of `T` separated by punctuation of type - /// `P`, not accepting trailing punctuation. - /// - /// Parsing continues as long as punctuation `P` is present at the head of - /// the stream. This method returns upon parsing a `T` and observing that it - /// is not followed by a `P`, even if there are remaining tokens in the - /// stream. - /// - /// *This function is available if Syn is built with the `"parsing"` - /// feature.* - #[cfg(feature = "parsing")] - pub fn parse_separated_nonempty(input: ParseStream) -> Result - where - T: Parse, - P: Token + Parse, - { - Self::parse_separated_nonempty_with(input, T::parse) - } - - /// Parses one or more occurrences of `T` using the given parse function, - /// separated by punctuation of type `P`, not accepting trailing - /// punctuation. - /// - /// Like [`parse_separated_nonempty`], may complete early without parsing - /// the entire content of this stream. 
- /// - /// [`parse_separated_nonempty`]: #method.parse_separated_nonempty - /// - /// *This function is available if Syn is built with the `"parsing"` - /// feature.* - #[cfg(feature = "parsing")] - pub fn parse_separated_nonempty_with( - input: ParseStream, - parser: fn(ParseStream) -> Result, - ) -> Result - where - P: Token + Parse, - { - let mut punctuated = Punctuated::new(); - - loop { - let value = parser(input)?; - punctuated.push_value(value); - if !P::peek(input.cursor()) { - break; - } - let punct = input.parse()?; - punctuated.push_punct(punct); - } - - Ok(punctuated) - } -} - -#[cfg(feature = "extra-traits")] -impl Debug for Punctuated { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut list = f.debug_list(); - for &(ref t, ref p) in &self.inner { - list.entry(t); - list.entry(p); - } - if let Some(ref last) = self.last { - list.entry(last); - } - list.finish() - } -} - -impl FromIterator for Punctuated -where - P: Default, -{ - fn from_iter>(i: I) -> Self { - let mut ret = Punctuated::new(); - ret.extend(i); - ret - } -} - -impl Extend for Punctuated -where - P: Default, -{ - fn extend>(&mut self, i: I) { - for value in i { - self.push(value); - } - } -} - -impl FromIterator> for Punctuated { - fn from_iter>>(i: I) -> Self { - let mut ret = Punctuated::new(); - ret.extend(i); - ret - } -} - -impl Extend> for Punctuated { - fn extend>>(&mut self, i: I) { - assert!(self.empty_or_trailing()); - let mut nomore = false; - for pair in i { - if nomore { - panic!("Punctuated extended with items after a Pair::End"); - } - match pair { - Pair::Punctuated(a, b) => self.inner.push((a, b)), - Pair::End(a) => { - self.last = Some(Box::new(a)); - nomore = true; - } - } - } - } -} - -impl IntoIterator for Punctuated { - type Item = T; - type IntoIter = IntoIter; - - fn into_iter(self) -> Self::IntoIter { - IntoIter { - inner: self.inner.into_iter(), - last: self.last.map(|t| *t).into_iter(), - } - } -} - -impl<'a, T, P> IntoIterator for &'a Punctuated { - type Item = &'a T; - type IntoIter = Iter<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - Punctuated::iter(self) - } -} - -impl<'a, T, P> IntoIterator for &'a mut Punctuated { - type Item = &'a mut T; - type IntoIter = IterMut<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - Punctuated::iter_mut(self) - } -} - -impl Default for Punctuated { - fn default() -> Self { - Punctuated::new() - } -} - -/// An iterator over borrowed pairs of type `Pair<&T, &P>`. -/// -/// Refer to the [module documentation] for details about punctuated sequences. -/// -/// [module documentation]: index.html -pub struct Pairs<'a, T: 'a, P: 'a> { - inner: slice::Iter<'a, (T, P)>, - last: option::IntoIter<&'a T>, -} - -impl<'a, T, P> Iterator for Pairs<'a, T, P> { - type Item = Pair<&'a T, &'a P>; - - fn next(&mut self) -> Option { - self.inner - .next() - .map(|&(ref t, ref p)| Pair::Punctuated(t, p)) - .or_else(|| self.last.next().map(Pair::End)) - } -} - -impl<'a, T, P> ExactSizeIterator for Pairs<'a, T, P> { - fn len(&self) -> usize { - self.inner.len() + self.last.len() - } -} - -/// An iterator over mutably borrowed pairs of type `Pair<&mut T, &mut P>`. -/// -/// Refer to the [module documentation] for details about punctuated sequences. 
-/// -/// [module documentation]: index.html -pub struct PairsMut<'a, T: 'a, P: 'a> { - inner: slice::IterMut<'a, (T, P)>, - last: option::IntoIter<&'a mut T>, -} - -impl<'a, T, P> Iterator for PairsMut<'a, T, P> { - type Item = Pair<&'a mut T, &'a mut P>; - - fn next(&mut self) -> Option { - self.inner - .next() - .map(|&mut (ref mut t, ref mut p)| Pair::Punctuated(t, p)) - .or_else(|| self.last.next().map(Pair::End)) - } -} - -impl<'a, T, P> ExactSizeIterator for PairsMut<'a, T, P> { - fn len(&self) -> usize { - self.inner.len() + self.last.len() - } -} - -/// An iterator over owned pairs of type `Pair`. -/// -/// Refer to the [module documentation] for details about punctuated sequences. -/// -/// [module documentation]: index.html -pub struct IntoPairs { - inner: vec::IntoIter<(T, P)>, - last: option::IntoIter, -} - -impl Iterator for IntoPairs { - type Item = Pair; - - fn next(&mut self) -> Option { - self.inner - .next() - .map(|(t, p)| Pair::Punctuated(t, p)) - .or_else(|| self.last.next().map(Pair::End)) - } -} - -impl ExactSizeIterator for IntoPairs { - fn len(&self) -> usize { - self.inner.len() + self.last.len() - } -} - -/// An iterator over owned values of type `T`. -/// -/// Refer to the [module documentation] for details about punctuated sequences. -/// -/// [module documentation]: index.html -pub struct IntoIter { - inner: vec::IntoIter<(T, P)>, - last: option::IntoIter, -} - -impl Iterator for IntoIter { - type Item = T; - - fn next(&mut self) -> Option { - self.inner - .next() - .map(|pair| pair.0) - .or_else(|| self.last.next()) - } -} - -impl ExactSizeIterator for IntoIter { - fn len(&self) -> usize { - self.inner.len() + self.last.len() - } -} - -/// An iterator over borrowed values of type `&T`. -/// -/// Refer to the [module documentation] for details about punctuated sequences. -/// -/// [module documentation]: index.html -pub struct Iter<'a, T: 'a> { - inner: Box + 'a>, -} - -struct PrivateIter<'a, T: 'a, P: 'a> { - inner: slice::Iter<'a, (T, P)>, - last: option::IntoIter<&'a T>, -} - -#[cfg(any(feature = "full", feature = "derive"))] -impl private { - pub fn empty_punctuated_iter<'a, T>() -> Iter<'a, T> { - Iter { - inner: Box::new(iter::empty()), - } - } -} - -impl<'a, T> Iterator for Iter<'a, T> { - type Item = &'a T; - - fn next(&mut self) -> Option { - self.inner.next() - } -} - -impl<'a, T> ExactSizeIterator for Iter<'a, T> { - fn len(&self) -> usize { - self.inner.len() - } -} - -impl<'a, T, P> Iterator for PrivateIter<'a, T, P> { - type Item = &'a T; - - fn next(&mut self) -> Option { - self.inner - .next() - .map(|pair| &pair.0) - .or_else(|| self.last.next()) - } -} - -impl<'a, T, P> ExactSizeIterator for PrivateIter<'a, T, P> { - fn len(&self) -> usize { - self.inner.len() + self.last.len() - } -} - -/// An iterator over mutably borrowed values of type `&mut T`. -/// -/// Refer to the [module documentation] for details about punctuated sequences. 
-/// -/// [module documentation]: index.html -pub struct IterMut<'a, T: 'a> { - inner: Box + 'a>, -} - -struct PrivateIterMut<'a, T: 'a, P: 'a> { - inner: slice::IterMut<'a, (T, P)>, - last: option::IntoIter<&'a mut T>, -} - -#[cfg(any(feature = "full", feature = "derive"))] -impl private { - pub fn empty_punctuated_iter_mut<'a, T>() -> IterMut<'a, T> { - IterMut { - inner: Box::new(iter::empty()), - } - } -} - -impl<'a, T> Iterator for IterMut<'a, T> { - type Item = &'a mut T; - - fn next(&mut self) -> Option { - self.inner.next() - } -} - -impl<'a, T> ExactSizeIterator for IterMut<'a, T> { - fn len(&self) -> usize { - self.inner.len() - } -} - -impl<'a, T, P> Iterator for PrivateIterMut<'a, T, P> { - type Item = &'a mut T; - - fn next(&mut self) -> Option { - self.inner - .next() - .map(|pair| &mut pair.0) - .or_else(|| self.last.next()) - } -} - -impl<'a, T, P> ExactSizeIterator for PrivateIterMut<'a, T, P> { - fn len(&self) -> usize { - self.inner.len() + self.last.len() - } -} - -/// A single syntax tree node of type `T` followed by its trailing punctuation -/// of type `P` if any. -/// -/// Refer to the [module documentation] for details about punctuated sequences. -/// -/// [module documentation]: index.html -pub enum Pair { - Punctuated(T, P), - End(T), -} - -impl Pair { - /// Extracts the syntax tree node from this punctuated pair, discarding the - /// following punctuation. - pub fn into_value(self) -> T { - match self { - Pair::Punctuated(t, _) | Pair::End(t) => t, - } - } - - /// Borrows the syntax tree node from this punctuated pair. - pub fn value(&self) -> &T { - match *self { - Pair::Punctuated(ref t, _) | Pair::End(ref t) => t, - } - } - - /// Mutably borrows the syntax tree node from this punctuated pair. - pub fn value_mut(&mut self) -> &mut T { - match *self { - Pair::Punctuated(ref mut t, _) | Pair::End(ref mut t) => t, - } - } - - /// Borrows the punctuation from this punctuated pair, unless this pair is - /// the final one and there is no trailing punctuation. - pub fn punct(&self) -> Option<&P> { - match *self { - Pair::Punctuated(_, ref d) => Some(d), - Pair::End(_) => None, - } - } - - /// Creates a punctuated pair out of a syntax tree node and an optional - /// following punctuation. - pub fn new(t: T, d: Option
<P>) -> Self { - match d { - Some(d) => Pair::Punctuated(t, d), - None => Pair::End(t), - } - } - - /// Produces this punctuated pair as a tuple of syntax tree node and - /// optional following punctuation. - pub fn into_tuple(self) -> (T, Option<P>
) { - match self { - Pair::Punctuated(t, d) => (t, Some(d)), - Pair::End(t) => (t, None), - } - } -} - -impl Index for Punctuated { - type Output = T; - - fn index(&self, index: usize) -> &Self::Output { - if index == self.len() - 1 { - match self.last { - Some(ref t) => t, - None => &self.inner[index].0, - } - } else { - &self.inner[index].0 - } - } -} - -impl IndexMut for Punctuated { - fn index_mut(&mut self, index: usize) -> &mut Self::Output { - if index == self.len() - 1 { - match self.last { - Some(ref mut t) => t, - None => &mut self.inner[index].0, - } - } else { - &mut self.inner[index].0 - } - } -} - -#[cfg(feature = "printing")] -mod printing { - use super::*; - use proc_macro2::TokenStream; - use quote::{ToTokens, TokenStreamExt}; - - impl ToTokens for Punctuated - where - T: ToTokens, - P: ToTokens, - { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append_all(self.pairs()) - } - } - - impl ToTokens for Pair - where - T: ToTokens, - P: ToTokens, - { - fn to_tokens(&self, tokens: &mut TokenStream) { - match *self { - Pair::Punctuated(ref a, ref b) => { - a.to_tokens(tokens); - b.to_tokens(tokens); - } - Pair::End(ref a) => a.to_tokens(tokens), - } - } - } -} diff --git a/third_party/rust/syn-0.15.30/src/span.rs b/third_party/rust/syn-0.15.30/src/span.rs deleted file mode 100644 index 27a7fe846d..0000000000 --- a/third_party/rust/syn-0.15.30/src/span.rs +++ /dev/null @@ -1,67 +0,0 @@ -use proc_macro2::Span; - -pub trait IntoSpans { - fn into_spans(self) -> S; -} - -impl IntoSpans<[Span; 1]> for Span { - fn into_spans(self) -> [Span; 1] { - [self] - } -} - -impl IntoSpans<[Span; 2]> for Span { - fn into_spans(self) -> [Span; 2] { - [self, self] - } -} - -impl IntoSpans<[Span; 3]> for Span { - fn into_spans(self) -> [Span; 3] { - [self, self, self] - } -} - -impl IntoSpans<[Span; 1]> for [Span; 1] { - fn into_spans(self) -> [Span; 1] { - self - } -} - -impl IntoSpans<[Span; 2]> for [Span; 2] { - fn into_spans(self) -> [Span; 2] { - self - } -} - -impl IntoSpans<[Span; 3]> for [Span; 3] { - fn into_spans(self) -> [Span; 3] { - self - } -} - -#[cfg(feature = "parsing")] -pub trait FromSpans: Sized { - fn from_spans(spans: &[Span]) -> Self; -} - -#[cfg(feature = "parsing")] -impl FromSpans for [Span; 1] { - fn from_spans(spans: &[Span]) -> Self { - [spans[0]] - } -} - -#[cfg(feature = "parsing")] -impl FromSpans for [Span; 2] { - fn from_spans(spans: &[Span]) -> Self { - [spans[0], spans[1]] - } -} - -#[cfg(feature = "parsing")] -impl FromSpans for [Span; 3] { - fn from_spans(spans: &[Span]) -> Self { - [spans[0], spans[1], spans[2]] - } -} diff --git a/third_party/rust/syn-0.15.30/src/spanned.rs b/third_party/rust/syn-0.15.30/src/spanned.rs deleted file mode 100644 index 79e23dfbbf..0000000000 --- a/third_party/rust/syn-0.15.30/src/spanned.rs +++ /dev/null @@ -1,144 +0,0 @@ -//! A trait that can provide the `Span` of the complete contents of a syntax -//! tree node. -//! -//! *This module is available if Syn is built with both the `"parsing"` and -//! `"printing"` features.* -//! -//! # Example -//! -//! Suppose in a procedural macro we have a [`Type`] that we want to assert -//! implements the [`Sync`] trait. Maybe this is the type of one of the fields -//! of a struct for which we are deriving a trait implementation, and we need to -//! be able to pass a reference to one of those fields across threads. -//! -//! [`Type`]: ../enum.Type.html -//! [`Sync`]: https://doc.rust-lang.org/std/marker/trait.Sync.html -//! -//! 
If the field type does *not* implement `Sync` as required, we want the -//! compiler to report an error pointing out exactly which type it was. -//! -//! The following macro code takes a variable `ty` of type `Type` and produces a -//! static assertion that `Sync` is implemented for that type. -//! -//! ```edition2018 -//! # extern crate proc_macro; -//! # -//! use proc_macro::TokenStream; -//! use proc_macro2::Span; -//! use quote::quote_spanned; -//! use syn::Type; -//! use syn::spanned::Spanned; -//! -//! # const IGNORE_TOKENS: &str = stringify! { -//! #[proc_macro_derive(MyMacro)] -//! # }; -//! pub fn my_macro(input: TokenStream) -> TokenStream { -//! # let ty = get_a_type(); -//! /* ... */ -//! -//! let assert_sync = quote_spanned! {ty.span()=> -//! struct _AssertSync where #ty: Sync; -//! }; -//! -//! /* ... */ -//! # input -//! } -//! # -//! # fn get_a_type() -> Type { -//! # unimplemented!() -//! # } -//! ``` -//! -//! By inserting this `assert_sync` fragment into the output code generated by -//! our macro, the user's code will fail to compile if `ty` does not implement -//! `Sync`. The errors they would see look like the following. -//! -//! ```text -//! error[E0277]: the trait bound `*const i32: std::marker::Sync` is not satisfied -//! --> src/main.rs:10:21 -//! | -//! 10 | bad_field: *const i32, -//! | ^^^^^^^^^^ `*const i32` cannot be shared between threads safely -//! ``` -//! -//! In this technique, using the `Type`'s span for the error message makes the -//! error appear in the correct place underlining the right type. - -use proc_macro2::{Span, TokenStream}; -use quote::ToTokens; - -/// A trait that can provide the `Span` of the complete contents of a syntax -/// tree node. -/// -/// This trait is automatically implemented for all types that implement -/// [`ToTokens`] from the `quote` crate. It is sealed and cannot be implemented -/// outside of the Syn crate other than by implementing `ToTokens`. -/// -/// [`ToTokens`]: https://docs.rs/quote/0.6/quote/trait.ToTokens.html -/// -/// See the [module documentation] for an example. -/// -/// [module documentation]: index.html -/// -/// *This trait is available if Syn is built with both the `"parsing"` and -/// `"printing"` features.* -pub trait Spanned: private::Sealed { - /// Returns a `Span` covering the complete contents of this syntax tree - /// node, or [`Span::call_site()`] if this node is empty. - /// - /// [`Span::call_site()`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.Span.html#method.call_site - fn span(&self) -> Span; -} - -mod private { - use quote::ToTokens; - pub trait Sealed {} - impl Sealed for T {} -} - -impl Spanned for T -where - T: ToTokens, -{ - fn span(&self) -> Span { - join_spans(self.into_token_stream()) - } -} - -fn join_spans(tokens: TokenStream) -> Span { - let mut iter = tokens.into_iter().filter_map(|tt| { - // FIXME: This shouldn't be required, since optimally spans should - // never be invalid. This filter_map can probably be removed when - // https://github.com/rust-lang/rust/issues/43081 is resolved. 
- let span = tt.span(); - let debug = format!("{:?}", span); - if debug.ends_with("bytes(0..0)") { - None - } else { - Some(span) - } - }); - - let mut joined = match iter.next() { - Some(span) => span, - None => return Span::call_site(), - }; - - #[cfg(procmacro2_semver_exempt)] - { - for next in iter { - if let Some(span) = joined.join(next) { - joined = span; - } - } - } - - #[cfg(not(procmacro2_semver_exempt))] - { - // We can't join spans without procmacro2_semver_exempt so just grab the - // first one. - joined = joined; - } - - joined -} diff --git a/third_party/rust/syn-0.15.30/src/thread.rs b/third_party/rust/syn-0.15.30/src/thread.rs deleted file mode 100644 index ff47e4af4d..0000000000 --- a/third_party/rust/syn-0.15.30/src/thread.rs +++ /dev/null @@ -1,83 +0,0 @@ -use std::fmt::{self, Debug}; - -use self::thread_id::ThreadId; - -/// ThreadBound is a Sync-maker and Send-maker that allows accessing a value -/// of type T only from the original thread on which the ThreadBound was -/// constructed. -pub struct ThreadBound { - value: T, - thread_id: ThreadId, -} - -unsafe impl Sync for ThreadBound {} - -// Send bound requires Copy, as otherwise Drop could run in the wrong place. -unsafe impl Send for ThreadBound {} - -impl ThreadBound { - pub fn new(value: T) -> Self { - ThreadBound { - value: value, - thread_id: thread_id::current(), - } - } - - pub fn get(&self) -> Option<&T> { - if thread_id::current() == self.thread_id { - Some(&self.value) - } else { - None - } - } -} - -impl Debug for ThreadBound { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - match self.get() { - Some(value) => Debug::fmt(value, formatter), - None => formatter.write_str("unknown"), - } - } -} - -#[cfg(syn_can_use_thread_id)] -mod thread_id { - use std::thread; - - pub use std::thread::ThreadId; - - pub fn current() -> ThreadId { - thread::current().id() - } -} - -#[cfg(not(syn_can_use_thread_id))] -mod thread_id { - #[allow(deprecated)] - use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT}; - - thread_local! { - static THREAD_ID: usize = { - #[allow(deprecated)] - static NEXT_THREAD_ID: AtomicUsize = ATOMIC_USIZE_INIT; - - // Ordering::Relaxed because our only requirement for the ids is - // that they are unique. It is okay for the compiler to rearrange - // other memory reads around this fetch. It's still an atomic - // fetch_add, so no two threads will be able to read the same value - // from it. - // - // The main thing which these orderings affect is other memory reads - // around the atomic read, which for our case are irrelevant as this - // atomic guards nothing. - NEXT_THREAD_ID.fetch_add(1, Ordering::Relaxed) - }; - } - - pub type ThreadId = usize; - - pub fn current() -> ThreadId { - THREAD_ID.with(|id| *id) - } -} diff --git a/third_party/rust/syn-0.15.30/src/token.rs b/third_party/rust/syn-0.15.30/src/token.rs deleted file mode 100644 index 04d69670fe..0000000000 --- a/third_party/rust/syn-0.15.30/src/token.rs +++ /dev/null @@ -1,946 +0,0 @@ -//! Tokens representing Rust punctuation, keywords, and delimiters. -//! -//! The type names in this module can be difficult to keep straight, so we -//! prefer to use the [`Token!`] macro instead. This is a type-macro that -//! expands to the token type of the given token. -//! -//! [`Token!`]: ../macro.Token.html -//! -//! # Example -//! -//! The [`ItemStatic`] syntax tree node is defined like this. -//! -//! [`ItemStatic`]: ../struct.ItemStatic.html -//! -//! ```edition2018 -//! 
# use syn::{Attribute, Expr, Ident, Token, Type, Visibility}; -//! # -//! pub struct ItemStatic { -//! pub attrs: Vec, -//! pub vis: Visibility, -//! pub static_token: Token![static], -//! pub mutability: Option, -//! pub ident: Ident, -//! pub colon_token: Token![:], -//! pub ty: Box, -//! pub eq_token: Token![=], -//! pub expr: Box, -//! pub semi_token: Token![;], -//! } -//! ``` -//! -//! # Parsing -//! -//! Keywords and punctuation can be parsed through the [`ParseStream::parse`] -//! method. Delimiter tokens are parsed using the [`parenthesized!`], -//! [`bracketed!`] and [`braced!`] macros. -//! -//! [`ParseStream::parse`]: ../parse/struct.ParseBuffer.html#method.parse -//! [`parenthesized!`]: ../macro.parenthesized.html -//! [`bracketed!`]: ../macro.bracketed.html -//! [`braced!`]: ../macro.braced.html -//! -//! ```edition2018 -//! use syn::{Attribute, Result}; -//! use syn::parse::{Parse, ParseStream}; -//! # -//! # enum ItemStatic {} -//! -//! // Parse the ItemStatic struct shown above. -//! impl Parse for ItemStatic { -//! fn parse(input: ParseStream) -> Result { -//! # use syn::ItemStatic; -//! # fn parse(input: ParseStream) -> Result { -//! Ok(ItemStatic { -//! attrs: input.call(Attribute::parse_outer)?, -//! vis: input.parse()?, -//! static_token: input.parse()?, -//! mutability: input.parse()?, -//! ident: input.parse()?, -//! colon_token: input.parse()?, -//! ty: input.parse()?, -//! eq_token: input.parse()?, -//! expr: input.parse()?, -//! semi_token: input.parse()?, -//! }) -//! # } -//! # unimplemented!() -//! } -//! } -//! ``` -//! -//! # Other operations -//! -//! Every keyword and punctuation token supports the following operations. -//! -//! - [Peeking] — `input.peek(Token![...])` -//! -//! - [Parsing] — `input.parse::()?` -//! -//! - [Printing] — `quote!( ... #the_token ... )` -//! -//! - Construction from a [`Span`] — `let the_token = Token![...](sp)` -//! -//! - Field access to its span — `let sp = the_token.span` -//! -//! [Peeking]: ../parse/struct.ParseBuffer.html#method.peek -//! [Parsing]: ../parse/struct.ParseBuffer.html#method.parse -//! [Printing]: https://docs.rs/quote/0.6/quote/trait.ToTokens.html -//! [`Span`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.Span.html - -use std; -#[cfg(feature = "extra-traits")] -use std::cmp; -#[cfg(feature = "extra-traits")] -use std::fmt::{self, Debug}; -#[cfg(feature = "extra-traits")] -use std::hash::{Hash, Hasher}; -use std::ops::{Deref, DerefMut}; - -#[cfg(feature = "parsing")] -use proc_macro2::Delimiter; -#[cfg(any(feature = "parsing", feature = "printing"))] -use proc_macro2::Ident; -use proc_macro2::Span; -#[cfg(feature = "printing")] -use proc_macro2::TokenStream; -#[cfg(feature = "printing")] -use quote::{ToTokens, TokenStreamExt}; - -use self::private::WithSpan; -#[cfg(feature = "parsing")] -use buffer::Cursor; -#[cfg(feature = "parsing")] -use error::Result; -#[cfg(any(feature = "full", feature = "derive"))] -#[cfg(feature = "parsing")] -use lifetime::Lifetime; -#[cfg(any(feature = "full", feature = "derive"))] -#[cfg(feature = "parsing")] -use lit::{Lit, LitBool, LitByte, LitByteStr, LitChar, LitFloat, LitInt, LitStr}; -#[cfg(feature = "parsing")] -use lookahead; -#[cfg(feature = "parsing")] -use parse::{Parse, ParseStream}; -use span::IntoSpans; - -/// Marker trait for types that represent single tokens. -/// -/// This trait is sealed and cannot be implemented for types outside of Syn. -#[cfg(feature = "parsing")] -pub trait Token: private::Sealed { - // Not public API. 
- #[doc(hidden)] - fn peek(cursor: Cursor) -> bool; - - // Not public API. - #[doc(hidden)] - fn display() -> &'static str; -} - -mod private { - use proc_macro2::Span; - - #[cfg(feature = "parsing")] - pub trait Sealed {} - - /// Support writing `token.span` rather than `token.spans[0]` on tokens that - /// hold a single span. - #[repr(C)] - pub struct WithSpan { - pub span: Span, - } -} - -#[cfg(feature = "parsing")] -impl private::Sealed for Ident {} - -#[cfg(any(feature = "full", feature = "derive"))] -#[cfg(feature = "parsing")] -fn peek_impl(cursor: Cursor, peek: fn(ParseStream) -> bool) -> bool { - use std::cell::Cell; - use std::rc::Rc; - - let scope = Span::call_site(); - let unexpected = Rc::new(Cell::new(None)); - let buffer = ::private::new_parse_buffer(scope, cursor, unexpected); - peek(&buffer) -} - -#[cfg(any(feature = "full", feature = "derive"))] -macro_rules! impl_token { - ($name:ident $display:expr) => { - #[cfg(feature = "parsing")] - impl Token for $name { - fn peek(cursor: Cursor) -> bool { - fn peek(input: ParseStream) -> bool { - <$name as Parse>::parse(input).is_ok() - } - peek_impl(cursor, peek) - } - - fn display() -> &'static str { - $display - } - } - - #[cfg(feature = "parsing")] - impl private::Sealed for $name {} - }; -} - -#[cfg(any(feature = "full", feature = "derive"))] -impl_token!(Lifetime "lifetime"); -#[cfg(any(feature = "full", feature = "derive"))] -impl_token!(Lit "literal"); -#[cfg(any(feature = "full", feature = "derive"))] -impl_token!(LitStr "string literal"); -#[cfg(any(feature = "full", feature = "derive"))] -impl_token!(LitByteStr "byte string literal"); -#[cfg(any(feature = "full", feature = "derive"))] -impl_token!(LitByte "byte literal"); -#[cfg(any(feature = "full", feature = "derive"))] -impl_token!(LitChar "character literal"); -#[cfg(any(feature = "full", feature = "derive"))] -impl_token!(LitInt "integer literal"); -#[cfg(any(feature = "full", feature = "derive"))] -impl_token!(LitFloat "floating point literal"); -#[cfg(any(feature = "full", feature = "derive"))] -impl_token!(LitBool "boolean literal"); - -// Not public API. -#[cfg(feature = "parsing")] -#[doc(hidden)] -pub trait CustomKeyword { - fn ident() -> &'static str; - fn display() -> &'static str; -} - -#[cfg(feature = "parsing")] -impl private::Sealed for K {} - -#[cfg(feature = "parsing")] -impl Token for K { - fn peek(cursor: Cursor) -> bool { - parsing::peek_keyword(cursor, K::ident()) - } - - fn display() -> &'static str { - K::display() - } -} - -macro_rules! define_keywords { - ($($token:tt pub struct $name:ident #[$doc:meta])*) => { - $( - #[cfg_attr(feature = "clone-impls", derive(Copy, Clone))] - #[$doc] - /// - /// Don't try to remember the name of this type -- use the [`Token!`] - /// macro instead. 
- /// - /// [`Token!`]: index.html - pub struct $name { - pub span: Span, - } - - #[doc(hidden)] - #[allow(non_snake_case)] - pub fn $name>(span: S) -> $name { - $name { - span: span.into_spans()[0], - } - } - - impl std::default::Default for $name { - fn default() -> Self { - $name { - span: Span::call_site(), - } - } - } - - #[cfg(feature = "extra-traits")] - impl Debug for $name { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(stringify!($name)) - } - } - - #[cfg(feature = "extra-traits")] - impl cmp::Eq for $name {} - - #[cfg(feature = "extra-traits")] - impl PartialEq for $name { - fn eq(&self, _other: &$name) -> bool { - true - } - } - - #[cfg(feature = "extra-traits")] - impl Hash for $name { - fn hash(&self, _state: &mut H) {} - } - - #[cfg(feature = "printing")] - impl ToTokens for $name { - fn to_tokens(&self, tokens: &mut TokenStream) { - printing::keyword($token, self.span, tokens); - } - } - - #[cfg(feature = "parsing")] - impl Parse for $name { - fn parse(input: ParseStream) -> Result { - Ok($name { - span: parsing::keyword(input, $token)?, - }) - } - } - - #[cfg(feature = "parsing")] - impl Token for $name { - fn peek(cursor: Cursor) -> bool { - parsing::peek_keyword(cursor, $token) - } - - fn display() -> &'static str { - concat!("`", $token, "`") - } - } - - #[cfg(feature = "parsing")] - impl private::Sealed for $name {} - )* - }; -} - -macro_rules! impl_deref_if_len_is_1 { - ($name:ident/1) => { - impl Deref for $name { - type Target = WithSpan; - - fn deref(&self) -> &Self::Target { - unsafe { &*(self as *const Self as *const WithSpan) } - } - } - - impl DerefMut for $name { - fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { &mut *(self as *mut Self as *mut WithSpan) } - } - } - }; - - ($name:ident/$len:tt) => {}; -} - -macro_rules! define_punctuation_structs { - ($($token:tt pub struct $name:ident/$len:tt #[$doc:meta])*) => { - $( - #[cfg_attr(feature = "clone-impls", derive(Copy, Clone))] - #[repr(C)] - #[$doc] - /// - /// Don't try to remember the name of this type -- use the [`Token!`] - /// macro instead. - /// - /// [`Token!`]: index.html - pub struct $name { - pub spans: [Span; $len], - } - - #[doc(hidden)] - #[allow(non_snake_case)] - pub fn $name>(spans: S) -> $name { - $name { - spans: spans.into_spans(), - } - } - - impl std::default::Default for $name { - fn default() -> Self { - $name { - spans: [Span::call_site(); $len], - } - } - } - - #[cfg(feature = "extra-traits")] - impl Debug for $name { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(stringify!($name)) - } - } - - #[cfg(feature = "extra-traits")] - impl cmp::Eq for $name {} - - #[cfg(feature = "extra-traits")] - impl PartialEq for $name { - fn eq(&self, _other: &$name) -> bool { - true - } - } - - #[cfg(feature = "extra-traits")] - impl Hash for $name { - fn hash(&self, _state: &mut H) {} - } - - impl_deref_if_len_is_1!($name/$len); - )* - }; -} - -macro_rules! define_punctuation { - ($($token:tt pub struct $name:ident/$len:tt #[$doc:meta])*) => { - $( - define_punctuation_structs! 
{ - $token pub struct $name/$len #[$doc] - } - - #[cfg(feature = "printing")] - impl ToTokens for $name { - fn to_tokens(&self, tokens: &mut TokenStream) { - printing::punct($token, &self.spans, tokens); - } - } - - #[cfg(feature = "parsing")] - impl Parse for $name { - fn parse(input: ParseStream) -> Result { - Ok($name { - spans: parsing::punct(input, $token)?, - }) - } - } - - #[cfg(feature = "parsing")] - impl Token for $name { - fn peek(cursor: Cursor) -> bool { - parsing::peek_punct(cursor, $token) - } - - fn display() -> &'static str { - concat!("`", $token, "`") - } - } - - #[cfg(feature = "parsing")] - impl private::Sealed for $name {} - )* - }; -} - -macro_rules! define_delimiters { - ($($token:tt pub struct $name:ident #[$doc:meta])*) => { - $( - #[cfg_attr(feature = "clone-impls", derive(Copy, Clone))] - #[$doc] - pub struct $name { - pub span: Span, - } - - #[doc(hidden)] - #[allow(non_snake_case)] - pub fn $name>(span: S) -> $name { - $name { - span: span.into_spans()[0], - } - } - - impl std::default::Default for $name { - fn default() -> Self { - $name { - span: Span::call_site(), - } - } - } - - #[cfg(feature = "extra-traits")] - impl Debug for $name { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(stringify!($name)) - } - } - - #[cfg(feature = "extra-traits")] - impl cmp::Eq for $name {} - - #[cfg(feature = "extra-traits")] - impl PartialEq for $name { - fn eq(&self, _other: &$name) -> bool { - true - } - } - - #[cfg(feature = "extra-traits")] - impl Hash for $name { - fn hash(&self, _state: &mut H) {} - } - - impl $name { - #[cfg(feature = "printing")] - pub fn surround(&self, tokens: &mut TokenStream, f: F) - where - F: FnOnce(&mut TokenStream), - { - printing::delim($token, self.span, tokens, f); - } - } - - #[cfg(feature = "parsing")] - impl private::Sealed for $name {} - )* - }; -} - -define_punctuation_structs! 
{ - "_" pub struct Underscore/1 /// `_` -} - -#[cfg(feature = "printing")] -impl ToTokens for Underscore { - fn to_tokens(&self, tokens: &mut TokenStream) { - tokens.append(Ident::new("_", self.span)); - } -} - -#[cfg(feature = "parsing")] -impl Parse for Underscore { - fn parse(input: ParseStream) -> Result { - input.step(|cursor| { - if let Some((ident, rest)) = cursor.ident() { - if ident == "_" { - return Ok((Underscore(ident.span()), rest)); - } - } - if let Some((punct, rest)) = cursor.punct() { - if punct.as_char() == '_' { - return Ok((Underscore(punct.span()), rest)); - } - } - Err(cursor.error("expected `_`")) - }) - } -} - -#[cfg(feature = "parsing")] -impl Token for Underscore { - fn peek(cursor: Cursor) -> bool { - if let Some((ident, _rest)) = cursor.ident() { - return ident == "_"; - } - if let Some((punct, _rest)) = cursor.punct() { - return punct.as_char() == '_'; - } - false - } - - fn display() -> &'static str { - "`_`" - } -} - -#[cfg(feature = "parsing")] -impl private::Sealed for Underscore {} - -#[cfg(feature = "parsing")] -impl Token for Paren { - fn peek(cursor: Cursor) -> bool { - lookahead::is_delimiter(cursor, Delimiter::Parenthesis) - } - - fn display() -> &'static str { - "parentheses" - } -} - -#[cfg(feature = "parsing")] -impl Token for Brace { - fn peek(cursor: Cursor) -> bool { - lookahead::is_delimiter(cursor, Delimiter::Brace) - } - - fn display() -> &'static str { - "curly braces" - } -} - -#[cfg(feature = "parsing")] -impl Token for Bracket { - fn peek(cursor: Cursor) -> bool { - lookahead::is_delimiter(cursor, Delimiter::Bracket) - } - - fn display() -> &'static str { - "square brackets" - } -} - -#[cfg(feature = "parsing")] -impl Token for Group { - fn peek(cursor: Cursor) -> bool { - lookahead::is_delimiter(cursor, Delimiter::None) - } - - fn display() -> &'static str { - "invisible group" - } -} - -define_keywords! 
{ - "abstract" pub struct Abstract /// `abstract` - "as" pub struct As /// `as` - "async" pub struct Async /// `async` - "auto" pub struct Auto /// `auto` - "become" pub struct Become /// `become` - "box" pub struct Box /// `box` - "break" pub struct Break /// `break` - "const" pub struct Const /// `const` - "continue" pub struct Continue /// `continue` - "crate" pub struct Crate /// `crate` - "default" pub struct Default /// `default` - "do" pub struct Do /// `do` - "dyn" pub struct Dyn /// `dyn` - "else" pub struct Else /// `else` - "enum" pub struct Enum /// `enum` - "existential" pub struct Existential /// `existential` - "extern" pub struct Extern /// `extern` - "final" pub struct Final /// `final` - "fn" pub struct Fn /// `fn` - "for" pub struct For /// `for` - "if" pub struct If /// `if` - "impl" pub struct Impl /// `impl` - "in" pub struct In /// `in` - "let" pub struct Let /// `let` - "loop" pub struct Loop /// `loop` - "macro" pub struct Macro /// `macro` - "match" pub struct Match /// `match` - "mod" pub struct Mod /// `mod` - "move" pub struct Move /// `move` - "mut" pub struct Mut /// `mut` - "override" pub struct Override /// `override` - "priv" pub struct Priv /// `priv` - "pub" pub struct Pub /// `pub` - "ref" pub struct Ref /// `ref` - "return" pub struct Return /// `return` - "Self" pub struct SelfType /// `Self` - "self" pub struct SelfValue /// `self` - "static" pub struct Static /// `static` - "struct" pub struct Struct /// `struct` - "super" pub struct Super /// `super` - "trait" pub struct Trait /// `trait` - "try" pub struct Try /// `try` - "type" pub struct Type /// `type` - "typeof" pub struct Typeof /// `typeof` - "union" pub struct Union /// `union` - "unsafe" pub struct Unsafe /// `unsafe` - "unsized" pub struct Unsized /// `unsized` - "use" pub struct Use /// `use` - "virtual" pub struct Virtual /// `virtual` - "where" pub struct Where /// `where` - "while" pub struct While /// `while` - "yield" pub struct Yield /// `yield` -} - -define_punctuation! { - "+" pub struct Add/1 /// `+` - "+=" pub struct AddEq/2 /// `+=` - "&" pub struct And/1 /// `&` - "&&" pub struct AndAnd/2 /// `&&` - "&=" pub struct AndEq/2 /// `&=` - "@" pub struct At/1 /// `@` - "!" pub struct Bang/1 /// `!` - "^" pub struct Caret/1 /// `^` - "^=" pub struct CaretEq/2 /// `^=` - ":" pub struct Colon/1 /// `:` - "::" pub struct Colon2/2 /// `::` - "," pub struct Comma/1 /// `,` - "/" pub struct Div/1 /// `/` - "/=" pub struct DivEq/2 /// `/=` - "$" pub struct Dollar/1 /// `$` - "." pub struct Dot/1 /// `.` - ".." pub struct Dot2/2 /// `..` - "..." pub struct Dot3/3 /// `...` - "..=" pub struct DotDotEq/3 /// `..=` - "=" pub struct Eq/1 /// `=` - "==" pub struct EqEq/2 /// `==` - ">=" pub struct Ge/2 /// `>=` - ">" pub struct Gt/1 /// `>` - "<=" pub struct Le/2 /// `<=` - "<" pub struct Lt/1 /// `<` - "*=" pub struct MulEq/2 /// `*=` - "!=" pub struct Ne/2 /// `!=` - "|" pub struct Or/1 /// `|` - "|=" pub struct OrEq/2 /// `|=` - "||" pub struct OrOr/2 /// `||` - "#" pub struct Pound/1 /// `#` - "?" 
pub struct Question/1 /// `?` - "->" pub struct RArrow/2 /// `->` - "<-" pub struct LArrow/2 /// `<-` - "%" pub struct Rem/1 /// `%` - "%=" pub struct RemEq/2 /// `%=` - "=>" pub struct FatArrow/2 /// `=>` - ";" pub struct Semi/1 /// `;` - "<<" pub struct Shl/2 /// `<<` - "<<=" pub struct ShlEq/3 /// `<<=` - ">>" pub struct Shr/2 /// `>>` - ">>=" pub struct ShrEq/3 /// `>>=` - "*" pub struct Star/1 /// `*` - "-" pub struct Sub/1 /// `-` - "-=" pub struct SubEq/2 /// `-=` - "~" pub struct Tilde/1 /// `~` -} - -define_delimiters! { - "{" pub struct Brace /// `{...}` - "[" pub struct Bracket /// `[...]` - "(" pub struct Paren /// `(...)` - " " pub struct Group /// None-delimited group -} - -/// A type-macro that expands to the name of the Rust type representation of a -/// given token. -/// -/// See the [token module] documentation for details and examples. -/// -/// [token module]: token/index.html -// Unfortunate duplication due to a rustdoc bug. -// https://github.com/rust-lang/rust/issues/45939 -#[macro_export] -#[cfg_attr(rustfmt, rustfmt_skip)] -macro_rules! Token { - (abstract) => { $crate::token::Abstract }; - (as) => { $crate::token::As }; - (async) => { $crate::token::Async }; - (auto) => { $crate::token::Auto }; - (become) => { $crate::token::Become }; - (box) => { $crate::token::Box }; - (break) => { $crate::token::Break }; - (const) => { $crate::token::Const }; - (continue) => { $crate::token::Continue }; - (crate) => { $crate::token::Crate }; - (default) => { $crate::token::Default }; - (do) => { $crate::token::Do }; - (dyn) => { $crate::token::Dyn }; - (else) => { $crate::token::Else }; - (enum) => { $crate::token::Enum }; - (existential) => { $crate::token::Existential }; - (extern) => { $crate::token::Extern }; - (final) => { $crate::token::Final }; - (fn) => { $crate::token::Fn }; - (for) => { $crate::token::For }; - (if) => { $crate::token::If }; - (impl) => { $crate::token::Impl }; - (in) => { $crate::token::In }; - (let) => { $crate::token::Let }; - (loop) => { $crate::token::Loop }; - (macro) => { $crate::token::Macro }; - (match) => { $crate::token::Match }; - (mod) => { $crate::token::Mod }; - (move) => { $crate::token::Move }; - (mut) => { $crate::token::Mut }; - (override) => { $crate::token::Override }; - (priv) => { $crate::token::Priv }; - (pub) => { $crate::token::Pub }; - (ref) => { $crate::token::Ref }; - (return) => { $crate::token::Return }; - (Self) => { $crate::token::SelfType }; - (self) => { $crate::token::SelfValue }; - (static) => { $crate::token::Static }; - (struct) => { $crate::token::Struct }; - (super) => { $crate::token::Super }; - (trait) => { $crate::token::Trait }; - (try) => { $crate::token::Try }; - (type) => { $crate::token::Type }; - (typeof) => { $crate::token::Typeof }; - (union) => { $crate::token::Union }; - (unsafe) => { $crate::token::Unsafe }; - (unsized) => { $crate::token::Unsized }; - (use) => { $crate::token::Use }; - (virtual) => { $crate::token::Virtual }; - (where) => { $crate::token::Where }; - (while) => { $crate::token::While }; - (yield) => { $crate::token::Yield }; - (+) => { $crate::token::Add }; - (+=) => { $crate::token::AddEq }; - (&) => { $crate::token::And }; - (&&) => { $crate::token::AndAnd }; - (&=) => { $crate::token::AndEq }; - (@) => { $crate::token::At }; - (!) 
=> { $crate::token::Bang }; - (^) => { $crate::token::Caret }; - (^=) => { $crate::token::CaretEq }; - (:) => { $crate::token::Colon }; - (::) => { $crate::token::Colon2 }; - (,) => { $crate::token::Comma }; - (/) => { $crate::token::Div }; - (/=) => { $crate::token::DivEq }; - (.) => { $crate::token::Dot }; - (..) => { $crate::token::Dot2 }; - (...) => { $crate::token::Dot3 }; - (..=) => { $crate::token::DotDotEq }; - (=) => { $crate::token::Eq }; - (==) => { $crate::token::EqEq }; - (>=) => { $crate::token::Ge }; - (>) => { $crate::token::Gt }; - (<=) => { $crate::token::Le }; - (<) => { $crate::token::Lt }; - (*=) => { $crate::token::MulEq }; - (!=) => { $crate::token::Ne }; - (|) => { $crate::token::Or }; - (|=) => { $crate::token::OrEq }; - (||) => { $crate::token::OrOr }; - (#) => { $crate::token::Pound }; - (?) => { $crate::token::Question }; - (->) => { $crate::token::RArrow }; - (<-) => { $crate::token::LArrow }; - (%) => { $crate::token::Rem }; - (%=) => { $crate::token::RemEq }; - (=>) => { $crate::token::FatArrow }; - (;) => { $crate::token::Semi }; - (<<) => { $crate::token::Shl }; - (<<=) => { $crate::token::ShlEq }; - (>>) => { $crate::token::Shr }; - (>>=) => { $crate::token::ShrEq }; - (*) => { $crate::token::Star }; - (-) => { $crate::token::Sub }; - (-=) => { $crate::token::SubEq }; - (~) => { $crate::token::Tilde }; - (_) => { $crate::token::Underscore }; -} - -// Old names. TODO: remove these re-exports in a breaking change. -// https://github.com/dtolnay/syn/issues/486 -#[doc(hidden)] -pub use self::SelfType as CapSelf; -#[doc(hidden)] -pub use self::SelfValue as Self_; - -#[cfg(feature = "parsing")] -mod parsing { - use proc_macro2::{Spacing, Span}; - - use buffer::Cursor; - use error::{Error, Result}; - use parse::ParseStream; - use span::FromSpans; - - pub fn keyword(input: ParseStream, token: &str) -> Result { - input.step(|cursor| { - if let Some((ident, rest)) = cursor.ident() { - if ident == token { - return Ok((ident.span(), rest)); - } - } - Err(cursor.error(format!("expected `{}`", token))) - }) - } - - pub fn peek_keyword(cursor: Cursor, token: &str) -> bool { - if let Some((ident, _rest)) = cursor.ident() { - ident == token - } else { - false - } - } - - pub fn punct(input: ParseStream, token: &str) -> Result { - let mut spans = [input.cursor().span(); 3]; - punct_helper(input, token, &mut spans)?; - Ok(S::from_spans(&spans)) - } - - fn punct_helper(input: ParseStream, token: &str, spans: &mut [Span; 3]) -> Result<()> { - input.step(|cursor| { - let mut cursor = *cursor; - assert!(token.len() <= spans.len()); - - for (i, ch) in token.chars().enumerate() { - match cursor.punct() { - Some((punct, rest)) => { - spans[i] = punct.span(); - if punct.as_char() != ch { - break; - } else if i == token.len() - 1 { - return Ok(((), rest)); - } else if punct.spacing() != Spacing::Joint { - break; - } - cursor = rest; - } - None => break, - } - } - - Err(Error::new(spans[0], format!("expected `{}`", token))) - }) - } - - pub fn peek_punct(mut cursor: Cursor, token: &str) -> bool { - for (i, ch) in token.chars().enumerate() { - match cursor.punct() { - Some((punct, rest)) => { - if punct.as_char() != ch { - break; - } else if i == token.len() - 1 { - return true; - } else if punct.spacing() != Spacing::Joint { - break; - } - cursor = rest; - } - None => break, - } - } - false - } -} - -#[cfg(feature = "printing")] -mod printing { - use proc_macro2::{Delimiter, Group, Ident, Punct, Spacing, Span, TokenStream}; - use quote::TokenStreamExt; - - pub fn punct(s: &str, spans: 
&[Span], tokens: &mut TokenStream) { - assert_eq!(s.len(), spans.len()); - - let mut chars = s.chars(); - let mut spans = spans.iter(); - let ch = chars.next_back().unwrap(); - let span = spans.next_back().unwrap(); - for (ch, span) in chars.zip(spans) { - let mut op = Punct::new(ch, Spacing::Joint); - op.set_span(*span); - tokens.append(op); - } - - let mut op = Punct::new(ch, Spacing::Alone); - op.set_span(*span); - tokens.append(op); - } - - pub fn keyword(s: &str, span: Span, tokens: &mut TokenStream) { - tokens.append(Ident::new(s, span)); - } - - pub fn delim(s: &str, span: Span, tokens: &mut TokenStream, f: F) - where - F: FnOnce(&mut TokenStream), - { - let delim = match s { - "(" => Delimiter::Parenthesis, - "[" => Delimiter::Bracket, - "{" => Delimiter::Brace, - " " => Delimiter::None, - _ => panic!("unknown delimiter: {}", s), - }; - let mut inner = TokenStream::new(); - f(&mut inner); - let mut g = Group::new(delim, inner); - g.set_span(span); - tokens.append(g); - } -} diff --git a/third_party/rust/syn-0.15.30/src/tt.rs b/third_party/rust/syn-0.15.30/src/tt.rs deleted file mode 100644 index 72bc3c11e0..0000000000 --- a/third_party/rust/syn-0.15.30/src/tt.rs +++ /dev/null @@ -1,110 +0,0 @@ -use std::hash::{Hash, Hasher}; - -use proc_macro2::{Delimiter, TokenStream, TokenTree}; - -pub struct TokenTreeHelper<'a>(pub &'a TokenTree); - -impl<'a> PartialEq for TokenTreeHelper<'a> { - fn eq(&self, other: &Self) -> bool { - use proc_macro2::Spacing; - - match (self.0, other.0) { - (&TokenTree::Group(ref g1), &TokenTree::Group(ref g2)) => { - match (g1.delimiter(), g2.delimiter()) { - (Delimiter::Parenthesis, Delimiter::Parenthesis) - | (Delimiter::Brace, Delimiter::Brace) - | (Delimiter::Bracket, Delimiter::Bracket) - | (Delimiter::None, Delimiter::None) => {} - _ => return false, - } - - let s1 = g1.stream().clone().into_iter(); - let mut s2 = g2.stream().clone().into_iter(); - - for item1 in s1 { - let item2 = match s2.next() { - Some(item) => item, - None => return false, - }; - if TokenTreeHelper(&item1) != TokenTreeHelper(&item2) { - return false; - } - } - s2.next().is_none() - } - (&TokenTree::Punct(ref o1), &TokenTree::Punct(ref o2)) => { - o1.as_char() == o2.as_char() - && match (o1.spacing(), o2.spacing()) { - (Spacing::Alone, Spacing::Alone) | (Spacing::Joint, Spacing::Joint) => true, - _ => false, - } - } - (&TokenTree::Literal(ref l1), &TokenTree::Literal(ref l2)) => { - l1.to_string() == l2.to_string() - } - (&TokenTree::Ident(ref s1), &TokenTree::Ident(ref s2)) => s1 == s2, - _ => false, - } - } -} - -impl<'a> Hash for TokenTreeHelper<'a> { - fn hash(&self, h: &mut H) { - use proc_macro2::Spacing; - - match *self.0 { - TokenTree::Group(ref g) => { - 0u8.hash(h); - match g.delimiter() { - Delimiter::Parenthesis => 0u8.hash(h), - Delimiter::Brace => 1u8.hash(h), - Delimiter::Bracket => 2u8.hash(h), - Delimiter::None => 3u8.hash(h), - } - - for item in g.stream().clone() { - TokenTreeHelper(&item).hash(h); - } - 0xffu8.hash(h); // terminator w/ a variant we don't normally hash - } - TokenTree::Punct(ref op) => { - 1u8.hash(h); - op.as_char().hash(h); - match op.spacing() { - Spacing::Alone => 0u8.hash(h), - Spacing::Joint => 1u8.hash(h), - } - } - TokenTree::Literal(ref lit) => (2u8, lit.to_string()).hash(h), - TokenTree::Ident(ref word) => (3u8, word).hash(h), - } - } -} - -pub struct TokenStreamHelper<'a>(pub &'a TokenStream); - -impl<'a> PartialEq for TokenStreamHelper<'a> { - fn eq(&self, other: &Self) -> bool { - let left = self.0.clone().into_iter().collect::>(); - 
let right = other.0.clone().into_iter().collect::>(); - if left.len() != right.len() { - return false; - } - for (a, b) in left.into_iter().zip(right) { - if TokenTreeHelper(&a) != TokenTreeHelper(&b) { - return false; - } - } - true - } -} - -impl<'a> Hash for TokenStreamHelper<'a> { - fn hash(&self, state: &mut H) { - let tts = self.0.clone().into_iter().collect::>(); - tts.len().hash(state); - for tt in tts { - TokenTreeHelper(&tt).hash(state); - } - } -} diff --git a/third_party/rust/syn-0.15.30/src/ty.rs b/third_party/rust/syn-0.15.30/src/ty.rs deleted file mode 100644 index f53449f9a6..0000000000 --- a/third_party/rust/syn-0.15.30/src/ty.rs +++ /dev/null @@ -1,994 +0,0 @@ -use super::*; -use proc_macro2::TokenStream; -use punctuated::Punctuated; -#[cfg(feature = "extra-traits")] -use std::hash::{Hash, Hasher}; -#[cfg(feature = "extra-traits")] -use tt::TokenStreamHelper; - -ast_enum_of_structs! { - /// The possible types that a Rust value could have. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - /// - /// # Syntax tree enum - /// - /// This type is a [syntax tree enum]. - /// - /// [syntax tree enum]: enum.Expr.html#syntax-tree-enums - pub enum Type { - /// A dynamically sized slice type: `[T]`. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Slice(TypeSlice { - pub bracket_token: token::Bracket, - pub elem: Box, - }), - - /// A fixed size array type: `[T; n]`. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Array(TypeArray { - pub bracket_token: token::Bracket, - pub elem: Box, - pub semi_token: Token![;], - pub len: Expr, - }), - - /// A raw pointer type: `*const T` or `*mut T`. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Ptr(TypePtr { - pub star_token: Token![*], - pub const_token: Option, - pub mutability: Option, - pub elem: Box, - }), - - /// A reference type: `&'a T` or `&'a mut T`. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Reference(TypeReference { - pub and_token: Token![&], - pub lifetime: Option, - pub mutability: Option, - pub elem: Box, - }), - - /// A bare function type: `fn(usize) -> bool`. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub BareFn(TypeBareFn { - pub lifetimes: Option, - pub unsafety: Option, - pub abi: Option, - pub fn_token: Token![fn], - pub paren_token: token::Paren, - pub inputs: Punctuated, - pub variadic: Option, - pub output: ReturnType, - }), - - /// The never type: `!`. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Never(TypeNever { - pub bang_token: Token![!], - }), - - /// A tuple type: `(A, B, C, String)`. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Tuple(TypeTuple { - pub paren_token: token::Paren, - pub elems: Punctuated, - }), - - /// A path like `std::slice::Iter`, optionally qualified with a - /// self-type as in ` as SomeTrait>::Associated`. - /// - /// Type arguments are stored in the Path itself. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Path(TypePath { - pub qself: Option, - pub path: Path, - }), - - /// A trait object type `Bound1 + Bound2 + Bound3` where `Bound` is a - /// trait or a lifetime. 
- /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub TraitObject(TypeTraitObject { - pub dyn_token: Option<Token![dyn]>, - pub bounds: Punctuated<TypeParamBound, Token![+]>, - }), - - /// An `impl Bound1 + Bound2 + Bound3` type where `Bound` is a trait or - /// a lifetime. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub ImplTrait(TypeImplTrait { - pub impl_token: Token![impl], - pub bounds: Punctuated<TypeParamBound, Token![+]>, - }), - - /// A parenthesized type equivalent to the inner type. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Paren(TypeParen { - pub paren_token: token::Paren, - pub elem: Box<Type>, - }), - - /// A type contained within invisible delimiters. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Group(TypeGroup { - pub group_token: token::Group, - pub elem: Box<Type>, - }), - - /// Indication that a type should be inferred by the compiler: `_`. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Infer(TypeInfer { - pub underscore_token: Token![_], - }), - - /// A macro in the type position. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Macro(TypeMacro { - pub mac: Macro, - }), - - /// Tokens in type position not interpreted by Syn. - /// - /// *This type is available if Syn is built with the `"derive"` or - /// `"full"` feature.* - pub Verbatim(TypeVerbatim #manual_extra_traits { - pub tts: TokenStream, - }), - } -} - -#[cfg(feature = "extra-traits")] -impl Eq for TypeVerbatim {} - -#[cfg(feature = "extra-traits")] -impl PartialEq for TypeVerbatim { - fn eq(&self, other: &Self) -> bool { - TokenStreamHelper(&self.tts) == TokenStreamHelper(&other.tts) - } -} - -#[cfg(feature = "extra-traits")] -impl Hash for TypeVerbatim { - fn hash<H>(&self, state: &mut H) - where - H: Hasher, - { - TokenStreamHelper(&self.tts).hash(state); - } -} - -ast_struct! { - /// The binary interface of a function: `extern "C"`. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - pub struct Abi { - pub extern_token: Token![extern], - pub name: Option<LitStr>, - } -} - -ast_struct! { - /// An argument in a function type: the `usize` in `fn(usize) -> bool`. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - pub struct BareFnArg { - pub name: Option<(BareFnArgName, Token![:])>, - pub ty: Type, - } -} - -ast_enum! { - /// Name of an argument in a function type: the `n` in `fn(n: usize)`. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - pub enum BareFnArgName { - /// Argument given a name. - Named(Ident), - /// Argument not given a name, matched with `_`. - Wild(Token![_]), - } -} - -ast_enum! { - /// Return type of a function signature. - /// - /// *This type is available if Syn is built with the `"derive"` or `"full"` - /// feature.* - pub enum ReturnType { - /// Return type is not specified. - /// - /// Functions default to `()` and closures default to type inference. - Default, - /// A particular type is returned.
- Type(Token![->], Box), - } -} - -#[cfg(feature = "parsing")] -pub mod parsing { - use super::*; - - use parse::{Parse, ParseStream, Result}; - use path; - - impl Parse for Type { - fn parse(input: ParseStream) -> Result { - ambig_ty(input, true) - } - } - - impl Type { - /// In some positions, types may not contain the `+` character, to - /// disambiguate them. For example in the expression `1 as T`, T may not - /// contain a `+` character. - /// - /// This parser does not allow a `+`, while the default parser does. - pub fn without_plus(input: ParseStream) -> Result { - ambig_ty(input, false) - } - } - - fn ambig_ty(input: ParseStream, allow_plus: bool) -> Result { - if input.peek(token::Group) { - return input.parse().map(Type::Group); - } - - let mut lifetimes = None::; - let mut lookahead = input.lookahead1(); - if lookahead.peek(Token![for]) { - lifetimes = input.parse()?; - lookahead = input.lookahead1(); - if !lookahead.peek(Ident) - && !lookahead.peek(Token![fn]) - && !lookahead.peek(Token![unsafe]) - && !lookahead.peek(Token![extern]) - && !lookahead.peek(Token![super]) - && !lookahead.peek(Token![self]) - && !lookahead.peek(Token![Self]) - && !lookahead.peek(Token![crate]) - { - return Err(lookahead.error()); - } - } - - if lookahead.peek(token::Paren) { - let content; - let paren_token = parenthesized!(content in input); - if content.is_empty() { - return Ok(Type::Tuple(TypeTuple { - paren_token: paren_token, - elems: Punctuated::new(), - })); - } - if content.peek(Lifetime) { - return Ok(Type::Paren(TypeParen { - paren_token: paren_token, - elem: Box::new(Type::TraitObject(content.parse()?)), - })); - } - if content.peek(Token![?]) { - return Ok(Type::TraitObject(TypeTraitObject { - dyn_token: None, - bounds: { - let mut bounds = Punctuated::new(); - bounds.push_value(TypeParamBound::Trait(TraitBound { - paren_token: Some(paren_token), - ..content.parse()? - })); - while let Some(plus) = input.parse()? { - bounds.push_punct(plus); - bounds.push_value(input.parse()?); - } - bounds - }, - })); - } - let first: Type = content.parse()?; - if content.peek(Token![,]) { - return Ok(Type::Tuple(TypeTuple { - paren_token: paren_token, - elems: { - let mut elems = Punctuated::new(); - elems.push_value(first); - elems.push_punct(content.parse()?); - let rest: Punctuated = - content.parse_terminated(Parse::parse)?; - elems.extend(rest); - elems - }, - })); - } - if allow_plus && input.peek(Token![+]) { - loop { - let first = match first { - Type::Path(TypePath { qself: None, path }) => { - TypeParamBound::Trait(TraitBound { - paren_token: Some(paren_token), - modifier: TraitBoundModifier::None, - lifetimes: None, - path: path, - }) - } - Type::TraitObject(TypeTraitObject { - dyn_token: None, - ref bounds, - }) => { - if bounds.len() > 1 || bounds.trailing_punct() { - break; - } - match first { - Type::TraitObject(TypeTraitObject { bounds, .. }) => { - match bounds.into_iter().next().unwrap() { - TypeParamBound::Trait(trait_bound) => { - TypeParamBound::Trait(TraitBound { - paren_token: Some(paren_token), - ..trait_bound - }) - } - other => other, - } - } - _ => unreachable!(), - } - } - _ => break, - }; - return Ok(Type::TraitObject(TypeTraitObject { - dyn_token: None, - bounds: { - let mut bounds = Punctuated::new(); - bounds.push_value(first); - while let Some(plus) = input.parse()? 
{ - bounds.push_punct(plus); - bounds.push_value(input.parse()?); - } - bounds - }, - })); - } - } - Ok(Type::Paren(TypeParen { - paren_token: paren_token, - elem: Box::new(first), - })) - } else if lookahead.peek(Token![fn]) - || lookahead.peek(Token![unsafe]) - || lookahead.peek(Token![extern]) && !input.peek2(Token![::]) - { - let mut bare_fn: TypeBareFn = input.parse()?; - bare_fn.lifetimes = lifetimes; - Ok(Type::BareFn(bare_fn)) - } else if lookahead.peek(Ident) - || input.peek(Token![super]) - || input.peek(Token![self]) - || input.peek(Token![Self]) - || input.peek(Token![crate]) - || input.peek(Token![extern]) - || lookahead.peek(Token![::]) - || lookahead.peek(Token![<]) - { - if input.peek(Token![dyn]) { - let mut trait_object: TypeTraitObject = input.parse()?; - if lifetimes.is_some() { - match *trait_object.bounds.iter_mut().next().unwrap() { - TypeParamBound::Trait(ref mut trait_bound) => { - trait_bound.lifetimes = lifetimes; - } - TypeParamBound::Lifetime(_) => unreachable!(), - } - } - return Ok(Type::TraitObject(trait_object)); - } - - let ty: TypePath = input.parse()?; - if ty.qself.is_some() { - return Ok(Type::Path(ty)); - } - - if input.peek(Token![!]) && !input.peek(Token![!=]) { - let mut contains_arguments = false; - for segment in &ty.path.segments { - match segment.arguments { - PathArguments::None => {} - PathArguments::AngleBracketed(_) | PathArguments::Parenthesized(_) => { - contains_arguments = true; - } - } - } - - if !contains_arguments { - let bang_token: Token![!] = input.parse()?; - let (delimiter, tts) = mac::parse_delimiter(input)?; - return Ok(Type::Macro(TypeMacro { - mac: Macro { - path: ty.path, - bang_token: bang_token, - delimiter: delimiter, - tts: tts, - }, - })); - } - } - - if lifetimes.is_some() || allow_plus && input.peek(Token![+]) { - let mut bounds = Punctuated::new(); - bounds.push_value(TypeParamBound::Trait(TraitBound { - paren_token: None, - modifier: TraitBoundModifier::None, - lifetimes: lifetimes, - path: ty.path, - })); - if allow_plus { - while input.peek(Token![+]) { - bounds.push_punct(input.parse()?); - if input.peek(Token![>]) { - break; - } - bounds.push_value(input.parse()?); - } - } - return Ok(Type::TraitObject(TypeTraitObject { - dyn_token: None, - bounds: bounds, - })); - } - - Ok(Type::Path(ty)) - } else if lookahead.peek(token::Bracket) { - let content; - let bracket_token = bracketed!(content in input); - let elem: Type = content.parse()?; - if content.peek(Token![;]) { - Ok(Type::Array(TypeArray { - bracket_token: bracket_token, - elem: Box::new(elem), - semi_token: content.parse()?, - len: content.parse()?, - })) - } else { - Ok(Type::Slice(TypeSlice { - bracket_token: bracket_token, - elem: Box::new(elem), - })) - } - } else if lookahead.peek(Token![*]) { - input.parse().map(Type::Ptr) - } else if lookahead.peek(Token![&]) { - input.parse().map(Type::Reference) - } else if lookahead.peek(Token![!]) && !input.peek(Token![=]) { - input.parse().map(Type::Never) - } else if lookahead.peek(Token![impl ]) { - input.parse().map(Type::ImplTrait) - } else if lookahead.peek(Token![_]) { - input.parse().map(Type::Infer) - } else if lookahead.peek(Lifetime) { - input.parse().map(Type::TraitObject) - } else { - Err(lookahead.error()) - } - } - - impl Parse for TypeSlice { - fn parse(input: ParseStream) -> Result { - let content; - Ok(TypeSlice { - bracket_token: bracketed!(content in input), - elem: content.parse()?, - }) - } - } - - impl Parse for TypeArray { - fn parse(input: ParseStream) -> Result { - let content; - 
Ok(TypeArray { - bracket_token: bracketed!(content in input), - elem: content.parse()?, - semi_token: content.parse()?, - len: content.parse()?, - }) - } - } - - impl Parse for TypePtr { - fn parse(input: ParseStream) -> Result { - let star_token: Token![*] = input.parse()?; - - let lookahead = input.lookahead1(); - let (const_token, mutability) = if lookahead.peek(Token![const]) { - (Some(input.parse()?), None) - } else if lookahead.peek(Token![mut]) { - (None, Some(input.parse()?)) - } else { - return Err(lookahead.error()); - }; - - Ok(TypePtr { - star_token: star_token, - const_token: const_token, - mutability: mutability, - elem: Box::new(input.call(Type::without_plus)?), - }) - } - } - - impl Parse for TypeReference { - fn parse(input: ParseStream) -> Result { - Ok(TypeReference { - and_token: input.parse()?, - lifetime: input.parse()?, - mutability: input.parse()?, - // & binds tighter than +, so we don't allow + here. - elem: Box::new(input.call(Type::without_plus)?), - }) - } - } - - impl Parse for TypeBareFn { - fn parse(input: ParseStream) -> Result { - let args; - let allow_variadic; - Ok(TypeBareFn { - lifetimes: input.parse()?, - unsafety: input.parse()?, - abi: input.parse()?, - fn_token: input.parse()?, - paren_token: parenthesized!(args in input), - inputs: { - let mut inputs = Punctuated::new(); - while !args.is_empty() && !args.peek(Token![...]) { - inputs.push_value(args.parse()?); - if args.is_empty() { - break; - } - inputs.push_punct(args.parse()?); - } - allow_variadic = inputs.empty_or_trailing(); - inputs - }, - variadic: { - if allow_variadic && args.peek(Token![...]) { - Some(args.parse()?) - } else { - None - } - }, - output: input.call(ReturnType::without_plus)?, - }) - } - } - - impl Parse for TypeNever { - fn parse(input: ParseStream) -> Result { - Ok(TypeNever { - bang_token: input.parse()?, - }) - } - } - - impl Parse for TypeInfer { - fn parse(input: ParseStream) -> Result { - Ok(TypeInfer { - underscore_token: input.parse()?, - }) - } - } - - impl Parse for TypeTuple { - fn parse(input: ParseStream) -> Result { - let content; - Ok(TypeTuple { - paren_token: parenthesized!(content in input), - elems: content.parse_terminated(Type::parse)?, - }) - } - } - - impl Parse for TypeMacro { - fn parse(input: ParseStream) -> Result { - Ok(TypeMacro { - mac: input.parse()?, - }) - } - } - - impl Parse for TypePath { - fn parse(input: ParseStream) -> Result { - let (qself, mut path) = path::parsing::qpath(input, false)?; - - if path.segments.last().unwrap().value().arguments.is_empty() - && input.peek(token::Paren) - { - let args: ParenthesizedGenericArguments = input.parse()?; - let parenthesized = PathArguments::Parenthesized(args); - path.segments.last_mut().unwrap().value_mut().arguments = parenthesized; - } - - Ok(TypePath { - qself: qself, - path: path, - }) - } - } - - impl ReturnType { - pub fn without_plus(input: ParseStream) -> Result { - Self::parse(input, false) - } - - pub fn parse(input: ParseStream, allow_plus: bool) -> Result { - if input.peek(Token![->]) { - let arrow = input.parse()?; - let ty = ambig_ty(input, allow_plus)?; - Ok(ReturnType::Type(arrow, Box::new(ty))) - } else { - Ok(ReturnType::Default) - } - } - } - - impl Parse for ReturnType { - fn parse(input: ParseStream) -> Result { - Self::parse(input, true) - } - } - - impl Parse for TypeTraitObject { - fn parse(input: ParseStream) -> Result { - Self::parse(input, true) - } - } - - fn at_least_one_type(bounds: &Punctuated) -> bool { - for bound in bounds { - if let TypeParamBound::Trait(_) 
= *bound { - return true; - } - } - false - } - - impl TypeTraitObject { - pub fn without_plus(input: ParseStream) -> Result { - Self::parse(input, false) - } - - // Only allow multiple trait references if allow_plus is true. - pub fn parse(input: ParseStream, allow_plus: bool) -> Result { - Ok(TypeTraitObject { - dyn_token: input.parse()?, - bounds: { - let mut bounds = Punctuated::new(); - if allow_plus { - loop { - bounds.push_value(input.parse()?); - if !input.peek(Token![+]) { - break; - } - bounds.push_punct(input.parse()?); - if input.peek(Token![>]) { - break; - } - } - } else { - bounds.push_value(input.parse()?); - } - // Just lifetimes like `'a + 'b` is not a TraitObject. - if !at_least_one_type(&bounds) { - return Err(input.error("expected at least one type")); - } - bounds - }, - }) - } - } - - impl Parse for TypeImplTrait { - fn parse(input: ParseStream) -> Result { - Ok(TypeImplTrait { - impl_token: input.parse()?, - // NOTE: rust-lang/rust#34511 includes discussion about whether - // or not + should be allowed in ImplTrait directly without (). - bounds: { - let mut bounds = Punctuated::new(); - loop { - bounds.push_value(input.parse()?); - if !input.peek(Token![+]) { - break; - } - bounds.push_punct(input.parse()?); - } - bounds - }, - }) - } - } - - impl Parse for TypeGroup { - fn parse(input: ParseStream) -> Result { - let group = private::parse_group(input)?; - Ok(TypeGroup { - group_token: group.token, - elem: group.content.parse()?, - }) - } - } - - impl Parse for TypeParen { - fn parse(input: ParseStream) -> Result { - Self::parse(input, false) - } - } - - impl TypeParen { - fn parse(input: ParseStream, allow_plus: bool) -> Result { - let content; - Ok(TypeParen { - paren_token: parenthesized!(content in input), - elem: Box::new(ambig_ty(&content, allow_plus)?), - }) - } - } - - impl Parse for BareFnArg { - fn parse(input: ParseStream) -> Result { - Ok(BareFnArg { - name: { - if (input.peek(Ident) || input.peek(Token![_])) - && !input.peek2(Token![::]) - && input.peek2(Token![:]) - { - let name: BareFnArgName = input.parse()?; - let colon: Token![:] = input.parse()?; - Some((name, colon)) - } else { - None - } - }, - ty: input.parse()?, - }) - } - } - - impl Parse for BareFnArgName { - fn parse(input: ParseStream) -> Result { - let lookahead = input.lookahead1(); - if lookahead.peek(Ident) { - input.parse().map(BareFnArgName::Named) - } else if lookahead.peek(Token![_]) { - input.parse().map(BareFnArgName::Wild) - } else { - Err(lookahead.error()) - } - } - } - - impl Parse for Abi { - fn parse(input: ParseStream) -> Result { - Ok(Abi { - extern_token: input.parse()?, - name: input.parse()?, - }) - } - } - - impl Parse for Option { - fn parse(input: ParseStream) -> Result { - if input.peek(Token![extern]) { - input.parse().map(Some) - } else { - Ok(None) - } - } - } -} - -#[cfg(feature = "printing")] -mod printing { - use super::*; - - use proc_macro2::TokenStream; - use quote::ToTokens; - - use print::TokensOrDefault; - - impl ToTokens for TypeSlice { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.bracket_token.surround(tokens, |tokens| { - self.elem.to_tokens(tokens); - }); - } - } - - impl ToTokens for TypeArray { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.bracket_token.surround(tokens, |tokens| { - self.elem.to_tokens(tokens); - self.semi_token.to_tokens(tokens); - self.len.to_tokens(tokens); - }); - } - } - - impl ToTokens for TypePtr { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.star_token.to_tokens(tokens); - match 
self.mutability { - Some(ref tok) => tok.to_tokens(tokens), - None => { - TokensOrDefault(&self.const_token).to_tokens(tokens); - } - } - self.elem.to_tokens(tokens); - } - } - - impl ToTokens for TypeReference { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.and_token.to_tokens(tokens); - self.lifetime.to_tokens(tokens); - self.mutability.to_tokens(tokens); - self.elem.to_tokens(tokens); - } - } - - impl ToTokens for TypeBareFn { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.lifetimes.to_tokens(tokens); - self.unsafety.to_tokens(tokens); - self.abi.to_tokens(tokens); - self.fn_token.to_tokens(tokens); - self.paren_token.surround(tokens, |tokens| { - self.inputs.to_tokens(tokens); - if let Some(ref variadic) = self.variadic { - if !self.inputs.empty_or_trailing() { - let span = variadic.spans[0]; - Token![,](span).to_tokens(tokens); - } - variadic.to_tokens(tokens); - } - }); - self.output.to_tokens(tokens); - } - } - - impl ToTokens for TypeNever { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.bang_token.to_tokens(tokens); - } - } - - impl ToTokens for TypeTuple { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.paren_token.surround(tokens, |tokens| { - self.elems.to_tokens(tokens); - }); - } - } - - impl ToTokens for TypePath { - fn to_tokens(&self, tokens: &mut TokenStream) { - private::print_path(tokens, &self.qself, &self.path); - } - } - - impl ToTokens for TypeTraitObject { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.dyn_token.to_tokens(tokens); - self.bounds.to_tokens(tokens); - } - } - - impl ToTokens for TypeImplTrait { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.impl_token.to_tokens(tokens); - self.bounds.to_tokens(tokens); - } - } - - impl ToTokens for TypeGroup { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.group_token.surround(tokens, |tokens| { - self.elem.to_tokens(tokens); - }); - } - } - - impl ToTokens for TypeParen { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.paren_token.surround(tokens, |tokens| { - self.elem.to_tokens(tokens); - }); - } - } - - impl ToTokens for TypeInfer { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.underscore_token.to_tokens(tokens); - } - } - - impl ToTokens for TypeMacro { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.mac.to_tokens(tokens); - } - } - - impl ToTokens for TypeVerbatim { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.tts.to_tokens(tokens); - } - } - - impl ToTokens for ReturnType { - fn to_tokens(&self, tokens: &mut TokenStream) { - match *self { - ReturnType::Default => {} - ReturnType::Type(ref arrow, ref ty) => { - arrow.to_tokens(tokens); - ty.to_tokens(tokens); - } - } - } - } - - impl ToTokens for BareFnArg { - fn to_tokens(&self, tokens: &mut TokenStream) { - if let Some((ref name, ref colon)) = self.name { - name.to_tokens(tokens); - colon.to_tokens(tokens); - } - self.ty.to_tokens(tokens); - } - } - - impl ToTokens for BareFnArgName { - fn to_tokens(&self, tokens: &mut TokenStream) { - match *self { - BareFnArgName::Named(ref t) => t.to_tokens(tokens), - BareFnArgName::Wild(ref t) => t.to_tokens(tokens), - } - } - } - - impl ToTokens for Abi { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.extern_token.to_tokens(tokens); - self.name.to_tokens(tokens); - } - } -} diff --git a/third_party/rust/target-lexicon-0.4.0/.cargo-checksum.json b/third_party/rust/target-lexicon-0.4.0/.cargo-checksum.json deleted file mode 100644 index e9d6fd59da..0000000000 --- 
a/third_party/rust/target-lexicon-0.4.0/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{"Cargo.toml":"615784e9f863ec4eeb00066ba899ab2fd8cf6619abcc35411f47ed29745b88ad","LICENSE":"268872b9816f90fd8e85db5a28d33f8150ebb8dd016653fb39ef1f94f2686bc5","README.md":"89f73d9404c960632bef4cfde9709862dbb510081180318e0be7bf12d18e9da7","build.rs":"800c0d76136d72e2894a7a3eb603ce02fc4da5d96c46b970edf6f909b999741e","examples/misc.rs":"49a579845450b7b020ed5c97dca142fc548725893cbc82f6f750ee0caab2beca","src/host.rs":"fb543df4f362e9119a58523563e453110f4e3a426f0995911d0ca386657cf1d9","src/lib.rs":"69b1b9e21c0b6e447fd53991a60e7ab20f814c2ab5faa7870e3423bf588c658f","src/parse_error.rs":"9f6897c0f0b5b666ce5e7ff1f3e3001964397d3e5f933884036b14f52b612363","src/targets.rs":"ebd909b42ad8fcec6486170076f3b6454365377b2e23dbed56cf007c761ff490","src/triple.rs":"aba17839dd6895927d3d75fd5b83698df8be6d3ea64d7c0235c62acf12645a33"},"package":"1b0ab4982b8945c35cc1c46a83a9094c414f6828a099ce5dcaa8ee2b04642dcb"} \ No newline at end of file diff --git a/third_party/rust/target-lexicon-0.4.0/Cargo.toml b/third_party/rust/target-lexicon-0.4.0/Cargo.toml deleted file mode 100644 index 2fec9435ca..0000000000 --- a/third_party/rust/target-lexicon-0.4.0/Cargo.toml +++ /dev/null @@ -1,43 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -edition = "2018" -name = "target-lexicon" -version = "0.4.0" -authors = ["Dan Gohman "] -description = "Targeting utilities for compilers and related tools" -documentation = "https://docs.rs/target-lexicon/" -readme = "README.md" -keywords = ["target", "host", "triple", "compiler", "jit"] -categories = ["no-std"] -license = "Apache-2.0 WITH LLVM-exception" -repository = "https://github.com/CraneStation/target-lexicon" -[dependencies.failure] -version = "0.1.3" -features = ["derive"] -default-features = false - -[dependencies.failure_derive] -version = "0.1.3" -default-features = false -[build-dependencies.serde_json] -version = "1.0" - -[features] -default = ["std"] -std = [] -[badges.maintenance] -status = "passively-maintained" - -[badges.travis-ci] -repository = "CraneStation/target-lexicon" diff --git a/third_party/rust/target-lexicon-0.4.0/LICENSE b/third_party/rust/target-lexicon-0.4.0/LICENSE deleted file mode 100644 index be1d7c438a..0000000000 --- a/third_party/rust/target-lexicon-0.4.0/LICENSE +++ /dev/null @@ -1,219 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- - ---- LLVM Exceptions to the Apache 2.0 License ---- - -As an exception, if, as a result of your compiling your source code, portions -of this Software are embedded into an Object form of such source code, you -may redistribute such embedded portions in such Object form without complying -with the conditions of Sections 4(a), 4(b) and 4(d) of the License. - -In addition, if you combine or link compiled forms of this Software with -software that is licensed under the GPLv2 ("Combined Software") and if a -court of competent jurisdiction determines that the patent provision (Section -3), the indemnity provision (Section 9) or other Section of the License -conflicts with the conditions of the GPLv2, you may retroactively and -prospectively choose to deem waived or otherwise exclude such Section(s) of -the License, but only in their entirety and only with respect to the Combined -Software. diff --git a/third_party/rust/target-lexicon-0.4.0/README.md b/third_party/rust/target-lexicon-0.4.0/README.md deleted file mode 100644 index 582db4617e..0000000000 --- a/third_party/rust/target-lexicon-0.4.0/README.md +++ /dev/null @@ -1,18 +0,0 @@ -This is a library for managing targets for compilers and related tools. - -Currently, the main feature is support for decoding "triples", which -are strings that identify a particular target configuration. This library -provides a `Triple` struct containing enums for each of fields of a -triple. `Triple` implements `FromStr` and `fmt::Display` so it can be -converted to and from the conventional string representation of a triple. - -`Triple` also has functions for querying a triple's endianness, -pointer bit width, and binary format. - -And, `Triple` and the enum types have `host()` constructors, for targeting -the host. - -It supports all triples currently used by rustc and rustup. - -It does not support reading JSON target files itself. To use it with a JSON -target file, construct a `Triple` using the value of the "llvm-target" field. diff --git a/third_party/rust/target-lexicon-0.4.0/build.rs b/third_party/rust/target-lexicon-0.4.0/build.rs deleted file mode 100644 index 8d8663baad..0000000000 --- a/third_party/rust/target-lexicon-0.4.0/build.rs +++ /dev/null @@ -1,229 +0,0 @@ -//! build.rs file to obtain the host information. - -// Allow dead code in triple.rs and targets.rs for our purposes here. -#![allow(dead_code)] - -use serde_json::Value; -use std::env; -use std::ffi::OsString; -use std::fs::File; -use std::io::prelude::*; -use std::io::{self, Write}; -use std::path::{Path, PathBuf}; -use std::str::FromStr; - -extern crate serde_json; - -// Include triple.rs and targets.rs so we can parse the TARGET environment variable. -mod triple { - include!("src/triple.rs"); -} -mod targets { - include!("src/targets.rs"); -} - -// Stub out `ParseError` to minimally support triple.rs and targets.rs. -mod parse_error { - #[derive(Debug)] - pub enum ParseError { - UnrecognizedArchitecture(String), - UnrecognizedVendor(String), - UnrecognizedOperatingSystem(String), - UnrecognizedEnvironment(String), - UnrecognizedBinaryFormat(String), - UnrecognizedField(String), - NoneWithoutBinaryFormat, - } -} - -use self::triple::{Endianness, PointerWidth, Triple}; - -/// Assuming `target` is a path to a custom target json config file, open it -/// and build a `Triple` using its contents. 
-fn read_target_from_file(path: &Path) -> Triple { - let mut file = File::open(path).expect("error opening target file"); - let mut json = String::new(); - file.read_to_string(&mut json) - .expect("error reading target file"); - - let v: Value = serde_json::from_str(&json).expect("error parsing target file as json"); - let target = v["llvm-target"] - .as_str() - .expect("error parsing \"llvm-target\" as a string"); - let triple = Triple::from_str(target).expect("error parsing host target"); - - // Check that the JSON describes a known target configuration. - // - // Unfortunately, none of Rust's "arch", "os", "env", nor "vendor" - // fields directly correspond to triple fields, so we can't easily - // check them. - if let Some(endian) = v["target-endian"].as_str() { - assert_eq!( - endian, - match triple.endianness().unwrap() { - Endianness::Little => "little", - Endianness::Big => "big", - }, - "\"target-endian\" field disagrees with the target triple" - ); - } - if let Some(pointer_width) = v["target-pointer-width"].as_str() { - assert_eq!( - pointer_width, - match triple.pointer_width().unwrap() { - PointerWidth::U16 => "16", - PointerWidth::U32 => "32", - PointerWidth::U64 => "64", - }, - "\"target-pointer-width\" field disagrees with the target triple" - ); - } - - triple -} - -/// Assuming `target` is a target identifier, search for an appropriate custom -/// target json config file in the way that rustc does, and then call -/// `read_target_from_file` on that. -fn read_target_from_file_in_path(target: &str) -> Triple { - let mut target_filename = target.to_owned(); - target_filename.push_str(".json"); - let target_basename = PathBuf::from(target_filename); - let target_path = env::var_os("RUST_TARGET_PATH").unwrap_or_else(|| OsString::new()); - for dir in env::split_paths(&target_path) { - let p = dir.join(&target_basename); - if p.is_file() { - return read_target_from_file(&p); - } - } - panic!("can't find custom target {}", target); -} - -fn main() { - let out_dir = - PathBuf::from(env::var("OUT_DIR").expect("The OUT_DIR environment variable must be set")); - - let target = env::var("TARGET").expect("The TARGET environment variable must be set"); - - // The following intends to match the logic in rustc. 
- let triple = if target.ends_with(".json") { - read_target_from_file(Path::new(&target)) - } else { - match Triple::from_str(&target) { - Ok(triple) => triple, - Err(_) => read_target_from_file_in_path(&target), - } - }; - - let out = File::create(out_dir.join("host.rs")).expect("error creating host.rs"); - write_host_rs(out, triple).expect("error writing host.rs"); -} - -fn write_host_rs(mut out: File, triple: Triple) -> io::Result<()> { - writeln!(out, "/// The `Triple` of the current host.")?; - writeln!(out, "pub static HOST: Triple = Triple {{")?; - writeln!( - out, - " architecture: Architecture::{:?},", - triple.architecture - )?; - writeln!(out, " vendor: Vendor::{:?},", triple.vendor)?; - writeln!( - out, - " operating_system: OperatingSystem::{:?},", - triple.operating_system - )?; - writeln!( - out, - " environment: Environment::{:?},", - triple.environment - )?; - writeln!( - out, - " binary_format: BinaryFormat::{:?},", - triple.binary_format - )?; - writeln!(out, "}};")?; - writeln!(out)?; - - writeln!(out, "impl Architecture {{")?; - writeln!(out, " /// Return the architecture for the current host.")?; - writeln!(out, " pub fn host() -> Self {{")?; - writeln!(out, " Architecture::{:?}", triple.architecture)?; - writeln!(out, " }}")?; - writeln!(out, "}}")?; - writeln!(out)?; - - writeln!(out, "impl Vendor {{")?; - writeln!(out, " /// Return the vendor for the current host.")?; - writeln!(out, " pub fn host() -> Self {{")?; - writeln!(out, " Vendor::{:?}", triple.vendor)?; - writeln!(out, " }}")?; - writeln!(out, "}}")?; - writeln!(out)?; - - writeln!(out, "impl OperatingSystem {{")?; - writeln!( - out, - " /// Return the operating system for the current host." - )?; - writeln!(out, " pub fn host() -> Self {{")?; - writeln!( - out, - " OperatingSystem::{:?}", - triple.operating_system - )?; - writeln!(out, " }}")?; - writeln!(out, "}}")?; - writeln!(out)?; - - writeln!(out, "impl Environment {{")?; - writeln!(out, " /// Return the environment for the current host.")?; - writeln!(out, " pub fn host() -> Self {{")?; - writeln!(out, " Environment::{:?}", triple.environment)?; - writeln!(out, " }}")?; - writeln!(out, "}}")?; - writeln!(out)?; - - writeln!(out, "impl BinaryFormat {{")?; - writeln!( - out, - " /// Return the binary format for the current host." 
- )?; - writeln!(out, " pub fn host() -> Self {{")?; - writeln!(out, " BinaryFormat::{:?}", triple.binary_format)?; - writeln!(out, " }}")?; - writeln!(out, "}}")?; - writeln!(out)?; - - writeln!(out, "impl Triple {{")?; - writeln!(out, " /// Return the triple for the current host.")?; - writeln!(out, " pub fn host() -> Self {{")?; - writeln!(out, " Self {{")?; - writeln!( - out, - " architecture: Architecture::{:?},", - triple.architecture - )?; - writeln!(out, " vendor: Vendor::{:?},", triple.vendor)?; - writeln!( - out, - " operating_system: OperatingSystem::{:?},", - triple.operating_system - )?; - writeln!( - out, - " environment: Environment::{:?},", - triple.environment - )?; - writeln!( - out, - " binary_format: BinaryFormat::{:?},", - triple.binary_format - )?; - writeln!(out, " }}")?; - writeln!(out, " }}")?; - writeln!(out, "}}")?; - - Ok(()) -} diff --git a/third_party/rust/target-lexicon-0.4.0/examples/misc.rs b/third_party/rust/target-lexicon-0.4.0/examples/misc.rs deleted file mode 100644 index 25c99e8677..0000000000 --- a/third_party/rust/target-lexicon-0.4.0/examples/misc.rs +++ /dev/null @@ -1,14 +0,0 @@ -extern crate target_lexicon; - -use core::str::FromStr; -use target_lexicon::{Triple, HOST}; - -fn main() { - println!("The host triple is {}.", HOST); - - let e = Triple::from_str("riscv32-unknown-unknown") - .expect("expected to recognize the RISC-V target") - .endianness() - .expect("expected to know the endianness of RISC-V"); - println!("The endianness of RISC-V is {:?}.", e); -} diff --git a/third_party/rust/target-lexicon-0.4.0/src/host.rs b/third_party/rust/target-lexicon-0.4.0/src/host.rs deleted file mode 100644 index 4c6ad5ba54..0000000000 --- a/third_party/rust/target-lexicon-0.4.0/src/host.rs +++ /dev/null @@ -1,56 +0,0 @@ -use crate::{Architecture, BinaryFormat, Environment, OperatingSystem, Triple, Vendor}; - -// Include the implementations of the `HOST` object containing information -// about the current host. -include!(concat!(env!("OUT_DIR"), "/host.rs")); - -#[cfg(test)] -mod tests { - #[cfg(target_os = "linux")] - #[test] - fn test_linux() { - use super::*; - assert_eq!(OperatingSystem::host(), OperatingSystem::Linux); - } - - #[cfg(target_os = "macos")] - #[test] - fn test_macos() { - use super::*; - assert_eq!(OperatingSystem::host(), OperatingSystem::Darwin); - } - - #[cfg(windows)] - #[test] - fn test_windows() { - use super::*; - assert_eq!(OperatingSystem::host(), OperatingSystem::Windows); - } - - #[cfg(target_pointer_width = "16")] - #[test] - fn test_ptr16() { - use super::*; - assert_eq!(Architecture::host().pointer_width().unwrap().bits(), 16); - } - - #[cfg(target_pointer_width = "32")] - #[test] - fn test_ptr32() { - use super::*; - assert_eq!(Architecture::host().pointer_width().unwrap().bits(), 32); - } - - #[cfg(target_pointer_width = "64")] - #[test] - fn test_ptr64() { - use super::*; - assert_eq!(Architecture::host().pointer_width().unwrap().bits(), 64); - } - - #[test] - fn host_object() { - use super::*; - assert_eq!(HOST, Triple::host()); - } -} diff --git a/third_party/rust/target-lexicon-0.4.0/src/lib.rs b/third_party/rust/target-lexicon-0.4.0/src/lib.rs deleted file mode 100644 index 91ca5b3d65..0000000000 --- a/third_party/rust/target-lexicon-0.4.0/src/lib.rs +++ /dev/null @@ -1,38 +0,0 @@ -//! Target "triple" support. 
- -#![deny(missing_docs, trivial_numeric_casts, unused_extern_crates)] -#![warn(unused_import_braces)] -#![cfg_attr( - feature = "cargo-clippy", - warn( - float_arithmetic, - mut_mut, - nonminimal_bool, - option_map_unwrap_or, - option_map_unwrap_or_else, - print_stdout, - unicode_not_nfc, - use_self - ) -)] -#![no_std] -#![cfg_attr(not(feature = "std"), feature(alloc))] - -#[cfg(not(feature = "std"))] -extern crate alloc as std; -#[cfg(feature = "std")] -extern crate std; - -#[macro_use] -extern crate failure_derive; - -mod host; -mod parse_error; -mod targets; -#[macro_use] -mod triple; - -pub use self::host::HOST; -pub use self::parse_error::ParseError; -pub use self::targets::{Architecture, BinaryFormat, Environment, OperatingSystem, Vendor}; -pub use self::triple::{CallingConvention, Endianness, PointerWidth, Triple}; diff --git a/third_party/rust/target-lexicon-0.4.0/src/parse_error.rs b/third_party/rust/target-lexicon-0.4.0/src/parse_error.rs deleted file mode 100644 index c3e39d2e83..0000000000 --- a/third_party/rust/target-lexicon-0.4.0/src/parse_error.rs +++ /dev/null @@ -1,19 +0,0 @@ -use std::string::String; - -/// An error returned from parsing a triple. -#[derive(Fail, Clone, Debug, PartialEq, Eq)] -#[allow(missing_docs)] -pub enum ParseError { - #[fail(display = "Unrecognized architecture: {}", _0)] - UnrecognizedArchitecture(String), - #[fail(display = "Unrecognized vendor: {}", _0)] - UnrecognizedVendor(String), - #[fail(display = "Unrecognized operating system: {}", _0)] - UnrecognizedOperatingSystem(String), - #[fail(display = "Unrecognized environment: {}", _0)] - UnrecognizedEnvironment(String), - #[fail(display = "Unrecognized binary format: {}", _0)] - UnrecognizedBinaryFormat(String), - #[fail(display = "Unrecognized field: {}", _0)] - UnrecognizedField(String), -} diff --git a/third_party/rust/target-lexicon-0.4.0/src/targets.rs b/third_party/rust/target-lexicon-0.4.0/src/targets.rs deleted file mode 100644 index ab0beb4e58..0000000000 --- a/third_party/rust/target-lexicon-0.4.0/src/targets.rs +++ /dev/null @@ -1,636 +0,0 @@ -// This file defines all the identifier enums and target-aware logic. - -use crate::triple::{Endianness, PointerWidth, Triple}; -use core::fmt; -use core::str::FromStr; - -/// The "architecture" field, which in some cases also specifies a specific -/// subarchitecture. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -#[allow(missing_docs)] -pub enum Architecture { - Unknown, - Aarch64, - Arm, - Armebv7r, - Armv4t, - Armv5te, - Armv6, - Armv7, - Armv7r, - Armv7s, - Asmjs, - I386, - I586, - I686, - Mips, - Mips64, - Mips64el, - Mipsel, - Msp430, - Powerpc, - Powerpc64, - Powerpc64le, - Riscv32, - Riscv32imac, - Riscv32imc, - Riscv64, - S390x, - Sparc, - Sparc64, - Sparcv9, - Thumbv6m, - Thumbv7a, - Thumbv7em, - Thumbv7m, - Thumbv7neon, - Thumbv8mBase, - Thumbv8mMain, - Wasm32, - X86_64, -} - -/// The "vendor" field, which in practice is little more than an arbitrary -/// modifier. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -#[allow(missing_docs)] -pub enum Vendor { - Unknown, - Apple, - Experimental, - Fortanix, - Pc, - Rumprun, - Sun, -} - -/// The "operating system" field, which sometimes implies an environment, and -/// sometimes isn't an actual operating system. 
-#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -#[allow(missing_docs)] -pub enum OperatingSystem { - Unknown, - Bitrig, - Cloudabi, - Darwin, - Dragonfly, - Emscripten, - Freebsd, - Fuchsia, - Haiku, - Hermit, - Ios, - L4re, - Linux, - Nebulet, - Netbsd, - None_, - Openbsd, - Redox, - Solaris, - Uefi, - Windows, -} - -/// The "environment" field, which specifies an ABI environment on top of the -/// operating system. In many configurations, this field is omitted, and the -/// environment is implied by the operating system. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -#[allow(missing_docs)] -pub enum Environment { - Unknown, - Android, - Androideabi, - Eabi, - Eabihf, - Gnu, - Gnuabi64, - Gnueabi, - Gnueabihf, - Gnuspe, - Gnux32, - Musl, - Musleabi, - Musleabihf, - Msvc, - Uclibc, - Sgx, -} - -/// The "binary format" field, which is usually omitted, and the binary format -/// is implied by the other fields. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -#[allow(missing_docs)] -pub enum BinaryFormat { - Unknown, - Elf, - Coff, - Macho, - Wasm, -} - -impl Architecture { - /// Return the endianness of this architecture. - pub fn endianness(self) -> Result { - match self { - Architecture::Unknown => Err(()), - Architecture::Aarch64 - | Architecture::Arm - | Architecture::Armv4t - | Architecture::Armv5te - | Architecture::Armv6 - | Architecture::Armv7 - | Architecture::Armv7r - | Architecture::Armv7s - | Architecture::Asmjs - | Architecture::I386 - | Architecture::I586 - | Architecture::I686 - | Architecture::Mips64el - | Architecture::Mipsel - | Architecture::Msp430 - | Architecture::Powerpc64le - | Architecture::Riscv32 - | Architecture::Riscv32imac - | Architecture::Riscv32imc - | Architecture::Riscv64 - | Architecture::Thumbv6m - | Architecture::Thumbv7a - | Architecture::Thumbv7em - | Architecture::Thumbv7m - | Architecture::Thumbv7neon - | Architecture::Thumbv8mBase - | Architecture::Thumbv8mMain - | Architecture::Wasm32 - | Architecture::X86_64 => Ok(Endianness::Little), - Architecture::Armebv7r - | Architecture::Mips - | Architecture::Mips64 - | Architecture::Powerpc - | Architecture::Powerpc64 - | Architecture::S390x - | Architecture::Sparc - | Architecture::Sparc64 - | Architecture::Sparcv9 => Ok(Endianness::Big), - } - } - - /// Return the pointer bit width of this target's architecture. 
- pub fn pointer_width(self) -> Result { - match self { - Architecture::Unknown => Err(()), - Architecture::Msp430 => Ok(PointerWidth::U16), - Architecture::Arm - | Architecture::Armebv7r - | Architecture::Armv4t - | Architecture::Armv5te - | Architecture::Armv6 - | Architecture::Armv7 - | Architecture::Armv7r - | Architecture::Armv7s - | Architecture::Asmjs - | Architecture::I386 - | Architecture::I586 - | Architecture::I686 - | Architecture::Mipsel - | Architecture::Riscv32 - | Architecture::Riscv32imac - | Architecture::Riscv32imc - | Architecture::Sparc - | Architecture::Thumbv6m - | Architecture::Thumbv7a - | Architecture::Thumbv7em - | Architecture::Thumbv7m - | Architecture::Thumbv7neon - | Architecture::Thumbv8mBase - | Architecture::Thumbv8mMain - | Architecture::Wasm32 - | Architecture::Mips - | Architecture::Powerpc => Ok(PointerWidth::U32), - Architecture::Aarch64 - | Architecture::Mips64el - | Architecture::Powerpc64le - | Architecture::Riscv64 - | Architecture::X86_64 - | Architecture::Mips64 - | Architecture::Powerpc64 - | Architecture::S390x - | Architecture::Sparc64 - | Architecture::Sparcv9 => Ok(PointerWidth::U64), - } - } -} - -/// Return the binary format implied by this target triple, ignoring its -/// `binary_format` field. -pub fn default_binary_format(triple: &Triple) -> BinaryFormat { - match triple.operating_system { - OperatingSystem::None_ => BinaryFormat::Unknown, - OperatingSystem::Darwin | OperatingSystem::Ios => BinaryFormat::Macho, - OperatingSystem::Windows => BinaryFormat::Coff, - OperatingSystem::Nebulet | OperatingSystem::Emscripten | OperatingSystem::Unknown => { - match triple.architecture { - Architecture::Wasm32 => BinaryFormat::Wasm, - _ => BinaryFormat::Unknown, - } - } - _ => BinaryFormat::Elf, - } -} - -impl fmt::Display for Architecture { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let s = match *self { - Architecture::Unknown => "unknown", - Architecture::Aarch64 => "aarch64", - Architecture::Arm => "arm", - Architecture::Armebv7r => "armebv7r", - Architecture::Armv4t => "armv4t", - Architecture::Armv5te => "armv5te", - Architecture::Armv6 => "armv6", - Architecture::Armv7 => "armv7", - Architecture::Armv7r => "armv7r", - Architecture::Armv7s => "armv7s", - Architecture::Asmjs => "asmjs", - Architecture::I386 => "i386", - Architecture::I586 => "i586", - Architecture::I686 => "i686", - Architecture::Mips => "mips", - Architecture::Mips64 => "mips64", - Architecture::Mips64el => "mips64el", - Architecture::Mipsel => "mipsel", - Architecture::Msp430 => "msp430", - Architecture::Powerpc => "powerpc", - Architecture::Powerpc64 => "powerpc64", - Architecture::Powerpc64le => "powerpc64le", - Architecture::Riscv32 => "riscv32", - Architecture::Riscv32imac => "riscv32imac", - Architecture::Riscv32imc => "riscv32imc", - Architecture::Riscv64 => "riscv64", - Architecture::S390x => "s390x", - Architecture::Sparc => "sparc", - Architecture::Sparc64 => "sparc64", - Architecture::Sparcv9 => "sparcv9", - Architecture::Thumbv6m => "thumbv6m", - Architecture::Thumbv7a => "thumbv7a", - Architecture::Thumbv7em => "thumbv7em", - Architecture::Thumbv7m => "thumbv7m", - Architecture::Thumbv7neon => "thumbv7neon", - Architecture::Thumbv8mBase => "thumbv8m.base", - Architecture::Thumbv8mMain => "thumbv8m.main", - Architecture::Wasm32 => "wasm32", - Architecture::X86_64 => "x86_64", - }; - f.write_str(s) - } -} - -impl FromStr for Architecture { - type Err = (); - - fn from_str(s: &str) -> Result { - Ok(match s { - "unknown" => Architecture::Unknown, - 
"aarch64" => Architecture::Aarch64, - "arm" => Architecture::Arm, - "armebv7r" => Architecture::Armebv7r, - "armv4t" => Architecture::Armv4t, - "armv5te" => Architecture::Armv5te, - "armv6" => Architecture::Armv6, - "armv7" => Architecture::Armv7, - "armv7r" => Architecture::Armv7r, - "armv7s" => Architecture::Armv7s, - "asmjs" => Architecture::Asmjs, - "i386" => Architecture::I386, - "i586" => Architecture::I586, - "i686" => Architecture::I686, - "mips" => Architecture::Mips, - "mips64" => Architecture::Mips64, - "mips64el" => Architecture::Mips64el, - "mipsel" => Architecture::Mipsel, - "msp430" => Architecture::Msp430, - "powerpc" => Architecture::Powerpc, - "powerpc64" => Architecture::Powerpc64, - "powerpc64le" => Architecture::Powerpc64le, - "riscv32" => Architecture::Riscv32, - "riscv32imac" => Architecture::Riscv32imac, - "riscv32imc" => Architecture::Riscv32imc, - "riscv64" => Architecture::Riscv64, - "s390x" => Architecture::S390x, - "sparc" => Architecture::Sparc, - "sparc64" => Architecture::Sparc64, - "sparcv9" => Architecture::Sparcv9, - "thumbv6m" => Architecture::Thumbv6m, - "thumbv7a" => Architecture::Thumbv7a, - "thumbv7em" => Architecture::Thumbv7em, - "thumbv7m" => Architecture::Thumbv7m, - "thumbv7neon" => Architecture::Thumbv7neon, - "thumbv8m.base" => Architecture::Thumbv8mBase, - "thumbv8m.main" => Architecture::Thumbv8mMain, - "wasm32" => Architecture::Wasm32, - "x86_64" => Architecture::X86_64, - _ => return Err(()), - }) - } -} - -impl fmt::Display for Vendor { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let s = match *self { - Vendor::Unknown => "unknown", - Vendor::Apple => "apple", - Vendor::Experimental => "experimental", - Vendor::Fortanix => "fortanix", - Vendor::Pc => "pc", - Vendor::Rumprun => "rumprun", - Vendor::Sun => "sun", - }; - f.write_str(s) - } -} - -impl FromStr for Vendor { - type Err = (); - - fn from_str(s: &str) -> Result { - Ok(match s { - "unknown" => Vendor::Unknown, - "apple" => Vendor::Apple, - "experimental" => Vendor::Experimental, - "fortanix" => Vendor::Fortanix, - "pc" => Vendor::Pc, - "rumprun" => Vendor::Rumprun, - "sun" => Vendor::Sun, - _ => return Err(()), - }) - } -} - -impl fmt::Display for OperatingSystem { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let s = match *self { - OperatingSystem::Unknown => "unknown", - OperatingSystem::Bitrig => "bitrig", - OperatingSystem::Cloudabi => "cloudabi", - OperatingSystem::Darwin => "darwin", - OperatingSystem::Dragonfly => "dragonfly", - OperatingSystem::Emscripten => "emscripten", - OperatingSystem::Freebsd => "freebsd", - OperatingSystem::Fuchsia => "fuchsia", - OperatingSystem::Haiku => "haiku", - OperatingSystem::Hermit => "hermit", - OperatingSystem::Ios => "ios", - OperatingSystem::L4re => "l4re", - OperatingSystem::Linux => "linux", - OperatingSystem::Nebulet => "nebulet", - OperatingSystem::Netbsd => "netbsd", - OperatingSystem::None_ => "none", - OperatingSystem::Openbsd => "openbsd", - OperatingSystem::Redox => "redox", - OperatingSystem::Solaris => "solaris", - OperatingSystem::Uefi => "uefi", - OperatingSystem::Windows => "windows", - }; - f.write_str(s) - } -} - -impl FromStr for OperatingSystem { - type Err = (); - - fn from_str(s: &str) -> Result { - Ok(match s { - "unknown" => OperatingSystem::Unknown, - "bitrig" => OperatingSystem::Bitrig, - "cloudabi" => OperatingSystem::Cloudabi, - "darwin" => OperatingSystem::Darwin, - "dragonfly" => OperatingSystem::Dragonfly, - "emscripten" => OperatingSystem::Emscripten, - "freebsd" => 
OperatingSystem::Freebsd, - "fuchsia" => OperatingSystem::Fuchsia, - "haiku" => OperatingSystem::Haiku, - "hermit" => OperatingSystem::Hermit, - "ios" => OperatingSystem::Ios, - "l4re" => OperatingSystem::L4re, - "linux" => OperatingSystem::Linux, - "nebulet" => OperatingSystem::Nebulet, - "netbsd" => OperatingSystem::Netbsd, - "none" => OperatingSystem::None_, - "openbsd" => OperatingSystem::Openbsd, - "redox" => OperatingSystem::Redox, - "solaris" => OperatingSystem::Solaris, - "uefi" => OperatingSystem::Uefi, - "windows" => OperatingSystem::Windows, - _ => return Err(()), - }) - } -} - -impl fmt::Display for Environment { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let s = match *self { - Environment::Unknown => "unknown", - Environment::Android => "android", - Environment::Androideabi => "androideabi", - Environment::Eabi => "eabi", - Environment::Eabihf => "eabihf", - Environment::Gnu => "gnu", - Environment::Gnuabi64 => "gnuabi64", - Environment::Gnueabi => "gnueabi", - Environment::Gnueabihf => "gnueabihf", - Environment::Gnuspe => "gnuspe", - Environment::Gnux32 => "gnux32", - Environment::Musl => "musl", - Environment::Musleabi => "musleabi", - Environment::Musleabihf => "musleabihf", - Environment::Msvc => "msvc", - Environment::Uclibc => "uclibc", - Environment::Sgx => "sgx", - }; - f.write_str(s) - } -} - -impl FromStr for Environment { - type Err = (); - - fn from_str(s: &str) -> Result { - Ok(match s { - "unknown" => Environment::Unknown, - "android" => Environment::Android, - "androideabi" => Environment::Androideabi, - "eabi" => Environment::Eabi, - "eabihf" => Environment::Eabihf, - "gnu" => Environment::Gnu, - "gnuabi64" => Environment::Gnuabi64, - "gnueabi" => Environment::Gnueabi, - "gnueabihf" => Environment::Gnueabihf, - "gnuspe" => Environment::Gnuspe, - "gnux32" => Environment::Gnux32, - "musl" => Environment::Musl, - "musleabi" => Environment::Musleabi, - "musleabihf" => Environment::Musleabihf, - "msvc" => Environment::Msvc, - "uclibc" => Environment::Uclibc, - "sgx" => Environment::Sgx, - _ => return Err(()), - }) - } -} - -impl fmt::Display for BinaryFormat { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let s = match *self { - BinaryFormat::Unknown => "unknown", - BinaryFormat::Elf => "elf", - BinaryFormat::Coff => "coff", - BinaryFormat::Macho => "macho", - BinaryFormat::Wasm => "wasm", - }; - f.write_str(s) - } -} - -impl FromStr for BinaryFormat { - type Err = (); - - fn from_str(s: &str) -> Result { - Ok(match s { - "unknown" => BinaryFormat::Unknown, - "elf" => BinaryFormat::Elf, - "coff" => BinaryFormat::Coff, - "macho" => BinaryFormat::Macho, - "wasm" => BinaryFormat::Wasm, - _ => return Err(()), - }) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use std::string::ToString; - - #[test] - fn rust_targets() { - // At the time of writing this, these are all the targets emitted by - // "rustup target list" and "rustc --print target-list". 
- let targets = [ - "aarch64-apple-ios", - "aarch64-fuchsia", - "aarch64-linux-android", - "aarch64-pc-windows-msvc", - "aarch64-unknown-cloudabi", - "aarch64-unknown-freebsd", - "aarch64-unknown-hermit", - "aarch64-unknown-linux-gnu", - "aarch64-unknown-linux-musl", - "aarch64-unknown-netbsd", - "aarch64-unknown-none", - "aarch64-unknown-openbsd", - "armebv7r-none-eabi", - "armebv7r-none-eabihf", - "arm-linux-androideabi", - "arm-unknown-linux-gnueabi", - "arm-unknown-linux-gnueabihf", - "arm-unknown-linux-musleabi", - "arm-unknown-linux-musleabihf", - "armv4t-unknown-linux-gnueabi", - "armv5te-unknown-linux-gnueabi", - "armv5te-unknown-linux-musleabi", - "armv6-unknown-netbsd-eabihf", - "armv7-apple-ios", - "armv7-linux-androideabi", - "armv7r-none-eabi", - "armv7r-none-eabihf", - "armv7s-apple-ios", - "armv7-unknown-cloudabi-eabihf", - "armv7-unknown-linux-gnueabihf", - "armv7-unknown-linux-musleabihf", - "armv7-unknown-netbsd-eabihf", - "asmjs-unknown-emscripten", - "i386-apple-ios", - "i586-pc-windows-msvc", - "i586-unknown-linux-gnu", - "i586-unknown-linux-musl", - "i686-apple-darwin", - "i686-linux-android", - "i686-pc-windows-gnu", - "i686-pc-windows-msvc", - "i686-unknown-cloudabi", - "i686-unknown-dragonfly", - "i686-unknown-freebsd", - "i686-unknown-haiku", - "i686-unknown-linux-gnu", - "i686-unknown-linux-musl", - "i686-unknown-netbsd", - "i686-unknown-openbsd", - "mips64el-unknown-linux-gnuabi64", - "mips64-unknown-linux-gnuabi64", - "mipsel-unknown-linux-gnu", - "mipsel-unknown-linux-musl", - "mipsel-unknown-linux-uclibc", - "mips-unknown-linux-gnu", - "mips-unknown-linux-musl", - "mips-unknown-linux-uclibc", - "msp430-none-elf", - "powerpc64le-unknown-linux-gnu", - "powerpc64le-unknown-linux-musl", - "powerpc64-unknown-linux-gnu", - "powerpc64-unknown-linux-musl", - "powerpc-unknown-linux-gnu", - "powerpc-unknown-linux-gnuspe", - "powerpc-unknown-linux-musl", - "powerpc-unknown-netbsd", - "riscv32imac-unknown-none-elf", - "riscv32imc-unknown-none-elf", - "s390x-unknown-linux-gnu", - "sparc64-unknown-linux-gnu", - "sparc64-unknown-netbsd", - "sparc-unknown-linux-gnu", - "sparcv9-sun-solaris", - "thumbv6m-none-eabi", - "thumbv7a-pc-windows-msvc", - "thumbv7em-none-eabi", - "thumbv7em-none-eabihf", - "thumbv7m-none-eabi", - "thumbv7neon-linux-androideabi", - "thumbv7neon-unknown-linux-gnueabihf", - "thumbv8m.base-none-eabi", - "thumbv8m.main-none-eabi", - "thumbv8m.main-none-eabihf", - "wasm32-experimental-emscripten", - "wasm32-unknown-emscripten", - "wasm32-unknown-unknown", - "x86_64-apple-darwin", - "x86_64-apple-ios", - "x86_64-fortanix-unknown-sgx", - "x86_64-fuchsia", - "x86_64-linux-android", - "x86_64-pc-windows-gnu", - "x86_64-pc-windows-msvc", - "x86_64-rumprun-netbsd", - "x86_64-sun-solaris", - "x86_64-unknown-bitrig", - "x86_64-unknown-cloudabi", - "x86_64-unknown-dragonfly", - "x86_64-unknown-freebsd", - "x86_64-unknown-haiku", - "x86_64-unknown-hermit", - "x86_64-unknown-l4re-uclibc", - "x86_64-unknown-linux-gnu", - "x86_64-unknown-linux-gnux32", - "x86_64-unknown-linux-musl", - "x86_64-unknown-netbsd", - "x86_64-unknown-openbsd", - "x86_64-unknown-redox", - "x86_64-unknown-uefi", - ]; - - for target in targets.iter() { - let t = Triple::from_str(target).expect("can't parse target"); - assert_ne!(t.architecture, Architecture::Unknown); - assert_eq!(t.to_string(), *target); - } - } -} diff --git a/third_party/rust/target-lexicon-0.4.0/src/triple.rs b/third_party/rust/target-lexicon-0.4.0/src/triple.rs deleted file mode 100644 index c998891a90..0000000000 --- 
a/third_party/rust/target-lexicon-0.4.0/src/triple.rs +++ /dev/null @@ -1,342 +0,0 @@ -// This file defines the `Triple` type and support code shared by all targets. - -use crate::parse_error::ParseError; -use crate::targets::{ - default_binary_format, Architecture, BinaryFormat, Environment, OperatingSystem, Vendor, -}; -use core::fmt; -use core::str::FromStr; -use std::borrow::ToOwned; - -/// The target memory endianness. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -#[allow(missing_docs)] -pub enum Endianness { - Little, - Big, -} - -/// The width of a pointer (in the default address space). -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -#[allow(missing_docs)] -pub enum PointerWidth { - U16, - U32, - U64, -} - -impl PointerWidth { - /// Return the number of bits in a pointer. - pub fn bits(self) -> u8 { - match self { - PointerWidth::U16 => 16, - PointerWidth::U32 => 32, - PointerWidth::U64 => 64, - } - } - - /// Return the number of bytes in a pointer. - /// - /// For these purposes, there are 8 bits in a byte. - pub fn bytes(self) -> u8 { - match self { - PointerWidth::U16 => 2, - PointerWidth::U32 => 4, - PointerWidth::U64 => 8, - } - } -} - -/// The calling convention, which specifies things like which registers are -/// used for passing arguments, which registers are callee-saved, and so on. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -#[allow(missing_docs)] -pub enum CallingConvention { - SystemV, - WindowsFastcall, -} - -/// A target "triple", because historically such things had three fields, though -/// they've grown more features over time. -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct Triple { - /// The "architecture" (and sometimes the subarchitecture). - pub architecture: Architecture, - /// The "vendor" (whatever that means). - pub vendor: Vendor, - /// The "operating system" (sometimes also the environment). - pub operating_system: OperatingSystem, - /// The "environment" on top of the operating system. - pub environment: Environment, - /// The "binary format" (rarely used). - pub binary_format: BinaryFormat, -} - -impl Triple { - /// Return the endianness of this target's architecture. - pub fn endianness(&self) -> Result { - self.architecture.endianness() - } - - /// Return the pointer width of this target's architecture. - pub fn pointer_width(&self) -> Result { - self.architecture.pointer_width() - } - - /// Return the default calling convention for the given target triple. 
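Editor's note: the triple.rs code above and below exposes `endianness()`, `pointer_width()`, and `default_calling_convention()` on a parsed `Triple`. The following is an illustrative caller-side sketch only, not part of this patch; the `describe` helper and the fallback values are invented here.

```rust
use std::str::FromStr;
use target_lexicon::{CallingConvention, Endianness, PointerWidth, Triple};

// Hypothetical helper (not part of the crate): parse a triple string and
// report the properties derived from it.
fn describe(target: &str) {
    let triple = Triple::from_str(target).expect("unrecognized target triple");
    // Each accessor returns Err(()) when the relevant field is Unknown.
    let endian = triple.endianness().unwrap_or(Endianness::Little);
    let width: PointerWidth = triple.pointer_width().unwrap_or(PointerWidth::U64);
    let cc = triple
        .default_calling_convention()
        .unwrap_or(CallingConvention::SystemV);
    println!("{}: {:?}, {}-bit, {:?}", target, endian, width.bits(), cc);
}

fn main() {
    describe("x86_64-unknown-linux-gnu"); // little-endian, 64-bit, SystemV
    describe("thumbv7em-none-eabihf");    // little-endian, 32-bit bare-metal Arm
}
```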
- pub fn default_calling_convention(&self) -> Result { - Ok(match self.operating_system { - OperatingSystem::Bitrig - | OperatingSystem::Cloudabi - | OperatingSystem::Darwin - | OperatingSystem::Dragonfly - | OperatingSystem::Freebsd - | OperatingSystem::Fuchsia - | OperatingSystem::Haiku - | OperatingSystem::Ios - | OperatingSystem::L4re - | OperatingSystem::Linux - | OperatingSystem::Nebulet - | OperatingSystem::Netbsd - | OperatingSystem::Openbsd - | OperatingSystem::Redox - | OperatingSystem::Solaris => CallingConvention::SystemV, - OperatingSystem::Windows => CallingConvention::WindowsFastcall, - _ => return Err(()), - }) - } -} - -impl Default for Triple { - fn default() -> Self { - Self { - architecture: Architecture::Unknown, - vendor: Vendor::Unknown, - operating_system: OperatingSystem::Unknown, - environment: Environment::Unknown, - binary_format: BinaryFormat::Unknown, - } - } -} - -impl Default for Architecture { - fn default() -> Self { - Architecture::Unknown - } -} - -impl Default for Vendor { - fn default() -> Self { - Vendor::Unknown - } -} - -impl Default for OperatingSystem { - fn default() -> Self { - OperatingSystem::Unknown - } -} - -impl Default for Environment { - fn default() -> Self { - Environment::Unknown - } -} - -impl Default for BinaryFormat { - fn default() -> Self { - BinaryFormat::Unknown - } -} - -impl fmt::Display for Triple { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let implied_binary_format = default_binary_format(&self); - - write!(f, "{}", self.architecture)?; - if self.vendor == Vendor::Unknown - && ((self.operating_system == OperatingSystem::Linux - && (self.environment == Environment::Android - || self.environment == Environment::Androideabi)) - || self.operating_system == OperatingSystem::Fuchsia - || (self.operating_system == OperatingSystem::None_ - && (self.architecture == Architecture::Armebv7r - || self.architecture == Architecture::Armv7r - || self.architecture == Architecture::Thumbv6m - || self.architecture == Architecture::Thumbv7em - || self.architecture == Architecture::Thumbv7m - || self.architecture == Architecture::Thumbv8mBase - || self.architecture == Architecture::Thumbv8mMain - || self.architecture == Architecture::Msp430))) - { - // As a special case, omit the vendor for Android, Fuchsia, and sometimes - // None_, depending on the hardware architecture. This logic is entirely - // ad-hoc, and is just sufficient to handle the current set of recognized - // triples. - write!(f, "-{}", self.operating_system)?; - } else { - write!(f, "-{}-{}", self.vendor, self.operating_system)?; - } - if self.environment != Environment::Unknown { - write!(f, "-{}", self.environment)?; - } - - if self.binary_format != implied_binary_format { - write!(f, "-{}", self.binary_format)?; - } - Ok(()) - } -} - -impl FromStr for Triple { - type Err = ParseError; - - fn from_str(s: &str) -> Result { - let mut parts = s.split('-'); - let mut result = Self::default(); - let mut current_part; - - current_part = parts.next(); - if let Some(s) = current_part { - if let Ok(architecture) = Architecture::from_str(s) { - result.architecture = architecture; - current_part = parts.next(); - } else { - // Insist that the triple start with a valid architecture. 
- return Err(ParseError::UnrecognizedArchitecture(s.to_owned())); - } - } - - let mut has_vendor = false; - let mut has_operating_system = false; - if let Some(s) = current_part { - if let Ok(vendor) = Vendor::from_str(s) { - has_vendor = true; - result.vendor = vendor; - current_part = parts.next(); - } - } - - if !has_operating_system { - if let Some(s) = current_part { - if let Ok(operating_system) = OperatingSystem::from_str(s) { - has_operating_system = true; - result.operating_system = operating_system; - current_part = parts.next(); - } - } - } - - let mut has_environment = false; - if let Some(s) = current_part { - if let Ok(environment) = Environment::from_str(s) { - has_environment = true; - result.environment = environment; - current_part = parts.next(); - } - } - - let mut has_binary_format = false; - if let Some(s) = current_part { - if let Ok(binary_format) = BinaryFormat::from_str(s) { - has_binary_format = true; - result.binary_format = binary_format; - current_part = parts.next(); - } - } - - // The binary format is frequently omitted; if that's the case here, - // infer it from the other fields. - if !has_binary_format { - result.binary_format = default_binary_format(&result); - } - - if let Some(s) = current_part { - Err(if !has_vendor { - ParseError::UnrecognizedVendor(s.to_owned()) - } else if !has_operating_system { - ParseError::UnrecognizedOperatingSystem(s.to_owned()) - } else if !has_environment { - ParseError::UnrecognizedEnvironment(s.to_owned()) - } else if !has_binary_format { - ParseError::UnrecognizedBinaryFormat(s.to_owned()) - } else { - ParseError::UnrecognizedField(s.to_owned()) - }) - } else { - Ok(result) - } - } -} - -/// A convenient syntax for triple "literals". -/// -/// This currently expands to code that just calls `Triple::from_str` and does -/// an `expect`, though in the future it would be cool to use procedural macros -/// or so to report errors at compile time instead. -#[macro_export] -macro_rules! 
triple { - ($str:tt) => { - target_lexicon::Triple::from_str($str).expect("invalid triple literal") - }; -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn parse_errors() { - assert_eq!( - Triple::from_str(""), - Err(ParseError::UnrecognizedArchitecture("".to_owned())) - ); - assert_eq!( - Triple::from_str("foo"), - Err(ParseError::UnrecognizedArchitecture("foo".to_owned())) - ); - assert_eq!( - Triple::from_str("unknown-foo"), - Err(ParseError::UnrecognizedVendor("foo".to_owned())) - ); - assert_eq!( - Triple::from_str("unknown-unknown-foo"), - Err(ParseError::UnrecognizedOperatingSystem("foo".to_owned())) - ); - assert_eq!( - Triple::from_str("unknown-unknown-unknown-foo"), - Err(ParseError::UnrecognizedEnvironment("foo".to_owned())) - ); - assert_eq!( - Triple::from_str("unknown-unknown-unknown-unknown-foo"), - Err(ParseError::UnrecognizedBinaryFormat("foo".to_owned())) - ); - assert_eq!( - Triple::from_str("unknown-unknown-unknown-unknown-unknown-foo"), - Err(ParseError::UnrecognizedField("foo".to_owned())) - ); - } - - #[test] - fn defaults() { - assert_eq!( - Triple::from_str("unknown-unknown-unknown"), - Ok(Triple::default()) - ); - assert_eq!( - Triple::from_str("unknown-unknown-unknown-unknown"), - Ok(Triple::default()) - ); - assert_eq!( - Triple::from_str("unknown-unknown-unknown-unknown-unknown"), - Ok(Triple::default()) - ); - } - - #[test] - fn unknown_properties() { - assert_eq!(Triple::default().endianness(), Err(())); - assert_eq!(Triple::default().pointer_width(), Err(())); - assert_eq!(Triple::default().default_calling_convention(), Err(())); - } -} diff --git a/third_party/rust/target-lexicon/.cargo-checksum.json b/third_party/rust/target-lexicon/.cargo-checksum.json index fb4b8b4b2d..5551ea9293 100644 --- a/third_party/rust/target-lexicon/.cargo-checksum.json +++ b/third_party/rust/target-lexicon/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.lock":"75d1f84b3d9e98d87e2f74984f7d032f5b28d7610cccf5e13977367189a77acd","Cargo.toml":"fe18dc2e0c86e6b28ed0244ced50e26f481087c693cb7cc1ff3062929894b0d4","LICENSE":"268872b9816f90fd8e85db5a28d33f8150ebb8dd016653fb39ef1f94f2686bc5","README.md":"c3467056d91be3f59562158ee9604c729b5b5f473efbefb036032803eb76809e","build.rs":"a9f00c32de64b949c3bb23442304fc7943154efcc831aa0d87c9b83247e4e28a","examples/host.rs":"503bafddfb372123fe4dc0e7b8037808beb5bfe6df60c00d3315922bd3792c6c","examples/misc.rs":"49a579845450b7b020ed5c97dca142fc548725893cbc82f6f750ee0caab2beca","host.rs":"4ef91a2c26405151454c2695dc0a7f455374207900ea6814d4eafdfef5f7b630","newlist":"89564342916321c5bc35e772d374a7f0af22cc9ae6dcc0027eca48d2269f18cb","sorted.txt":"5548c14054ea61b51e2d8a495da662546523c3c13e8f742f6dd57754e11f46b5","src/host.rs":"fb543df4f362e9119a58523563e453110f4e3a426f0995911d0ca386657cf1d9","src/lib.rs":"5ad3a9418a6cb52cacc0a662645ccc671c326df954671b5ec0db667653dd125a","src/parse_error.rs":"f6689a741589ca8e659b1639728696f987c9da4948701f3b7ab6dc3e35754dab","src/targets.rs":"81320bd0280c96846a328689afe22be283cb7df9c37f005ff693ee1834b345a8","src/triple.rs":"ae2895bb2ee8451b90aa6e92d7fbf24c021230416fef030fb6ad0ef051c786c3","test.sh":"40761ee2ab0b361bdce4dc17708e671f32661f62cb56a45724d60510f9498b74"},"package":"7975cb2c6f37d77b190bc5004a2bb015971464756fde9514651a525ada2a741a"} \ No newline at end of file 
+{"files":{"Cargo.lock":"a1a162e6ce8fc2234a6ddf7090410006a1920ace8738772e32a5b50e4780c19d","Cargo.toml":"f3b545fa0f184fd0d3624e6e5c205fcbdf1ad0934a2e08406549ad53c2a62ac3","LICENSE":"268872b9816f90fd8e85db5a28d33f8150ebb8dd016653fb39ef1f94f2686bc5","README.md":"c3467056d91be3f59562158ee9604c729b5b5f473efbefb036032803eb76809e","build.rs":"723100e9cdc30cd8c48407233c2cffa10f5b10703a0a11bac1230d8b86e49ccf","examples/host.rs":"503bafddfb372123fe4dc0e7b8037808beb5bfe6df60c00d3315922bd3792c6c","examples/misc.rs":"49a579845450b7b020ed5c97dca142fc548725893cbc82f6f750ee0caab2beca","newlist":"89564342916321c5bc35e772d374a7f0af22cc9ae6dcc0027eca48d2269f18cb","src/host.rs":"fb543df4f362e9119a58523563e453110f4e3a426f0995911d0ca386657cf1d9","src/lib.rs":"4414353c30f25d44df6cc14f7f9eea9991222289c6aa662b74406f6923235970","src/parse_error.rs":"b3735eabc0fd0a9dfdd6375662f20ec96a79852a00a05a98fb2e421545285e53","src/targets.rs":"9ccc0849cff06d8906dacbdc15136cc47fab85ccd795033ddfdde1397dfcfe32","src/triple.rs":"949bd83b043b53b18f643ebc3fbebbfe02a13998b787fda432a5d36aa27d20bd","test.sh":"22e3c630a6c84e90d5c70c367a6712be8eeca1e7682c00d1f65bf53e330e9191"},"package":"6f4c118a7a38378f305a9e111fcb2f7f838c0be324bfb31a77ea04f7f6e684b4"} \ No newline at end of file diff --git a/third_party/rust/target-lexicon/Cargo.lock b/third_party/rust/target-lexicon/Cargo.lock index ce652ab013..a882b1f2ce 100644 --- a/third_party/rust/target-lexicon/Cargo.lock +++ b/third_party/rust/target-lexicon/Cargo.lock @@ -1,109 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -[[package]] -name = "failure" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "failure_derive 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "failure_derive" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)", - "synstructure 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "itoa" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "proc-macro2" -version = "0.4.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "quote" -version = "0.6.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "ryu" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "serde" -version = "1.0.99" -source = "registry+https://github.com/rust-lang/crates.io-index" - -[[package]] -name = "serde_json" -version = "1.0.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "ryu 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.99 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "syn" -version = "0.15.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - 
"proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "synstructure" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "target-lexicon" -version = "0.8.1" -dependencies = [ - "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "failure_derive 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "unicode-xid" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" +version = "0.9.0" -[metadata] -"checksum failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "795bd83d3abeb9220f257e597aa0080a508b27533824adf336529648f6abf7e2" -"checksum failure_derive 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "ea1063915fd7ef4309e222a5a07cf9c319fb9c7836b1f89b85458672dbb127e1" -"checksum itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "501266b7edd0174f8530248f87f99c88fbe60ca4ef3dd486835b8d8d53136f7f" -"checksum proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)" = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" -"checksum quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" -"checksum ryu 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c92464b447c0ee8c4fb3824ecc8383b81717b9f1e74ba2e72540aef7b9f82997" -"checksum serde 1.0.99 (registry+https://github.com/rust-lang/crates.io-index)" = "fec2851eb56d010dc9a21b89ca53ee75e6528bab60c11e89d38390904982da9f" -"checksum serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)" = "051c49229f282f7c6f3813f8286cc1e3323e8051823fce42c7ea80fe13521704" -"checksum syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)" = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" -"checksum synstructure 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)" = "02353edf96d6e4dc81aea2d8490a7e9db177bf8acb0e951c24940bf866cb313f" -"checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" diff --git a/third_party/rust/target-lexicon/Cargo.toml b/third_party/rust/target-lexicon/Cargo.toml index 26f7cbd9dc..a00824bfb1 100644 --- a/third_party/rust/target-lexicon/Cargo.toml +++ b/third_party/rust/target-lexicon/Cargo.toml @@ -13,7 +13,7 @@ [package] edition = "2018" name = "target-lexicon" -version = "0.8.1" +version = "0.9.0" authors = ["Dan Gohman "] description = "Targeting utilities for compilers and related tools" documentation = "https://docs.rs/target-lexicon/" @@ -22,16 +22,10 @@ keywords = ["target", "host", "triple", "compiler", "jit"] categories = ["no-std"] license = "Apache-2.0 WITH LLVM-exception" repository = "https://github.com/CraneStation/target-lexicon" -[dependencies.failure] -version 
= "0.1.3" -features = ["derive"] -default-features = false -[dependencies.failure_derive] -version = "0.1.3" -default-features = false -[build-dependencies.serde_json] -version = "1.0" +[features] +default = [] +std = [] [badges.maintenance] status = "passively-maintained" diff --git a/third_party/rust/target-lexicon/build.rs b/third_party/rust/target-lexicon/build.rs index 51ef295639..a0ba3b7304 100644 --- a/third_party/rust/target-lexicon/build.rs +++ b/third_party/rust/target-lexicon/build.rs @@ -3,17 +3,13 @@ // Allow dead code in triple.rs and targets.rs for our purposes here. #![allow(dead_code)] -use serde_json::Value; use std::env; -use std::ffi::OsString; use std::fs::File; -use std::io::prelude::*; use std::io::{self, Write}; -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use std::str::FromStr; extern crate alloc; -extern crate serde_json; // Include triple.rs and targets.rs so we can parse the TARGET environment variable. mod triple { @@ -33,89 +29,16 @@ mod parse_error { UnrecognizedEnvironment(String), UnrecognizedBinaryFormat(String), UnrecognizedField(String), - NoneWithoutBinaryFormat, } } -use self::triple::{Endianness, PointerWidth, Triple}; - -/// Assuming `target` is a path to a custom target json config file, open it -/// and build a `Triple` using its contents. -fn read_target_from_file(path: &Path) -> Triple { - let mut file = File::open(path).expect("error opening target file"); - let mut json = String::new(); - file.read_to_string(&mut json) - .expect("error reading target file"); - - let v: Value = serde_json::from_str(&json).expect("error parsing target file as json"); - let target = v["llvm-target"] - .as_str() - .expect("error parsing \"llvm-target\" as a string"); - let triple = Triple::from_str(target).expect("error parsing host target"); - - // Check that the JSON describes a known target configuration. - // - // Unfortunately, none of Rust's "arch", "os", "env", nor "vendor" - // fields directly correspond to triple fields, so we can't easily - // check them. - if let Some(endian) = v["target-endian"].as_str() { - assert_eq!( - endian, - match triple.endianness().unwrap() { - Endianness::Little => "little", - Endianness::Big => "big", - }, - "\"target-endian\" field disagrees with the target triple" - ); - } - if let Some(pointer_width) = v["target-pointer-width"].as_str() { - assert_eq!( - pointer_width, - match triple.pointer_width().unwrap() { - PointerWidth::U16 => "16", - PointerWidth::U32 => "32", - PointerWidth::U64 => "64", - }, - "\"target-pointer-width\" field disagrees with the target triple" - ); - } - - triple -} - -/// Assuming `target` is a target identifier, search for an appropriate custom -/// target json config file in the way that rustc does, and then call -/// `read_target_from_file` on that. 
-fn read_target_from_file_in_path(target: &str) -> Triple { - let mut target_filename = target.to_owned(); - target_filename.push_str(".json"); - let target_basename = PathBuf::from(target_filename); - let target_path = env::var_os("RUST_TARGET_PATH").unwrap_or_else(|| OsString::new()); - for dir in env::split_paths(&target_path) { - let p = dir.join(&target_basename); - if p.is_file() { - return read_target_from_file(&p); - } - } - panic!("can't find custom target {}", target); -} +use self::triple::Triple; fn main() { let out_dir = PathBuf::from(env::var("OUT_DIR").expect("The OUT_DIR environment variable must be set")); - let target = env::var("TARGET").expect("The TARGET environment variable must be set"); - - // The following intends to match the logic in rustc. - let triple = if target.ends_with(".json") { - read_target_from_file(Path::new(&target)) - } else { - match Triple::from_str(&target) { - Ok(triple) => triple, - Err(_) => read_target_from_file_in_path(&target), - } - }; - + let triple = Triple::from_str(&target).expect(&format!("Invalid target name: '{}'", target)); let out = File::create(out_dir.join("host.rs")).expect("error creating host.rs"); write_host_rs(out, triple).expect("error writing host.rs"); } diff --git a/third_party/rust/target-lexicon/src/lib.rs b/third_party/rust/target-lexicon/src/lib.rs index e08f69f9a2..8d6da8dcfc 100644 --- a/third_party/rust/target-lexicon/src/lib.rs +++ b/third_party/rust/target-lexicon/src/lib.rs @@ -15,13 +15,10 @@ clippy::use_self ) )] -#![no_std] +#![cfg_attr(not(feature = "std"), no_std)] extern crate alloc; -#[macro_use] -extern crate failure_derive; - mod host; mod parse_error; mod targets; diff --git a/third_party/rust/target-lexicon/src/parse_error.rs b/third_party/rust/target-lexicon/src/parse_error.rs index 32a5ee2464..03ca4aedb9 100644 --- a/third_party/rust/target-lexicon/src/parse_error.rs +++ b/third_party/rust/target-lexicon/src/parse_error.rs @@ -1,19 +1,34 @@ use alloc::string::String; +use core::fmt; + /// An error returned from parsing a triple. 
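Editor's note: the parse_error.rs hunk that follows replaces the `failure`-derived error with a hand-written `Display` impl, and implements `std::error::Error` only behind the new `std` feature. Below is a hedged caller-side sketch of how that error surfaces; it assumes `ParseError` is re-exported at the crate root alongside `Triple`, and the matching code is illustrative, not from the patch.

```rust
use std::str::FromStr;
use target_lexicon::{ParseError, Triple};

fn main() {
    // "bogusos" is not a recognized OS, so parsing fails with an error that
    // now formats itself through the hand-written Display impl shown below.
    match Triple::from_str("x86_64-unknown-bogusos") {
        Ok(triple) => println!("parsed: {}", triple),
        Err(ParseError::UnrecognizedOperatingSystem(s)) => {
            eprintln!("unrecognized operating system: {}", s)
        }
        Err(other) => eprintln!("invalid triple: {}", other),
    }
}
```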
-#[derive(Fail, Clone, Debug, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] #[allow(missing_docs)] pub enum ParseError { - #[fail(display = "Unrecognized architecture: {}", _0)] UnrecognizedArchitecture(String), - #[fail(display = "Unrecognized vendor: {}", _0)] UnrecognizedVendor(String), - #[fail(display = "Unrecognized operating system: {}", _0)] UnrecognizedOperatingSystem(String), - #[fail(display = "Unrecognized environment: {}", _0)] UnrecognizedEnvironment(String), - #[fail(display = "Unrecognized binary format: {}", _0)] UnrecognizedBinaryFormat(String), - #[fail(display = "Unrecognized field: {}", _0)] UnrecognizedField(String), } + +impl fmt::Display for ParseError { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + use ParseError::*; + match self { + UnrecognizedArchitecture(msg) => write!(fmt, "Unrecognized architecture: {}", msg), + UnrecognizedVendor(msg) => write!(fmt, "Unrecognized vendor: {}", msg), + UnrecognizedOperatingSystem(msg) => { + write!(fmt, "Unrecognized operating system: {}", msg) + } + UnrecognizedEnvironment(msg) => write!(fmt, "Unrecognized environment: {}", msg), + UnrecognizedBinaryFormat(msg) => write!(fmt, "Unrecognized binary format: {}", msg), + UnrecognizedField(msg) => write!(fmt, "Unrecognized field: {}", msg), + } + } +} + +#[cfg(feature = "std")] +impl std::error::Error for ParseError {} diff --git a/third_party/rust/target-lexicon/src/targets.rs b/third_party/rust/target-lexicon/src/targets.rs index 0d1cc06881..6ae570ebe8 100644 --- a/third_party/rust/target-lexicon/src/targets.rs +++ b/third_party/rust/target-lexicon/src/targets.rs @@ -364,8 +364,10 @@ pub enum Environment { Musleabihf, Muslabi64, Msvc, + Kernel, Uclibc, Sgx, + Softfloat, Spe, } @@ -867,8 +869,10 @@ impl fmt::Display for Environment { Environment::Musleabihf => "musleabihf", Environment::Muslabi64 => "muslabi64", Environment::Msvc => "msvc", + Environment::Kernel => "kernel", Environment::Uclibc => "uclibc", Environment::Sgx => "sgx", + Environment::Softfloat => "softfloat", Environment::Spe => "spe", }; f.write_str(s) @@ -897,8 +901,10 @@ impl FromStr for Environment { "musleabihf" => Environment::Musleabihf, "muslabi64" => Environment::Muslabi64, "msvc" => Environment::Msvc, + "kernel" => Environment::Kernel, "uclibc" => Environment::Uclibc, "sgx" => Environment::Sgx, + "softfloat" => Environment::Softfloat, "spe" => Environment::Spe, _ => return Err(()), }) @@ -956,6 +962,7 @@ mod tests { "aarch64-unknown-linux-musl", "aarch64-unknown-netbsd", "aarch64-unknown-none", + "aarch64-unknown-none-softfloat", "aarch64-unknown-openbsd", "aarch64-unknown-redox", "aarch64-uwp-windows-msvc", @@ -1006,6 +1013,7 @@ mod tests { "i686-unknown-linux-musl", "i686-unknown-netbsd", "i686-unknown-openbsd", + "i686-unknown-uefi", "i686-uwp-windows-gnu", "i686-uwp-windows-msvc", "i686-wrs-vxworks", @@ -1067,6 +1075,7 @@ mod tests { "x86_64-fortanix-unknown-sgx", "x86_64-fuchsia", "x86_64-linux-android", + "x86_64-linux-kernel", "x86_64-apple-macosx10.7.0", "x86_64-pc-solaris", "x86_64-pc-windows-gnu", diff --git a/third_party/rust/target-lexicon/src/triple.rs b/third_party/rust/target-lexicon/src/triple.rs index c81335665d..36dcd9aa00 100644 --- a/third_party/rust/target-lexicon/src/triple.rs +++ b/third_party/rust/target-lexicon/src/triple.rs @@ -53,9 +53,20 @@ impl PointerWidth { #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] #[allow(missing_docs)] pub enum CallingConvention { + /// "System V", which is used on most Unix-like platfoms. 
Note that the + /// specific conventions vary between hardware architectures; for example, + /// x86-32's "System V" is entirely different from x86-64's "System V". SystemV, + + /// The WebAssembly C ABI. /// https://github.com/WebAssembly/tool-conventions/blob/master/BasicCABI.md WasmBasicCAbi, + + /// "Windows Fastcall", which is used on Windows. Note that like "System V", + /// this varies between hardware architectures. On x86-32 it describes what + /// Windows documentation calls "fastcall", and on x86-64 it describes what + /// Windows documentation often just calls the Windows x64 calling convention + /// (though the compiler still recognizes "fastcall" as an alias for it). WindowsFastcall, } @@ -168,7 +179,8 @@ impl fmt::Display for Triple { if self.vendor == Vendor::Unknown && ((self.operating_system == OperatingSystem::Linux && (self.environment == Environment::Android - || self.environment == Environment::Androideabi)) + || self.environment == Environment::Androideabi + || self.environment == Environment::Kernel)) || self.operating_system == OperatingSystem::Fuchsia || self.operating_system == OperatingSystem::Wasi || (self.operating_system == OperatingSystem::None_ @@ -179,7 +191,8 @@ impl fmt::Display for Triple { || self.architecture == Architecture::Arm(ArmArchitecture::Thumbv7m) || self.architecture == Architecture::Arm(ArmArchitecture::Thumbv8mBase) || self.architecture == Architecture::Arm(ArmArchitecture::Thumbv8mMain) - || self.architecture == Architecture::Msp430))) + || self.architecture == Architecture::Msp430 + || self.architecture == Architecture::X86_64))) { // As a special case, omit the vendor for Android, Fuchsia, Wasi, and sometimes // None_, depending on the hardware architecture. This logic is entirely @@ -264,17 +277,19 @@ impl FromStr for Triple { } if let Some(s) = current_part { - Err(if !has_vendor { - ParseError::UnrecognizedVendor(s.to_owned()) - } else if !has_operating_system { - ParseError::UnrecognizedOperatingSystem(s.to_owned()) - } else if !has_environment { - ParseError::UnrecognizedEnvironment(s.to_owned()) - } else if !has_binary_format { - ParseError::UnrecognizedBinaryFormat(s.to_owned()) - } else { - ParseError::UnrecognizedField(s.to_owned()) - }) + Err( + if !has_vendor && !has_operating_system && !has_environment && !has_binary_format { + ParseError::UnrecognizedVendor(s.to_owned()) + } else if !has_operating_system && !has_environment && !has_binary_format { + ParseError::UnrecognizedOperatingSystem(s.to_owned()) + } else if !has_environment && !has_binary_format { + ParseError::UnrecognizedEnvironment(s.to_owned()) + } else if !has_binary_format { + ParseError::UnrecognizedBinaryFormat(s.to_owned()) + } else { + ParseError::UnrecognizedField(s.to_owned()) + }, + ) } else { Ok(result) } diff --git a/third_party/rust/target-lexicon/test.sh b/third_party/rust/target-lexicon/test.sh index 9363dbfa77..dcff63a4e2 100644 --- a/third_party/rust/target-lexicon/test.sh +++ b/third_party/rust/target-lexicon/test.sh @@ -4,7 +4,7 @@ set -oeu pipefail for trip in wasm32-unknown-unknown wasm32-wasi arm-unknown-linux-gnueabi aarch64-unknown-linux-gnu; do echo TARGET $trip - cargo build --target $trip + cargo build --target $trip --all cp target/$trip/debug/build/target-lexicon-*/out/host.rs host.rs rustfmt host.rs diff -u target/$trip/debug/build/target-lexicon-*/out/host.rs host.rs diff --git a/third_party/rust/thread_local/.cargo-checksum.json b/third_party/rust/thread_local/.cargo-checksum.json index 8798696c8d..b84db3ed50 100644 --- 
a/third_party/rust/thread_local/.cargo-checksum.json +++ b/third_party/rust/thread_local/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"b4f81d513b95fff17f96d5da83768187870df83969fd5e49b030aaf6158ccd58","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"00a29378d5aeb66b7a48b77bee0d463c8b408b8a9cb0abb7674edb10d142aca0","benches/thread_local.rs":"cc8bde81ed6206525feff209598caf1e01e89a83bf21d8b7ccc0dadc8b89d815","src/lib.rs":"d7f875b0c4a8e2229f3510c88c0425342f1628fdf2f2b113c80789b6b90378b7","src/thread_id.rs":"0962c130061939557aa272115e4420fbbc63b6bd306783a456a8ffcbf304a447","src/unreachable.rs":"830d44988f86f4fc6c3c4dd7e9e4e7d0f2cb9c5b024c360b5f7ceae365983367"},"package":"c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b"} \ No newline at end of file +{"files":{"Cargo.toml":"a08d3007cec7ad1a83afad57980965ece5457089404f6f5d41eacc8143386d69","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"ab6f09e96c06e37ee3df492562a07c1c3548dd5abf73301f8215a5dcedcccc84","benches/thread_local.rs":"cc8bde81ed6206525feff209598caf1e01e89a83bf21d8b7ccc0dadc8b89d815","src/cached.rs":"089286aa7bcde7c92b1ee7381b74f8c30049c0d80a85c1babdbac69b2e210396","src/lib.rs":"a67d7bf8c7c3bd869ea297cf1d158db8c9c4bbf7ae1e23d9028cfc3a7554e235","src/thread_id.rs":"0962c130061939557aa272115e4420fbbc63b6bd306783a456a8ffcbf304a447","src/unreachable.rs":"830d44988f86f4fc6c3c4dd7e9e4e7d0f2cb9c5b024c360b5f7ceae365983367"},"package":"d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14"} \ No newline at end of file diff --git a/third_party/rust/thread_local/Cargo.toml b/third_party/rust/thread_local/Cargo.toml index fc7b3c82dd..0ce59da27b 100644 --- a/third_party/rust/thread_local/Cargo.toml +++ b/third_party/rust/thread_local/Cargo.toml @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. 
If you're @@ -12,7 +12,7 @@ [package] name = "thread_local" -version = "0.3.6" +version = "1.0.1" authors = ["Amanieu d'Antras "] description = "Per-object thread-local storage" documentation = "https://amanieu.github.io/thread_local-rs/thread_local/index.html" diff --git a/third_party/rust/thread_local/README.md b/third_party/rust/thread_local/README.md index 1472ed1c61..d00355bb1a 100644 --- a/third_party/rust/thread_local/README.md +++ b/third_party/rust/thread_local/README.md @@ -16,7 +16,7 @@ Add this to your `Cargo.toml`: ```toml [dependencies] -thread_local = "0.3" +thread_local = "1.0" ``` and this to your crate root: diff --git a/third_party/rust/thread_local/src/cached.rs b/third_party/rust/thread_local/src/cached.rs new file mode 100644 index 0000000000..ab43c86da5 --- /dev/null +++ b/third_party/rust/thread_local/src/cached.rs @@ -0,0 +1,198 @@ +use super::{IntoIter, IterMut, ThreadLocal}; +use std::cell::UnsafeCell; +use std::fmt; +use std::panic::UnwindSafe; +use std::sync::atomic::{AtomicUsize, Ordering}; +use thread_id; +use unreachable::{UncheckedOptionExt, UncheckedResultExt}; + +/// Wrapper around `ThreadLocal` which adds a fast path for a single thread. +/// +/// This has the same API as `ThreadLocal`, but will register the first thread +/// that sets a value as its owner. All accesses by the owner will go through +/// a special fast path which is much faster than the normal `ThreadLocal` path. +pub struct CachedThreadLocal { + owner: AtomicUsize, + local: UnsafeCell>>, + global: ThreadLocal, +} + +// CachedThreadLocal is always Sync, even if T isn't +unsafe impl Sync for CachedThreadLocal {} + +impl Default for CachedThreadLocal { + fn default() -> CachedThreadLocal { + CachedThreadLocal::new() + } +} + +impl CachedThreadLocal { + /// Creates a new empty `CachedThreadLocal`. + pub fn new() -> CachedThreadLocal { + CachedThreadLocal { + owner: AtomicUsize::new(0), + local: UnsafeCell::new(None), + global: ThreadLocal::new(), + } + } + + /// Returns the element for the current thread, if it exists. + pub fn get(&self) -> Option<&T> { + let id = thread_id::get(); + let owner = self.owner.load(Ordering::Relaxed); + if owner == id { + return unsafe { Some((*self.local.get()).as_ref().unchecked_unwrap()) }; + } + if owner == 0 { + return None; + } + self.global.get_fast(id) + } + + /// Returns the element for the current thread, or creates it if it doesn't + /// exist. + #[inline(always)] + pub fn get_or(&self, create: F) -> &T + where + F: FnOnce() -> T, + { + unsafe { + self.get_or_try(|| Ok::(create())) + .unchecked_unwrap_ok() + } + } + + /// Returns the element for the current thread, or creates it if it doesn't + /// exist. If `create` fails, that error is returned and no element is + /// added. 
+ pub fn get_or_try(&self, create: F) -> Result<&T, E> + where + F: FnOnce() -> Result, + { + let id = thread_id::get(); + let owner = self.owner.load(Ordering::Relaxed); + if owner == id { + return Ok(unsafe { (*self.local.get()).as_ref().unchecked_unwrap() }); + } + self.get_or_try_slow(id, owner, create) + } + + #[cold] + #[inline(never)] + fn get_or_try_slow(&self, id: usize, owner: usize, create: F) -> Result<&T, E> + where + F: FnOnce() -> Result, + { + if owner == 0 && self.owner.compare_and_swap(0, id, Ordering::Relaxed) == 0 { + unsafe { + (*self.local.get()) = Some(Box::new(create()?)); + return Ok((*self.local.get()).as_ref().unchecked_unwrap()); + } + } + match self.global.get_fast(id) { + Some(x) => Ok(x), + None => Ok(self.global.insert(id, Box::new(create()?), true)), + } + } + + /// Returns a mutable iterator over the local values of all threads. + /// + /// Since this call borrows the `ThreadLocal` mutably, this operation can + /// be done safely---the mutable borrow statically guarantees no other + /// threads are currently accessing their associated values. + pub fn iter_mut(&mut self) -> CachedIterMut { + CachedIterMut { + local: unsafe { (*self.local.get()).as_mut().map(|x| &mut **x) }, + global: self.global.iter_mut(), + } + } + + /// Removes all thread-specific values from the `ThreadLocal`, effectively + /// reseting it to its original state. + /// + /// Since this call borrows the `ThreadLocal` mutably, this operation can + /// be done safely---the mutable borrow statically guarantees no other + /// threads are currently accessing their associated values. + pub fn clear(&mut self) { + *self = CachedThreadLocal::new(); + } +} + +impl IntoIterator for CachedThreadLocal { + type Item = T; + type IntoIter = CachedIntoIter; + + fn into_iter(self) -> CachedIntoIter { + CachedIntoIter { + local: unsafe { (*self.local.get()).take().map(|x| *x) }, + global: self.global.into_iter(), + } + } +} + +impl<'a, T: Send + 'a> IntoIterator for &'a mut CachedThreadLocal { + type Item = &'a mut T; + type IntoIter = CachedIterMut<'a, T>; + + fn into_iter(self) -> CachedIterMut<'a, T> { + self.iter_mut() + } +} + +impl CachedThreadLocal { + /// Returns the element for the current thread, or creates a default one if + /// it doesn't exist. + pub fn get_or_default(&self) -> &T { + self.get_or(T::default) + } +} + +impl fmt::Debug for CachedThreadLocal { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "ThreadLocal {{ local_data: {:?} }}", self.get()) + } +} + +impl UnwindSafe for CachedThreadLocal {} + +/// Mutable iterator over the contents of a `CachedThreadLocal`. +pub struct CachedIterMut<'a, T: Send + 'a> { + local: Option<&'a mut T>, + global: IterMut<'a, T>, +} + +impl<'a, T: Send + 'a> Iterator for CachedIterMut<'a, T> { + type Item = &'a mut T; + + fn next(&mut self) -> Option<&'a mut T> { + self.local.take().or_else(|| self.global.next()) + } + + fn size_hint(&self) -> (usize, Option) { + let len = self.global.size_hint().0 + self.local.is_some() as usize; + (len, Some(len)) + } +} + +impl<'a, T: Send + 'a> ExactSizeIterator for CachedIterMut<'a, T> {} + +/// An iterator that moves out of a `CachedThreadLocal`. 
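Editor's note: the new src/cached.rs above documents `CachedThreadLocal` as a `ThreadLocal` wrapper where the first thread to insert becomes the owner and takes a fast path, while other threads fall back to the inner table. A minimal usage sketch under that API, not taken from the patch:

```rust
use std::cell::Cell;
use std::sync::Arc;
use std::thread;
use thread_local::CachedThreadLocal;

fn main() {
    let tls: Arc<CachedThreadLocal<Cell<u32>>> = Arc::new(CachedThreadLocal::new());

    // First access from this thread registers it as the owner (fast path).
    tls.get_or(|| Cell::new(0)).set(1);

    // A second thread is served by the inner, slower ThreadLocal.
    let tls2 = Arc::clone(&tls);
    thread::spawn(move || {
        tls2.get_or(|| Cell::new(0)).set(2);
    })
    .join()
    .unwrap();

    // Each thread still sees only its own value.
    assert_eq!(tls.get().map(Cell::get), Some(1));
}
```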
+pub struct CachedIntoIter { + local: Option, + global: IntoIter, +} + +impl Iterator for CachedIntoIter { + type Item = T; + + fn next(&mut self) -> Option { + self.local.take().or_else(|| self.global.next()) + } + + fn size_hint(&self) -> (usize, Option) { + let len = self.global.size_hint().0 + self.local.is_some() as usize; + (len, Some(len)) + } +} + +impl ExactSizeIterator for CachedIntoIter {} diff --git a/third_party/rust/thread_local/src/lib.rs b/third_party/rust/thread_local/src/lib.rs index 2f8e579b59..9fd6d197bc 100644 --- a/third_party/rust/thread_local/src/lib.rs +++ b/third_party/rust/thread_local/src/lib.rs @@ -37,7 +37,7 @@ //! use thread_local::ThreadLocal; //! let tls: ThreadLocal = ThreadLocal::new(); //! assert_eq!(tls.get(), None); -//! assert_eq!(tls.get_or(|| Box::new(5)), &5); +//! assert_eq!(tls.get_or(|| 5), &5); //! assert_eq!(tls.get(), Some(&5)); //! ``` //! @@ -56,7 +56,7 @@ //! let tls2 = tls.clone(); //! thread::spawn(move || { //! // Increment a counter to count some event... -//! let cell = tls2.get_or(|| Box::new(Cell::new(0))); +//! let cell = tls2.get_or(|| Cell::new(0)); //! cell.set(cell.get() + 1); //! }).join().unwrap(); //! } @@ -75,21 +75,22 @@ extern crate lazy_static; mod thread_id; mod unreachable; +mod cached; + +pub use cached::{CachedIntoIter, CachedIterMut, CachedThreadLocal}; -use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; -use std::sync::Mutex; -use std::marker::PhantomData; use std::cell::UnsafeCell; use std::fmt; -use std::iter::Chain; -use std::option::IntoIter as OptionIter; +use std::marker::PhantomData; use std::panic::UnwindSafe; +use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; +use std::sync::Mutex; use unreachable::{UncheckedOptionExt, UncheckedResultExt}; /// Thread-local variable wrapper /// /// See the [module-level documentation](index.html) for more. -pub struct ThreadLocal { +pub struct ThreadLocal { // Pointer to the current top-level hash table table: AtomicPtr>, @@ -97,12 +98,9 @@ pub struct ThreadLocal { // while writing to the table, not when reading from it. This also guards // the counter for the total number of values in the hash table. lock: Mutex, - - // PhantomData to indicate that we logically own T - marker: PhantomData, } -struct Table { +struct Table { // Hash entries for the table entries: Box<[TableEntry]>, @@ -113,7 +111,7 @@ struct Table { prev: Option>>, } -struct TableEntry { +struct TableEntry { // Current owner of this entry, or 0 if this is an empty entry owner: AtomicUsize, @@ -123,15 +121,15 @@ struct TableEntry { } // ThreadLocal is always Sync, even if T isn't -unsafe impl Sync for ThreadLocal {} +unsafe impl Sync for ThreadLocal {} -impl Default for ThreadLocal { +impl Default for ThreadLocal { fn default() -> ThreadLocal { ThreadLocal::new() } } -impl Drop for ThreadLocal { +impl Drop for ThreadLocal { fn drop(&mut self) { unsafe { Box::from_raw(self.table.load(Ordering::Relaxed)); @@ -140,7 +138,7 @@ impl Drop for ThreadLocal { } // Implementation of Clone for TableEntry, needed to make vec![] work -impl Clone for TableEntry { +impl Clone for TableEntry { fn clone(&self) -> TableEntry { TableEntry { owner: AtomicUsize::new(0), @@ -161,7 +159,7 @@ fn hash(id: usize, bits: usize) -> usize { id.wrapping_mul(0x9E37_79B9_7F4A_7C15) >> (64 - bits) } -impl ThreadLocal { +impl ThreadLocal { /// Creates a new empty `ThreadLocal`. 
pub fn new() -> ThreadLocal { let entry = TableEntry { @@ -176,7 +174,6 @@ impl ThreadLocal { ThreadLocal { table: AtomicPtr::new(Box::into_raw(Box::new(table))), lock: Mutex::new(0), - marker: PhantomData, } } @@ -190,10 +187,10 @@ impl ThreadLocal { /// exist. pub fn get_or(&self, create: F) -> &T where - F: FnOnce() -> Box, + F: FnOnce() -> T, { unsafe { - self.get_or_try(|| Ok::, ()>(create())) + self.get_or_try(|| Ok::(create())) .unchecked_unwrap_ok() } } @@ -203,12 +200,12 @@ impl ThreadLocal { /// added. pub fn get_or_try(&self, create: F) -> Result<&T, E> where - F: FnOnce() -> Result, E>, + F: FnOnce() -> Result, { let id = thread_id::get(); match self.get_fast(id) { Some(x) => Ok(x), - None => Ok(self.insert(id, try!(create()), true)), + None => Ok(self.insert(id, Box::new(create()?), true)), } } @@ -232,7 +229,7 @@ impl ThreadLocal { // Fast path: try to find our thread in the top-level hash table fn get_fast(&self, id: usize) -> Option<&T> { - let table = unsafe { &*self.table.load(Ordering::Relaxed) }; + let table = unsafe { &*self.table.load(Ordering::Acquire) }; match Self::lookup(id, table) { Some(x) => unsafe { Some((*x.get()).as_ref().unchecked_unwrap()) }, None => self.get_slow(id, table), @@ -307,19 +304,22 @@ impl ThreadLocal { unreachable!(); } + fn raw_iter(&mut self) -> RawIter { + RawIter { + remaining: *self.lock.get_mut().unwrap(), + index: 0, + table: self.table.load(Ordering::Relaxed), + } + } + /// Returns a mutable iterator over the local values of all threads. /// /// Since this call borrows the `ThreadLocal` mutably, this operation can /// be done safely---the mutable borrow statically guarantees no other /// threads are currently accessing their associated values. pub fn iter_mut(&mut self) -> IterMut { - let raw = RawIter { - remaining: *self.lock.lock().unwrap(), - index: 0, - table: self.table.load(Ordering::Relaxed), - }; IterMut { - raw: raw, + raw: self.raw_iter(), marker: PhantomData, } } @@ -335,25 +335,20 @@ impl ThreadLocal { } } -impl IntoIterator for ThreadLocal { - type Item = Box; +impl IntoIterator for ThreadLocal { + type Item = T; type IntoIter = IntoIter; - fn into_iter(self) -> IntoIter { - let raw = RawIter { - remaining: *self.lock.lock().unwrap(), - index: 0, - table: self.table.load(Ordering::Relaxed), - }; + fn into_iter(mut self) -> IntoIter { IntoIter { - raw: raw, + raw: self.raw_iter(), _thread_local: self, } } } -impl<'a, T: ?Sized + Send + 'a> IntoIterator for &'a mut ThreadLocal { - type Item = &'a mut Box; +impl<'a, T: Send + 'a> IntoIterator for &'a mut ThreadLocal { + type Item = &'a mut T; type IntoIter = IterMut<'a, T>; fn into_iter(self) -> IterMut<'a, T> { @@ -364,26 +359,28 @@ impl<'a, T: ?Sized + Send + 'a> IntoIterator for &'a mut ThreadLocal { impl ThreadLocal { /// Returns the element for the current thread, or creates a default one if /// it doesn't exist. 
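Editor's note: seen from a caller, the lib.rs changes above (and the `get_default` to `get_or_default` rename just below) mean that thread_local 1.0 call sites no longer box their values. A minimal sketch of the updated call sites, using only the API shown in this diff:

```rust
use thread_local::ThreadLocal;

fn main() {
    let tls: ThreadLocal<u32> = ThreadLocal::new();
    // 1.0: the closure returns T directly instead of Box<T>.
    assert_eq!(*tls.get_or(|| 41), 41);
    // 1.0: get_default() has been renamed to get_or_default().
    assert_eq!(*tls.get_or_default(), 41); // value already set on this thread
}
```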
- pub fn get_default(&self) -> &T { - self.get_or(|| Box::new(T::default())) + pub fn get_or_default(&self) -> &T { + self.get_or(Default::default) } } -impl fmt::Debug for ThreadLocal { +impl fmt::Debug for ThreadLocal { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "ThreadLocal {{ local_data: {:?} }}", self.get()) } } -impl UnwindSafe for ThreadLocal {} +impl UnwindSafe for ThreadLocal {} -struct RawIter { +struct RawIter { remaining: usize, index: usize, table: *const Table, } -impl RawIter { +impl Iterator for RawIter { + type Item = *mut Option>; + fn next(&mut self) -> Option<*mut Option>> { if self.remaining == 0 { return None; @@ -403,223 +400,68 @@ impl RawIter { self.table = unsafe { &**(*self.table).prev.as_ref().unchecked_unwrap() }; } } + + fn size_hint(&self) -> (usize, Option) { + (self.remaining, Some(self.remaining)) + } } /// Mutable iterator over the contents of a `ThreadLocal`. -pub struct IterMut<'a, T: ?Sized + Send + 'a> { +pub struct IterMut<'a, T: Send + 'a> { raw: RawIter, marker: PhantomData<&'a mut ThreadLocal>, } -impl<'a, T: ?Sized + Send + 'a> Iterator for IterMut<'a, T> { - type Item = &'a mut Box; +impl<'a, T: Send + 'a> Iterator for IterMut<'a, T> { + type Item = &'a mut T; - fn next(&mut self) -> Option<&'a mut Box> { - self.raw.next().map(|x| unsafe { - (*x).as_mut().unchecked_unwrap() - }) + fn next(&mut self) -> Option<&'a mut T> { + self.raw + .next() + .map(|x| unsafe { &mut **(*x).as_mut().unchecked_unwrap() }) } fn size_hint(&self) -> (usize, Option) { - (self.raw.remaining, Some(self.raw.remaining)) + self.raw.size_hint() } } -impl<'a, T: ?Sized + Send + 'a> ExactSizeIterator for IterMut<'a, T> {} +impl<'a, T: Send + 'a> ExactSizeIterator for IterMut<'a, T> {} /// An iterator that moves out of a `ThreadLocal`. -pub struct IntoIter { +pub struct IntoIter { raw: RawIter, _thread_local: ThreadLocal, } -impl Iterator for IntoIter { - type Item = Box; +impl Iterator for IntoIter { + type Item = T; - fn next(&mut self) -> Option> { - self.raw.next().map( - |x| unsafe { (*x).take().unchecked_unwrap() }, - ) + fn next(&mut self) -> Option { + self.raw + .next() + .map(|x| unsafe { *(*x).take().unchecked_unwrap() }) } fn size_hint(&self) -> (usize, Option) { - (self.raw.remaining, Some(self.raw.remaining)) + self.raw.size_hint() } } -impl ExactSizeIterator for IntoIter {} - -/// Wrapper around `ThreadLocal` which adds a fast path for a single thread. -/// -/// This has the same API as `ThreadLocal`, but will register the first thread -/// that sets a value as its owner. All accesses by the owner will go through -/// a special fast path which is much faster than the normal `ThreadLocal` path. -pub struct CachedThreadLocal { - owner: AtomicUsize, - local: UnsafeCell>>, - global: ThreadLocal, -} - -// CachedThreadLocal is always Sync, even if T isn't -unsafe impl Sync for CachedThreadLocal {} - -impl Default for CachedThreadLocal { - fn default() -> CachedThreadLocal { - CachedThreadLocal::new() - } -} - -impl CachedThreadLocal { - /// Creates a new empty `CachedThreadLocal`. - pub fn new() -> CachedThreadLocal { - CachedThreadLocal { - owner: AtomicUsize::new(0), - local: UnsafeCell::new(None), - global: ThreadLocal::new(), - } - } - - /// Returns the element for the current thread, if it exists. 
- pub fn get(&self) -> Option<&T> { - let id = thread_id::get(); - let owner = self.owner.load(Ordering::Relaxed); - if owner == id { - return unsafe { Some((*self.local.get()).as_ref().unchecked_unwrap()) }; - } - if owner == 0 { - return None; - } - self.global.get_fast(id) - } - - /// Returns the element for the current thread, or creates it if it doesn't - /// exist. - #[inline(always)] - pub fn get_or(&self, create: F) -> &T - where - F: FnOnce() -> Box, - { - unsafe { - self.get_or_try(|| Ok::, ()>(create())) - .unchecked_unwrap_ok() - } - } - - /// Returns the element for the current thread, or creates it if it doesn't - /// exist. If `create` fails, that error is returned and no element is - /// added. - pub fn get_or_try(&self, create: F) -> Result<&T, E> - where - F: FnOnce() -> Result, E>, - { - let id = thread_id::get(); - let owner = self.owner.load(Ordering::Relaxed); - if owner == id { - return Ok(unsafe { (*self.local.get()).as_ref().unchecked_unwrap() }); - } - self.get_or_try_slow(id, owner, create) - } - - #[cold] - #[inline(never)] - fn get_or_try_slow(&self, id: usize, owner: usize, create: F) -> Result<&T, E> - where - F: FnOnce() -> Result, E>, - { - if owner == 0 && self.owner.compare_and_swap(0, id, Ordering::Relaxed) == 0 { - unsafe { - (*self.local.get()) = Some(try!(create())); - return Ok((*self.local.get()).as_ref().unchecked_unwrap()); - } - } - match self.global.get_fast(id) { - Some(x) => Ok(x), - None => Ok(self.global.insert(id, try!(create()), true)), - } - } - - /// Returns a mutable iterator over the local values of all threads. - /// - /// Since this call borrows the `ThreadLocal` mutably, this operation can - /// be done safely---the mutable borrow statically guarantees no other - /// threads are currently accessing their associated values. - pub fn iter_mut(&mut self) -> CachedIterMut { - unsafe { - (*self.local.get()).as_mut().into_iter().chain( - self.global - .iter_mut(), - ) - } - } - - /// Removes all thread-specific values from the `ThreadLocal`, effectively - /// reseting it to its original state. - /// - /// Since this call borrows the `ThreadLocal` mutably, this operation can - /// be done safely---the mutable borrow statically guarantees no other - /// threads are currently accessing their associated values. - pub fn clear(&mut self) { - *self = CachedThreadLocal::new(); - } -} - -impl IntoIterator for CachedThreadLocal { - type Item = Box; - type IntoIter = CachedIntoIter; - - fn into_iter(self) -> CachedIntoIter { - unsafe { - (*self.local.get()).take().into_iter().chain( - self.global - .into_iter(), - ) - } - } -} - -impl<'a, T: ?Sized + Send + 'a> IntoIterator for &'a mut CachedThreadLocal { - type Item = &'a mut Box; - type IntoIter = CachedIterMut<'a, T>; - - fn into_iter(self) -> CachedIterMut<'a, T> { - self.iter_mut() - } -} - -impl CachedThreadLocal { - /// Returns the element for the current thread, or creates a default one if - /// it doesn't exist. - pub fn get_default(&self) -> &T { - self.get_or(|| Box::new(T::default())) - } -} - -impl fmt::Debug for CachedThreadLocal { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "ThreadLocal {{ local_data: {:?} }}", self.get()) - } -} - -/// Mutable iterator over the contents of a `CachedThreadLocal`. -pub type CachedIterMut<'a, T> = Chain>, IterMut<'a, T>>; - -/// An iterator that moves out of a `CachedThreadLocal`. 
-pub type CachedIntoIter = Chain>, IntoIter>; - -impl UnwindSafe for CachedThreadLocal {} +impl ExactSizeIterator for IntoIter {} #[cfg(test)] mod tests { + use super::{CachedThreadLocal, ThreadLocal}; use std::cell::RefCell; - use std::sync::Arc; use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering::Relaxed; + use std::sync::Arc; use std::thread; - use super::{ThreadLocal, CachedThreadLocal}; - fn make_create() -> Arc Box + Send + Sync> { + fn make_create() -> Arc usize + Send + Sync> { let count = AtomicUsize::new(0); - Arc::new(move || Box::new(count.fetch_add(1, Relaxed))) + Arc::new(move || count.fetch_add(1, Relaxed)) } #[test] @@ -670,8 +512,9 @@ mod tests { assert_eq!(None, tls2.get()); assert_eq!(1, *tls2.get_or(|| create2())); assert_eq!(Some(&1), tls2.get()); - }).join() - .unwrap(); + }) + .join() + .unwrap(); assert_eq!(Some(&0), tls.get()); assert_eq!(0, *tls.get_or(|| create())); @@ -691,8 +534,9 @@ mod tests { assert_eq!(None, tls2.get()); assert_eq!(1, *tls2.get_or(|| create2())); assert_eq!(Some(&1), tls2.get()); - }).join() - .unwrap(); + }) + .join() + .unwrap(); assert_eq!(Some(&0), tls.get()); assert_eq!(0, *tls.get_or(|| create())); @@ -707,11 +551,14 @@ mod tests { thread::spawn(move || { tls2.get_or(|| Box::new(2)); let tls3 = tls2.clone(); - thread::spawn(move || { tls3.get_or(|| Box::new(3)); }) - .join() - .unwrap(); - }).join() + thread::spawn(move || { + tls3.get_or(|| Box::new(3)); + }) + .join() .unwrap(); + }) + .join() + .unwrap(); let mut tls = Arc::try_unwrap(tls).unwrap(); let mut v = tls.iter_mut().map(|x| **x).collect::>(); @@ -731,11 +578,14 @@ mod tests { thread::spawn(move || { tls2.get_or(|| Box::new(2)); let tls3 = tls2.clone(); - thread::spawn(move || { tls3.get_or(|| Box::new(3)); }) - .join() - .unwrap(); - }).join() + thread::spawn(move || { + tls3.get_or(|| Box::new(3)); + }) + .join() .unwrap(); + }) + .join() + .unwrap(); let mut tls = Arc::try_unwrap(tls).unwrap(); let mut v = tls.iter_mut().map(|x| **x).collect::>(); diff --git a/third_party/rust/tokio-uds-0.1.7/.cargo-checksum.json b/third_party/rust/tokio-uds-0.1.7/.cargo-checksum.json deleted file mode 100644 index 784699ab7e..0000000000 --- a/third_party/rust/tokio-uds-0.1.7/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{"Cargo.toml":"9fffc51c0a18476b10859c9c882f31e3dae0adc0a83eed0a58bdb97bf5322995","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"69036b033e4bb951821964dbc3d9b1efe6913a6e36d9c1f206de4035a1a85cc4","README.md":"d99405dcf91b78024df7460186680c5334524232338cd9ddce0d39639958694a","src/frame.rs":"bf35e7134ce40679ca8125dbb008871db54af50450b6b389284d505e110bd0bd","src/lib.rs":"04b1cbf5a0fa2f62a0e1f9441caaed877e008eb43cc7b3fa15ac52e7da37904a","src/ucred.rs":"c60465ebe515eefea6de9a09e5ae67538ed28509a5c6553ef67541dca6a80bb2"},"package":"65ae5d255ce739e8537221ed2942e0445f4b3b813daebac1c0050ddaaa3587f9"} \ No newline at end of file diff --git a/third_party/rust/tokio-uds-0.1.7/Cargo.toml b/third_party/rust/tokio-uds-0.1.7/Cargo.toml deleted file mode 100644 index f2d8af35f8..0000000000 --- a/third_party/rust/tokio-uds-0.1.7/Cargo.toml +++ /dev/null @@ -1,53 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. 
crates.io) dependencies -# -# If you believe there's an error in this file please file an -# issue against the rust-lang/cargo repository. If you're -# editing this file be aware that the upstream Cargo.toml -# will likely look very different (and much more reasonable) - -[package] -name = "tokio-uds" -version = "0.1.7" -authors = ["Alex Crichton "] -description = "Unix Domain sockets for Tokio\n" -homepage = "https://github.com/tokio-rs/tokio-uds" -documentation = "https://docs.rs/tokio-uds" -categories = ["asynchronous"] -license = "MIT/Apache-2.0" -repository = "https://github.com/tokio-rs/tokio-uds" -[dependencies.bytes] -version = "0.4" - -[dependencies.log] -version = "0.3" - -[dependencies.tokio-core] -version = "0.1" - -[dependencies.futures] -version = "0.1.11" - -[dependencies.tokio-io] -version = "0.1" - -[dependencies.iovec] -version = "0.1" - -[dependencies.mio] -version = "0.6.5" - -[dependencies.mio-uds] -version = "0.6.4" - -[dependencies.libc] -version = "0.2" -[badges.travis-ci] -repository = "tokio-rs/tokio-uds" - -[badges.appveyor] -repository = "alexcrichton/tokio-uds" diff --git a/third_party/rust/tokio-uds-0.1.7/LICENSE-APACHE b/third_party/rust/tokio-uds-0.1.7/LICENSE-APACHE deleted file mode 100644 index 16fe87b06e..0000000000 --- a/third_party/rust/tokio-uds-0.1.7/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/third_party/rust/tokio-uds-0.1.7/LICENSE-MIT b/third_party/rust/tokio-uds-0.1.7/LICENSE-MIT deleted file mode 100644 index 28e630cf40..0000000000 --- a/third_party/rust/tokio-uds-0.1.7/LICENSE-MIT +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2016 Alex Crichton - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/tokio-uds-0.1.7/README.md b/third_party/rust/tokio-uds-0.1.7/README.md deleted file mode 100644 index e02d7a645b..0000000000 --- a/third_party/rust/tokio-uds-0.1.7/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# tokio-uds - -An implementation of Unix Domain Sockets for Tokio - -[![Build Status](https://travis-ci.org/tokio-rs/tokio-uds.svg?branch=master)](https://travis-ci.org/tokio-rs/tokio-uds) - -[Documentation](https://docs.rs/tokio-uds) - -## Usage - -First, add this to your `Cargo.toml`: - -```toml -[dependencies] -tokio-uds = "0.1" -``` - -Next, add this to your crate: - -```rust -extern crate tokio_uds; -``` - -# License - -This project is licensed under either of - - * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or - http://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or - http://opensource.org/licenses/MIT) - -at your option. - -### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in tokio-uds by you, as defined in the Apache-2.0 license, shall be -dual licensed as above, without any additional terms or conditions. diff --git a/third_party/rust/tokio-uds-0.1.7/src/frame.rs b/third_party/rust/tokio-uds-0.1.7/src/frame.rs deleted file mode 100644 index a1e288a483..0000000000 --- a/third_party/rust/tokio-uds-0.1.7/src/frame.rs +++ /dev/null @@ -1,164 +0,0 @@ -use std::io; -use std::os::unix::net::SocketAddr; -use std::path::PathBuf; - -use futures::{Async, Poll, Stream, Sink, StartSend, AsyncSink}; - -use UnixDatagram; - -/// Encoding of frames via buffers. -/// -/// This trait is used when constructing an instance of `UnixDatagramFramed` and -/// provides the `In` and `Out` types which are decoded and encoded from the -/// socket, respectively. -/// -/// Because Unix datagrams are a connectionless protocol, the `decode` method -/// receives the address where data came from and the `encode` method is also -/// responsible for determining the remote host to which the datagram should be -/// sent -/// -/// The trait itself is implemented on a type that can track state for decoding -/// or encoding, which is particularly useful for streaming parsers. In many -/// cases, though, this type will simply be a unit struct (e.g. `struct -/// HttpCodec`). -pub trait UnixDatagramCodec { - /// The type of decoded frames. - type In; - - /// The type of frames to be encoded. - type Out; - - /// Attempts to decode a frame from the provided buffer of bytes. - /// - /// This method is called by `UnixDatagramFramed` on a single datagram which - /// has been read from a socket. The `buf` argument contains the data that - /// was received from the remote address, and `src` is the address the data - /// came from. Note that typically this method should require the entire - /// contents of `buf` to be valid or otherwise return an error with - /// trailing data. - /// - /// Finally, if the bytes in the buffer are malformed then an error is - /// returned indicating why. 
This informs `Framed` that the stream is now - /// corrupt and should be terminated. - fn decode(&mut self, src: &SocketAddr, buf: &[u8]) -> io::Result; - - /// Encodes a frame into the buffer provided. - /// - /// This method will encode `msg` into the byte buffer provided by `buf`. - /// The `buf` provided is an internal buffer of the `Framed` instance and - /// will be written out when possible. - /// - /// The encode method also determines the destination to which the buffer - /// should be directed, which will be returned as a `SocketAddr`. - fn encode(&mut self, msg: Self::Out, buf: &mut Vec) - -> io::Result; -} - -/// A unified `Stream` and `Sink` interface to an underlying -/// `UnixDatagramSocket`, using the `UnixDatagramCodec` trait to encode and -/// decode frames. -/// -/// You can acquire a `UnixDatagramFramed` instance by using the -/// `UnixDatagramSocket::framed` adapter. -pub struct UnixDatagramFramed { - socket: UnixDatagram, - codec: C, - rd: Vec, - wr: Vec, - out_addr: PathBuf, -} - -impl Stream for UnixDatagramFramed { - type Item = C::In; - type Error = io::Error; - - fn poll(&mut self) -> Poll, io::Error> { - let (n, addr) = try_nb!(self.socket.recv_from(&mut self.rd)); - trace!("received {} bytes, decoding", n); - let frame = try!(self.codec.decode(&addr, &self.rd[..n])); - trace!("frame decoded from buffer"); - Ok(Async::Ready(Some(frame))) - } -} - -impl Sink for UnixDatagramFramed { - type SinkItem = C::Out; - type SinkError = io::Error; - - fn start_send(&mut self, item: C::Out) -> StartSend { - if self.wr.len() > 0 { - try!(self.poll_complete()); - if self.wr.len() > 0 { - return Ok(AsyncSink::NotReady(item)); - } - } - - self.out_addr = try!(self.codec.encode(item, &mut self.wr)); - Ok(AsyncSink::Ready) - } - - fn poll_complete(&mut self) -> Poll<(), io::Error> { - trace!("flushing framed transport"); - - if self.wr.is_empty() { - return Ok(Async::Ready(())) - } - - trace!("writing; remaining={}", self.wr.len()); - let n = try_nb!(self.socket.send_to(&self.wr, &self.out_addr)); - trace!("written {}", n); - let wrote_all = n == self.wr.len(); - self.wr.clear(); - if wrote_all { - Ok(Async::Ready(())) - } else { - Err(io::Error::new(io::ErrorKind::Other, - "failed to write entire datagram to socket")) - } - } - - fn close(&mut self) -> Poll<(), io::Error> { - try_ready!(self.poll_complete()); - Ok(().into()) - } -} - -pub fn new(socket: UnixDatagram, codec: C) -> UnixDatagramFramed { - UnixDatagramFramed { - socket: socket, - codec: codec, - out_addr: PathBuf::new(), - rd: vec![0; 64 * 1024], - wr: Vec::with_capacity(8 * 1024), - } -} - -impl UnixDatagramFramed { - /// Returns a reference to the underlying I/O stream wrapped by `Framed`. - /// - /// Note that care should be taken to not tamper with the underlying stream - /// of data coming in as it may corrupt the stream of frames otherwise being - /// worked with. - pub fn get_ref(&self) -> &UnixDatagram { - &self.socket - } - - /// Returns a mutable reference to the underlying I/O stream wrapped by - /// `Framed`. - /// - /// Note that care should be taken to not tamper with the underlying stream - /// of data coming in as it may corrupt the stream of frames otherwise being - /// worked with. - pub fn get_mut(&mut self) -> &mut UnixDatagram { - &mut self.socket - } - - /// Consumes the `Framed`, returning its underlying I/O stream. 
- /// - /// Note that care should be taken to not tamper with the underlying stream - /// of data coming in as it may corrupt the stream of frames otherwise being - /// worked with. - pub fn into_inner(self) -> UnixDatagram { - self.socket - } -} diff --git a/third_party/rust/tokio-uds-0.1.7/src/lib.rs b/third_party/rust/tokio-uds-0.1.7/src/lib.rs deleted file mode 100644 index d28a94091a..0000000000 --- a/third_party/rust/tokio-uds-0.1.7/src/lib.rs +++ /dev/null @@ -1,875 +0,0 @@ -//! Bindings for Unix Domain Sockets and futures -//! -//! This crate provides bindings between `mio_uds`, the mio crate for Unix -//! Domain sockets, and `futures`. The APIs and bindings in this crate are very -//! similar to the TCP and UDP bindings in the `futures-mio` crate. This crate -//! is also an empty crate on Windows, as Unix Domain Sockets are Unix-specific. - -// NB: this is all *very* similar to TCP/UDP, and that's intentional! - -#![cfg(unix)] -#![deny(missing_docs)] -#![doc(html_root_url = "https://docs.rs/tokio-uds/0.1")] - -extern crate bytes; -#[macro_use] -extern crate futures; -extern crate iovec; -extern crate libc; -#[macro_use] -extern crate tokio_core; -extern crate tokio_io; -extern crate mio; -extern crate mio_uds; -#[macro_use] -extern crate log; - -use std::fmt; -use std::io::{self, Read, Write}; -use std::mem; -use std::net::Shutdown; -use std::os::unix::net::{self, SocketAddr}; -use std::os::unix::prelude::*; -use std::path::Path; - -use bytes::{Buf, BufMut}; -use futures::{Future, Poll, Async, Stream}; -use futures::sync::oneshot; -use iovec::IoVec; -use tokio_core::reactor::{PollEvented, Handle}; -#[allow(deprecated)] -use tokio_core::io::Io; -use tokio_io::{IoStream, AsyncRead, AsyncWrite}; - -mod frame; -pub use frame::{UnixDatagramFramed, UnixDatagramCodec}; -mod ucred; -pub use ucred::UCred; - -fn would_block() -> io::Error { - io::Error::new(io::ErrorKind::WouldBlock, "would block") -} - -/// A Unix socket which can accept connections from other unix sockets. -pub struct UnixListener { - io: PollEvented, - pending_accept: Option>>, -} - -impl UnixListener { - /// Creates a new `UnixListener` bound to the specified path. - pub fn bind
<P>
(path: P, handle: &Handle) -> io::Result - where P: AsRef - { - UnixListener::_bind(path.as_ref(), handle) - } - - /// Consumes a `UnixListener` in the standard library and returns a - /// nonblocking `UnixListener` from this crate. - /// - /// The returned listener will be associated with the given event loop - /// specified by `handle` and is ready to perform I/O. - pub fn from_listener(listener: net::UnixListener, handle: &Handle) - -> io::Result { - let s = try!(mio_uds::UnixListener::from_listener(listener)); - UnixListener::new(s, handle) - } - - fn _bind(path: &Path, handle: &Handle) -> io::Result { - let s = try!(mio_uds::UnixListener::bind(path)); - UnixListener::new(s, handle) - } - - fn new(listener: mio_uds::UnixListener, - handle: &Handle) -> io::Result { - let io = try!(PollEvented::new(listener, handle)); - Ok(UnixListener { - io: io, - pending_accept: None, - }) - } - - /// Returns the local socket address of this listener. - pub fn local_addr(&self) -> io::Result { - self.io.get_ref().local_addr() - } - - /// Test whether this socket is ready to be read or not. - pub fn poll_read(&self) -> Async<()> { - self.io.poll_read() - } - - /// Returns the value of the `SO_ERROR` option. - pub fn take_error(&self) -> io::Result> { - self.io.get_ref().take_error() - } - - /// Attempt to accept a connection and create a new connected `UnixStream` - /// if successful. - /// - /// This function will attempt an accept operation, but will not block - /// waiting for it to complete. If the operation would block then a "would - /// block" error is returned. Additionally, if this method would block, it - /// registers the current task to receive a notification when it would - /// otherwise not block. - /// - /// Note that typically for simple usage it's easier to treat incoming - /// connections as a `Stream` of `UnixStream`s with the `incoming` method - /// below. - /// - /// # Panics - /// - /// This function will panic if it is called outside the context of a - /// future's task. It's recommended to only call this from the - /// implementation of a `Future::poll`, if necessary. - pub fn accept(&mut self) -> io::Result<(UnixStream, SocketAddr)> { - loop { - if let Some(mut pending) = self.pending_accept.take() { - match pending.poll().expect("shouldn't be canceled") { - Async::NotReady => { - self.pending_accept = Some(pending); - return Err(would_block()) - }, - Async::Ready(r) => return r, - } - } - - if let Async::NotReady = self.io.poll_read() { - return Err(io::Error::new(io::ErrorKind::WouldBlock, "not ready")) - } - - match try!(self.io.get_ref().accept()) { - None => { - self.io.need_read(); - return Err(io::Error::new(io::ErrorKind::WouldBlock, - "not ready")) - } - Some((sock, addr)) => { - // Fast path if we haven't left the event loop - if let Some(handle) = self.io.remote().handle() { - let io = try!(PollEvented::new(sock, &handle)); - return Ok((UnixStream { io: io }, addr)) - } - - // If we're off the event loop then send the socket back - // over there to get registered and then we'll get it back - // eventually. - let (tx, rx) = oneshot::channel(); - let remote = self.io.remote().clone(); - remote.spawn(move |handle| { - let res = PollEvented::new(sock, handle) - .map(move |io| { - (UnixStream { io: io }, addr) - }); - drop(tx.send(res)); - Ok(()) - }); - self.pending_accept = Some(rx); - // continue to polling the `rx` at the beginning of the loop - } - } - } - } - - - /// Consumes this listener, returning a stream of the sockets this listener - /// accepts. 
- /// - /// This method returns an implementation of the `Stream` trait which - /// resolves to the sockets the are accepted on this listener. - pub fn incoming(self) -> IoStream<(UnixStream, SocketAddr)> { - struct Incoming { - inner: UnixListener, - } - - impl Stream for Incoming { - type Item = (UnixStream, SocketAddr); - type Error = io::Error; - - fn poll(&mut self) -> Poll, io::Error> { - Ok(Some(try_nb!(self.inner.accept())).into()) - } - } - - Incoming { inner: self }.boxed() - } -} - -impl fmt::Debug for UnixListener { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.io.get_ref().fmt(f) - } -} - -impl AsRawFd for UnixListener { - fn as_raw_fd(&self) -> RawFd { - self.io.get_ref().as_raw_fd() - } -} - -/// A structure representing a connected unix socket. -/// -/// This socket can be connected directly with `UnixStream::connect` or accepted -/// from a listener with `UnixListener::incoming`. Additionally, a pair of -/// anonymous Unix sockets can be created with `UnixStream::pair`. -pub struct UnixStream { - io: PollEvented, -} - -impl UnixStream { - /// Connects to the socket named by `path`. - /// - /// This function will create a new unix socket and connect to the path - /// specified, performing associating the returned stream with the provided - /// event loop's handle. - pub fn connect
<P>
(p: P, handle: &Handle) -> io::Result - where P: AsRef - { - UnixStream::_connect(p.as_ref(), handle) - } - - fn _connect(path: &Path, handle: &Handle) -> io::Result { - let s = try!(mio_uds::UnixStream::connect(path)); - UnixStream::new(s, handle) - } - - /// Consumes a `UnixStream` in the standard library and returns a - /// nonblocking `UnixStream` from this crate. - /// - /// The returned stream will be associated with the given event loop - /// specified by `handle` and is ready to perform I/O. - pub fn from_stream(stream: net::UnixStream, handle: &Handle) - -> io::Result { - let s = try!(mio_uds::UnixStream::from_stream(stream)); - UnixStream::new(s, handle) - } - - /// Creates an unnamed pair of connected sockets. - /// - /// This function will create a pair of interconnected unix sockets for - /// communicating back and forth between one another. Each socket will be - /// associated with the event loop whose handle is also provided. - pub fn pair(handle: &Handle) -> io::Result<(UnixStream, UnixStream)> { - let (a, b) = try!(mio_uds::UnixStream::pair()); - let a = try!(UnixStream::new(a, handle)); - let b = try!(UnixStream::new(b, handle)); - - Ok((a, b)) - } - - fn new(stream: mio_uds::UnixStream, handle: &Handle) - -> io::Result { - let io = try!(PollEvented::new(stream, handle)); - Ok(UnixStream { io: io }) - } - - /// Indicates to this source of events that the corresponding I/O object is - /// no longer readable, but it needs to be. - /// - /// # Panics - /// - /// This function will panic if called outside the context of a future's - /// task. - pub fn need_read(&self) { - self.io.need_read() - } - - /// Indicates to this source of events that the corresponding I/O object is - /// no longer writable, but it needs to be. - /// - /// # Panics - /// - /// This function will panic if called outside the context of a future's - /// task. - pub fn need_write(&self) { - self.io.need_write() - } - - /// Test whether this socket is ready to be read or not. - pub fn poll_read(&self) -> Async<()> { - self.io.poll_read() - } - - /// Test whether this socket is ready to be written to or not. - pub fn poll_write(&self) -> Async<()> { - self.io.poll_write() - } - - /// Returns the socket address of the local half of this connection. - pub fn local_addr(&self) -> io::Result { - self.io.get_ref().local_addr() - } - - /// Returns the socket address of the remote half of this connection. - pub fn peer_addr(&self) -> io::Result { - self.io.get_ref().peer_addr() - } - - /// Returns effective credentials of the process which called `connect` or `socketpair`. - pub fn peer_cred(&self) -> io::Result { - ucred::get_peer_cred(self) - } - - /// Returns the value of the `SO_ERROR` option. - pub fn take_error(&self) -> io::Result> { - self.io.get_ref().take_error() - } - - /// Shuts down the read, write, or both halves of this connection. - /// - /// This function will cause all pending and future I/O calls on the - /// specified portions to immediately return with an appropriate value - /// (see the documentation of `Shutdown`). 
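As a quick illustration of the `pair`/`peer_cred` API above, mirroring the crate's own ucred test further down (variable names are illustrative):

```rust
extern crate tokio_core;
extern crate tokio_uds;

use tokio_core::reactor::Core;
use tokio_uds::UnixStream;

fn main() {
    let core = Core::new().unwrap();
    let handle = core.handle();

    // An anonymous, connected pair of sockets registered with this reactor.
    let (a, b) = UnixStream::pair(&handle).unwrap();

    // Both ends were created by this process, so the peer credentials agree.
    assert_eq!(a.peer_cred().unwrap(), b.peer_cred().unwrap());
}
```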
- pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { - self.io.get_ref().shutdown(how) - } -} - -impl Read for UnixStream { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.io.read(buf) - } -} - -impl Write for UnixStream { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.io.write(buf) - } - fn flush(&mut self) -> io::Result<()> { - self.io.flush() - } -} - -#[allow(deprecated)] -impl Io for UnixStream { - fn poll_read(&mut self) -> Async<()> { - ::poll_read(self) - } - - fn poll_write(&mut self) -> Async<()> { - ::poll_write(self) - } -} - -impl AsyncRead for UnixStream { - unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool { - false - } - - fn read_buf(&mut self, buf: &mut B) -> Poll { - <&UnixStream>::read_buf(&mut &*self, buf) - } -} - -impl AsyncWrite for UnixStream { - fn shutdown(&mut self) -> Poll<(), io::Error> { - <&UnixStream>::shutdown(&mut &*self) - } - - fn write_buf(&mut self, buf: &mut B) -> Poll { - <&UnixStream>::write_buf(&mut &*self, buf) - } -} - -impl<'a> Read for &'a UnixStream { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - (&self.io).read(buf) - } -} - -impl<'a> Write for &'a UnixStream { - fn write(&mut self, buf: &[u8]) -> io::Result { - (&self.io).write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - (&self.io).flush() - } -} - -#[allow(deprecated)] -impl<'a> Io for &'a UnixStream { - fn poll_read(&mut self) -> Async<()> { - ::poll_read(self) - } - - fn poll_write(&mut self) -> Async<()> { - ::poll_write(self) - } -} - -unsafe fn read_ready(buf: &mut B, raw_fd: RawFd) -> isize { - // The `IoVec` type can't have a 0-length size, so we create a bunch - // of dummy versions on the stack with 1 length which we'll quickly - // overwrite. - let b1: &mut [u8] = &mut [0]; - let b2: &mut [u8] = &mut [0]; - let b3: &mut [u8] = &mut [0]; - let b4: &mut [u8] = &mut [0]; - let b5: &mut [u8] = &mut [0]; - let b6: &mut [u8] = &mut [0]; - let b7: &mut [u8] = &mut [0]; - let b8: &mut [u8] = &mut [0]; - let b9: &mut [u8] = &mut [0]; - let b10: &mut [u8] = &mut [0]; - let b11: &mut [u8] = &mut [0]; - let b12: &mut [u8] = &mut [0]; - let b13: &mut [u8] = &mut [0]; - let b14: &mut [u8] = &mut [0]; - let b15: &mut [u8] = &mut [0]; - let b16: &mut [u8] = &mut [0]; - let mut bufs: [&mut IoVec; 16] = [ - b1.into(), b2.into(), b3.into(), b4.into(), - b5.into(), b6.into(), b7.into(), b8.into(), - b9.into(), b10.into(), b11.into(), b12.into(), - b13.into(), b14.into(), b15.into(), b16.into(), - ]; - - let n = buf.bytes_vec_mut(&mut bufs); - let iovecs = iovec::unix::as_os_slice_mut(&mut bufs[..n]); - - libc::readv(raw_fd, - iovecs.as_ptr(), - iovecs.len() as i32) -} - -impl<'a> AsyncRead for &'a UnixStream { - unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool { - false - } - - fn read_buf(&mut self, buf: &mut B) -> Poll { - if let Async::NotReady = ::poll_read(self) { - return Ok(Async::NotReady) - } - unsafe { - let r = read_ready(buf, self.as_raw_fd()); - if r == -1 { - let e = io::Error::last_os_error(); - if e.kind() == io::ErrorKind::WouldBlock { - self.io.need_read(); - Ok(Async::NotReady) - } else { - Err(e) - } - } else { - let r = r as usize; - buf.advance_mut(r); - Ok(r.into()) - } - } - } -} - -unsafe fn write_ready(buf: &mut B, raw_fd: RawFd) -> isize { - // The `IoVec` type can't have a zero-length size, so create a dummy - // version from a 1-length slice which we'll overwrite with the - // `bytes_vec` method. 
- static DUMMY: &[u8] = &[0]; - let iovec = <&IoVec>::from(DUMMY); - let mut bufs = [ - iovec, iovec, iovec, iovec, - iovec, iovec, iovec, iovec, - iovec, iovec, iovec, iovec, - iovec, iovec, iovec, iovec, - ]; - - let n = buf.bytes_vec(&mut bufs); - let iovecs = iovec::unix::as_os_slice(&bufs[..n]); - - libc::writev(raw_fd, - iovecs.as_ptr(), - iovecs.len() as i32) -} - -impl<'a> AsyncWrite for &'a UnixStream { - fn shutdown(&mut self) -> Poll<(), io::Error> { - Ok(().into()) - } - - fn write_buf(&mut self, buf: &mut B) -> Poll { - if let Async::NotReady = ::poll_write(self) { - return Ok(Async::NotReady) - } - unsafe { - let r = write_ready(buf, self.as_raw_fd()); - if r == -1 { - let e = io::Error::last_os_error(); - if e.kind() == io::ErrorKind::WouldBlock { - self.io.need_write(); - Ok(Async::NotReady) - } else { - Err(e) - } - } else { - let r = r as usize; - buf.advance(r); - Ok(r.into()) - } - } - } -} - -impl fmt::Debug for UnixStream { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.io.get_ref().fmt(f) - } -} - -impl AsRawFd for UnixStream { - fn as_raw_fd(&self) -> RawFd { - self.io.get_ref().as_raw_fd() - } -} - -/// An I/O object representing a Unix datagram socket. -pub struct UnixDatagram { - io: PollEvented, -} - -impl UnixDatagram { - /// Creates a new `UnixDatagram` bound to the specified path. - pub fn bind
<P>
(path: P, handle: &Handle) -> io::Result - where P: AsRef - { - UnixDatagram::_bind(path.as_ref(), handle) - } - - fn _bind(path: &Path, handle: &Handle) -> io::Result { - let s = try!(mio_uds::UnixDatagram::bind(path)); - UnixDatagram::new(s, handle) - } - - /// Creates an unnamed pair of connected sockets. - /// - /// This function will create a pair of interconnected unix sockets for - /// communicating back and forth between one another. Each socket will be - /// associated with the event loop whose handle is also provided. - pub fn pair(handle: &Handle) -> io::Result<(UnixDatagram, UnixDatagram)> { - let (a, b) = try!(mio_uds::UnixDatagram::pair()); - let a = try!(UnixDatagram::new(a, handle)); - let b = try!(UnixDatagram::new(b, handle)); - Ok((a, b)) - } - - /// Consumes a `UnixDatagram` in the standard library and returns a - /// nonblocking `UnixDatagram` from this crate. - /// - /// The returned datagram will be associated with the given event loop - /// specified by `handle` and is ready to perform I/O. - pub fn from_datagram(datagram: net::UnixDatagram, handle: &Handle) - -> io::Result { - let s = try!(mio_uds::UnixDatagram::from_datagram(datagram)); - UnixDatagram::new(s, handle) - } - - fn new(socket: mio_uds::UnixDatagram, handle: &Handle) - -> io::Result { - let io = try!(PollEvented::new(socket, handle)); - Ok(UnixDatagram { io: io }) - } - - /// Creates a new `UnixDatagram` which is not bound to any address. - pub fn unbound(handle: &Handle) -> io::Result { - let s = try!(mio_uds::UnixDatagram::unbound()); - UnixDatagram::new(s, handle) - } - - /// Connects the socket to the specified address. - /// - /// The `send` method may be used to send data to the specified address. - /// `recv` and `recv_from` will only receive data from that address. - pub fn connect>(&self, path: P) -> io::Result<()> { - self.io.get_ref().connect(path) - } - - /// Indicates to this source of events that the corresponding I/O object is - /// no longer readable, but it needs to be. - /// - /// # Panics - /// - /// This function will panic if called outside the context of a future's - /// task. - pub fn need_read(&self) { - self.io.need_read() - } - - /// Indicates to this source of events that the corresponding I/O object is - /// no longer writable, but it needs to be. - /// - /// # Panics - /// - /// This function will panic if called outside the context of a future's - /// task. - pub fn need_write(&self) { - self.io.need_write() - } - - /// Test whether this socket is ready to be read or not. - pub fn poll_read(&self) -> Async<()> { - self.io.poll_read() - } - - /// Test whether this socket is ready to be written to or not. - pub fn poll_write(&self) -> Async<()> { - self.io.poll_write() - } - - /// Returns the local address that this socket is bound to. - pub fn local_addr(&self) -> io::Result { - self.io.get_ref().local_addr() - } - - /// Returns the address of this socket's peer. - /// - /// The `connect` method will connect the socket to a peer. - pub fn peer_addr(&self) -> io::Result { - self.io.get_ref().peer_addr() - } - - /// Receives data from the socket. - /// - /// On success, returns the number of bytes read and the address from - /// whence the data came. - pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> { - if self.io.poll_read().is_not_ready() { - return Err(would_block()) - } - let r = self.io.get_ref().recv_from(buf); - if is_wouldblock(&r) { - self.io.need_read(); - } - return r - } - - /// Receives data from the socket. 
- /// - /// On success, returns the number of bytes read. - pub fn recv(&self, buf: &mut [u8]) -> io::Result { - if self.io.poll_read().is_not_ready() { - return Err(would_block()) - } - let r = self.io.get_ref().recv(buf); - if is_wouldblock(&r) { - self.io.need_read(); - } - return r - } - - /// Returns a future for receiving a datagram. See the documentation on RecvDgram for details. - pub fn recv_dgram(self, buf: T) -> RecvDgram - where T: AsMut<[u8]> - { - RecvDgram { - st: RecvDgramState::Receiving { - sock: self, - buf: buf, - }, - } - } - - /// Sends data on the socket to the specified address. - /// - /// On success, returns the number of bytes written. - pub fn send_to
<P>
(&self, buf: &[u8], path: P) -> io::Result - where P: AsRef - { - if self.io.poll_write().is_not_ready() { - return Err(would_block()) - } - let r = self.io.get_ref().send_to(buf, path); - if is_wouldblock(&r) { - self.io.need_write(); - } - return r - } - - /// Sends data on the socket to the socket's peer. - /// - /// The peer address may be set by the `connect` method, and this method - /// will return an error if the socket has not already been connected. - /// - /// On success, returns the number of bytes written. - pub fn send(&self, buf: &[u8]) -> io::Result { - if self.io.poll_write().is_not_ready() { - return Err(would_block()) - } - let r = self.io.get_ref().send(buf); - if is_wouldblock(&r) { - self.io.need_write(); - } - return r - } - - - /// Returns a future sending the data in buf to the socket at path. - pub fn send_dgram(self, buf: T, path: P) -> SendDgram - where T: AsRef<[u8]>, - P: AsRef - { - SendDgram { - st: SendDgramState::Sending { - sock: self, - buf: buf, - addr: path, - }, - } - } - - /// Returns the value of the `SO_ERROR` option. - pub fn take_error(&self) -> io::Result> { - self.io.get_ref().take_error() - } - - /// Shut down the read, write, or both halves of this connection. - /// - /// This function will cause all pending and future I/O calls on the - /// specified portions to immediately return with an appropriate value - /// (see the documentation of `Shutdown`). - pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { - self.io.get_ref().shutdown(how) - } - - /// Provides a `Stream` and `Sink` interface for reading and writing to - /// this `UnixDatagram` object, using the provided `UnixDatagramCodec` to - /// read and write the raw data. - /// - /// Raw `UnixDatagram` sockets work with datagrams, but higher-level code - /// usually wants to batch these into meaningful chunks, called "frames". - /// This method layers framing on top of this socket by using the - /// `UnixDatagramCodec` trait to handle encoding and decoding of messages - /// frames. Note that the incoming and outgoing frame types may be distinct. - /// - /// This function returns a *single* object that is both `Stream` and - /// `Sink`; grouping this into a single object is often useful for layering - /// things which require both read and write access to the underlying - /// object. - /// - /// If you want to work more directly with the streams and sink, consider - /// calling `split` on the `UnixDatagramFramed` returned by this method, - /// which will break them into separate objects, allowing them to interact - /// more easily. - pub fn framed(self, codec: C) -> UnixDatagramFramed - where C: UnixDatagramCodec, - { - frame::new(self, codec) - } -} - -impl fmt::Debug for UnixDatagram { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.io.get_ref().fmt(f) - } -} - -impl AsRawFd for UnixDatagram { - fn as_raw_fd(&self) -> RawFd { - self.io.get_ref().as_raw_fd() - } -} - -fn is_wouldblock(r: &io::Result) -> bool { - match *r { - Ok(_) => false, - Err(ref e) => e.kind() == io::ErrorKind::WouldBlock, - } -} - -/// A future for writing a buffer to a Unix datagram socket. -pub struct SendDgram { - st: SendDgramState, -} - -enum SendDgramState { - /// current state is Sending - Sending { - /// the underlying socket - sock: UnixDatagram, - /// the buffer to send - buf: T, - /// the destination - addr: P, - }, - /// neutral state - Empty, -} - -impl Future for SendDgram - where T: AsRef<[u8]>, - P: AsRef -{ - /// Returns the underlying socket and the buffer that was sent. 
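A hedged sketch of the `send_dgram`/`recv_dgram` round trip that the doc comments above describe, in the futures-0.1 / tokio-core style this crate targets; the socket paths are illustrative and must not already exist on disk:

```rust
extern crate tokio_core;
extern crate tokio_uds;

use tokio_core::reactor::Core;
use tokio_uds::UnixDatagram;

fn main() {
    let mut core = Core::new().unwrap();
    let handle = core.handle();

    let tx = UnixDatagram::bind("/tmp/example-tx.sock", &handle).unwrap();
    let rx = UnixDatagram::bind("/tmp/example-rx.sock", &handle).unwrap();

    // Each future consumes the socket and yields it back on completion,
    // along with the buffer (and, for receives, the byte count and peer path).
    let (_tx, _sent) = core
        .run(tx.send_dgram(&b"ping"[..], "/tmp/example-rx.sock"))
        .unwrap();
    let (_rx, buf, n, _peer) = core.run(rx.recv_dgram(vec![0u8; 64])).unwrap();
    assert_eq!(&buf[..n], &b"ping"[..]);
}
```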
- type Item = (UnixDatagram, T); - /// The error that is returned when sending failed. - type Error = io::Error; - - fn poll(&mut self) -> Poll { - if let SendDgramState::Sending { ref sock, ref buf, ref addr } = self.st { - let n = try_nb!(sock.send_to(buf.as_ref(), addr)); - if n < buf.as_ref().len() { - return Err(io::Error::new(io::ErrorKind::Other, - "Couldn't send whole buffer".to_string())); - } - } else { - panic!() - } - if let SendDgramState::Sending { sock, buf, addr: _ } = - mem::replace(&mut self.st, SendDgramState::Empty) { - Ok(Async::Ready((sock, buf))) - } else { - panic!() - } - } -} - -/// A future for receiving datagrams from a Unix datagram socket. -/// -/// An example that uses UDP sockets but is still applicable can be found at -/// https://gist.github.com/dermesser/e331094c2ab28fc7f6ba8a16183fe4d5. -pub struct RecvDgram { - st: RecvDgramState, -} - -/// A future similar to RecvDgram, but without allocating and returning the peer's address. -/// -/// This can be used if the peer's address is of no interest, so the allocation overhead can be -/// avoided. -enum RecvDgramState { - #[allow(dead_code)] - Receiving { sock: UnixDatagram, buf: T }, - Empty, -} - -impl Future for RecvDgram - where T: AsMut<[u8]> -{ - /// RecvDgram yields a tuple of the underlying socket, the receive buffer, how many bytes were - /// received, and the address (path) of the peer sending the datagram. If the buffer is too small, the - /// datagram is truncated. - type Item = (UnixDatagram, T, usize, String); - /// This future yields io::Error if an error occurred. - type Error = io::Error; - - fn poll(&mut self) -> Poll { - let received; - let peer; - - if let RecvDgramState::Receiving { ref sock, ref mut buf } = self.st { - let (n, p) = try_nb!(sock.recv_from(buf.as_mut())); - received = n; - - peer = p.as_pathname().map_or(String::new(), - |p| p.to_str().map_or(String::new(), |s| s.to_string())); - - } else { - panic!() - } - - if let RecvDgramState::Receiving { sock, buf } = mem::replace(&mut self.st, - RecvDgramState::Empty) { - Ok(Async::Ready((sock, buf, received, peer))) - } else { - panic!() - } - } -} diff --git a/third_party/rust/tokio-uds-0.1.7/src/ucred.rs b/third_party/rust/tokio-uds-0.1.7/src/ucred.rs deleted file mode 100644 index d4253a3606..0000000000 --- a/third_party/rust/tokio-uds-0.1.7/src/ucred.rs +++ /dev/null @@ -1,101 +0,0 @@ -use libc::{uid_t, gid_t}; - -/// Credentials of a process -#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] -pub struct UCred { - /// UID (user ID) of the process - pub uid: uid_t, - /// GID (group ID) of the process - pub gid: gid_t, -} - -#[cfg(target_os = "linux")] -pub use self::impl_linux::get_peer_cred; - -#[cfg(any(target_os = "macos", target_os = "ios", target_os = "freebsd"))] -pub use self::impl_macos::get_peer_cred; - -#[cfg(target_os = "linux")] -pub mod impl_linux { - use libc::{getsockopt, SOL_SOCKET, SO_PEERCRED, c_void}; - use std::{io, mem}; - use UnixStream; - use std::os::unix::io::AsRawFd; - - use libc::ucred; - - pub fn get_peer_cred(sock: &UnixStream) -> io::Result { - unsafe { - let raw_fd = sock.as_raw_fd(); - - let mut ucred = ucred { pid: 0, uid: 0, gid: 0 }; - - let ucred_size = mem::size_of::(); - - // These paranoid checks should be optimized-out - assert!(mem::size_of::() <= mem::size_of::()); - assert!(ucred_size <= u32::max_value() as usize); - - let mut ucred_size = ucred_size as u32; - - let ret = getsockopt(raw_fd, SOL_SOCKET, SO_PEERCRED, &mut ucred as *mut ucred as *mut c_void, &mut ucred_size); - if ret == 0 
&& ucred_size as usize == mem::size_of::() { - Ok(super::UCred { - uid: ucred.uid, - gid: ucred.gid, - }) - } else { - Err(io::Error::last_os_error()) - } - } - } -} - -#[cfg(any(target_os = "macos", target_os = "ios", target_os = "freebsd"))] -pub mod impl_macos { - use libc::getpeereid; - use std::{io, mem}; - use UnixStream; - use std::os::unix::io::AsRawFd; - - pub fn get_peer_cred(sock: &UnixStream) -> io::Result { - unsafe { - let raw_fd = sock.as_raw_fd(); - - let mut cred: super::UCred = mem::uninitialized(); - - let ret = getpeereid(raw_fd, &mut cred.uid, &mut cred.gid); - - if ret == 0 { - Ok(cred) - } else { - Err(io::Error::last_os_error()) - } - } - } -} - -#[cfg(test)] -mod test { - use tokio_core::reactor::Core; - use UnixStream; - use libc::geteuid; - use libc::getegid; - - #[test] - fn test_socket_pair() { - let core = Core::new().unwrap(); - let handle = core.handle(); - - let (a, b) = UnixStream::pair(&handle).unwrap(); - let cred_a = a.peer_cred().unwrap(); - let cred_b = b.peer_cred().unwrap(); - assert_eq!(cred_a, cred_b); - - let uid = unsafe { geteuid() }; - let gid = unsafe { getegid() }; - - assert_eq!(cred_a.uid, uid); - assert_eq!(cred_a.gid, gid); - } -} diff --git a/third_party/rust/unicase/.cargo-checksum.json b/third_party/rust/unicase/.cargo-checksum.json index a2d3c39454..827c5ab4d7 100644 --- a/third_party/rust/unicase/.cargo-checksum.json +++ b/third_party/rust/unicase/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"d8b056d05f0794bcc0e8852424342570ce4b7536df4ef63c1eb2065091330e82","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"b6f96c1e11a9adb0a60ff1fa3da58bf89296ba8269b50fd330c610e3dfedb4dc","README.md":"4c322162a8ef03e352df3379bfa67dc68660c4aa229ebda28b86d91e7fdda743","build.rs":"ace1b64010b84b7fe3ae538aa8e9743d1a9e139ce32b3e2a9ddfd1a9c86f709d","src/ascii.rs":"400bb2510841a0c9b8d77f4c112de207a25a13630a43e140fee3fdb28ea8764d","src/lib.rs":"a4ceb95ec45e9b7f4a95ee53dd84e3194998e37edc4400f4d4b196e64f6ee210","src/unicode/map.rs":"fee453fd13707608fce636c462ab6d1ee7691f7ebf7c07df7ab0ed03dfa6a43f","src/unicode/mod.rs":"06a9a74af66936957a859740403d593afb4f1b6e9cb5797885ba8d2e7bc24934"},"package":"a84e5511b2a947f3ae965dcb29b13b7b1691b6e7332cf5dbc1744138d5acb7f6"} \ No newline at end of file +{"files":{"Cargo.toml":"9d53be46ab942ae4b7684d673c9d910f492f923e582ae9b27f721e2b57e3874f","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"b6f96c1e11a9adb0a60ff1fa3da58bf89296ba8269b50fd330c610e3dfedb4dc","README.md":"779866876776401c3bdd80f23e02131fa1484b21e9ec56408534c97f5cafca1c","build.rs":"18352be12daf69f90a317a479badfa0c2feb508a717a93d0abe3d07c37f1c9f4","src/ascii.rs":"37690625c90dbcd442ac7027c9a4298c1258368b437882036520a966675f62ad","src/lib.rs":"89ea45853f3aa797ba9aa7c856e6d93b674b7d93623fa304f292ddcc61fd0390","src/unicode/map.rs":"3679235bfd5ff68b0a0040bfe687fda231b1934762158bc52e6b58e2a1fcc6cb","src/unicode/mod.rs":"d6183755af22f1f74c63b27ae2b779071030f70358923b4c73010276e492d16b"},"package":"50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6"} \ No newline at end of file diff --git a/third_party/rust/unicase/Cargo.toml b/third_party/rust/unicase/Cargo.toml index e16c29f944..d823e925a3 100644 --- a/third_party/rust/unicase/Cargo.toml +++ b/third_party/rust/unicase/Cargo.toml @@ -12,17 +12,19 @@ [package] name = "unicase" -version = "2.4.0" +version = "2.6.0" authors = ["Sean McArthur "] build = "build.rs" exclude = ["scripts/*"] description = 
"A case-insensitive wrapper around strings." documentation = "https://docs.rs/unicase" -keywords = ["lowercase", "case", "case-insensitive", "case-folding"] +readme = "README.md" +keywords = ["lowercase", "case", "case-insensitive", "case-folding", "no_std"] +categories = ["internationalization", "text-processing", "no-std"] license = "MIT/Apache-2.0" repository = "https://github.com/seanmonstar/unicase" [build-dependencies.version_check] -version = "0.1" +version = "0.9" [features] nightly = [] diff --git a/third_party/rust/unicase/README.md b/third_party/rust/unicase/README.md index e880299feb..9844b0795a 100644 --- a/third_party/rust/unicase/README.md +++ b/third_party/rust/unicase/README.md @@ -4,7 +4,7 @@ [Documentation](https://docs.rs/unicase) -Compare strings when case is not important. +Compare strings when case is not important (using Unicode Case-folding). ```rust // ignore ASCII case diff --git a/third_party/rust/unicase/build.rs b/third_party/rust/unicase/build.rs index 0ed3d8defc..6b22ab5f32 100644 --- a/third_party/rust/unicase/build.rs +++ b/third_party/rust/unicase/build.rs @@ -1,15 +1,23 @@ extern crate version_check as rustc; fn main() { - if rustc::is_min_version("1.5.0").map(|(is_min, _)| is_min).unwrap_or(true) { + if is_rustc_at_least("1.5.0") { println!("cargo:rustc-cfg=__unicase__iter_cmp"); } - if rustc::is_min_version("1.13.0").map(|(is_min, _)| is_min).unwrap_or(true) { + if is_rustc_at_least("1.13.0") { println!("cargo:rustc-cfg=__unicase__default_hasher"); } - if rustc::is_min_version("1.31.0").map(|(is_min, _)| is_min).unwrap_or(true) { + if is_rustc_at_least("1.31.0") { println!("cargo:rustc-cfg=__unicase__const_fns"); } + + if is_rustc_at_least("1.36.0") { + println!("cargo:rustc-cfg=__unicase__core_and_alloc"); + } +} + +fn is_rustc_at_least(v: &str) -> bool { + rustc::is_min_version(v).unwrap_or(true) } diff --git a/third_party/rust/unicase/src/ascii.rs b/third_party/rust/unicase/src/ascii.rs index d656cd2af9..8940733c38 100644 --- a/third_party/rust/unicase/src/ascii.rs +++ b/third_party/rust/unicase/src/ascii.rs @@ -1,11 +1,13 @@ +use alloc::string::String; +#[cfg(__unicase__iter_cmp)] +use core::cmp::Ordering; +use core::fmt; +use core::hash::{Hash, Hasher}; +use core::ops::{Deref, DerefMut}; +use core::str::FromStr; +#[cfg(not(__unicase__core_and_alloc))] #[allow(deprecated, unused)] use std::ascii::AsciiExt; -#[cfg(__unicase__iter_cmp)] -use std::cmp::Ordering; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::ops::{Deref, DerefMut}; -use std::str::FromStr; use super::{Ascii, Encoding, UniCase}; diff --git a/third_party/rust/unicase/src/lib.rs b/third_party/rust/unicase/src/lib.rs index 246cb8b05f..0c02b5ebca 100644 --- a/third_party/rust/unicase/src/lib.rs +++ b/third_party/rust/unicase/src/lib.rs @@ -1,7 +1,14 @@ #![cfg_attr(test, deny(missing_docs))] #![cfg_attr(test, deny(warnings))] -#![doc(html_root_url = "https://docs.rs/unicase/2.4.0")] +#![doc(html_root_url = "https://docs.rs/unicase/2.6.0")] #![cfg_attr(feature = "nightly", feature(test))] +#![cfg_attr( + all( + __unicase__core_and_alloc, + not(test), + ), + no_std, +)] //! # UniCase //! 
@@ -45,13 +52,23 @@ #[cfg(feature = "nightly")] extern crate test; -use std::borrow::Cow; +#[cfg(all(__unicase__core_and_alloc, not(test)))] +extern crate alloc; +#[cfg(all(__unicase__core_and_alloc, not(test)))] +use alloc::string::String; + +#[cfg(not(all(__unicase__core_and_alloc, not(test))))] +extern crate std as alloc; +#[cfg(not(all(__unicase__core_and_alloc, not(test))))] +extern crate std as core; + +use alloc::borrow::Cow; #[cfg(__unicase__iter_cmp)] -use std::cmp::Ordering; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::ops::{Deref, DerefMut}; -use std::str::FromStr; +use core::cmp::Ordering; +use core::fmt; +use core::hash::{Hash, Hasher}; +use core::ops::{Deref, DerefMut}; +use core::str::FromStr; use self::unicode::Unicode; @@ -118,8 +135,10 @@ impl> UniCase { /// /// Note: This scans the text to determine if it is all ASCII or not. pub fn new(s: S) -> UniCase { + #[cfg(not(__unicase__core_and_alloc))] #[allow(deprecated, unused)] use std::ascii::AsciiExt; + if s.as_ref().is_ascii() { UniCase(Encoding::Ascii(Ascii(s))) } else { @@ -341,7 +360,9 @@ mod tests { let c = UniCase::ascii("FoObAr"); assert_eq!(a, b); + assert_eq!(b, a); assert_eq!(a, c); + assert_eq!(c, a); assert_eq!(hash(&a), hash(&b)); assert_eq!(hash(&a), hash(&c)); assert!(a.is_ascii()); @@ -349,14 +370,26 @@ mod tests { assert!(c.is_ascii()); } + #[test] fn test_eq_unicode() { let a = UniCase::new("στιγμας"); let b = UniCase::new("στιγμασ"); assert_eq!(a, b); + assert_eq!(b, a); assert_eq!(hash(&a), hash(&b)); } + #[test] + fn test_eq_unicode_left_is_substring() { + // https://github.com/seanmonstar/unicase/issues/38 + let a = UniCase::unicode("foo"); + let b = UniCase::unicode("foobar"); + + assert!(a != b); + assert!(b != a); + } + #[cfg(feature = "nightly")] #[bench] fn bench_unicase_ascii(b: &mut ::test::Bencher) { diff --git a/third_party/rust/unicase/src/unicode/map.rs b/third_party/rust/unicase/src/unicode/map.rs index e71191810e..a6a045cb8b 100644 --- a/third_party/rust/unicase/src/unicode/map.rs +++ b/third_party/rust/unicase/src/unicode/map.rs @@ -1,1497 +1,1995 @@ // Generated by scripts/mapgen.py -// 2019-03-05 +// 2019-08-31 use super::fold::Fold; +use core::char; pub fn lookup(orig: char) -> Fold { - match orig as u32 { - 0x0041 => Fold::One('\u{0061}',), - 0x0042 => Fold::One('\u{0062}',), - 0x0043 => Fold::One('\u{0063}',), - 0x0044 => Fold::One('\u{0064}',), - 0x0045 => Fold::One('\u{0065}',), - 0x0046 => Fold::One('\u{0066}',), - 0x0047 => Fold::One('\u{0067}',), - 0x0048 => Fold::One('\u{0068}',), - 0x0049 => Fold::One('\u{0069}',), - 0x004A => Fold::One('\u{006A}',), - 0x004B => Fold::One('\u{006B}',), - 0x004C => Fold::One('\u{006C}',), - 0x004D => Fold::One('\u{006D}',), - 0x004E => Fold::One('\u{006E}',), - 0x004F => Fold::One('\u{006F}',), - 0x0050 => Fold::One('\u{0070}',), - 0x0051 => Fold::One('\u{0071}',), - 0x0052 => Fold::One('\u{0072}',), - 0x0053 => Fold::One('\u{0073}',), - 0x0054 => Fold::One('\u{0074}',), - 0x0055 => Fold::One('\u{0075}',), - 0x0056 => Fold::One('\u{0076}',), - 0x0057 => Fold::One('\u{0077}',), - 0x0058 => Fold::One('\u{0078}',), - 0x0059 => Fold::One('\u{0079}',), - 0x005A => Fold::One('\u{007A}',), - 0x00B5 => Fold::One('\u{03BC}',), - 0x00C0 => Fold::One('\u{00E0}',), - 0x00C1 => Fold::One('\u{00E1}',), - 0x00C2 => Fold::One('\u{00E2}',), - 0x00C3 => Fold::One('\u{00E3}',), - 0x00C4 => Fold::One('\u{00E4}',), - 0x00C5 => Fold::One('\u{00E5}',), - 0x00C6 => Fold::One('\u{00E6}',), - 0x00C7 => Fold::One('\u{00E7}',), - 0x00C8 => 
Fold::One('\u{00E8}',), - 0x00C9 => Fold::One('\u{00E9}',), - 0x00CA => Fold::One('\u{00EA}',), - 0x00CB => Fold::One('\u{00EB}',), - 0x00CC => Fold::One('\u{00EC}',), - 0x00CD => Fold::One('\u{00ED}',), - 0x00CE => Fold::One('\u{00EE}',), - 0x00CF => Fold::One('\u{00EF}',), - 0x00D0 => Fold::One('\u{00F0}',), - 0x00D1 => Fold::One('\u{00F1}',), - 0x00D2 => Fold::One('\u{00F2}',), - 0x00D3 => Fold::One('\u{00F3}',), - 0x00D4 => Fold::One('\u{00F4}',), - 0x00D5 => Fold::One('\u{00F5}',), - 0x00D6 => Fold::One('\u{00F6}',), - 0x00D8 => Fold::One('\u{00F8}',), - 0x00D9 => Fold::One('\u{00F9}',), - 0x00DA => Fold::One('\u{00FA}',), - 0x00DB => Fold::One('\u{00FB}',), - 0x00DC => Fold::One('\u{00FC}',), - 0x00DD => Fold::One('\u{00FD}',), - 0x00DE => Fold::One('\u{00FE}',), - 0x00DF => Fold::Two('\u{0073}', '\u{0073}',), - 0x0100 => Fold::One('\u{0101}',), - 0x0102 => Fold::One('\u{0103}',), - 0x0104 => Fold::One('\u{0105}',), - 0x0106 => Fold::One('\u{0107}',), - 0x0108 => Fold::One('\u{0109}',), - 0x010A => Fold::One('\u{010B}',), - 0x010C => Fold::One('\u{010D}',), - 0x010E => Fold::One('\u{010F}',), - 0x0110 => Fold::One('\u{0111}',), - 0x0112 => Fold::One('\u{0113}',), - 0x0114 => Fold::One('\u{0115}',), - 0x0116 => Fold::One('\u{0117}',), - 0x0118 => Fold::One('\u{0119}',), - 0x011A => Fold::One('\u{011B}',), - 0x011C => Fold::One('\u{011D}',), - 0x011E => Fold::One('\u{011F}',), - 0x0120 => Fold::One('\u{0121}',), - 0x0122 => Fold::One('\u{0123}',), - 0x0124 => Fold::One('\u{0125}',), - 0x0126 => Fold::One('\u{0127}',), - 0x0128 => Fold::One('\u{0129}',), - 0x012A => Fold::One('\u{012B}',), - 0x012C => Fold::One('\u{012D}',), - 0x012E => Fold::One('\u{012F}',), - 0x0130 => Fold::Two('\u{0069}', '\u{0307}',), - 0x0132 => Fold::One('\u{0133}',), - 0x0134 => Fold::One('\u{0135}',), - 0x0136 => Fold::One('\u{0137}',), - 0x0139 => Fold::One('\u{013A}',), - 0x013B => Fold::One('\u{013C}',), - 0x013D => Fold::One('\u{013E}',), - 0x013F => Fold::One('\u{0140}',), - 0x0141 => Fold::One('\u{0142}',), - 0x0143 => Fold::One('\u{0144}',), - 0x0145 => Fold::One('\u{0146}',), - 0x0147 => Fold::One('\u{0148}',), - 0x0149 => Fold::Two('\u{02BC}', '\u{006E}',), - 0x014A => Fold::One('\u{014B}',), - 0x014C => Fold::One('\u{014D}',), - 0x014E => Fold::One('\u{014F}',), - 0x0150 => Fold::One('\u{0151}',), - 0x0152 => Fold::One('\u{0153}',), - 0x0154 => Fold::One('\u{0155}',), - 0x0156 => Fold::One('\u{0157}',), - 0x0158 => Fold::One('\u{0159}',), - 0x015A => Fold::One('\u{015B}',), - 0x015C => Fold::One('\u{015D}',), - 0x015E => Fold::One('\u{015F}',), - 0x0160 => Fold::One('\u{0161}',), - 0x0162 => Fold::One('\u{0163}',), - 0x0164 => Fold::One('\u{0165}',), - 0x0166 => Fold::One('\u{0167}',), - 0x0168 => Fold::One('\u{0169}',), - 0x016A => Fold::One('\u{016B}',), - 0x016C => Fold::One('\u{016D}',), - 0x016E => Fold::One('\u{016F}',), - 0x0170 => Fold::One('\u{0171}',), - 0x0172 => Fold::One('\u{0173}',), - 0x0174 => Fold::One('\u{0175}',), - 0x0176 => Fold::One('\u{0177}',), - 0x0178 => Fold::One('\u{00FF}',), - 0x0179 => Fold::One('\u{017A}',), - 0x017B => Fold::One('\u{017C}',), - 0x017D => Fold::One('\u{017E}',), - 0x017F => Fold::One('\u{0073}',), - 0x0181 => Fold::One('\u{0253}',), - 0x0182 => Fold::One('\u{0183}',), - 0x0184 => Fold::One('\u{0185}',), - 0x0186 => Fold::One('\u{0254}',), - 0x0187 => Fold::One('\u{0188}',), - 0x0189 => Fold::One('\u{0256}',), - 0x018A => Fold::One('\u{0257}',), - 0x018B => Fold::One('\u{018C}',), - 0x018E => Fold::One('\u{01DD}',), - 0x018F => Fold::One('\u{0259}',), - 
0x0190 => Fold::One('\u{025B}',), - 0x0191 => Fold::One('\u{0192}',), - 0x0193 => Fold::One('\u{0260}',), - 0x0194 => Fold::One('\u{0263}',), - 0x0196 => Fold::One('\u{0269}',), - 0x0197 => Fold::One('\u{0268}',), - 0x0198 => Fold::One('\u{0199}',), - 0x019C => Fold::One('\u{026F}',), - 0x019D => Fold::One('\u{0272}',), - 0x019F => Fold::One('\u{0275}',), - 0x01A0 => Fold::One('\u{01A1}',), - 0x01A2 => Fold::One('\u{01A3}',), - 0x01A4 => Fold::One('\u{01A5}',), - 0x01A6 => Fold::One('\u{0280}',), - 0x01A7 => Fold::One('\u{01A8}',), - 0x01A9 => Fold::One('\u{0283}',), - 0x01AC => Fold::One('\u{01AD}',), - 0x01AE => Fold::One('\u{0288}',), - 0x01AF => Fold::One('\u{01B0}',), - 0x01B1 => Fold::One('\u{028A}',), - 0x01B2 => Fold::One('\u{028B}',), - 0x01B3 => Fold::One('\u{01B4}',), - 0x01B5 => Fold::One('\u{01B6}',), - 0x01B7 => Fold::One('\u{0292}',), - 0x01B8 => Fold::One('\u{01B9}',), - 0x01BC => Fold::One('\u{01BD}',), - 0x01C4 => Fold::One('\u{01C6}',), - 0x01C5 => Fold::One('\u{01C6}',), - 0x01C7 => Fold::One('\u{01C9}',), - 0x01C8 => Fold::One('\u{01C9}',), - 0x01CA => Fold::One('\u{01CC}',), - 0x01CB => Fold::One('\u{01CC}',), - 0x01CD => Fold::One('\u{01CE}',), - 0x01CF => Fold::One('\u{01D0}',), - 0x01D1 => Fold::One('\u{01D2}',), - 0x01D3 => Fold::One('\u{01D4}',), - 0x01D5 => Fold::One('\u{01D6}',), - 0x01D7 => Fold::One('\u{01D8}',), - 0x01D9 => Fold::One('\u{01DA}',), - 0x01DB => Fold::One('\u{01DC}',), - 0x01DE => Fold::One('\u{01DF}',), - 0x01E0 => Fold::One('\u{01E1}',), - 0x01E2 => Fold::One('\u{01E3}',), - 0x01E4 => Fold::One('\u{01E5}',), - 0x01E6 => Fold::One('\u{01E7}',), - 0x01E8 => Fold::One('\u{01E9}',), - 0x01EA => Fold::One('\u{01EB}',), - 0x01EC => Fold::One('\u{01ED}',), - 0x01EE => Fold::One('\u{01EF}',), - 0x01F0 => Fold::Two('\u{006A}', '\u{030C}',), - 0x01F1 => Fold::One('\u{01F3}',), - 0x01F2 => Fold::One('\u{01F3}',), - 0x01F4 => Fold::One('\u{01F5}',), - 0x01F6 => Fold::One('\u{0195}',), - 0x01F7 => Fold::One('\u{01BF}',), - 0x01F8 => Fold::One('\u{01F9}',), - 0x01FA => Fold::One('\u{01FB}',), - 0x01FC => Fold::One('\u{01FD}',), - 0x01FE => Fold::One('\u{01FF}',), - 0x0200 => Fold::One('\u{0201}',), - 0x0202 => Fold::One('\u{0203}',), - 0x0204 => Fold::One('\u{0205}',), - 0x0206 => Fold::One('\u{0207}',), - 0x0208 => Fold::One('\u{0209}',), - 0x020A => Fold::One('\u{020B}',), - 0x020C => Fold::One('\u{020D}',), - 0x020E => Fold::One('\u{020F}',), - 0x0210 => Fold::One('\u{0211}',), - 0x0212 => Fold::One('\u{0213}',), - 0x0214 => Fold::One('\u{0215}',), - 0x0216 => Fold::One('\u{0217}',), - 0x0218 => Fold::One('\u{0219}',), - 0x021A => Fold::One('\u{021B}',), - 0x021C => Fold::One('\u{021D}',), - 0x021E => Fold::One('\u{021F}',), - 0x0220 => Fold::One('\u{019E}',), - 0x0222 => Fold::One('\u{0223}',), - 0x0224 => Fold::One('\u{0225}',), - 0x0226 => Fold::One('\u{0227}',), - 0x0228 => Fold::One('\u{0229}',), - 0x022A => Fold::One('\u{022B}',), - 0x022C => Fold::One('\u{022D}',), - 0x022E => Fold::One('\u{022F}',), - 0x0230 => Fold::One('\u{0231}',), - 0x0232 => Fold::One('\u{0233}',), - 0x023A => Fold::One('\u{2C65}',), - 0x023B => Fold::One('\u{023C}',), - 0x023D => Fold::One('\u{019A}',), - 0x023E => Fold::One('\u{2C66}',), - 0x0241 => Fold::One('\u{0242}',), - 0x0243 => Fold::One('\u{0180}',), - 0x0244 => Fold::One('\u{0289}',), - 0x0245 => Fold::One('\u{028C}',), - 0x0246 => Fold::One('\u{0247}',), - 0x0248 => Fold::One('\u{0249}',), - 0x024A => Fold::One('\u{024B}',), - 0x024C => Fold::One('\u{024D}',), - 0x024E => Fold::One('\u{024F}',), - 0x0345 => 
Fold::One('\u{03B9}',), - 0x0370 => Fold::One('\u{0371}',), - 0x0372 => Fold::One('\u{0373}',), - 0x0376 => Fold::One('\u{0377}',), - 0x037F => Fold::One('\u{03F3}',), - 0x0386 => Fold::One('\u{03AC}',), - 0x0388 => Fold::One('\u{03AD}',), - 0x0389 => Fold::One('\u{03AE}',), - 0x038A => Fold::One('\u{03AF}',), - 0x038C => Fold::One('\u{03CC}',), - 0x038E => Fold::One('\u{03CD}',), - 0x038F => Fold::One('\u{03CE}',), - 0x0390 => Fold::Three('\u{03B9}', '\u{0308}', '\u{0301}',), - 0x0391 => Fold::One('\u{03B1}',), - 0x0392 => Fold::One('\u{03B2}',), - 0x0393 => Fold::One('\u{03B3}',), - 0x0394 => Fold::One('\u{03B4}',), - 0x0395 => Fold::One('\u{03B5}',), - 0x0396 => Fold::One('\u{03B6}',), - 0x0397 => Fold::One('\u{03B7}',), - 0x0398 => Fold::One('\u{03B8}',), - 0x0399 => Fold::One('\u{03B9}',), - 0x039A => Fold::One('\u{03BA}',), - 0x039B => Fold::One('\u{03BB}',), - 0x039C => Fold::One('\u{03BC}',), - 0x039D => Fold::One('\u{03BD}',), - 0x039E => Fold::One('\u{03BE}',), - 0x039F => Fold::One('\u{03BF}',), - 0x03A0 => Fold::One('\u{03C0}',), - 0x03A1 => Fold::One('\u{03C1}',), - 0x03A3 => Fold::One('\u{03C3}',), - 0x03A4 => Fold::One('\u{03C4}',), - 0x03A5 => Fold::One('\u{03C5}',), - 0x03A6 => Fold::One('\u{03C6}',), - 0x03A7 => Fold::One('\u{03C7}',), - 0x03A8 => Fold::One('\u{03C8}',), - 0x03A9 => Fold::One('\u{03C9}',), - 0x03AA => Fold::One('\u{03CA}',), - 0x03AB => Fold::One('\u{03CB}',), - 0x03B0 => Fold::Three('\u{03C5}', '\u{0308}', '\u{0301}',), - 0x03C2 => Fold::One('\u{03C3}',), - 0x03CF => Fold::One('\u{03D7}',), - 0x03D0 => Fold::One('\u{03B2}',), - 0x03D1 => Fold::One('\u{03B8}',), - 0x03D5 => Fold::One('\u{03C6}',), - 0x03D6 => Fold::One('\u{03C0}',), - 0x03D8 => Fold::One('\u{03D9}',), - 0x03DA => Fold::One('\u{03DB}',), - 0x03DC => Fold::One('\u{03DD}',), - 0x03DE => Fold::One('\u{03DF}',), - 0x03E0 => Fold::One('\u{03E1}',), - 0x03E2 => Fold::One('\u{03E3}',), - 0x03E4 => Fold::One('\u{03E5}',), - 0x03E6 => Fold::One('\u{03E7}',), - 0x03E8 => Fold::One('\u{03E9}',), - 0x03EA => Fold::One('\u{03EB}',), - 0x03EC => Fold::One('\u{03ED}',), - 0x03EE => Fold::One('\u{03EF}',), - 0x03F0 => Fold::One('\u{03BA}',), - 0x03F1 => Fold::One('\u{03C1}',), - 0x03F4 => Fold::One('\u{03B8}',), - 0x03F5 => Fold::One('\u{03B5}',), - 0x03F7 => Fold::One('\u{03F8}',), - 0x03F9 => Fold::One('\u{03F2}',), - 0x03FA => Fold::One('\u{03FB}',), - 0x03FD => Fold::One('\u{037B}',), - 0x03FE => Fold::One('\u{037C}',), - 0x03FF => Fold::One('\u{037D}',), - 0x0400 => Fold::One('\u{0450}',), - 0x0401 => Fold::One('\u{0451}',), - 0x0402 => Fold::One('\u{0452}',), - 0x0403 => Fold::One('\u{0453}',), - 0x0404 => Fold::One('\u{0454}',), - 0x0405 => Fold::One('\u{0455}',), - 0x0406 => Fold::One('\u{0456}',), - 0x0407 => Fold::One('\u{0457}',), - 0x0408 => Fold::One('\u{0458}',), - 0x0409 => Fold::One('\u{0459}',), - 0x040A => Fold::One('\u{045A}',), - 0x040B => Fold::One('\u{045B}',), - 0x040C => Fold::One('\u{045C}',), - 0x040D => Fold::One('\u{045D}',), - 0x040E => Fold::One('\u{045E}',), - 0x040F => Fold::One('\u{045F}',), - 0x0410 => Fold::One('\u{0430}',), - 0x0411 => Fold::One('\u{0431}',), - 0x0412 => Fold::One('\u{0432}',), - 0x0413 => Fold::One('\u{0433}',), - 0x0414 => Fold::One('\u{0434}',), - 0x0415 => Fold::One('\u{0435}',), - 0x0416 => Fold::One('\u{0436}',), - 0x0417 => Fold::One('\u{0437}',), - 0x0418 => Fold::One('\u{0438}',), - 0x0419 => Fold::One('\u{0439}',), - 0x041A => Fold::One('\u{043A}',), - 0x041B => Fold::One('\u{043B}',), - 0x041C => Fold::One('\u{043C}',), - 0x041D => 
Fold::One('\u{043D}',), - 0x041E => Fold::One('\u{043E}',), - 0x041F => Fold::One('\u{043F}',), - 0x0420 => Fold::One('\u{0440}',), - 0x0421 => Fold::One('\u{0441}',), - 0x0422 => Fold::One('\u{0442}',), - 0x0423 => Fold::One('\u{0443}',), - 0x0424 => Fold::One('\u{0444}',), - 0x0425 => Fold::One('\u{0445}',), - 0x0426 => Fold::One('\u{0446}',), - 0x0427 => Fold::One('\u{0447}',), - 0x0428 => Fold::One('\u{0448}',), - 0x0429 => Fold::One('\u{0449}',), - 0x042A => Fold::One('\u{044A}',), - 0x042B => Fold::One('\u{044B}',), - 0x042C => Fold::One('\u{044C}',), - 0x042D => Fold::One('\u{044D}',), - 0x042E => Fold::One('\u{044E}',), - 0x042F => Fold::One('\u{044F}',), - 0x0460 => Fold::One('\u{0461}',), - 0x0462 => Fold::One('\u{0463}',), - 0x0464 => Fold::One('\u{0465}',), - 0x0466 => Fold::One('\u{0467}',), - 0x0468 => Fold::One('\u{0469}',), - 0x046A => Fold::One('\u{046B}',), - 0x046C => Fold::One('\u{046D}',), - 0x046E => Fold::One('\u{046F}',), - 0x0470 => Fold::One('\u{0471}',), - 0x0472 => Fold::One('\u{0473}',), - 0x0474 => Fold::One('\u{0475}',), - 0x0476 => Fold::One('\u{0477}',), - 0x0478 => Fold::One('\u{0479}',), - 0x047A => Fold::One('\u{047B}',), - 0x047C => Fold::One('\u{047D}',), - 0x047E => Fold::One('\u{047F}',), - 0x0480 => Fold::One('\u{0481}',), - 0x048A => Fold::One('\u{048B}',), - 0x048C => Fold::One('\u{048D}',), - 0x048E => Fold::One('\u{048F}',), - 0x0490 => Fold::One('\u{0491}',), - 0x0492 => Fold::One('\u{0493}',), - 0x0494 => Fold::One('\u{0495}',), - 0x0496 => Fold::One('\u{0497}',), - 0x0498 => Fold::One('\u{0499}',), - 0x049A => Fold::One('\u{049B}',), - 0x049C => Fold::One('\u{049D}',), - 0x049E => Fold::One('\u{049F}',), - 0x04A0 => Fold::One('\u{04A1}',), - 0x04A2 => Fold::One('\u{04A3}',), - 0x04A4 => Fold::One('\u{04A5}',), - 0x04A6 => Fold::One('\u{04A7}',), - 0x04A8 => Fold::One('\u{04A9}',), - 0x04AA => Fold::One('\u{04AB}',), - 0x04AC => Fold::One('\u{04AD}',), - 0x04AE => Fold::One('\u{04AF}',), - 0x04B0 => Fold::One('\u{04B1}',), - 0x04B2 => Fold::One('\u{04B3}',), - 0x04B4 => Fold::One('\u{04B5}',), - 0x04B6 => Fold::One('\u{04B7}',), - 0x04B8 => Fold::One('\u{04B9}',), - 0x04BA => Fold::One('\u{04BB}',), - 0x04BC => Fold::One('\u{04BD}',), - 0x04BE => Fold::One('\u{04BF}',), - 0x04C0 => Fold::One('\u{04CF}',), - 0x04C1 => Fold::One('\u{04C2}',), - 0x04C3 => Fold::One('\u{04C4}',), - 0x04C5 => Fold::One('\u{04C6}',), - 0x04C7 => Fold::One('\u{04C8}',), - 0x04C9 => Fold::One('\u{04CA}',), - 0x04CB => Fold::One('\u{04CC}',), - 0x04CD => Fold::One('\u{04CE}',), - 0x04D0 => Fold::One('\u{04D1}',), - 0x04D2 => Fold::One('\u{04D3}',), - 0x04D4 => Fold::One('\u{04D5}',), - 0x04D6 => Fold::One('\u{04D7}',), - 0x04D8 => Fold::One('\u{04D9}',), - 0x04DA => Fold::One('\u{04DB}',), - 0x04DC => Fold::One('\u{04DD}',), - 0x04DE => Fold::One('\u{04DF}',), - 0x04E0 => Fold::One('\u{04E1}',), - 0x04E2 => Fold::One('\u{04E3}',), - 0x04E4 => Fold::One('\u{04E5}',), - 0x04E6 => Fold::One('\u{04E7}',), - 0x04E8 => Fold::One('\u{04E9}',), - 0x04EA => Fold::One('\u{04EB}',), - 0x04EC => Fold::One('\u{04ED}',), - 0x04EE => Fold::One('\u{04EF}',), - 0x04F0 => Fold::One('\u{04F1}',), - 0x04F2 => Fold::One('\u{04F3}',), - 0x04F4 => Fold::One('\u{04F5}',), - 0x04F6 => Fold::One('\u{04F7}',), - 0x04F8 => Fold::One('\u{04F9}',), - 0x04FA => Fold::One('\u{04FB}',), - 0x04FC => Fold::One('\u{04FD}',), - 0x04FE => Fold::One('\u{04FF}',), - 0x0500 => Fold::One('\u{0501}',), - 0x0502 => Fold::One('\u{0503}',), - 0x0504 => Fold::One('\u{0505}',), - 0x0506 => Fold::One('\u{0507}',), - 
0x0508 => Fold::One('\u{0509}',), - 0x050A => Fold::One('\u{050B}',), - 0x050C => Fold::One('\u{050D}',), - 0x050E => Fold::One('\u{050F}',), - 0x0510 => Fold::One('\u{0511}',), - 0x0512 => Fold::One('\u{0513}',), - 0x0514 => Fold::One('\u{0515}',), - 0x0516 => Fold::One('\u{0517}',), - 0x0518 => Fold::One('\u{0519}',), - 0x051A => Fold::One('\u{051B}',), - 0x051C => Fold::One('\u{051D}',), - 0x051E => Fold::One('\u{051F}',), - 0x0520 => Fold::One('\u{0521}',), - 0x0522 => Fold::One('\u{0523}',), - 0x0524 => Fold::One('\u{0525}',), - 0x0526 => Fold::One('\u{0527}',), - 0x0528 => Fold::One('\u{0529}',), - 0x052A => Fold::One('\u{052B}',), - 0x052C => Fold::One('\u{052D}',), - 0x052E => Fold::One('\u{052F}',), - 0x0531 => Fold::One('\u{0561}',), - 0x0532 => Fold::One('\u{0562}',), - 0x0533 => Fold::One('\u{0563}',), - 0x0534 => Fold::One('\u{0564}',), - 0x0535 => Fold::One('\u{0565}',), - 0x0536 => Fold::One('\u{0566}',), - 0x0537 => Fold::One('\u{0567}',), - 0x0538 => Fold::One('\u{0568}',), - 0x0539 => Fold::One('\u{0569}',), - 0x053A => Fold::One('\u{056A}',), - 0x053B => Fold::One('\u{056B}',), - 0x053C => Fold::One('\u{056C}',), - 0x053D => Fold::One('\u{056D}',), - 0x053E => Fold::One('\u{056E}',), - 0x053F => Fold::One('\u{056F}',), - 0x0540 => Fold::One('\u{0570}',), - 0x0541 => Fold::One('\u{0571}',), - 0x0542 => Fold::One('\u{0572}',), - 0x0543 => Fold::One('\u{0573}',), - 0x0544 => Fold::One('\u{0574}',), - 0x0545 => Fold::One('\u{0575}',), - 0x0546 => Fold::One('\u{0576}',), - 0x0547 => Fold::One('\u{0577}',), - 0x0548 => Fold::One('\u{0578}',), - 0x0549 => Fold::One('\u{0579}',), - 0x054A => Fold::One('\u{057A}',), - 0x054B => Fold::One('\u{057B}',), - 0x054C => Fold::One('\u{057C}',), - 0x054D => Fold::One('\u{057D}',), - 0x054E => Fold::One('\u{057E}',), - 0x054F => Fold::One('\u{057F}',), - 0x0550 => Fold::One('\u{0580}',), - 0x0551 => Fold::One('\u{0581}',), - 0x0552 => Fold::One('\u{0582}',), - 0x0553 => Fold::One('\u{0583}',), - 0x0554 => Fold::One('\u{0584}',), - 0x0555 => Fold::One('\u{0585}',), - 0x0556 => Fold::One('\u{0586}',), - 0x0587 => Fold::Two('\u{0565}', '\u{0582}',), - 0x10A0 => Fold::One('\u{2D00}',), - 0x10A1 => Fold::One('\u{2D01}',), - 0x10A2 => Fold::One('\u{2D02}',), - 0x10A3 => Fold::One('\u{2D03}',), - 0x10A4 => Fold::One('\u{2D04}',), - 0x10A5 => Fold::One('\u{2D05}',), - 0x10A6 => Fold::One('\u{2D06}',), - 0x10A7 => Fold::One('\u{2D07}',), - 0x10A8 => Fold::One('\u{2D08}',), - 0x10A9 => Fold::One('\u{2D09}',), - 0x10AA => Fold::One('\u{2D0A}',), - 0x10AB => Fold::One('\u{2D0B}',), - 0x10AC => Fold::One('\u{2D0C}',), - 0x10AD => Fold::One('\u{2D0D}',), - 0x10AE => Fold::One('\u{2D0E}',), - 0x10AF => Fold::One('\u{2D0F}',), - 0x10B0 => Fold::One('\u{2D10}',), - 0x10B1 => Fold::One('\u{2D11}',), - 0x10B2 => Fold::One('\u{2D12}',), - 0x10B3 => Fold::One('\u{2D13}',), - 0x10B4 => Fold::One('\u{2D14}',), - 0x10B5 => Fold::One('\u{2D15}',), - 0x10B6 => Fold::One('\u{2D16}',), - 0x10B7 => Fold::One('\u{2D17}',), - 0x10B8 => Fold::One('\u{2D18}',), - 0x10B9 => Fold::One('\u{2D19}',), - 0x10BA => Fold::One('\u{2D1A}',), - 0x10BB => Fold::One('\u{2D1B}',), - 0x10BC => Fold::One('\u{2D1C}',), - 0x10BD => Fold::One('\u{2D1D}',), - 0x10BE => Fold::One('\u{2D1E}',), - 0x10BF => Fold::One('\u{2D1F}',), - 0x10C0 => Fold::One('\u{2D20}',), - 0x10C1 => Fold::One('\u{2D21}',), - 0x10C2 => Fold::One('\u{2D22}',), - 0x10C3 => Fold::One('\u{2D23}',), - 0x10C4 => Fold::One('\u{2D24}',), - 0x10C5 => Fold::One('\u{2D25}',), - 0x10C7 => Fold::One('\u{2D27}',), - 0x10CD => 
Fold::One('\u{2D2D}',), - 0x13F8 => Fold::One('\u{13F0}',), - 0x13F9 => Fold::One('\u{13F1}',), - 0x13FA => Fold::One('\u{13F2}',), - 0x13FB => Fold::One('\u{13F3}',), - 0x13FC => Fold::One('\u{13F4}',), - 0x13FD => Fold::One('\u{13F5}',), - 0x1C80 => Fold::One('\u{0432}',), - 0x1C81 => Fold::One('\u{0434}',), - 0x1C82 => Fold::One('\u{043E}',), - 0x1C83 => Fold::One('\u{0441}',), - 0x1C84 => Fold::One('\u{0442}',), - 0x1C85 => Fold::One('\u{0442}',), - 0x1C86 => Fold::One('\u{044A}',), - 0x1C87 => Fold::One('\u{0463}',), - 0x1C88 => Fold::One('\u{A64B}',), - 0x1C90 => Fold::One('\u{10D0}',), - 0x1C91 => Fold::One('\u{10D1}',), - 0x1C92 => Fold::One('\u{10D2}',), - 0x1C93 => Fold::One('\u{10D3}',), - 0x1C94 => Fold::One('\u{10D4}',), - 0x1C95 => Fold::One('\u{10D5}',), - 0x1C96 => Fold::One('\u{10D6}',), - 0x1C97 => Fold::One('\u{10D7}',), - 0x1C98 => Fold::One('\u{10D8}',), - 0x1C99 => Fold::One('\u{10D9}',), - 0x1C9A => Fold::One('\u{10DA}',), - 0x1C9B => Fold::One('\u{10DB}',), - 0x1C9C => Fold::One('\u{10DC}',), - 0x1C9D => Fold::One('\u{10DD}',), - 0x1C9E => Fold::One('\u{10DE}',), - 0x1C9F => Fold::One('\u{10DF}',), - 0x1CA0 => Fold::One('\u{10E0}',), - 0x1CA1 => Fold::One('\u{10E1}',), - 0x1CA2 => Fold::One('\u{10E2}',), - 0x1CA3 => Fold::One('\u{10E3}',), - 0x1CA4 => Fold::One('\u{10E4}',), - 0x1CA5 => Fold::One('\u{10E5}',), - 0x1CA6 => Fold::One('\u{10E6}',), - 0x1CA7 => Fold::One('\u{10E7}',), - 0x1CA8 => Fold::One('\u{10E8}',), - 0x1CA9 => Fold::One('\u{10E9}',), - 0x1CAA => Fold::One('\u{10EA}',), - 0x1CAB => Fold::One('\u{10EB}',), - 0x1CAC => Fold::One('\u{10EC}',), - 0x1CAD => Fold::One('\u{10ED}',), - 0x1CAE => Fold::One('\u{10EE}',), - 0x1CAF => Fold::One('\u{10EF}',), - 0x1CB0 => Fold::One('\u{10F0}',), - 0x1CB1 => Fold::One('\u{10F1}',), - 0x1CB2 => Fold::One('\u{10F2}',), - 0x1CB3 => Fold::One('\u{10F3}',), - 0x1CB4 => Fold::One('\u{10F4}',), - 0x1CB5 => Fold::One('\u{10F5}',), - 0x1CB6 => Fold::One('\u{10F6}',), - 0x1CB7 => Fold::One('\u{10F7}',), - 0x1CB8 => Fold::One('\u{10F8}',), - 0x1CB9 => Fold::One('\u{10F9}',), - 0x1CBA => Fold::One('\u{10FA}',), - 0x1CBD => Fold::One('\u{10FD}',), - 0x1CBE => Fold::One('\u{10FE}',), - 0x1CBF => Fold::One('\u{10FF}',), - 0x1E00 => Fold::One('\u{1E01}',), - 0x1E02 => Fold::One('\u{1E03}',), - 0x1E04 => Fold::One('\u{1E05}',), - 0x1E06 => Fold::One('\u{1E07}',), - 0x1E08 => Fold::One('\u{1E09}',), - 0x1E0A => Fold::One('\u{1E0B}',), - 0x1E0C => Fold::One('\u{1E0D}',), - 0x1E0E => Fold::One('\u{1E0F}',), - 0x1E10 => Fold::One('\u{1E11}',), - 0x1E12 => Fold::One('\u{1E13}',), - 0x1E14 => Fold::One('\u{1E15}',), - 0x1E16 => Fold::One('\u{1E17}',), - 0x1E18 => Fold::One('\u{1E19}',), - 0x1E1A => Fold::One('\u{1E1B}',), - 0x1E1C => Fold::One('\u{1E1D}',), - 0x1E1E => Fold::One('\u{1E1F}',), - 0x1E20 => Fold::One('\u{1E21}',), - 0x1E22 => Fold::One('\u{1E23}',), - 0x1E24 => Fold::One('\u{1E25}',), - 0x1E26 => Fold::One('\u{1E27}',), - 0x1E28 => Fold::One('\u{1E29}',), - 0x1E2A => Fold::One('\u{1E2B}',), - 0x1E2C => Fold::One('\u{1E2D}',), - 0x1E2E => Fold::One('\u{1E2F}',), - 0x1E30 => Fold::One('\u{1E31}',), - 0x1E32 => Fold::One('\u{1E33}',), - 0x1E34 => Fold::One('\u{1E35}',), - 0x1E36 => Fold::One('\u{1E37}',), - 0x1E38 => Fold::One('\u{1E39}',), - 0x1E3A => Fold::One('\u{1E3B}',), - 0x1E3C => Fold::One('\u{1E3D}',), - 0x1E3E => Fold::One('\u{1E3F}',), - 0x1E40 => Fold::One('\u{1E41}',), - 0x1E42 => Fold::One('\u{1E43}',), - 0x1E44 => Fold::One('\u{1E45}',), - 0x1E46 => Fold::One('\u{1E47}',), - 0x1E48 => Fold::One('\u{1E49}',), - 
0x1E4A => Fold::One('\u{1E4B}',), - 0x1E4C => Fold::One('\u{1E4D}',), - 0x1E4E => Fold::One('\u{1E4F}',), - 0x1E50 => Fold::One('\u{1E51}',), - 0x1E52 => Fold::One('\u{1E53}',), - 0x1E54 => Fold::One('\u{1E55}',), - 0x1E56 => Fold::One('\u{1E57}',), - 0x1E58 => Fold::One('\u{1E59}',), - 0x1E5A => Fold::One('\u{1E5B}',), - 0x1E5C => Fold::One('\u{1E5D}',), - 0x1E5E => Fold::One('\u{1E5F}',), - 0x1E60 => Fold::One('\u{1E61}',), - 0x1E62 => Fold::One('\u{1E63}',), - 0x1E64 => Fold::One('\u{1E65}',), - 0x1E66 => Fold::One('\u{1E67}',), - 0x1E68 => Fold::One('\u{1E69}',), - 0x1E6A => Fold::One('\u{1E6B}',), - 0x1E6C => Fold::One('\u{1E6D}',), - 0x1E6E => Fold::One('\u{1E6F}',), - 0x1E70 => Fold::One('\u{1E71}',), - 0x1E72 => Fold::One('\u{1E73}',), - 0x1E74 => Fold::One('\u{1E75}',), - 0x1E76 => Fold::One('\u{1E77}',), - 0x1E78 => Fold::One('\u{1E79}',), - 0x1E7A => Fold::One('\u{1E7B}',), - 0x1E7C => Fold::One('\u{1E7D}',), - 0x1E7E => Fold::One('\u{1E7F}',), - 0x1E80 => Fold::One('\u{1E81}',), - 0x1E82 => Fold::One('\u{1E83}',), - 0x1E84 => Fold::One('\u{1E85}',), - 0x1E86 => Fold::One('\u{1E87}',), - 0x1E88 => Fold::One('\u{1E89}',), - 0x1E8A => Fold::One('\u{1E8B}',), - 0x1E8C => Fold::One('\u{1E8D}',), - 0x1E8E => Fold::One('\u{1E8F}',), - 0x1E90 => Fold::One('\u{1E91}',), - 0x1E92 => Fold::One('\u{1E93}',), - 0x1E94 => Fold::One('\u{1E95}',), - 0x1E96 => Fold::Two('\u{0068}', '\u{0331}',), - 0x1E97 => Fold::Two('\u{0074}', '\u{0308}',), - 0x1E98 => Fold::Two('\u{0077}', '\u{030A}',), - 0x1E99 => Fold::Two('\u{0079}', '\u{030A}',), - 0x1E9A => Fold::Two('\u{0061}', '\u{02BE}',), - 0x1E9B => Fold::One('\u{1E61}',), - 0x1E9E => Fold::Two('\u{0073}', '\u{0073}',), - 0x1EA0 => Fold::One('\u{1EA1}',), - 0x1EA2 => Fold::One('\u{1EA3}',), - 0x1EA4 => Fold::One('\u{1EA5}',), - 0x1EA6 => Fold::One('\u{1EA7}',), - 0x1EA8 => Fold::One('\u{1EA9}',), - 0x1EAA => Fold::One('\u{1EAB}',), - 0x1EAC => Fold::One('\u{1EAD}',), - 0x1EAE => Fold::One('\u{1EAF}',), - 0x1EB0 => Fold::One('\u{1EB1}',), - 0x1EB2 => Fold::One('\u{1EB3}',), - 0x1EB4 => Fold::One('\u{1EB5}',), - 0x1EB6 => Fold::One('\u{1EB7}',), - 0x1EB8 => Fold::One('\u{1EB9}',), - 0x1EBA => Fold::One('\u{1EBB}',), - 0x1EBC => Fold::One('\u{1EBD}',), - 0x1EBE => Fold::One('\u{1EBF}',), - 0x1EC0 => Fold::One('\u{1EC1}',), - 0x1EC2 => Fold::One('\u{1EC3}',), - 0x1EC4 => Fold::One('\u{1EC5}',), - 0x1EC6 => Fold::One('\u{1EC7}',), - 0x1EC8 => Fold::One('\u{1EC9}',), - 0x1ECA => Fold::One('\u{1ECB}',), - 0x1ECC => Fold::One('\u{1ECD}',), - 0x1ECE => Fold::One('\u{1ECF}',), - 0x1ED0 => Fold::One('\u{1ED1}',), - 0x1ED2 => Fold::One('\u{1ED3}',), - 0x1ED4 => Fold::One('\u{1ED5}',), - 0x1ED6 => Fold::One('\u{1ED7}',), - 0x1ED8 => Fold::One('\u{1ED9}',), - 0x1EDA => Fold::One('\u{1EDB}',), - 0x1EDC => Fold::One('\u{1EDD}',), - 0x1EDE => Fold::One('\u{1EDF}',), - 0x1EE0 => Fold::One('\u{1EE1}',), - 0x1EE2 => Fold::One('\u{1EE3}',), - 0x1EE4 => Fold::One('\u{1EE5}',), - 0x1EE6 => Fold::One('\u{1EE7}',), - 0x1EE8 => Fold::One('\u{1EE9}',), - 0x1EEA => Fold::One('\u{1EEB}',), - 0x1EEC => Fold::One('\u{1EED}',), - 0x1EEE => Fold::One('\u{1EEF}',), - 0x1EF0 => Fold::One('\u{1EF1}',), - 0x1EF2 => Fold::One('\u{1EF3}',), - 0x1EF4 => Fold::One('\u{1EF5}',), - 0x1EF6 => Fold::One('\u{1EF7}',), - 0x1EF8 => Fold::One('\u{1EF9}',), - 0x1EFA => Fold::One('\u{1EFB}',), - 0x1EFC => Fold::One('\u{1EFD}',), - 0x1EFE => Fold::One('\u{1EFF}',), - 0x1F08 => Fold::One('\u{1F00}',), - 0x1F09 => Fold::One('\u{1F01}',), - 0x1F0A => Fold::One('\u{1F02}',), - 0x1F0B => 
Fold::One('\u{1F03}',), - 0x1F0C => Fold::One('\u{1F04}',), - 0x1F0D => Fold::One('\u{1F05}',), - 0x1F0E => Fold::One('\u{1F06}',), - 0x1F0F => Fold::One('\u{1F07}',), - 0x1F18 => Fold::One('\u{1F10}',), - 0x1F19 => Fold::One('\u{1F11}',), - 0x1F1A => Fold::One('\u{1F12}',), - 0x1F1B => Fold::One('\u{1F13}',), - 0x1F1C => Fold::One('\u{1F14}',), - 0x1F1D => Fold::One('\u{1F15}',), - 0x1F28 => Fold::One('\u{1F20}',), - 0x1F29 => Fold::One('\u{1F21}',), - 0x1F2A => Fold::One('\u{1F22}',), - 0x1F2B => Fold::One('\u{1F23}',), - 0x1F2C => Fold::One('\u{1F24}',), - 0x1F2D => Fold::One('\u{1F25}',), - 0x1F2E => Fold::One('\u{1F26}',), - 0x1F2F => Fold::One('\u{1F27}',), - 0x1F38 => Fold::One('\u{1F30}',), - 0x1F39 => Fold::One('\u{1F31}',), - 0x1F3A => Fold::One('\u{1F32}',), - 0x1F3B => Fold::One('\u{1F33}',), - 0x1F3C => Fold::One('\u{1F34}',), - 0x1F3D => Fold::One('\u{1F35}',), - 0x1F3E => Fold::One('\u{1F36}',), - 0x1F3F => Fold::One('\u{1F37}',), - 0x1F48 => Fold::One('\u{1F40}',), - 0x1F49 => Fold::One('\u{1F41}',), - 0x1F4A => Fold::One('\u{1F42}',), - 0x1F4B => Fold::One('\u{1F43}',), - 0x1F4C => Fold::One('\u{1F44}',), - 0x1F4D => Fold::One('\u{1F45}',), - 0x1F50 => Fold::Two('\u{03C5}', '\u{0313}',), - 0x1F52 => Fold::Three('\u{03C5}', '\u{0313}', '\u{0300}',), - 0x1F54 => Fold::Three('\u{03C5}', '\u{0313}', '\u{0301}',), - 0x1F56 => Fold::Three('\u{03C5}', '\u{0313}', '\u{0342}',), - 0x1F59 => Fold::One('\u{1F51}',), - 0x1F5B => Fold::One('\u{1F53}',), - 0x1F5D => Fold::One('\u{1F55}',), - 0x1F5F => Fold::One('\u{1F57}',), - 0x1F68 => Fold::One('\u{1F60}',), - 0x1F69 => Fold::One('\u{1F61}',), - 0x1F6A => Fold::One('\u{1F62}',), - 0x1F6B => Fold::One('\u{1F63}',), - 0x1F6C => Fold::One('\u{1F64}',), - 0x1F6D => Fold::One('\u{1F65}',), - 0x1F6E => Fold::One('\u{1F66}',), - 0x1F6F => Fold::One('\u{1F67}',), - 0x1F80 => Fold::Two('\u{1F00}', '\u{03B9}',), - 0x1F81 => Fold::Two('\u{1F01}', '\u{03B9}',), - 0x1F82 => Fold::Two('\u{1F02}', '\u{03B9}',), - 0x1F83 => Fold::Two('\u{1F03}', '\u{03B9}',), - 0x1F84 => Fold::Two('\u{1F04}', '\u{03B9}',), - 0x1F85 => Fold::Two('\u{1F05}', '\u{03B9}',), - 0x1F86 => Fold::Two('\u{1F06}', '\u{03B9}',), - 0x1F87 => Fold::Two('\u{1F07}', '\u{03B9}',), - 0x1F88 => Fold::Two('\u{1F00}', '\u{03B9}',), - 0x1F89 => Fold::Two('\u{1F01}', '\u{03B9}',), - 0x1F8A => Fold::Two('\u{1F02}', '\u{03B9}',), - 0x1F8B => Fold::Two('\u{1F03}', '\u{03B9}',), - 0x1F8C => Fold::Two('\u{1F04}', '\u{03B9}',), - 0x1F8D => Fold::Two('\u{1F05}', '\u{03B9}',), - 0x1F8E => Fold::Two('\u{1F06}', '\u{03B9}',), - 0x1F8F => Fold::Two('\u{1F07}', '\u{03B9}',), - 0x1F90 => Fold::Two('\u{1F20}', '\u{03B9}',), - 0x1F91 => Fold::Two('\u{1F21}', '\u{03B9}',), - 0x1F92 => Fold::Two('\u{1F22}', '\u{03B9}',), - 0x1F93 => Fold::Two('\u{1F23}', '\u{03B9}',), - 0x1F94 => Fold::Two('\u{1F24}', '\u{03B9}',), - 0x1F95 => Fold::Two('\u{1F25}', '\u{03B9}',), - 0x1F96 => Fold::Two('\u{1F26}', '\u{03B9}',), - 0x1F97 => Fold::Two('\u{1F27}', '\u{03B9}',), - 0x1F98 => Fold::Two('\u{1F20}', '\u{03B9}',), - 0x1F99 => Fold::Two('\u{1F21}', '\u{03B9}',), - 0x1F9A => Fold::Two('\u{1F22}', '\u{03B9}',), - 0x1F9B => Fold::Two('\u{1F23}', '\u{03B9}',), - 0x1F9C => Fold::Two('\u{1F24}', '\u{03B9}',), - 0x1F9D => Fold::Two('\u{1F25}', '\u{03B9}',), - 0x1F9E => Fold::Two('\u{1F26}', '\u{03B9}',), - 0x1F9F => Fold::Two('\u{1F27}', '\u{03B9}',), - 0x1FA0 => Fold::Two('\u{1F60}', '\u{03B9}',), - 0x1FA1 => Fold::Two('\u{1F61}', '\u{03B9}',), - 0x1FA2 => Fold::Two('\u{1F62}', '\u{03B9}',), - 0x1FA3 => 
Fold::Two('\u{1F63}', '\u{03B9}',), - 0x1FA4 => Fold::Two('\u{1F64}', '\u{03B9}',), - 0x1FA5 => Fold::Two('\u{1F65}', '\u{03B9}',), - 0x1FA6 => Fold::Two('\u{1F66}', '\u{03B9}',), - 0x1FA7 => Fold::Two('\u{1F67}', '\u{03B9}',), - 0x1FA8 => Fold::Two('\u{1F60}', '\u{03B9}',), - 0x1FA9 => Fold::Two('\u{1F61}', '\u{03B9}',), - 0x1FAA => Fold::Two('\u{1F62}', '\u{03B9}',), - 0x1FAB => Fold::Two('\u{1F63}', '\u{03B9}',), - 0x1FAC => Fold::Two('\u{1F64}', '\u{03B9}',), - 0x1FAD => Fold::Two('\u{1F65}', '\u{03B9}',), - 0x1FAE => Fold::Two('\u{1F66}', '\u{03B9}',), - 0x1FAF => Fold::Two('\u{1F67}', '\u{03B9}',), - 0x1FB2 => Fold::Two('\u{1F70}', '\u{03B9}',), - 0x1FB3 => Fold::Two('\u{03B1}', '\u{03B9}',), - 0x1FB4 => Fold::Two('\u{03AC}', '\u{03B9}',), - 0x1FB6 => Fold::Two('\u{03B1}', '\u{0342}',), - 0x1FB7 => Fold::Three('\u{03B1}', '\u{0342}', '\u{03B9}',), - 0x1FB8 => Fold::One('\u{1FB0}',), - 0x1FB9 => Fold::One('\u{1FB1}',), - 0x1FBA => Fold::One('\u{1F70}',), - 0x1FBB => Fold::One('\u{1F71}',), - 0x1FBC => Fold::Two('\u{03B1}', '\u{03B9}',), - 0x1FBE => Fold::One('\u{03B9}',), - 0x1FC2 => Fold::Two('\u{1F74}', '\u{03B9}',), - 0x1FC3 => Fold::Two('\u{03B7}', '\u{03B9}',), - 0x1FC4 => Fold::Two('\u{03AE}', '\u{03B9}',), - 0x1FC6 => Fold::Two('\u{03B7}', '\u{0342}',), - 0x1FC7 => Fold::Three('\u{03B7}', '\u{0342}', '\u{03B9}',), - 0x1FC8 => Fold::One('\u{1F72}',), - 0x1FC9 => Fold::One('\u{1F73}',), - 0x1FCA => Fold::One('\u{1F74}',), - 0x1FCB => Fold::One('\u{1F75}',), - 0x1FCC => Fold::Two('\u{03B7}', '\u{03B9}',), - 0x1FD2 => Fold::Three('\u{03B9}', '\u{0308}', '\u{0300}',), - 0x1FD3 => Fold::Three('\u{03B9}', '\u{0308}', '\u{0301}',), - 0x1FD6 => Fold::Two('\u{03B9}', '\u{0342}',), - 0x1FD7 => Fold::Three('\u{03B9}', '\u{0308}', '\u{0342}',), - 0x1FD8 => Fold::One('\u{1FD0}',), - 0x1FD9 => Fold::One('\u{1FD1}',), - 0x1FDA => Fold::One('\u{1F76}',), - 0x1FDB => Fold::One('\u{1F77}',), - 0x1FE2 => Fold::Three('\u{03C5}', '\u{0308}', '\u{0300}',), - 0x1FE3 => Fold::Three('\u{03C5}', '\u{0308}', '\u{0301}',), - 0x1FE4 => Fold::Two('\u{03C1}', '\u{0313}',), - 0x1FE6 => Fold::Two('\u{03C5}', '\u{0342}',), - 0x1FE7 => Fold::Three('\u{03C5}', '\u{0308}', '\u{0342}',), - 0x1FE8 => Fold::One('\u{1FE0}',), - 0x1FE9 => Fold::One('\u{1FE1}',), - 0x1FEA => Fold::One('\u{1F7A}',), - 0x1FEB => Fold::One('\u{1F7B}',), - 0x1FEC => Fold::One('\u{1FE5}',), - 0x1FF2 => Fold::Two('\u{1F7C}', '\u{03B9}',), - 0x1FF3 => Fold::Two('\u{03C9}', '\u{03B9}',), - 0x1FF4 => Fold::Two('\u{03CE}', '\u{03B9}',), - 0x1FF6 => Fold::Two('\u{03C9}', '\u{0342}',), - 0x1FF7 => Fold::Three('\u{03C9}', '\u{0342}', '\u{03B9}',), - 0x1FF8 => Fold::One('\u{1F78}',), - 0x1FF9 => Fold::One('\u{1F79}',), - 0x1FFA => Fold::One('\u{1F7C}',), - 0x1FFB => Fold::One('\u{1F7D}',), - 0x1FFC => Fold::Two('\u{03C9}', '\u{03B9}',), - 0x2126 => Fold::One('\u{03C9}',), - 0x212A => Fold::One('\u{006B}',), - 0x212B => Fold::One('\u{00E5}',), - 0x2132 => Fold::One('\u{214E}',), - 0x2160 => Fold::One('\u{2170}',), - 0x2161 => Fold::One('\u{2171}',), - 0x2162 => Fold::One('\u{2172}',), - 0x2163 => Fold::One('\u{2173}',), - 0x2164 => Fold::One('\u{2174}',), - 0x2165 => Fold::One('\u{2175}',), - 0x2166 => Fold::One('\u{2176}',), - 0x2167 => Fold::One('\u{2177}',), - 0x2168 => Fold::One('\u{2178}',), - 0x2169 => Fold::One('\u{2179}',), - 0x216A => Fold::One('\u{217A}',), - 0x216B => Fold::One('\u{217B}',), - 0x216C => Fold::One('\u{217C}',), - 0x216D => Fold::One('\u{217D}',), - 0x216E => Fold::One('\u{217E}',), - 0x216F => Fold::One('\u{217F}',), - 0x2183 
=> Fold::One('\u{2184}',), - 0x24B6 => Fold::One('\u{24D0}',), - 0x24B7 => Fold::One('\u{24D1}',), - 0x24B8 => Fold::One('\u{24D2}',), - 0x24B9 => Fold::One('\u{24D3}',), - 0x24BA => Fold::One('\u{24D4}',), - 0x24BB => Fold::One('\u{24D5}',), - 0x24BC => Fold::One('\u{24D6}',), - 0x24BD => Fold::One('\u{24D7}',), - 0x24BE => Fold::One('\u{24D8}',), - 0x24BF => Fold::One('\u{24D9}',), - 0x24C0 => Fold::One('\u{24DA}',), - 0x24C1 => Fold::One('\u{24DB}',), - 0x24C2 => Fold::One('\u{24DC}',), - 0x24C3 => Fold::One('\u{24DD}',), - 0x24C4 => Fold::One('\u{24DE}',), - 0x24C5 => Fold::One('\u{24DF}',), - 0x24C6 => Fold::One('\u{24E0}',), - 0x24C7 => Fold::One('\u{24E1}',), - 0x24C8 => Fold::One('\u{24E2}',), - 0x24C9 => Fold::One('\u{24E3}',), - 0x24CA => Fold::One('\u{24E4}',), - 0x24CB => Fold::One('\u{24E5}',), - 0x24CC => Fold::One('\u{24E6}',), - 0x24CD => Fold::One('\u{24E7}',), - 0x24CE => Fold::One('\u{24E8}',), - 0x24CF => Fold::One('\u{24E9}',), - 0x2C00 => Fold::One('\u{2C30}',), - 0x2C01 => Fold::One('\u{2C31}',), - 0x2C02 => Fold::One('\u{2C32}',), - 0x2C03 => Fold::One('\u{2C33}',), - 0x2C04 => Fold::One('\u{2C34}',), - 0x2C05 => Fold::One('\u{2C35}',), - 0x2C06 => Fold::One('\u{2C36}',), - 0x2C07 => Fold::One('\u{2C37}',), - 0x2C08 => Fold::One('\u{2C38}',), - 0x2C09 => Fold::One('\u{2C39}',), - 0x2C0A => Fold::One('\u{2C3A}',), - 0x2C0B => Fold::One('\u{2C3B}',), - 0x2C0C => Fold::One('\u{2C3C}',), - 0x2C0D => Fold::One('\u{2C3D}',), - 0x2C0E => Fold::One('\u{2C3E}',), - 0x2C0F => Fold::One('\u{2C3F}',), - 0x2C10 => Fold::One('\u{2C40}',), - 0x2C11 => Fold::One('\u{2C41}',), - 0x2C12 => Fold::One('\u{2C42}',), - 0x2C13 => Fold::One('\u{2C43}',), - 0x2C14 => Fold::One('\u{2C44}',), - 0x2C15 => Fold::One('\u{2C45}',), - 0x2C16 => Fold::One('\u{2C46}',), - 0x2C17 => Fold::One('\u{2C47}',), - 0x2C18 => Fold::One('\u{2C48}',), - 0x2C19 => Fold::One('\u{2C49}',), - 0x2C1A => Fold::One('\u{2C4A}',), - 0x2C1B => Fold::One('\u{2C4B}',), - 0x2C1C => Fold::One('\u{2C4C}',), - 0x2C1D => Fold::One('\u{2C4D}',), - 0x2C1E => Fold::One('\u{2C4E}',), - 0x2C1F => Fold::One('\u{2C4F}',), - 0x2C20 => Fold::One('\u{2C50}',), - 0x2C21 => Fold::One('\u{2C51}',), - 0x2C22 => Fold::One('\u{2C52}',), - 0x2C23 => Fold::One('\u{2C53}',), - 0x2C24 => Fold::One('\u{2C54}',), - 0x2C25 => Fold::One('\u{2C55}',), - 0x2C26 => Fold::One('\u{2C56}',), - 0x2C27 => Fold::One('\u{2C57}',), - 0x2C28 => Fold::One('\u{2C58}',), - 0x2C29 => Fold::One('\u{2C59}',), - 0x2C2A => Fold::One('\u{2C5A}',), - 0x2C2B => Fold::One('\u{2C5B}',), - 0x2C2C => Fold::One('\u{2C5C}',), - 0x2C2D => Fold::One('\u{2C5D}',), - 0x2C2E => Fold::One('\u{2C5E}',), - 0x2C60 => Fold::One('\u{2C61}',), - 0x2C62 => Fold::One('\u{026B}',), - 0x2C63 => Fold::One('\u{1D7D}',), - 0x2C64 => Fold::One('\u{027D}',), - 0x2C67 => Fold::One('\u{2C68}',), - 0x2C69 => Fold::One('\u{2C6A}',), - 0x2C6B => Fold::One('\u{2C6C}',), - 0x2C6D => Fold::One('\u{0251}',), - 0x2C6E => Fold::One('\u{0271}',), - 0x2C6F => Fold::One('\u{0250}',), - 0x2C70 => Fold::One('\u{0252}',), - 0x2C72 => Fold::One('\u{2C73}',), - 0x2C75 => Fold::One('\u{2C76}',), - 0x2C7E => Fold::One('\u{023F}',), - 0x2C7F => Fold::One('\u{0240}',), - 0x2C80 => Fold::One('\u{2C81}',), - 0x2C82 => Fold::One('\u{2C83}',), - 0x2C84 => Fold::One('\u{2C85}',), - 0x2C86 => Fold::One('\u{2C87}',), - 0x2C88 => Fold::One('\u{2C89}',), - 0x2C8A => Fold::One('\u{2C8B}',), - 0x2C8C => Fold::One('\u{2C8D}',), - 0x2C8E => Fold::One('\u{2C8F}',), - 0x2C90 => Fold::One('\u{2C91}',), - 0x2C92 => Fold::One('\u{2C93}',), 
- 0x2C94 => Fold::One('\u{2C95}',), - 0x2C96 => Fold::One('\u{2C97}',), - 0x2C98 => Fold::One('\u{2C99}',), - 0x2C9A => Fold::One('\u{2C9B}',), - 0x2C9C => Fold::One('\u{2C9D}',), - 0x2C9E => Fold::One('\u{2C9F}',), - 0x2CA0 => Fold::One('\u{2CA1}',), - 0x2CA2 => Fold::One('\u{2CA3}',), - 0x2CA4 => Fold::One('\u{2CA5}',), - 0x2CA6 => Fold::One('\u{2CA7}',), - 0x2CA8 => Fold::One('\u{2CA9}',), - 0x2CAA => Fold::One('\u{2CAB}',), - 0x2CAC => Fold::One('\u{2CAD}',), - 0x2CAE => Fold::One('\u{2CAF}',), - 0x2CB0 => Fold::One('\u{2CB1}',), - 0x2CB2 => Fold::One('\u{2CB3}',), - 0x2CB4 => Fold::One('\u{2CB5}',), - 0x2CB6 => Fold::One('\u{2CB7}',), - 0x2CB8 => Fold::One('\u{2CB9}',), - 0x2CBA => Fold::One('\u{2CBB}',), - 0x2CBC => Fold::One('\u{2CBD}',), - 0x2CBE => Fold::One('\u{2CBF}',), - 0x2CC0 => Fold::One('\u{2CC1}',), - 0x2CC2 => Fold::One('\u{2CC3}',), - 0x2CC4 => Fold::One('\u{2CC5}',), - 0x2CC6 => Fold::One('\u{2CC7}',), - 0x2CC8 => Fold::One('\u{2CC9}',), - 0x2CCA => Fold::One('\u{2CCB}',), - 0x2CCC => Fold::One('\u{2CCD}',), - 0x2CCE => Fold::One('\u{2CCF}',), - 0x2CD0 => Fold::One('\u{2CD1}',), - 0x2CD2 => Fold::One('\u{2CD3}',), - 0x2CD4 => Fold::One('\u{2CD5}',), - 0x2CD6 => Fold::One('\u{2CD7}',), - 0x2CD8 => Fold::One('\u{2CD9}',), - 0x2CDA => Fold::One('\u{2CDB}',), - 0x2CDC => Fold::One('\u{2CDD}',), - 0x2CDE => Fold::One('\u{2CDF}',), - 0x2CE0 => Fold::One('\u{2CE1}',), - 0x2CE2 => Fold::One('\u{2CE3}',), - 0x2CEB => Fold::One('\u{2CEC}',), - 0x2CED => Fold::One('\u{2CEE}',), - 0x2CF2 => Fold::One('\u{2CF3}',), - 0xA640 => Fold::One('\u{A641}',), - 0xA642 => Fold::One('\u{A643}',), - 0xA644 => Fold::One('\u{A645}',), - 0xA646 => Fold::One('\u{A647}',), - 0xA648 => Fold::One('\u{A649}',), - 0xA64A => Fold::One('\u{A64B}',), - 0xA64C => Fold::One('\u{A64D}',), - 0xA64E => Fold::One('\u{A64F}',), - 0xA650 => Fold::One('\u{A651}',), - 0xA652 => Fold::One('\u{A653}',), - 0xA654 => Fold::One('\u{A655}',), - 0xA656 => Fold::One('\u{A657}',), - 0xA658 => Fold::One('\u{A659}',), - 0xA65A => Fold::One('\u{A65B}',), - 0xA65C => Fold::One('\u{A65D}',), - 0xA65E => Fold::One('\u{A65F}',), - 0xA660 => Fold::One('\u{A661}',), - 0xA662 => Fold::One('\u{A663}',), - 0xA664 => Fold::One('\u{A665}',), - 0xA666 => Fold::One('\u{A667}',), - 0xA668 => Fold::One('\u{A669}',), - 0xA66A => Fold::One('\u{A66B}',), - 0xA66C => Fold::One('\u{A66D}',), - 0xA680 => Fold::One('\u{A681}',), - 0xA682 => Fold::One('\u{A683}',), - 0xA684 => Fold::One('\u{A685}',), - 0xA686 => Fold::One('\u{A687}',), - 0xA688 => Fold::One('\u{A689}',), - 0xA68A => Fold::One('\u{A68B}',), - 0xA68C => Fold::One('\u{A68D}',), - 0xA68E => Fold::One('\u{A68F}',), - 0xA690 => Fold::One('\u{A691}',), - 0xA692 => Fold::One('\u{A693}',), - 0xA694 => Fold::One('\u{A695}',), - 0xA696 => Fold::One('\u{A697}',), - 0xA698 => Fold::One('\u{A699}',), - 0xA69A => Fold::One('\u{A69B}',), - 0xA722 => Fold::One('\u{A723}',), - 0xA724 => Fold::One('\u{A725}',), - 0xA726 => Fold::One('\u{A727}',), - 0xA728 => Fold::One('\u{A729}',), - 0xA72A => Fold::One('\u{A72B}',), - 0xA72C => Fold::One('\u{A72D}',), - 0xA72E => Fold::One('\u{A72F}',), - 0xA732 => Fold::One('\u{A733}',), - 0xA734 => Fold::One('\u{A735}',), - 0xA736 => Fold::One('\u{A737}',), - 0xA738 => Fold::One('\u{A739}',), - 0xA73A => Fold::One('\u{A73B}',), - 0xA73C => Fold::One('\u{A73D}',), - 0xA73E => Fold::One('\u{A73F}',), - 0xA740 => Fold::One('\u{A741}',), - 0xA742 => Fold::One('\u{A743}',), - 0xA744 => Fold::One('\u{A745}',), - 0xA746 => Fold::One('\u{A747}',), - 0xA748 => 
Fold::One('\u{A749}',), - 0xA74A => Fold::One('\u{A74B}',), - 0xA74C => Fold::One('\u{A74D}',), - 0xA74E => Fold::One('\u{A74F}',), - 0xA750 => Fold::One('\u{A751}',), - 0xA752 => Fold::One('\u{A753}',), - 0xA754 => Fold::One('\u{A755}',), - 0xA756 => Fold::One('\u{A757}',), - 0xA758 => Fold::One('\u{A759}',), - 0xA75A => Fold::One('\u{A75B}',), - 0xA75C => Fold::One('\u{A75D}',), - 0xA75E => Fold::One('\u{A75F}',), - 0xA760 => Fold::One('\u{A761}',), - 0xA762 => Fold::One('\u{A763}',), - 0xA764 => Fold::One('\u{A765}',), - 0xA766 => Fold::One('\u{A767}',), - 0xA768 => Fold::One('\u{A769}',), - 0xA76A => Fold::One('\u{A76B}',), - 0xA76C => Fold::One('\u{A76D}',), - 0xA76E => Fold::One('\u{A76F}',), - 0xA779 => Fold::One('\u{A77A}',), - 0xA77B => Fold::One('\u{A77C}',), - 0xA77D => Fold::One('\u{1D79}',), - 0xA77E => Fold::One('\u{A77F}',), - 0xA780 => Fold::One('\u{A781}',), - 0xA782 => Fold::One('\u{A783}',), - 0xA784 => Fold::One('\u{A785}',), - 0xA786 => Fold::One('\u{A787}',), - 0xA78B => Fold::One('\u{A78C}',), - 0xA78D => Fold::One('\u{0265}',), - 0xA790 => Fold::One('\u{A791}',), - 0xA792 => Fold::One('\u{A793}',), - 0xA796 => Fold::One('\u{A797}',), - 0xA798 => Fold::One('\u{A799}',), - 0xA79A => Fold::One('\u{A79B}',), - 0xA79C => Fold::One('\u{A79D}',), - 0xA79E => Fold::One('\u{A79F}',), - 0xA7A0 => Fold::One('\u{A7A1}',), - 0xA7A2 => Fold::One('\u{A7A3}',), - 0xA7A4 => Fold::One('\u{A7A5}',), - 0xA7A6 => Fold::One('\u{A7A7}',), - 0xA7A8 => Fold::One('\u{A7A9}',), - 0xA7AA => Fold::One('\u{0266}',), - 0xA7AB => Fold::One('\u{025C}',), - 0xA7AC => Fold::One('\u{0261}',), - 0xA7AD => Fold::One('\u{026C}',), - 0xA7AE => Fold::One('\u{026A}',), - 0xA7B0 => Fold::One('\u{029E}',), - 0xA7B1 => Fold::One('\u{0287}',), - 0xA7B2 => Fold::One('\u{029D}',), - 0xA7B3 => Fold::One('\u{AB53}',), - 0xA7B4 => Fold::One('\u{A7B5}',), - 0xA7B6 => Fold::One('\u{A7B7}',), - 0xA7B8 => Fold::One('\u{A7B9}',), - 0xA7BA => Fold::One('\u{A7BB}',), - 0xA7BC => Fold::One('\u{A7BD}',), - 0xA7BE => Fold::One('\u{A7BF}',), - 0xA7C2 => Fold::One('\u{A7C3}',), - 0xA7C4 => Fold::One('\u{A794}',), - 0xA7C5 => Fold::One('\u{0282}',), - 0xA7C6 => Fold::One('\u{1D8E}',), - 0xAB70 => Fold::One('\u{13A0}',), - 0xAB71 => Fold::One('\u{13A1}',), - 0xAB72 => Fold::One('\u{13A2}',), - 0xAB73 => Fold::One('\u{13A3}',), - 0xAB74 => Fold::One('\u{13A4}',), - 0xAB75 => Fold::One('\u{13A5}',), - 0xAB76 => Fold::One('\u{13A6}',), - 0xAB77 => Fold::One('\u{13A7}',), - 0xAB78 => Fold::One('\u{13A8}',), - 0xAB79 => Fold::One('\u{13A9}',), - 0xAB7A => Fold::One('\u{13AA}',), - 0xAB7B => Fold::One('\u{13AB}',), - 0xAB7C => Fold::One('\u{13AC}',), - 0xAB7D => Fold::One('\u{13AD}',), - 0xAB7E => Fold::One('\u{13AE}',), - 0xAB7F => Fold::One('\u{13AF}',), - 0xAB80 => Fold::One('\u{13B0}',), - 0xAB81 => Fold::One('\u{13B1}',), - 0xAB82 => Fold::One('\u{13B2}',), - 0xAB83 => Fold::One('\u{13B3}',), - 0xAB84 => Fold::One('\u{13B4}',), - 0xAB85 => Fold::One('\u{13B5}',), - 0xAB86 => Fold::One('\u{13B6}',), - 0xAB87 => Fold::One('\u{13B7}',), - 0xAB88 => Fold::One('\u{13B8}',), - 0xAB89 => Fold::One('\u{13B9}',), - 0xAB8A => Fold::One('\u{13BA}',), - 0xAB8B => Fold::One('\u{13BB}',), - 0xAB8C => Fold::One('\u{13BC}',), - 0xAB8D => Fold::One('\u{13BD}',), - 0xAB8E => Fold::One('\u{13BE}',), - 0xAB8F => Fold::One('\u{13BF}',), - 0xAB90 => Fold::One('\u{13C0}',), - 0xAB91 => Fold::One('\u{13C1}',), - 0xAB92 => Fold::One('\u{13C2}',), - 0xAB93 => Fold::One('\u{13C3}',), - 0xAB94 => Fold::One('\u{13C4}',), - 0xAB95 => Fold::One('\u{13C5}',), - 
0xAB96 => Fold::One('\u{13C6}',), - 0xAB97 => Fold::One('\u{13C7}',), - 0xAB98 => Fold::One('\u{13C8}',), - 0xAB99 => Fold::One('\u{13C9}',), - 0xAB9A => Fold::One('\u{13CA}',), - 0xAB9B => Fold::One('\u{13CB}',), - 0xAB9C => Fold::One('\u{13CC}',), - 0xAB9D => Fold::One('\u{13CD}',), - 0xAB9E => Fold::One('\u{13CE}',), - 0xAB9F => Fold::One('\u{13CF}',), - 0xABA0 => Fold::One('\u{13D0}',), - 0xABA1 => Fold::One('\u{13D1}',), - 0xABA2 => Fold::One('\u{13D2}',), - 0xABA3 => Fold::One('\u{13D3}',), - 0xABA4 => Fold::One('\u{13D4}',), - 0xABA5 => Fold::One('\u{13D5}',), - 0xABA6 => Fold::One('\u{13D6}',), - 0xABA7 => Fold::One('\u{13D7}',), - 0xABA8 => Fold::One('\u{13D8}',), - 0xABA9 => Fold::One('\u{13D9}',), - 0xABAA => Fold::One('\u{13DA}',), - 0xABAB => Fold::One('\u{13DB}',), - 0xABAC => Fold::One('\u{13DC}',), - 0xABAD => Fold::One('\u{13DD}',), - 0xABAE => Fold::One('\u{13DE}',), - 0xABAF => Fold::One('\u{13DF}',), - 0xABB0 => Fold::One('\u{13E0}',), - 0xABB1 => Fold::One('\u{13E1}',), - 0xABB2 => Fold::One('\u{13E2}',), - 0xABB3 => Fold::One('\u{13E3}',), - 0xABB4 => Fold::One('\u{13E4}',), - 0xABB5 => Fold::One('\u{13E5}',), - 0xABB6 => Fold::One('\u{13E6}',), - 0xABB7 => Fold::One('\u{13E7}',), - 0xABB8 => Fold::One('\u{13E8}',), - 0xABB9 => Fold::One('\u{13E9}',), - 0xABBA => Fold::One('\u{13EA}',), - 0xABBB => Fold::One('\u{13EB}',), - 0xABBC => Fold::One('\u{13EC}',), - 0xABBD => Fold::One('\u{13ED}',), - 0xABBE => Fold::One('\u{13EE}',), - 0xABBF => Fold::One('\u{13EF}',), - 0xFB00 => Fold::Two('\u{0066}', '\u{0066}',), - 0xFB01 => Fold::Two('\u{0066}', '\u{0069}',), - 0xFB02 => Fold::Two('\u{0066}', '\u{006C}',), - 0xFB03 => Fold::Three('\u{0066}', '\u{0066}', '\u{0069}',), - 0xFB04 => Fold::Three('\u{0066}', '\u{0066}', '\u{006C}',), - 0xFB05 => Fold::Two('\u{0073}', '\u{0074}',), - 0xFB06 => Fold::Two('\u{0073}', '\u{0074}',), - 0xFB13 => Fold::Two('\u{0574}', '\u{0576}',), - 0xFB14 => Fold::Two('\u{0574}', '\u{0565}',), - 0xFB15 => Fold::Two('\u{0574}', '\u{056B}',), - 0xFB16 => Fold::Two('\u{057E}', '\u{0576}',), - 0xFB17 => Fold::Two('\u{0574}', '\u{056D}',), - 0xFF21 => Fold::One('\u{FF41}',), - 0xFF22 => Fold::One('\u{FF42}',), - 0xFF23 => Fold::One('\u{FF43}',), - 0xFF24 => Fold::One('\u{FF44}',), - 0xFF25 => Fold::One('\u{FF45}',), - 0xFF26 => Fold::One('\u{FF46}',), - 0xFF27 => Fold::One('\u{FF47}',), - 0xFF28 => Fold::One('\u{FF48}',), - 0xFF29 => Fold::One('\u{FF49}',), - 0xFF2A => Fold::One('\u{FF4A}',), - 0xFF2B => Fold::One('\u{FF4B}',), - 0xFF2C => Fold::One('\u{FF4C}',), - 0xFF2D => Fold::One('\u{FF4D}',), - 0xFF2E => Fold::One('\u{FF4E}',), - 0xFF2F => Fold::One('\u{FF4F}',), - 0xFF30 => Fold::One('\u{FF50}',), - 0xFF31 => Fold::One('\u{FF51}',), - 0xFF32 => Fold::One('\u{FF52}',), - 0xFF33 => Fold::One('\u{FF53}',), - 0xFF34 => Fold::One('\u{FF54}',), - 0xFF35 => Fold::One('\u{FF55}',), - 0xFF36 => Fold::One('\u{FF56}',), - 0xFF37 => Fold::One('\u{FF57}',), - 0xFF38 => Fold::One('\u{FF58}',), - 0xFF39 => Fold::One('\u{FF59}',), - 0xFF3A => Fold::One('\u{FF5A}',), - 0x10400 => Fold::One('\u{10428}',), - 0x10401 => Fold::One('\u{10429}',), - 0x10402 => Fold::One('\u{1042A}',), - 0x10403 => Fold::One('\u{1042B}',), - 0x10404 => Fold::One('\u{1042C}',), - 0x10405 => Fold::One('\u{1042D}',), - 0x10406 => Fold::One('\u{1042E}',), - 0x10407 => Fold::One('\u{1042F}',), - 0x10408 => Fold::One('\u{10430}',), - 0x10409 => Fold::One('\u{10431}',), - 0x1040A => Fold::One('\u{10432}',), - 0x1040B => Fold::One('\u{10433}',), - 0x1040C => Fold::One('\u{10434}',), - 0x1040D 
=> Fold::One('\u{10435}',), - 0x1040E => Fold::One('\u{10436}',), - 0x1040F => Fold::One('\u{10437}',), - 0x10410 => Fold::One('\u{10438}',), - 0x10411 => Fold::One('\u{10439}',), - 0x10412 => Fold::One('\u{1043A}',), - 0x10413 => Fold::One('\u{1043B}',), - 0x10414 => Fold::One('\u{1043C}',), - 0x10415 => Fold::One('\u{1043D}',), - 0x10416 => Fold::One('\u{1043E}',), - 0x10417 => Fold::One('\u{1043F}',), - 0x10418 => Fold::One('\u{10440}',), - 0x10419 => Fold::One('\u{10441}',), - 0x1041A => Fold::One('\u{10442}',), - 0x1041B => Fold::One('\u{10443}',), - 0x1041C => Fold::One('\u{10444}',), - 0x1041D => Fold::One('\u{10445}',), - 0x1041E => Fold::One('\u{10446}',), - 0x1041F => Fold::One('\u{10447}',), - 0x10420 => Fold::One('\u{10448}',), - 0x10421 => Fold::One('\u{10449}',), - 0x10422 => Fold::One('\u{1044A}',), - 0x10423 => Fold::One('\u{1044B}',), - 0x10424 => Fold::One('\u{1044C}',), - 0x10425 => Fold::One('\u{1044D}',), - 0x10426 => Fold::One('\u{1044E}',), - 0x10427 => Fold::One('\u{1044F}',), - 0x104B0 => Fold::One('\u{104D8}',), - 0x104B1 => Fold::One('\u{104D9}',), - 0x104B2 => Fold::One('\u{104DA}',), - 0x104B3 => Fold::One('\u{104DB}',), - 0x104B4 => Fold::One('\u{104DC}',), - 0x104B5 => Fold::One('\u{104DD}',), - 0x104B6 => Fold::One('\u{104DE}',), - 0x104B7 => Fold::One('\u{104DF}',), - 0x104B8 => Fold::One('\u{104E0}',), - 0x104B9 => Fold::One('\u{104E1}',), - 0x104BA => Fold::One('\u{104E2}',), - 0x104BB => Fold::One('\u{104E3}',), - 0x104BC => Fold::One('\u{104E4}',), - 0x104BD => Fold::One('\u{104E5}',), - 0x104BE => Fold::One('\u{104E6}',), - 0x104BF => Fold::One('\u{104E7}',), - 0x104C0 => Fold::One('\u{104E8}',), - 0x104C1 => Fold::One('\u{104E9}',), - 0x104C2 => Fold::One('\u{104EA}',), - 0x104C3 => Fold::One('\u{104EB}',), - 0x104C4 => Fold::One('\u{104EC}',), - 0x104C5 => Fold::One('\u{104ED}',), - 0x104C6 => Fold::One('\u{104EE}',), - 0x104C7 => Fold::One('\u{104EF}',), - 0x104C8 => Fold::One('\u{104F0}',), - 0x104C9 => Fold::One('\u{104F1}',), - 0x104CA => Fold::One('\u{104F2}',), - 0x104CB => Fold::One('\u{104F3}',), - 0x104CC => Fold::One('\u{104F4}',), - 0x104CD => Fold::One('\u{104F5}',), - 0x104CE => Fold::One('\u{104F6}',), - 0x104CF => Fold::One('\u{104F7}',), - 0x104D0 => Fold::One('\u{104F8}',), - 0x104D1 => Fold::One('\u{104F9}',), - 0x104D2 => Fold::One('\u{104FA}',), - 0x104D3 => Fold::One('\u{104FB}',), - 0x10C80 => Fold::One('\u{10CC0}',), - 0x10C81 => Fold::One('\u{10CC1}',), - 0x10C82 => Fold::One('\u{10CC2}',), - 0x10C83 => Fold::One('\u{10CC3}',), - 0x10C84 => Fold::One('\u{10CC4}',), - 0x10C85 => Fold::One('\u{10CC5}',), - 0x10C86 => Fold::One('\u{10CC6}',), - 0x10C87 => Fold::One('\u{10CC7}',), - 0x10C88 => Fold::One('\u{10CC8}',), - 0x10C89 => Fold::One('\u{10CC9}',), - 0x10C8A => Fold::One('\u{10CCA}',), - 0x10C8B => Fold::One('\u{10CCB}',), - 0x10C8C => Fold::One('\u{10CCC}',), - 0x10C8D => Fold::One('\u{10CCD}',), - 0x10C8E => Fold::One('\u{10CCE}',), - 0x10C8F => Fold::One('\u{10CCF}',), - 0x10C90 => Fold::One('\u{10CD0}',), - 0x10C91 => Fold::One('\u{10CD1}',), - 0x10C92 => Fold::One('\u{10CD2}',), - 0x10C93 => Fold::One('\u{10CD3}',), - 0x10C94 => Fold::One('\u{10CD4}',), - 0x10C95 => Fold::One('\u{10CD5}',), - 0x10C96 => Fold::One('\u{10CD6}',), - 0x10C97 => Fold::One('\u{10CD7}',), - 0x10C98 => Fold::One('\u{10CD8}',), - 0x10C99 => Fold::One('\u{10CD9}',), - 0x10C9A => Fold::One('\u{10CDA}',), - 0x10C9B => Fold::One('\u{10CDB}',), - 0x10C9C => Fold::One('\u{10CDC}',), - 0x10C9D => Fold::One('\u{10CDD}',), - 0x10C9E => 
Fold::One('\u{10CDE}',), - 0x10C9F => Fold::One('\u{10CDF}',), - 0x10CA0 => Fold::One('\u{10CE0}',), - 0x10CA1 => Fold::One('\u{10CE1}',), - 0x10CA2 => Fold::One('\u{10CE2}',), - 0x10CA3 => Fold::One('\u{10CE3}',), - 0x10CA4 => Fold::One('\u{10CE4}',), - 0x10CA5 => Fold::One('\u{10CE5}',), - 0x10CA6 => Fold::One('\u{10CE6}',), - 0x10CA7 => Fold::One('\u{10CE7}',), - 0x10CA8 => Fold::One('\u{10CE8}',), - 0x10CA9 => Fold::One('\u{10CE9}',), - 0x10CAA => Fold::One('\u{10CEA}',), - 0x10CAB => Fold::One('\u{10CEB}',), - 0x10CAC => Fold::One('\u{10CEC}',), - 0x10CAD => Fold::One('\u{10CED}',), - 0x10CAE => Fold::One('\u{10CEE}',), - 0x10CAF => Fold::One('\u{10CEF}',), - 0x10CB0 => Fold::One('\u{10CF0}',), - 0x10CB1 => Fold::One('\u{10CF1}',), - 0x10CB2 => Fold::One('\u{10CF2}',), - 0x118A0 => Fold::One('\u{118C0}',), - 0x118A1 => Fold::One('\u{118C1}',), - 0x118A2 => Fold::One('\u{118C2}',), - 0x118A3 => Fold::One('\u{118C3}',), - 0x118A4 => Fold::One('\u{118C4}',), - 0x118A5 => Fold::One('\u{118C5}',), - 0x118A6 => Fold::One('\u{118C6}',), - 0x118A7 => Fold::One('\u{118C7}',), - 0x118A8 => Fold::One('\u{118C8}',), - 0x118A9 => Fold::One('\u{118C9}',), - 0x118AA => Fold::One('\u{118CA}',), - 0x118AB => Fold::One('\u{118CB}',), - 0x118AC => Fold::One('\u{118CC}',), - 0x118AD => Fold::One('\u{118CD}',), - 0x118AE => Fold::One('\u{118CE}',), - 0x118AF => Fold::One('\u{118CF}',), - 0x118B0 => Fold::One('\u{118D0}',), - 0x118B1 => Fold::One('\u{118D1}',), - 0x118B2 => Fold::One('\u{118D2}',), - 0x118B3 => Fold::One('\u{118D3}',), - 0x118B4 => Fold::One('\u{118D4}',), - 0x118B5 => Fold::One('\u{118D5}',), - 0x118B6 => Fold::One('\u{118D6}',), - 0x118B7 => Fold::One('\u{118D7}',), - 0x118B8 => Fold::One('\u{118D8}',), - 0x118B9 => Fold::One('\u{118D9}',), - 0x118BA => Fold::One('\u{118DA}',), - 0x118BB => Fold::One('\u{118DB}',), - 0x118BC => Fold::One('\u{118DC}',), - 0x118BD => Fold::One('\u{118DD}',), - 0x118BE => Fold::One('\u{118DE}',), - 0x118BF => Fold::One('\u{118DF}',), - 0x16E40 => Fold::One('\u{16E60}',), - 0x16E41 => Fold::One('\u{16E61}',), - 0x16E42 => Fold::One('\u{16E62}',), - 0x16E43 => Fold::One('\u{16E63}',), - 0x16E44 => Fold::One('\u{16E64}',), - 0x16E45 => Fold::One('\u{16E65}',), - 0x16E46 => Fold::One('\u{16E66}',), - 0x16E47 => Fold::One('\u{16E67}',), - 0x16E48 => Fold::One('\u{16E68}',), - 0x16E49 => Fold::One('\u{16E69}',), - 0x16E4A => Fold::One('\u{16E6A}',), - 0x16E4B => Fold::One('\u{16E6B}',), - 0x16E4C => Fold::One('\u{16E6C}',), - 0x16E4D => Fold::One('\u{16E6D}',), - 0x16E4E => Fold::One('\u{16E6E}',), - 0x16E4F => Fold::One('\u{16E6F}',), - 0x16E50 => Fold::One('\u{16E70}',), - 0x16E51 => Fold::One('\u{16E71}',), - 0x16E52 => Fold::One('\u{16E72}',), - 0x16E53 => Fold::One('\u{16E73}',), - 0x16E54 => Fold::One('\u{16E74}',), - 0x16E55 => Fold::One('\u{16E75}',), - 0x16E56 => Fold::One('\u{16E76}',), - 0x16E57 => Fold::One('\u{16E77}',), - 0x16E58 => Fold::One('\u{16E78}',), - 0x16E59 => Fold::One('\u{16E79}',), - 0x16E5A => Fold::One('\u{16E7A}',), - 0x16E5B => Fold::One('\u{16E7B}',), - 0x16E5C => Fold::One('\u{16E7C}',), - 0x16E5D => Fold::One('\u{16E7D}',), - 0x16E5E => Fold::One('\u{16E7E}',), - 0x16E5F => Fold::One('\u{16E7F}',), - 0x1E900 => Fold::One('\u{1E922}',), - 0x1E901 => Fold::One('\u{1E923}',), - 0x1E902 => Fold::One('\u{1E924}',), - 0x1E903 => Fold::One('\u{1E925}',), - 0x1E904 => Fold::One('\u{1E926}',), - 0x1E905 => Fold::One('\u{1E927}',), - 0x1E906 => Fold::One('\u{1E928}',), - 0x1E907 => Fold::One('\u{1E929}',), - 0x1E908 => 
Fold::One('\u{1E92A}',), - 0x1E909 => Fold::One('\u{1E92B}',), - 0x1E90A => Fold::One('\u{1E92C}',), - 0x1E90B => Fold::One('\u{1E92D}',), - 0x1E90C => Fold::One('\u{1E92E}',), - 0x1E90D => Fold::One('\u{1E92F}',), - 0x1E90E => Fold::One('\u{1E930}',), - 0x1E90F => Fold::One('\u{1E931}',), - 0x1E910 => Fold::One('\u{1E932}',), - 0x1E911 => Fold::One('\u{1E933}',), - 0x1E912 => Fold::One('\u{1E934}',), - 0x1E913 => Fold::One('\u{1E935}',), - 0x1E914 => Fold::One('\u{1E936}',), - 0x1E915 => Fold::One('\u{1E937}',), - 0x1E916 => Fold::One('\u{1E938}',), - 0x1E917 => Fold::One('\u{1E939}',), - 0x1E918 => Fold::One('\u{1E93A}',), - 0x1E919 => Fold::One('\u{1E93B}',), - 0x1E91A => Fold::One('\u{1E93C}',), - 0x1E91B => Fold::One('\u{1E93D}',), - 0x1E91C => Fold::One('\u{1E93E}',), - 0x1E91D => Fold::One('\u{1E93F}',), - 0x1E91E => Fold::One('\u{1E940}',), - 0x1E91F => Fold::One('\u{1E941}',), - 0x1E920 => Fold::One('\u{1E942}',), - 0x1E921 => Fold::One('\u{1E943}',), - _ => Fold::One(orig,) + // The code below is is intended to reduce the binary size from that of a simple 1:1 lookup table. + // It exploits two facts: + // 1. Many of the mappings form ranges mapped to other ranges. + // To benefit from this, we match on ranges instead of single numbers. + // Alone, this decreases the binary size but results in performance regression over the simple 1:1 lookup. + // 2. Most of the mappings are from relatively small chars (0 - 0x2CFF). + // To benefit from this, we use a jump table based on the high byte for this range. + // This more than recovers the performance regression from exploting fact #1, at least in the tested benchmark. + let from = orig as u32; + if from <= 0x2CFF { + let from = from as u16; + let high_byte = (from >> 8) as u8; + let low_byte = (from & 0xff) as u8; + let single_char: u16 = match high_byte { + 0x00 => { + match low_byte { + x @ _ if 0x41 <= x && x <= 0x5a => from.wrapping_add(0x0020), + 0xb5 => 0x03bc, + x @ _ if 0xc0 <= x && x <= 0xd6 => from.wrapping_add(0x0020), + x @ _ if 0xd8 <= x && x <= 0xde => from.wrapping_add(0x0020), + 0xdf => return Fold::Two('\u{0073}', '\u{0073}',), + _ => from + } + } + 0x01 => { + match low_byte { + x @ _ if x <= 0x2e => (from | 1), + 0x30 => return Fold::Two('\u{0069}', '\u{0307}',), + x @ _ if 0x32 <= x && x <= 0x36 => (from | 1), + x @ _ if 0x39 <= x && x <= 0x47 => ((from+1) & !1), + 0x49 => return Fold::Two('\u{02bc}', '\u{006e}',), + x @ _ if 0x4a <= x && x <= 0x76 => (from | 1), + 0x78 => 0x00ff, + x @ _ if 0x79 <= x && x <= 0x7d => ((from+1) & !1), + 0x7f => 0x0073, + 0x81 => 0x0253, + x @ _ if 0x82 <= x && x <= 0x84 => (from | 1), + 0x86 => 0x0254, + 0x87 => 0x0188, + x @ _ if 0x89 <= x && x <= 0x8a => from.wrapping_add(0x00cd), + 0x8b => 0x018c, + 0x8e => 0x01dd, + 0x8f => 0x0259, + 0x90 => 0x025b, + 0x91 => 0x0192, + 0x93 => 0x0260, + 0x94 => 0x0263, + 0x96 => 0x0269, + 0x97 => 0x0268, + 0x98 => 0x0199, + 0x9c => 0x026f, + 0x9d => 0x0272, + 0x9f => 0x0275, + x @ _ if 0xa0 <= x && x <= 0xa4 => (from | 1), + 0xa6 => 0x0280, + 0xa7 => 0x01a8, + 0xa9 => 0x0283, + 0xac => 0x01ad, + 0xae => 0x0288, + 0xaf => 0x01b0, + x @ _ if 0xb1 <= x && x <= 0xb2 => from.wrapping_add(0x00d9), + x @ _ if 0xb3 <= x && x <= 0xb5 => ((from+1) & !1), + 0xb7 => 0x0292, + 0xb8 => 0x01b9, + 0xbc => 0x01bd, + 0xc4 => 0x01c6, + 0xc5 => 0x01c6, + 0xc7 => 0x01c9, + 0xc8 => 0x01c9, + 0xca => 0x01cc, + x @ _ if 0xcb <= x && x <= 0xdb => ((from+1) & !1), + x @ _ if 0xde <= x && x <= 0xee => (from | 1), + 0xf0 => return Fold::Two('\u{006a}', '\u{030c}',), + 0xf1 
=> 0x01f3, + x @ _ if 0xf2 <= x && x <= 0xf4 => (from | 1), + 0xf6 => 0x0195, + 0xf7 => 0x01bf, + x @ _ if 0xf8 <= x => (from | 1), + _ => from + } + } + 0x02 => { + match low_byte { + x @ _ if x <= 0x1e => (from | 1), + 0x20 => 0x019e, + x @ _ if 0x22 <= x && x <= 0x32 => (from | 1), + 0x3a => 0x2c65, + 0x3b => 0x023c, + 0x3d => 0x019a, + 0x3e => 0x2c66, + 0x41 => 0x0242, + 0x43 => 0x0180, + 0x44 => 0x0289, + 0x45 => 0x028c, + x @ _ if 0x46 <= x && x <= 0x4e => (from | 1), + _ => from + } + } + 0x03 => { + match low_byte { + 0x45 => 0x03b9, + x @ _ if 0x70 <= x && x <= 0x72 => (from | 1), + 0x76 => 0x0377, + 0x7f => 0x03f3, + 0x86 => 0x03ac, + x @ _ if 0x88 <= x && x <= 0x8a => from.wrapping_add(0x0025), + 0x8c => 0x03cc, + x @ _ if 0x8e <= x && x <= 0x8f => from.wrapping_add(0x003f), + 0x90 => return Fold::Three('\u{03b9}', '\u{0308}', '\u{0301}',), + x @ _ if 0x91 <= x && x <= 0xa1 => from.wrapping_add(0x0020), + x @ _ if 0xa3 <= x && x <= 0xab => from.wrapping_add(0x0020), + 0xb0 => return Fold::Three('\u{03c5}', '\u{0308}', '\u{0301}',), + 0xc2 => 0x03c3, + 0xcf => 0x03d7, + 0xd0 => 0x03b2, + 0xd1 => 0x03b8, + 0xd5 => 0x03c6, + 0xd6 => 0x03c0, + x @ _ if 0xd8 <= x && x <= 0xee => (from | 1), + 0xf0 => 0x03ba, + 0xf1 => 0x03c1, + 0xf4 => 0x03b8, + 0xf5 => 0x03b5, + 0xf7 => 0x03f8, + 0xf9 => 0x03f2, + 0xfa => 0x03fb, + x @ _ if 0xfd <= x => from.wrapping_sub(0x0082), + _ => from + } + } + 0x04 => { + match low_byte { + x @ _ if x <= 0x0f => from.wrapping_add(0x0050), + x @ _ if 0x10 <= x && x <= 0x2f => from.wrapping_add(0x0020), + x @ _ if 0x60 <= x && x <= 0x80 => (from | 1), + x @ _ if 0x8a <= x && x <= 0xbe => (from | 1), + 0xc0 => 0x04cf, + x @ _ if 0xc1 <= x && x <= 0xcd => ((from+1) & !1), + x @ _ if 0xd0 <= x => (from | 1), + _ => from + } + } + 0x05 => { + match low_byte { + x @ _ if x <= 0x2e => (from | 1), + x @ _ if 0x31 <= x && x <= 0x56 => from.wrapping_add(0x0030), + 0x87 => return Fold::Two('\u{0565}', '\u{0582}',), + _ => from + } + } + 0x06 => { + from + } + 0x07 => { + from + } + 0x08 => { + from + } + 0x09 => { + from + } + 0x0a => { + from + } + 0x0b => { + from + } + 0x0c => { + from + } + 0x0d => { + from + } + 0x0e => { + from + } + 0x0f => { + from + } + 0x10 => { + match low_byte { + x @ _ if 0xa0 <= x && x <= 0xc5 => from.wrapping_add(0x1c60), + 0xc7 => 0x2d27, + 0xcd => 0x2d2d, + _ => from + } + } + 0x11 => { + from + } + 0x12 => { + from + } + 0x13 => { + match low_byte { + x @ _ if 0xf8 <= x && x <= 0xfd => from.wrapping_sub(0x0008), + _ => from + } + } + 0x14 => { + from + } + 0x15 => { + from + } + 0x16 => { + from + } + 0x17 => { + from + } + 0x18 => { + from + } + 0x19 => { + from + } + 0x1a => { + from + } + 0x1b => { + from + } + 0x1c => { + match low_byte { + 0x80 => 0x0432, + 0x81 => 0x0434, + 0x82 => 0x043e, + x @ _ if 0x83 <= x && x <= 0x84 => from.wrapping_sub(0x1842), + 0x85 => 0x0442, + 0x86 => 0x044a, + 0x87 => 0x0463, + 0x88 => 0xa64b, + x @ _ if 0x90 <= x && x <= 0xba => from.wrapping_sub(0x0bc0), + x @ _ if 0xbd <= x && x <= 0xbf => from.wrapping_sub(0x0bc0), + _ => from + } + } + 0x1d => { + from + } + 0x1e => { + match low_byte { + x @ _ if x <= 0x94 => (from | 1), + 0x96 => return Fold::Two('\u{0068}', '\u{0331}',), + 0x97 => return Fold::Two('\u{0074}', '\u{0308}',), + 0x98 => return Fold::Two('\u{0077}', '\u{030a}',), + 0x99 => return Fold::Two('\u{0079}', '\u{030a}',), + 0x9a => return Fold::Two('\u{0061}', '\u{02be}',), + 0x9b => 0x1e61, + 0x9e => return Fold::Two('\u{0073}', '\u{0073}',), + x @ _ if 0xa0 <= x && x <= 0xfe => (from | 
1), + _ => from + } + } + 0x1f => { + match low_byte { + x @ _ if 0x08 <= x && x <= 0x0f => from.wrapping_sub(0x0008), + x @ _ if 0x18 <= x && x <= 0x1d => from.wrapping_sub(0x0008), + x @ _ if 0x28 <= x && x <= 0x2f => from.wrapping_sub(0x0008), + x @ _ if 0x38 <= x && x <= 0x3f => from.wrapping_sub(0x0008), + x @ _ if 0x48 <= x && x <= 0x4d => from.wrapping_sub(0x0008), + 0x50 => return Fold::Two('\u{03c5}', '\u{0313}',), + 0x52 => return Fold::Three('\u{03c5}', '\u{0313}', '\u{0300}',), + 0x54 => return Fold::Three('\u{03c5}', '\u{0313}', '\u{0301}',), + 0x56 => return Fold::Three('\u{03c5}', '\u{0313}', '\u{0342}',), + x @ _ if 0x59 <= x && x <= 0x5f => if (from & 1) == 1 { from.wrapping_sub(0x0008) } else { from }, + x @ _ if 0x68 <= x && x <= 0x6f => from.wrapping_sub(0x0008), + 0x80 => return Fold::Two('\u{1f00}', '\u{03b9}',), + 0x81 => return Fold::Two('\u{1f01}', '\u{03b9}',), + 0x82 => return Fold::Two('\u{1f02}', '\u{03b9}',), + 0x83 => return Fold::Two('\u{1f03}', '\u{03b9}',), + 0x84 => return Fold::Two('\u{1f04}', '\u{03b9}',), + 0x85 => return Fold::Two('\u{1f05}', '\u{03b9}',), + 0x86 => return Fold::Two('\u{1f06}', '\u{03b9}',), + 0x87 => return Fold::Two('\u{1f07}', '\u{03b9}',), + 0x88 => return Fold::Two('\u{1f00}', '\u{03b9}',), + 0x89 => return Fold::Two('\u{1f01}', '\u{03b9}',), + 0x8a => return Fold::Two('\u{1f02}', '\u{03b9}',), + 0x8b => return Fold::Two('\u{1f03}', '\u{03b9}',), + 0x8c => return Fold::Two('\u{1f04}', '\u{03b9}',), + 0x8d => return Fold::Two('\u{1f05}', '\u{03b9}',), + 0x8e => return Fold::Two('\u{1f06}', '\u{03b9}',), + 0x8f => return Fold::Two('\u{1f07}', '\u{03b9}',), + 0x90 => return Fold::Two('\u{1f20}', '\u{03b9}',), + 0x91 => return Fold::Two('\u{1f21}', '\u{03b9}',), + 0x92 => return Fold::Two('\u{1f22}', '\u{03b9}',), + 0x93 => return Fold::Two('\u{1f23}', '\u{03b9}',), + 0x94 => return Fold::Two('\u{1f24}', '\u{03b9}',), + 0x95 => return Fold::Two('\u{1f25}', '\u{03b9}',), + 0x96 => return Fold::Two('\u{1f26}', '\u{03b9}',), + 0x97 => return Fold::Two('\u{1f27}', '\u{03b9}',), + 0x98 => return Fold::Two('\u{1f20}', '\u{03b9}',), + 0x99 => return Fold::Two('\u{1f21}', '\u{03b9}',), + 0x9a => return Fold::Two('\u{1f22}', '\u{03b9}',), + 0x9b => return Fold::Two('\u{1f23}', '\u{03b9}',), + 0x9c => return Fold::Two('\u{1f24}', '\u{03b9}',), + 0x9d => return Fold::Two('\u{1f25}', '\u{03b9}',), + 0x9e => return Fold::Two('\u{1f26}', '\u{03b9}',), + 0x9f => return Fold::Two('\u{1f27}', '\u{03b9}',), + 0xa0 => return Fold::Two('\u{1f60}', '\u{03b9}',), + 0xa1 => return Fold::Two('\u{1f61}', '\u{03b9}',), + 0xa2 => return Fold::Two('\u{1f62}', '\u{03b9}',), + 0xa3 => return Fold::Two('\u{1f63}', '\u{03b9}',), + 0xa4 => return Fold::Two('\u{1f64}', '\u{03b9}',), + 0xa5 => return Fold::Two('\u{1f65}', '\u{03b9}',), + 0xa6 => return Fold::Two('\u{1f66}', '\u{03b9}',), + 0xa7 => return Fold::Two('\u{1f67}', '\u{03b9}',), + 0xa8 => return Fold::Two('\u{1f60}', '\u{03b9}',), + 0xa9 => return Fold::Two('\u{1f61}', '\u{03b9}',), + 0xaa => return Fold::Two('\u{1f62}', '\u{03b9}',), + 0xab => return Fold::Two('\u{1f63}', '\u{03b9}',), + 0xac => return Fold::Two('\u{1f64}', '\u{03b9}',), + 0xad => return Fold::Two('\u{1f65}', '\u{03b9}',), + 0xae => return Fold::Two('\u{1f66}', '\u{03b9}',), + 0xaf => return Fold::Two('\u{1f67}', '\u{03b9}',), + 0xb2 => return Fold::Two('\u{1f70}', '\u{03b9}',), + 0xb3 => return Fold::Two('\u{03b1}', '\u{03b9}',), + 0xb4 => return Fold::Two('\u{03ac}', '\u{03b9}',), + 0xb6 => return Fold::Two('\u{03b1}', '\u{0342}',), + 
0xb7 => return Fold::Three('\u{03b1}', '\u{0342}', '\u{03b9}',), + x @ _ if 0xb8 <= x && x <= 0xb9 => from.wrapping_sub(0x0008), + x @ _ if 0xba <= x && x <= 0xbb => from.wrapping_sub(0x004a), + 0xbc => return Fold::Two('\u{03b1}', '\u{03b9}',), + 0xbe => 0x03b9, + 0xc2 => return Fold::Two('\u{1f74}', '\u{03b9}',), + 0xc3 => return Fold::Two('\u{03b7}', '\u{03b9}',), + 0xc4 => return Fold::Two('\u{03ae}', '\u{03b9}',), + 0xc6 => return Fold::Two('\u{03b7}', '\u{0342}',), + 0xc7 => return Fold::Three('\u{03b7}', '\u{0342}', '\u{03b9}',), + x @ _ if 0xc8 <= x && x <= 0xcb => from.wrapping_sub(0x0056), + 0xcc => return Fold::Two('\u{03b7}', '\u{03b9}',), + 0xd2 => return Fold::Three('\u{03b9}', '\u{0308}', '\u{0300}',), + 0xd3 => return Fold::Three('\u{03b9}', '\u{0308}', '\u{0301}',), + 0xd6 => return Fold::Two('\u{03b9}', '\u{0342}',), + 0xd7 => return Fold::Three('\u{03b9}', '\u{0308}', '\u{0342}',), + x @ _ if 0xd8 <= x && x <= 0xd9 => from.wrapping_sub(0x0008), + x @ _ if 0xda <= x && x <= 0xdb => from.wrapping_sub(0x0064), + 0xe2 => return Fold::Three('\u{03c5}', '\u{0308}', '\u{0300}',), + 0xe3 => return Fold::Three('\u{03c5}', '\u{0308}', '\u{0301}',), + 0xe4 => return Fold::Two('\u{03c1}', '\u{0313}',), + 0xe6 => return Fold::Two('\u{03c5}', '\u{0342}',), + 0xe7 => return Fold::Three('\u{03c5}', '\u{0308}', '\u{0342}',), + x @ _ if 0xe8 <= x && x <= 0xe9 => from.wrapping_sub(0x0008), + x @ _ if 0xea <= x && x <= 0xeb => from.wrapping_sub(0x0070), + 0xec => 0x1fe5, + 0xf2 => return Fold::Two('\u{1f7c}', '\u{03b9}',), + 0xf3 => return Fold::Two('\u{03c9}', '\u{03b9}',), + 0xf4 => return Fold::Two('\u{03ce}', '\u{03b9}',), + 0xf6 => return Fold::Two('\u{03c9}', '\u{0342}',), + 0xf7 => return Fold::Three('\u{03c9}', '\u{0342}', '\u{03b9}',), + x @ _ if 0xf8 <= x && x <= 0xf9 => from.wrapping_sub(0x0080), + x @ _ if 0xfa <= x && x <= 0xfb => from.wrapping_sub(0x007e), + 0xfc => return Fold::Two('\u{03c9}', '\u{03b9}',), + _ => from + } + } + 0x20 => { + from + } + 0x21 => { + match low_byte { + 0x26 => 0x03c9, + 0x2a => 0x006b, + 0x2b => 0x00e5, + 0x32 => 0x214e, + x @ _ if 0x60 <= x && x <= 0x6f => from.wrapping_add(0x0010), + 0x83 => 0x2184, + _ => from + } + } + 0x22 => { + from + } + 0x23 => { + from + } + 0x24 => { + match low_byte { + x @ _ if 0xb6 <= x && x <= 0xcf => from.wrapping_add(0x001a), + _ => from + } + } + 0x25 => { + from + } + 0x26 => { + from + } + 0x27 => { + from + } + 0x28 => { + from + } + 0x29 => { + from + } + 0x2a => { + from + } + 0x2b => { + from + } + 0x2c => { + match low_byte { + x @ _ if x <= 0x2e => from.wrapping_add(0x0030), + 0x60 => 0x2c61, + 0x62 => 0x026b, + 0x63 => 0x1d7d, + 0x64 => 0x027d, + x @ _ if 0x67 <= x && x <= 0x6b => ((from+1) & !1), + 0x6d => 0x0251, + 0x6e => 0x0271, + 0x6f => 0x0250, + 0x70 => 0x0252, + 0x72 => 0x2c73, + 0x75 => 0x2c76, + x @ _ if 0x7e <= x && x <= 0x7f => from.wrapping_sub(0x2a3f), + x @ _ if 0x80 <= x && x <= 0xe2 => (from | 1), + x @ _ if 0xeb <= x && x <= 0xed => ((from+1) & !1), + 0xf2 => 0x2cf3, + _ => from + } + } + _ => from + }; + Fold::One( char::from_u32(single_char as u32).unwrap_or(orig) ) + } else { + let single_char: u32 = match from { + x @ _ if 0xa640 <= x && x <= 0xa66c => (from | 1), + x @ _ if 0xa680 <= x && x <= 0xa69a => (from | 1), + x @ _ if 0xa722 <= x && x <= 0xa72e => (from | 1), + x @ _ if 0xa732 <= x && x <= 0xa76e => (from | 1), + x @ _ if 0xa779 <= x && x <= 0xa77b => ((from+1) & !1), + 0xa77d => 0x1d79, + x @ _ if 0xa77e <= x && x <= 0xa786 => (from | 1), + 0xa78b => 0xa78c, + 0xa78d => 
0x0265, + x @ _ if 0xa790 <= x && x <= 0xa792 => (from | 1), + x @ _ if 0xa796 <= x && x <= 0xa7a8 => (from | 1), + 0xa7aa => 0x0266, + 0xa7ab => 0x025c, + 0xa7ac => 0x0261, + 0xa7ad => 0x026c, + 0xa7ae => 0x026a, + 0xa7b0 => 0x029e, + 0xa7b1 => 0x0287, + 0xa7b2 => 0x029d, + 0xa7b3 => 0xab53, + x @ _ if 0xa7b4 <= x && x <= 0xa7be => (from | 1), + 0xa7c2 => 0xa7c3, + 0xa7c4 => 0xa794, + 0xa7c5 => 0x0282, + 0xa7c6 => 0x1d8e, + x @ _ if 0xab70 <= x && x <= 0xabbf => from.wrapping_sub(0x97d0), + 0xfb00 => return Fold::Two('\u{0066}', '\u{0066}',), + 0xfb01 => return Fold::Two('\u{0066}', '\u{0069}',), + 0xfb02 => return Fold::Two('\u{0066}', '\u{006c}',), + 0xfb03 => return Fold::Three('\u{0066}', '\u{0066}', '\u{0069}',), + 0xfb04 => return Fold::Three('\u{0066}', '\u{0066}', '\u{006c}',), + 0xfb05 => return Fold::Two('\u{0073}', '\u{0074}',), + 0xfb06 => return Fold::Two('\u{0073}', '\u{0074}',), + 0xfb13 => return Fold::Two('\u{0574}', '\u{0576}',), + 0xfb14 => return Fold::Two('\u{0574}', '\u{0565}',), + 0xfb15 => return Fold::Two('\u{0574}', '\u{056b}',), + 0xfb16 => return Fold::Two('\u{057e}', '\u{0576}',), + 0xfb17 => return Fold::Two('\u{0574}', '\u{056d}',), + x @ _ if 0xff21 <= x && x <= 0xff3a => from.wrapping_add(0x0020), + x @ _ if 0x10400 <= x && x <= 0x10427 => from.wrapping_add(0x0028), + x @ _ if 0x104b0 <= x && x <= 0x104d3 => from.wrapping_add(0x0028), + x @ _ if 0x10c80 <= x && x <= 0x10cb2 => from.wrapping_add(0x0040), + x @ _ if 0x118a0 <= x && x <= 0x118bf => from.wrapping_add(0x0020), + x @ _ if 0x16e40 <= x && x <= 0x16e5f => from.wrapping_add(0x0020), + x @ _ if 0x1e900 <= x && x <= 0x1e921 => from.wrapping_add(0x0022), + _ => from + }; + Fold::One( char::from_u32(single_char).unwrap_or(orig) ) + } +} + +#[test] +fn lookup_consistency() { + fn lookup_naive(orig: char) -> Fold { + let single_char = match orig as u32 { + 0x0041 => 0x0061, + 0x0042 => 0x0062, + 0x0043 => 0x0063, + 0x0044 => 0x0064, + 0x0045 => 0x0065, + 0x0046 => 0x0066, + 0x0047 => 0x0067, + 0x0048 => 0x0068, + 0x0049 => 0x0069, + 0x004a => 0x006a, + 0x004b => 0x006b, + 0x004c => 0x006c, + 0x004d => 0x006d, + 0x004e => 0x006e, + 0x004f => 0x006f, + 0x0050 => 0x0070, + 0x0051 => 0x0071, + 0x0052 => 0x0072, + 0x0053 => 0x0073, + 0x0054 => 0x0074, + 0x0055 => 0x0075, + 0x0056 => 0x0076, + 0x0057 => 0x0077, + 0x0058 => 0x0078, + 0x0059 => 0x0079, + 0x005a => 0x007a, + 0x00b5 => 0x03bc, + 0x00c0 => 0x00e0, + 0x00c1 => 0x00e1, + 0x00c2 => 0x00e2, + 0x00c3 => 0x00e3, + 0x00c4 => 0x00e4, + 0x00c5 => 0x00e5, + 0x00c6 => 0x00e6, + 0x00c7 => 0x00e7, + 0x00c8 => 0x00e8, + 0x00c9 => 0x00e9, + 0x00ca => 0x00ea, + 0x00cb => 0x00eb, + 0x00cc => 0x00ec, + 0x00cd => 0x00ed, + 0x00ce => 0x00ee, + 0x00cf => 0x00ef, + 0x00d0 => 0x00f0, + 0x00d1 => 0x00f1, + 0x00d2 => 0x00f2, + 0x00d3 => 0x00f3, + 0x00d4 => 0x00f4, + 0x00d5 => 0x00f5, + 0x00d6 => 0x00f6, + 0x00d8 => 0x00f8, + 0x00d9 => 0x00f9, + 0x00da => 0x00fa, + 0x00db => 0x00fb, + 0x00dc => 0x00fc, + 0x00dd => 0x00fd, + 0x00de => 0x00fe, + 0x00df => return Fold::Two('\u{0073}', '\u{0073}',), + 0x0100 => 0x0101, + 0x0102 => 0x0103, + 0x0104 => 0x0105, + 0x0106 => 0x0107, + 0x0108 => 0x0109, + 0x010a => 0x010b, + 0x010c => 0x010d, + 0x010e => 0x010f, + 0x0110 => 0x0111, + 0x0112 => 0x0113, + 0x0114 => 0x0115, + 0x0116 => 0x0117, + 0x0118 => 0x0119, + 0x011a => 0x011b, + 0x011c => 0x011d, + 0x011e => 0x011f, + 0x0120 => 0x0121, + 0x0122 => 0x0123, + 0x0124 => 0x0125, + 0x0126 => 0x0127, + 0x0128 => 0x0129, + 0x012a => 0x012b, + 0x012c => 0x012d, + 0x012e => 0x012f, + 
0x0130 => return Fold::Two('\u{0069}', '\u{0307}',), + 0x0132 => 0x0133, + 0x0134 => 0x0135, + 0x0136 => 0x0137, + 0x0139 => 0x013a, + 0x013b => 0x013c, + 0x013d => 0x013e, + 0x013f => 0x0140, + 0x0141 => 0x0142, + 0x0143 => 0x0144, + 0x0145 => 0x0146, + 0x0147 => 0x0148, + 0x0149 => return Fold::Two('\u{02bc}', '\u{006e}',), + 0x014a => 0x014b, + 0x014c => 0x014d, + 0x014e => 0x014f, + 0x0150 => 0x0151, + 0x0152 => 0x0153, + 0x0154 => 0x0155, + 0x0156 => 0x0157, + 0x0158 => 0x0159, + 0x015a => 0x015b, + 0x015c => 0x015d, + 0x015e => 0x015f, + 0x0160 => 0x0161, + 0x0162 => 0x0163, + 0x0164 => 0x0165, + 0x0166 => 0x0167, + 0x0168 => 0x0169, + 0x016a => 0x016b, + 0x016c => 0x016d, + 0x016e => 0x016f, + 0x0170 => 0x0171, + 0x0172 => 0x0173, + 0x0174 => 0x0175, + 0x0176 => 0x0177, + 0x0178 => 0x00ff, + 0x0179 => 0x017a, + 0x017b => 0x017c, + 0x017d => 0x017e, + 0x017f => 0x0073, + 0x0181 => 0x0253, + 0x0182 => 0x0183, + 0x0184 => 0x0185, + 0x0186 => 0x0254, + 0x0187 => 0x0188, + 0x0189 => 0x0256, + 0x018a => 0x0257, + 0x018b => 0x018c, + 0x018e => 0x01dd, + 0x018f => 0x0259, + 0x0190 => 0x025b, + 0x0191 => 0x0192, + 0x0193 => 0x0260, + 0x0194 => 0x0263, + 0x0196 => 0x0269, + 0x0197 => 0x0268, + 0x0198 => 0x0199, + 0x019c => 0x026f, + 0x019d => 0x0272, + 0x019f => 0x0275, + 0x01a0 => 0x01a1, + 0x01a2 => 0x01a3, + 0x01a4 => 0x01a5, + 0x01a6 => 0x0280, + 0x01a7 => 0x01a8, + 0x01a9 => 0x0283, + 0x01ac => 0x01ad, + 0x01ae => 0x0288, + 0x01af => 0x01b0, + 0x01b1 => 0x028a, + 0x01b2 => 0x028b, + 0x01b3 => 0x01b4, + 0x01b5 => 0x01b6, + 0x01b7 => 0x0292, + 0x01b8 => 0x01b9, + 0x01bc => 0x01bd, + 0x01c4 => 0x01c6, + 0x01c5 => 0x01c6, + 0x01c7 => 0x01c9, + 0x01c8 => 0x01c9, + 0x01ca => 0x01cc, + 0x01cb => 0x01cc, + 0x01cd => 0x01ce, + 0x01cf => 0x01d0, + 0x01d1 => 0x01d2, + 0x01d3 => 0x01d4, + 0x01d5 => 0x01d6, + 0x01d7 => 0x01d8, + 0x01d9 => 0x01da, + 0x01db => 0x01dc, + 0x01de => 0x01df, + 0x01e0 => 0x01e1, + 0x01e2 => 0x01e3, + 0x01e4 => 0x01e5, + 0x01e6 => 0x01e7, + 0x01e8 => 0x01e9, + 0x01ea => 0x01eb, + 0x01ec => 0x01ed, + 0x01ee => 0x01ef, + 0x01f0 => return Fold::Two('\u{006a}', '\u{030c}',), + 0x01f1 => 0x01f3, + 0x01f2 => 0x01f3, + 0x01f4 => 0x01f5, + 0x01f6 => 0x0195, + 0x01f7 => 0x01bf, + 0x01f8 => 0x01f9, + 0x01fa => 0x01fb, + 0x01fc => 0x01fd, + 0x01fe => 0x01ff, + 0x0200 => 0x0201, + 0x0202 => 0x0203, + 0x0204 => 0x0205, + 0x0206 => 0x0207, + 0x0208 => 0x0209, + 0x020a => 0x020b, + 0x020c => 0x020d, + 0x020e => 0x020f, + 0x0210 => 0x0211, + 0x0212 => 0x0213, + 0x0214 => 0x0215, + 0x0216 => 0x0217, + 0x0218 => 0x0219, + 0x021a => 0x021b, + 0x021c => 0x021d, + 0x021e => 0x021f, + 0x0220 => 0x019e, + 0x0222 => 0x0223, + 0x0224 => 0x0225, + 0x0226 => 0x0227, + 0x0228 => 0x0229, + 0x022a => 0x022b, + 0x022c => 0x022d, + 0x022e => 0x022f, + 0x0230 => 0x0231, + 0x0232 => 0x0233, + 0x023a => 0x2c65, + 0x023b => 0x023c, + 0x023d => 0x019a, + 0x023e => 0x2c66, + 0x0241 => 0x0242, + 0x0243 => 0x0180, + 0x0244 => 0x0289, + 0x0245 => 0x028c, + 0x0246 => 0x0247, + 0x0248 => 0x0249, + 0x024a => 0x024b, + 0x024c => 0x024d, + 0x024e => 0x024f, + 0x0345 => 0x03b9, + 0x0370 => 0x0371, + 0x0372 => 0x0373, + 0x0376 => 0x0377, + 0x037f => 0x03f3, + 0x0386 => 0x03ac, + 0x0388 => 0x03ad, + 0x0389 => 0x03ae, + 0x038a => 0x03af, + 0x038c => 0x03cc, + 0x038e => 0x03cd, + 0x038f => 0x03ce, + 0x0390 => return Fold::Three('\u{03b9}', '\u{0308}', '\u{0301}',), + 0x0391 => 0x03b1, + 0x0392 => 0x03b2, + 0x0393 => 0x03b3, + 0x0394 => 0x03b4, + 0x0395 => 0x03b5, + 0x0396 => 0x03b6, + 0x0397 => 0x03b7, + 0x0398 => 0x03b8, + 
0x0399 => 0x03b9, + 0x039a => 0x03ba, + 0x039b => 0x03bb, + 0x039c => 0x03bc, + 0x039d => 0x03bd, + 0x039e => 0x03be, + 0x039f => 0x03bf, + 0x03a0 => 0x03c0, + 0x03a1 => 0x03c1, + 0x03a3 => 0x03c3, + 0x03a4 => 0x03c4, + 0x03a5 => 0x03c5, + 0x03a6 => 0x03c6, + 0x03a7 => 0x03c7, + 0x03a8 => 0x03c8, + 0x03a9 => 0x03c9, + 0x03aa => 0x03ca, + 0x03ab => 0x03cb, + 0x03b0 => return Fold::Three('\u{03c5}', '\u{0308}', '\u{0301}',), + 0x03c2 => 0x03c3, + 0x03cf => 0x03d7, + 0x03d0 => 0x03b2, + 0x03d1 => 0x03b8, + 0x03d5 => 0x03c6, + 0x03d6 => 0x03c0, + 0x03d8 => 0x03d9, + 0x03da => 0x03db, + 0x03dc => 0x03dd, + 0x03de => 0x03df, + 0x03e0 => 0x03e1, + 0x03e2 => 0x03e3, + 0x03e4 => 0x03e5, + 0x03e6 => 0x03e7, + 0x03e8 => 0x03e9, + 0x03ea => 0x03eb, + 0x03ec => 0x03ed, + 0x03ee => 0x03ef, + 0x03f0 => 0x03ba, + 0x03f1 => 0x03c1, + 0x03f4 => 0x03b8, + 0x03f5 => 0x03b5, + 0x03f7 => 0x03f8, + 0x03f9 => 0x03f2, + 0x03fa => 0x03fb, + 0x03fd => 0x037b, + 0x03fe => 0x037c, + 0x03ff => 0x037d, + 0x0400 => 0x0450, + 0x0401 => 0x0451, + 0x0402 => 0x0452, + 0x0403 => 0x0453, + 0x0404 => 0x0454, + 0x0405 => 0x0455, + 0x0406 => 0x0456, + 0x0407 => 0x0457, + 0x0408 => 0x0458, + 0x0409 => 0x0459, + 0x040a => 0x045a, + 0x040b => 0x045b, + 0x040c => 0x045c, + 0x040d => 0x045d, + 0x040e => 0x045e, + 0x040f => 0x045f, + 0x0410 => 0x0430, + 0x0411 => 0x0431, + 0x0412 => 0x0432, + 0x0413 => 0x0433, + 0x0414 => 0x0434, + 0x0415 => 0x0435, + 0x0416 => 0x0436, + 0x0417 => 0x0437, + 0x0418 => 0x0438, + 0x0419 => 0x0439, + 0x041a => 0x043a, + 0x041b => 0x043b, + 0x041c => 0x043c, + 0x041d => 0x043d, + 0x041e => 0x043e, + 0x041f => 0x043f, + 0x0420 => 0x0440, + 0x0421 => 0x0441, + 0x0422 => 0x0442, + 0x0423 => 0x0443, + 0x0424 => 0x0444, + 0x0425 => 0x0445, + 0x0426 => 0x0446, + 0x0427 => 0x0447, + 0x0428 => 0x0448, + 0x0429 => 0x0449, + 0x042a => 0x044a, + 0x042b => 0x044b, + 0x042c => 0x044c, + 0x042d => 0x044d, + 0x042e => 0x044e, + 0x042f => 0x044f, + 0x0460 => 0x0461, + 0x0462 => 0x0463, + 0x0464 => 0x0465, + 0x0466 => 0x0467, + 0x0468 => 0x0469, + 0x046a => 0x046b, + 0x046c => 0x046d, + 0x046e => 0x046f, + 0x0470 => 0x0471, + 0x0472 => 0x0473, + 0x0474 => 0x0475, + 0x0476 => 0x0477, + 0x0478 => 0x0479, + 0x047a => 0x047b, + 0x047c => 0x047d, + 0x047e => 0x047f, + 0x0480 => 0x0481, + 0x048a => 0x048b, + 0x048c => 0x048d, + 0x048e => 0x048f, + 0x0490 => 0x0491, + 0x0492 => 0x0493, + 0x0494 => 0x0495, + 0x0496 => 0x0497, + 0x0498 => 0x0499, + 0x049a => 0x049b, + 0x049c => 0x049d, + 0x049e => 0x049f, + 0x04a0 => 0x04a1, + 0x04a2 => 0x04a3, + 0x04a4 => 0x04a5, + 0x04a6 => 0x04a7, + 0x04a8 => 0x04a9, + 0x04aa => 0x04ab, + 0x04ac => 0x04ad, + 0x04ae => 0x04af, + 0x04b0 => 0x04b1, + 0x04b2 => 0x04b3, + 0x04b4 => 0x04b5, + 0x04b6 => 0x04b7, + 0x04b8 => 0x04b9, + 0x04ba => 0x04bb, + 0x04bc => 0x04bd, + 0x04be => 0x04bf, + 0x04c0 => 0x04cf, + 0x04c1 => 0x04c2, + 0x04c3 => 0x04c4, + 0x04c5 => 0x04c6, + 0x04c7 => 0x04c8, + 0x04c9 => 0x04ca, + 0x04cb => 0x04cc, + 0x04cd => 0x04ce, + 0x04d0 => 0x04d1, + 0x04d2 => 0x04d3, + 0x04d4 => 0x04d5, + 0x04d6 => 0x04d7, + 0x04d8 => 0x04d9, + 0x04da => 0x04db, + 0x04dc => 0x04dd, + 0x04de => 0x04df, + 0x04e0 => 0x04e1, + 0x04e2 => 0x04e3, + 0x04e4 => 0x04e5, + 0x04e6 => 0x04e7, + 0x04e8 => 0x04e9, + 0x04ea => 0x04eb, + 0x04ec => 0x04ed, + 0x04ee => 0x04ef, + 0x04f0 => 0x04f1, + 0x04f2 => 0x04f3, + 0x04f4 => 0x04f5, + 0x04f6 => 0x04f7, + 0x04f8 => 0x04f9, + 0x04fa => 0x04fb, + 0x04fc => 0x04fd, + 0x04fe => 0x04ff, + 0x0500 => 0x0501, + 0x0502 => 0x0503, + 0x0504 => 0x0505, + 0x0506 => 0x0507, + 
0x0508 => 0x0509, + 0x050a => 0x050b, + 0x050c => 0x050d, + 0x050e => 0x050f, + 0x0510 => 0x0511, + 0x0512 => 0x0513, + 0x0514 => 0x0515, + 0x0516 => 0x0517, + 0x0518 => 0x0519, + 0x051a => 0x051b, + 0x051c => 0x051d, + 0x051e => 0x051f, + 0x0520 => 0x0521, + 0x0522 => 0x0523, + 0x0524 => 0x0525, + 0x0526 => 0x0527, + 0x0528 => 0x0529, + 0x052a => 0x052b, + 0x052c => 0x052d, + 0x052e => 0x052f, + 0x0531 => 0x0561, + 0x0532 => 0x0562, + 0x0533 => 0x0563, + 0x0534 => 0x0564, + 0x0535 => 0x0565, + 0x0536 => 0x0566, + 0x0537 => 0x0567, + 0x0538 => 0x0568, + 0x0539 => 0x0569, + 0x053a => 0x056a, + 0x053b => 0x056b, + 0x053c => 0x056c, + 0x053d => 0x056d, + 0x053e => 0x056e, + 0x053f => 0x056f, + 0x0540 => 0x0570, + 0x0541 => 0x0571, + 0x0542 => 0x0572, + 0x0543 => 0x0573, + 0x0544 => 0x0574, + 0x0545 => 0x0575, + 0x0546 => 0x0576, + 0x0547 => 0x0577, + 0x0548 => 0x0578, + 0x0549 => 0x0579, + 0x054a => 0x057a, + 0x054b => 0x057b, + 0x054c => 0x057c, + 0x054d => 0x057d, + 0x054e => 0x057e, + 0x054f => 0x057f, + 0x0550 => 0x0580, + 0x0551 => 0x0581, + 0x0552 => 0x0582, + 0x0553 => 0x0583, + 0x0554 => 0x0584, + 0x0555 => 0x0585, + 0x0556 => 0x0586, + 0x0587 => return Fold::Two('\u{0565}', '\u{0582}',), + 0x10a0 => 0x2d00, + 0x10a1 => 0x2d01, + 0x10a2 => 0x2d02, + 0x10a3 => 0x2d03, + 0x10a4 => 0x2d04, + 0x10a5 => 0x2d05, + 0x10a6 => 0x2d06, + 0x10a7 => 0x2d07, + 0x10a8 => 0x2d08, + 0x10a9 => 0x2d09, + 0x10aa => 0x2d0a, + 0x10ab => 0x2d0b, + 0x10ac => 0x2d0c, + 0x10ad => 0x2d0d, + 0x10ae => 0x2d0e, + 0x10af => 0x2d0f, + 0x10b0 => 0x2d10, + 0x10b1 => 0x2d11, + 0x10b2 => 0x2d12, + 0x10b3 => 0x2d13, + 0x10b4 => 0x2d14, + 0x10b5 => 0x2d15, + 0x10b6 => 0x2d16, + 0x10b7 => 0x2d17, + 0x10b8 => 0x2d18, + 0x10b9 => 0x2d19, + 0x10ba => 0x2d1a, + 0x10bb => 0x2d1b, + 0x10bc => 0x2d1c, + 0x10bd => 0x2d1d, + 0x10be => 0x2d1e, + 0x10bf => 0x2d1f, + 0x10c0 => 0x2d20, + 0x10c1 => 0x2d21, + 0x10c2 => 0x2d22, + 0x10c3 => 0x2d23, + 0x10c4 => 0x2d24, + 0x10c5 => 0x2d25, + 0x10c7 => 0x2d27, + 0x10cd => 0x2d2d, + 0x13f8 => 0x13f0, + 0x13f9 => 0x13f1, + 0x13fa => 0x13f2, + 0x13fb => 0x13f3, + 0x13fc => 0x13f4, + 0x13fd => 0x13f5, + 0x1c80 => 0x0432, + 0x1c81 => 0x0434, + 0x1c82 => 0x043e, + 0x1c83 => 0x0441, + 0x1c84 => 0x0442, + 0x1c85 => 0x0442, + 0x1c86 => 0x044a, + 0x1c87 => 0x0463, + 0x1c88 => 0xa64b, + 0x1c90 => 0x10d0, + 0x1c91 => 0x10d1, + 0x1c92 => 0x10d2, + 0x1c93 => 0x10d3, + 0x1c94 => 0x10d4, + 0x1c95 => 0x10d5, + 0x1c96 => 0x10d6, + 0x1c97 => 0x10d7, + 0x1c98 => 0x10d8, + 0x1c99 => 0x10d9, + 0x1c9a => 0x10da, + 0x1c9b => 0x10db, + 0x1c9c => 0x10dc, + 0x1c9d => 0x10dd, + 0x1c9e => 0x10de, + 0x1c9f => 0x10df, + 0x1ca0 => 0x10e0, + 0x1ca1 => 0x10e1, + 0x1ca2 => 0x10e2, + 0x1ca3 => 0x10e3, + 0x1ca4 => 0x10e4, + 0x1ca5 => 0x10e5, + 0x1ca6 => 0x10e6, + 0x1ca7 => 0x10e7, + 0x1ca8 => 0x10e8, + 0x1ca9 => 0x10e9, + 0x1caa => 0x10ea, + 0x1cab => 0x10eb, + 0x1cac => 0x10ec, + 0x1cad => 0x10ed, + 0x1cae => 0x10ee, + 0x1caf => 0x10ef, + 0x1cb0 => 0x10f0, + 0x1cb1 => 0x10f1, + 0x1cb2 => 0x10f2, + 0x1cb3 => 0x10f3, + 0x1cb4 => 0x10f4, + 0x1cb5 => 0x10f5, + 0x1cb6 => 0x10f6, + 0x1cb7 => 0x10f7, + 0x1cb8 => 0x10f8, + 0x1cb9 => 0x10f9, + 0x1cba => 0x10fa, + 0x1cbd => 0x10fd, + 0x1cbe => 0x10fe, + 0x1cbf => 0x10ff, + 0x1e00 => 0x1e01, + 0x1e02 => 0x1e03, + 0x1e04 => 0x1e05, + 0x1e06 => 0x1e07, + 0x1e08 => 0x1e09, + 0x1e0a => 0x1e0b, + 0x1e0c => 0x1e0d, + 0x1e0e => 0x1e0f, + 0x1e10 => 0x1e11, + 0x1e12 => 0x1e13, + 0x1e14 => 0x1e15, + 0x1e16 => 0x1e17, + 0x1e18 => 0x1e19, + 0x1e1a => 0x1e1b, + 0x1e1c => 0x1e1d, + 0x1e1e => 0x1e1f, + 
0x1e20 => 0x1e21, + 0x1e22 => 0x1e23, + 0x1e24 => 0x1e25, + 0x1e26 => 0x1e27, + 0x1e28 => 0x1e29, + 0x1e2a => 0x1e2b, + 0x1e2c => 0x1e2d, + 0x1e2e => 0x1e2f, + 0x1e30 => 0x1e31, + 0x1e32 => 0x1e33, + 0x1e34 => 0x1e35, + 0x1e36 => 0x1e37, + 0x1e38 => 0x1e39, + 0x1e3a => 0x1e3b, + 0x1e3c => 0x1e3d, + 0x1e3e => 0x1e3f, + 0x1e40 => 0x1e41, + 0x1e42 => 0x1e43, + 0x1e44 => 0x1e45, + 0x1e46 => 0x1e47, + 0x1e48 => 0x1e49, + 0x1e4a => 0x1e4b, + 0x1e4c => 0x1e4d, + 0x1e4e => 0x1e4f, + 0x1e50 => 0x1e51, + 0x1e52 => 0x1e53, + 0x1e54 => 0x1e55, + 0x1e56 => 0x1e57, + 0x1e58 => 0x1e59, + 0x1e5a => 0x1e5b, + 0x1e5c => 0x1e5d, + 0x1e5e => 0x1e5f, + 0x1e60 => 0x1e61, + 0x1e62 => 0x1e63, + 0x1e64 => 0x1e65, + 0x1e66 => 0x1e67, + 0x1e68 => 0x1e69, + 0x1e6a => 0x1e6b, + 0x1e6c => 0x1e6d, + 0x1e6e => 0x1e6f, + 0x1e70 => 0x1e71, + 0x1e72 => 0x1e73, + 0x1e74 => 0x1e75, + 0x1e76 => 0x1e77, + 0x1e78 => 0x1e79, + 0x1e7a => 0x1e7b, + 0x1e7c => 0x1e7d, + 0x1e7e => 0x1e7f, + 0x1e80 => 0x1e81, + 0x1e82 => 0x1e83, + 0x1e84 => 0x1e85, + 0x1e86 => 0x1e87, + 0x1e88 => 0x1e89, + 0x1e8a => 0x1e8b, + 0x1e8c => 0x1e8d, + 0x1e8e => 0x1e8f, + 0x1e90 => 0x1e91, + 0x1e92 => 0x1e93, + 0x1e94 => 0x1e95, + 0x1e96 => return Fold::Two('\u{0068}', '\u{0331}',), + 0x1e97 => return Fold::Two('\u{0074}', '\u{0308}',), + 0x1e98 => return Fold::Two('\u{0077}', '\u{030a}',), + 0x1e99 => return Fold::Two('\u{0079}', '\u{030a}',), + 0x1e9a => return Fold::Two('\u{0061}', '\u{02be}',), + 0x1e9b => 0x1e61, + 0x1e9e => return Fold::Two('\u{0073}', '\u{0073}',), + 0x1ea0 => 0x1ea1, + 0x1ea2 => 0x1ea3, + 0x1ea4 => 0x1ea5, + 0x1ea6 => 0x1ea7, + 0x1ea8 => 0x1ea9, + 0x1eaa => 0x1eab, + 0x1eac => 0x1ead, + 0x1eae => 0x1eaf, + 0x1eb0 => 0x1eb1, + 0x1eb2 => 0x1eb3, + 0x1eb4 => 0x1eb5, + 0x1eb6 => 0x1eb7, + 0x1eb8 => 0x1eb9, + 0x1eba => 0x1ebb, + 0x1ebc => 0x1ebd, + 0x1ebe => 0x1ebf, + 0x1ec0 => 0x1ec1, + 0x1ec2 => 0x1ec3, + 0x1ec4 => 0x1ec5, + 0x1ec6 => 0x1ec7, + 0x1ec8 => 0x1ec9, + 0x1eca => 0x1ecb, + 0x1ecc => 0x1ecd, + 0x1ece => 0x1ecf, + 0x1ed0 => 0x1ed1, + 0x1ed2 => 0x1ed3, + 0x1ed4 => 0x1ed5, + 0x1ed6 => 0x1ed7, + 0x1ed8 => 0x1ed9, + 0x1eda => 0x1edb, + 0x1edc => 0x1edd, + 0x1ede => 0x1edf, + 0x1ee0 => 0x1ee1, + 0x1ee2 => 0x1ee3, + 0x1ee4 => 0x1ee5, + 0x1ee6 => 0x1ee7, + 0x1ee8 => 0x1ee9, + 0x1eea => 0x1eeb, + 0x1eec => 0x1eed, + 0x1eee => 0x1eef, + 0x1ef0 => 0x1ef1, + 0x1ef2 => 0x1ef3, + 0x1ef4 => 0x1ef5, + 0x1ef6 => 0x1ef7, + 0x1ef8 => 0x1ef9, + 0x1efa => 0x1efb, + 0x1efc => 0x1efd, + 0x1efe => 0x1eff, + 0x1f08 => 0x1f00, + 0x1f09 => 0x1f01, + 0x1f0a => 0x1f02, + 0x1f0b => 0x1f03, + 0x1f0c => 0x1f04, + 0x1f0d => 0x1f05, + 0x1f0e => 0x1f06, + 0x1f0f => 0x1f07, + 0x1f18 => 0x1f10, + 0x1f19 => 0x1f11, + 0x1f1a => 0x1f12, + 0x1f1b => 0x1f13, + 0x1f1c => 0x1f14, + 0x1f1d => 0x1f15, + 0x1f28 => 0x1f20, + 0x1f29 => 0x1f21, + 0x1f2a => 0x1f22, + 0x1f2b => 0x1f23, + 0x1f2c => 0x1f24, + 0x1f2d => 0x1f25, + 0x1f2e => 0x1f26, + 0x1f2f => 0x1f27, + 0x1f38 => 0x1f30, + 0x1f39 => 0x1f31, + 0x1f3a => 0x1f32, + 0x1f3b => 0x1f33, + 0x1f3c => 0x1f34, + 0x1f3d => 0x1f35, + 0x1f3e => 0x1f36, + 0x1f3f => 0x1f37, + 0x1f48 => 0x1f40, + 0x1f49 => 0x1f41, + 0x1f4a => 0x1f42, + 0x1f4b => 0x1f43, + 0x1f4c => 0x1f44, + 0x1f4d => 0x1f45, + 0x1f50 => return Fold::Two('\u{03c5}', '\u{0313}',), + 0x1f52 => return Fold::Three('\u{03c5}', '\u{0313}', '\u{0300}',), + 0x1f54 => return Fold::Three('\u{03c5}', '\u{0313}', '\u{0301}',), + 0x1f56 => return Fold::Three('\u{03c5}', '\u{0313}', '\u{0342}',), + 0x1f59 => 0x1f51, + 0x1f5b => 0x1f53, + 0x1f5d => 0x1f55, + 0x1f5f => 0x1f57, + 
0x1f68 => 0x1f60, + 0x1f69 => 0x1f61, + 0x1f6a => 0x1f62, + 0x1f6b => 0x1f63, + 0x1f6c => 0x1f64, + 0x1f6d => 0x1f65, + 0x1f6e => 0x1f66, + 0x1f6f => 0x1f67, + 0x1f80 => return Fold::Two('\u{1f00}', '\u{03b9}',), + 0x1f81 => return Fold::Two('\u{1f01}', '\u{03b9}',), + 0x1f82 => return Fold::Two('\u{1f02}', '\u{03b9}',), + 0x1f83 => return Fold::Two('\u{1f03}', '\u{03b9}',), + 0x1f84 => return Fold::Two('\u{1f04}', '\u{03b9}',), + 0x1f85 => return Fold::Two('\u{1f05}', '\u{03b9}',), + 0x1f86 => return Fold::Two('\u{1f06}', '\u{03b9}',), + 0x1f87 => return Fold::Two('\u{1f07}', '\u{03b9}',), + 0x1f88 => return Fold::Two('\u{1f00}', '\u{03b9}',), + 0x1f89 => return Fold::Two('\u{1f01}', '\u{03b9}',), + 0x1f8a => return Fold::Two('\u{1f02}', '\u{03b9}',), + 0x1f8b => return Fold::Two('\u{1f03}', '\u{03b9}',), + 0x1f8c => return Fold::Two('\u{1f04}', '\u{03b9}',), + 0x1f8d => return Fold::Two('\u{1f05}', '\u{03b9}',), + 0x1f8e => return Fold::Two('\u{1f06}', '\u{03b9}',), + 0x1f8f => return Fold::Two('\u{1f07}', '\u{03b9}',), + 0x1f90 => return Fold::Two('\u{1f20}', '\u{03b9}',), + 0x1f91 => return Fold::Two('\u{1f21}', '\u{03b9}',), + 0x1f92 => return Fold::Two('\u{1f22}', '\u{03b9}',), + 0x1f93 => return Fold::Two('\u{1f23}', '\u{03b9}',), + 0x1f94 => return Fold::Two('\u{1f24}', '\u{03b9}',), + 0x1f95 => return Fold::Two('\u{1f25}', '\u{03b9}',), + 0x1f96 => return Fold::Two('\u{1f26}', '\u{03b9}',), + 0x1f97 => return Fold::Two('\u{1f27}', '\u{03b9}',), + 0x1f98 => return Fold::Two('\u{1f20}', '\u{03b9}',), + 0x1f99 => return Fold::Two('\u{1f21}', '\u{03b9}',), + 0x1f9a => return Fold::Two('\u{1f22}', '\u{03b9}',), + 0x1f9b => return Fold::Two('\u{1f23}', '\u{03b9}',), + 0x1f9c => return Fold::Two('\u{1f24}', '\u{03b9}',), + 0x1f9d => return Fold::Two('\u{1f25}', '\u{03b9}',), + 0x1f9e => return Fold::Two('\u{1f26}', '\u{03b9}',), + 0x1f9f => return Fold::Two('\u{1f27}', '\u{03b9}',), + 0x1fa0 => return Fold::Two('\u{1f60}', '\u{03b9}',), + 0x1fa1 => return Fold::Two('\u{1f61}', '\u{03b9}',), + 0x1fa2 => return Fold::Two('\u{1f62}', '\u{03b9}',), + 0x1fa3 => return Fold::Two('\u{1f63}', '\u{03b9}',), + 0x1fa4 => return Fold::Two('\u{1f64}', '\u{03b9}',), + 0x1fa5 => return Fold::Two('\u{1f65}', '\u{03b9}',), + 0x1fa6 => return Fold::Two('\u{1f66}', '\u{03b9}',), + 0x1fa7 => return Fold::Two('\u{1f67}', '\u{03b9}',), + 0x1fa8 => return Fold::Two('\u{1f60}', '\u{03b9}',), + 0x1fa9 => return Fold::Two('\u{1f61}', '\u{03b9}',), + 0x1faa => return Fold::Two('\u{1f62}', '\u{03b9}',), + 0x1fab => return Fold::Two('\u{1f63}', '\u{03b9}',), + 0x1fac => return Fold::Two('\u{1f64}', '\u{03b9}',), + 0x1fad => return Fold::Two('\u{1f65}', '\u{03b9}',), + 0x1fae => return Fold::Two('\u{1f66}', '\u{03b9}',), + 0x1faf => return Fold::Two('\u{1f67}', '\u{03b9}',), + 0x1fb2 => return Fold::Two('\u{1f70}', '\u{03b9}',), + 0x1fb3 => return Fold::Two('\u{03b1}', '\u{03b9}',), + 0x1fb4 => return Fold::Two('\u{03ac}', '\u{03b9}',), + 0x1fb6 => return Fold::Two('\u{03b1}', '\u{0342}',), + 0x1fb7 => return Fold::Three('\u{03b1}', '\u{0342}', '\u{03b9}',), + 0x1fb8 => 0x1fb0, + 0x1fb9 => 0x1fb1, + 0x1fba => 0x1f70, + 0x1fbb => 0x1f71, + 0x1fbc => return Fold::Two('\u{03b1}', '\u{03b9}',), + 0x1fbe => 0x03b9, + 0x1fc2 => return Fold::Two('\u{1f74}', '\u{03b9}',), + 0x1fc3 => return Fold::Two('\u{03b7}', '\u{03b9}',), + 0x1fc4 => return Fold::Two('\u{03ae}', '\u{03b9}',), + 0x1fc6 => return Fold::Two('\u{03b7}', '\u{0342}',), + 0x1fc7 => return Fold::Three('\u{03b7}', '\u{0342}', '\u{03b9}',), + 0x1fc8 => 0x1f72, + 
0x1fc9 => 0x1f73, + 0x1fca => 0x1f74, + 0x1fcb => 0x1f75, + 0x1fcc => return Fold::Two('\u{03b7}', '\u{03b9}',), + 0x1fd2 => return Fold::Three('\u{03b9}', '\u{0308}', '\u{0300}',), + 0x1fd3 => return Fold::Three('\u{03b9}', '\u{0308}', '\u{0301}',), + 0x1fd6 => return Fold::Two('\u{03b9}', '\u{0342}',), + 0x1fd7 => return Fold::Three('\u{03b9}', '\u{0308}', '\u{0342}',), + 0x1fd8 => 0x1fd0, + 0x1fd9 => 0x1fd1, + 0x1fda => 0x1f76, + 0x1fdb => 0x1f77, + 0x1fe2 => return Fold::Three('\u{03c5}', '\u{0308}', '\u{0300}',), + 0x1fe3 => return Fold::Three('\u{03c5}', '\u{0308}', '\u{0301}',), + 0x1fe4 => return Fold::Two('\u{03c1}', '\u{0313}',), + 0x1fe6 => return Fold::Two('\u{03c5}', '\u{0342}',), + 0x1fe7 => return Fold::Three('\u{03c5}', '\u{0308}', '\u{0342}',), + 0x1fe8 => 0x1fe0, + 0x1fe9 => 0x1fe1, + 0x1fea => 0x1f7a, + 0x1feb => 0x1f7b, + 0x1fec => 0x1fe5, + 0x1ff2 => return Fold::Two('\u{1f7c}', '\u{03b9}',), + 0x1ff3 => return Fold::Two('\u{03c9}', '\u{03b9}',), + 0x1ff4 => return Fold::Two('\u{03ce}', '\u{03b9}',), + 0x1ff6 => return Fold::Two('\u{03c9}', '\u{0342}',), + 0x1ff7 => return Fold::Three('\u{03c9}', '\u{0342}', '\u{03b9}',), + 0x1ff8 => 0x1f78, + 0x1ff9 => 0x1f79, + 0x1ffa => 0x1f7c, + 0x1ffb => 0x1f7d, + 0x1ffc => return Fold::Two('\u{03c9}', '\u{03b9}',), + 0x2126 => 0x03c9, + 0x212a => 0x006b, + 0x212b => 0x00e5, + 0x2132 => 0x214e, + 0x2160 => 0x2170, + 0x2161 => 0x2171, + 0x2162 => 0x2172, + 0x2163 => 0x2173, + 0x2164 => 0x2174, + 0x2165 => 0x2175, + 0x2166 => 0x2176, + 0x2167 => 0x2177, + 0x2168 => 0x2178, + 0x2169 => 0x2179, + 0x216a => 0x217a, + 0x216b => 0x217b, + 0x216c => 0x217c, + 0x216d => 0x217d, + 0x216e => 0x217e, + 0x216f => 0x217f, + 0x2183 => 0x2184, + 0x24b6 => 0x24d0, + 0x24b7 => 0x24d1, + 0x24b8 => 0x24d2, + 0x24b9 => 0x24d3, + 0x24ba => 0x24d4, + 0x24bb => 0x24d5, + 0x24bc => 0x24d6, + 0x24bd => 0x24d7, + 0x24be => 0x24d8, + 0x24bf => 0x24d9, + 0x24c0 => 0x24da, + 0x24c1 => 0x24db, + 0x24c2 => 0x24dc, + 0x24c3 => 0x24dd, + 0x24c4 => 0x24de, + 0x24c5 => 0x24df, + 0x24c6 => 0x24e0, + 0x24c7 => 0x24e1, + 0x24c8 => 0x24e2, + 0x24c9 => 0x24e3, + 0x24ca => 0x24e4, + 0x24cb => 0x24e5, + 0x24cc => 0x24e6, + 0x24cd => 0x24e7, + 0x24ce => 0x24e8, + 0x24cf => 0x24e9, + 0x2c00 => 0x2c30, + 0x2c01 => 0x2c31, + 0x2c02 => 0x2c32, + 0x2c03 => 0x2c33, + 0x2c04 => 0x2c34, + 0x2c05 => 0x2c35, + 0x2c06 => 0x2c36, + 0x2c07 => 0x2c37, + 0x2c08 => 0x2c38, + 0x2c09 => 0x2c39, + 0x2c0a => 0x2c3a, + 0x2c0b => 0x2c3b, + 0x2c0c => 0x2c3c, + 0x2c0d => 0x2c3d, + 0x2c0e => 0x2c3e, + 0x2c0f => 0x2c3f, + 0x2c10 => 0x2c40, + 0x2c11 => 0x2c41, + 0x2c12 => 0x2c42, + 0x2c13 => 0x2c43, + 0x2c14 => 0x2c44, + 0x2c15 => 0x2c45, + 0x2c16 => 0x2c46, + 0x2c17 => 0x2c47, + 0x2c18 => 0x2c48, + 0x2c19 => 0x2c49, + 0x2c1a => 0x2c4a, + 0x2c1b => 0x2c4b, + 0x2c1c => 0x2c4c, + 0x2c1d => 0x2c4d, + 0x2c1e => 0x2c4e, + 0x2c1f => 0x2c4f, + 0x2c20 => 0x2c50, + 0x2c21 => 0x2c51, + 0x2c22 => 0x2c52, + 0x2c23 => 0x2c53, + 0x2c24 => 0x2c54, + 0x2c25 => 0x2c55, + 0x2c26 => 0x2c56, + 0x2c27 => 0x2c57, + 0x2c28 => 0x2c58, + 0x2c29 => 0x2c59, + 0x2c2a => 0x2c5a, + 0x2c2b => 0x2c5b, + 0x2c2c => 0x2c5c, + 0x2c2d => 0x2c5d, + 0x2c2e => 0x2c5e, + 0x2c60 => 0x2c61, + 0x2c62 => 0x026b, + 0x2c63 => 0x1d7d, + 0x2c64 => 0x027d, + 0x2c67 => 0x2c68, + 0x2c69 => 0x2c6a, + 0x2c6b => 0x2c6c, + 0x2c6d => 0x0251, + 0x2c6e => 0x0271, + 0x2c6f => 0x0250, + 0x2c70 => 0x0252, + 0x2c72 => 0x2c73, + 0x2c75 => 0x2c76, + 0x2c7e => 0x023f, + 0x2c7f => 0x0240, + 0x2c80 => 0x2c81, + 0x2c82 => 0x2c83, + 0x2c84 => 0x2c85, + 0x2c86 => 
0x2c87, + 0x2c88 => 0x2c89, + 0x2c8a => 0x2c8b, + 0x2c8c => 0x2c8d, + 0x2c8e => 0x2c8f, + 0x2c90 => 0x2c91, + 0x2c92 => 0x2c93, + 0x2c94 => 0x2c95, + 0x2c96 => 0x2c97, + 0x2c98 => 0x2c99, + 0x2c9a => 0x2c9b, + 0x2c9c => 0x2c9d, + 0x2c9e => 0x2c9f, + 0x2ca0 => 0x2ca1, + 0x2ca2 => 0x2ca3, + 0x2ca4 => 0x2ca5, + 0x2ca6 => 0x2ca7, + 0x2ca8 => 0x2ca9, + 0x2caa => 0x2cab, + 0x2cac => 0x2cad, + 0x2cae => 0x2caf, + 0x2cb0 => 0x2cb1, + 0x2cb2 => 0x2cb3, + 0x2cb4 => 0x2cb5, + 0x2cb6 => 0x2cb7, + 0x2cb8 => 0x2cb9, + 0x2cba => 0x2cbb, + 0x2cbc => 0x2cbd, + 0x2cbe => 0x2cbf, + 0x2cc0 => 0x2cc1, + 0x2cc2 => 0x2cc3, + 0x2cc4 => 0x2cc5, + 0x2cc6 => 0x2cc7, + 0x2cc8 => 0x2cc9, + 0x2cca => 0x2ccb, + 0x2ccc => 0x2ccd, + 0x2cce => 0x2ccf, + 0x2cd0 => 0x2cd1, + 0x2cd2 => 0x2cd3, + 0x2cd4 => 0x2cd5, + 0x2cd6 => 0x2cd7, + 0x2cd8 => 0x2cd9, + 0x2cda => 0x2cdb, + 0x2cdc => 0x2cdd, + 0x2cde => 0x2cdf, + 0x2ce0 => 0x2ce1, + 0x2ce2 => 0x2ce3, + 0x2ceb => 0x2cec, + 0x2ced => 0x2cee, + 0x2cf2 => 0x2cf3, + 0xa640 => 0xa641, + 0xa642 => 0xa643, + 0xa644 => 0xa645, + 0xa646 => 0xa647, + 0xa648 => 0xa649, + 0xa64a => 0xa64b, + 0xa64c => 0xa64d, + 0xa64e => 0xa64f, + 0xa650 => 0xa651, + 0xa652 => 0xa653, + 0xa654 => 0xa655, + 0xa656 => 0xa657, + 0xa658 => 0xa659, + 0xa65a => 0xa65b, + 0xa65c => 0xa65d, + 0xa65e => 0xa65f, + 0xa660 => 0xa661, + 0xa662 => 0xa663, + 0xa664 => 0xa665, + 0xa666 => 0xa667, + 0xa668 => 0xa669, + 0xa66a => 0xa66b, + 0xa66c => 0xa66d, + 0xa680 => 0xa681, + 0xa682 => 0xa683, + 0xa684 => 0xa685, + 0xa686 => 0xa687, + 0xa688 => 0xa689, + 0xa68a => 0xa68b, + 0xa68c => 0xa68d, + 0xa68e => 0xa68f, + 0xa690 => 0xa691, + 0xa692 => 0xa693, + 0xa694 => 0xa695, + 0xa696 => 0xa697, + 0xa698 => 0xa699, + 0xa69a => 0xa69b, + 0xa722 => 0xa723, + 0xa724 => 0xa725, + 0xa726 => 0xa727, + 0xa728 => 0xa729, + 0xa72a => 0xa72b, + 0xa72c => 0xa72d, + 0xa72e => 0xa72f, + 0xa732 => 0xa733, + 0xa734 => 0xa735, + 0xa736 => 0xa737, + 0xa738 => 0xa739, + 0xa73a => 0xa73b, + 0xa73c => 0xa73d, + 0xa73e => 0xa73f, + 0xa740 => 0xa741, + 0xa742 => 0xa743, + 0xa744 => 0xa745, + 0xa746 => 0xa747, + 0xa748 => 0xa749, + 0xa74a => 0xa74b, + 0xa74c => 0xa74d, + 0xa74e => 0xa74f, + 0xa750 => 0xa751, + 0xa752 => 0xa753, + 0xa754 => 0xa755, + 0xa756 => 0xa757, + 0xa758 => 0xa759, + 0xa75a => 0xa75b, + 0xa75c => 0xa75d, + 0xa75e => 0xa75f, + 0xa760 => 0xa761, + 0xa762 => 0xa763, + 0xa764 => 0xa765, + 0xa766 => 0xa767, + 0xa768 => 0xa769, + 0xa76a => 0xa76b, + 0xa76c => 0xa76d, + 0xa76e => 0xa76f, + 0xa779 => 0xa77a, + 0xa77b => 0xa77c, + 0xa77d => 0x1d79, + 0xa77e => 0xa77f, + 0xa780 => 0xa781, + 0xa782 => 0xa783, + 0xa784 => 0xa785, + 0xa786 => 0xa787, + 0xa78b => 0xa78c, + 0xa78d => 0x0265, + 0xa790 => 0xa791, + 0xa792 => 0xa793, + 0xa796 => 0xa797, + 0xa798 => 0xa799, + 0xa79a => 0xa79b, + 0xa79c => 0xa79d, + 0xa79e => 0xa79f, + 0xa7a0 => 0xa7a1, + 0xa7a2 => 0xa7a3, + 0xa7a4 => 0xa7a5, + 0xa7a6 => 0xa7a7, + 0xa7a8 => 0xa7a9, + 0xa7aa => 0x0266, + 0xa7ab => 0x025c, + 0xa7ac => 0x0261, + 0xa7ad => 0x026c, + 0xa7ae => 0x026a, + 0xa7b0 => 0x029e, + 0xa7b1 => 0x0287, + 0xa7b2 => 0x029d, + 0xa7b3 => 0xab53, + 0xa7b4 => 0xa7b5, + 0xa7b6 => 0xa7b7, + 0xa7b8 => 0xa7b9, + 0xa7ba => 0xa7bb, + 0xa7bc => 0xa7bd, + 0xa7be => 0xa7bf, + 0xa7c2 => 0xa7c3, + 0xa7c4 => 0xa794, + 0xa7c5 => 0x0282, + 0xa7c6 => 0x1d8e, + 0xab70 => 0x13a0, + 0xab71 => 0x13a1, + 0xab72 => 0x13a2, + 0xab73 => 0x13a3, + 0xab74 => 0x13a4, + 0xab75 => 0x13a5, + 0xab76 => 0x13a6, + 0xab77 => 0x13a7, + 0xab78 => 0x13a8, + 0xab79 => 0x13a9, + 0xab7a => 0x13aa, + 0xab7b => 0x13ab, + 
0xab7c => 0x13ac, + 0xab7d => 0x13ad, + 0xab7e => 0x13ae, + 0xab7f => 0x13af, + 0xab80 => 0x13b0, + 0xab81 => 0x13b1, + 0xab82 => 0x13b2, + 0xab83 => 0x13b3, + 0xab84 => 0x13b4, + 0xab85 => 0x13b5, + 0xab86 => 0x13b6, + 0xab87 => 0x13b7, + 0xab88 => 0x13b8, + 0xab89 => 0x13b9, + 0xab8a => 0x13ba, + 0xab8b => 0x13bb, + 0xab8c => 0x13bc, + 0xab8d => 0x13bd, + 0xab8e => 0x13be, + 0xab8f => 0x13bf, + 0xab90 => 0x13c0, + 0xab91 => 0x13c1, + 0xab92 => 0x13c2, + 0xab93 => 0x13c3, + 0xab94 => 0x13c4, + 0xab95 => 0x13c5, + 0xab96 => 0x13c6, + 0xab97 => 0x13c7, + 0xab98 => 0x13c8, + 0xab99 => 0x13c9, + 0xab9a => 0x13ca, + 0xab9b => 0x13cb, + 0xab9c => 0x13cc, + 0xab9d => 0x13cd, + 0xab9e => 0x13ce, + 0xab9f => 0x13cf, + 0xaba0 => 0x13d0, + 0xaba1 => 0x13d1, + 0xaba2 => 0x13d2, + 0xaba3 => 0x13d3, + 0xaba4 => 0x13d4, + 0xaba5 => 0x13d5, + 0xaba6 => 0x13d6, + 0xaba7 => 0x13d7, + 0xaba8 => 0x13d8, + 0xaba9 => 0x13d9, + 0xabaa => 0x13da, + 0xabab => 0x13db, + 0xabac => 0x13dc, + 0xabad => 0x13dd, + 0xabae => 0x13de, + 0xabaf => 0x13df, + 0xabb0 => 0x13e0, + 0xabb1 => 0x13e1, + 0xabb2 => 0x13e2, + 0xabb3 => 0x13e3, + 0xabb4 => 0x13e4, + 0xabb5 => 0x13e5, + 0xabb6 => 0x13e6, + 0xabb7 => 0x13e7, + 0xabb8 => 0x13e8, + 0xabb9 => 0x13e9, + 0xabba => 0x13ea, + 0xabbb => 0x13eb, + 0xabbc => 0x13ec, + 0xabbd => 0x13ed, + 0xabbe => 0x13ee, + 0xabbf => 0x13ef, + 0xfb00 => return Fold::Two('\u{0066}', '\u{0066}',), + 0xfb01 => return Fold::Two('\u{0066}', '\u{0069}',), + 0xfb02 => return Fold::Two('\u{0066}', '\u{006c}',), + 0xfb03 => return Fold::Three('\u{0066}', '\u{0066}', '\u{0069}',), + 0xfb04 => return Fold::Three('\u{0066}', '\u{0066}', '\u{006c}',), + 0xfb05 => return Fold::Two('\u{0073}', '\u{0074}',), + 0xfb06 => return Fold::Two('\u{0073}', '\u{0074}',), + 0xfb13 => return Fold::Two('\u{0574}', '\u{0576}',), + 0xfb14 => return Fold::Two('\u{0574}', '\u{0565}',), + 0xfb15 => return Fold::Two('\u{0574}', '\u{056b}',), + 0xfb16 => return Fold::Two('\u{057e}', '\u{0576}',), + 0xfb17 => return Fold::Two('\u{0574}', '\u{056d}',), + 0xff21 => 0xff41, + 0xff22 => 0xff42, + 0xff23 => 0xff43, + 0xff24 => 0xff44, + 0xff25 => 0xff45, + 0xff26 => 0xff46, + 0xff27 => 0xff47, + 0xff28 => 0xff48, + 0xff29 => 0xff49, + 0xff2a => 0xff4a, + 0xff2b => 0xff4b, + 0xff2c => 0xff4c, + 0xff2d => 0xff4d, + 0xff2e => 0xff4e, + 0xff2f => 0xff4f, + 0xff30 => 0xff50, + 0xff31 => 0xff51, + 0xff32 => 0xff52, + 0xff33 => 0xff53, + 0xff34 => 0xff54, + 0xff35 => 0xff55, + 0xff36 => 0xff56, + 0xff37 => 0xff57, + 0xff38 => 0xff58, + 0xff39 => 0xff59, + 0xff3a => 0xff5a, + 0x10400 => 0x10428, + 0x10401 => 0x10429, + 0x10402 => 0x1042a, + 0x10403 => 0x1042b, + 0x10404 => 0x1042c, + 0x10405 => 0x1042d, + 0x10406 => 0x1042e, + 0x10407 => 0x1042f, + 0x10408 => 0x10430, + 0x10409 => 0x10431, + 0x1040a => 0x10432, + 0x1040b => 0x10433, + 0x1040c => 0x10434, + 0x1040d => 0x10435, + 0x1040e => 0x10436, + 0x1040f => 0x10437, + 0x10410 => 0x10438, + 0x10411 => 0x10439, + 0x10412 => 0x1043a, + 0x10413 => 0x1043b, + 0x10414 => 0x1043c, + 0x10415 => 0x1043d, + 0x10416 => 0x1043e, + 0x10417 => 0x1043f, + 0x10418 => 0x10440, + 0x10419 => 0x10441, + 0x1041a => 0x10442, + 0x1041b => 0x10443, + 0x1041c => 0x10444, + 0x1041d => 0x10445, + 0x1041e => 0x10446, + 0x1041f => 0x10447, + 0x10420 => 0x10448, + 0x10421 => 0x10449, + 0x10422 => 0x1044a, + 0x10423 => 0x1044b, + 0x10424 => 0x1044c, + 0x10425 => 0x1044d, + 0x10426 => 0x1044e, + 0x10427 => 0x1044f, + 0x104b0 => 0x104d8, + 0x104b1 => 0x104d9, + 0x104b2 => 0x104da, + 0x104b3 => 0x104db, + 0x104b4 => 
0x104dc, + 0x104b5 => 0x104dd, + 0x104b6 => 0x104de, + 0x104b7 => 0x104df, + 0x104b8 => 0x104e0, + 0x104b9 => 0x104e1, + 0x104ba => 0x104e2, + 0x104bb => 0x104e3, + 0x104bc => 0x104e4, + 0x104bd => 0x104e5, + 0x104be => 0x104e6, + 0x104bf => 0x104e7, + 0x104c0 => 0x104e8, + 0x104c1 => 0x104e9, + 0x104c2 => 0x104ea, + 0x104c3 => 0x104eb, + 0x104c4 => 0x104ec, + 0x104c5 => 0x104ed, + 0x104c6 => 0x104ee, + 0x104c7 => 0x104ef, + 0x104c8 => 0x104f0, + 0x104c9 => 0x104f1, + 0x104ca => 0x104f2, + 0x104cb => 0x104f3, + 0x104cc => 0x104f4, + 0x104cd => 0x104f5, + 0x104ce => 0x104f6, + 0x104cf => 0x104f7, + 0x104d0 => 0x104f8, + 0x104d1 => 0x104f9, + 0x104d2 => 0x104fa, + 0x104d3 => 0x104fb, + 0x10c80 => 0x10cc0, + 0x10c81 => 0x10cc1, + 0x10c82 => 0x10cc2, + 0x10c83 => 0x10cc3, + 0x10c84 => 0x10cc4, + 0x10c85 => 0x10cc5, + 0x10c86 => 0x10cc6, + 0x10c87 => 0x10cc7, + 0x10c88 => 0x10cc8, + 0x10c89 => 0x10cc9, + 0x10c8a => 0x10cca, + 0x10c8b => 0x10ccb, + 0x10c8c => 0x10ccc, + 0x10c8d => 0x10ccd, + 0x10c8e => 0x10cce, + 0x10c8f => 0x10ccf, + 0x10c90 => 0x10cd0, + 0x10c91 => 0x10cd1, + 0x10c92 => 0x10cd2, + 0x10c93 => 0x10cd3, + 0x10c94 => 0x10cd4, + 0x10c95 => 0x10cd5, + 0x10c96 => 0x10cd6, + 0x10c97 => 0x10cd7, + 0x10c98 => 0x10cd8, + 0x10c99 => 0x10cd9, + 0x10c9a => 0x10cda, + 0x10c9b => 0x10cdb, + 0x10c9c => 0x10cdc, + 0x10c9d => 0x10cdd, + 0x10c9e => 0x10cde, + 0x10c9f => 0x10cdf, + 0x10ca0 => 0x10ce0, + 0x10ca1 => 0x10ce1, + 0x10ca2 => 0x10ce2, + 0x10ca3 => 0x10ce3, + 0x10ca4 => 0x10ce4, + 0x10ca5 => 0x10ce5, + 0x10ca6 => 0x10ce6, + 0x10ca7 => 0x10ce7, + 0x10ca8 => 0x10ce8, + 0x10ca9 => 0x10ce9, + 0x10caa => 0x10cea, + 0x10cab => 0x10ceb, + 0x10cac => 0x10cec, + 0x10cad => 0x10ced, + 0x10cae => 0x10cee, + 0x10caf => 0x10cef, + 0x10cb0 => 0x10cf0, + 0x10cb1 => 0x10cf1, + 0x10cb2 => 0x10cf2, + 0x118a0 => 0x118c0, + 0x118a1 => 0x118c1, + 0x118a2 => 0x118c2, + 0x118a3 => 0x118c3, + 0x118a4 => 0x118c4, + 0x118a5 => 0x118c5, + 0x118a6 => 0x118c6, + 0x118a7 => 0x118c7, + 0x118a8 => 0x118c8, + 0x118a9 => 0x118c9, + 0x118aa => 0x118ca, + 0x118ab => 0x118cb, + 0x118ac => 0x118cc, + 0x118ad => 0x118cd, + 0x118ae => 0x118ce, + 0x118af => 0x118cf, + 0x118b0 => 0x118d0, + 0x118b1 => 0x118d1, + 0x118b2 => 0x118d2, + 0x118b3 => 0x118d3, + 0x118b4 => 0x118d4, + 0x118b5 => 0x118d5, + 0x118b6 => 0x118d6, + 0x118b7 => 0x118d7, + 0x118b8 => 0x118d8, + 0x118b9 => 0x118d9, + 0x118ba => 0x118da, + 0x118bb => 0x118db, + 0x118bc => 0x118dc, + 0x118bd => 0x118dd, + 0x118be => 0x118de, + 0x118bf => 0x118df, + 0x16e40 => 0x16e60, + 0x16e41 => 0x16e61, + 0x16e42 => 0x16e62, + 0x16e43 => 0x16e63, + 0x16e44 => 0x16e64, + 0x16e45 => 0x16e65, + 0x16e46 => 0x16e66, + 0x16e47 => 0x16e67, + 0x16e48 => 0x16e68, + 0x16e49 => 0x16e69, + 0x16e4a => 0x16e6a, + 0x16e4b => 0x16e6b, + 0x16e4c => 0x16e6c, + 0x16e4d => 0x16e6d, + 0x16e4e => 0x16e6e, + 0x16e4f => 0x16e6f, + 0x16e50 => 0x16e70, + 0x16e51 => 0x16e71, + 0x16e52 => 0x16e72, + 0x16e53 => 0x16e73, + 0x16e54 => 0x16e74, + 0x16e55 => 0x16e75, + 0x16e56 => 0x16e76, + 0x16e57 => 0x16e77, + 0x16e58 => 0x16e78, + 0x16e59 => 0x16e79, + 0x16e5a => 0x16e7a, + 0x16e5b => 0x16e7b, + 0x16e5c => 0x16e7c, + 0x16e5d => 0x16e7d, + 0x16e5e => 0x16e7e, + 0x16e5f => 0x16e7f, + 0x1e900 => 0x1e922, + 0x1e901 => 0x1e923, + 0x1e902 => 0x1e924, + 0x1e903 => 0x1e925, + 0x1e904 => 0x1e926, + 0x1e905 => 0x1e927, + 0x1e906 => 0x1e928, + 0x1e907 => 0x1e929, + 0x1e908 => 0x1e92a, + 0x1e909 => 0x1e92b, + 0x1e90a => 0x1e92c, + 0x1e90b => 0x1e92d, + 0x1e90c => 0x1e92e, + 0x1e90d => 0x1e92f, + 0x1e90e => 0x1e930, + 
0x1e90f => 0x1e931, + 0x1e910 => 0x1e932, + 0x1e911 => 0x1e933, + 0x1e912 => 0x1e934, + 0x1e913 => 0x1e935, + 0x1e914 => 0x1e936, + 0x1e915 => 0x1e937, + 0x1e916 => 0x1e938, + 0x1e917 => 0x1e939, + 0x1e918 => 0x1e93a, + 0x1e919 => 0x1e93b, + 0x1e91a => 0x1e93c, + 0x1e91b => 0x1e93d, + 0x1e91c => 0x1e93e, + 0x1e91d => 0x1e93f, + 0x1e91e => 0x1e940, + 0x1e91f => 0x1e941, + 0x1e920 => 0x1e942, + 0x1e921 => 0x1e943, + _ => orig as u32 + }; + Fold::One( char::from_u32(single_char).unwrap() ) + } + + for c_index in 0..126217 { + if let Some(c) = char::from_u32(c_index) { + let reference: Vec = lookup_naive(c).collect(); + let actual: Vec = lookup(c).collect(); + if actual != reference { + assert!(false, "case-folding {:?} (#0x{:04x}) failed: Expected {:?}, got {:?}", c, c_index, reference, actual); + } + } } } diff --git a/third_party/rust/unicase/src/unicode/mod.rs b/third_party/rust/unicase/src/unicode/mod.rs index defa51c645..8b887331bf 100644 --- a/third_party/rust/unicase/src/unicode/mod.rs +++ b/third_party/rust/unicase/src/unicode/mod.rs @@ -1,6 +1,6 @@ #[cfg(__unicase__iter_cmp)] -use std::cmp::Ordering; -use std::hash::{Hash, Hasher}; +use core::cmp::Ordering; +use core::hash::{Hash, Hasher}; use self::map::lookup; mod map; @@ -11,9 +11,25 @@ pub struct Unicode(pub S); impl, S2: AsRef> PartialEq> for Unicode { #[inline] fn eq(&self, other: &Unicode) -> bool { - self.0.as_ref().chars().flat_map(lookup) - .zip(other.0.as_ref().chars().flat_map(lookup)) - .all(|(a, b)| a == b) + let mut left = self.0.as_ref().chars().flat_map(lookup); + let mut right = other.0.as_ref().chars().flat_map(lookup); + + // inline Iterator::eq since not added until Rust 1.5 + loop { + let x = match left.next() { + None => return right.next().is_none(), + Some(val) => val, + }; + + let y = match right.next() { + None => return false, + Some(val) => val, + }; + + if x != y { + return false; + } + } } } diff --git a/third_party/rust/unicode-xid-0.1.0/.cargo-checksum.json b/third_party/rust/unicode-xid-0.1.0/.cargo-checksum.json deleted file mode 100644 index 60faf25bf1..0000000000 --- a/third_party/rust/unicode-xid-0.1.0/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{"COPYRIGHT":"23860c2a7b5d96b21569afedf033469bab9fe14a1b24a35068b8641c578ce24d","Cargo.toml":"aafcae4002bee71546a6aa40a97b9124a69f169ee7e3a9e3262338e32b4c2b9b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"7b63ecd5f1902af1b63729947373683c32745c16a10e8e6292e2e2dcd7e90ae0","README.md":"67998486b32f4fe46abbbaa411b92528750e7f0e22452dc8a5b95d87d80fde75","scripts/unicode.py":"762eea92dd51238c6bf877570bde1149932ba15cf87be1618fc21cd53e941733","src/lib.rs":"4a89fadf452ae7c53536eaa4496f951a3153f8189dd1cbc532648731d30f0b11","src/tables.rs":"0643459b6ebeeed83aecd7604f0ea29c06bea7ce6c1cd9acd4988d27ace1ec53","src/tests.rs":"35a459382e190197e7b9a78832ae79f310b48a02a5b4227bf9bbc89d46c8deac"},"package":"fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc"} \ No newline at end of file diff --git a/third_party/rust/unicode-xid-0.1.0/COPYRIGHT b/third_party/rust/unicode-xid-0.1.0/COPYRIGHT deleted file mode 100644 index b286ec16ab..0000000000 --- a/third_party/rust/unicode-xid-0.1.0/COPYRIGHT +++ /dev/null @@ -1,7 +0,0 @@ -Licensed under the Apache License, Version 2.0 - or the MIT -license , -at your option. All files in the project carrying such -notice may not be copied, modified, or distributed except -according to those terms. 
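The `unicase` hunks above lean on two ideas: many simple case pairs live on adjacent even/odd codepoints, so folding is often just `from | 1` or a fixed offset within a block, and case-insensitive equality compares the two folded character streams element by element, bailing out at the first mismatch. Below is a minimal, self-contained sketch of both ideas; the toy `fold`, `eq_ignore_case`, and the ranges they cover are illustrative assumptions, not the crate's API (the real `lookup` can expand one char into a `Fold` of up to three).

```rust
// Toy fold: ASCII plus the Latin Extended-A even/odd case pairs only.
// The real generated table covers every Unicode block and multi-char expansions.
fn fold(c: char) -> char {
    let u = c as u32;
    match u {
        0x41..=0x5a => char::from_u32(u + 0x20).unwrap_or(c),  // 'A'..='Z' -> 'a'..='z'
        0x0100..=0x012f => char::from_u32(u | 1).unwrap_or(c), // e.g. U+0100 'Ā' -> U+0101 'ā'
        _ => c,
    }
}

// Element-wise comparison of the folded streams, written as an explicit
// loop in the same spirit as the inlined `Iterator::eq` in the hunk above.
fn eq_ignore_case(a: &str, b: &str) -> bool {
    let mut left = a.chars().map(fold);
    let mut right = b.chars().map(fold);
    loop {
        match (left.next(), right.next()) {
            (None, None) => return true,
            (Some(x), Some(y)) if x == y => continue,
            _ => return false,
        }
    }
}

fn main() {
    assert!(eq_ignore_case("Ā Latin", "ā lATIN"));
    assert!(!eq_ignore_case("abc", "abd"));
}
```

The generated `lookup` applies the same tricks at scale: it switches on the high byte of the codepoint, then uses `from | 1`, `(from + 1) & !1`, or a fixed `wrapping_add`/`wrapping_sub` offset within each block, and the `lookup_consistency` test cross-checks it against the exhaustive per-codepoint `lookup_naive` table.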
diff --git a/third_party/rust/unicode-xid-0.1.0/Cargo.toml b/third_party/rust/unicode-xid-0.1.0/Cargo.toml deleted file mode 100644 index b9b69b2937..0000000000 --- a/third_party/rust/unicode-xid-0.1.0/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -[package] - -name = "unicode-xid" -version = "0.1.0" -authors = ["erick.tryzelaar ", - "kwantam ", - ] - -homepage = "https://github.com/unicode-rs/unicode-xid" -repository = "https://github.com/unicode-rs/unicode-xid" -documentation = "https://unicode-rs.github.io/unicode-xid" -license = "MIT/Apache-2.0" -keywords = ["text", "unicode", "xid"] -readme = "README.md" -description = """ -Determine whether characters have the XID_Start -or XID_Continue properties according to -Unicode Standard Annex #31. -""" - -exclude = [ "target/*", "Cargo.lock" ] - -[features] -default = [] -no_std = [] -bench = [] diff --git a/third_party/rust/unicode-xid-0.1.0/LICENSE-APACHE b/third_party/rust/unicode-xid-0.1.0/LICENSE-APACHE deleted file mode 100644 index 16fe87b06e..0000000000 --- a/third_party/rust/unicode-xid-0.1.0/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/third_party/rust/unicode-xid-0.1.0/LICENSE-MIT b/third_party/rust/unicode-xid-0.1.0/LICENSE-MIT deleted file mode 100644 index e69282e381..0000000000 --- a/third_party/rust/unicode-xid-0.1.0/LICENSE-MIT +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2015 The Rust Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/third_party/rust/unicode-xid-0.1.0/README.md b/third_party/rust/unicode-xid-0.1.0/README.md deleted file mode 100644 index 3a2be472d5..0000000000 --- a/third_party/rust/unicode-xid-0.1.0/README.md +++ /dev/null @@ -1,34 +0,0 @@ -# unicode-xid - -Determine if a `char` is a valid identifier for a parser and/or lexer according to -[Unicode Standard Annex #31](http://www.unicode.org/reports/tr31/) rules. - -[![Build Status](https://travis-ci.org/unicode-rs/unicode-xid.svg)](https://travis-ci.org/unicode-rs/unicode-xid) - -[Documentation](https://unicode-rs.github.io/unicode-xid/unicode_xid/index.html) - -```rust -extern crate unicode_xid; - -use unicode_xid::UnicodeXID; - -fn main() { - let ch = 'a'; - println!("Is {} a valid start of an identifier? {}", ch, UnicodeXID::is_xid_start(ch)); -} -``` - -# features - -unicode-xid supports a `no_std` feature. This eliminates dependence -on std, and instead uses equivalent functions from core. - -# crates.io - -You can use this package in your project by adding the following -to your `Cargo.toml`: - -```toml -[dependencies] -unicode-xid = "0.0.4" -``` diff --git a/third_party/rust/unicode-xid-0.1.0/scripts/unicode.py b/third_party/rust/unicode-xid-0.1.0/scripts/unicode.py deleted file mode 100644 index a30d2f2a7a..0000000000 --- a/third_party/rust/unicode-xid-0.1.0/scripts/unicode.py +++ /dev/null @@ -1,187 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2011-2015 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -# This script uses the following Unicode tables: -# - DerivedCoreProperties.txt -# - ReadMe.txt -# -# Since this should not require frequent updates, we just store this -# out-of-line and check the unicode.rs file into git. - -import fileinput, re, os, sys - -preamble = '''// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -// NOTE: The following code was generated by "scripts/unicode.py", do not edit directly - -#![allow(missing_docs, non_upper_case_globals, non_snake_case)] -''' - -def fetch(f): - if not os.path.exists(os.path.basename(f)): - os.system("curl -O http://www.unicode.org/Public/UNIDATA/%s" - % f) - - if not os.path.exists(os.path.basename(f)): - sys.stderr.write("cannot load %s" % f) - exit(1) - -def group_cat(cat): - cat_out = [] - letters = sorted(set(cat)) - cur_start = letters.pop(0) - cur_end = cur_start - for letter in letters: - assert letter > cur_end, \ - "cur_end: %s, letter: %s" % (hex(cur_end), hex(letter)) - if letter == cur_end + 1: - cur_end = letter - else: - cat_out.append((cur_start, cur_end)) - cur_start = cur_end = letter - cat_out.append((cur_start, cur_end)) - return cat_out - -def ungroup_cat(cat): - cat_out = [] - for (lo, hi) in cat: - while lo <= hi: - cat_out.append(lo) - lo += 1 - return cat_out - -def format_table_content(f, content, indent): - line = " "*indent - first = True - for chunk in content.split(","): - if len(line) + len(chunk) < 98: - if first: - line += chunk - else: - line += ", " + chunk - first = False - else: - f.write(line + ",\n") - line = " "*indent + chunk - f.write(line) - -def load_properties(f, interestingprops): - fetch(f) - props = {} - re1 = re.compile("^ *([0-9A-F]+) *; *(\w+)") - re2 = re.compile("^ *([0-9A-F]+)\.\.([0-9A-F]+) *; *(\w+)") - - for line in fileinput.input(os.path.basename(f)): - prop = None - d_lo = 0 - d_hi = 0 - m = re1.match(line) - if m: - d_lo = m.group(1) - d_hi = m.group(1) - prop = m.group(2) - else: - m = re2.match(line) - if m: - d_lo = m.group(1) - d_hi = m.group(2) - prop = m.group(3) - else: - continue - if interestingprops and prop not in interestingprops: - continue - d_lo = int(d_lo, 16) - d_hi = int(d_hi, 16) - if prop not in props: - props[prop] = [] - props[prop].append((d_lo, d_hi)) - - # optimize if possible - for prop in props: - props[prop] = group_cat(ungroup_cat(props[prop])) - - return props - -def escape_char(c): - return "'\\u{%x}'" % c - -def emit_bsearch_range_table(f): - f.write(""" -fn bsearch_range_table(c: char, r: &'static [(char,char)]) -> bool { - use core::cmp::Ordering::{Equal, Less, Greater}; - - r.binary_search_by(|&(lo,hi)| { - if lo <= c && c <= hi { Equal } - else if hi < c { Less } - else { Greater } - }).is_ok() -}\n -""") - -def emit_table(f, name, t_data, t_type = "&'static [(char, char)]", is_pub=True, - pfun=lambda x: "(%s,%s)" % (escape_char(x[0]), escape_char(x[1])), is_const=True): - pub_string = "const" - if not is_const: - pub_string = "let" - if is_pub: - pub_string = "pub " + pub_string - f.write(" %s %s: %s = &[\n" % (pub_string, name, t_type)) - data = "" - first = True - for dat in t_data: - if not first: - data += "," - first = False - data += pfun(dat) - format_table_content(f, data, 8) - f.write("\n ];\n\n") - -def emit_property_module(f, mod, tbl, emit): - f.write("pub mod %s {\n" % mod) - for cat in sorted(emit): - emit_table(f, "%s_table" % cat, tbl[cat]) - f.write(" pub fn %s(c: char) -> bool {\n" % cat) - f.write(" super::bsearch_range_table(c, %s_table)\n" % cat) - f.write(" }\n\n") - f.write("}\n\n") - -if __name__ == "__main__": - r = "tables.rs" - if os.path.exists(r): - os.remove(r) - with open(r, "w") as rf: - # write the file's preamble - rf.write(preamble) - - # download and parse all the data - fetch("ReadMe.txt") - with open("ReadMe.txt") as readme: - pattern = "for Version (\d+)\.(\d+)\.(\d+) of the Unicode" - unicode_version = 
re.search(pattern, readme.read()).groups() - rf.write(""" -/// The version of [Unicode](http://www.unicode.org/) -/// that this version of unicode-xid is based on. -pub const UNICODE_VERSION: (u64, u64, u64) = (%s, %s, %s); -""" % unicode_version) - emit_bsearch_range_table(rf) - - want_derived = ["XID_Start", "XID_Continue"] - derived = load_properties("DerivedCoreProperties.txt", want_derived) - emit_property_module(rf, "derived_property", derived, want_derived) diff --git a/third_party/rust/unicode-xid-0.1.0/src/lib.rs b/third_party/rust/unicode-xid-0.1.0/src/lib.rs deleted file mode 100644 index 09faf97267..0000000000 --- a/third_party/rust/unicode-xid-0.1.0/src/lib.rs +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Determine if a `char` is a valid identifier for a parser and/or lexer according to -//! [Unicode Standard Annex #31](http://www.unicode.org/reports/tr31/) rules. -//! -//! ```rust -//! extern crate unicode_xid; -//! -//! use unicode_xid::UnicodeXID; -//! -//! fn main() { -//! let ch = 'a'; -//! println!("Is {} a valid start of an identifier? {}", ch, UnicodeXID::is_xid_start(ch)); -//! } -//! ``` -//! -//! # features -//! -//! unicode-xid supports a `no_std` feature. This eliminates dependence -//! on std, and instead uses equivalent functions from core. -//! -//! # crates.io -//! -//! You can use this package in your project by adding the following -//! to your `Cargo.toml`: -//! -//! ```toml -//! [dependencies] -//! unicode-xid = "0.0.4" -//! ``` - -#![deny(missing_docs, unsafe_code)] -#![doc(html_logo_url = "https://unicode-rs.github.io/unicode-rs_sm.png", - html_favicon_url = "https://unicode-rs.github.io/unicode-rs_sm.png")] - -#![no_std] -#![cfg_attr(feature = "bench", feature(test, unicode))] - -#[cfg(test)] -#[macro_use] -extern crate std; - -#[cfg(feature = "bench")] -extern crate test; - -use tables::derived_property; -pub use tables::UNICODE_VERSION; - -mod tables; - -#[cfg(test)] -mod tests; - -/// Methods for determining if a character is a valid identifier character. -pub trait UnicodeXID { - /// Returns whether the specified character satisfies the 'XID_Start' - /// Unicode property. - /// - /// 'XID_Start' is a Unicode Derived Property specified in - /// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications), - /// mostly similar to ID_Start but modified for closure under NFKx. - fn is_xid_start(self) -> bool; - - /// Returns whether the specified `char` satisfies the 'XID_Continue' - /// Unicode property. - /// - /// 'XID_Continue' is a Unicode Derived Property specified in - /// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications), - /// mostly similar to 'ID_Continue' but modified for closure under NFKx. 
- fn is_xid_continue(self) -> bool; -} - -impl UnicodeXID for char { - #[inline] - fn is_xid_start(self) -> bool { derived_property::XID_Start(self) } - - #[inline] - fn is_xid_continue(self) -> bool { derived_property::XID_Continue(self) } -} diff --git a/third_party/rust/unicode-xid-0.1.0/src/tables.rs b/third_party/rust/unicode-xid-0.1.0/src/tables.rs deleted file mode 100644 index 3fe0d3d114..0000000000 --- a/third_party/rust/unicode-xid-0.1.0/src/tables.rs +++ /dev/null @@ -1,426 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// NOTE: The following code was generated by "scripts/unicode.py", do not edit directly - -#![allow(missing_docs, non_upper_case_globals, non_snake_case)] - -/// The version of [Unicode](http://www.unicode.org/) -/// that this version of unicode-xid is based on. -pub const UNICODE_VERSION: (u64, u64, u64) = (9, 0, 0); - -fn bsearch_range_table(c: char, r: &'static [(char,char)]) -> bool { - use core::cmp::Ordering::{Equal, Less, Greater}; - - r.binary_search_by(|&(lo,hi)| { - if lo <= c && c <= hi { Equal } - else if hi < c { Less } - else { Greater } - }).is_ok() -} - -pub mod derived_property { - pub const XID_Continue_table: &'static [(char, char)] = &[ - ('\u{30}', '\u{39}'), ('\u{41}', '\u{5a}'), ('\u{5f}', '\u{5f}'), ('\u{61}', '\u{7a}'), - ('\u{aa}', '\u{aa}'), ('\u{b5}', '\u{b5}'), ('\u{b7}', '\u{b7}'), ('\u{ba}', '\u{ba}'), - ('\u{c0}', '\u{d6}'), ('\u{d8}', '\u{f6}'), ('\u{f8}', '\u{2c1}'), ('\u{2c6}', '\u{2d1}'), - ('\u{2e0}', '\u{2e4}'), ('\u{2ec}', '\u{2ec}'), ('\u{2ee}', '\u{2ee}'), ('\u{300}', - '\u{374}'), ('\u{376}', '\u{377}'), ('\u{37b}', '\u{37d}'), ('\u{37f}', '\u{37f}'), - ('\u{386}', '\u{38a}'), ('\u{38c}', '\u{38c}'), ('\u{38e}', '\u{3a1}'), ('\u{3a3}', - '\u{3f5}'), ('\u{3f7}', '\u{481}'), ('\u{483}', '\u{487}'), ('\u{48a}', '\u{52f}'), - ('\u{531}', '\u{556}'), ('\u{559}', '\u{559}'), ('\u{561}', '\u{587}'), ('\u{591}', - '\u{5bd}'), ('\u{5bf}', '\u{5bf}'), ('\u{5c1}', '\u{5c2}'), ('\u{5c4}', '\u{5c5}'), - ('\u{5c7}', '\u{5c7}'), ('\u{5d0}', '\u{5ea}'), ('\u{5f0}', '\u{5f2}'), ('\u{610}', - '\u{61a}'), ('\u{620}', '\u{669}'), ('\u{66e}', '\u{6d3}'), ('\u{6d5}', '\u{6dc}'), - ('\u{6df}', '\u{6e8}'), ('\u{6ea}', '\u{6fc}'), ('\u{6ff}', '\u{6ff}'), ('\u{710}', - '\u{74a}'), ('\u{74d}', '\u{7b1}'), ('\u{7c0}', '\u{7f5}'), ('\u{7fa}', '\u{7fa}'), - ('\u{800}', '\u{82d}'), ('\u{840}', '\u{85b}'), ('\u{8a0}', '\u{8b4}'), ('\u{8b6}', - '\u{8bd}'), ('\u{8d4}', '\u{8e1}'), ('\u{8e3}', '\u{963}'), ('\u{966}', '\u{96f}'), - ('\u{971}', '\u{983}'), ('\u{985}', '\u{98c}'), ('\u{98f}', '\u{990}'), ('\u{993}', - '\u{9a8}'), ('\u{9aa}', '\u{9b0}'), ('\u{9b2}', '\u{9b2}'), ('\u{9b6}', '\u{9b9}'), - ('\u{9bc}', '\u{9c4}'), ('\u{9c7}', '\u{9c8}'), ('\u{9cb}', '\u{9ce}'), ('\u{9d7}', - '\u{9d7}'), ('\u{9dc}', '\u{9dd}'), ('\u{9df}', '\u{9e3}'), ('\u{9e6}', '\u{9f1}'), - ('\u{a01}', '\u{a03}'), ('\u{a05}', '\u{a0a}'), ('\u{a0f}', '\u{a10}'), ('\u{a13}', - '\u{a28}'), ('\u{a2a}', '\u{a30}'), ('\u{a32}', '\u{a33}'), ('\u{a35}', '\u{a36}'), - ('\u{a38}', '\u{a39}'), ('\u{a3c}', '\u{a3c}'), ('\u{a3e}', '\u{a42}'), ('\u{a47}', - '\u{a48}'), ('\u{a4b}', '\u{a4d}'), ('\u{a51}', '\u{a51}'), ('\u{a59}', '\u{a5c}'), - ('\u{a5e}', '\u{a5e}'), 
('\u{a66}', '\u{a75}'), ('\u{a81}', '\u{a83}'), ('\u{a85}', - '\u{a8d}'), ('\u{a8f}', '\u{a91}'), ('\u{a93}', '\u{aa8}'), ('\u{aaa}', '\u{ab0}'), - ('\u{ab2}', '\u{ab3}'), ('\u{ab5}', '\u{ab9}'), ('\u{abc}', '\u{ac5}'), ('\u{ac7}', - '\u{ac9}'), ('\u{acb}', '\u{acd}'), ('\u{ad0}', '\u{ad0}'), ('\u{ae0}', '\u{ae3}'), - ('\u{ae6}', '\u{aef}'), ('\u{af9}', '\u{af9}'), ('\u{b01}', '\u{b03}'), ('\u{b05}', - '\u{b0c}'), ('\u{b0f}', '\u{b10}'), ('\u{b13}', '\u{b28}'), ('\u{b2a}', '\u{b30}'), - ('\u{b32}', '\u{b33}'), ('\u{b35}', '\u{b39}'), ('\u{b3c}', '\u{b44}'), ('\u{b47}', - '\u{b48}'), ('\u{b4b}', '\u{b4d}'), ('\u{b56}', '\u{b57}'), ('\u{b5c}', '\u{b5d}'), - ('\u{b5f}', '\u{b63}'), ('\u{b66}', '\u{b6f}'), ('\u{b71}', '\u{b71}'), ('\u{b82}', - '\u{b83}'), ('\u{b85}', '\u{b8a}'), ('\u{b8e}', '\u{b90}'), ('\u{b92}', '\u{b95}'), - ('\u{b99}', '\u{b9a}'), ('\u{b9c}', '\u{b9c}'), ('\u{b9e}', '\u{b9f}'), ('\u{ba3}', - '\u{ba4}'), ('\u{ba8}', '\u{baa}'), ('\u{bae}', '\u{bb9}'), ('\u{bbe}', '\u{bc2}'), - ('\u{bc6}', '\u{bc8}'), ('\u{bca}', '\u{bcd}'), ('\u{bd0}', '\u{bd0}'), ('\u{bd7}', - '\u{bd7}'), ('\u{be6}', '\u{bef}'), ('\u{c00}', '\u{c03}'), ('\u{c05}', '\u{c0c}'), - ('\u{c0e}', '\u{c10}'), ('\u{c12}', '\u{c28}'), ('\u{c2a}', '\u{c39}'), ('\u{c3d}', - '\u{c44}'), ('\u{c46}', '\u{c48}'), ('\u{c4a}', '\u{c4d}'), ('\u{c55}', '\u{c56}'), - ('\u{c58}', '\u{c5a}'), ('\u{c60}', '\u{c63}'), ('\u{c66}', '\u{c6f}'), ('\u{c80}', - '\u{c83}'), ('\u{c85}', '\u{c8c}'), ('\u{c8e}', '\u{c90}'), ('\u{c92}', '\u{ca8}'), - ('\u{caa}', '\u{cb3}'), ('\u{cb5}', '\u{cb9}'), ('\u{cbc}', '\u{cc4}'), ('\u{cc6}', - '\u{cc8}'), ('\u{cca}', '\u{ccd}'), ('\u{cd5}', '\u{cd6}'), ('\u{cde}', '\u{cde}'), - ('\u{ce0}', '\u{ce3}'), ('\u{ce6}', '\u{cef}'), ('\u{cf1}', '\u{cf2}'), ('\u{d01}', - '\u{d03}'), ('\u{d05}', '\u{d0c}'), ('\u{d0e}', '\u{d10}'), ('\u{d12}', '\u{d3a}'), - ('\u{d3d}', '\u{d44}'), ('\u{d46}', '\u{d48}'), ('\u{d4a}', '\u{d4e}'), ('\u{d54}', - '\u{d57}'), ('\u{d5f}', '\u{d63}'), ('\u{d66}', '\u{d6f}'), ('\u{d7a}', '\u{d7f}'), - ('\u{d82}', '\u{d83}'), ('\u{d85}', '\u{d96}'), ('\u{d9a}', '\u{db1}'), ('\u{db3}', - '\u{dbb}'), ('\u{dbd}', '\u{dbd}'), ('\u{dc0}', '\u{dc6}'), ('\u{dca}', '\u{dca}'), - ('\u{dcf}', '\u{dd4}'), ('\u{dd6}', '\u{dd6}'), ('\u{dd8}', '\u{ddf}'), ('\u{de6}', - '\u{def}'), ('\u{df2}', '\u{df3}'), ('\u{e01}', '\u{e3a}'), ('\u{e40}', '\u{e4e}'), - ('\u{e50}', '\u{e59}'), ('\u{e81}', '\u{e82}'), ('\u{e84}', '\u{e84}'), ('\u{e87}', - '\u{e88}'), ('\u{e8a}', '\u{e8a}'), ('\u{e8d}', '\u{e8d}'), ('\u{e94}', '\u{e97}'), - ('\u{e99}', '\u{e9f}'), ('\u{ea1}', '\u{ea3}'), ('\u{ea5}', '\u{ea5}'), ('\u{ea7}', - '\u{ea7}'), ('\u{eaa}', '\u{eab}'), ('\u{ead}', '\u{eb9}'), ('\u{ebb}', '\u{ebd}'), - ('\u{ec0}', '\u{ec4}'), ('\u{ec6}', '\u{ec6}'), ('\u{ec8}', '\u{ecd}'), ('\u{ed0}', - '\u{ed9}'), ('\u{edc}', '\u{edf}'), ('\u{f00}', '\u{f00}'), ('\u{f18}', '\u{f19}'), - ('\u{f20}', '\u{f29}'), ('\u{f35}', '\u{f35}'), ('\u{f37}', '\u{f37}'), ('\u{f39}', - '\u{f39}'), ('\u{f3e}', '\u{f47}'), ('\u{f49}', '\u{f6c}'), ('\u{f71}', '\u{f84}'), - ('\u{f86}', '\u{f97}'), ('\u{f99}', '\u{fbc}'), ('\u{fc6}', '\u{fc6}'), ('\u{1000}', - '\u{1049}'), ('\u{1050}', '\u{109d}'), ('\u{10a0}', '\u{10c5}'), ('\u{10c7}', '\u{10c7}'), - ('\u{10cd}', '\u{10cd}'), ('\u{10d0}', '\u{10fa}'), ('\u{10fc}', '\u{1248}'), ('\u{124a}', - '\u{124d}'), ('\u{1250}', '\u{1256}'), ('\u{1258}', '\u{1258}'), ('\u{125a}', '\u{125d}'), - ('\u{1260}', '\u{1288}'), ('\u{128a}', '\u{128d}'), ('\u{1290}', '\u{12b0}'), ('\u{12b2}', - '\u{12b5}'), 
('\u{12b8}', '\u{12be}'), ('\u{12c0}', '\u{12c0}'), ('\u{12c2}', '\u{12c5}'), - ('\u{12c8}', '\u{12d6}'), ('\u{12d8}', '\u{1310}'), ('\u{1312}', '\u{1315}'), ('\u{1318}', - '\u{135a}'), ('\u{135d}', '\u{135f}'), ('\u{1369}', '\u{1371}'), ('\u{1380}', '\u{138f}'), - ('\u{13a0}', '\u{13f5}'), ('\u{13f8}', '\u{13fd}'), ('\u{1401}', '\u{166c}'), ('\u{166f}', - '\u{167f}'), ('\u{1681}', '\u{169a}'), ('\u{16a0}', '\u{16ea}'), ('\u{16ee}', '\u{16f8}'), - ('\u{1700}', '\u{170c}'), ('\u{170e}', '\u{1714}'), ('\u{1720}', '\u{1734}'), ('\u{1740}', - '\u{1753}'), ('\u{1760}', '\u{176c}'), ('\u{176e}', '\u{1770}'), ('\u{1772}', '\u{1773}'), - ('\u{1780}', '\u{17d3}'), ('\u{17d7}', '\u{17d7}'), ('\u{17dc}', '\u{17dd}'), ('\u{17e0}', - '\u{17e9}'), ('\u{180b}', '\u{180d}'), ('\u{1810}', '\u{1819}'), ('\u{1820}', '\u{1877}'), - ('\u{1880}', '\u{18aa}'), ('\u{18b0}', '\u{18f5}'), ('\u{1900}', '\u{191e}'), ('\u{1920}', - '\u{192b}'), ('\u{1930}', '\u{193b}'), ('\u{1946}', '\u{196d}'), ('\u{1970}', '\u{1974}'), - ('\u{1980}', '\u{19ab}'), ('\u{19b0}', '\u{19c9}'), ('\u{19d0}', '\u{19da}'), ('\u{1a00}', - '\u{1a1b}'), ('\u{1a20}', '\u{1a5e}'), ('\u{1a60}', '\u{1a7c}'), ('\u{1a7f}', '\u{1a89}'), - ('\u{1a90}', '\u{1a99}'), ('\u{1aa7}', '\u{1aa7}'), ('\u{1ab0}', '\u{1abd}'), ('\u{1b00}', - '\u{1b4b}'), ('\u{1b50}', '\u{1b59}'), ('\u{1b6b}', '\u{1b73}'), ('\u{1b80}', '\u{1bf3}'), - ('\u{1c00}', '\u{1c37}'), ('\u{1c40}', '\u{1c49}'), ('\u{1c4d}', '\u{1c7d}'), ('\u{1c80}', - '\u{1c88}'), ('\u{1cd0}', '\u{1cd2}'), ('\u{1cd4}', '\u{1cf6}'), ('\u{1cf8}', '\u{1cf9}'), - ('\u{1d00}', '\u{1df5}'), ('\u{1dfb}', '\u{1f15}'), ('\u{1f18}', '\u{1f1d}'), ('\u{1f20}', - '\u{1f45}'), ('\u{1f48}', '\u{1f4d}'), ('\u{1f50}', '\u{1f57}'), ('\u{1f59}', '\u{1f59}'), - ('\u{1f5b}', '\u{1f5b}'), ('\u{1f5d}', '\u{1f5d}'), ('\u{1f5f}', '\u{1f7d}'), ('\u{1f80}', - '\u{1fb4}'), ('\u{1fb6}', '\u{1fbc}'), ('\u{1fbe}', '\u{1fbe}'), ('\u{1fc2}', '\u{1fc4}'), - ('\u{1fc6}', '\u{1fcc}'), ('\u{1fd0}', '\u{1fd3}'), ('\u{1fd6}', '\u{1fdb}'), ('\u{1fe0}', - '\u{1fec}'), ('\u{1ff2}', '\u{1ff4}'), ('\u{1ff6}', '\u{1ffc}'), ('\u{203f}', '\u{2040}'), - ('\u{2054}', '\u{2054}'), ('\u{2071}', '\u{2071}'), ('\u{207f}', '\u{207f}'), ('\u{2090}', - '\u{209c}'), ('\u{20d0}', '\u{20dc}'), ('\u{20e1}', '\u{20e1}'), ('\u{20e5}', '\u{20f0}'), - ('\u{2102}', '\u{2102}'), ('\u{2107}', '\u{2107}'), ('\u{210a}', '\u{2113}'), ('\u{2115}', - '\u{2115}'), ('\u{2118}', '\u{211d}'), ('\u{2124}', '\u{2124}'), ('\u{2126}', '\u{2126}'), - ('\u{2128}', '\u{2128}'), ('\u{212a}', '\u{2139}'), ('\u{213c}', '\u{213f}'), ('\u{2145}', - '\u{2149}'), ('\u{214e}', '\u{214e}'), ('\u{2160}', '\u{2188}'), ('\u{2c00}', '\u{2c2e}'), - ('\u{2c30}', '\u{2c5e}'), ('\u{2c60}', '\u{2ce4}'), ('\u{2ceb}', '\u{2cf3}'), ('\u{2d00}', - '\u{2d25}'), ('\u{2d27}', '\u{2d27}'), ('\u{2d2d}', '\u{2d2d}'), ('\u{2d30}', '\u{2d67}'), - ('\u{2d6f}', '\u{2d6f}'), ('\u{2d7f}', '\u{2d96}'), ('\u{2da0}', '\u{2da6}'), ('\u{2da8}', - '\u{2dae}'), ('\u{2db0}', '\u{2db6}'), ('\u{2db8}', '\u{2dbe}'), ('\u{2dc0}', '\u{2dc6}'), - ('\u{2dc8}', '\u{2dce}'), ('\u{2dd0}', '\u{2dd6}'), ('\u{2dd8}', '\u{2dde}'), ('\u{2de0}', - '\u{2dff}'), ('\u{3005}', '\u{3007}'), ('\u{3021}', '\u{302f}'), ('\u{3031}', '\u{3035}'), - ('\u{3038}', '\u{303c}'), ('\u{3041}', '\u{3096}'), ('\u{3099}', '\u{309a}'), ('\u{309d}', - '\u{309f}'), ('\u{30a1}', '\u{30fa}'), ('\u{30fc}', '\u{30ff}'), ('\u{3105}', '\u{312d}'), - ('\u{3131}', '\u{318e}'), ('\u{31a0}', '\u{31ba}'), ('\u{31f0}', '\u{31ff}'), ('\u{3400}', - '\u{4db5}'), ('\u{4e00}', 
'\u{9fd5}'), ('\u{a000}', '\u{a48c}'), ('\u{a4d0}', '\u{a4fd}'), - ('\u{a500}', '\u{a60c}'), ('\u{a610}', '\u{a62b}'), ('\u{a640}', '\u{a66f}'), ('\u{a674}', - '\u{a67d}'), ('\u{a67f}', '\u{a6f1}'), ('\u{a717}', '\u{a71f}'), ('\u{a722}', '\u{a788}'), - ('\u{a78b}', '\u{a7ae}'), ('\u{a7b0}', '\u{a7b7}'), ('\u{a7f7}', '\u{a827}'), ('\u{a840}', - '\u{a873}'), ('\u{a880}', '\u{a8c5}'), ('\u{a8d0}', '\u{a8d9}'), ('\u{a8e0}', '\u{a8f7}'), - ('\u{a8fb}', '\u{a8fb}'), ('\u{a8fd}', '\u{a8fd}'), ('\u{a900}', '\u{a92d}'), ('\u{a930}', - '\u{a953}'), ('\u{a960}', '\u{a97c}'), ('\u{a980}', '\u{a9c0}'), ('\u{a9cf}', '\u{a9d9}'), - ('\u{a9e0}', '\u{a9fe}'), ('\u{aa00}', '\u{aa36}'), ('\u{aa40}', '\u{aa4d}'), ('\u{aa50}', - '\u{aa59}'), ('\u{aa60}', '\u{aa76}'), ('\u{aa7a}', '\u{aac2}'), ('\u{aadb}', '\u{aadd}'), - ('\u{aae0}', '\u{aaef}'), ('\u{aaf2}', '\u{aaf6}'), ('\u{ab01}', '\u{ab06}'), ('\u{ab09}', - '\u{ab0e}'), ('\u{ab11}', '\u{ab16}'), ('\u{ab20}', '\u{ab26}'), ('\u{ab28}', '\u{ab2e}'), - ('\u{ab30}', '\u{ab5a}'), ('\u{ab5c}', '\u{ab65}'), ('\u{ab70}', '\u{abea}'), ('\u{abec}', - '\u{abed}'), ('\u{abf0}', '\u{abf9}'), ('\u{ac00}', '\u{d7a3}'), ('\u{d7b0}', '\u{d7c6}'), - ('\u{d7cb}', '\u{d7fb}'), ('\u{f900}', '\u{fa6d}'), ('\u{fa70}', '\u{fad9}'), ('\u{fb00}', - '\u{fb06}'), ('\u{fb13}', '\u{fb17}'), ('\u{fb1d}', '\u{fb28}'), ('\u{fb2a}', '\u{fb36}'), - ('\u{fb38}', '\u{fb3c}'), ('\u{fb3e}', '\u{fb3e}'), ('\u{fb40}', '\u{fb41}'), ('\u{fb43}', - '\u{fb44}'), ('\u{fb46}', '\u{fbb1}'), ('\u{fbd3}', '\u{fc5d}'), ('\u{fc64}', '\u{fd3d}'), - ('\u{fd50}', '\u{fd8f}'), ('\u{fd92}', '\u{fdc7}'), ('\u{fdf0}', '\u{fdf9}'), ('\u{fe00}', - '\u{fe0f}'), ('\u{fe20}', '\u{fe2f}'), ('\u{fe33}', '\u{fe34}'), ('\u{fe4d}', '\u{fe4f}'), - ('\u{fe71}', '\u{fe71}'), ('\u{fe73}', '\u{fe73}'), ('\u{fe77}', '\u{fe77}'), ('\u{fe79}', - '\u{fe79}'), ('\u{fe7b}', '\u{fe7b}'), ('\u{fe7d}', '\u{fe7d}'), ('\u{fe7f}', '\u{fefc}'), - ('\u{ff10}', '\u{ff19}'), ('\u{ff21}', '\u{ff3a}'), ('\u{ff3f}', '\u{ff3f}'), ('\u{ff41}', - '\u{ff5a}'), ('\u{ff66}', '\u{ffbe}'), ('\u{ffc2}', '\u{ffc7}'), ('\u{ffca}', '\u{ffcf}'), - ('\u{ffd2}', '\u{ffd7}'), ('\u{ffda}', '\u{ffdc}'), ('\u{10000}', '\u{1000b}'), - ('\u{1000d}', '\u{10026}'), ('\u{10028}', '\u{1003a}'), ('\u{1003c}', '\u{1003d}'), - ('\u{1003f}', '\u{1004d}'), ('\u{10050}', '\u{1005d}'), ('\u{10080}', '\u{100fa}'), - ('\u{10140}', '\u{10174}'), ('\u{101fd}', '\u{101fd}'), ('\u{10280}', '\u{1029c}'), - ('\u{102a0}', '\u{102d0}'), ('\u{102e0}', '\u{102e0}'), ('\u{10300}', '\u{1031f}'), - ('\u{10330}', '\u{1034a}'), ('\u{10350}', '\u{1037a}'), ('\u{10380}', '\u{1039d}'), - ('\u{103a0}', '\u{103c3}'), ('\u{103c8}', '\u{103cf}'), ('\u{103d1}', '\u{103d5}'), - ('\u{10400}', '\u{1049d}'), ('\u{104a0}', '\u{104a9}'), ('\u{104b0}', '\u{104d3}'), - ('\u{104d8}', '\u{104fb}'), ('\u{10500}', '\u{10527}'), ('\u{10530}', '\u{10563}'), - ('\u{10600}', '\u{10736}'), ('\u{10740}', '\u{10755}'), ('\u{10760}', '\u{10767}'), - ('\u{10800}', '\u{10805}'), ('\u{10808}', '\u{10808}'), ('\u{1080a}', '\u{10835}'), - ('\u{10837}', '\u{10838}'), ('\u{1083c}', '\u{1083c}'), ('\u{1083f}', '\u{10855}'), - ('\u{10860}', '\u{10876}'), ('\u{10880}', '\u{1089e}'), ('\u{108e0}', '\u{108f2}'), - ('\u{108f4}', '\u{108f5}'), ('\u{10900}', '\u{10915}'), ('\u{10920}', '\u{10939}'), - ('\u{10980}', '\u{109b7}'), ('\u{109be}', '\u{109bf}'), ('\u{10a00}', '\u{10a03}'), - ('\u{10a05}', '\u{10a06}'), ('\u{10a0c}', '\u{10a13}'), ('\u{10a15}', '\u{10a17}'), - ('\u{10a19}', '\u{10a33}'), ('\u{10a38}', '\u{10a3a}'), ('\u{10a3f}', 
'\u{10a3f}'), - ('\u{10a60}', '\u{10a7c}'), ('\u{10a80}', '\u{10a9c}'), ('\u{10ac0}', '\u{10ac7}'), - ('\u{10ac9}', '\u{10ae6}'), ('\u{10b00}', '\u{10b35}'), ('\u{10b40}', '\u{10b55}'), - ('\u{10b60}', '\u{10b72}'), ('\u{10b80}', '\u{10b91}'), ('\u{10c00}', '\u{10c48}'), - ('\u{10c80}', '\u{10cb2}'), ('\u{10cc0}', '\u{10cf2}'), ('\u{11000}', '\u{11046}'), - ('\u{11066}', '\u{1106f}'), ('\u{1107f}', '\u{110ba}'), ('\u{110d0}', '\u{110e8}'), - ('\u{110f0}', '\u{110f9}'), ('\u{11100}', '\u{11134}'), ('\u{11136}', '\u{1113f}'), - ('\u{11150}', '\u{11173}'), ('\u{11176}', '\u{11176}'), ('\u{11180}', '\u{111c4}'), - ('\u{111ca}', '\u{111cc}'), ('\u{111d0}', '\u{111da}'), ('\u{111dc}', '\u{111dc}'), - ('\u{11200}', '\u{11211}'), ('\u{11213}', '\u{11237}'), ('\u{1123e}', '\u{1123e}'), - ('\u{11280}', '\u{11286}'), ('\u{11288}', '\u{11288}'), ('\u{1128a}', '\u{1128d}'), - ('\u{1128f}', '\u{1129d}'), ('\u{1129f}', '\u{112a8}'), ('\u{112b0}', '\u{112ea}'), - ('\u{112f0}', '\u{112f9}'), ('\u{11300}', '\u{11303}'), ('\u{11305}', '\u{1130c}'), - ('\u{1130f}', '\u{11310}'), ('\u{11313}', '\u{11328}'), ('\u{1132a}', '\u{11330}'), - ('\u{11332}', '\u{11333}'), ('\u{11335}', '\u{11339}'), ('\u{1133c}', '\u{11344}'), - ('\u{11347}', '\u{11348}'), ('\u{1134b}', '\u{1134d}'), ('\u{11350}', '\u{11350}'), - ('\u{11357}', '\u{11357}'), ('\u{1135d}', '\u{11363}'), ('\u{11366}', '\u{1136c}'), - ('\u{11370}', '\u{11374}'), ('\u{11400}', '\u{1144a}'), ('\u{11450}', '\u{11459}'), - ('\u{11480}', '\u{114c5}'), ('\u{114c7}', '\u{114c7}'), ('\u{114d0}', '\u{114d9}'), - ('\u{11580}', '\u{115b5}'), ('\u{115b8}', '\u{115c0}'), ('\u{115d8}', '\u{115dd}'), - ('\u{11600}', '\u{11640}'), ('\u{11644}', '\u{11644}'), ('\u{11650}', '\u{11659}'), - ('\u{11680}', '\u{116b7}'), ('\u{116c0}', '\u{116c9}'), ('\u{11700}', '\u{11719}'), - ('\u{1171d}', '\u{1172b}'), ('\u{11730}', '\u{11739}'), ('\u{118a0}', '\u{118e9}'), - ('\u{118ff}', '\u{118ff}'), ('\u{11ac0}', '\u{11af8}'), ('\u{11c00}', '\u{11c08}'), - ('\u{11c0a}', '\u{11c36}'), ('\u{11c38}', '\u{11c40}'), ('\u{11c50}', '\u{11c59}'), - ('\u{11c72}', '\u{11c8f}'), ('\u{11c92}', '\u{11ca7}'), ('\u{11ca9}', '\u{11cb6}'), - ('\u{12000}', '\u{12399}'), ('\u{12400}', '\u{1246e}'), ('\u{12480}', '\u{12543}'), - ('\u{13000}', '\u{1342e}'), ('\u{14400}', '\u{14646}'), ('\u{16800}', '\u{16a38}'), - ('\u{16a40}', '\u{16a5e}'), ('\u{16a60}', '\u{16a69}'), ('\u{16ad0}', '\u{16aed}'), - ('\u{16af0}', '\u{16af4}'), ('\u{16b00}', '\u{16b36}'), ('\u{16b40}', '\u{16b43}'), - ('\u{16b50}', '\u{16b59}'), ('\u{16b63}', '\u{16b77}'), ('\u{16b7d}', '\u{16b8f}'), - ('\u{16f00}', '\u{16f44}'), ('\u{16f50}', '\u{16f7e}'), ('\u{16f8f}', '\u{16f9f}'), - ('\u{16fe0}', '\u{16fe0}'), ('\u{17000}', '\u{187ec}'), ('\u{18800}', '\u{18af2}'), - ('\u{1b000}', '\u{1b001}'), ('\u{1bc00}', '\u{1bc6a}'), ('\u{1bc70}', '\u{1bc7c}'), - ('\u{1bc80}', '\u{1bc88}'), ('\u{1bc90}', '\u{1bc99}'), ('\u{1bc9d}', '\u{1bc9e}'), - ('\u{1d165}', '\u{1d169}'), ('\u{1d16d}', '\u{1d172}'), ('\u{1d17b}', '\u{1d182}'), - ('\u{1d185}', '\u{1d18b}'), ('\u{1d1aa}', '\u{1d1ad}'), ('\u{1d242}', '\u{1d244}'), - ('\u{1d400}', '\u{1d454}'), ('\u{1d456}', '\u{1d49c}'), ('\u{1d49e}', '\u{1d49f}'), - ('\u{1d4a2}', '\u{1d4a2}'), ('\u{1d4a5}', '\u{1d4a6}'), ('\u{1d4a9}', '\u{1d4ac}'), - ('\u{1d4ae}', '\u{1d4b9}'), ('\u{1d4bb}', '\u{1d4bb}'), ('\u{1d4bd}', '\u{1d4c3}'), - ('\u{1d4c5}', '\u{1d505}'), ('\u{1d507}', '\u{1d50a}'), ('\u{1d50d}', '\u{1d514}'), - ('\u{1d516}', '\u{1d51c}'), ('\u{1d51e}', '\u{1d539}'), ('\u{1d53b}', '\u{1d53e}'), - 
('\u{1d540}', '\u{1d544}'), ('\u{1d546}', '\u{1d546}'), ('\u{1d54a}', '\u{1d550}'), - ('\u{1d552}', '\u{1d6a5}'), ('\u{1d6a8}', '\u{1d6c0}'), ('\u{1d6c2}', '\u{1d6da}'), - ('\u{1d6dc}', '\u{1d6fa}'), ('\u{1d6fc}', '\u{1d714}'), ('\u{1d716}', '\u{1d734}'), - ('\u{1d736}', '\u{1d74e}'), ('\u{1d750}', '\u{1d76e}'), ('\u{1d770}', '\u{1d788}'), - ('\u{1d78a}', '\u{1d7a8}'), ('\u{1d7aa}', '\u{1d7c2}'), ('\u{1d7c4}', '\u{1d7cb}'), - ('\u{1d7ce}', '\u{1d7ff}'), ('\u{1da00}', '\u{1da36}'), ('\u{1da3b}', '\u{1da6c}'), - ('\u{1da75}', '\u{1da75}'), ('\u{1da84}', '\u{1da84}'), ('\u{1da9b}', '\u{1da9f}'), - ('\u{1daa1}', '\u{1daaf}'), ('\u{1e000}', '\u{1e006}'), ('\u{1e008}', '\u{1e018}'), - ('\u{1e01b}', '\u{1e021}'), ('\u{1e023}', '\u{1e024}'), ('\u{1e026}', '\u{1e02a}'), - ('\u{1e800}', '\u{1e8c4}'), ('\u{1e8d0}', '\u{1e8d6}'), ('\u{1e900}', '\u{1e94a}'), - ('\u{1e950}', '\u{1e959}'), ('\u{1ee00}', '\u{1ee03}'), ('\u{1ee05}', '\u{1ee1f}'), - ('\u{1ee21}', '\u{1ee22}'), ('\u{1ee24}', '\u{1ee24}'), ('\u{1ee27}', '\u{1ee27}'), - ('\u{1ee29}', '\u{1ee32}'), ('\u{1ee34}', '\u{1ee37}'), ('\u{1ee39}', '\u{1ee39}'), - ('\u{1ee3b}', '\u{1ee3b}'), ('\u{1ee42}', '\u{1ee42}'), ('\u{1ee47}', '\u{1ee47}'), - ('\u{1ee49}', '\u{1ee49}'), ('\u{1ee4b}', '\u{1ee4b}'), ('\u{1ee4d}', '\u{1ee4f}'), - ('\u{1ee51}', '\u{1ee52}'), ('\u{1ee54}', '\u{1ee54}'), ('\u{1ee57}', '\u{1ee57}'), - ('\u{1ee59}', '\u{1ee59}'), ('\u{1ee5b}', '\u{1ee5b}'), ('\u{1ee5d}', '\u{1ee5d}'), - ('\u{1ee5f}', '\u{1ee5f}'), ('\u{1ee61}', '\u{1ee62}'), ('\u{1ee64}', '\u{1ee64}'), - ('\u{1ee67}', '\u{1ee6a}'), ('\u{1ee6c}', '\u{1ee72}'), ('\u{1ee74}', '\u{1ee77}'), - ('\u{1ee79}', '\u{1ee7c}'), ('\u{1ee7e}', '\u{1ee7e}'), ('\u{1ee80}', '\u{1ee89}'), - ('\u{1ee8b}', '\u{1ee9b}'), ('\u{1eea1}', '\u{1eea3}'), ('\u{1eea5}', '\u{1eea9}'), - ('\u{1eeab}', '\u{1eebb}'), ('\u{20000}', '\u{2a6d6}'), ('\u{2a700}', '\u{2b734}'), - ('\u{2b740}', '\u{2b81d}'), ('\u{2b820}', '\u{2cea1}'), ('\u{2f800}', '\u{2fa1d}'), - ('\u{e0100}', '\u{e01ef}') - ]; - - pub fn XID_Continue(c: char) -> bool { - super::bsearch_range_table(c, XID_Continue_table) - } - - pub const XID_Start_table: &'static [(char, char)] = &[ - ('\u{41}', '\u{5a}'), ('\u{61}', '\u{7a}'), ('\u{aa}', '\u{aa}'), ('\u{b5}', '\u{b5}'), - ('\u{ba}', '\u{ba}'), ('\u{c0}', '\u{d6}'), ('\u{d8}', '\u{f6}'), ('\u{f8}', '\u{2c1}'), - ('\u{2c6}', '\u{2d1}'), ('\u{2e0}', '\u{2e4}'), ('\u{2ec}', '\u{2ec}'), ('\u{2ee}', - '\u{2ee}'), ('\u{370}', '\u{374}'), ('\u{376}', '\u{377}'), ('\u{37b}', '\u{37d}'), - ('\u{37f}', '\u{37f}'), ('\u{386}', '\u{386}'), ('\u{388}', '\u{38a}'), ('\u{38c}', - '\u{38c}'), ('\u{38e}', '\u{3a1}'), ('\u{3a3}', '\u{3f5}'), ('\u{3f7}', '\u{481}'), - ('\u{48a}', '\u{52f}'), ('\u{531}', '\u{556}'), ('\u{559}', '\u{559}'), ('\u{561}', - '\u{587}'), ('\u{5d0}', '\u{5ea}'), ('\u{5f0}', '\u{5f2}'), ('\u{620}', '\u{64a}'), - ('\u{66e}', '\u{66f}'), ('\u{671}', '\u{6d3}'), ('\u{6d5}', '\u{6d5}'), ('\u{6e5}', - '\u{6e6}'), ('\u{6ee}', '\u{6ef}'), ('\u{6fa}', '\u{6fc}'), ('\u{6ff}', '\u{6ff}'), - ('\u{710}', '\u{710}'), ('\u{712}', '\u{72f}'), ('\u{74d}', '\u{7a5}'), ('\u{7b1}', - '\u{7b1}'), ('\u{7ca}', '\u{7ea}'), ('\u{7f4}', '\u{7f5}'), ('\u{7fa}', '\u{7fa}'), - ('\u{800}', '\u{815}'), ('\u{81a}', '\u{81a}'), ('\u{824}', '\u{824}'), ('\u{828}', - '\u{828}'), ('\u{840}', '\u{858}'), ('\u{8a0}', '\u{8b4}'), ('\u{8b6}', '\u{8bd}'), - ('\u{904}', '\u{939}'), ('\u{93d}', '\u{93d}'), ('\u{950}', '\u{950}'), ('\u{958}', - '\u{961}'), ('\u{971}', '\u{980}'), ('\u{985}', '\u{98c}'), ('\u{98f}', '\u{990}'), 
- ('\u{993}', '\u{9a8}'), ('\u{9aa}', '\u{9b0}'), ('\u{9b2}', '\u{9b2}'), ('\u{9b6}', - '\u{9b9}'), ('\u{9bd}', '\u{9bd}'), ('\u{9ce}', '\u{9ce}'), ('\u{9dc}', '\u{9dd}'), - ('\u{9df}', '\u{9e1}'), ('\u{9f0}', '\u{9f1}'), ('\u{a05}', '\u{a0a}'), ('\u{a0f}', - '\u{a10}'), ('\u{a13}', '\u{a28}'), ('\u{a2a}', '\u{a30}'), ('\u{a32}', '\u{a33}'), - ('\u{a35}', '\u{a36}'), ('\u{a38}', '\u{a39}'), ('\u{a59}', '\u{a5c}'), ('\u{a5e}', - '\u{a5e}'), ('\u{a72}', '\u{a74}'), ('\u{a85}', '\u{a8d}'), ('\u{a8f}', '\u{a91}'), - ('\u{a93}', '\u{aa8}'), ('\u{aaa}', '\u{ab0}'), ('\u{ab2}', '\u{ab3}'), ('\u{ab5}', - '\u{ab9}'), ('\u{abd}', '\u{abd}'), ('\u{ad0}', '\u{ad0}'), ('\u{ae0}', '\u{ae1}'), - ('\u{af9}', '\u{af9}'), ('\u{b05}', '\u{b0c}'), ('\u{b0f}', '\u{b10}'), ('\u{b13}', - '\u{b28}'), ('\u{b2a}', '\u{b30}'), ('\u{b32}', '\u{b33}'), ('\u{b35}', '\u{b39}'), - ('\u{b3d}', '\u{b3d}'), ('\u{b5c}', '\u{b5d}'), ('\u{b5f}', '\u{b61}'), ('\u{b71}', - '\u{b71}'), ('\u{b83}', '\u{b83}'), ('\u{b85}', '\u{b8a}'), ('\u{b8e}', '\u{b90}'), - ('\u{b92}', '\u{b95}'), ('\u{b99}', '\u{b9a}'), ('\u{b9c}', '\u{b9c}'), ('\u{b9e}', - '\u{b9f}'), ('\u{ba3}', '\u{ba4}'), ('\u{ba8}', '\u{baa}'), ('\u{bae}', '\u{bb9}'), - ('\u{bd0}', '\u{bd0}'), ('\u{c05}', '\u{c0c}'), ('\u{c0e}', '\u{c10}'), ('\u{c12}', - '\u{c28}'), ('\u{c2a}', '\u{c39}'), ('\u{c3d}', '\u{c3d}'), ('\u{c58}', '\u{c5a}'), - ('\u{c60}', '\u{c61}'), ('\u{c80}', '\u{c80}'), ('\u{c85}', '\u{c8c}'), ('\u{c8e}', - '\u{c90}'), ('\u{c92}', '\u{ca8}'), ('\u{caa}', '\u{cb3}'), ('\u{cb5}', '\u{cb9}'), - ('\u{cbd}', '\u{cbd}'), ('\u{cde}', '\u{cde}'), ('\u{ce0}', '\u{ce1}'), ('\u{cf1}', - '\u{cf2}'), ('\u{d05}', '\u{d0c}'), ('\u{d0e}', '\u{d10}'), ('\u{d12}', '\u{d3a}'), - ('\u{d3d}', '\u{d3d}'), ('\u{d4e}', '\u{d4e}'), ('\u{d54}', '\u{d56}'), ('\u{d5f}', - '\u{d61}'), ('\u{d7a}', '\u{d7f}'), ('\u{d85}', '\u{d96}'), ('\u{d9a}', '\u{db1}'), - ('\u{db3}', '\u{dbb}'), ('\u{dbd}', '\u{dbd}'), ('\u{dc0}', '\u{dc6}'), ('\u{e01}', - '\u{e30}'), ('\u{e32}', '\u{e32}'), ('\u{e40}', '\u{e46}'), ('\u{e81}', '\u{e82}'), - ('\u{e84}', '\u{e84}'), ('\u{e87}', '\u{e88}'), ('\u{e8a}', '\u{e8a}'), ('\u{e8d}', - '\u{e8d}'), ('\u{e94}', '\u{e97}'), ('\u{e99}', '\u{e9f}'), ('\u{ea1}', '\u{ea3}'), - ('\u{ea5}', '\u{ea5}'), ('\u{ea7}', '\u{ea7}'), ('\u{eaa}', '\u{eab}'), ('\u{ead}', - '\u{eb0}'), ('\u{eb2}', '\u{eb2}'), ('\u{ebd}', '\u{ebd}'), ('\u{ec0}', '\u{ec4}'), - ('\u{ec6}', '\u{ec6}'), ('\u{edc}', '\u{edf}'), ('\u{f00}', '\u{f00}'), ('\u{f40}', - '\u{f47}'), ('\u{f49}', '\u{f6c}'), ('\u{f88}', '\u{f8c}'), ('\u{1000}', '\u{102a}'), - ('\u{103f}', '\u{103f}'), ('\u{1050}', '\u{1055}'), ('\u{105a}', '\u{105d}'), ('\u{1061}', - '\u{1061}'), ('\u{1065}', '\u{1066}'), ('\u{106e}', '\u{1070}'), ('\u{1075}', '\u{1081}'), - ('\u{108e}', '\u{108e}'), ('\u{10a0}', '\u{10c5}'), ('\u{10c7}', '\u{10c7}'), ('\u{10cd}', - '\u{10cd}'), ('\u{10d0}', '\u{10fa}'), ('\u{10fc}', '\u{1248}'), ('\u{124a}', '\u{124d}'), - ('\u{1250}', '\u{1256}'), ('\u{1258}', '\u{1258}'), ('\u{125a}', '\u{125d}'), ('\u{1260}', - '\u{1288}'), ('\u{128a}', '\u{128d}'), ('\u{1290}', '\u{12b0}'), ('\u{12b2}', '\u{12b5}'), - ('\u{12b8}', '\u{12be}'), ('\u{12c0}', '\u{12c0}'), ('\u{12c2}', '\u{12c5}'), ('\u{12c8}', - '\u{12d6}'), ('\u{12d8}', '\u{1310}'), ('\u{1312}', '\u{1315}'), ('\u{1318}', '\u{135a}'), - ('\u{1380}', '\u{138f}'), ('\u{13a0}', '\u{13f5}'), ('\u{13f8}', '\u{13fd}'), ('\u{1401}', - '\u{166c}'), ('\u{166f}', '\u{167f}'), ('\u{1681}', '\u{169a}'), ('\u{16a0}', '\u{16ea}'), - ('\u{16ee}', '\u{16f8}'), ('\u{1700}', 
'\u{170c}'), ('\u{170e}', '\u{1711}'), ('\u{1720}', - '\u{1731}'), ('\u{1740}', '\u{1751}'), ('\u{1760}', '\u{176c}'), ('\u{176e}', '\u{1770}'), - ('\u{1780}', '\u{17b3}'), ('\u{17d7}', '\u{17d7}'), ('\u{17dc}', '\u{17dc}'), ('\u{1820}', - '\u{1877}'), ('\u{1880}', '\u{18a8}'), ('\u{18aa}', '\u{18aa}'), ('\u{18b0}', '\u{18f5}'), - ('\u{1900}', '\u{191e}'), ('\u{1950}', '\u{196d}'), ('\u{1970}', '\u{1974}'), ('\u{1980}', - '\u{19ab}'), ('\u{19b0}', '\u{19c9}'), ('\u{1a00}', '\u{1a16}'), ('\u{1a20}', '\u{1a54}'), - ('\u{1aa7}', '\u{1aa7}'), ('\u{1b05}', '\u{1b33}'), ('\u{1b45}', '\u{1b4b}'), ('\u{1b83}', - '\u{1ba0}'), ('\u{1bae}', '\u{1baf}'), ('\u{1bba}', '\u{1be5}'), ('\u{1c00}', '\u{1c23}'), - ('\u{1c4d}', '\u{1c4f}'), ('\u{1c5a}', '\u{1c7d}'), ('\u{1c80}', '\u{1c88}'), ('\u{1ce9}', - '\u{1cec}'), ('\u{1cee}', '\u{1cf1}'), ('\u{1cf5}', '\u{1cf6}'), ('\u{1d00}', '\u{1dbf}'), - ('\u{1e00}', '\u{1f15}'), ('\u{1f18}', '\u{1f1d}'), ('\u{1f20}', '\u{1f45}'), ('\u{1f48}', - '\u{1f4d}'), ('\u{1f50}', '\u{1f57}'), ('\u{1f59}', '\u{1f59}'), ('\u{1f5b}', '\u{1f5b}'), - ('\u{1f5d}', '\u{1f5d}'), ('\u{1f5f}', '\u{1f7d}'), ('\u{1f80}', '\u{1fb4}'), ('\u{1fb6}', - '\u{1fbc}'), ('\u{1fbe}', '\u{1fbe}'), ('\u{1fc2}', '\u{1fc4}'), ('\u{1fc6}', '\u{1fcc}'), - ('\u{1fd0}', '\u{1fd3}'), ('\u{1fd6}', '\u{1fdb}'), ('\u{1fe0}', '\u{1fec}'), ('\u{1ff2}', - '\u{1ff4}'), ('\u{1ff6}', '\u{1ffc}'), ('\u{2071}', '\u{2071}'), ('\u{207f}', '\u{207f}'), - ('\u{2090}', '\u{209c}'), ('\u{2102}', '\u{2102}'), ('\u{2107}', '\u{2107}'), ('\u{210a}', - '\u{2113}'), ('\u{2115}', '\u{2115}'), ('\u{2118}', '\u{211d}'), ('\u{2124}', '\u{2124}'), - ('\u{2126}', '\u{2126}'), ('\u{2128}', '\u{2128}'), ('\u{212a}', '\u{2139}'), ('\u{213c}', - '\u{213f}'), ('\u{2145}', '\u{2149}'), ('\u{214e}', '\u{214e}'), ('\u{2160}', '\u{2188}'), - ('\u{2c00}', '\u{2c2e}'), ('\u{2c30}', '\u{2c5e}'), ('\u{2c60}', '\u{2ce4}'), ('\u{2ceb}', - '\u{2cee}'), ('\u{2cf2}', '\u{2cf3}'), ('\u{2d00}', '\u{2d25}'), ('\u{2d27}', '\u{2d27}'), - ('\u{2d2d}', '\u{2d2d}'), ('\u{2d30}', '\u{2d67}'), ('\u{2d6f}', '\u{2d6f}'), ('\u{2d80}', - '\u{2d96}'), ('\u{2da0}', '\u{2da6}'), ('\u{2da8}', '\u{2dae}'), ('\u{2db0}', '\u{2db6}'), - ('\u{2db8}', '\u{2dbe}'), ('\u{2dc0}', '\u{2dc6}'), ('\u{2dc8}', '\u{2dce}'), ('\u{2dd0}', - '\u{2dd6}'), ('\u{2dd8}', '\u{2dde}'), ('\u{3005}', '\u{3007}'), ('\u{3021}', '\u{3029}'), - ('\u{3031}', '\u{3035}'), ('\u{3038}', '\u{303c}'), ('\u{3041}', '\u{3096}'), ('\u{309d}', - '\u{309f}'), ('\u{30a1}', '\u{30fa}'), ('\u{30fc}', '\u{30ff}'), ('\u{3105}', '\u{312d}'), - ('\u{3131}', '\u{318e}'), ('\u{31a0}', '\u{31ba}'), ('\u{31f0}', '\u{31ff}'), ('\u{3400}', - '\u{4db5}'), ('\u{4e00}', '\u{9fd5}'), ('\u{a000}', '\u{a48c}'), ('\u{a4d0}', '\u{a4fd}'), - ('\u{a500}', '\u{a60c}'), ('\u{a610}', '\u{a61f}'), ('\u{a62a}', '\u{a62b}'), ('\u{a640}', - '\u{a66e}'), ('\u{a67f}', '\u{a69d}'), ('\u{a6a0}', '\u{a6ef}'), ('\u{a717}', '\u{a71f}'), - ('\u{a722}', '\u{a788}'), ('\u{a78b}', '\u{a7ae}'), ('\u{a7b0}', '\u{a7b7}'), ('\u{a7f7}', - '\u{a801}'), ('\u{a803}', '\u{a805}'), ('\u{a807}', '\u{a80a}'), ('\u{a80c}', '\u{a822}'), - ('\u{a840}', '\u{a873}'), ('\u{a882}', '\u{a8b3}'), ('\u{a8f2}', '\u{a8f7}'), ('\u{a8fb}', - '\u{a8fb}'), ('\u{a8fd}', '\u{a8fd}'), ('\u{a90a}', '\u{a925}'), ('\u{a930}', '\u{a946}'), - ('\u{a960}', '\u{a97c}'), ('\u{a984}', '\u{a9b2}'), ('\u{a9cf}', '\u{a9cf}'), ('\u{a9e0}', - '\u{a9e4}'), ('\u{a9e6}', '\u{a9ef}'), ('\u{a9fa}', '\u{a9fe}'), ('\u{aa00}', '\u{aa28}'), - ('\u{aa40}', '\u{aa42}'), ('\u{aa44}', '\u{aa4b}'), 
('\u{aa60}', '\u{aa76}'), ('\u{aa7a}', - '\u{aa7a}'), ('\u{aa7e}', '\u{aaaf}'), ('\u{aab1}', '\u{aab1}'), ('\u{aab5}', '\u{aab6}'), - ('\u{aab9}', '\u{aabd}'), ('\u{aac0}', '\u{aac0}'), ('\u{aac2}', '\u{aac2}'), ('\u{aadb}', - '\u{aadd}'), ('\u{aae0}', '\u{aaea}'), ('\u{aaf2}', '\u{aaf4}'), ('\u{ab01}', '\u{ab06}'), - ('\u{ab09}', '\u{ab0e}'), ('\u{ab11}', '\u{ab16}'), ('\u{ab20}', '\u{ab26}'), ('\u{ab28}', - '\u{ab2e}'), ('\u{ab30}', '\u{ab5a}'), ('\u{ab5c}', '\u{ab65}'), ('\u{ab70}', '\u{abe2}'), - ('\u{ac00}', '\u{d7a3}'), ('\u{d7b0}', '\u{d7c6}'), ('\u{d7cb}', '\u{d7fb}'), ('\u{f900}', - '\u{fa6d}'), ('\u{fa70}', '\u{fad9}'), ('\u{fb00}', '\u{fb06}'), ('\u{fb13}', '\u{fb17}'), - ('\u{fb1d}', '\u{fb1d}'), ('\u{fb1f}', '\u{fb28}'), ('\u{fb2a}', '\u{fb36}'), ('\u{fb38}', - '\u{fb3c}'), ('\u{fb3e}', '\u{fb3e}'), ('\u{fb40}', '\u{fb41}'), ('\u{fb43}', '\u{fb44}'), - ('\u{fb46}', '\u{fbb1}'), ('\u{fbd3}', '\u{fc5d}'), ('\u{fc64}', '\u{fd3d}'), ('\u{fd50}', - '\u{fd8f}'), ('\u{fd92}', '\u{fdc7}'), ('\u{fdf0}', '\u{fdf9}'), ('\u{fe71}', '\u{fe71}'), - ('\u{fe73}', '\u{fe73}'), ('\u{fe77}', '\u{fe77}'), ('\u{fe79}', '\u{fe79}'), ('\u{fe7b}', - '\u{fe7b}'), ('\u{fe7d}', '\u{fe7d}'), ('\u{fe7f}', '\u{fefc}'), ('\u{ff21}', '\u{ff3a}'), - ('\u{ff41}', '\u{ff5a}'), ('\u{ff66}', '\u{ff9d}'), ('\u{ffa0}', '\u{ffbe}'), ('\u{ffc2}', - '\u{ffc7}'), ('\u{ffca}', '\u{ffcf}'), ('\u{ffd2}', '\u{ffd7}'), ('\u{ffda}', '\u{ffdc}'), - ('\u{10000}', '\u{1000b}'), ('\u{1000d}', '\u{10026}'), ('\u{10028}', '\u{1003a}'), - ('\u{1003c}', '\u{1003d}'), ('\u{1003f}', '\u{1004d}'), ('\u{10050}', '\u{1005d}'), - ('\u{10080}', '\u{100fa}'), ('\u{10140}', '\u{10174}'), ('\u{10280}', '\u{1029c}'), - ('\u{102a0}', '\u{102d0}'), ('\u{10300}', '\u{1031f}'), ('\u{10330}', '\u{1034a}'), - ('\u{10350}', '\u{10375}'), ('\u{10380}', '\u{1039d}'), ('\u{103a0}', '\u{103c3}'), - ('\u{103c8}', '\u{103cf}'), ('\u{103d1}', '\u{103d5}'), ('\u{10400}', '\u{1049d}'), - ('\u{104b0}', '\u{104d3}'), ('\u{104d8}', '\u{104fb}'), ('\u{10500}', '\u{10527}'), - ('\u{10530}', '\u{10563}'), ('\u{10600}', '\u{10736}'), ('\u{10740}', '\u{10755}'), - ('\u{10760}', '\u{10767}'), ('\u{10800}', '\u{10805}'), ('\u{10808}', '\u{10808}'), - ('\u{1080a}', '\u{10835}'), ('\u{10837}', '\u{10838}'), ('\u{1083c}', '\u{1083c}'), - ('\u{1083f}', '\u{10855}'), ('\u{10860}', '\u{10876}'), ('\u{10880}', '\u{1089e}'), - ('\u{108e0}', '\u{108f2}'), ('\u{108f4}', '\u{108f5}'), ('\u{10900}', '\u{10915}'), - ('\u{10920}', '\u{10939}'), ('\u{10980}', '\u{109b7}'), ('\u{109be}', '\u{109bf}'), - ('\u{10a00}', '\u{10a00}'), ('\u{10a10}', '\u{10a13}'), ('\u{10a15}', '\u{10a17}'), - ('\u{10a19}', '\u{10a33}'), ('\u{10a60}', '\u{10a7c}'), ('\u{10a80}', '\u{10a9c}'), - ('\u{10ac0}', '\u{10ac7}'), ('\u{10ac9}', '\u{10ae4}'), ('\u{10b00}', '\u{10b35}'), - ('\u{10b40}', '\u{10b55}'), ('\u{10b60}', '\u{10b72}'), ('\u{10b80}', '\u{10b91}'), - ('\u{10c00}', '\u{10c48}'), ('\u{10c80}', '\u{10cb2}'), ('\u{10cc0}', '\u{10cf2}'), - ('\u{11003}', '\u{11037}'), ('\u{11083}', '\u{110af}'), ('\u{110d0}', '\u{110e8}'), - ('\u{11103}', '\u{11126}'), ('\u{11150}', '\u{11172}'), ('\u{11176}', '\u{11176}'), - ('\u{11183}', '\u{111b2}'), ('\u{111c1}', '\u{111c4}'), ('\u{111da}', '\u{111da}'), - ('\u{111dc}', '\u{111dc}'), ('\u{11200}', '\u{11211}'), ('\u{11213}', '\u{1122b}'), - ('\u{11280}', '\u{11286}'), ('\u{11288}', '\u{11288}'), ('\u{1128a}', '\u{1128d}'), - ('\u{1128f}', '\u{1129d}'), ('\u{1129f}', '\u{112a8}'), ('\u{112b0}', '\u{112de}'), - ('\u{11305}', '\u{1130c}'), ('\u{1130f}', 
'\u{11310}'), ('\u{11313}', '\u{11328}'), - ('\u{1132a}', '\u{11330}'), ('\u{11332}', '\u{11333}'), ('\u{11335}', '\u{11339}'), - ('\u{1133d}', '\u{1133d}'), ('\u{11350}', '\u{11350}'), ('\u{1135d}', '\u{11361}'), - ('\u{11400}', '\u{11434}'), ('\u{11447}', '\u{1144a}'), ('\u{11480}', '\u{114af}'), - ('\u{114c4}', '\u{114c5}'), ('\u{114c7}', '\u{114c7}'), ('\u{11580}', '\u{115ae}'), - ('\u{115d8}', '\u{115db}'), ('\u{11600}', '\u{1162f}'), ('\u{11644}', '\u{11644}'), - ('\u{11680}', '\u{116aa}'), ('\u{11700}', '\u{11719}'), ('\u{118a0}', '\u{118df}'), - ('\u{118ff}', '\u{118ff}'), ('\u{11ac0}', '\u{11af8}'), ('\u{11c00}', '\u{11c08}'), - ('\u{11c0a}', '\u{11c2e}'), ('\u{11c40}', '\u{11c40}'), ('\u{11c72}', '\u{11c8f}'), - ('\u{12000}', '\u{12399}'), ('\u{12400}', '\u{1246e}'), ('\u{12480}', '\u{12543}'), - ('\u{13000}', '\u{1342e}'), ('\u{14400}', '\u{14646}'), ('\u{16800}', '\u{16a38}'), - ('\u{16a40}', '\u{16a5e}'), ('\u{16ad0}', '\u{16aed}'), ('\u{16b00}', '\u{16b2f}'), - ('\u{16b40}', '\u{16b43}'), ('\u{16b63}', '\u{16b77}'), ('\u{16b7d}', '\u{16b8f}'), - ('\u{16f00}', '\u{16f44}'), ('\u{16f50}', '\u{16f50}'), ('\u{16f93}', '\u{16f9f}'), - ('\u{16fe0}', '\u{16fe0}'), ('\u{17000}', '\u{187ec}'), ('\u{18800}', '\u{18af2}'), - ('\u{1b000}', '\u{1b001}'), ('\u{1bc00}', '\u{1bc6a}'), ('\u{1bc70}', '\u{1bc7c}'), - ('\u{1bc80}', '\u{1bc88}'), ('\u{1bc90}', '\u{1bc99}'), ('\u{1d400}', '\u{1d454}'), - ('\u{1d456}', '\u{1d49c}'), ('\u{1d49e}', '\u{1d49f}'), ('\u{1d4a2}', '\u{1d4a2}'), - ('\u{1d4a5}', '\u{1d4a6}'), ('\u{1d4a9}', '\u{1d4ac}'), ('\u{1d4ae}', '\u{1d4b9}'), - ('\u{1d4bb}', '\u{1d4bb}'), ('\u{1d4bd}', '\u{1d4c3}'), ('\u{1d4c5}', '\u{1d505}'), - ('\u{1d507}', '\u{1d50a}'), ('\u{1d50d}', '\u{1d514}'), ('\u{1d516}', '\u{1d51c}'), - ('\u{1d51e}', '\u{1d539}'), ('\u{1d53b}', '\u{1d53e}'), ('\u{1d540}', '\u{1d544}'), - ('\u{1d546}', '\u{1d546}'), ('\u{1d54a}', '\u{1d550}'), ('\u{1d552}', '\u{1d6a5}'), - ('\u{1d6a8}', '\u{1d6c0}'), ('\u{1d6c2}', '\u{1d6da}'), ('\u{1d6dc}', '\u{1d6fa}'), - ('\u{1d6fc}', '\u{1d714}'), ('\u{1d716}', '\u{1d734}'), ('\u{1d736}', '\u{1d74e}'), - ('\u{1d750}', '\u{1d76e}'), ('\u{1d770}', '\u{1d788}'), ('\u{1d78a}', '\u{1d7a8}'), - ('\u{1d7aa}', '\u{1d7c2}'), ('\u{1d7c4}', '\u{1d7cb}'), ('\u{1e800}', '\u{1e8c4}'), - ('\u{1e900}', '\u{1e943}'), ('\u{1ee00}', '\u{1ee03}'), ('\u{1ee05}', '\u{1ee1f}'), - ('\u{1ee21}', '\u{1ee22}'), ('\u{1ee24}', '\u{1ee24}'), ('\u{1ee27}', '\u{1ee27}'), - ('\u{1ee29}', '\u{1ee32}'), ('\u{1ee34}', '\u{1ee37}'), ('\u{1ee39}', '\u{1ee39}'), - ('\u{1ee3b}', '\u{1ee3b}'), ('\u{1ee42}', '\u{1ee42}'), ('\u{1ee47}', '\u{1ee47}'), - ('\u{1ee49}', '\u{1ee49}'), ('\u{1ee4b}', '\u{1ee4b}'), ('\u{1ee4d}', '\u{1ee4f}'), - ('\u{1ee51}', '\u{1ee52}'), ('\u{1ee54}', '\u{1ee54}'), ('\u{1ee57}', '\u{1ee57}'), - ('\u{1ee59}', '\u{1ee59}'), ('\u{1ee5b}', '\u{1ee5b}'), ('\u{1ee5d}', '\u{1ee5d}'), - ('\u{1ee5f}', '\u{1ee5f}'), ('\u{1ee61}', '\u{1ee62}'), ('\u{1ee64}', '\u{1ee64}'), - ('\u{1ee67}', '\u{1ee6a}'), ('\u{1ee6c}', '\u{1ee72}'), ('\u{1ee74}', '\u{1ee77}'), - ('\u{1ee79}', '\u{1ee7c}'), ('\u{1ee7e}', '\u{1ee7e}'), ('\u{1ee80}', '\u{1ee89}'), - ('\u{1ee8b}', '\u{1ee9b}'), ('\u{1eea1}', '\u{1eea3}'), ('\u{1eea5}', '\u{1eea9}'), - ('\u{1eeab}', '\u{1eebb}'), ('\u{20000}', '\u{2a6d6}'), ('\u{2a700}', '\u{2b734}'), - ('\u{2b740}', '\u{2b81d}'), ('\u{2b820}', '\u{2cea1}'), ('\u{2f800}', '\u{2fa1d}') - ]; - - pub fn XID_Start(c: char) -> bool { - super::bsearch_range_table(c, XID_Start_table) - } - -} - diff --git 
a/third_party/rust/unicode-xid-0.1.0/src/tests.rs b/third_party/rust/unicode-xid-0.1.0/src/tests.rs deleted file mode 100644 index f4333967f9..0000000000 --- a/third_party/rust/unicode-xid-0.1.0/src/tests.rs +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#[cfg(feature = "bench")] -use std::iter; -#[cfg(feature = "bench")] -use test::Bencher; -#[cfg(feature = "bench")] -use std::prelude::v1::*; - -use super::UnicodeXID; - -#[cfg(feature = "bench")] -#[bench] -fn cargo_is_xid_start(b: &mut Bencher) { - let string = iter::repeat('a').take(4096).collect::(); - - b.bytes = string.len() as u64; - b.iter(|| { - string.chars().all(UnicodeXID::is_xid_start) - }); -} - -#[cfg(feature = "bench")] -#[bench] -fn stdlib_is_xid_start(b: &mut Bencher) { - let string = iter::repeat('a').take(4096).collect::(); - - b.bytes = string.len() as u64; - b.iter(|| { - string.chars().all(char::is_xid_start) - }); -} - -#[cfg(feature = "bench")] -#[bench] -fn cargo_xid_continue(b: &mut Bencher) { - let string = iter::repeat('a').take(4096).collect::(); - - b.bytes = string.len() as u64; - b.iter(|| { - string.chars().all(UnicodeXID::is_xid_continue) - }); -} - -#[cfg(feature = "bench")] -#[bench] -fn stdlib_xid_continue(b: &mut Bencher) { - let string = iter::repeat('a').take(4096).collect::(); - - b.bytes = string.len() as u64; - b.iter(|| { - string.chars().all(char::is_xid_continue) - }); -} - -#[test] -fn test_is_xid_start() { - let chars = [ - 'A', 'Z', 'a', 'z', - '\u{1000d}', '\u{10026}', - ]; - - for ch in &chars { - assert!(UnicodeXID::is_xid_start(*ch), "{}", ch); - } -} - -#[test] -fn test_is_not_xid_start() { - let chars = [ - '\x00', '\x01', - '0', '9', - ' ', '[', '<', '{', '(', - '\u{02c2}', '\u{ffff}', - ]; - - for ch in &chars { - assert!(!UnicodeXID::is_xid_start(*ch), "{}", ch); - } -} - -#[test] -fn test_is_xid_continue() { - let chars = [ - '0', '9', 'A', 'Z', 'a', 'z', '_', - '\u{1000d}', '\u{10026}', - ]; - - for ch in &chars { - assert!(UnicodeXID::is_xid_continue(*ch), "{}", ch); - } -} - -#[test] -fn test_is_not_xid_continue() { - let chars = [ - '\x00', '\x01', - ' ', '[', '<', '{', '(', - '\u{02c2}', '\u{ffff}', - ]; - - for &ch in &chars { - assert!(!UnicodeXID::is_xid_continue(ch), "{}", ch); - } -} diff --git a/third_party/rust/uuid/.cargo-checksum.json b/third_party/rust/uuid/.cargo-checksum.json index 5fed3eeb6a..64e293cef3 100644 --- a/third_party/rust/uuid/.cargo-checksum.json +++ b/third_party/rust/uuid/.cargo-checksum.json @@ -1 +1 @@ 
-{"files":{"CODEOWNERS":"65d3fcb4156a2d5bce80d382a34044753e384d7f1eb71cdc646de400a0b969c8","CODE_OF_CONDUCT.md":"7d9c9062ee835c2dfd348cfddb938c563f3a7b1140dd090444a03ea1d73626b1","CONTRIBUTING.md":"c2b507733d5af2de972d63237a094a135935ad45cc74dedb79c199d841f35a3e","COPYRIGHT":"b4b2c0de2a05de3372d5c828128413ce82bb7dba2272487b7729f09cc3d3519d","Cargo.toml":"2647794e162e5e764854003d4e0ca2e2d0de5f7c11e3ec61ab53fae310328aab","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"436bc5a105d8e57dcd8778730f3754f7bf39c14d2f530e4cde4bd2d17a83ec3d","README.md":"f82b58d44ed24b07cc8e3a14e233ff2d0aa297732da1b789f16a84293de39e23","README.tpl":"1d5787815cea427e5cf0cce634bc629b03d51dda355fa52de500a9658c45625b","benches/format_str.rs":"0d080946d397a2578a978105a5a27309edded7115d2081e683170f0bf96edc3e","benches/invalid_parse_str.rs":"7f44d2ebec6ee1368d179f12dd09a288d589b252434082b2de134a658b460812","benches/mod.rs":"4733d7aa62dafe3e85ab90dca518b57f350a5538ea5643c5313e63939e884a45","benches/serde_support.rs":"afc719718c9a5d705b60bc9cd39720b921d9ee63ccf11c4b2900f02beea70c1b","benches/slog_support/mod.rs":"1be626f0a6921f4e6bd333ce7ab4a6c4da1fb6f3ae1c503672b4ba168a70c01d","benches/slog_support/parse_str.rs":"9c63ee7047ac8b9d08f02b7081020dd7300f84f302068927c859bbe26cea66a3","benches/valid_parse_str.rs":"7db47c7d25b20c8da03f25107fbea2b5c97fc814ff226e8489eda374f477eeac","src/adapter/compact.rs":"fa76330d5ff33fbb0f7da5898caee64d0e74fbe435c86621fefb864ee43560ec","src/adapter/core_support/mod.rs":"65bebe5034e450782ec9b0942bd4d758795ee315095fcc3f54412630a987f938","src/adapter/mod.rs":"6051f59190e438bbbd702e173fc85cc410b0d9b07b553991600064a34d53d2da","src/builder.rs":"86d8607e783a4a7429712edeae4ed34aff776064f1ce4b99d8b71461146b3e38","src/core_support.rs":"c5c94c3eab19a5833ec588db86d10aa94731f724d7fc5554f6e47d072ccdd88b","src/lib.rs":"2794fa11c36c09f4e356ce8ad0859f0df7eced8b03716e7df2aa971f30ff5059","src/parser/core_support.rs":"e37812662674fef191aca4574d705cbfaac6516a99a6871d88f7843494e5e653","src/parser/mod.rs":"51526e211c95730c830512007da23dfc9f88d1feccc9ddf881c541c1d5e01e2a","src/parser/std_support.rs":"4398d708bd42e8d3cb31eed8ada92615fb1cbfc70bfb3c7cbe952f47c7fe1183","src/prelude.rs":"c2c359c483993ffa3b2469ee5017d68d5c9d0c4226758112f585949e76971fff","src/serde_support.rs":"8821ba4b73f35d9a1ab19b3a32922cbdc991a7dce07062ca98230f45fdd57d98","src/slog_support.rs":"370f891a73f99436baecd21f5f2b7d7c89842336aad99167b07ca3f03c48a70c","src/std_support.rs":"eeb0d6560f96c8ce3cacafa6fe5342a8ad3b86387bf3455fb66459e28b39a6e1","src/test_util.rs":"1dfc1ab68bb403dd6d696fafeb7c00be59c37b51155703f3033ebf1062dd629f","src/u128_support.rs":"97ca20af9117e44bad72f987488efb0173699a22e0c646b268e0fe3dd90355a7","src/v1.rs":"82654b0cadfa56fd0140d78db5ab2d9869ea3d8eaaede0b975d42904317e9da4","src/v3.rs":"d25899b070bd791bc2b784d828399f5bce25f77300765dfd96e76583f31047f3","src/v4.rs":"c38784386b1f44d6333c4447140dd8ba0deec2d8c5bace5abd0e48f523716b0b","src/v5.rs":"11aeea13d38c5e3c5d7cc8bf571ac1ce57a0d46f363b90a991ed43dc1cc9caaa","src/winapi_support.rs":"13d2d83dd14ece29dfd88b4c5985ef62ff8017278bac0809dba334483881c457"},"package":"90dbc611eb48397705a6b0f6e917da23ae517e4d127123d2cf7674206627d32a"} \ No newline at end of file 
+{"files":{"CODEOWNERS":"65d3fcb4156a2d5bce80d382a34044753e384d7f1eb71cdc646de400a0b969c8","CODE_OF_CONDUCT.md":"7d9c9062ee835c2dfd348cfddb938c563f3a7b1140dd090444a03ea1d73626b1","CONTRIBUTING.md":"c2b507733d5af2de972d63237a094a135935ad45cc74dedb79c199d841f35a3e","COPYRIGHT":"b4b2c0de2a05de3372d5c828128413ce82bb7dba2272487b7729f09cc3d3519d","Cargo.toml":"94e4df2b560d316a5dbadb38bd95e338d82504f3a7e0d8bf4a98ab33bd12d54b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"436bc5a105d8e57dcd8778730f3754f7bf39c14d2f530e4cde4bd2d17a83ec3d","README.md":"a58d1918e2e0e9d2c5ae50ebc21c79e52227f3e012f45e7932a2c67d554ee641","README.tpl":"5e94c2dfce3f674229cc3326873da36d94f9592a213d7785b3dfbde9b23ca27f","benches/format_str.rs":"d8cbdcc43d5b78f1674cbdfb4ab7917e110e0e92d91eaa1b0419de1fe38cb4a3","benches/invalid_parse_str.rs":"c1ed6e57b6d7078d698e7301cd05f2ce272acea9856bfb43683bec9b8984ff11","benches/mod.rs":"48b2b66f176d37f207336e2eb074d39273376e24dff30bf375491f8a4532219b","benches/serde_support.rs":"4d051017a7de4d54d5afe5cd33ab97fc725a76e21fdea0f3e3b1efd32afc3f16","benches/slog_support/mod.rs":"26031e0f77ff53fbdf85f519c65e3acdde44971841a2a637fa6b2c4ec0d1b13e","benches/slog_support/parse_str.rs":"b520e1f43baedba95d6d60e184dc4300e8c272b2ad4bf779b357c392c3d319e4","benches/valid_parse_str.rs":"4a8c28d721455ecd9de9c780d421740508e456d545a7ca42fda8909ae3ffb110","src/adapter/compact.rs":"018c02bc2b7f34d669eacff6e00c87cb7a73c8498f0a0927b0ac0f6c95cc23f6","src/adapter/mod.rs":"86124a0ac594f46911bf5197dfef0887964760e90136c0effa3656e5dc2179e9","src/builder/error.rs":"ab2d652a5897cf108dedde0de00776140d4eaaaee81922bcf0310edcabbdfdc1","src/builder/mod.rs":"a9a980bc120d39ffddd598259a2e9c32ceb7c8885e948c1efe73d634601e6873","src/error.rs":"3a1a6d9109f526dc10ccc7e63738ec500d0c0a063dd3322413878474a15e5e9f","src/lib.rs":"5b9200da120444062202ef247c087995328efeb9b27ba6dce0f4026ea91fff79","src/parser/error.rs":"ec136a5497017402797784053f127c4812f126a7f43053624446c4deb301b9a8","src/parser/mod.rs":"39605f6897298df0cd1dc543c7bd735eea369936e5d30d908851fab1d5c014dd","src/prelude.rs":"351f692d433ac98164123413fe2b0a37c8781aeb9680fb2bd7108a833b9e0123","src/serde_support.rs":"3b15f0594db978d6b10dcb27ed5c56956207dd682d7d7e02b262e27916c63941","src/slog_support.rs":"88be60521752f0185256815d9a494a1d2a68f2e5f28910da7a753196ff18d4e2","src/test_util.rs":"2a6b9d22241a8ab598adb6d7634f29c5ab8c575e67a89f261eb45c9df322127b","src/v1.rs":"b2427f37f9a7d6afc226b32179ced8a16c239283cfae89b5eb6ac52ca1fddebb","src/v3.rs":"1ed2ed955feab877944b445eed1638050d4e97a49e1f2954d00d5935454f1269","src/v4.rs":"25715d366c8082d59449654ceaad52814ce8c11f991ca3e88b950837da0e73b5","src/v5.rs":"5acae85b24d6bcf368ee09f07d08fc47f44f9cbae83ff49a6dee1bde8a55a20e","src/winapi_support.rs":"c84279fca11326b6dfe3db823c7b054f697bcbcabea9f4dafcaf822620415d70"},"package":"9fde2f6a4bea1d6e007c4ad38c6839fa71cbb63b6dbf5b595aa38dc9b1093c11"} \ No newline at end of file diff --git a/third_party/rust/uuid/Cargo.toml b/third_party/rust/uuid/Cargo.toml index 81d607d373..dd6684eebf 100644 --- a/third_party/rust/uuid/Cargo.toml +++ b/third_party/rust/uuid/Cargo.toml @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. 
crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're @@ -11,14 +11,17 @@ # will likely look very different (and much more reasonable) [package] +edition = "2018" name = "uuid" -version = "0.7.4" +version = "0.8.1" authors = ["Ashley Mannix", "Christopher Armstrong", "Dylan DPC", "Hunar Roop Kahlon"] exclude = [".github/**", ".travis.yml", "appveyor.yml", "bors.toml"] description = "A library to generate and parse UUIDs." homepage = "https://github.com/uuid-rs/uuid" documentation = "https://docs.rs/uuid" readme = "README.md" +keywords = ["guid", "unique", "uuid"] +categories = ["data-structures", "no-std", "parser-implementations", "wasm"] license = "Apache-2.0 OR MIT" repository = "https://github.com/uuid-rs/uuid" [package.metadata.docs.rs] @@ -26,23 +29,18 @@ default-target = "x86_64-pc-windows-msvc" features = ["guid", "serde", "slog", "v1", "v3", "v4", "v5"] [package.metadata.playground] -features = ["serde", "u128", "v1", "v3", "v4", "v5"] -[dependencies.byteorder] -version = "1" -features = ["i128"] -optional = true -default-features = false - +features = ["serde", "v1", "v3", "v4", "v5"] [dependencies.md5] version = "0.6" optional = true [dependencies.rand] -version = "0.6" +version = "0.7" optional = true [dependencies.serde] version = "1.0.56" +features = ["serde_derive"] optional = true default-features = false @@ -66,13 +64,10 @@ version = "1.0" version = "1.0.56" [features] -const_fn = ["nightly"] default = ["std"] guid = ["winapi"] -nightly = [] std = [] stdweb = ["rand/stdweb"] -u128 = ["byteorder"] v1 = [] v3 = ["md5"] v4 = ["rand"] diff --git a/third_party/rust/uuid/README.md b/third_party/rust/uuid/README.md index 32b5db0218..5908eb62e6 100644 --- a/third_party/rust/uuid/README.md +++ b/third_party/rust/uuid/README.md @@ -3,7 +3,7 @@ uuid [![Latest Version](https://img.shields.io/crates/v/uuid.svg)](https://crates.io/crates/uuid) [![Join the chat at https://gitter.im/uuid-rs/Lobby](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/uuid-rs/Lobby?utm_source=badge&utm_medium=badge&utm_content=badge) -![Minimum rustc version](https://img.shields.io/badge/rustc-1.22.0+-yellow.svg) +![Minimum rustc version](https://img.shields.io/badge/rustc-1.31.0+-yellow.svg) [![Build Status](https://ci.appveyor.com/api/projects/status/github/uuid-rs/uuid?branch=master&svg=true)](https://ci.appveyor.com/project/uuid-rs/uuid/branch/master) [![Build Status](https://travis-ci.org/uuid-rs/uuid.svg?branch=master)](https://travis-ci.org/uuid-rs/uuid) [![Average time to resolve an issue](https://isitmaintained.com/badge/resolution/uuid-rs/uuid.svg)](https://isitmaintained.com/project/uuid-rs/uuid "Average time to resolve an issue") @@ -57,21 +57,21 @@ By default, `uuid` can be depended on with: ```toml [dependencies] -uuid = "0.7" +uuid = "0.8" ``` To activate various features, use syntax like: ```toml [dependencies] -uuid = { version = "0.7", features = ["serde", "v4"] } +uuid = { version = "0.8", features = ["serde", "v4"] } ``` You can disable default features with: ```toml [dependencies] -uuid = { version = "0.7", default-features = false } +uuid = { version = "0.8", default-features = false } ``` ## Examples @@ -90,7 +90,7 @@ fn main() { To create a new random (V4) UUID and print it out in hexadecimal form: -```ignore,rust +```rust // Note that this requires the `v4` feature enabled in the uuid crate. 
use uuid::Uuid; @@ -116,7 +116,7 @@ Examples of string representations: [`wasm-bindgen`]: https://github.com/rustwasm/wasm-bindgen -[`Uuid`]: https://docs.rs/uuid/0.7.4/uuid/struct.Uuid.html +[`Uuid`]: https://docs.rs/uuid/0.8.1/uuid/struct.Uuid.html --- # License diff --git a/third_party/rust/uuid/README.tpl b/third_party/rust/uuid/README.tpl index 186ef0b7a4..82b2960390 100644 --- a/third_party/rust/uuid/README.tpl +++ b/third_party/rust/uuid/README.tpl @@ -3,7 +3,7 @@ [![Latest Version](https://img.shields.io/crates/v/uuid.svg)](https://crates.io/crates/uuid) [![Join the chat at https://gitter.im/uuid-rs/Lobby](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/uuid-rs/Lobby?utm_source=badge&utm_medium=badge&utm_content=badge) -![Minimum rustc version](https://img.shields.io/badge/rustc-1.22.0+-yellow.svg) +![Minimum rustc version](https://img.shields.io/badge/rustc-1.31.0+-yellow.svg) {{badges}} --- diff --git a/third_party/rust/uuid/benches/format_str.rs b/third_party/rust/uuid/benches/format_str.rs index 0bb67e3101..f029cf03d1 100644 --- a/third_party/rust/uuid/benches/format_str.rs +++ b/third_party/rust/uuid/benches/format_str.rs @@ -1,10 +1,9 @@ #![feature(test)] extern crate test; -extern crate uuid; use std::io::Write; use test::Bencher; -use uuid::prelude::*; +use uuid::Uuid; #[bench] fn bench_hyphen(b: &mut Bencher) { diff --git a/third_party/rust/uuid/benches/invalid_parse_str.rs b/third_party/rust/uuid/benches/invalid_parse_str.rs index 4363f70859..7fde28b3bb 100644 --- a/third_party/rust/uuid/benches/invalid_parse_str.rs +++ b/third_party/rust/uuid/benches/invalid_parse_str.rs @@ -1,14 +1,8 @@ #![feature(test)] -#[cfg(feature = "slog")] -#[macro_use] -extern crate slog; extern crate test; -extern crate uuid; -#[cfg(feature = "slog")] -use slog::Drain; use test::Bencher; -use uuid::prelude::*; +use uuid::Uuid; #[bench] fn bench_parse_invalid_strings(b: &mut Bencher) { diff --git a/third_party/rust/uuid/benches/mod.rs b/third_party/rust/uuid/benches/mod.rs index e3df218294..2ef574f012 100644 --- a/third_party/rust/uuid/benches/mod.rs +++ b/third_party/rust/uuid/benches/mod.rs @@ -1,8 +1,4 @@ #![feature(test)] -#[cfg(feature = "slog")] -#[macro_use] -extern crate slog; -extern crate test; -extern crate uuid; +#[cfg(feature = "slog")] pub mod slog_support; diff --git a/third_party/rust/uuid/benches/serde_support.rs b/third_party/rust/uuid/benches/serde_support.rs index 0daab5e804..a7ce64f824 100644 --- a/third_party/rust/uuid/benches/serde_support.rs +++ b/third_party/rust/uuid/benches/serde_support.rs @@ -1,13 +1,12 @@ #![cfg(feature = "serde")] #![feature(test)] -extern crate bincode; -extern crate serde_json; +use bincode; +use serde_json; extern crate test; -extern crate uuid; use test::Bencher; -use uuid::prelude::*; +use uuid::Uuid; #[bench] fn bench_json_encode(b: &mut Bencher) { diff --git a/third_party/rust/uuid/benches/slog_support/mod.rs b/third_party/rust/uuid/benches/slog_support/mod.rs index bc564fc1db..cdc37a7359 100644 --- a/third_party/rust/uuid/benches/slog_support/mod.rs +++ b/third_party/rust/uuid/benches/slog_support/mod.rs @@ -1,7 +1 @@ -#[cfg(feature = "slog")] -// #[macro_use] -// extern crate slog; -// extern crate test; -extern crate uuid; - pub mod parse_str; diff --git a/third_party/rust/uuid/benches/slog_support/parse_str.rs b/third_party/rust/uuid/benches/slog_support/parse_str.rs index fe778982b1..fa0e5f7020 100644 --- a/third_party/rust/uuid/benches/slog_support/parse_str.rs +++ b/third_party/rust/uuid/benches/slog_support/parse_str.rs 
@@ -1,15 +1,15 @@ -use test::Bencher; -use uuid::prelude::*; +extern crate test; #[bench] #[cfg(feature = "slog")] -pub fn bench_log_discard_kv(b: &mut Bencher) { - let u1 = Uuid::parse_str("F9168C5E-CEB2-4FAB-B6BF-329BF39FA1E4").unwrap(); - let root = ::slog::Logger::root(::slog::Drain::fuse(::slog::Discard), o!()); - // let root = ::slog::Logger::root(::slog::Discard.fuse(), o!()); +pub fn bench_log_discard_kv(b: &mut test::Bencher) { + let u1 = + uuid::Uuid::parse_str("F9168C5E-CEB2-4FAB-B6BF-329BF39FA1E4").unwrap(); + let root = + slog::Logger::root(::slog::Drain::fuse(::slog::Discard), slog::o!()); b.iter(|| { #[cfg(feature = "slog")] - crit!(root, "test"; "u1" => u1); + slog::crit!(root, "test"; "u1" => u1); }); } diff --git a/third_party/rust/uuid/benches/valid_parse_str.rs b/third_party/rust/uuid/benches/valid_parse_str.rs index 78a3279b68..f20d6e320f 100644 --- a/third_party/rust/uuid/benches/valid_parse_str.rs +++ b/third_party/rust/uuid/benches/valid_parse_str.rs @@ -1,14 +1,9 @@ #![feature(test)] -#[cfg(feature = "slog")] -#[macro_use] -extern crate slog; + extern crate test; -extern crate uuid; -#[cfg(feature = "slog")] -use slog::Drain; use test::Bencher; -use uuid::prelude::*; +use uuid::Uuid; #[bench] fn bench_parse_valid_strings(b: &mut Bencher) { diff --git a/third_party/rust/uuid/src/adapter/compact.rs b/third_party/rust/uuid/src/adapter/compact.rs index 57891f0f7a..ad5362affd 100644 --- a/third_party/rust/uuid/src/adapter/compact.rs +++ b/third_party/rust/uuid/src/adapter/compact.rs @@ -1,52 +1,47 @@ //! Module for use with `#[serde(with = "...")]` to serialize a [`Uuid`] -//! as a `[u8; 16] +//! as a `[u8; 16]`. //! //! [`Uuid`]: ../../struct.Uuid.html -use serde::{Deserialize, Deserializer, Serialize, Serializer}; - -use prelude::*; - /// Serializer for a [`Uuid`] into a `[u8; 16]` /// /// [`Uuid`]: ../../struct.Uuid.html -pub fn serialize( - u: &Uuid, - serializer: S, -) -> Result { - u.as_bytes().serialize(serializer) +pub fn serialize(u: &crate::Uuid, serializer: S) -> Result +where + S: serde::Serializer, +{ + serde::Serialize::serialize(u.as_bytes(), serializer) } /// Deserializer from a `[u8; 16]` into a [`Uuid`] /// /// [`Uuid`]: ../../struct.Uuid.html -pub fn deserialize<'de, D: Deserializer<'de>>( - deserializer: D, -) -> Result { - let bytes = <[u8; 16]>::deserialize(deserializer)?; +pub fn deserialize<'de, D>(deserializer: D) -> Result +where + D: serde::Deserializer<'de>, +{ + let bytes: [u8; 16] = serde::Deserialize::deserialize(deserializer)?; - Ok(Uuid::from_bytes(bytes)) + Ok(crate::Uuid::from_bytes(bytes)) } #[cfg(test)] mod tests { - use serde_test; - use prelude::*; - - #[derive(Serialize, Debug, Deserialize, PartialEq)] - struct UuidContainer { - #[serde(with = "super")] - u: Uuid, - } + use serde_test; #[test] fn test_serialize_compact() { + #[derive(serde::Serialize, Debug, serde::Deserialize, PartialEq)] + struct UuidContainer { + #[serde(with = "super")] + u: crate::Uuid, + } use serde_test::Configure; let uuid_bytes = b"F9168C5E-CEB2-4F"; let container = UuidContainer { - u: Uuid::from_slice(uuid_bytes).unwrap(), + u: crate::Uuid::from_slice(uuid_bytes).unwrap(), }; // more complex because of the struct wrapping the actual UUID diff --git a/third_party/rust/uuid/src/adapter/core_support/mod.rs b/third_party/rust/uuid/src/adapter/core_support/mod.rs deleted file mode 100644 index 2ffaee9a0c..0000000000 --- a/third_party/rust/uuid/src/adapter/core_support/mod.rs +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2013-2014 The Rust Project Developers. 
-// Copyright 2018 The Uuid Project Developers. -// -// See the COPYRIGHT file at the top-level directory of this distribution. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use core::fmt; -use prelude::*; - -impl fmt::Display for super::Hyphenated { - #[inline] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::LowerHex::fmt(self, f) - } -} - -impl<'a> fmt::Display for super::HyphenatedRef<'a> { - #[inline] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::LowerHex::fmt(self, f) - } -} - -impl fmt::Display for super::Simple { - #[inline] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::LowerHex::fmt(self, f) - } -} - -impl<'a> fmt::Display for super::SimpleRef<'a> { - #[inline] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::LowerHex::fmt(self, f) - } -} - -impl fmt::Display for super::Urn { - #[inline] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::LowerHex::fmt(self, f) - } -} - -impl<'a> fmt::Display for super::UrnRef<'a> { - #[inline] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::LowerHex::fmt(self, f) - } -} - -impl fmt::LowerHex for super::Hyphenated { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(self.encode_lower(&mut [0; Self::LENGTH])) - } -} - -impl<'a> fmt::LowerHex for super::HyphenatedRef<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - // TODO: Self doesn't work https://github.com/rust-lang/rust/issues/52808 - f.write_str(self.encode_lower(&mut [0; super::HyphenatedRef::LENGTH])) - } -} - -impl fmt::LowerHex for super::Simple { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(self.encode_lower(&mut [0; Self::LENGTH])) - } -} - -impl<'a> fmt::LowerHex for super::SimpleRef<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - // TODO: Self doesn't work https://github.com/rust-lang/rust/issues/52808 - f.write_str(self.encode_lower(&mut [0; super::SimpleRef::LENGTH])) - } -} - -impl fmt::LowerHex for super::Urn { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(self.encode_lower(&mut [0; Self::LENGTH])) - } -} - -impl<'a> fmt::LowerHex for super::UrnRef<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - // TODO: Self doesn't work https://github.com/rust-lang/rust/issues/52808 - f.write_str(self.encode_lower(&mut [0; super::UrnRef::LENGTH])) - } -} - -impl fmt::UpperHex for super::Hyphenated { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(self.encode_upper(&mut [0; Self::LENGTH])) - } -} - -impl<'a> fmt::UpperHex for super::HyphenatedRef<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - // TODO: Self doesn't work https://github.com/rust-lang/rust/issues/52808 - f.write_str(self.encode_upper(&mut [0; super::HyphenatedRef::LENGTH])) - } -} - -impl fmt::UpperHex for super::Simple { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(self.encode_upper(&mut [0; Self::LENGTH])) - } -} - -impl<'a> fmt::UpperHex for super::SimpleRef<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - // TODO: Self doesn't work https://github.com/rust-lang/rust/issues/52808 - f.write_str(self.encode_upper(&mut [0; super::SimpleRef::LENGTH])) - } -} - -impl fmt::UpperHex for super::Urn { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(self.encode_upper(&mut [0; 
Self::LENGTH])) - } -} - -impl<'a> fmt::UpperHex for super::UrnRef<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - // TODO: Self doesn't work https://github.com/rust-lang/rust/issues/52808 - f.write_str(self.encode_upper(&mut [0; super::UrnRef::LENGTH])) - } -} - -impl From for super::Hyphenated { - #[inline] - fn from(f: Uuid) -> Self { - super::Hyphenated::from_uuid(f) - } -} - -impl<'a> From<&'a Uuid> for super::HyphenatedRef<'a> { - #[inline] - fn from(f: &'a Uuid) -> Self { - super::HyphenatedRef::from_uuid_ref(f) - } -} - -impl From for super::Simple { - #[inline] - fn from(f: Uuid) -> Self { - super::Simple::from_uuid(f) - } -} - -impl<'a> From<&'a Uuid> for super::SimpleRef<'a> { - #[inline] - fn from(f: &'a Uuid) -> Self { - super::SimpleRef::from_uuid_ref(f) - } -} - -impl From for super::Urn { - #[inline] - fn from(f: Uuid) -> Self { - super::Urn::from_uuid(f) - } -} - -impl<'a> From<&'a Uuid> for super::UrnRef<'a> { - #[inline] - fn from(f: &'a Uuid) -> Self { - super::UrnRef::from_uuid_ref(f) - } -} diff --git a/third_party/rust/uuid/src/adapter/mod.rs b/third_party/rust/uuid/src/adapter/mod.rs index 88fb2719c9..2f2ea765a3 100644 --- a/third_party/rust/uuid/src/adapter/mod.rs +++ b/third_party/rust/uuid/src/adapter/mod.rs @@ -9,14 +9,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Adapters for various formats for [`Uuid`]s -//! -//! [`Uuid`]: ../struct.Uuid.html +//! Adapters for various formats for UUIDs -use core::str; -use prelude::*; - -mod core_support; +use crate::prelude::*; +use crate::std::{fmt, str}; #[cfg(feature = "serde")] pub mod compact; @@ -70,123 +66,53 @@ pub struct Urn(Uuid); pub struct UrnRef<'a>(&'a Uuid); impl Uuid { - /// Creates a [`Hyphenated`] instance from a [`Uuid`]. + /// Get a [`Hyphenated`] formatter. /// - /// [`Uuid`]: ../struct.Uuid.html - /// [`Hyphenated`]: adapter/struct.Hyphenated.html - #[cfg(not(feature = "const_fn"))] - #[inline] - pub fn to_hyphenated(self) -> Hyphenated { - Hyphenated::from_uuid(self) - } - - /// Creates a [`Hyphenated`] instance from a [`Uuid`]. - /// - /// [`Uuid`]: ../struct.Uuid.html /// [`Hyphenated`]: adapter/struct.Hyphenated.html - #[cfg(feature = "const_fn")] #[inline] pub const fn to_hyphenated(self) -> Hyphenated { Hyphenated::from_uuid(self) } - /// Creates a [`HyphenatedRef`] instance from a [`Uuid`] reference. + /// Get a borrowed [`HyphenatedRef`] formatter. /// - /// [`Uuid`]: ../struct.Uuid.html - /// [`HyphenatedRef`]: adapter/struct.HyphenatedRef.html - #[cfg(not(feature = "const_fn"))] - #[inline] - pub fn to_hyphenated_ref(&self) -> HyphenatedRef { - HyphenatedRef::from_uuid_ref(self) - } - - /// Creates a [`HyphenatedRef`] instance from a [`Uuid`] reference. - /// - /// [`Uuid`]: ../struct.Uuid.html /// [`HyphenatedRef`]: adapter/struct.HyphenatedRef.html - #[cfg(feature = "const_fn")] #[inline] - pub const fn to_hyphenated_ref(&self) -> HyphenatedRef { + pub const fn to_hyphenated_ref(&self) -> HyphenatedRef<'_> { HyphenatedRef::from_uuid_ref(self) } - /// Creates a [`Simple`] instance from a [`Uuid`]. + /// Get a [`Simple`] formatter. /// - /// [`Uuid`]: ../struct.Uuid.html /// [`Simple`]: adapter/struct.Simple.html - #[cfg(not(feature = "const_fn"))] - #[inline] - pub fn to_simple(self) -> Simple { - Simple::from_uuid(self) - } - - /// Creates a [`Simple`] instance from a [`Uuid`]. 
- /// - /// [`Uuid`]: ../struct.Uuid.html - /// [`Simple`]: adapter/struct.Simple.html - #[cfg(feature = "const_fn")] #[inline] pub const fn to_simple(self) -> Simple { Simple::from_uuid(self) } - /// Creates a [`SimpleRef`] instance from a [`Uuid`] reference. + /// Get a borrowed [`SimpleRef`] formatter. /// - /// [`Uuid`]: ../struct.Uuid.html /// [`SimpleRef`]: adapter/struct.SimpleRef.html - #[cfg(not(feature = "const_fn"))] #[inline] - pub fn to_simple_ref(&self) -> SimpleRef { + pub const fn to_simple_ref(&self) -> SimpleRef<'_> { SimpleRef::from_uuid_ref(self) } - /// Creates a [`SimpleRef`] instance from a [`Uuid`] reference. - /// - /// [`Uuid`]: ../struct.Uuid.html - /// [`SimpleRef`]: adapter/struct.SimpleRef.html - #[cfg(feature = "const_fn")] - #[inline] - pub const fn to_simple_ref(&self) -> SimpleRef { - SimpleRef::from_uuid_ref(self) - } - - /// Creates a [`Urn`] instance from a [`Uuid`]. + /// Get a [`Urn`] formatter. /// /// [`Uuid`]: ../struct.Uuid.html /// [`Urn`]: adapter/struct.Urn.html - #[cfg(not(feature = "const_fn"))] - #[inline] - pub fn to_urn(self) -> Urn { - Urn::from_uuid(self) - } - - /// Creates a [`Urn`] instance from a [`Uuid`]. - /// - /// [`Uuid`]: ../struct.Uuid.html - /// [`Urn`]: adapter/struct.Urn.html - #[cfg(feature = "const_fn")] #[inline] pub const fn to_urn(self) -> Urn { Urn::from_uuid(self) } - /// Creates a [`UrnRef`] instance from a [`Uuid`] reference. + /// Get a borrowed [`UrnRef`] formatter. /// /// [`Uuid`]: ../struct.Uuid.html /// [`UrnRef`]: adapter/struct.UrnRef.html - #[cfg(not(feature = "const_fn"))] #[inline] - pub fn to_urn_ref(&self) -> UrnRef { - UrnRef::from_uuid_ref(self) - } - - /// Creates a [`UrnRef`] instance from a [`Uuid`] reference. - /// - /// [`Uuid`]: ../struct.Uuid.html - /// [`UrnRef`]: adapter/struct.UrnRef.html - #[cfg(feature = "const_fn")] - #[inline] - pub const fn to_urn_ref(&self) -> UrnRef { + pub const fn to_urn_ref(&self) -> UrnRef<'_> { UrnRef::from_uuid_ref(self) } } @@ -261,16 +187,6 @@ impl Hyphenated { /// /// [`Uuid`]: ../struct.Uuid.html /// [`Hyphenated`]: struct.Hyphenated.html - #[cfg(not(feature = "const_fn"))] - pub fn from_uuid(uuid: Uuid) -> Self { - Hyphenated(uuid) - } - - /// Creates a [`Hyphenated`] from a [`Uuid`]. - /// - /// [`Uuid`]: ../struct.Uuid.html - /// [`Hyphenated`]: struct.Hyphenated.html - #[cfg(feature = "const_fn")] pub const fn from_uuid(uuid: Uuid) -> Self { Hyphenated(uuid) } @@ -378,16 +294,6 @@ impl<'a> HyphenatedRef<'a> { /// /// [`Uuid`]: ../struct.Uuid.html /// [`HyphenatedRef`]: struct.HyphenatedRef.html - #[cfg(not(feature = "const_fn"))] - pub fn from_uuid_ref(uuid: &'a Uuid) -> Self { - HyphenatedRef(uuid) - } - - /// Creates a [`HyphenatedRef`] from a [`Uuid`] reference. - /// - /// [`Uuid`]: ../struct.Uuid.html - /// [`HyphenatedRef`]: struct.HyphenatedRef.html - #[cfg(feature = "const_fn")] pub const fn from_uuid_ref(uuid: &'a Uuid) -> Self { HyphenatedRef(uuid) } @@ -502,16 +408,6 @@ impl Simple { /// /// [`Uuid`]: ../struct.Uuid.html /// [`Simple`]: struct.Simple.html - #[cfg(not(feature = "const_fn"))] - pub fn from_uuid(uuid: Uuid) -> Self { - Simple(uuid) - } - - /// Creates a [`Simple`] from a [`Uuid`]. 
- /// - /// [`Uuid`]: ../struct.Uuid.html - /// [`Simple`]: struct.Simple.html - #[cfg(feature = "const_fn")] pub const fn from_uuid(uuid: Uuid) -> Self { Simple(uuid) } @@ -617,16 +513,6 @@ impl<'a> SimpleRef<'a> { /// /// [`Uuid`]: ../struct.Uuid.html /// [`SimpleRef`]: struct.SimpleRef.html - #[cfg(not(feature = "const_fn"))] - pub fn from_uuid_ref(uuid: &'a Uuid) -> Self { - SimpleRef(uuid) - } - - /// Creates a [`SimpleRef`] from a [`Uuid`] reference. - /// - /// [`Uuid`]: ../struct.Uuid.html - /// [`SimpleRef`]: struct.SimpleRef.html - #[cfg(feature = "const_fn")] pub const fn from_uuid_ref(uuid: &'a Uuid) -> Self { SimpleRef(uuid) } @@ -732,16 +618,6 @@ impl Urn { /// /// [`Uuid`]: ../struct.Uuid.html /// [`Urn`]: struct.Urn.html - #[cfg(not(feature = "const_fn"))] - pub fn from_uuid(uuid: Uuid) -> Self { - Urn(uuid) - } - - /// Creates a [`Urn`] from a [`Uuid`]. - /// - /// [`Uuid`]: ../struct.Uuid.html - /// [`Urn`]: struct.Urn.html - #[cfg(feature = "const_fn")] pub const fn from_uuid(uuid: Uuid) -> Self { Urn(uuid) } @@ -856,16 +732,6 @@ impl<'a> UrnRef<'a> { /// /// [`Uuid`]: ../struct.Uuid.html /// [`UrnRef`]: struct.UrnRef.html - #[cfg(not(feature = "const_fn"))] - pub fn from_uuid_ref(uuid: &'a Uuid) -> Self { - UrnRef(uuid) - } - - /// Creates a [`UrnRef`] from a [`Uuid`] reference. - /// - /// [`Uuid`]: ../struct.Uuid.html - /// [`UrnRef`]: struct.UrnRef.html - #[cfg(feature = "const_fn")] pub const fn from_uuid_ref(uuid: &'a Uuid) -> Self { UrnRef(&uuid) } @@ -970,85 +836,143 @@ impl<'a> UrnRef<'a> { } } -// TODO: uncomment when we undo the pub(crate) change -// #[cfg(test)] -// mod tests { -// use Uuid; -// -// #[test] -// fn hyphenated_trailing() { -// let mut buf = [b'x'; 100]; -// let len = Uuid::nil().to_hyphenated().encode_lower(&mut buf).len(); -// assert_eq!(len, super::Hyphenated::LENGTH); -// assert!(buf[len..].iter().all(|x| *x == b'x')); -// } -// #[test] -// fn hyphenated_ref_trailing() { -// let mut buf = [b'x'; 100]; -// let len = Uuid::nil().to_hyphenated().encode_lower(&mut buf).len(); -// assert_eq!(len, super::HyphenatedRef::LENGTH); -// assert!(buf[len..].iter().all(|x| *x == b'x')); -// } -// -// #[test] -// fn simple_trailing() { -// let mut buf = [b'x'; 100]; -// let len = Uuid::nil().to_simple().encode_lower(&mut buf).len(); -// assert_eq!(len, super::Simple::LENGTH); -// assert!(buf[len..].iter().all(|x| *x == b'x')); -// } -// #[test] -// fn simple_ref_trailing() { -// let mut buf = [b'x'; 100]; -// let len = Uuid::nil().to_simple().encode_lower(&mut buf).len(); -// assert_eq!(len, super::SimpleRef::LENGTH); -// assert!(buf[len..].iter().all(|x| *x == b'x')); -// } -// -// #[test] -// fn urn_trailing() { -// let mut buf = [b'x'; 100]; -// let len = Uuid::nil().to_urn().encode_lower(&mut buf).len(); -// assert_eq!(len, super::Urn::LENGTH); -// assert!(buf[len..].iter().all(|x| *x == b'x')); -// } -// #[test] -// fn urn_ref_trailing() { -// let mut buf = [b'x'; 100]; -// let len = Uuid::nil().to_urn().encode_lower(&mut buf).len(); -// assert_eq!(len, super::UrnRef::LENGTH); -// assert!(buf[len..].iter().all(|x| *x == b'x')); -// } -// -// #[test] -// #[should_panic] -// fn hyphenated_too_small() { -// Uuid::nil().to_hyphenated().encode_lower(&mut [0; 35]); -// } -// #[test] -// #[should_panic] -// fn hyphenated_ref_too_small() { -// Uuid::nil().to_hyphenated_ref().encode_lower(&mut [0; 35]); -// } -// -// #[test] -// #[should_panic] -// fn simple_too_small() { -// Uuid::nil().to_simple().encode_lower(&mut [0; 31]); -// } -// #[test] -// 
#[should_panic] -// fn simple_ref_too_small() { -// Uuid::nil().to_simple_ref().encode_lower(&mut [0; 31]); -// } -// #[test] -// #[should_panic] -// fn urn_too_small() { -// Uuid::nil().to_urn().encode_lower(&mut [0; 44]); -// } -// #[test] -// #[should_panic] -// fn urn_ref_too_small() { -// Uuid::nil().to_urn_ref().encode_lower(&mut [0; 44]); -// } -// } +macro_rules! impl_adapter_traits { + ($($T:ident<$($a:lifetime),*>),+) => {$( + impl<$($a),*> fmt::Display for $T<$($a),*> { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::LowerHex::fmt(self, f) + } + } + + impl<$($a),*> fmt::LowerHex for $T<$($a),*> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // TODO: Self doesn't work https://github.com/rust-lang/rust/issues/52808 + f.write_str(self.encode_lower(&mut [0; $T::LENGTH])) + } + } + + impl<$($a),*> fmt::UpperHex for $T<$($a),*> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // TODO: Self doesn't work https://github.com/rust-lang/rust/issues/52808 + f.write_str(self.encode_upper(&mut [0; $T::LENGTH])) + } + } + + impl_adapter_from!($T<$($a),*>); + )+} +} + +macro_rules! impl_adapter_from { + ($T:ident<>) => { + impl From for $T { + #[inline] + fn from(f: Uuid) -> Self { + $T::from_uuid(f) + } + } + }; + ($T:ident<$a:lifetime>) => { + impl<$a> From<&$a Uuid> for $T<$a> { + #[inline] + fn from(f: &$a Uuid) -> Self { + $T::from_uuid_ref(f) + } + } + }; +} + +impl_adapter_traits! { + Hyphenated<>, + HyphenatedRef<'a>, + Simple<>, + SimpleRef<'a>, + Urn<>, + UrnRef<'a> +} + +#[cfg(test)] +mod tests { + use crate::prelude::*; + + #[test] + fn hyphenated_trailing() { + let mut buf = [b'x'; 100]; + let len = Uuid::nil().to_hyphenated().encode_lower(&mut buf).len(); + assert_eq!(len, super::Hyphenated::LENGTH); + assert!(buf[len..].iter().all(|x| *x == b'x')); + } + + #[test] + fn hyphenated_ref_trailing() { + let mut buf = [b'x'; 100]; + let len = Uuid::nil().to_hyphenated().encode_lower(&mut buf).len(); + assert_eq!(len, super::HyphenatedRef::LENGTH); + assert!(buf[len..].iter().all(|x| *x == b'x')); + } + + #[test] + fn simple_trailing() { + let mut buf = [b'x'; 100]; + let len = Uuid::nil().to_simple().encode_lower(&mut buf).len(); + assert_eq!(len, super::Simple::LENGTH); + assert!(buf[len..].iter().all(|x| *x == b'x')); + } + + #[test] + fn simple_ref_trailing() { + let mut buf = [b'x'; 100]; + let len = Uuid::nil().to_simple().encode_lower(&mut buf).len(); + assert_eq!(len, super::SimpleRef::LENGTH); + assert!(buf[len..].iter().all(|x| *x == b'x')); + } + + #[test] + fn urn_trailing() { + let mut buf = [b'x'; 100]; + let len = Uuid::nil().to_urn().encode_lower(&mut buf).len(); + assert_eq!(len, super::Urn::LENGTH); + assert!(buf[len..].iter().all(|x| *x == b'x')); + } + + #[test] + fn urn_ref_trailing() { + let mut buf = [b'x'; 100]; + let len = Uuid::nil().to_urn().encode_lower(&mut buf).len(); + assert_eq!(len, super::UrnRef::LENGTH); + assert!(buf[len..].iter().all(|x| *x == b'x')); + } + + #[test] + #[should_panic] + fn hyphenated_too_small() { + Uuid::nil().to_hyphenated().encode_lower(&mut [0; 35]); + } + + #[test] + #[should_panic] + fn hyphenated_ref_too_small() { + Uuid::nil().to_hyphenated_ref().encode_lower(&mut [0; 35]); + } + + #[test] + #[should_panic] + fn simple_too_small() { + Uuid::nil().to_simple().encode_lower(&mut [0; 31]); + } + #[test] + #[should_panic] + fn simple_ref_too_small() { + Uuid::nil().to_simple_ref().encode_lower(&mut [0; 31]); + } + #[test] + #[should_panic] + fn urn_too_small() { + 
Uuid::nil().to_urn().encode_lower(&mut [0; 44]); + } + #[test] + #[should_panic] + fn urn_ref_too_small() { + Uuid::nil().to_urn_ref().encode_lower(&mut [0; 44]); + } +} diff --git a/third_party/rust/uuid/src/builder.rs b/third_party/rust/uuid/src/builder.rs deleted file mode 100644 index a807bc67cd..0000000000 --- a/third_party/rust/uuid/src/builder.rs +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright 2013-2014 The Rust Project Developers. -// Copyright 2018 The Uuid Project Developers. -// -// See the COPYRIGHT file at the top-level directory of this distribution. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! A Builder type for [`Uuid`]s. -//! -//! [`Uuid`]: ../struct.Uuid.html - -use prelude::*; -use BytesError; - -/// A builder struct for creating a [`Uuid`] -/// -/// # Examples -/// -/// Creating a v4 `Uuid` from externally generated bytes: -/// -/// ``` -/// use uuid::{Builder, Variant, Version}; -/// -/// # let rng = || [ -/// # 70, 235, 208, 238, 14, 109, 67, 201, 185, 13, 204, 195, 90, -/// # 145, 63, 62, -/// # ]; -/// let random_bytes = rng(); -/// let uuid = Builder::from_bytes(random_bytes) -/// .set_variant(Variant::RFC4122) -/// .set_version(Version::Random) -/// .build(); -/// ``` -#[allow(missing_copy_implementations)] -#[derive(Debug)] -pub struct Builder(Uuid); - -impl Builder { - /// Creates a `Builder` using the supplied big-endian bytes. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use uuid::Builder; - /// use uuid::Bytes; - /// - /// let bytes: Bytes = [ - /// 70, 235, 208, 238, 14, 109, 67, 201, 185, 13, 204, 195, 90, 145, 63, 62, - /// ]; - /// - /// let mut builder = Builder::from_bytes(bytes); - /// let uuid = builder.build().to_hyphenated().to_string(); - /// - /// let expected_uuid = String::from("46ebd0ee-0e6d-43c9-b90d-ccc35a913f3e"); - /// - /// assert_eq!(expected_uuid, uuid); - /// ``` - /// - /// An incorrect number of bytes: - /// - /// ```compile_fail - /// use uuid::Builder; - /// use uuid::Bytes; - /// - /// let bytes: Bytes = [4, 54, 67, 12, 43, 2, 98, 76]; // doesn't compile - /// - /// let uuid = Builder::from_bytes(bytes); - /// ``` - pub fn from_bytes(b: Bytes) -> Self { - Builder(Uuid::from_bytes(b)) - } - - /// Creates a `Builder` using the supplied big-endian bytes. - /// - /// # Errors - /// - /// This function will return an error if `b` has any length other than 16. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use uuid::Builder; - /// - /// let bytes = [4, 54, 67, 12, 43, 2, 98, 76, 32, 50, 87, 5, 1, 33, 43, 87]; - /// - /// let builder = Builder::from_slice(&bytes); - /// let uuid = - /// builder.map(|mut builder| builder.build().to_hyphenated().to_string()); - /// - /// let expected_uuid = - /// Ok(String::from("0436430c-2b02-624c-2032-570501212b57")); - /// - /// assert_eq!(expected_uuid, uuid); - /// ``` - /// - /// An incorrect number of bytes: - /// - /// ``` - /// use uuid::prelude::*; - /// use uuid::Builder; - /// - /// let bytes = [4, 54, 67, 12, 43, 2, 98, 76]; - /// - /// let builder = Builder::from_slice(&bytes); - /// - /// assert!(builder.is_err()); - /// ``` - pub fn from_slice(b: &[u8]) -> Result { - const BYTES_LEN: usize = 16; - - let len = b.len(); - - if len != BYTES_LEN { - return Err(BytesError::new(BYTES_LEN, len)); - } - - let mut bytes: Bytes = [0; 16]; - bytes.copy_from_slice(b); - Ok(Self::from_bytes(bytes)) - } - - /// Creates a `Builder` from four field values. - /// - /// # Errors - /// - /// This function will return an error if `d4`'s length is not 8 bytes. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use uuid::Builder; - /// - /// let d4 = [12, 3, 9, 56, 54, 43, 8, 9]; - /// - /// let builder = Builder::from_fields(42, 12, 5, &d4); - /// let uuid = - /// builder.map(|mut builder| builder.build().to_hyphenated().to_string()); - /// - /// let expected_uuid = - /// Ok(String::from("0000002a-000c-0005-0c03-0938362b0809")); - /// - /// assert_eq!(expected_uuid, uuid); - /// ``` - /// - /// An invalid length: - /// - /// ``` - /// use uuid::prelude::*; - /// - /// let d4 = [12]; - /// - /// let builder = uuid::Builder::from_fields(42, 12, 5, &d4); - /// - /// assert!(builder.is_err()); - /// ``` - pub fn from_fields( - d1: u32, - d2: u16, - d3: u16, - d4: &[u8], - ) -> Result { - Uuid::from_fields(d1, d2, d3, d4).map(Builder) - } - - /// Creates a `Builder` with an initial [`Uuid::nil`] - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use uuid::Builder; - /// - /// let mut builder = Builder::nil(); - /// - /// assert_eq!( - /// builder.build().to_hyphenated().to_string(), - /// "00000000-0000-0000-0000-000000000000" - /// ); - /// ``` - pub fn nil() -> Self { - Builder(Uuid::nil()) - } - - /// Specifies the variant of the internal [`Uuid`]. - pub fn set_variant(&mut self, v: Variant) -> &mut Self { - self.0.set_variant(v); - self - } - - /// Specifies the version number of the internal [`Uuid`]. - pub fn set_version(&mut self, v: Version) -> &mut Self { - self.0.set_version(v); - self - } - - /// Hands over the internal constructed [`Uuid`] - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use uuid::Builder; - /// - /// let uuid = Builder::nil().build(); - /// - /// assert_eq!( - /// uuid.to_hyphenated().to_string(), - /// "00000000-0000-0000-0000-000000000000" - /// ); - /// ``` - pub fn build(&mut self) -> Uuid { - self.0 - } -} diff --git a/third_party/rust/uuid/src/builder/error.rs b/third_party/rust/uuid/src/builder/error.rs new file mode 100644 index 0000000000..2c42798f66 --- /dev/null +++ b/third_party/rust/uuid/src/builder/error.rs @@ -0,0 +1,52 @@ +use crate::std::fmt; + +/// The error that can occur when creating a [`Uuid`]. +/// +/// [`Uuid`]: struct.Uuid.html +#[derive(Clone, Debug, Eq, Hash, PartialEq)] +pub(crate) struct Error { + expected: usize, + found: usize, +} + +impl Error { + /// The expected number of bytes. 
+ #[inline] + const fn expected(&self) -> usize { + self.expected + } + + /// The number of bytes found. + #[inline] + const fn found(&self) -> usize { + self.found + } + + /// Create a new [`UuidError`]. + /// + /// [`UuidError`]: struct.UuidError.html + #[inline] + pub(crate) const fn new(expected: usize, found: usize) -> Self { + Error { expected, found } + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "invalid bytes length: expected {}, found {}", + self.expected(), + self.found() + ) + } +} + +#[cfg(feature = "std")] +mod std_support { + use super::*; + + use crate::std::error; + + impl error::Error for Error {} +} diff --git a/third_party/rust/uuid/src/builder/mod.rs b/third_party/rust/uuid/src/builder/mod.rs new file mode 100644 index 0000000000..3b5c7491a5 --- /dev/null +++ b/third_party/rust/uuid/src/builder/mod.rs @@ -0,0 +1,474 @@ +// Copyright 2013-2014 The Rust Project Developers. +// Copyright 2018 The Uuid Project Developers. +// +// See the COPYRIGHT file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A Builder type for [`Uuid`]s. +//! +//! [`Uuid`]: ../struct.Uuid.html + +mod error; +pub(crate) use self::error::Error; + +use crate::prelude::*; + +impl Uuid { + /// The 'nil UUID'. + /// + /// The nil UUID is special form of UUID that is specified to have all + /// 128 bits set to zero, as defined in [IETF RFC 4122 Section 4.1.7][RFC]. + /// + /// [RFC]: https://tools.ietf.org/html/rfc4122.html#section-4.1.7 + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use uuid::Uuid; + /// + /// let uuid = Uuid::nil(); + /// + /// assert_eq!( + /// uuid.to_hyphenated().to_string(), + /// "00000000-0000-0000-0000-000000000000" + /// ); + /// ``` + pub const fn nil() -> Self { + Uuid::from_bytes([0; 16]) + } + + /// Creates a UUID from four field values in big-endian order. + /// + /// # Errors + /// + /// This function will return an error if `d4`'s length is not 8 bytes. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use uuid::Uuid; + /// + /// let d4 = [12, 3, 9, 56, 54, 43, 8, 9]; + /// + /// let uuid = Uuid::from_fields(42, 12, 5, &d4); + /// let uuid = uuid.map(|uuid| uuid.to_hyphenated().to_string()); + /// + /// let expected_uuid = + /// Ok(String::from("0000002a-000c-0005-0c03-0938362b0809")); + /// + /// assert_eq!(expected_uuid, uuid); + /// ``` + pub fn from_fields( + d1: u32, + d2: u16, + d3: u16, + d4: &[u8], + ) -> Result { + const D4_LEN: usize = 8; + + let len = d4.len(); + + if len != D4_LEN { + Err(Error::new(D4_LEN, len))?; + } + + Ok(Uuid::from_bytes([ + (d1 >> 24) as u8, + (d1 >> 16) as u8, + (d1 >> 8) as u8, + d1 as u8, + (d2 >> 8) as u8, + d2 as u8, + (d3 >> 8) as u8, + d3 as u8, + d4[0], + d4[1], + d4[2], + d4[3], + d4[4], + d4[5], + d4[6], + d4[7], + ])) + } + + /// Creates a UUID from four field values in little-endian order. + /// + /// The bytes in the `d1`, `d2` and `d3` fields will + /// be converted into big-endian order. 
+ /// + /// # Examples + /// + /// ``` + /// use uuid::Uuid; + /// + /// let d1 = 0xAB3F1097u32; + /// let d2 = 0x501Eu16; + /// let d3 = 0xB736u16; + /// let d4 = [12, 3, 9, 56, 54, 43, 8, 9]; + /// + /// let uuid = Uuid::from_fields_le(d1, d2, d3, &d4); + /// let uuid = uuid.map(|uuid| uuid.to_hyphenated().to_string()); + /// + /// let expected_uuid = + /// Ok(String::from("97103fab-1e50-36b7-0c03-0938362b0809")); + /// + /// assert_eq!(expected_uuid, uuid); + /// ``` + pub fn from_fields_le( + d1: u32, + d2: u16, + d3: u16, + d4: &[u8], + ) -> Result { + const D4_LEN: usize = 8; + + let len = d4.len(); + + if len != D4_LEN { + Err(Error::new(D4_LEN, len))?; + } + + Ok(Uuid::from_bytes([ + d1 as u8, + (d1 >> 8) as u8, + (d1 >> 16) as u8, + (d1 >> 24) as u8, + (d2) as u8, + (d2 >> 8) as u8, + d3 as u8, + (d3 >> 8) as u8, + d4[0], + d4[1], + d4[2], + d4[3], + d4[4], + d4[5], + d4[6], + d4[7], + ])) + } + + /// Creates a UUID from a 128bit value in big-endian order. + pub const fn from_u128(v: u128) -> Self { + Uuid::from_bytes([ + (v >> 120) as u8, + (v >> 112) as u8, + (v >> 104) as u8, + (v >> 96) as u8, + (v >> 88) as u8, + (v >> 80) as u8, + (v >> 72) as u8, + (v >> 64) as u8, + (v >> 56) as u8, + (v >> 48) as u8, + (v >> 40) as u8, + (v >> 32) as u8, + (v >> 24) as u8, + (v >> 16) as u8, + (v >> 8) as u8, + v as u8, + ]) + } + + /// Creates a UUID from a 128bit value in little-endian order. + pub const fn from_u128_le(v: u128) -> Self { + Uuid::from_bytes([ + v as u8, + (v >> 8) as u8, + (v >> 16) as u8, + (v >> 24) as u8, + (v >> 32) as u8, + (v >> 40) as u8, + (v >> 48) as u8, + (v >> 56) as u8, + (v >> 64) as u8, + (v >> 72) as u8, + (v >> 80) as u8, + (v >> 88) as u8, + (v >> 96) as u8, + (v >> 104) as u8, + (v >> 112) as u8, + (v >> 120) as u8, + ]) + } + + /// Creates a UUID using the supplied big-endian bytes. + /// + /// # Errors + /// + /// This function will return an error if `b` has any length other than 16. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use uuid::Uuid; + /// + /// let bytes = [4, 54, 67, 12, 43, 2, 98, 76, 32, 50, 87, 5, 1, 33, 43, 87]; + /// + /// let uuid = Uuid::from_slice(&bytes); + /// let uuid = uuid.map(|uuid| uuid.to_hyphenated().to_string()); + /// + /// let expected_uuid = + /// Ok(String::from("0436430c-2b02-624c-2032-570501212b57")); + /// + /// assert_eq!(expected_uuid, uuid); + /// ``` + /// + /// An incorrect number of bytes: + /// + /// ``` + /// use uuid::Uuid; + /// + /// let bytes = [4, 54, 67, 12, 43, 2, 98, 76]; + /// + /// let uuid = Uuid::from_slice(&bytes); + /// + /// assert!(uuid.is_err()); + /// ``` + pub fn from_slice(b: &[u8]) -> Result { + const BYTES_LEN: usize = 16; + + let len = b.len(); + + if len != BYTES_LEN { + Err(Error::new(BYTES_LEN, len))?; + } + + let mut bytes: Bytes = [0; 16]; + bytes.copy_from_slice(b); + Ok(Uuid::from_bytes(bytes)) + } + + /// Creates a UUID using the supplied big-endian bytes. + pub const fn from_bytes(bytes: Bytes) -> Uuid { + Uuid(bytes) + } +} + +/// A builder struct for creating a UUID. 
+/// +/// # Examples +/// +/// Creating a v4 UUID from externally generated bytes: +/// +/// ``` +/// use uuid::{Builder, Variant, Version}; +/// +/// # let rng = || [ +/// # 70, 235, 208, 238, 14, 109, 67, 201, 185, 13, 204, 195, 90, +/// # 145, 63, 62, +/// # ]; +/// let random_bytes = rng(); +/// let uuid = Builder::from_bytes(random_bytes) +/// .set_variant(Variant::RFC4122) +/// .set_version(Version::Random) +/// .build(); +/// ``` +#[allow(missing_copy_implementations)] +#[derive(Debug)] +pub struct Builder(crate::Bytes); + +impl Builder { + /// Creates a `Builder` using the supplied big-endian bytes. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let bytes: uuid::Bytes = [ + /// 70, 235, 208, 238, 14, 109, 67, 201, 185, 13, 204, 195, 90, 145, 63, 62, + /// ]; + /// + /// let mut builder = uuid::Builder::from_bytes(bytes); + /// let uuid = builder.build().to_hyphenated().to_string(); + /// + /// let expected_uuid = String::from("46ebd0ee-0e6d-43c9-b90d-ccc35a913f3e"); + /// + /// assert_eq!(expected_uuid, uuid); + /// ``` + /// + /// An incorrect number of bytes: + /// + /// ```compile_fail + /// let bytes: uuid::Bytes = [4, 54, 67, 12, 43, 2, 98, 76]; // doesn't compile + /// + /// let uuid = uuid::Builder::from_bytes(bytes); + /// ``` + pub const fn from_bytes(b: Bytes) -> Self { + Builder(b) + } + + /// Creates a `Builder` using the supplied big-endian bytes. + /// + /// # Errors + /// + /// This function will return an error if `b` has any length other than 16. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let bytes = [4, 54, 67, 12, 43, 2, 98, 76, 32, 50, 87, 5, 1, 33, 43, 87]; + /// + /// let builder = uuid::Builder::from_slice(&bytes); + /// let uuid = + /// builder.map(|mut builder| builder.build().to_hyphenated().to_string()); + /// + /// let expected_uuid = + /// Ok(String::from("0436430c-2b02-624c-2032-570501212b57")); + /// + /// assert_eq!(expected_uuid, uuid); + /// ``` + /// + /// An incorrect number of bytes: + /// + /// ``` + /// let bytes = [4, 54, 67, 12, 43, 2, 98, 76]; + /// + /// let builder = uuid::Builder::from_slice(&bytes); + /// + /// assert!(builder.is_err()); + /// ``` + pub fn from_slice(b: &[u8]) -> Result { + const BYTES_LEN: usize = 16; + + let len = b.len(); + + if len != BYTES_LEN { + Err(Error::new(BYTES_LEN, len))?; + } + + let mut bytes: crate::Bytes = [0; 16]; + bytes.copy_from_slice(b); + Ok(Self::from_bytes(bytes)) + } + + /// Creates a `Builder` from four big-endian field values. + /// + /// # Errors + /// + /// This function will return an error if `d4`'s length is not 8 bytes. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let d4 = [12, 3, 9, 56, 54, 43, 8, 9]; + /// + /// let builder = uuid::Builder::from_fields(42, 12, 5, &d4); + /// let uuid = + /// builder.map(|mut builder| builder.build().to_hyphenated().to_string()); + /// + /// let expected_uuid = + /// Ok(String::from("0000002a-000c-0005-0c03-0938362b0809")); + /// + /// assert_eq!(expected_uuid, uuid); + /// ``` + /// + /// An invalid length: + /// + /// ``` + /// let d4 = [12]; + /// + /// let builder = uuid::Builder::from_fields(42, 12, 5, &d4); + /// + /// assert!(builder.is_err()); + /// ``` + pub fn from_fields( + d1: u32, + d2: u16, + d3: u16, + d4: &[u8], + ) -> Result { + Uuid::from_fields(d1, d2, d3, d4).map(|uuid| { + let bytes = *uuid.as_bytes(); + + Builder::from_bytes(bytes) + }) + } + + /// Creates a `Builder` from a big-endian 128bit value. 
+ pub fn from_u128(v: u128) -> Self { + Builder::from_bytes(*Uuid::from_u128(v).as_bytes()) + } + + /// Creates a `Builder` with an initial [`Uuid::nil`]. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use uuid::Builder; + /// + /// let mut builder = Builder::nil(); + /// + /// assert_eq!( + /// builder.build().to_hyphenated().to_string(), + /// "00000000-0000-0000-0000-000000000000" + /// ); + /// ``` + pub const fn nil() -> Self { + Builder([0; 16]) + } + + /// Specifies the variant of the UUID. + pub fn set_variant(&mut self, v: crate::Variant) -> &mut Self { + let byte = self.0[8]; + + self.0[8] = match v { + crate::Variant::NCS => byte & 0x7f, + crate::Variant::RFC4122 => (byte & 0x3f) | 0x80, + crate::Variant::Microsoft => (byte & 0x1f) | 0xc0, + crate::Variant::Future => (byte & 0x1f) | 0xe0, + }; + + self + } + + /// Specifies the version number of the UUID. + pub fn set_version(&mut self, v: crate::Version) -> &mut Self { + self.0[6] = (self.0[6] & 0x0f) | ((v as u8) << 4); + + self + } + + /// Hands over the internal constructed [`Uuid`]. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use uuid::Builder; + /// + /// let uuid = Builder::nil().build(); + /// + /// assert_eq!( + /// uuid.to_hyphenated().to_string(), + /// "00000000-0000-0000-0000-000000000000" + /// ); + /// ``` + /// + /// [`Uuid`]: struct.Uuid.html + pub fn build(&mut self) -> Uuid { + let uuid = Uuid::from_bytes(self.0); + + uuid + } +} diff --git a/third_party/rust/uuid/src/core_support.rs b/third_party/rust/uuid/src/core_support.rs deleted file mode 100644 index d01f21074b..0000000000 --- a/third_party/rust/uuid/src/core_support.rs +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2013-2014 The Rust Project Developers. -// Copyright 2018 The Uuid Project Developers. -// -// See the COPYRIGHT file at the top-level directory of this distribution. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use core::{fmt, str}; -use parser; -use prelude::*; - -impl From for super::Error { - fn from(err: super::BytesError) -> Self { - super::Error::Bytes(err) - } -} - -impl fmt::Debug for Uuid { - #[inline] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::LowerHex::fmt(self, f) - } -} - -impl fmt::Display for super::Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - super::Error::Bytes(ref err) => fmt::Display::fmt(&err, f), - super::Error::Parse(ref err) => fmt::Display::fmt(&err, f), - } - } -} - -impl fmt::Display for Uuid { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::LowerHex::fmt(self, f) - } -} - -impl fmt::Display for Variant { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Variant::NCS => write!(f, "NCS"), - Variant::RFC4122 => write!(f, "RFC4122"), - Variant::Microsoft => write!(f, "Microsoft"), - Variant::Future => write!(f, "Future"), - } - } -} - -impl fmt::Display for ::BytesError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "invalid bytes length: expected {}, found {}", - self.expected(), - self.found() - ) - } -} - -impl fmt::LowerHex for Uuid { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::LowerHex::fmt(&self.to_hyphenated_ref(), f) - } -} - -impl fmt::UpperHex for Uuid { - #[inline] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::UpperHex::fmt(&self.to_hyphenated_ref(), f) - } -} - -impl str::FromStr for Uuid { - type Err = parser::ParseError; - - fn from_str(uuid_str: &str) -> Result { - Uuid::parse_str(uuid_str) - } -} - -impl Default for Uuid { - #[inline] - fn default() -> Self { - Uuid::nil() - } -} - -#[cfg(test)] -mod tests { - extern crate std; - - use self::std::prelude::v1::*; - use prelude::*; - use test_util; - - macro_rules! 
check { - ($buf:ident, $format:expr, $target:expr, $len:expr, $cond:expr) => { - $buf.clear(); - write!($buf, $format, $target).unwrap(); - assert!($buf.len() == $len); - assert!($buf.chars().all($cond), "{}", $buf); - }; - } - - #[test] - fn test_uuid_compare() { - let uuid1 = test_util::new(); - let uuid2 = test_util::new2(); - - assert_eq!(uuid1, uuid1); - assert_eq!(uuid2, uuid2); - - assert_ne!(uuid1, uuid2); - assert_ne!(uuid2, uuid1); - } - - #[test] - fn test_uuid_default() { - let default_uuid = Uuid::default(); - let nil_uuid = Uuid::nil(); - - assert_eq!(default_uuid, nil_uuid); - } - - #[test] - fn test_uuid_display() { - use super::fmt::Write; - - let uuid = test_util::new(); - let s = uuid.to_string(); - let mut buffer = String::new(); - - assert_eq!(s, uuid.to_hyphenated().to_string()); - - check!(buffer, "{}", uuid, 36, |c| c.is_lowercase() - || c.is_digit(10) - || c == '-'); - } - - #[test] - fn test_uuid_lowerhex() { - use super::fmt::Write; - - let mut buffer = String::new(); - let uuid = test_util::new(); - - check!(buffer, "{:x}", uuid, 36, |c| c.is_lowercase() - || c.is_digit(10) - || c == '-'); - } - - // noinspection RsAssertEqual - #[test] - fn test_uuid_operator_eq() { - let uuid1 = test_util::new(); - let uuid1_dup = uuid1.clone(); - let uuid2 = test_util::new2(); - - assert!(uuid1 == uuid1); - assert!(uuid1 == uuid1_dup); - assert!(uuid1_dup == uuid1); - - assert!(uuid1 != uuid2); - assert!(uuid2 != uuid1); - assert!(uuid1_dup != uuid2); - assert!(uuid2 != uuid1_dup); - } - - #[test] - fn test_uuid_to_string() { - use super::fmt::Write; - - let uuid = test_util::new(); - let s = uuid.to_string(); - let mut buffer = String::new(); - - assert_eq!(s.len(), 36); - - check!(buffer, "{}", s, 36, |c| c.is_lowercase() - || c.is_digit(10) - || c == '-'); - } - - #[test] - fn test_uuid_upperhex() { - use super::fmt::Write; - - let mut buffer = String::new(); - let uuid = test_util::new(); - - check!(buffer, "{:X}", uuid, 36, |c| c.is_uppercase() - || c.is_digit(10) - || c == '-'); - } -} diff --git a/third_party/rust/uuid/src/error.rs b/third_party/rust/uuid/src/error.rs new file mode 100644 index 0000000000..433432b98b --- /dev/null +++ b/third_party/rust/uuid/src/error.rs @@ -0,0 +1,79 @@ +use crate::std::fmt; +use crate::{builder, parser}; + +/// A general error that can occur when working with UUIDs. +// TODO: improve the doc +// BODY: This detail should be fine for initial merge +#[derive(Clone, Debug, Eq, Hash, PartialEq)] +pub struct Error(Inner); + +// TODO: write tests for Error +// BODY: not immediately blocking, but should be covered for 1.0 +#[derive(Clone, Debug, Eq, Hash, PartialEq)] +enum Inner { + /// An error occurred while handling [`Uuid`] bytes. + /// + /// See [`BytesError`] + /// + /// [`BytesError`]: struct.BytesError.html + /// [`Uuid`]: struct.Uuid.html + Build(builder::Error), + + /// An error occurred while parsing a [`Uuid`] string. 
+ /// + /// See [`parser::ParseError`] + /// + /// [`parser::ParseError`]: parser/enum.ParseError.html + /// [`Uuid`]: struct.Uuid.html + Parser(parser::Error), +} + +impl From for Error { + fn from(err: builder::Error) -> Self { + Error(Inner::Build(err)) + } +} + +impl From for Error { + fn from(err: parser::Error) -> Self { + Error(Inner::Parser(err)) + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 { + Inner::Build(ref err) => fmt::Display::fmt(&err, f), + Inner::Parser(ref err) => fmt::Display::fmt(&err, f), + } + } +} + +#[cfg(feature = "std")] +mod std_support { + use super::*; + use crate::std::error; + + impl error::Error for Error { + fn source(&self) -> Option<&(dyn error::Error + 'static)> { + match self.0 { + Inner::Build(ref err) => Some(err), + Inner::Parser(ref err) => Some(err), + } + } + } +} + +#[cfg(test)] +mod test_util { + use super::*; + + impl Error { + pub(crate) fn expect_parser(self) -> parser::Error { + match self.0 { + Inner::Parser(err) => err, + _ => panic!("expected a `parser::Error` variant"), + } + } + } +} diff --git a/third_party/rust/uuid/src/lib.rs b/third_party/rust/uuid/src/lib.rs index 9547171f41..c3d0d225d2 100644 --- a/third_party/rust/uuid/src/lib.rs +++ b/third_party/rust/uuid/src/lib.rs @@ -28,48 +28,48 @@ //! # Dependencies //! //! By default, this crate depends on nothing but `std` and cannot generate -//! [`Uuid`]s. You need to enable the following Cargo features to enable +//! UUIDs. You need to enable the following Cargo features to enable //! various pieces of functionality: //! -//! * `v1` - adds the `Uuid::new_v1` function and the ability to create a V1 -//! using an implementation of `uuid::v1::ClockSequence` (usually -//! `uuid::v1::Context`) and a timestamp from `time::timespec`. -//! * `v3` - adds the `Uuid::new_v3` function and the ability to create a V3 +//! * `v1` - adds the [`Uuid::new_v1`] function and the ability to create a V1 +//! using an implementation of [`v1::ClockSequence`] (usually +//! [`v1::Context`]) and a timestamp from `time::timespec`. +//! * `v3` - adds the [`Uuid::new_v3`] function and the ability to create a V3 //! UUID based on the MD5 hash of some data. -//! * `v4` - adds the `Uuid::new_v4` function and the ability to randomly -//! generate a `Uuid`. -//! * `v5` - adds the `Uuid::new_v5` function and the ability to create a V5 +//! * `v4` - adds the [`Uuid::new_v4`] function and the ability to randomly +//! generate a UUID. +//! * `v5` - adds the [`Uuid::new_v5`] function and the ability to create a V5 //! UUID based on the SHA1 hash of some data. -//! * `serde` - adds the ability to serialize and deserialize a `Uuid` using the +//! * `serde` - adds the ability to serialize and deserialize a UUID using the //! `serde` crate. //! //! You need to enable one of the following Cargo features together with //! `v3`, `v4` or `v5` feature if you're targeting `wasm32` architecture: //! //! * `stdweb` - enables support for `OsRng` on `wasm32-unknown-unknown` via -//! `stdweb` combined with `cargo-web` -//! * `wasm-bindgen` - `wasm-bindgen` enables support for `OsRng` on -//! `wasm32-unknown-unknown` via [`wasm-bindgen`] +//! [`stdweb`] combined with [`cargo-web`] +//! * `wasm-bindgen` - enables support for `OsRng` on `wasm32-unknown-unknown` +//! via [`wasm-bindgen`] //! //! By default, `uuid` can be depended on with: //! //! ```toml //! [dependencies] -//! uuid = "0.7" +//! uuid = "0.8" //! ``` //! //! 
To activate various features, use syntax like: //! //! ```toml //! [dependencies] -//! uuid = { version = "0.7", features = ["serde", "v4"] } +//! uuid = { version = "0.8", features = ["serde", "v4"] } //! ``` //! //! You can disable default features with: //! //! ```toml //! [dependencies] -//! uuid = { version = "0.7", default-features = false } +//! uuid = { version = "0.8", default-features = false } //! ``` //! //! # Examples @@ -109,67 +109,53 @@ //! //! # References //! -//! * [Wikipedia: Universally Unique Identifier]( http://en.wikipedia.org/wiki/Universally_unique_identifier) -//! * [RFC4122: A Universally Unique IDentifier (UUID) URN Namespace]( http://tools.ietf.org/html/rfc4122) +//! * [Wikipedia: Universally Unique +//! Identifier](http://en.wikipedia.org/wiki/Universally_unique_identifier) +//! * [RFC4122: A Universally Unique IDentifier (UUID) URN +//! Namespace](http://tools.ietf.org/html/rfc4122) //! -//! [`wasm-bindgen`]: https://github.com/rustwasm/wasm-bindgen - -#![cfg_attr(not(feature = "std"), no_std)] -#![cfg_attr(feature = "const_fn", feature(const_fn))] -#![deny( - missing_copy_implementations, - missing_debug_implementations, - missing_docs -)] +//! [`wasm-bindgen`]: https://crates.io/crates/wasm-bindgen +//! [`cargo-web`]: https://crates.io/crates/cargo-web +//! [`stdweb`]: https://crates.io/crates/stdweb +//! [`Uuid`]: struct.Uuid.html +//! [`Uuid::new_v1`]: struct.Uuid.html#method.new_v1 +//! [`Uuid::new_v3`]: struct.Uuid.html#method.new_v3 +//! [`Uuid::new_v4`]: struct.Uuid.html#method.new_v4 +//! [`Uuid::new_v5`]: struct.Uuid.html#method.new_v5 +//! [`v1::ClockSequence`]: v1/trait.ClockSequence.html +//! [`v1::Context`]: v1/struct.Context.html + +#![no_std] +#![deny(missing_debug_implementations, missing_docs)] #![doc( html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://www.rust-lang.org/favicon.ico", - html_root_url = "https://docs.rs/uuid/0.7.4" + html_root_url = "https://docs.rs/uuid/0.8.1" )] -#[cfg(feature = "byteorder")] -extern crate byteorder; -#[cfg(feature = "std")] -extern crate core; -#[cfg(feature = "md5")] -extern crate md5; -#[cfg(feature = "rand")] -extern crate rand; -#[cfg(feature = "serde")] -extern crate serde; -#[cfg(all(feature = "serde", test))] -extern crate serde_test; -#[cfg(all(feature = "serde", test))] +#[cfg(any(feature = "std", test))] #[macro_use] -extern crate serde_derive; -#[cfg(feature = "sha1")] -extern crate sha1; -#[cfg(feature = "slog")] -#[cfg_attr(test, macro_use)] -extern crate slog; -#[cfg(feature = "winapi")] -extern crate winapi; +extern crate std; + +#[cfg(all(not(feature = "std"), not(test)))] +#[macro_use] +extern crate core as std; + +mod builder; +mod error; +mod parser; +mod prelude; pub mod adapter; -pub mod builder; -pub mod parser; -pub mod prelude; #[cfg(feature = "v1")] pub mod v1; -pub use builder::Builder; - -mod core_support; #[cfg(feature = "serde")] mod serde_support; #[cfg(feature = "slog")] mod slog_support; -#[cfg(feature = "std")] -mod std_support; #[cfg(test)] mod test_util; -#[cfg(feature = "u128")] -mod u128_support; #[cfg(all( feature = "v3", any( @@ -206,81 +192,40 @@ mod v5; #[cfg(all(windows, feature = "winapi"))] mod winapi_support; -/// A 128-bit (16 byte) buffer containing the ID. -pub type Bytes = [u8; 16]; - -/// The error that can occur when creating a [`Uuid`]. 
-/// -/// [`Uuid`]: struct.Uuid.html -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -pub struct BytesError { - expected: usize, - found: usize, -} +use crate::std::{fmt, str}; -/// A general error that can occur when handling [`Uuid`]s. -/// -/// Although specialized error types exist in the crate, -/// sometimes where particular error type occurred is hidden -/// until errors need to be handled. This allows to enumerate -/// the errors. -/// -/// [`Uuid`]: struct.Uuid.html -// TODO: improve the doc -// BODY: This detail should be fine for initial merge - -// TODO: write tests for Error -// BODY: not immediately blocking, but should be covered for 1.0 -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -pub enum Error { - /// An error occurred while handling [`Uuid`] bytes. - /// - /// See [`BytesError`] - /// - /// [`BytesError`]: struct.BytesError.html - /// [`Uuid`]: struct.Uuid.html - Bytes(BytesError), +pub use crate::{builder::Builder, error::Error}; - /// An error occurred while parsing a [`Uuid`] string. - /// - /// See [`parser::ParseError`] - /// - /// [`parser::ParseError`]: parser/enum.ParseError.html - /// [`Uuid`]: struct.Uuid.html - Parse(parser::ParseError), -} +/// A 128-bit (16 byte) buffer containing the ID. +pub type Bytes = [u8; 16]; /// The version of the UUID, denoting the generating algorithm. -#[derive(Debug, PartialEq, Copy, Clone)] -#[repr(C)] +#[derive(Clone, Copy, Debug, PartialEq)] pub enum Version { - /// Special case for `nil` [`Uuid`]. - /// - /// [`Uuid`]: struct.Uuid.html + /// Special case for `nil` UUID. Nil = 0, - /// Version 1: MAC address + /// Version 1: MAC address. Mac, - /// Version 2: DCE Security + /// Version 2: DCE Security. Dce, - /// Version 3: MD5 hash + /// Version 3: MD5 hash. Md5, - /// Version 4: Random + /// Version 4: Random. Random, - /// Version 5: SHA-1 hash + /// Version 5: SHA-1 hash. Sha1, } /// The reserved variants of UUIDs. #[derive(Clone, Copy, Debug, PartialEq)] -#[repr(C)] pub enum Variant { - /// Reserved by the NCS for backward compatibility + /// Reserved by the NCS for backward compatibility. NCS = 0, - /// As described in the RFC4122 Specification (default) + /// As described in the RFC4122 Specification (default). RFC4122, - /// Reserved by Microsoft for backward compatibility + /// Reserved by Microsoft for backward compatibility. Microsoft, - /// Reserved for future expansion + /// Reserved for future expansion. Future, } @@ -288,374 +233,32 @@ pub enum Variant { #[derive(Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Uuid(Bytes); -impl BytesError { - /// The expected number of bytes. - #[cfg(feature = "const_fn")] - #[inline] - pub const fn expected(&self) -> usize { - self.expected - } - - /// The expected number of bytes. - #[cfg(not(feature = "const_fn"))] - #[inline] - pub fn expected(&self) -> usize { - self.expected - } - - /// The number of bytes found. - #[cfg(feature = "const_fn")] - #[inline] - pub const fn found(&self) -> usize { - self.found - } - - /// The number of bytes found. - #[cfg(not(feature = "const_fn"))] - #[inline] - pub fn found(&self) -> usize { - self.found - } - - /// Create a new [`UuidError`]. - /// - /// [`UuidError`]: struct.UuidError.html - #[cfg(feature = "const_fn")] - #[inline] - pub const fn new(expected: usize, found: usize) -> Self { - BytesError { expected, found } - } - - /// Create a new [`UuidError`]. 
- /// - /// [`UuidError`]: struct.UuidError.html - #[cfg(not(feature = "const_fn"))] - #[inline] - pub fn new(expected: usize, found: usize) -> Self { - BytesError { expected, found } - } -} - impl Uuid { - /// [`Uuid`] namespace for Domain Name System (DNS). - /// - /// [`Uuid`]: struct.Uuid.html + /// UUID namespace for Domain Name System (DNS). pub const NAMESPACE_DNS: Self = Uuid([ 0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8, ]); - /// [`Uuid`] namespace for ISO Object Identifiers (OIDs). - /// - /// [`Uuid`]: struct.Uuid.html + /// UUID namespace for ISO Object Identifiers (OIDs). pub const NAMESPACE_OID: Self = Uuid([ 0x6b, 0xa7, 0xb8, 0x12, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8, ]); - /// [`Uuid`] namespace for Uniform Resource Locators (URLs). - /// - /// [`Uuid`]: struct.Uuid.html + /// UUID namespace for Uniform Resource Locators (URLs). pub const NAMESPACE_URL: Self = Uuid([ 0x6b, 0xa7, 0xb8, 0x11, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8, ]); - /// [`Uuid`] namespace for X.500 Distinguished Names (DNs). - /// - /// [`Uuid`]: struct.Uuid.html + /// UUID namespace for X.500 Distinguished Names (DNs). pub const NAMESPACE_X500: Self = Uuid([ 0x6b, 0xa7, 0xb8, 0x14, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8, ]); - /// The 'nil UUID'. - /// - /// The nil UUID is special form of UUID that is specified to have all - /// 128 bits set to zero, as defined in [IETF RFC 4122 Section 4.1.7][RFC]. - /// - /// [RFC]: https://tools.ietf.org/html/rfc4122.html#section-4.1.7 - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use uuid::Uuid; - /// - /// let uuid = Uuid::nil(); - /// - /// assert_eq!( - /// uuid.to_hyphenated().to_string(), - /// "00000000-0000-0000-0000-000000000000" - /// ); - /// ``` - #[cfg(feature = "const_fn")] - pub const fn nil() -> Self { - Uuid::from_bytes([0; 16]) - } - - /// The 'nil UUID'. - /// - /// The nil UUID is special form of UUID that is specified to have all - /// 128 bits set to zero, as defined in [IETF RFC 4122 Section 4.1.7][RFC]. - /// - /// [RFC]: https://tools.ietf.org/html/rfc4122.html#section-4.1.7 - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use uuid::Uuid; - /// - /// let uuid = Uuid::nil(); - /// - /// assert_eq!( - /// uuid.to_hyphenated().to_string(), - /// "00000000-0000-0000-0000-000000000000" - /// ); - /// ``` - #[cfg(not(feature = "const_fn"))] - pub fn nil() -> Uuid { - Uuid::from_bytes([0; 16]) - } - - /// Creates a `Uuid` from four field values in big-endian order. - /// - /// # Errors - /// - /// This function will return an error if `d4`'s length is not 8 bytes. 
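The namespace constants above exist for name-based UUID generation. A small sketch, assuming the optional `v5` feature is enabled (this diff does not enable it):

```rust
use uuid::{Uuid, Version};

fn main() {
    // Hashing a name under a namespace is deterministic: the same inputs
    // always yield the same UUID, here a version 5 (SHA-1) value.
    let a = Uuid::new_v5(&Uuid::NAMESPACE_DNS, b"example.org");
    let b = Uuid::new_v5(&Uuid::NAMESPACE_DNS, b"example.org");

    assert_eq!(a, b);
    assert_eq!(a.get_version(), Some(Version::Sha1));
}
```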
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use uuid::Uuid; - /// - /// let d4 = [12, 3, 9, 56, 54, 43, 8, 9]; - /// - /// let uuid = Uuid::from_fields(42, 12, 5, &d4); - /// let uuid = uuid.map(|uuid| uuid.to_hyphenated().to_string()); - /// - /// let expected_uuid = - /// Ok(String::from("0000002a-000c-0005-0c03-0938362b0809")); - /// - /// assert_eq!(expected_uuid, uuid); - /// ``` - /// - /// An invalid length: - /// - /// ``` - /// use uuid::prelude::*; - /// - /// let d4 = [12]; - /// - /// let uuid = uuid::Uuid::from_fields(42, 12, 5, &d4); - /// - /// let expected_uuid = Err(uuid::BytesError::new(8, d4.len())); - /// - /// assert_eq!(expected_uuid, uuid); - /// ``` - pub fn from_fields( - d1: u32, - d2: u16, - d3: u16, - d4: &[u8], - ) -> Result { - const D4_LEN: usize = 8; - - let len = d4.len(); - - if len != D4_LEN { - return Err(BytesError::new(D4_LEN, len)); - } - - Ok(Uuid::from_bytes([ - (d1 >> 24) as u8, - (d1 >> 16) as u8, - (d1 >> 8) as u8, - d1 as u8, - (d2 >> 8) as u8, - d2 as u8, - (d3 >> 8) as u8, - d3 as u8, - d4[0], - d4[1], - d4[2], - d4[3], - d4[4], - d4[5], - d4[6], - d4[7], - ])) - } - - /// Creates a `Uuid` from four field values in little-endian order. - /// - /// The bytes in the `d1`, `d2` and `d3` fields will - /// be converted into big-endian order. - /// - /// # Examples - /// - /// ``` - /// use uuid::Uuid; - /// - /// let d1 = 0xAB3F1097u32; - /// let d2 = 0x501Eu16; - /// let d3 = 0xB736u16; - /// let d4 = [12, 3, 9, 56, 54, 43, 8, 9]; - /// - /// let uuid = Uuid::from_fields_le(d1, d2, d3, &d4); - /// let uuid = uuid.map(|uuid| uuid.to_hyphenated().to_string()); - /// - /// let expected_uuid = - /// Ok(String::from("97103fab-1e50-36b7-0c03-0938362b0809")); - /// - /// assert_eq!(expected_uuid, uuid); - /// ``` - pub fn from_fields_le( - d1: u32, - d2: u16, - d3: u16, - d4: &[u8], - ) -> Result { - const D4_LEN: usize = 8; - - let len = d4.len(); - - if len != D4_LEN { - return Err(BytesError::new(D4_LEN, len)); - } - - Ok(Uuid::from_bytes([ - d1 as u8, - (d1 >> 8) as u8, - (d1 >> 16) as u8, - (d1 >> 24) as u8, - (d2) as u8, - (d2 >> 8) as u8, - d3 as u8, - (d3 >> 8) as u8, - d4[0], - d4[1], - d4[2], - d4[3], - d4[4], - d4[5], - d4[6], - d4[7], - ])) - } - - /// Creates a `Uuid` using the supplied big-endian bytes. - /// - /// # Errors - /// - /// This function will return an error if `b` has any length other than 16. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use uuid::Uuid; - /// - /// let bytes = [4, 54, 67, 12, 43, 2, 98, 76, 32, 50, 87, 5, 1, 33, 43, 87]; - /// - /// let uuid = Uuid::from_slice(&bytes); - /// let uuid = uuid.map(|uuid| uuid.to_hyphenated().to_string()); - /// - /// let expected_uuid = - /// Ok(String::from("0436430c-2b02-624c-2032-570501212b57")); - /// - /// assert_eq!(expected_uuid, uuid); - /// ``` - /// - /// An incorrect number of bytes: - /// - /// ``` - /// use uuid::prelude::*; - /// - /// let bytes = [4, 54, 67, 12, 43, 2, 98, 76]; - /// - /// let uuid = Uuid::from_slice(&bytes); - /// - /// let expected_uuid = Err(uuid::BytesError::new(16, 8)); - /// - /// assert_eq!(expected_uuid, uuid); - /// ``` - pub fn from_slice(b: &[u8]) -> Result { - const BYTES_LEN: usize = 16; - - let len = b.len(); - - if len != BYTES_LEN { - return Err(BytesError::new(BYTES_LEN, len)); - } - - let mut bytes: Bytes = [0; 16]; - bytes.copy_from_slice(b); - Ok(Uuid::from_bytes(bytes)) - } - - /// Creates a `Uuid` using the supplied big-endian bytes. 
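All of the constructors above funnel into the same 16-byte buffer; only the fallible ones validate the input length. A quick sketch of the two byte-oriented entry points (the hyphenated string assumes an allocator, e.g. the default `std` feature):

```rust
use uuid::{Bytes, Uuid};

fn main() {
    let bytes: Bytes = [
        0x93, 0x6d, 0xa0, 0x1f, 0x9a, 0xbd, 0x4d, 0x9d,
        0x80, 0xc7, 0x02, 0xaf, 0x85, 0xc8, 0x22, 0xa8,
    ];

    // `from_bytes` takes exactly 16 bytes by value and cannot fail.
    let id = Uuid::from_bytes(bytes);
    assert_eq!(
        id.to_hyphenated().to_string(),
        "936da01f-9abd-4d9d-80c7-02af85c822a8"
    );

    // `from_slice` checks the length at runtime and errors otherwise.
    assert!(Uuid::from_slice(&bytes).is_ok());
    assert!(Uuid::from_slice(&bytes[..8]).is_err());
}
```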
- #[cfg(not(feature = "const_fn"))] - pub fn from_bytes(bytes: Bytes) -> Uuid { - Uuid(bytes) - } - - /// Creates a `Uuid` using the supplied big-endian bytes. - #[cfg(feature = "const_fn")] - pub const fn from_bytes(bytes: Bytes) -> Uuid { - Uuid(bytes) - } - - /// Creates a v4 Uuid from random bytes (e.g. bytes supplied from `Rand` - /// crate) - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use uuid::Bytes; - /// use uuid::Uuid; - /// - /// let bytes: Bytes = [ - /// 70, 235, 208, 238, 14, 109, 67, 201, 185, 13, 204, 195, 90, 145, 63, 62, - /// ]; - /// let uuid = Uuid::from_random_bytes(bytes); - /// let uuid = uuid.to_hyphenated().to_string(); - /// - /// let expected_uuid = String::from("46ebd0ee-0e6d-43c9-b90d-ccc35a913f3e"); - /// - /// assert_eq!(expected_uuid, uuid); - /// ``` - #[deprecated( - since = "0.7.2", - note = "please use the `uuid::Builder` instead to set the variant and version" - )] - pub fn from_random_bytes(bytes: Bytes) -> Uuid { - let mut uuid = Uuid::from_bytes(bytes); - uuid.set_variant(Variant::RFC4122); - uuid.set_version(Version::Random); - uuid - } - - /// Specifies the variant of the UUID structure - fn set_variant(&mut self, v: Variant) { - // Octet 8 contains the variant in the most significant 3 bits - self.0[8] = match v { - Variant::NCS => self.as_bytes()[8] & 0x7f, // b0xx... - Variant::RFC4122 => (self.as_bytes()[8] & 0x3f) | 0x80, // b10x... - Variant::Microsoft => (self.as_bytes()[8] & 0x1f) | 0xc0, // b110... - Variant::Future => (self.as_bytes()[8] & 0x1f) | 0xe0, // b111... - } - } - - /// Returns the variant of the `Uuid` structure. + /// Returns the variant of the UUID structure. /// /// This determines the interpretation of the structure of the UUID. /// Currently only the RFC4122 variant is generated by this module. @@ -671,12 +274,7 @@ impl Uuid { } } - /// Specifies the version number of the `Uuid`. - fn set_version(&mut self, v: Version) { - self.0[6] = (self.as_bytes()[6] & 0xF) | ((v as u8) << 4); - } - - /// Returns the version number of the `Uuid`. + /// Returns the version number of the UUID. /// /// This represents the algorithm used to generate the contents. /// @@ -687,11 +285,11 @@ impl Uuid { /// details. /// /// * [Version Reference](http://tools.ietf.org/html/rfc4122#section-4.1.3) - pub fn get_version_num(&self) -> usize { + pub const fn get_version_num(&self) -> usize { (self.as_bytes()[6] >> 4) as usize } - /// Returns the version of the `Uuid`. + /// Returns the version of the UUID. /// /// This represents the algorithm used to generate the contents pub fn get_version(&self) -> Option { @@ -801,197 +399,85 @@ impl Uuid { (d1, d2, d3, d4) } - /// Returns an array of 16 octets containing the UUID data. - /// This method wraps [`Uuid::as_bytes`] - #[cfg(feature = "const_fn")] - pub const fn as_bytes(&self) -> &Bytes { - &self.0 - } - - /// Returns an array of 16 octets containing the UUID data. - /// This method wraps [`Uuid::as_bytes`] - #[cfg(not(feature = "const_fn"))] - pub fn as_bytes(&self) -> &Bytes { - &self.0 - } - - /// Returns an Optional Tuple of (u64, u16) representing the timestamp and - /// counter portion of a V1 UUID. 
If the supplied UUID is not V1, this - /// will return None - pub fn to_timestamp(&self) -> Option<(u64, u16)> { - if self - .get_version() - .map(|v| v != Version::Mac) - .unwrap_or(true) - { - return None; - } - - let ts: u64 = u64::from(self.as_bytes()[6] & 0x0F) << 56 - | u64::from(self.as_bytes()[7]) << 48 - | u64::from(self.as_bytes()[4]) << 40 - | u64::from(self.as_bytes()[5]) << 32 - | u64::from(self.as_bytes()[0]) << 24 - | u64::from(self.as_bytes()[1]) << 16 - | u64::from(self.as_bytes()[2]) << 8 - | u64::from(self.as_bytes()[3]); - - let count: u16 = u16::from(self.as_bytes()[8] & 0x3F) << 8 - | u16::from(self.as_bytes()[9]); - - Some((ts, count)) + /// Returns a 128bit value containing the UUID data. + /// + /// The bytes in the UUID will be packed into a `u128`, like the + /// [`Uuid::as_bytes`] method. + /// + /// # Examples + /// + /// ``` + /// use uuid::Uuid; + /// + /// let uuid = Uuid::parse_str("936DA01F-9ABD-4D9D-80C7-02AF85C822A8").unwrap(); + /// assert_eq!( + /// uuid.as_u128(), + /// 0x936DA01F9ABD4D9D80C702AF85C822A8, + /// ) + /// ``` + pub fn as_u128(&self) -> u128 { + u128::from(self.as_bytes()[0]) << 120 + | u128::from(self.as_bytes()[1]) << 112 + | u128::from(self.as_bytes()[2]) << 104 + | u128::from(self.as_bytes()[3]) << 96 + | u128::from(self.as_bytes()[4]) << 88 + | u128::from(self.as_bytes()[5]) << 80 + | u128::from(self.as_bytes()[6]) << 72 + | u128::from(self.as_bytes()[7]) << 64 + | u128::from(self.as_bytes()[8]) << 56 + | u128::from(self.as_bytes()[9]) << 48 + | u128::from(self.as_bytes()[10]) << 40 + | u128::from(self.as_bytes()[11]) << 32 + | u128::from(self.as_bytes()[12]) << 24 + | u128::from(self.as_bytes()[13]) << 16 + | u128::from(self.as_bytes()[14]) << 8 + | u128::from(self.as_bytes()[15]) } - /// Parses a `Uuid` from a string of hexadecimal digits with optional - /// hyphens. + /// Returns a 128bit little-endian value containing the UUID data. /// - /// Any of the formats generated by this module (simple, hyphenated, urn) - /// are supported by this parsing function. - pub fn parse_str(mut input: &str) -> Result { - // Ensure length is valid for any of the supported formats - let len = input.len(); - - if len == adapter::Urn::LENGTH && input.starts_with("urn:uuid:") { - input = &input[9..]; - } else if !parser::len_matches_any( - len, - &[adapter::Hyphenated::LENGTH, adapter::Simple::LENGTH], - ) { - return Err(parser::ParseError::InvalidLength { - expected: parser::Expected::Any(&[ - adapter::Hyphenated::LENGTH, - adapter::Simple::LENGTH, - ]), - found: len, - }); - } - - // `digit` counts only hexadecimal digits, `i_char` counts all chars. - let mut digit = 0; - let mut group = 0; - let mut acc = 0; - let mut buffer = [0u8; 16]; - - for (i_char, chr) in input.bytes().enumerate() { - if digit as usize >= adapter::Simple::LENGTH && group != 4 { - if group == 0 { - return Err(parser::ParseError::InvalidLength { - expected: parser::Expected::Any(&[ - adapter::Hyphenated::LENGTH, - adapter::Simple::LENGTH, - ]), - found: len, - }); - } - - return Err(parser::ParseError::InvalidGroupCount { - expected: parser::Expected::Any(&[1, 5]), - found: group + 1, - }); - } - - if digit % 2 == 0 { - // First digit of the byte. - match chr { - // Calulate upper half. 
- b'0'...b'9' => acc = chr - b'0', - b'a'...b'f' => acc = chr - b'a' + 10, - b'A'...b'F' => acc = chr - b'A' + 10, - // Found a group delimiter - b'-' => { - // TODO: remove the u8 cast - // BODY: this only needed until we switch to - // ParseError - if parser::ACC_GROUP_LENS[group] as u8 != digit { - // Calculate how many digits this group consists of - // in the input. - let found = if group > 0 { - // TODO: remove the u8 cast - // BODY: this only needed until we switch to - // ParseError - digit - parser::ACC_GROUP_LENS[group - 1] as u8 - } else { - digit - }; - - return Err( - parser::ParseError::InvalidGroupLength { - expected: parser::Expected::Exact( - parser::GROUP_LENS[group], - ), - found: found as usize, - group, - }, - ); - } - // Next group, decrement digit, it is incremented again - // at the bottom. - group += 1; - digit -= 1; - } - _ => { - return Err(parser::ParseError::InvalidCharacter { - expected: "0123456789abcdefABCDEF-", - found: input[i_char..].chars().next().unwrap(), - index: i_char, - }); - } - } - } else { - // Second digit of the byte, shift the upper half. - acc *= 16; - match chr { - b'0'...b'9' => acc += chr - b'0', - b'a'...b'f' => acc += chr - b'a' + 10, - b'A'...b'F' => acc += chr - b'A' + 10, - b'-' => { - // The byte isn't complete yet. - let found = if group > 0 { - // TODO: remove the u8 cast - // BODY: this only needed until we switch to - // ParseError - digit - parser::ACC_GROUP_LENS[group - 1] as u8 - } else { - digit - }; - - return Err(parser::ParseError::InvalidGroupLength { - expected: parser::Expected::Exact( - parser::GROUP_LENS[group], - ), - found: found as usize, - group, - }); - } - _ => { - return Err(parser::ParseError::InvalidCharacter { - expected: "0123456789abcdefABCDEF-", - found: input[i_char..].chars().next().unwrap(), - index: i_char, - }); - } - } - buffer[(digit / 2) as usize] = acc; - } - digit += 1; - } - - // Now check the last group. - // TODO: remove the u8 cast - // BODY: this only needed until we switch to - // ParseError - if parser::ACC_GROUP_LENS[4] as u8 != digit { - return Err(parser::ParseError::InvalidGroupLength { - expected: parser::Expected::Exact(parser::GROUP_LENS[4]), - found: (digit as usize - parser::ACC_GROUP_LENS[3]), - group, - }); - } + /// The bytes in the UUID will be reversed and packed into a `u128`. + /// Note that this will produce a different result than + /// [`Uuid::to_fields_le`], because the entire UUID is reversed, rather + /// than reversing the individual fields in-place. 
+ /// + /// # Examples + /// + /// ``` + /// use uuid::Uuid; + /// + /// let uuid = Uuid::parse_str("936DA01F-9ABD-4D9D-80C7-02AF85C822A8").unwrap(); + /// + /// assert_eq!( + /// uuid.to_u128_le(), + /// 0xA822C885AF02C7809D4DBD9A1FA06D93, + /// ) + /// ``` + pub fn to_u128_le(&self) -> u128 { + u128::from(self.as_bytes()[0]) + | u128::from(self.as_bytes()[1]) << 8 + | u128::from(self.as_bytes()[2]) << 16 + | u128::from(self.as_bytes()[3]) << 24 + | u128::from(self.as_bytes()[4]) << 32 + | u128::from(self.as_bytes()[5]) << 40 + | u128::from(self.as_bytes()[6]) << 48 + | u128::from(self.as_bytes()[7]) << 56 + | u128::from(self.as_bytes()[8]) << 64 + | u128::from(self.as_bytes()[9]) << 72 + | u128::from(self.as_bytes()[10]) << 80 + | u128::from(self.as_bytes()[11]) << 88 + | u128::from(self.as_bytes()[12]) << 96 + | u128::from(self.as_bytes()[13]) << 104 + | u128::from(self.as_bytes()[14]) << 112 + | u128::from(self.as_bytes()[15]) << 120 + } - Ok(Uuid::from_bytes(buffer)) + /// Returns an array of 16 octets containing the UUID data. + pub const fn as_bytes(&self) -> &Bytes { + &self.0 } - /// Tests if the UUID is nil + /// Tests if the UUID is nil. pub fn is_nil(&self) -> bool { self.as_bytes().iter().all(|&b| b == 0) } @@ -1022,18 +508,170 @@ impl Uuid { /// "urn:uuid:00000000-0000-0000-0000-000000000000" /// ); /// ``` - pub fn encode_buffer() -> [u8; adapter::Urn::LENGTH] { + pub const fn encode_buffer() -> [u8; adapter::Urn::LENGTH] { [0; adapter::Urn::LENGTH] } } +impl fmt::Debug for Uuid { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::LowerHex::fmt(self, f) + } +} + +impl fmt::Display for Uuid { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::LowerHex::fmt(self, f) + } +} + +impl fmt::Display for Variant { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + Variant::NCS => write!(f, "NCS"), + Variant::RFC4122 => write!(f, "RFC4122"), + Variant::Microsoft => write!(f, "Microsoft"), + Variant::Future => write!(f, "Future"), + } + } +} + +impl fmt::LowerHex for Uuid { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::LowerHex::fmt(&self.to_hyphenated_ref(), f) + } +} + +impl fmt::UpperHex for Uuid { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::UpperHex::fmt(&self.to_hyphenated_ref(), f) + } +} + +impl str::FromStr for Uuid { + type Err = Error; + + fn from_str(uuid_str: &str) -> Result { + Uuid::parse_str(uuid_str) + } +} + +impl Default for Uuid { + #[inline] + fn default() -> Self { + Uuid::nil() + } +} + #[cfg(test)] mod tests { - extern crate std; + use crate::{ + prelude::*, + std::string::{String, ToString}, + test_util, + }; + + macro_rules! 
check { + ($buf:ident, $format:expr, $target:expr, $len:expr, $cond:expr) => { + $buf.clear(); + write!($buf, $format, $target).unwrap(); + assert!($buf.len() == $len); + assert!($buf.chars().all($cond), "{}", $buf); + }; + } + + #[test] + fn test_uuid_compare() { + let uuid1 = test_util::new(); + let uuid2 = test_util::new2(); + + assert_eq!(uuid1, uuid1); + assert_eq!(uuid2, uuid2); + + assert_ne!(uuid1, uuid2); + assert_ne!(uuid2, uuid1); + } + + #[test] + fn test_uuid_default() { + let default_uuid = Uuid::default(); + let nil_uuid = Uuid::nil(); + + assert_eq!(default_uuid, nil_uuid); + } + + #[test] + fn test_uuid_display() { + use super::fmt::Write; + + let uuid = test_util::new(); + let s = uuid.to_string(); + let mut buffer = String::new(); + + assert_eq!(s, uuid.to_hyphenated().to_string()); - use self::std::prelude::v1::*; - use super::test_util; - use prelude::*; + check!(buffer, "{}", uuid, 36, |c| c.is_lowercase() + || c.is_digit(10) + || c == '-'); + } + + #[test] + fn test_uuid_lowerhex() { + use super::fmt::Write; + + let mut buffer = String::new(); + let uuid = test_util::new(); + + check!(buffer, "{:x}", uuid, 36, |c| c.is_lowercase() + || c.is_digit(10) + || c == '-'); + } + + // noinspection RsAssertEqual + #[test] + fn test_uuid_operator_eq() { + let uuid1 = test_util::new(); + let uuid1_dup = uuid1.clone(); + let uuid2 = test_util::new2(); + + assert!(uuid1 == uuid1); + assert!(uuid1 == uuid1_dup); + assert!(uuid1_dup == uuid1); + + assert!(uuid1 != uuid2); + assert!(uuid2 != uuid1); + assert!(uuid1_dup != uuid2); + assert!(uuid2 != uuid1_dup); + } + + #[test] + fn test_uuid_to_string() { + use super::fmt::Write; + + let uuid = test_util::new(); + let s = uuid.to_string(); + let mut buffer = String::new(); + + assert_eq!(s.len(), 36); + + check!(buffer, "{}", s, 36, |c| c.is_lowercase() + || c.is_digit(10) + || c == '-'); + } + + #[test] + fn test_uuid_upperhex() { + use super::fmt::Write; + + let mut buffer = String::new(); + let uuid = test_util::new(); + + check!(buffer, "{:X}", uuid, 36, |c| c.is_uppercase() + || c.is_digit(10) + || c == '-'); + } #[test] fn test_nil() { @@ -1104,227 +742,6 @@ mod tests { assert_eq!(uuid6.get_variant().unwrap(), Variant::NCS); } - #[test] - fn test_parse_uuid_v4() { - use adapter; - use parser; - - const EXPECTED_UUID_LENGTHS: parser::Expected = - parser::Expected::Any(&[ - adapter::Hyphenated::LENGTH, - adapter::Simple::LENGTH, - ]); - - const EXPECTED_GROUP_COUNTS: parser::Expected = - parser::Expected::Any(&[1, 5]); - - const EXPECTED_CHARS: &'static str = "0123456789abcdefABCDEF-"; - - // Invalid - assert_eq!( - Uuid::parse_str(""), - Err(parser::ParseError::InvalidLength { - expected: EXPECTED_UUID_LENGTHS, - found: 0, - }) - ); - - assert_eq!( - Uuid::parse_str("!"), - Err(parser::ParseError::InvalidLength { - expected: EXPECTED_UUID_LENGTHS, - found: 1 - }) - ); - - assert_eq!( - Uuid::parse_str("F9168C5E-CEB2-4faa-B6BF-329BF39FA1E45"), - Err(parser::ParseError::InvalidLength { - expected: EXPECTED_UUID_LENGTHS, - found: 37, - }) - ); - - assert_eq!( - Uuid::parse_str("F9168C5E-CEB2-4faa-BBF-329BF39FA1E4"), - Err(parser::ParseError::InvalidLength { - expected: EXPECTED_UUID_LENGTHS, - found: 35 - }) - ); - - assert_eq!( - Uuid::parse_str("F9168C5E-CEB2-4faa-BGBF-329BF39FA1E4"), - Err(parser::ParseError::InvalidCharacter { - expected: EXPECTED_CHARS, - found: 'G', - index: 20, - }) - ); - - assert_eq!( - Uuid::parse_str("F9168C5E-CEB2F4faaFB6BFF329BF39FA1E4"), - Err(parser::ParseError::InvalidGroupCount { - expected: 
EXPECTED_GROUP_COUNTS, - found: 2 - }) - ); - - assert_eq!( - Uuid::parse_str("F9168C5E-CEB2-4faaFB6BFF329BF39FA1E4"), - Err(parser::ParseError::InvalidGroupCount { - expected: EXPECTED_GROUP_COUNTS, - found: 3, - }) - ); - - assert_eq!( - Uuid::parse_str("F9168C5E-CEB2-4faa-B6BFF329BF39FA1E4"), - Err(parser::ParseError::InvalidGroupCount { - expected: EXPECTED_GROUP_COUNTS, - found: 4, - }) - ); - - assert_eq!( - Uuid::parse_str("F9168C5E-CEB2-4faa"), - Err(parser::ParseError::InvalidLength { - expected: EXPECTED_UUID_LENGTHS, - found: 18, - }) - ); - - assert_eq!( - Uuid::parse_str("F9168C5E-CEB2-4faaXB6BFF329BF39FA1E4"), - Err(parser::ParseError::InvalidCharacter { - expected: EXPECTED_CHARS, - found: 'X', - index: 18, - }) - ); - - assert_eq!( - Uuid::parse_str("F9168C5E-CEB-24fa-eB6BFF32-BF39FA1E4"), - Err(parser::ParseError::InvalidGroupLength { - expected: parser::Expected::Exact(4), - found: 3, - group: 1, - }) - ); - // (group, found, expecting) - // - assert_eq!( - Uuid::parse_str("01020304-1112-2122-3132-41424344"), - Err(parser::ParseError::InvalidGroupLength { - expected: parser::Expected::Exact(12), - found: 8, - group: 4, - }) - ); - - assert_eq!( - Uuid::parse_str("67e5504410b1426f9247bb680e5fe0c"), - Err(parser::ParseError::InvalidLength { - expected: EXPECTED_UUID_LENGTHS, - found: 31, - }) - ); - - assert_eq!( - Uuid::parse_str("67e5504410b1426f9247bb680e5fe0c88"), - Err(parser::ParseError::InvalidLength { - expected: EXPECTED_UUID_LENGTHS, - found: 33, - }) - ); - - assert_eq!( - Uuid::parse_str("67e5504410b1426f9247bb680e5fe0cg8"), - Err(parser::ParseError::InvalidLength { - expected: EXPECTED_UUID_LENGTHS, - found: 33, - }) - ); - - assert_eq!( - Uuid::parse_str("67e5504410b1426%9247bb680e5fe0c8"), - Err(parser::ParseError::InvalidCharacter { - expected: EXPECTED_CHARS, - found: '%', - index: 15, - }) - ); - - assert_eq!( - Uuid::parse_str("231231212212423424324323477343246663"), - Err(parser::ParseError::InvalidLength { - expected: EXPECTED_UUID_LENGTHS, - found: 36, - }) - ); - - // Valid - assert!(Uuid::parse_str("00000000000000000000000000000000").is_ok()); - assert!(Uuid::parse_str("67e55044-10b1-426f-9247-bb680e5fe0c8").is_ok()); - assert!(Uuid::parse_str("F9168C5E-CEB2-4faa-B6BF-329BF39FA1E4").is_ok()); - assert!(Uuid::parse_str("67e5504410b1426f9247bb680e5fe0c8").is_ok()); - assert!(Uuid::parse_str("01020304-1112-2122-3132-414243444546").is_ok()); - assert!(Uuid::parse_str( - "urn:uuid:67e55044-10b1-426f-9247-bb680e5fe0c8" - ) - .is_ok()); - - // Nil - let nil = Uuid::nil(); - assert_eq!( - Uuid::parse_str("00000000000000000000000000000000").unwrap(), - nil - ); - assert_eq!( - Uuid::parse_str("00000000-0000-0000-0000-000000000000").unwrap(), - nil - ); - - // Round-trip - let uuid_orig = test_util::new(); - let orig_str = uuid_orig.to_string(); - let uuid_out = Uuid::parse_str(&orig_str).unwrap(); - assert_eq!(uuid_orig, uuid_out); - - // Test error reporting - assert_eq!( - Uuid::parse_str("67e5504410b1426f9247bb680e5fe0c"), - Err(parser::ParseError::InvalidLength { - expected: EXPECTED_UUID_LENGTHS, - found: 31, - }) - ); - assert_eq!( - Uuid::parse_str("67e550X410b1426f9247bb680e5fe0cd"), - Err(parser::ParseError::InvalidCharacter { - expected: EXPECTED_CHARS, - found: 'X', - index: 6, - }) - ); - assert_eq!( - Uuid::parse_str("67e550-4105b1426f9247bb680e5fe0c"), - Err(parser::ParseError::InvalidGroupLength { - expected: parser::Expected::Exact(8), - found: 6, - group: 0, - }) - ); - assert_eq!( - Uuid::parse_str("F9168C5E-CEB2-4faa-B6BF1-02BF39FA1E4"), - 
Err(parser::ParseError::InvalidGroupLength { - expected: parser::Expected::Exact(4), - found: 5, - group: 3, - }) - ); - } - #[test] fn test_to_simple_string() { let uuid1 = test_util::new(); @@ -1345,7 +762,7 @@ mod tests { #[test] fn test_upper_lower_hex() { - use tests::std::fmt::Write; + use std::fmt::Write; let mut buf = String::new(); let u = test_util::new(); @@ -1499,6 +916,58 @@ mod tests { assert_eq!(d4_in, d4_out); } + #[test] + fn test_from_u128() { + let v_in: u128 = 0xa1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8; + + let u = Uuid::from_u128(v_in); + + let expected = "a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8"; + let result = u.to_simple().to_string(); + assert_eq!(result, expected); + } + + #[test] + fn test_from_u128_le() { + let v_in: u128 = 0xd8d7d6d5d4d3d2d1c2c1b2b1a4a3a2a1; + + let u = Uuid::from_u128_le(v_in); + + let expected = "a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8"; + let result = u.to_simple().to_string(); + assert_eq!(result, expected); + } + + #[test] + fn test_u128_roundtrip() { + let v_in: u128 = 0xa1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8; + + let u = Uuid::from_u128(v_in); + let v_out = u.as_u128(); + + assert_eq!(v_in, v_out); + } + + #[test] + fn test_u128_le_roundtrip() { + let v_in: u128 = 0xd8d7d6d5d4d3d2d1c2c1b2b1a4a3a2a1; + + let u = Uuid::from_u128_le(v_in); + let v_out = u.to_u128_le(); + + assert_eq!(v_in, v_out); + } + + #[test] + fn test_u128_le_is_actually_le() { + let v_in: u128 = 0xa1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8; + + let u = Uuid::from_u128(v_in); + let v_out = u.to_u128_le(); + + assert_eq!(v_in, v_out.swap_bytes()); + } + #[test] fn test_from_slice() { let b = [ @@ -1536,7 +1005,7 @@ mod tests { #[test] fn test_bytes_roundtrip() { - let b_in: ::Bytes = [ + let b_in: crate::Bytes = [ 0xa1, 0xa2, 0xa3, 0xa4, 0xb1, 0xb2, 0xc1, 0xc2, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, ]; @@ -1548,20 +1017,6 @@ mod tests { assert_eq!(&b_in, b_out); } - #[test] - #[allow(deprecated)] - fn test_from_random_bytes() { - let b = [ - 0xa1, 0xa2, 0xa3, 0xa4, 0xb1, 0xb2, 0xc1, 0xc2, 0xd1, 0xd2, 0xd3, - 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, - ]; - - let u = Uuid::from_random_bytes(b); - let expected = "a1a2a3a4b1b241c291d2d3d4d5d6d7d8"; - - assert_eq!(u.to_simple().to_string(), expected); - } - #[test] fn test_iterbytes_impl_for_uuid() { let mut set = std::collections::HashSet::new(); diff --git a/third_party/rust/uuid/src/parser/core_support.rs b/third_party/rust/uuid/src/parser/core_support.rs deleted file mode 100644 index d050d743ff..0000000000 --- a/third_party/rust/uuid/src/parser/core_support.rs +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2013-2014 The Rust Project Developers. -// Copyright 2018 The Uuid Project Developers. -// -// See the COPYRIGHT file at the top-level directory of this distribution. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
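The relationship between `as_u128`, `to_u128_le`, and the `from_u128*` constructors exercised by the tests above can be summed up in a short sketch:

```rust
use uuid::Uuid;

fn main() {
    let id = Uuid::parse_str("a1a2a3a4-b1b2-c1c2-d1d2-d3d4d5d6d7d8").unwrap();

    // `as_u128` packs the 16 bytes in big-endian order...
    assert_eq!(id.as_u128(), 0xa1a2a3a4_b1b2_c1c2_d1d2_d3d4d5d6d7d8);

    // ...while `to_u128_le` reverses the whole buffer, not the individual fields.
    assert_eq!(id.to_u128_le(), id.as_u128().swap_bytes());

    // Both directions round-trip through the matching constructor.
    assert_eq!(Uuid::from_u128(id.as_u128()), id);
    assert_eq!(Uuid::from_u128_le(id.to_u128_le()), id);
}
```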
- -use core::fmt; -use parser; - -impl From for ::Error { - fn from(err: parser::ParseError) -> Self { - ::Error::Parse(err) - } -} - -impl<'a> fmt::Display for parser::Expected { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - parser::Expected::Any(ref crits) => write!(f, "one of {:?}", crits), - parser::Expected::Exact(crit) => write!(f, "{}", crit), - parser::Expected::Range { min, max } => { - write!(f, "{}..{} inclusive", min, max) - } - } - } -} - -impl fmt::Display for parser::ParseError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}: ", self._description())?; - - match *self { - parser::ParseError::InvalidCharacter { - expected, - found, - index, - } => { - write!(f, "expected {}, found {} at {}", expected, found, index) - } - parser::ParseError::InvalidGroupCount { - ref expected, - found, - } => write!(f, "expected {}, found {}", expected, found), - parser::ParseError::InvalidGroupLength { - ref expected, - found, - group, - } => write!( - f, - "expected {}, found {} in group {}", - expected, found, group, - ), - parser::ParseError::InvalidLength { - ref expected, - found, - } => write!(f, "expected {}, found {}", expected, found), - } - } -} diff --git a/third_party/rust/uuid/src/parser/error.rs b/third_party/rust/uuid/src/parser/error.rs new file mode 100644 index 0000000000..39d502f3be --- /dev/null +++ b/third_party/rust/uuid/src/parser/error.rs @@ -0,0 +1,148 @@ +use crate::std::fmt; + +/// An error that can occur while parsing a [`Uuid`] string. +/// +/// [`Uuid`]: ../struct.Uuid.html +#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] +pub(crate) enum Error { + /// Invalid character in the [`Uuid`] string. + /// + /// [`Uuid`]: ../struct.Uuid.html + InvalidCharacter { + /// The expected characters. + expected: &'static str, + /// The invalid character found. + found: char, + /// The invalid character position. + index: usize, + /// Indicates the [`Uuid`] starts with `urn:uuid:`. + /// + /// This is a special case for [`Urn`] adapter parsing. + /// + /// [`Uuid`]: ../Uuid.html + urn: UrnPrefix, + }, + /// Invalid number of segments in the [`Uuid`] string. + /// + /// [`Uuid`]: ../struct.Uuid.html + InvalidGroupCount { + /// The expected number of segments. + // TODO: explain multiple segment count. + // BODY: Parsers can expect a range of Uuid segment count. + // This needs to be expanded on. + expected: ExpectedLength, + /// The number of segments found. + found: usize, + }, + /// Invalid length of a segment in a [`Uuid`] string. + /// + /// [`Uuid`]: ../struct.Uuid.html + InvalidGroupLength { + /// The expected length of the segment. + expected: ExpectedLength, + /// The length of segment found. + found: usize, + /// The segment with invalid length. + group: usize, + }, + /// Invalid length of the [`Uuid`] string. + /// + /// [`Uuid`]: ../struct.Uuid.html + InvalidLength { + /// The expected length(s). + // TODO: explain multiple lengths. + // BODY: Parsers can expect a range of Uuid lenghts. + // This needs to be expanded on. + expected: ExpectedLength, + /// The invalid length found. + found: usize, + }, +} + +/// The expected length. +#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub(crate) enum ExpectedLength { + /// Expected any one of the given values. + Any(&'static [usize]), + /// Expected the given value. + Exact(usize), +} + +/// Urn prefix value. 
+#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub(crate) enum UrnPrefix { + /// The `urn:uuid:` prefix should optionally provided. + Optional, +} + +impl Error { + fn _description(&self) -> &str { + match *self { + Error::InvalidCharacter { .. } => "invalid character", + Error::InvalidGroupCount { .. } => "invalid number of groups", + Error::InvalidGroupLength { .. } => "invalid group length", + Error::InvalidLength { .. } => "invalid length", + } + } +} + +impl fmt::Display for ExpectedLength { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + ExpectedLength::Any(crits) => write!(f, "one of {:?}", crits), + ExpectedLength::Exact(crit) => write!(f, "{}", crit), + } + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}: ", self._description())?; + + match *self { + Error::InvalidCharacter { + expected, + found, + index, + urn, + } => { + let urn_str = match urn { + UrnPrefix::Optional => { + " an optional prefix of `urn:uuid:` followed by" + } + }; + + write!( + f, + "expected{} {}, found {} at {}", + urn_str, expected, found, index + ) + } + Error::InvalidGroupCount { + ref expected, + found, + } => write!(f, "expected {}, found {}", expected, found), + Error::InvalidGroupLength { + ref expected, + found, + group, + } => write!( + f, + "expected {}, found {} in group {}", + expected, found, group, + ), + Error::InvalidLength { + ref expected, + found, + } => write!(f, "expected {}, found {}", expected, found), + } + } +} + +#[cfg(feature = "std")] +mod std_support { + use super::*; + use crate::std::error; + + impl error::Error for Error {} +} diff --git a/third_party/rust/uuid/src/parser/mod.rs b/third_party/rust/uuid/src/parser/mod.rs index edd959d5ed..f5a2e436b4 100644 --- a/third_party/rust/uuid/src/parser/mod.rs +++ b/third_party/rust/uuid/src/parser/mod.rs @@ -13,92 +13,13 @@ //! //! [`Uuid`]: ../struct.Uuid.html -mod core_support; -#[cfg(feature = "std")] -mod std_support; - -/// The expected value. -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -pub enum Expected { - /// Expected any one of the given values. - Any(&'static [usize]), - /// Expected the given value. - Exact(usize), - /// Expected any values in the given range. - Range { - /// The minimum expected value. - min: usize, - /// The maximum expected value. - max: usize, - }, -} +pub(crate) mod error; +pub(crate) use self::error::Error; -/// An error that can occur while parsing a [`Uuid`] string. -/// -/// [`Uuid`]: ../struct.Uuid.html -#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] -pub enum ParseError { - /// Invalid character in the [`Uuid`] string. - /// - /// [`Uuid`]: ../struct.Uuid.html - InvalidCharacter { - /// The expected characters. - expected: &'static str, - /// The invalid character found. - found: char, - /// The invalid character position. - index: usize, - }, - /// Invalid number of segments in the [`Uuid`] string. - /// - /// [`Uuid`]: ../struct.Uuid.html - InvalidGroupCount { - /// The expected number of segments. - // TODO: explain multiple segment count. - // BODY: Parsers can expect a range of Uuid segment count. - // This needs to be expanded on. - expected: Expected, - /// The number of segments found. - found: usize, - }, - /// Invalid length of a segment in a [`Uuid`] string. - /// - /// [`Uuid`]: ../struct.Uuid.html - InvalidGroupLength { - /// The expected length of the segment. - expected: Expected, - /// The length of segment found. 
- found: usize, - /// The segment with invalid length. - group: usize, - }, - /// Invalid length of the [`Uuid`] string. - /// - /// [`Uuid`]: ../struct.Uuid.html - InvalidLength { - /// The expected length(s). - // TODO: explain multiple lengths. - // BODY: Parsers can expect a range of Uuid lenghts. - // This needs to be expanded on. - expected: Expected, - /// The invalid length found. - found: usize, - }, -} - -impl ParseError { - fn _description(&self) -> &str { - match *self { - ParseError::InvalidCharacter { .. } => "invalid character", - ParseError::InvalidGroupCount { .. } => "invalid number of groups", - ParseError::InvalidGroupLength { .. } => "invalid group length", - ParseError::InvalidLength { .. } => "invalid length", - } - } -} +use crate::{adapter, Uuid}; /// Check if the length matches any of the given criteria lengths. -pub(crate) fn len_matches_any(len: usize, crits: &[usize]) -> bool { +fn len_matches_any(len: usize, crits: &[usize]) -> bool { for crit in crits { if len == *crit { return true; @@ -111,7 +32,7 @@ pub(crate) fn len_matches_any(len: usize, crits: &[usize]) -> bool { /// Check if the length matches any criteria lengths in the given range /// (inclusive). #[allow(dead_code)] -pub(crate) fn len_matches_range(len: usize, min: usize, max: usize) -> bool { +fn len_matches_range(len: usize, min: usize, max: usize) -> bool { for crit in min..(max + 1) { if len == crit { return true; @@ -122,7 +43,405 @@ pub(crate) fn len_matches_range(len: usize, min: usize, max: usize) -> bool { } // Accumulated length of each hyphenated group in hex digits. -pub(crate) const ACC_GROUP_LENS: [usize; 5] = [8, 12, 16, 20, 32]; +const ACC_GROUP_LENS: [usize; 5] = [8, 12, 16, 20, 32]; // Length of each hyphenated group in hex digits. -pub(crate) const GROUP_LENS: [usize; 5] = [8, 4, 4, 4, 12]; +const GROUP_LENS: [usize; 5] = [8, 4, 4, 4, 12]; + +impl Uuid { + /// Parses a `Uuid` from a string of hexadecimal digits with optional + /// hyphens. + /// + /// Any of the formats generated by this module (simple, hyphenated, urn) + /// are supported by this parsing function. + pub fn parse_str(mut input: &str) -> Result { + // Ensure length is valid for any of the supported formats + let len = input.len(); + + if len == adapter::Urn::LENGTH && input.starts_with("urn:uuid:") { + input = &input[9..]; + } else if !len_matches_any( + len, + &[adapter::Hyphenated::LENGTH, adapter::Simple::LENGTH], + ) { + Err(Error::InvalidLength { + expected: error::ExpectedLength::Any(&[ + adapter::Hyphenated::LENGTH, + adapter::Simple::LENGTH, + ]), + found: len, + })?; + } + + // `digit` counts only hexadecimal digits, `i_char` counts all chars. + let mut digit = 0; + let mut group = 0; + let mut acc = 0; + let mut buffer = [0u8; 16]; + + for (i_char, chr) in input.bytes().enumerate() { + if digit as usize >= adapter::Simple::LENGTH && group != 4 { + if group == 0 { + Err(Error::InvalidLength { + expected: error::ExpectedLength::Any(&[ + adapter::Hyphenated::LENGTH, + adapter::Simple::LENGTH, + ]), + found: len, + })?; + } + + Err(Error::InvalidGroupCount { + expected: error::ExpectedLength::Any(&[1, 5]), + found: group + 1, + })?; + } + + if digit % 2 == 0 { + // First digit of the byte. + match chr { + // Calulate upper half. 
+ b'0'..=b'9' => acc = chr - b'0', + b'a'..=b'f' => acc = chr - b'a' + 10, + b'A'..=b'F' => acc = chr - b'A' + 10, + // Found a group delimiter + b'-' => { + // TODO: remove the u8 cast + // BODY: this only needed until we switch to + // ParseError + if ACC_GROUP_LENS[group] as u8 != digit { + // Calculate how many digits this group consists of + // in the input. + let found = if group > 0 { + // TODO: remove the u8 cast + // BODY: this only needed until we switch to + // ParseError + digit - ACC_GROUP_LENS[group - 1] as u8 + } else { + digit + }; + + Err(Error::InvalidGroupLength { + expected: error::ExpectedLength::Exact( + GROUP_LENS[group], + ), + found: found as usize, + group, + })?; + } + // Next group, decrement digit, it is incremented again + // at the bottom. + group += 1; + digit -= 1; + } + _ => { + Err(Error::InvalidCharacter { + expected: "0123456789abcdefABCDEF-", + found: input[i_char..].chars().next().unwrap(), + index: i_char, + urn: error::UrnPrefix::Optional, + })?; + } + } + } else { + // Second digit of the byte, shift the upper half. + acc *= 16; + match chr { + b'0'..=b'9' => acc += chr - b'0', + b'a'..=b'f' => acc += chr - b'a' + 10, + b'A'..=b'F' => acc += chr - b'A' + 10, + b'-' => { + // The byte isn't complete yet. + let found = if group > 0 { + // TODO: remove the u8 cast + // BODY: this only needed until we switch to + // ParseError + digit - ACC_GROUP_LENS[group - 1] as u8 + } else { + digit + }; + + Err(Error::InvalidGroupLength { + expected: error::ExpectedLength::Exact( + GROUP_LENS[group], + ), + found: found as usize, + group, + })?; + } + _ => { + Err(Error::InvalidCharacter { + expected: "0123456789abcdefABCDEF-", + found: input[i_char..].chars().next().unwrap(), + index: i_char, + urn: error::UrnPrefix::Optional, + })?; + } + } + buffer[(digit / 2) as usize] = acc; + } + digit += 1; + } + + // Now check the last group. 
+ // TODO: remove the u8 cast + // BODY: this only needed until we switch to + // ParseError + if ACC_GROUP_LENS[4] as u8 != digit { + Err(Error::InvalidGroupLength { + expected: error::ExpectedLength::Exact(GROUP_LENS[4]), + found: (digit as usize - ACC_GROUP_LENS[3]), + group, + })?; + } + + Ok(Uuid::from_bytes(buffer)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{adapter, std::string::ToString, test_util}; + + #[test] + fn test_parse_uuid_v4() { + const EXPECTED_UUID_LENGTHS: error::ExpectedLength = + error::ExpectedLength::Any(&[ + adapter::Hyphenated::LENGTH, + adapter::Simple::LENGTH, + ]); + + const EXPECTED_GROUP_COUNTS: error::ExpectedLength = + error::ExpectedLength::Any(&[1, 5]); + + const EXPECTED_CHARS: &'static str = "0123456789abcdefABCDEF-"; + + // Invalid + assert_eq!( + Uuid::parse_str("").map_err(crate::Error::expect_parser), + Err(Error::InvalidLength { + expected: EXPECTED_UUID_LENGTHS, + found: 0, + }) + ); + + assert_eq!( + Uuid::parse_str("!").map_err(crate::Error::expect_parser), + Err(Error::InvalidLength { + expected: EXPECTED_UUID_LENGTHS, + found: 1 + }) + ); + + assert_eq!( + Uuid::parse_str("F9168C5E-CEB2-4faa-B6BF-329BF39FA1E45") + .map_err(crate::Error::expect_parser), + Err(Error::InvalidLength { + expected: EXPECTED_UUID_LENGTHS, + found: 37, + }) + ); + + assert_eq!( + Uuid::parse_str("F9168C5E-CEB2-4faa-BBF-329BF39FA1E4") + .map_err(crate::Error::expect_parser), + Err(Error::InvalidLength { + expected: EXPECTED_UUID_LENGTHS, + found: 35 + }) + ); + + assert_eq!( + Uuid::parse_str("F9168C5E-CEB2-4faa-BGBF-329BF39FA1E4") + .map_err(crate::Error::expect_parser), + Err(Error::InvalidCharacter { + expected: EXPECTED_CHARS, + found: 'G', + index: 20, + urn: error::UrnPrefix::Optional, + }) + ); + + assert_eq!( + Uuid::parse_str("F9168C5E-CEB2F4faaFB6BFF329BF39FA1E4") + .map_err(crate::Error::expect_parser), + Err(Error::InvalidGroupCount { + expected: EXPECTED_GROUP_COUNTS, + found: 2 + }) + ); + + assert_eq!( + Uuid::parse_str("F9168C5E-CEB2-4faaFB6BFF329BF39FA1E4") + .map_err(crate::Error::expect_parser), + Err(Error::InvalidGroupCount { + expected: EXPECTED_GROUP_COUNTS, + found: 3, + }) + ); + + assert_eq!( + Uuid::parse_str("F9168C5E-CEB2-4faa-B6BFF329BF39FA1E4") + .map_err(crate::Error::expect_parser), + Err(Error::InvalidGroupCount { + expected: EXPECTED_GROUP_COUNTS, + found: 4, + }) + ); + + assert_eq!( + Uuid::parse_str("F9168C5E-CEB2-4faa") + .map_err(crate::Error::expect_parser), + Err(Error::InvalidLength { + expected: EXPECTED_UUID_LENGTHS, + found: 18, + }) + ); + + assert_eq!( + Uuid::parse_str("F9168C5E-CEB2-4faaXB6BFF329BF39FA1E4") + .map_err(crate::Error::expect_parser), + Err(Error::InvalidCharacter { + expected: EXPECTED_CHARS, + found: 'X', + index: 18, + urn: error::UrnPrefix::Optional, + }) + ); + + assert_eq!( + Uuid::parse_str("F9168C5E-CEB-24fa-eB6BFF32-BF39FA1E4") + .map_err(crate::Error::expect_parser), + Err(Error::InvalidGroupLength { + expected: error::ExpectedLength::Exact(4), + found: 3, + group: 1, + }) + ); + // (group, found, expecting) + // + assert_eq!( + Uuid::parse_str("01020304-1112-2122-3132-41424344") + .map_err(crate::Error::expect_parser), + Err(Error::InvalidGroupLength { + expected: error::ExpectedLength::Exact(12), + found: 8, + group: 4, + }) + ); + + assert_eq!( + Uuid::parse_str("67e5504410b1426f9247bb680e5fe0c") + .map_err(crate::Error::expect_parser), + Err(Error::InvalidLength { + expected: EXPECTED_UUID_LENGTHS, + found: 31, + }) + ); + + assert_eq!( + 
Uuid::parse_str("67e5504410b1426f9247bb680e5fe0c88") + .map_err(crate::Error::expect_parser), + Err(Error::InvalidLength { + expected: EXPECTED_UUID_LENGTHS, + found: 33, + }) + ); + + assert_eq!( + Uuid::parse_str("67e5504410b1426f9247bb680e5fe0cg8") + .map_err(crate::Error::expect_parser), + Err(Error::InvalidLength { + expected: EXPECTED_UUID_LENGTHS, + found: 33, + }) + ); + + assert_eq!( + Uuid::parse_str("67e5504410b1426%9247bb680e5fe0c8") + .map_err(crate::Error::expect_parser), + Err(Error::InvalidCharacter { + expected: EXPECTED_CHARS, + found: '%', + index: 15, + urn: error::UrnPrefix::Optional, + }) + ); + + assert_eq!( + Uuid::parse_str("231231212212423424324323477343246663") + .map_err(crate::Error::expect_parser), + Err(Error::InvalidLength { + expected: EXPECTED_UUID_LENGTHS, + found: 36, + }) + ); + + // Valid + assert!(Uuid::parse_str("00000000000000000000000000000000").is_ok()); + assert!(Uuid::parse_str("67e55044-10b1-426f-9247-bb680e5fe0c8").is_ok()); + assert!(Uuid::parse_str("F9168C5E-CEB2-4faa-B6BF-329BF39FA1E4").is_ok()); + assert!(Uuid::parse_str("67e5504410b1426f9247bb680e5fe0c8").is_ok()); + assert!(Uuid::parse_str("01020304-1112-2122-3132-414243444546").is_ok()); + assert!(Uuid::parse_str( + "urn:uuid:67e55044-10b1-426f-9247-bb680e5fe0c8" + ) + .is_ok()); + + // Nil + let nil = Uuid::nil(); + assert_eq!( + Uuid::parse_str("00000000000000000000000000000000").unwrap(), + nil + ); + assert_eq!( + Uuid::parse_str("00000000-0000-0000-0000-000000000000").unwrap(), + nil + ); + + // Round-trip + let uuid_orig = test_util::new(); + let orig_str = uuid_orig.to_string(); + let uuid_out = Uuid::parse_str(&orig_str).unwrap(); + assert_eq!(uuid_orig, uuid_out); + + // Test error reporting + assert_eq!( + Uuid::parse_str("67e5504410b1426f9247bb680e5fe0c") + .map_err(crate::Error::expect_parser), + Err(Error::InvalidLength { + expected: EXPECTED_UUID_LENGTHS, + found: 31, + }) + ); + assert_eq!( + Uuid::parse_str("67e550X410b1426f9247bb680e5fe0cd") + .map_err(crate::Error::expect_parser), + Err(Error::InvalidCharacter { + expected: EXPECTED_CHARS, + found: 'X', + index: 6, + urn: error::UrnPrefix::Optional, + }) + ); + assert_eq!( + Uuid::parse_str("67e550-4105b1426f9247bb680e5fe0c") + .map_err(crate::Error::expect_parser), + Err(Error::InvalidGroupLength { + expected: error::ExpectedLength::Exact(8), + found: 6, + group: 0, + }) + ); + assert_eq!( + Uuid::parse_str("F9168C5E-CEB2-4faa-B6BF1-02BF39FA1E4") + .map_err(crate::Error::expect_parser), + Err(Error::InvalidGroupLength { + expected: error::ExpectedLength::Exact(4), + found: 5, + group: 3, + }) + ); + } +} diff --git a/third_party/rust/uuid/src/parser/std_support.rs b/third_party/rust/uuid/src/parser/std_support.rs deleted file mode 100644 index 23070124fa..0000000000 --- a/third_party/rust/uuid/src/parser/std_support.rs +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2013-2014 The Rust Project Developers. -// Copyright 2018 The Uuid Project Developers. -// -// See the COPYRIGHT file at the top-level directory of this distribution. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use parser; -use std::error; - -impl error::Error for parser::ParseError { - fn description(&self) -> &str { - self._description() - } -} diff --git a/third_party/rust/uuid/src/prelude.rs b/third_party/rust/uuid/src/prelude.rs index e8f76d44e2..ebacc1b30c 100644 --- a/third_party/rust/uuid/src/prelude.rs +++ b/third_party/rust/uuid/src/prelude.rs @@ -19,12 +19,6 @@ //! extern crate uuid; //! ``` //! -//! and the following in every module: -//! -//! ```rust -//! use uuid::prelude::*; -//! ``` -//! //! # Prelude Contents //! //! Currently the prelude reexports the following: @@ -50,4 +44,4 @@ handling uuid version 1. Requires feature `v1`. pub use super::{Builder, Bytes, Error, Uuid, Variant, Version}; #[cfg(feature = "v1")] -pub use v1::{ClockSequence, Context}; +pub use crate::v1::{ClockSequence, Context}; diff --git a/third_party/rust/uuid/src/serde_support.rs b/third_party/rust/uuid/src/serde_support.rs index 826ff53007..d0baf67123 100644 --- a/third_party/rust/uuid/src/serde_support.rs +++ b/third_party/rust/uuid/src/serde_support.rs @@ -9,8 +9,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use crate::prelude::*; use core::fmt; -use prelude::*; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "serde")] @@ -41,7 +41,7 @@ impl<'de> Deserialize<'de> for Uuid { fn expecting( &self, - formatter: &mut fmt::Formatter, + formatter: &mut fmt::Formatter<'_>, ) -> fmt::Result { write!(formatter, "a UUID string") } @@ -70,7 +70,7 @@ impl<'de> Deserialize<'de> for Uuid { fn expecting( &self, - formatter: &mut fmt::Formatter, + formatter: &mut fmt::Formatter<'_>, ) -> fmt::Result { write!(formatter, "bytes") } @@ -92,7 +92,7 @@ impl<'de> Deserialize<'de> for Uuid { mod serde_tests { use serde_test; - use prelude::*; + use crate::prelude::*; #[test] fn test_serialize_readable() { diff --git a/third_party/rust/uuid/src/slog_support.rs b/third_party/rust/uuid/src/slog_support.rs index 2de9ce96bd..4046ae9040 100644 --- a/third_party/rust/uuid/src/slog_support.rs +++ b/third_party/rust/uuid/src/slog_support.rs @@ -9,15 +9,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use prelude::*; +use crate::prelude::*; use slog; impl slog::Value for Uuid { fn serialize( &self, - _: &slog::Record, + _: &slog::Record<'_>, key: slog::Key, - serializer: &mut slog::Serializer, + serializer: &mut dyn slog::Serializer, ) -> Result<(), slog::Error> { serializer.emit_arguments(key, &format_args!("{}", self)) } @@ -28,11 +28,11 @@ mod tests { #[test] fn test_slog_kv() { + use crate::test_util; use slog; - use slog::Drain; - use test_util; + use slog::{crit, Drain}; - let root = slog::Logger::root(slog::Discard.fuse(), o!()); + let root = slog::Logger::root(slog::Discard.fuse(), slog::o!()); let u1 = test_util::new(); crit!(root, "test"; "u1" => u1); } diff --git a/third_party/rust/uuid/src/std_support.rs b/third_party/rust/uuid/src/std_support.rs deleted file mode 100644 index c561d60acc..0000000000 --- a/third_party/rust/uuid/src/std_support.rs +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2013-2014 The Rust Project Developers. -// Copyright 2018 The Uuid Project Developers. -// -// See the COPYRIGHT file at the top-level directory of this distribution. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
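The `serde_support` changes above keep `Uuid` serializable as a hyphenated string in human-readable formats. A rough sketch of the round trip, assuming the optional `serde` feature plus `serde` (with `derive`) and `serde_json` as extra dependencies, none of which this diff adds:

```rust
use serde::{Deserialize, Serialize};
use uuid::Uuid;

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Resource {
    id: Uuid,
}

fn main() {
    let resource = Resource {
        id: Uuid::parse_str("936da01f-9abd-4d9d-80c7-02af85c822a8").unwrap(),
    };

    // JSON, being human-readable, sees the hyphenated string form.
    let json = serde_json::to_string(&resource).unwrap();
    assert!(json.contains("936da01f-9abd-4d9d-80c7-02af85c822a8"));

    // The string deserializes back to the same value.
    let back: Resource = serde_json::from_str(&json).unwrap();
    assert_eq!(back, resource);
}
```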
- -use std::error; - -impl error::Error for super::BytesError { - fn description(&self) -> &str { - "invalid number of uuid bytes" - } -} - -impl error::Error for super::Error { - fn description(&self) -> &str { - match *self { - super::Error::Bytes(ref err) => error::Error::description(err), - super::Error::Parse(ref err) => error::Error::description(err), - } - } -} diff --git a/third_party/rust/uuid/src/test_util.rs b/third_party/rust/uuid/src/test_util.rs index 22b38c19bb..9eec117507 100644 --- a/third_party/rust/uuid/src/test_util.rs +++ b/third_party/rust/uuid/src/test_util.rs @@ -9,16 +9,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use prelude::*; +use crate::prelude::*; -pub fn new() -> Uuid { +pub const fn new() -> Uuid { Uuid::from_bytes([ 0xF9, 0x16, 0x8C, 0x5E, 0xCE, 0xB2, 0x4F, 0xAA, 0xB6, 0xBF, 0x32, 0x9B, 0xF3, 0x9F, 0xA1, 0xE4, ]) } -pub fn new2() -> Uuid { +pub const fn new2() -> Uuid { Uuid::from_bytes([ 0xF9, 0x16, 0x8C, 0x5E, 0xCE, 0xB2, 0x4F, 0xAB, 0xB6, 0xBF, 0x32, 0x9B, 0xF3, 0x9F, 0xA1, 0xE4, diff --git a/third_party/rust/uuid/src/u128_support.rs b/third_party/rust/uuid/src/u128_support.rs deleted file mode 100644 index 3fc52d9477..0000000000 --- a/third_party/rust/uuid/src/u128_support.rs +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2013-2014 The Rust Project Developers. -// Copyright 2018 The Uuid Project Developers. -// -// See the COPYRIGHT file at the top-level directory of this distribution. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use byteorder; -use prelude::*; - -impl Uuid { - /// Creates a new [`Uuid`] from a `u128` value. - /// - /// To create a [`Uuid`] from `u128`s, you need `u128` feature enabled for - /// this crate. - /// - /// [`Uuid`]: ../struct.Uuid.html - #[inline] - pub fn from_u128(quad: u128) -> Self { - Uuid::from(quad) - } -} - -impl From for Uuid { - fn from(f: u128) -> Self { - let mut bytes: ::Bytes = [0; 16]; - - { - use byteorder::ByteOrder; - - byteorder::NativeEndian::write_u128(&mut bytes[..], f); - } - - Uuid::from_bytes(bytes) - } -} - -#[cfg(test)] -mod tests { - use prelude::*; - - #[test] - fn test_from_u128() { - const U128: u128 = 0x3a0724b4_93a0_4d87_ac28_759c6caa13c4; - - let uuid = Uuid::from(U128); - - let uuid2: Uuid = U128.into(); - - assert_eq!(uuid, uuid2) - } - -} diff --git a/third_party/rust/uuid/src/v1.rs b/third_party/rust/uuid/src/v1.rs index dc5a057927..8bd98a4773 100644 --- a/third_party/rust/uuid/src/v1.rs +++ b/third_party/rust/uuid/src/v1.rs @@ -1,11 +1,13 @@ -//! The implementation for Version 1 [`Uuid`]s. +//! The implementation for Version 1 UUIDs. //! //! Note that you need feature `v1` in order to use these features. -//! -//! [`Uuid`]: ../struct.Uuid.html +use crate::prelude::*; use core::sync::atomic; -use prelude::*; + +/// The number of 100 ns ticks between the UUID epoch +/// `1582-10-15 00:00:00` and the Unix epoch `1970-01-01 00:00:00`. +const UUID_TICKS_BETWEEN_EPOCHS: u64 = 0x01B2_1DD2_1381_4000; /// A thread-safe, stateful context for the v1 generator to help ensure /// process-wide uniqueness. @@ -14,118 +16,241 @@ pub struct Context { count: atomic::AtomicUsize, } -/// A trait that abstracts over generation of Uuid v1 "Clock Sequence" values. +/// Stores the number of nanoseconds from an epoch and a counter for ensuring +/// V1 ids generated on the same host are unique. 
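`UUID_TICKS_BETWEEN_EPOCHS` introduced above is easy to sanity-check: converted out of 100 ns ticks it is exactly the span from the Gregorian reform epoch to the Unix epoch. A small verification sketch (the constant is private, so it is restated locally here):

```rust
fn main() {
    // 100 ns ticks between 1582-10-15 00:00:00 and 1970-01-01 00:00:00,
    // restated from v1.rs for illustration.
    const UUID_TICKS_BETWEEN_EPOCHS: u64 = 0x01B2_1DD2_1381_4000;

    let seconds = UUID_TICKS_BETWEEN_EPOCHS / 10_000_000;
    assert_eq!(seconds, 12_219_292_800);

    // That is a whole number of days: 141,427 of them.
    assert_eq!(seconds % 86_400, 0);
    assert_eq!(seconds / 86_400, 141_427);
}
```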
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct Timestamp { + ticks: u64, + counter: u16, +} + +impl Timestamp { + /// Construct a `Timestamp` from its raw component values: an RFC4122 + /// timestamp and counter. + /// + /// RFC4122, which defines the V1 UUID, specifies a 60-byte timestamp format + /// as the number of 100-nanosecond intervals elapsed since 00:00:00.00, + /// 15 Oct 1582, "the date of the Gregorian reform of the Christian + /// calendar." + /// + /// The counter value is used to differentiate between ids generated by + /// the same host computer in rapid succession (i.e. with the same observed + /// time). See the [`ClockSequence`] trait for a generic interface to any + /// counter generators that might be used. + /// + /// Internally, the timestamp is stored as a `u64`. For this reason, dates + /// prior to October 1582 are not supported. + /// + /// [`ClockSequence`]: trait.ClockSequence.html + pub const fn from_rfc4122(ticks: u64, counter: u16) -> Self { + Timestamp { ticks, counter } + } + + /// Construct a `Timestamp` from a unix timestamp and sequence-generating + /// `context`. + /// + /// A unix timestamp represents the elapsed time since Jan 1 1970. Libc's + /// `clock_gettime` and other popular implementations traditionally + /// represent this duration as a `timespec`: a struct with `u64` and + /// `u32` fields representing the seconds, and "subsecond" or fractional + /// nanoseconds elapsed since the timestamp's second began, + /// respectively. + /// + /// This constructs a `Timestamp` from the seconds and fractional + /// nanoseconds of a unix timestamp, converting the duration since 1970 + /// into the number of 100-nanosecond intervals since 00:00:00.00, 15 + /// Oct 1982 specified by RFC4122 and used internally by `Timestamp`. + /// + /// The function is not guaranteed to produce monotonically increasing + /// values however. There is a slight possibility that two successive + /// equal time values could be supplied and the sequence counter wraps back + /// over to 0. + /// + /// If uniqueness and monotonicity is required, the user is responsible for + /// ensuring that the time value always increases between calls (including + /// between restarts of the process and device). + pub fn from_unix( + context: impl ClockSequence, + seconds: u64, + subsec_nanos: u32, + ) -> Self { + let counter = context.generate_sequence(seconds, subsec_nanos); + let ticks = UUID_TICKS_BETWEEN_EPOCHS + + seconds * 10_000_000 + + (subsec_nanos as u64 / 100); + Timestamp { ticks, counter } + } + + /// Returns the raw RFC4122 timestamp and counter values stored by the + /// `Timestamp`. + /// + /// The timestamp (the first, `u64` element in the tuple) represents the + /// number of 100-nanosecond intervals since 00:00:00.00, 15 Oct 1582. + /// The counter is used to differentiate between ids generated on the + /// same host computer with the same observed time. + pub const fn to_rfc4122(&self) -> (u64, u16) { + (self.ticks, self.counter) + } + + /// Returns the timestamp converted to the seconds and fractional + /// nanoseconds since Jan 1 1970. + /// + /// Internally, the time is stored in 100-nanosecond intervals, + /// thus the maximum precision represented by the fractional nanoseconds + /// value is less than its unit size (100 ns vs. 1 ns). 
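The conversions on `Timestamp` described above are straightforward to exercise. A sketch assuming the optional `v1` feature is enabled:

```rust
use uuid::v1::{Context, Timestamp};

fn main() {
    let context = Context::new(42);

    // 1_497_624_119 s and 1_234 ns after the Unix epoch.
    let ts = Timestamp::from_unix(&context, 1_497_624_119, 1_234);

    // The RFC4122 tick count includes the 1582..1970 epoch offset.
    let (ticks, _counter) = ts.to_rfc4122();
    assert_eq!(ticks, 0x01B2_1DD2_1381_4000 + 1_497_624_119 * 10_000_000 + 12);

    // Converting back truncates the nanoseconds to 100 ns resolution.
    assert_eq!(ts.to_unix(), (1_497_624_119, 1_200));
}
```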
+ pub const fn to_unix(&self) -> (u64, u32) { + let unix_ticks = self.ticks - UUID_TICKS_BETWEEN_EPOCHS; + ( + unix_ticks / 10_000_000, + (unix_ticks % 10_000_000) as u32 * 100, + ) + } + + /// Returns the timestamp converted into nanoseconds elapsed since Jan 1 + /// 1970. Internally, the time is stored in 100-nanosecond intervals, + /// thus the maximum precision represented is less than the units it is + /// measured in (100 ns vs. 1 ns). The value returned represents the + /// same duration as [`Timestamp::to_unix`]; this provides it in nanosecond + /// units for convenience. + pub const fn to_unix_nanos(&self) -> u64 { + (self.ticks - UUID_TICKS_BETWEEN_EPOCHS) * 100 + } +} + +/// A trait that abstracts over generation of UUID v1 "Clock Sequence" values. pub trait ClockSequence { /// Return a 16-bit number that will be used as the "clock sequence" in - /// the Uuid. The number must be different if the time has changed since + /// the UUID. The number must be different if the time has changed since /// the last time a clock sequence was requested. - fn generate_sequence(&self, seconds: u64, nano_seconds: u32) -> u16; + fn generate_sequence(&self, seconds: u64, subsec_nanos: u32) -> u16; +} + +impl<'a, T: ClockSequence + ?Sized> ClockSequence for &'a T { + fn generate_sequence(&self, seconds: u64, subsec_nanos: u32) -> u16 { + (**self).generate_sequence(seconds, subsec_nanos) + } } impl Uuid { - /// Create a new [`Uuid`] (version 1) using a time value + sequence + + /// Create a new UUID (version 1) using a time value + sequence + /// *NodeId*. /// - /// This expects two values representing a monotonically increasing value - /// as well as a unique 6 byte NodeId, and an implementation of - /// [`ClockSequence`]. This function is only guaranteed to produce - /// unique values if the following conditions hold: + /// When generating [`Timestamp`]s using a [`ClockSequence`], this function + /// is only guaranteed to produce unique values if the following conditions + /// hold: /// /// 1. The *NodeId* is unique for this process, /// 2. The *Context* is shared across all threads which are generating v1 - /// [`Uuid`]s, + /// UUIDs, /// 3. The [`ClockSequence`] implementation reliably returns unique /// clock sequences (this crate provides [`Context`] for this /// purpose. However you can create your own [`ClockSequence`] /// implementation, if [`Context`] does not meet your needs). /// - /// The NodeID must be exactly 6 bytes long. If the NodeID is not a valid - /// length this will return a [`ParseError`]`::InvalidLength`. - /// - /// The function is not guaranteed to produce monotonically increasing - /// values however. There is a slight possibility that two successive - /// equal time values could be supplied and the sequence counter wraps back - /// over to 0. - /// - /// If uniqueness and monotonicity is required, the user is responsible for - /// ensuring that the time value always increases between calls (including - /// between restarts of the process and device). + /// The NodeID must be exactly 6 bytes long. /// /// Note that usage of this method requires the `v1` feature of this crate /// to be enabled. 
/// /// # Examples /// - /// Basic usage: + /// A UUID can be created from a unix [`Timestamp`] with a + /// [`ClockSequence`]: /// /// ```rust - /// use uuid::v1::Context; + /// use uuid::v1::{Timestamp, Context}; /// use uuid::Uuid; /// /// let context = Context::new(42); - /// if let Ok(uuid) = - /// Uuid::new_v1(&context, 1497624119, 1234, &[1, 2, 3, 4, 5, 6]) - /// { - /// assert_eq!( - /// uuid.to_hyphenated().to_string(), - /// "f3b4958c-52a1-11e7-802a-010203040506" - /// ) - /// } else { - /// panic!() - /// } + /// let ts = Timestamp::from_unix(&context, 1497624119, 1234); + /// let uuid = Uuid::new_v1(ts, &[1, 2, 3, 4, 5, 6]).expect("failed to generate UUID"); + /// + /// assert_eq!( + /// uuid.to_hyphenated().to_string(), + /// "f3b4958c-52a1-11e7-802a-010203040506" + /// ); /// ``` /// - /// [`ParseError`]: ../enum.ParseError.html - /// [`Uuid`]: ../struct.Uuid.html - /// [`ClockSequence`]: struct.ClockSequence.html - /// [`Context`]: struct.Context.html - pub fn new_v1( - context: &T, - seconds: u64, - nano_seconds: u32, - node_id: &[u8], - ) -> Result - where - T: ClockSequence, - { + /// The timestamp can also be created manually as per RFC4122: + /// + /// ``` + /// use uuid::v1::{Timestamp, Context}; + /// use uuid::Uuid; + /// + /// let context = Context::new(42); + /// let ts = Timestamp::from_rfc4122(1497624119, 0); + /// let uuid = Uuid::new_v1(ts, &[1, 2, 3, 4, 5, 6]).expect("failed to generate UUID"); + /// + /// assert_eq!( + /// uuid.to_hyphenated().to_string(), + /// "5943ee37-0000-1000-8000-010203040506" + /// ); + /// ``` + /// + /// [`Timestamp`]: v1/struct.Timestamp.html + /// [`ClockSequence`]: v1/struct.ClockSequence.html + /// [`Context`]: v1/struct.Context.html + pub fn new_v1(ts: Timestamp, node_id: &[u8]) -> Result { const NODE_ID_LEN: usize = 6; let len = node_id.len(); if len != NODE_ID_LEN { - return Err(::BytesError::new(NODE_ID_LEN, len)); + Err(crate::builder::Error::new(NODE_ID_LEN, len))?; } - let time_low; - let time_mid; - let time_high_and_version; - - { - /// The number of 100 ns ticks between the UUID epoch - /// `1582-10-15 00:00:00` and the Unix epoch `1970-01-01 00:00:00`. - const UUID_TICKS_BETWEEN_EPOCHS: u64 = 0x01B2_1DD2_1381_4000; - - let timestamp = - seconds * 10_000_000 + u64::from(nano_seconds / 100); - let uuid_time = timestamp + UUID_TICKS_BETWEEN_EPOCHS; - - time_low = (uuid_time & 0xFFFF_FFFF) as u32; - time_mid = ((uuid_time >> 32) & 0xFFFF) as u16; - time_high_and_version = - (((uuid_time >> 48) & 0x0FFF) as u16) | (1 << 12); - } + let time_low = (ts.ticks & 0xFFFF_FFFF) as u32; + let time_mid = ((ts.ticks >> 32) & 0xFFFF) as u16; + let time_high_and_version = + (((ts.ticks >> 48) & 0x0FFF) as u16) | (1 << 12); let mut d4 = [0; 8]; { - let count = context.generate_sequence(seconds, nano_seconds); - d4[0] = (((count & 0x3F00) >> 8) as u8) | 0x80; - d4[1] = (count & 0xFF) as u8; + d4[0] = (((ts.counter & 0x3F00) >> 8) as u8) | 0x80; + d4[1] = (ts.counter & 0xFF) as u8; } d4[2..].copy_from_slice(node_id); Uuid::from_fields(time_low, time_mid, time_high_and_version, &d4) } + + /// Returns an optional [`Timestamp`] storing the timestamp and + /// counter portion parsed from a V1 UUID. + /// + /// Returns `None` if the supplied UUID is not V1. + /// + /// The V1 timestamp format defined in RFC4122 specifies a 60-bit + /// integer representing the number of 100-nanosecond intervals + /// since 00:00:00.00, 15 Oct 1582. 
+ /// + /// [`Timestamp`] offers several options for converting the raw RFC4122 + /// value into more commonly-used formats, such as a unix timestamp. + /// + /// [`Timestamp`]: v1/struct.Timestamp.html + pub fn to_timestamp(&self) -> Option { + if self + .get_version() + .map(|v| v != Version::Mac) + .unwrap_or(true) + { + return None; + } + + let ticks: u64 = u64::from(self.as_bytes()[6] & 0x0F) << 56 + | u64::from(self.as_bytes()[7]) << 48 + | u64::from(self.as_bytes()[4]) << 40 + | u64::from(self.as_bytes()[5]) << 32 + | u64::from(self.as_bytes()[0]) << 24 + | u64::from(self.as_bytes()[1]) << 16 + | u64::from(self.as_bytes()[2]) << 8 + | u64::from(self.as_bytes()[3]); + + let counter: u16 = u16::from(self.as_bytes()[8] & 0x3F) << 8 + | u16::from(self.as_bytes()[9]); + + Some(Timestamp::from_rfc4122(ticks, counter)) + } } impl Context { @@ -134,12 +259,10 @@ impl Context { /// /// This is a context which can be shared across threads. It maintains an /// internal counter that is incremented at every request, the value ends - /// up in the clock_seq portion of the [`Uuid`] (the fourth group). This - /// will improve the probability that the [`Uuid`] is unique across the + /// up in the clock_seq portion of the UUID (the fourth group). This + /// will improve the probability that the UUID is unique across the /// process. - /// - /// [`Uuid`]: ../struct.Uuid.html - pub fn new(count: u16) -> Self { + pub const fn new(count: u16) -> Self { Self { count: atomic::AtomicUsize::new(count as usize), } @@ -154,19 +277,23 @@ impl ClockSequence for Context { #[cfg(test)] mod tests { + use super::*; + + use crate::std::string::ToString; + #[test] fn test_new_v1() { - use super::Context; - use prelude::*; - let time: u64 = 1_496_854_535; let time_fraction: u32 = 812_946_000; let node = [1, 2, 3, 4, 5, 6]; let context = Context::new(0); { - let uuid = - Uuid::new_v1(&context, time, time_fraction, &node).unwrap(); + let uuid = Uuid::new_v1( + Timestamp::from_unix(&context, time, time_fraction), + &node, + ) + .unwrap(); assert_eq!(uuid.get_version(), Some(Version::Mac)); assert_eq!(uuid.get_variant(), Some(Variant::RFC4122)); @@ -175,21 +302,24 @@ mod tests { "20616934-4ba2-11e7-8000-010203040506" ); - let ts = uuid.to_timestamp().unwrap(); + let ts = uuid.to_timestamp().unwrap().to_rfc4122(); assert_eq!(ts.0 - 0x01B2_1DD2_1381_4000, 14_968_545_358_129_460); assert_eq!(ts.1, 0); }; { - let uuid2 = - Uuid::new_v1(&context, time, time_fraction, &node).unwrap(); + let uuid2 = Uuid::new_v1( + Timestamp::from_unix(&context, time, time_fraction), + &node, + ) + .unwrap(); assert_eq!( uuid2.to_hyphenated().to_string(), "20616934-4ba2-11e7-8001-010203040506" ); - assert_eq!(uuid2.to_timestamp().unwrap().1, 1) + assert_eq!(uuid2.to_timestamp().unwrap().to_rfc4122().1, 1) }; } } diff --git a/third_party/rust/uuid/src/v3.rs b/third_party/rust/uuid/src/v3.rs index f2248d78e7..c598239e76 100644 --- a/third_party/rust/uuid/src/v3.rs +++ b/third_party/rust/uuid/src/v3.rs @@ -1,8 +1,8 @@ +use crate::prelude::*; use md5; -use prelude::*; impl Uuid { - /// Creates a [`Uuid`] using a name from a namespace, based on the MD5 + /// Creates a UUID using a name from a namespace, based on the MD5 /// hash. /// /// A number of namespaces are available as constants in this crate: @@ -15,28 +15,34 @@ impl Uuid { /// Note that usage of this method requires the `v3` feature of this crate /// to be enabled. 
/// - /// [`NAMESPACE_DNS`]: ../ns/const.NAMESPACE_DNS.html - /// [`NAMESPACE_OID`]: ../ns/const.NAMESPACE_OID.html - /// [`NAMESPACE_URL`]: ../ns/const.NAMESPACE_URL.html - /// [`NAMESPACE_X500`]: ../ns/const.NAMESPACE_X500.html - /// [`Uuid`]: ../struct.Uuid.html + /// [`NAMESPACE_DNS`]: ns/const.NAMESPACE_DNS.html + /// [`NAMESPACE_OID`]: ns/const.NAMESPACE_OID.html + /// [`NAMESPACE_URL`]: ns/const.NAMESPACE_URL.html + /// [`NAMESPACE_X500`]: ns/const.NAMESPACE_X500.html pub fn new_v3(namespace: &Uuid, name: &[u8]) -> Uuid { let mut context = md5::Context::new(); context.consume(namespace.as_bytes()); context.consume(name); - let mut uuid = Uuid::from_bytes(context.compute().into()); + let computed = context.compute(); + let bytes = computed.into(); - uuid.set_variant(Variant::RFC4122); - uuid.set_version(Version::Md5); - uuid + let mut builder = crate::builder::Builder::from_bytes(bytes); + + builder + .set_variant(Variant::RFC4122) + .set_version(Version::Md5); + + builder.build() } } #[cfg(test)] mod tests { - use prelude::*; + use super::*; + + use crate::std::string::ToString; static FIXTURE: &'static [(&'static Uuid, &'static str, &'static str)] = &[ ( diff --git a/third_party/rust/uuid/src/v4.rs b/third_party/rust/uuid/src/v4.rs index fe2ee818b4..fe1af0f633 100644 --- a/third_party/rust/uuid/src/v4.rs +++ b/third_party/rust/uuid/src/v4.rs @@ -1,8 +1,8 @@ -use prelude::*; +use crate::prelude::*; use rand; impl Uuid { - /// Creates a random [`Uuid`]. + /// Creates a random UUID. /// /// This uses the [`rand`] crate's default task RNG as the source of random /// numbers. If you'd like to use a custom generator, don't use this @@ -22,7 +22,6 @@ impl Uuid { /// ``` /// /// [`rand`]: https://crates.io/crates/rand - /// [`Uuid`]: ../struct.Uuid.html pub fn new_v4() -> Self { use rand::RngCore; @@ -40,7 +39,7 @@ impl Uuid { #[cfg(test)] mod tests { - use prelude::*; + use crate::prelude::*; #[test] fn test_new() { diff --git a/third_party/rust/uuid/src/v5.rs b/third_party/rust/uuid/src/v5.rs index d2d878edcd..b71d6d433e 100644 --- a/third_party/rust/uuid/src/v5.rs +++ b/third_party/rust/uuid/src/v5.rs @@ -1,4 +1,4 @@ -use prelude::*; +use crate::prelude::*; use sha1; impl Uuid { @@ -25,19 +25,24 @@ impl Uuid { hash.update(name); let buffer = hash.digest().bytes(); - let mut uuid = Uuid::default(); - uuid.0.copy_from_slice(&buffer[..16]); - uuid.set_variant(Variant::RFC4122); - uuid.set_version(Version::Sha1); + let mut bytes = crate::Bytes::default(); + bytes.copy_from_slice(&buffer[..16]); - uuid + let mut builder = crate::builder::Builder::from_bytes(bytes); + builder + .set_variant(Variant::RFC4122) + .set_version(Version::Sha1); + + builder.build() } } #[cfg(test)] mod tests { - use prelude::*; + use super::*; + + use crate::std::string::ToString; static FIXTURE: &'static [(&'static Uuid, &'static str, &'static str)] = &[ ( diff --git a/third_party/rust/uuid/src/winapi_support.rs b/third_party/rust/uuid/src/winapi_support.rs index 82d62093ee..e11ccde6e0 100644 --- a/third_party/rust/uuid/src/winapi_support.rs +++ b/third_party/rust/uuid/src/winapi_support.rs @@ -1,7 +1,4 @@ -use prelude::*; - -use BytesError; - +use crate::prelude::*; use winapi::shared::guiddef; #[cfg(feature = "guid")] @@ -9,7 +6,7 @@ impl Uuid { /// Attempts to create a [`Uuid`] from a little endian winapi `GUID` /// /// [`Uuid`]: ../struct.Uuid.html - pub fn from_guid(guid: guiddef::GUID) -> Result { + pub fn from_guid(guid: guiddef::GUID) -> Result { Uuid::from_fields_le( guid.Data1 as u32, guid.Data2 as u16, 
@@ -36,8 +33,9 @@ impl Uuid { #[cfg(feature = "guid")] #[cfg(test)] mod tests { - use prelude::*; - use std::str::FromStr; + use super::*; + + use crate::std::string::ToString; use winapi::shared::guiddef; #[test] diff --git a/third_party/rust/version_check/.cargo-checksum.json b/third_party/rust/version_check/.cargo-checksum.json index f93491f862..f4066d52f2 100644 --- a/third_party/rust/version_check/.cargo-checksum.json +++ b/third_party/rust/version_check/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"95aa7965cb8a20e6b196cda8122e9855cc04ac4a42bc93e0616b45605a11bc2b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"b7e650f3fce5c53249d1cdc608b54df156a97edd636cf9d23498d0cfe7aec63e","README.md":"8e5c0390f2dd2e9c082ea8feb1539af74f60c536faf6b836b8afd17fe605004c","src/lib.rs":"100373e52a857267090c78910b7b3f734949dd74092c1caf31f49cc64f8835e7"},"package":"914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd"} \ No newline at end of file +{"files":{"Cargo.toml":"8e79e76a669227ea5f61530ad74ea835434efb34b3143dd8afe7fbd327e1f8f9","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"b7e650f3fce5c53249d1cdc608b54df156a97edd636cf9d23498d0cfe7aec63e","README.md":"d45a7a97623a56bf9cb7766976c3807312f7d4ac0cfaf4563ff76bc4d6ad1835","src/channel.rs":"f916ece9beeb7f3d512b423ae6da05d45f284bf42ddf7c14f80b77398d52dac7","src/date.rs":"d31e158a2b49f81da512150c5c93194655dac4114825e285fe2f688c09b001a4","src/lib.rs":"a15eb43cec1acfb0db42e8f93bdf70246ebceb1684ac39496bd28184722e4480","src/version.rs":"7022a20252f8beee0833f5d4f67b863e2f10963a24010e1300cb6603bcb7d514"},"package":"078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce"} \ No newline at end of file diff --git a/third_party/rust/version_check/Cargo.toml b/third_party/rust/version_check/Cargo.toml index 2844801448..6e01ed8c31 100644 --- a/third_party/rust/version_check/Cargo.toml +++ b/third_party/rust/version_check/Cargo.toml @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're @@ -12,7 +12,7 @@ [package] name = "version_check" -version = "0.1.5" +version = "0.9.1" authors = ["Sergio Benitez "] description = "Tiny crate to check the version of the installed/running rustc." documentation = "https://docs.rs/version_check/" diff --git a/third_party/rust/version_check/README.md b/third_party/rust/version_check/README.md index 63b3a623e9..9d4c169ce7 100644 --- a/third_party/rust/version_check/README.md +++ b/third_party/rust/version_check/README.md @@ -1,5 +1,9 @@ # version\_check +[![Build Status](https://travis-ci.com/SergioBenitez/version_check.svg?branch=master)](https://travis-ci.com/SergioBenitez/version_check) +[![Current Crates.io Version](https://img.shields.io/crates/v/version_check.svg)](https://crates.io/crates/version_check) +[![rustdocs on docs.rs](https://docs.rs/version_check/badge.svg)](https://docs.rs/version_check) + This tiny crate checks that the running or installed `rustc` meets some version requirements. The version is queried by calling the Rust compiler with `--version`. 
The path to the compiler is determined first via the `RUSTC` @@ -12,52 +16,57 @@ Add to your `Cargo.toml` file, typically as a build dependency: ```toml [build-dependencies] -version_check = "0.1" +version_check = "0.9" ``` +`version_check` is compatible and compiles with Rust 1.0.0 and beyond. + ## Examples -Check that the running compiler is a nightly release: +Set a `cfg` flag in `build.rs` if the running compiler was determined to be +at least version `1.13.0`: ```rust -extern crate version_check; +extern crate version_check as rustc; -match version_check::is_nightly() { - Some(true) => "running a nightly", - Some(false) => "not nightly", - None => "couldn't figure it out" -}; +if rustc::is_min_version("1.13.0").unwrap_or(false) { + println!("cargo:rustc-cfg=question_mark_operator"); +} ``` -Check that the running compiler is at least version `1.13.0`: +Check that the running compiler was released on or after `2018-12-18`: ```rust -extern crate version_check; +extern crate version_check as rustc; -match version_check::is_min_version("1.13.0") { - Some((true, version)) => format!("Yes! It's: {}", version), - Some((false, version)) => format!("No! {} is too old!", version), - None => "couldn't figure it out".into() +match rustc::is_min_date("2018-12-18") { + Some(true) => "Yep! It's recent!", + Some(false) => "No, it's older.", + None => "Couldn't determine the rustc version." }; ``` -Check that the running compiler was released on or after `2016-12-18`: +Check that the running compiler supports feature flags: ```rust -extern crate version_check; +extern crate version_check as rustc; -match version_check::is_min_date("2016-12-18") { - Some((true, date)) => format!("Yes! It's: {}", date), - Some((false, date)) => format!("No! {} is too long ago!", date), - None => "couldn't figure it out".into() +match rustc::is_feature_flaggable() { + Some(true) => "Yes! It's a dev or nightly release!", + Some(false) => "No, it's stable or beta.", + None => "Couldn't determine the rustc version." }; ``` +See the [rustdocs](https://docs.rs/version_check) for more examples and complete +documentation. + ## Alternatives -This crate is dead simple with no dependencies. If you need something more and -don't care about panicking if the version cannot be obtained or adding -dependencies, see [rustc_version](https://crates.io/crates/rustc_version). +This crate is dead simple with no dependencies. If you need something more +and don't care about panicking if the version cannot be obtained, or if you +don't mind adding dependencies, see +[rustc_version](https://crates.io/crates/rustc_version). ## License diff --git a/third_party/rust/version_check/src/channel.rs b/third_party/rust/version_check/src/channel.rs new file mode 100644 index 0000000000..2332a014c2 --- /dev/null +++ b/third_party/rust/version_check/src/channel.rs @@ -0,0 +1,192 @@ +use std::fmt; + +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +enum Kind { + Dev, + Nightly, + Beta, + Stable, +} + +/// Release channel: "dev", "nightly", "beta", or "stable". +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +pub struct Channel(Kind); + +impl Channel { + /// Reads the release channel of the running compiler. If it cannot be + /// determined (see the [top-level documentation](crate)), returns `None`. 
+ /// + /// # Example + /// + /// ```rust + /// use version_check::Channel; + /// + /// match Channel::read() { + /// Some(c) => format!("The channel is: {}", c), + /// None => format!("Failed to read the release channel.") + /// }; + /// ``` + pub fn read() -> Option { + ::get_version_and_date() + .and_then(|(version, _)| version) + .and_then(|version| Channel::parse(&version)) + } + + /// Parse a Rust release channel from a Rust release version string (of the + /// form `major[.minor[.patch[-channel]]]`). Returns `None` if `version` is + /// not a valid Rust version string. + /// + /// # Example + /// + /// ```rust + /// use version_check::Channel; + /// + /// let dev = Channel::parse("1.3.0-dev").unwrap(); + /// assert!(dev.is_dev()); + /// + /// let nightly = Channel::parse("1.42.2-nightly").unwrap(); + /// assert!(nightly.is_nightly()); + /// + /// let beta = Channel::parse("1.32.0-beta").unwrap(); + /// assert!(beta.is_beta()); + /// + /// let stable = Channel::parse("1.4.0").unwrap(); + /// assert!(stable.is_stable()); + /// ``` + pub fn parse(version: &str) -> Option { + if version.contains("-dev") { + Some(Channel(Kind::Dev)) + } else if version.contains("-nightly") { + Some(Channel(Kind::Nightly)) + } else if version.contains("-beta") { + Some(Channel(Kind::Beta)) + } else if !version.contains("-") { + Some(Channel(Kind::Stable)) + } else { + None + } + } + + /// Returns the name of the release channel. + fn as_str(&self) -> &'static str { + match self.0 { + Kind::Dev => "dev", + Kind::Beta => "beta", + Kind::Nightly => "nightly", + Kind::Stable => "stable", + } + } + + /// Returns `true` if this channel supports feature flags. In other words, + /// returns `true` if the channel is either `dev` or `nightly`. + /// + /// # Example + /// + /// ```rust + /// use version_check::Channel; + /// + /// let dev = Channel::parse("1.3.0-dev").unwrap(); + /// assert!(dev.supports_features()); + /// + /// let nightly = Channel::parse("1.42.2-nightly").unwrap(); + /// assert!(nightly.supports_features()); + /// + /// let beta = Channel::parse("1.32.0-beta").unwrap(); + /// assert!(!beta.supports_features()); + /// + /// let stable = Channel::parse("1.4.0").unwrap(); + /// assert!(!stable.supports_features()); + /// ``` + pub fn supports_features(&self) -> bool { + match self.0 { + Kind::Dev | Kind::Nightly => true, + Kind::Beta | Kind::Stable => false + } + } + + /// Returns `true` if this channel is `dev` and `false` otherwise. + /// + /// # Example + /// + /// ```rust + /// use version_check::Channel; + /// + /// let dev = Channel::parse("1.3.0-dev").unwrap(); + /// assert!(dev.is_dev()); + /// + /// let stable = Channel::parse("1.0.0").unwrap(); + /// assert!(!stable.is_dev()); + /// ``` + pub fn is_dev(&self) -> bool { + match self.0 { + Kind::Dev => true, + _ => false + } + } + + /// Returns `true` if this channel is `nightly` and `false` otherwise. + /// + /// # Example + /// + /// ```rust + /// use version_check::Channel; + /// + /// let nightly = Channel::parse("1.3.0-nightly").unwrap(); + /// assert!(nightly.is_nightly()); + /// + /// let stable = Channel::parse("1.0.0").unwrap(); + /// assert!(!stable.is_nightly()); + /// ``` + pub fn is_nightly(&self) -> bool { + match self.0 { + Kind::Nightly => true, + _ => false + } + } + + /// Returns `true` if this channel is `beta` and `false` otherwise. 
+ /// + /// # Example + /// + /// ```rust + /// use version_check::Channel; + /// + /// let beta = Channel::parse("1.3.0-beta").unwrap(); + /// assert!(beta.is_beta()); + /// + /// let stable = Channel::parse("1.0.0").unwrap(); + /// assert!(!stable.is_beta()); + /// ``` + pub fn is_beta(&self) -> bool { + match self.0 { + Kind::Beta => true, + _ => false + } + } + + /// Returns `true` if this channel is `stable` and `false` otherwise. + /// + /// # Example + /// + /// ```rust + /// use version_check::Channel; + /// + /// let stable = Channel::parse("1.0.0").unwrap(); + /// assert!(stable.is_stable()); + /// + /// let beta = Channel::parse("1.3.0-beta").unwrap(); + /// assert!(!beta.is_stable()); + /// ``` + pub fn is_stable(&self) -> bool { + match self.0 { + Kind::Stable => true, + _ => false + } + } +} + +impl fmt::Display for Channel { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.as_str()) + } +} diff --git a/third_party/rust/version_check/src/date.rs b/third_party/rust/version_check/src/date.rs new file mode 100644 index 0000000000..55d9b24317 --- /dev/null +++ b/third_party/rust/version_check/src/date.rs @@ -0,0 +1,167 @@ +use std::fmt; + +/// Release date including year, month, and day. +// Internal storage is: y[31..9] | m[8..5] | d[5...0]. +#[derive(Debug, PartialEq, Eq, Copy, Clone, PartialOrd, Ord)] +pub struct Date(u32); + +impl Date { + /// Reads the release date of the running compiler. If it cannot be + /// determined (see the [top-level documentation](crate)), returns `None`. + /// + /// # Example + /// + /// ```rust + /// use version_check::Date; + /// + /// match Date::read() { + /// Some(d) => format!("The release date is: {}", d), + /// None => format!("Failed to read the release date.") + /// }; + /// ``` + pub fn read() -> Option { + ::get_version_and_date() + .and_then(|(_, date)| date) + .and_then(|date| Date::parse(&date)) + } + + /// Return the original (YYYY, MM, DD). + fn to_ymd(&self) -> (u16, u8, u8) { + let y = self.0 >> 9; + let m = (self.0 << 23) >> 28; + let d = (self.0 << 27) >> 27; + (y as u16, m as u8, d as u8) + } + + /// Parse a release date of the form `%Y-%m-%d`. Returns `None` if `date` is + /// not in `%Y-%m-%d` format. + /// + /// # Example + /// + /// ```rust + /// use version_check::Date; + /// + /// let date = Date::parse("2016-04-20").unwrap(); + /// + /// assert!(date.at_least("2016-01-10")); + /// assert!(date.at_most("2016-04-20")); + /// assert!(date.exactly("2016-04-20")); + /// + /// assert!(Date::parse("March 13, 2018").is_none()); + /// assert!(Date::parse("1-2-3-4-5").is_none()); + /// ``` + pub fn parse(date: &str) -> Option { + let ymd: Vec = date.split("-") + .filter_map(|s| s.parse::().ok()) + .collect(); + + if ymd.len() != 3 { + return None + } + + let (y, m, d) = (ymd[0], ymd[1], ymd[2]); + Some(Date((y << 9) | ((m & 0xF) << 5) | (d & 0x1F))) + } + + /// Returns `true` if `self` occurs on or after `date`. + /// + /// If `date` occurs before `self`, or if `date` is not in `%Y-%m-%d` + /// format, returns `false`. 
+ /// + /// # Example + /// + /// ```rust + /// use version_check::Date; + /// + /// let date = Date::parse("2020-01-01").unwrap(); + /// + /// assert!(date.at_least("2019-12-31")); + /// assert!(date.at_least("2020-01-01")); + /// assert!(date.at_least("2014-04-31")); + /// + /// assert!(!date.at_least("2020-01-02")); + /// assert!(!date.at_least("2024-08-18")); + /// ``` + pub fn at_least(&self, date: &str) -> bool { + Date::parse(date) + .map(|date| self >= &date) + .unwrap_or(false) + } + + /// Returns `true` if `self` occurs on or before `date`. + /// + /// If `date` occurs after `self`, or if `date` is not in `%Y-%m-%d` + /// format, returns `false`. + /// + /// # Example + /// + /// ```rust + /// use version_check::Date; + /// + /// let date = Date::parse("2020-01-01").unwrap(); + /// + /// assert!(date.at_most("2020-01-01")); + /// assert!(date.at_most("2020-01-02")); + /// assert!(date.at_most("2024-08-18")); + /// + /// assert!(!date.at_most("2019-12-31")); + /// assert!(!date.at_most("2014-04-31")); + /// ``` + pub fn at_most(&self, date: &str) -> bool { + Date::parse(date) + .map(|date| self <= &date) + .unwrap_or(false) + } + + /// Returns `true` if `self` occurs exactly on `date`. + /// + /// If `date` is not exactly `self`, or if `date` is not in `%Y-%m-%d` + /// format, returns `false`. + /// + /// # Example + /// + /// ```rust + /// use version_check::Date; + /// + /// let date = Date::parse("2020-01-01").unwrap(); + /// + /// assert!(date.exactly("2020-01-01")); + /// + /// assert!(!date.exactly("2019-12-31")); + /// assert!(!date.exactly("2014-04-31")); + /// assert!(!date.exactly("2020-01-02")); + /// assert!(!date.exactly("2024-08-18")); + /// ``` + pub fn exactly(&self, date: &str) -> bool { + Date::parse(date) + .map(|date| self == &date) + .unwrap_or(false) + } +} + +impl fmt::Display for Date { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let (y, m, d) = self.to_ymd(); + write!(f, "{}-{:02}-{:02}", y, m, d) + } +} + +#[cfg(test)] +mod tests { + use super::Date; + + macro_rules! reflexive_display { + ($string:expr) => ( + assert_eq!(Date::parse($string).unwrap().to_string(), $string); + ) + } + + #[test] + fn display() { + reflexive_display!("2019-05-08"); + reflexive_display!("2000-01-01"); + reflexive_display!("2000-12-31"); + reflexive_display!("2090-12-31"); + reflexive_display!("1999-02-19"); + } +} diff --git a/third_party/rust/version_check/src/lib.rs b/third_party/rust/version_check/src/lib.rs index a062062286..6df728e25a 100644 --- a/third_party/rust/version_check/src/lib.rs +++ b/third_party/rust/version_check/src/lib.rs @@ -4,96 +4,96 @@ //! `RUSTC` environment variable. If it is not set, then `rustc` is used. If //! that fails, no determination is made, and calls return `None`. //! -//! # Example +//! # Examples //! -//! Check that the running compiler is a nightly release: +//! Set a `cfg` flag in `build.rs` if the running compiler was determined to be +//! at least version `1.13.0`: //! //! ```rust -//! extern crate version_check; +//! extern crate version_check as rustc; //! -//! match version_check::is_nightly() { -//! Some(true) => "running a nightly", -//! Some(false) => "not nightly", -//! None => "couldn't figure it out" +//! if rustc::is_min_version("1.13.0").unwrap_or(false) { +//! println!("cargo:rustc-cfg=question_mark_operator"); +//! } +//! ``` +//! +//! See [`is_max_version`] or [`is_exact_version`] to check if the compiler +//! is _at most_ or _exactly_ a certain version. +//! +//! 
Check that the running compiler was released on or after `2018-12-18`: +//! +//! ```rust +//! extern crate version_check as rustc; +//! +//! match rustc::is_min_date("2018-12-18") { +//! Some(true) => "Yep! It's recent!", +//! Some(false) => "No, it's older.", +//! None => "Couldn't determine the rustc version." //! }; //! ``` //! -//! Check that the running compiler is at least version `1.13.0`: +//! See [`is_max_date`] or [`is_exact_date`] to check if the compiler was +//! released _prior to_ or _exactly on_ a certain date. +//! +//! Check that the running compiler supports feature flags: //! //! ```rust -//! extern crate version_check; +//! extern crate version_check as rustc; //! -//! match version_check::is_min_version("1.13.0") { -//! Some((true, version)) => format!("Yes! It's: {}", version), -//! Some((false, version)) => format!("No! {} is too old!", version), -//! None => "couldn't figure it out".into() +//! match rustc::is_feature_flaggable() { +//! Some(true) => "Yes! It's a dev or nightly release!", +//! Some(false) => "No, it's stable or beta.", +//! None => "Couldn't determine the rustc version." //! }; //! ``` //! -//! Check that the running compiler was released on or after `2016-12-18`: +//! Check that the running compiler is on the stable channel: //! //! ```rust -//! extern crate version_check; +//! extern crate version_check as rustc; //! -//! match version_check::is_min_date("2016-12-18") { -//! Some((true, date)) => format!("Yes! It's: {}", date), -//! Some((false, date)) => format!("No! {} is too long ago!", date), -//! None => "couldn't figure it out".into() +//! match rustc::Channel::read() { +//! Some(c) if c.is_stable() => format!("Yes! It's stable."), +//! Some(c) => format!("No, the channel {} is not stable.", c), +//! None => format!("Couldn't determine the rustc version.") //! }; //! ``` //! +//! To interact with the version, release date, and release channel as structs, +//! use [`Version`], [`Date`], and [`Channel`], respectively. The [`triple()`] +//! function returns all three values efficiently. +//! //! # Alternatives //! //! This crate is dead simple with no dependencies. If you need something more -//! and don't care about panicking if the version cannot be obtained or adding -//! dependencies, see [rustc_version](https://crates.io/crates/rustc_version). +//! and don't care about panicking if the version cannot be obtained, or if you +//! don't mind adding dependencies, see +//! [rustc_version](https://crates.io/crates/rustc_version). -use std::env; -use std::process::Command; - -// Convert a string of %Y-%m-%d to a single u32 maintaining ordering. -fn str_to_ymd(ymd: &str) -> Option { - let ymd: Vec = ymd.split("-").filter_map(|s| s.parse::().ok()).collect(); - if ymd.len() != 3 { - return None - } - - let (y, m, d) = (ymd[0], ymd[1], ymd[2]); - Some((y << 9) | (m << 5) | d) -} +#![allow(deprecated)] -// Convert a string with prefix major-minor-patch to a single u64 maintaining -// ordering. Assumes none of the components are > 1048576. 
-fn str_to_mmp(mmp: &str) -> Option { - let mut mmp: Vec = mmp.split('-') - .nth(0) - .unwrap_or("") - .split('.') - .filter_map(|s| s.parse::().ok()) - .collect(); - - if mmp.is_empty() { - return None - } +mod version; +mod channel; +mod date; - while mmp.len() < 3 { - mmp.push(0); - } +use std::env; +use std::process::Command; - let (maj, min, patch) = (mmp[0] as u64, mmp[1] as u64, mmp[2] as u64); - Some((maj << 32) | (min << 16) | patch) -} +#[doc(inline)] pub use version::*; +#[doc(inline)] pub use channel::*; +#[doc(inline)] pub use date::*; -/// Returns (version, date) as available. +/// Parses (version, date) as available from rustc version string. fn version_and_date_from_rustc_version(s: &str) -> (Option, Option) { let last_line = s.lines().last().unwrap_or(s); let mut components = last_line.trim().split(" "); let version = components.nth(1); - let date = components.nth(1).map(|s| s.trim_right().trim_right_matches(")")); + let date = components.filter(|c| c.ends_with(')')).next() + .map(|s| s.trim_right().trim_right_matches(")").trim_left().trim_left_matches('(')); (version.map(|s| s.to_string()), date.map(|s| s.to_string())) } -/// Returns (version, date) as available. +/// Returns (version, date) as available from `rustc --version`. fn get_version_and_date() -> Option<(Option, Option)> { env::var("RUSTC").ok() .and_then(|rustc| Command::new(rustc).arg("--version").output().ok()) @@ -102,154 +102,187 @@ fn get_version_and_date() -> Option<(Option, Option)> { .map(|s| version_and_date_from_rustc_version(&s)) } -/// Checks that the running or installed `rustc` was released no earlier than +/// Reads the triple of [`Version`], [`Channel`], and [`Date`] of the installed +/// or running `rustc`. +/// +/// If any attribute cannot be determined (see the [top-level +/// documentation](crate)), returns `None`. +/// +/// To obtain only one of three attributes, use [`Version::read()`], +/// [`Channel::read()`], or [`Date::read()`]. +pub fn triple() -> Option<(Version, Channel, Date)> { + let (version_str, date_str) = match get_version_and_date() { + Some((Some(version), Some(date))) => (version, date), + _ => return None + }; + + // Can't use `?` or `try!` for `Option` in 1.0.0. + match Version::parse(&version_str) { + Some(version) => match Channel::parse(&version_str) { + Some(channel) => match Date::parse(&date_str) { + Some(date) => Some((version, channel, date)), + _ => None, + }, + _ => None, + }, + _ => None + } +} + +/// Checks that the running or installed `rustc` was released **on or after** /// some date. /// /// The format of `min_date` must be YYYY-MM-DD. For instance: `2016-12-20` or /// `2017-01-09`. /// /// If the date cannot be retrieved or parsed, or if `min_date` could not be -/// parsed, returns `None`. Otherwise returns a tuple where the first value is -/// `true` if the installed `rustc` is at least from `min_data` and the second -/// value is the date (in YYYY-MM-DD) of the installed `rustc`. -pub fn is_min_date(min_date: &str) -> Option<(bool, String)> { - if let Some((_, Some(actual_date_str))) = get_version_and_date() { - str_to_ymd(&actual_date_str) - .and_then(|actual| str_to_ymd(min_date).map(|min| (min, actual))) - .map(|(min, actual)| (actual >= min, actual_date_str)) - } else { - None +/// parsed, returns `None`. Otherwise returns `true` if the installed `rustc` +/// was release on or after `min_date` and `false` otherwise. 
+pub fn is_min_date(min_date: &str) -> Option { + match (Date::read(), Date::parse(min_date)) { + (Some(rustc_date), Some(min_date)) => Some(rustc_date >= min_date), + _ => None } } -/// Checks that the running or installed `rustc` is at least some minimum -/// version. +/// Checks that the running or installed `rustc` was released **on or before** +/// some date. /// -/// The format of `min_version` is a semantic version: `1.3.0`, `1.15.0-beta`, -/// `1.14.0`, `1.16.0-nightly`, etc. +/// The format of `max_date` must be YYYY-MM-DD. For instance: `2016-12-20` or +/// `2017-01-09`. /// -/// If the version cannot be retrieved or parsed, or if `min_version` could not -/// be parsed, returns `None`. Otherwise returns a tuple where the first value -/// is `true` if the installed `rustc` is at least `min_version` and the second -/// value is the version (semantic) of the installed `rustc`. -pub fn is_min_version(min_version: &str) -> Option<(bool, String)> { - if let Some((Some(actual_version_str), _)) = get_version_and_date() { - str_to_mmp(&actual_version_str) - .and_then(|actual| str_to_mmp(min_version).map(|min| (min, actual))) - .map(|(min, actual)| (actual >= min, actual_version_str)) - } else { - None +/// If the date cannot be retrieved or parsed, or if `max_date` could not be +/// parsed, returns `None`. Otherwise returns `true` if the installed `rustc` +/// was release on or before `max_date` and `false` otherwise. +pub fn is_max_date(max_date: &str) -> Option { + match (Date::read(), Date::parse(max_date)) { + (Some(rustc_date), Some(max_date)) => Some(rustc_date <= max_date), + _ => None } } -fn version_channel_is(channel: &str) -> Option { - get_version_and_date() - .and_then(|(version_str_opt, _)| version_str_opt) - .map(|version_str| version_str.contains(channel)) +/// Checks that the running or installed `rustc` was released **exactly** on +/// some date. +/// +/// The format of `date` must be YYYY-MM-DD. For instance: `2016-12-20` or +/// `2017-01-09`. +/// +/// If the date cannot be retrieved or parsed, or if `date` could not be parsed, +/// returns `None`. Otherwise returns `true` if the installed `rustc` was +/// release on `date` and `false` otherwise. +pub fn is_exact_date(date: &str) -> Option { + match (Date::read(), Date::parse(date)) { + (Some(rustc_date), Some(date)) => Some(rustc_date == date), + _ => None + } } -/// Determines whether the running or installed `rustc` is on the nightly -/// channel. +/// Checks that the running or installed `rustc` is **at least** some minimum +/// version. /// -/// If the version could not be determined, returns `None`. Otherwise returns -/// `Some(true)` if the running version is a nightly release, and `Some(false)` -/// otherwise. -pub fn is_nightly() -> Option { - version_channel_is("nightly") +/// The format of `min_version` is a semantic version: `1.3.0`, `1.15.0-beta`, +/// `1.14.0`, `1.16.0-nightly`, etc. +/// +/// If the version cannot be retrieved or parsed, or if `min_version` could not +/// be parsed, returns `None`. Otherwise returns `true` if the installed `rustc` +/// is at least `min_version` and `false` otherwise. +pub fn is_min_version(min_version: &str) -> Option { + match (Version::read(), Version::parse(min_version)) { + (Some(rustc_ver), Some(min_ver)) => Some(rustc_ver >= min_ver), + _ => None + } } -/// Determines whether the running or installed `rustc` is on the beta channel. +/// Checks that the running or installed `rustc` is **at most** some maximum +/// version. 
/// -/// If the version could not be determined, returns `None`. Otherwise returns -/// `Some(true)` if the running version is a beta release, and `Some(false)` -/// otherwise. -pub fn is_beta() -> Option { - version_channel_is("beta") +/// The format of `max_version` is a semantic version: `1.3.0`, `1.15.0-beta`, +/// `1.14.0`, `1.16.0-nightly`, etc. +/// +/// If the version cannot be retrieved or parsed, or if `max_version` could not +/// be parsed, returns `None`. Otherwise returns `true` if the installed `rustc` +/// is at most `max_version` and `false` otherwise. +pub fn is_max_version(max_version: &str) -> Option { + match (Version::read(), Version::parse(max_version)) { + (Some(rustc_ver), Some(max_ver)) => Some(rustc_ver <= max_ver), + _ => None + } } -/// Determines whether the running or installed `rustc` is on the dev channel. +/// Checks that the running or installed `rustc` is **exactly** some version. /// -/// If the version could not be determined, returns `None`. Otherwise returns -/// `Some(true)` if the running version is a dev release, and `Some(false)` -/// otherwise. -pub fn is_dev() -> Option { - version_channel_is("dev") +/// The format of `version` is a semantic version: `1.3.0`, `1.15.0-beta`, +/// `1.14.0`, `1.16.0-nightly`, etc. +/// +/// If the version cannot be retrieved or parsed, or if `version` could not be +/// parsed, returns `None`. Otherwise returns `true` if the installed `rustc` is +/// exactly `version` and `false` otherwise. +pub fn is_exact_version(version: &str) -> Option { + match (Version::read(), Version::parse(version)) { + (Some(rustc_ver), Some(version)) => Some(rustc_ver == version), + _ => None + } } -/// Determines whether the running or installed `rustc` supports feature flags. +/// Checks whether the running or installed `rustc` supports feature flags. +/// /// In other words, if the channel is either "nightly" or "dev". /// /// If the version could not be determined, returns `None`. Otherwise returns -/// `Some(true)` if the running version supports features, and `Some(false)` -/// otherwise. -pub fn supports_features() -> Option { - match is_nightly() { - b@Some(true) => b, - _ => is_dev() - } +/// `true` if the running version supports feature flags and `false` otherwise. +pub fn is_feature_flaggable() -> Option { + Channel::read().map(|c| c.supports_features()) } #[cfg(test)] mod tests { use super::version_and_date_from_rustc_version; - use super::str_to_mmp; - - macro_rules! check_mmp { - ($string:expr => ($x:expr, $y:expr, $z:expr)) => ( - if let Some(mmp) = str_to_mmp($string) { - let expected = $x << 32 | $y << 16 | $z; - if mmp != expected { - panic!("{:?} didn't parse as {}.{}.{}.", $string, $x, $y, $z); - } - } else { - panic!("{:?} didn't parse for mmp testing.", $string); - } - ) - } - macro_rules! check_version { - ($s:expr => ($x:expr, $y:expr, $z:expr)) => ( - if let (Some(version_str), _) = version_and_date_from_rustc_version($s) { - check_mmp!(&version_str => ($x, $y, $z)); + macro_rules! 
check_parse { + ($s:expr => $v:expr, $d:expr) => ( + if let (Some(v), d) = version_and_date_from_rustc_version($s) { + let e_d: Option<&str> = $d.into(); + assert_eq!((v, d), ($v.into(), e_d.map(|s| s.into()))); } else { panic!("{:?} didn't parse for version testing.", $s); } ) } - #[test] - fn test_str_to_mmp() { - check_mmp!("1.18.0" => (1, 18, 0)); - check_mmp!("1.19.0" => (1, 19, 0)); - check_mmp!("1.19.0-nightly" => (1, 19, 0)); - check_mmp!("1.12.2349" => (1, 12, 2349)); - check_mmp!("0.12" => (0, 12, 0)); - check_mmp!("1.12.5" => (1, 12, 5)); - check_mmp!("1.12" => (1, 12, 0)); - check_mmp!("1" => (1, 0, 0)); - } - #[test] fn test_version_parse() { - check_version!("rustc 1.18.0" => (1, 18, 0)); - check_version!("rustc 1.8.0" => (1, 8, 0)); - check_version!("rustc 1.20.0-nightly" => (1, 20, 0)); - check_version!("rustc 1.20" => (1, 20, 0)); - check_version!("rustc 1.3" => (1, 3, 0)); - check_version!("rustc 1" => (1, 0, 0)); - check_version!("rustc 1.2.5.6" => (1, 2, 5)); - check_version!("rustc 1.5.1-beta" => (1, 5, 1)); - check_version!("rustc 1.20.0-nightly (d84693b93 2017-07-09)" => (1, 20, 0)); - check_version!("rustc 1.20.0 (d84693b93 2017-07-09)" => (1, 20, 0)); - check_version!("rustc 1.20.0 (2017-07-09)" => (1, 20, 0)); - check_version!("rustc 1.20.0-dev (2017-07-09)" => (1, 20, 0)); - - check_version!("warning: invalid logging spec 'warning', ignoring it - rustc 1.30.0-nightly (3bc2ca7e4 2018-09-20)" => (1, 30, 0)); - check_version!("warning: invalid logging spec 'warning', ignoring it\n - rustc 1.30.0-nightly (3bc2ca7e4 2018-09-20)" => (1, 30, 0)); - check_version!("warning: invalid logging spec 'warning', ignoring it - warning: something else went wrong - rustc 1.30.0-nightly (3bc2ca7e4 2018-09-20)" => (1, 30, 0)); + check_parse!("rustc 1.18.0" => "1.18.0", None); + check_parse!("rustc 1.8.0" => "1.8.0", None); + check_parse!("rustc 1.20.0-nightly" => "1.20.0-nightly", None); + check_parse!("rustc 1.20" => "1.20", None); + check_parse!("rustc 1.3" => "1.3", None); + check_parse!("rustc 1" => "1", None); + check_parse!("rustc 1.5.1-beta" => "1.5.1-beta", None); + + // Because of 1.0.0, we can't use Option: From. + check_parse!("rustc 1.20.0 (2017-07-09)" + => "1.20.0", Some("2017-07-09")); + + check_parse!("rustc 1.20.0-dev (2017-07-09)" + => "1.20.0-dev", Some("2017-07-09")); + + check_parse!("rustc 1.20.0-nightly (d84693b93 2017-07-09)" + => "1.20.0-nightly", Some("2017-07-09")); + + check_parse!("rustc 1.20.0 (d84693b93 2017-07-09)" + => "1.20.0", Some("2017-07-09")); + + check_parse!("warning: invalid logging spec 'warning', ignoring it + rustc 1.30.0-nightly (3bc2ca7e4 2018-09-20)" + => "1.30.0-nightly", Some("2018-09-20")); + + check_parse!("warning: invalid logging spec 'warning', ignoring it\n + rustc 1.30.0-nightly (3bc2ca7e4 2018-09-20)" + => "1.30.0-nightly", Some("2018-09-20")); + + check_parse!("warning: invalid logging spec 'warning', ignoring it + warning: something else went wrong + rustc 1.30.0-nightly (3bc2ca7e4 2018-09-20)" + => "1.30.0-nightly", Some("2018-09-20")); } } diff --git a/third_party/rust/version_check/src/version.rs b/third_party/rust/version_check/src/version.rs new file mode 100644 index 0000000000..a0738d983a --- /dev/null +++ b/third_party/rust/version_check/src/version.rs @@ -0,0 +1,238 @@ +use std::fmt; + +/// Version number: `major.minor.patch`, ignoring release channel. 
+#[derive(Debug, PartialEq, Eq, Copy, Clone, PartialOrd, Ord)] +pub struct Version(u64); + +impl Version { + fn to_mmp(&self) -> (u16, u16, u16) { + let major = self.0 >> 32; + let minor = (self.0 << 32) >> 48; + let patch = (self.0 << 48) >> 48; + (major as u16, minor as u16, patch as u16) + } + + /// Reads the version of the running compiler. If it cannot be determined + /// (see the [top-level documentation](crate)), returns `None`. + /// + /// # Example + /// + /// ```rust + /// use version_check::Version; + /// + /// match Version::read() { + /// Some(d) => format!("Version is: {}", d), + /// None => format!("Failed to read the version.") + /// }; + /// ``` + pub fn read() -> Option { + ::get_version_and_date() + .and_then(|(version, _)| version) + .and_then(|version| Version::parse(&version)) + } + + + /// Parse a Rust release version (of the form + /// `major[.minor[.patch[-channel]]]`), ignoring the release channel, if + /// any. Returns `None` if `version` is not a valid Rust version string. + /// + /// # Example + /// + /// ```rust + /// use version_check::Version; + /// + /// let version = Version::parse("1.18.0").unwrap(); + /// assert!(version.exactly("1.18.0")); + /// + /// let version = Version::parse("1.20.0-nightly").unwrap(); + /// assert!(version.exactly("1.20.0")); + /// assert!(version.exactly("1.20.0-beta")); + /// + /// let version = Version::parse("1.3").unwrap(); + /// assert!(version.exactly("1.3.0")); + /// + /// let version = Version::parse("1").unwrap(); + /// assert!(version.exactly("1.0.0")); + /// + /// assert!(Version::parse("one.two.three").is_none()); + /// ``` + pub fn parse(version: &str) -> Option { + let mut mmp: Vec = version.split('-') + .nth(0) + .unwrap_or("") + .split('.') + .filter_map(|s| s.parse::().ok()) + .collect(); + + if mmp.is_empty() { + return None + } + + while mmp.len() < 3 { + mmp.push(0); + } + + let (maj, min, patch) = (mmp[0] as u64, mmp[1] as u64, mmp[2] as u64); + Some(Version((maj << 32) | (min << 16) | patch)) + } + + /// Returns `true` if `self` is greater than or equal to `version`. + /// + /// If `version` is greater than `self`, or if `version` is not a valid Rust + /// version string, returns `false`. + /// + /// # Example + /// + /// ```rust + /// use version_check::Version; + /// + /// let version = Version::parse("1.35.0").unwrap(); + /// + /// assert!(version.at_least("1.33.0")); + /// assert!(version.at_least("1.35.0")); + /// assert!(version.at_least("1.13.2")); + /// + /// assert!(!version.at_least("1.35.1")); + /// assert!(!version.at_least("1.55.0")); + /// ``` + pub fn at_least(&self, version: &str) -> bool { + Version::parse(version) + .map(|version| self >= &version) + .unwrap_or(false) + } + + /// Returns `true` if `self` is less than or equal to `version`. + /// + /// If `version` is less than `self`, or if `version` is not a valid Rust + /// version string, returns `false`. + /// + /// # Example + /// + /// ```rust + /// use version_check::Version; + /// + /// let version = Version::parse("1.35.0").unwrap(); + /// + /// assert!(version.at_most("1.35.1")); + /// assert!(version.at_most("1.55.0")); + /// assert!(version.at_most("1.35.0")); + /// + /// assert!(!version.at_most("1.33.0")); + /// assert!(!version.at_most("1.13.2")); + /// ``` + pub fn at_most(&self, version: &str) -> bool { + Version::parse(version) + .map(|version| self <= &version) + .unwrap_or(false) + } + + /// Returns `true` if `self` is exactly equal to `version`. 
+ /// + /// If `version` is not equal to `self`, or if `version` is not a valid Rust + /// version string, returns `false`. + /// + /// # Example + /// + /// ```rust + /// use version_check::Version; + /// + /// let version = Version::parse("1.35.0").unwrap(); + /// + /// assert!(version.exactly("1.35.0")); + /// + /// assert!(!version.exactly("1.33.0")); + /// assert!(!version.exactly("1.35.1")); + /// assert!(!version.exactly("1.13.2")); + /// ``` + pub fn exactly(&self, version: &str) -> bool { + Version::parse(version) + .map(|version| self == &version) + .unwrap_or(false) + } +} + +impl fmt::Display for Version { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let (major, minor, patch) = self.to_mmp(); + write!(f, "{}.{}.{}", major, minor, patch) + } +} + +#[cfg(test)] +mod tests { + use super::Version; + + macro_rules! check_mmp { + ($s:expr => ($x:expr, $y:expr, $z:expr)) => ( + if let Some(v) = Version::parse($s) { + if v.to_mmp() != ($x, $y, $z) { + panic!("{:?} ({}) didn't parse as {}.{}.{}.", $s, v, $x, $y, $z); + } + } else { + panic!("{:?} didn't parse for mmp testing.", $s); + } + ) + } + + #[test] + fn test_str_to_mmp() { + check_mmp!("1.18.0" => (1, 18, 0)); + check_mmp!("3.19.0" => (3, 19, 0)); + check_mmp!("1.19.0-nightly" => (1, 19, 0)); + check_mmp!("1.12.2349" => (1, 12, 2349)); + check_mmp!("0.12" => (0, 12, 0)); + check_mmp!("1.12.5" => (1, 12, 5)); + check_mmp!("1.12" => (1, 12, 0)); + check_mmp!("1" => (1, 0, 0)); + check_mmp!("1.4.4-nightly (d84693b93 2017-07-09)" => (1, 4, 4)); + check_mmp!("1.58879.4478-dev" => (1, 58879, 4478)); + check_mmp!("1.58879.4478-dev (d84693b93 2017-07-09)" => (1, 58879, 4478)); + } + + #[test] + fn test_comparisons() { + let version = Version::parse("1.18.0").unwrap(); + assert!(version.exactly("1.18.0")); + assert!(version.at_least("1.12.0")); + assert!(version.at_least("1.12")); + assert!(version.at_least("1")); + assert!(version.at_most("1.18.1")); + assert!(!version.exactly("1.19.0")); + assert!(!version.exactly("1.18.1")); + + let version = Version::parse("1.20.0-nightly").unwrap(); + assert!(version.exactly("1.20.0-beta")); + assert!(version.exactly("1.20.0-nightly")); + assert!(version.exactly("1.20.0")); + assert!(!version.exactly("1.19")); + + let version = Version::parse("1.3").unwrap(); + assert!(version.exactly("1.3.0")); + assert!(version.exactly("1.3.0-stable")); + assert!(version.exactly("1.3")); + assert!(!version.exactly("1.5.0-stable")); + + let version = Version::parse("1").unwrap(); + assert!(version.exactly("1.0.0")); + assert!(version.exactly("1.0")); + assert!(version.exactly("1")); + + assert!(Version::parse("one.two.three").is_none()); + } + + macro_rules! 
reflexive_display { + ($s:expr) => ( + assert_eq!(Version::parse($s).unwrap().to_string(), $s); + ) + } + + #[test] + fn display() { + reflexive_display!("1.0.0"); + reflexive_display!("1.2.3"); + reflexive_display!("1.12.1438"); + reflexive_display!("1.44.0"); + reflexive_display!("2.44.0"); + reflexive_display!("23459.28923.3483"); + } +} diff --git a/third_party/rust/weedle/.cargo-checksum.json b/third_party/rust/weedle/.cargo-checksum.json index 7045d2a652..3fadecee00 100644 --- a/third_party/rust/weedle/.cargo-checksum.json +++ b/third_party/rust/weedle/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.toml":"ad1bfb42e7fc39eb9c2094164bf3a918795a0ae93e187977725b4bdeae414dbe","LICENSE.md":"47cdc00faacda4d0ac0dabfcef03ab6f4bef6a02f80cb3fd0482047f00f9a16d","README.md":"5eb88c1f2a2caae987dddd81108019cf39402e9e55cd5217d9b12e006cbff4f1","src/argument.rs":"eb29c4625ffa5474ac2b3f7575ac3ffb04ab01e474ca7b4c403b706d881bc32e","src/attribute.rs":"dd056cdeb031ba8eaf8670f64a0e90fd415d9bc042c072131f5ba1e9910da8d4","src/common.rs":"14024d889404f79f025a6764e0ef30de8b2a87bc7778498d30d4e0ab3b96c884","src/dictionary.rs":"ff64cda1390e3e8d66d4eec2217ab2b46969ab7c057931f48d193e0d27e4c317","src/interface.rs":"4277746a1b621222dea2bbf076a8bc7a4474465a7156c900033fda1859353b4f","src/lib.rs":"d8c8a39d2e92eff670731c077fd5f3ec453477f6f0a6ade1eede28cef83b9c46","src/literal.rs":"f95f8eeb26c713ee0423b223a7918a76f7e210eb9de0f4778d5fdfac89f4ec27","src/macros.rs":"fd5584c0103b4882d1f4c8bf064fb5e946234d0b5224c2cd46b81a0105fcef45","src/mixin.rs":"7f4324723851bfeb2a4e047e100f8db4676a1a4397aefe734cd664a353b92266","src/namespace.rs":"42dc795bd4202ce6f735eadcb0d69991ae82192ec6e841e18084c250efefcd84","src/term.rs":"e4a226abd22b9a0f91b7da7dde3aed88906915f3485d4115e123be7c190ae26a","src/types.rs":"2f897b3a7192d1e2f689dfac2343f8462516c06cdd16ea41b97e7597b538c73e","src/whitespace.rs":"b568c9749f25ce0fa5868c1d1badb7dd862f3e970ebc33c199d11ec4788870b5","tests/defs/dom.webidl":"93a70734538233a6b9db7578375b5a3d8783bcdcfc9970cc61cddcd03c832d6c","tests/defs/html.webidl":"acc65caf32ac946f457321dc0d0f1e3beb3d4f2ec6abb5f13e5e52fc01df1603","tests/webidl.rs":"ba69b110599170e5e9d303e9735dd8073a2e1cbacc357f5e4e88d9eec2c1c435"},"package":"26a4c67f132386d965390b8a734d5d10adbcd30eb5cc74bd9229af8b83f10044"} \ No newline at end of file 
+{"files":{"Cargo.toml":"84dc605d368f68a3e25b2e240552ce6e2a8731e80f73cfd4cb0b931528b473a7","LICENSE.md":"47cdc00faacda4d0ac0dabfcef03ab6f4bef6a02f80cb3fd0482047f00f9a16d","README.md":"e4363ea12b63bb6d3e461396f6fd46fe4cbf9a8635258499f4d551dadfc06655","src/argument.rs":"be162d59a9f1ef7d9d51d0f3492946f0dd263202af8355eeaee14ce4181775fc","src/attribute.rs":"b3577059be9c1262d3360b4b4cf4cc6cb052251e5b9d64906fb40fc5e925da69","src/common.rs":"7ac264f83ff596914af32fb08947cc500a75df57e43d9b2fce93163e5a20a738","src/dictionary.rs":"be209d70b0db33acf753ca63998a63dd37a9d4b193583a1490782781360c2d01","src/interface.rs":"4e1f37079dec75633b26289b82bda0d3b4a1479a4aede74c01e20fe6c2d93496","src/lib.rs":"762c35dc6ef77b6b1c871bd0391d09203a479937e4688a029a17c8d8a5f9e8ba","src/literal.rs":"dd58d410e2329633e2b2151157b29671ad5f78c8be0bec5c81be083168311f94","src/macros.rs":"aa95fd957d5175fb2779faec5d40b717fb5efd581f0f13ef7ba3510f37092da3","src/mixin.rs":"d65ff3a49615dabdf3f7845723f01ae730a83c8ef5bdd9a945d35149a9ef2858","src/namespace.rs":"fe6b406c2ab8bd904d0dff8a12321c77f4082ab364559e578e53ab415c8541fa","src/term.rs":"48ac583ae73d115afbedca6e7e9f32b316b7a308ba37cb9153eeac9773561028","src/types.rs":"3f81a1d820f0b6777f46c74366359953bf79465c3eb1ace4b3a383c22f4e1d88","src/whitespace.rs":"069df2ae0f37d621a8ef84b08408c090ff1ead8164e52caa8ea7d57838f04cac","tests/defs/dom.webidl":"93a70734538233a6b9db7578375b5a3d8783bcdcfc9970cc61cddcd03c832d6c","tests/defs/html.webidl":"4b2be1a0435b1fc26e60083006e2ecf7158611c190f95e5c4c8290510de22777","tests/defs/interface-constructor.webidl":"8b31240c838607e460086cb55a57a111605199f88f4291a83f7125c8c3c4fd1a","tests/defs/mediacapture-streams.webidl":"82615b15129e87aac56a0ca74bd763c5416732020a2b567dc4582835e0846144","tests/webidl.rs":"097683a01f3c058be3341d7d8b0af18964686059485941cbee22e63b94406c1f"},"package":"8a7d4f9feb723a800d8f7b74edc9fa44ff35cb0b2ec64886714362f423427f37"} \ No newline at end of file diff --git a/third_party/rust/weedle/Cargo.toml b/third_party/rust/weedle/Cargo.toml index 6a4ad31586..ea808e820a 100644 --- a/third_party/rust/weedle/Cargo.toml +++ b/third_party/rust/weedle/Cargo.toml @@ -3,7 +3,7 @@ # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g. crates.io) dependencies +# to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're @@ -11,8 +11,9 @@ # will likely look very different (and much more reasonable) [package] +edition = "2018" name = "weedle" -version = "0.8.0" +version = "0.11.0" authors = ["Sharad Chand "] description = "A WebIDL Parser" homepage = "https://github.com/rustwasm/weedle" @@ -21,4 +22,6 @@ readme = "./README.md" license = "MIT" repository = "https://github.com/rustwasm/weedle" [dependencies.nom] -version = "4.0.0" +version = "5.0.0" +features = ["std"] +default-features = false diff --git a/third_party/rust/weedle/README.md b/third_party/rust/weedle/README.md index f133c1e916..acd0182f21 100644 --- a/third_party/rust/weedle/README.md +++ b/third_party/rust/weedle/README.md @@ -1,26 +1,49 @@ -# Weedle - A WebIDL Parser +

+ +## About Parses valid WebIDL definitions & produces a data structure starting from -[`Definitions`](https://docs.rs/weedle/0.5.0/weedle/struct.Definitions.html). +[`Definitions`](https://docs.rs/weedle/latest/weedle/type.Definitions.html). -### Basic Usage +## Usage -In Cargo.toml -``` +### `Cargo.toml` + +```toml [dependencies] -weedle = "0.5.0" +weedle = "0.9.0" ``` -Then, in `src/main.rs` -```rust -extern crate weedle; +### `src/main.rs` +```rust fn main() { let parsed = weedle::parse(" interface Window { readonly attribute Storage sessionStorage; }; ").unwrap(); + println!("{:?}", parsed); } -``` \ No newline at end of file +``` diff --git a/third_party/rust/weedle/src/argument.rs b/third_party/rust/weedle/src/argument.rs index 884c603ca9..f2ac5a0133 100644 --- a/third_party/rust/weedle/src/argument.rs +++ b/third_party/rust/weedle/src/argument.rs @@ -1,6 +1,6 @@ -use attribute::ExtendedAttributeList; -use common::{Default, Identifier, Punctuated}; -use types::{AttributedType, Type}; +use crate::attribute::ExtendedAttributeList; +use crate::common::{Default, Identifier, Punctuated}; +use crate::types::{AttributedType, Type}; /// Parses a list of argument. Ex: `double v1, double v2, double v3, optional double alpha` pub type ArgumentList<'a> = Punctuated, term!(,)>; @@ -34,8 +34,8 @@ ast_types! { #[cfg(test)] mod test { use super::*; - use literal::{DecLit, DefaultValue, IntegerLit}; - use Parse; + use crate::literal::{DecLit, DefaultValue, IntegerLit}; + use crate::Parse; test!(should_parse_single_argument { "short a" => ""; diff --git a/third_party/rust/weedle/src/attribute.rs b/third_party/rust/weedle/src/attribute.rs index e4665cd0b0..0a70450023 100644 --- a/third_party/rust/weedle/src/attribute.rs +++ b/third_party/rust/weedle/src/attribute.rs @@ -1,6 +1,6 @@ -use argument::ArgumentList; -use common::{Braced, Bracketed, Identifier, Punctuated}; -use literal::StringLit; +use crate::argument::ArgumentList; +use crate::common::{Bracketed, Identifier, Parenthesized, Punctuated}; +use crate::literal::StringLit; /// Parses a list of attributes. Ex: `[ attribute1, attribute2 ]` pub type ExtendedAttributeList<'a> = Bracketed, term!(,)>>; @@ -16,7 +16,7 @@ ast_types! { /// (( )) means ( ) chars ArgList(struct ExtendedAttributeArgList<'a> { identifier: Identifier<'a>, - args: Braced>, + args: Parenthesized>, }), /// Parses a named argument list. Ex: `NamedConstructor=Image((DOMString src))` /// @@ -25,7 +25,7 @@ ast_types! { lhs_identifier: Identifier<'a>, assign: term!(=), rhs_identifier: Identifier<'a>, - args: Braced>, + args: Parenthesized>, }), /// Parses an identifier list. Ex: `Exposed=((Window,Worker))` @@ -34,7 +34,7 @@ ast_types! { IdentList(struct ExtendedAttributeIdentList<'a> { identifier: Identifier<'a>, assign: term!(=), - list: Braced>, + list: Parenthesized>, }), /// Parses an attribute with an identifier. Ex: `PutForwards=name` #[derive(Copy)] @@ -61,7 +61,7 @@ ast_types! 
{ #[cfg(test)] mod test { use super::*; - use Parse; + use crate::Parse; test!(should_parse_attribute_no_args { "Replaceable" => ""; diff --git a/third_party/rust/weedle/src/common.rs b/third_party/rust/weedle/src/common.rs index 4ba6f0f5c5..81c6aa21a0 100644 --- a/third_party/rust/weedle/src/common.rs +++ b/third_party/rust/weedle/src/common.rs @@ -1,6 +1,6 @@ -use literal::DefaultValue; -use term; -use Parse; +use crate::literal::DefaultValue; +use crate::term; +use crate::Parse; impl<'a, T: Parse<'a>> Parse<'a> for Option { parser!(opt!(weedle!(T))); @@ -26,7 +26,7 @@ impl<'a, T: Parse<'a>, U: Parse<'a>, V: Parse<'a>> Parse<'a> for (T, U, V) { } ast_types! { - /// Parses `{ body }` + /// Parses `( body )` #[derive(Copy, Default)] struct Parenthesized where [T: Parse<'a>] { open_paren: term::OpenParen, @@ -42,7 +42,7 @@ ast_types! { close_bracket: term::CloseBracket, } - /// Parses `( body )` + /// Parses `{ body }` #[derive(Copy, Default)] struct Braced where [T: Parse<'a>] { open_brace: term::OpenBrace, @@ -87,7 +87,7 @@ ast_types! { take_while!(|c: char| c.is_ascii_alphanumeric() || c == '_' || c == '-') >> (()) )) >> - (id.0) + (id) )), ) @@ -126,7 +126,7 @@ mod test { len() == 3; }); - test!(should_parse_parenthesized { "{ one }" => + test!(should_parse_parenthesized { "( one )" => ""; Parenthesized; body.0 == "one"; @@ -138,7 +138,7 @@ mod test { body.0 == "one"; }); - test!(should_parse_braced { "( one )" => + test!(should_parse_braced { "{ one }" => ""; Braced; body.0 == "one"; diff --git a/third_party/rust/weedle/src/dictionary.rs b/third_party/rust/weedle/src/dictionary.rs index 84a0dfefa2..3c9b23cac5 100644 --- a/third_party/rust/weedle/src/dictionary.rs +++ b/third_party/rust/weedle/src/dictionary.rs @@ -1,6 +1,6 @@ -use attribute::ExtendedAttributeList; -use common::{Default, Identifier}; -use types::Type; +use crate::attribute::ExtendedAttributeList; +use crate::common::{Default, Identifier}; +use crate::types::Type; /// Parses dictionary members pub type DictionaryMembers<'a> = Vec>; @@ -20,7 +20,7 @@ ast_types! { #[cfg(test)] mod test { use super::*; - use Parse; + use crate::Parse; test!(should_parse_dictionary_member { "required long num = 5;" => ""; diff --git a/third_party/rust/weedle/src/interface.rs b/third_party/rust/weedle/src/interface.rs index 1b104898dd..27945669cb 100644 --- a/third_party/rust/weedle/src/interface.rs +++ b/third_party/rust/weedle/src/interface.rs @@ -1,8 +1,8 @@ -use argument::ArgumentList; -use attribute::ExtendedAttributeList; -use common::{Braced, Generics, Identifier}; -use literal::ConstValue; -use types::{AttributedType, ConstType, ReturnType}; +use crate::argument::ArgumentList; +use crate::attribute::ExtendedAttributeList; +use crate::common::{Generics, Identifier, Parenthesized}; +use crate::literal::ConstValue; +use crate::types::{AttributedType, ConstType, ReturnType}; /// Parses interface members pub type InterfaceMembers<'a> = Vec>; @@ -37,6 +37,15 @@ ast_types! { identifier: Identifier<'a>, semi_colon: term!(;), }), + /// Parses `[attributes]? constructor(( args ));` + /// + /// (( )) means ( ) chars + Constructor(struct ConstructorInterfaceMember<'a> { + attributes: Option>, + constructor: term!(constructor), + args: Parenthesized>, + semi_colon: term!(;), + }), /// Parses `[attributes]? (stringifier|static)? special? returntype identifier? (( args ));` /// /// (( )) means ( ) chars @@ -46,7 +55,7 @@ ast_types! 
{ special: Option, return_type: ReturnType<'a>, identifier: Option>, - args: Braced>, + args: Parenthesized>, semi_colon: term!(;), }), /// Parses an iterable declaration `[attributes]? (iterable | iterable) ;` @@ -118,7 +127,7 @@ ast_types! { #[cfg(test)] mod test { use super::*; - use Parse; + use crate::Parse; test!(should_parse_stringifier_member { "stringifier;" => ""; @@ -169,6 +178,12 @@ mod test { attributes.is_none(); }); + test!(should_parse_constructor_interface_member { "constructor(long a);" => + ""; + ConstructorInterfaceMember; + attributes.is_none(); + }); + test!(should_parse_operation_interface_member { "void readString(long a, long b);" => ""; OperationInterfaceMember; diff --git a/third_party/rust/weedle/src/lib.rs b/third_party/rust/weedle/src/lib.rs index f6cca61721..1b7bef8473 100644 --- a/third_party/rust/weedle/src/lib.rs +++ b/third_party/rust/weedle/src/lib.rs @@ -24,25 +24,36 @@ // need a higher recusion limit for macros #![recursion_limit = "128"] -#[macro_use] +#[macro_use( + alt, + cond, + do_parse, + map, + many0, + opt, + recognize, + separated_list, + separated_nonempty_list, + terminated +)] extern crate nom; -use argument::ArgumentList; -use attribute::ExtendedAttributeList; -use common::{Braced, Identifier, Parenthesized, PunctuatedNonEmpty}; -use dictionary::DictionaryMembers; -use interface::{Inheritance, InterfaceMembers}; -use literal::StringLit; -use mixin::MixinMembers; -use namespace::NamespaceMembers; -pub use nom::{types::CompleteStr, Err, Context, IResult}; -use types::{AttributedType, ReturnType}; +use self::argument::ArgumentList; +use self::attribute::ExtendedAttributeList; +use self::common::{Braced, Identifier, Parenthesized, PunctuatedNonEmpty}; +use self::dictionary::DictionaryMembers; +use self::interface::{Inheritance, InterfaceMembers}; +use self::literal::StringLit; +use self::mixin::MixinMembers; +use self::namespace::NamespaceMembers; +use self::types::{AttributedType, ReturnType}; +pub use nom::{error::ErrorKind, Err, IResult}; -#[macro_use] -mod whitespace; #[macro_use] mod macros; #[macro_use] +mod whitespace; +#[macro_use] pub mod term; pub mod argument; pub mod attribute; @@ -69,30 +80,30 @@ pub mod types; /// /// println!("{:?}", parsed); /// ``` -pub fn parse<'a>(raw: &'a str) -> Result, Err, u32>> { - let (remaining, parsed) = Definitions::parse(CompleteStr(raw))?; - if remaining.len() > 0 { - Result::Err(Err::Failure(nom::Context::Code(remaining, nom::ErrorKind::Custom(0)))) - } else { - Ok(parsed) - } +pub fn parse(raw: &str) -> Result, Err<(&str, ErrorKind)>> { + let (remaining, parsed) = Definitions::parse(raw)?; + assert!( + remaining.is_empty(), + "There is redundant raw data after parsing" + ); + Ok(parsed) } pub trait Parse<'a>: Sized { - fn parse(input: CompleteStr<'a>) -> IResult, Self>; + fn parse(input: &'a str) -> IResult<&'a str, Self>; } /// Parses WebIDL definitions. It is the root struct for a complete WebIDL definition. /// /// ### Example /// ``` -/// use weedle::{Definitions, CompleteStr, Parse}; +/// use weedle::{Definitions, Parse}; /// -/// let (_, parsed) = Definitions::parse(CompleteStr(" +/// let (_, parsed) = Definitions::parse(" /// interface Window { /// readonly attribute Storage sessionStorage; /// }; -/// ")).unwrap(); +/// ").unwrap(); /// /// println!("{:?}", parsed); /// ``` @@ -110,7 +121,7 @@ ast_types! { identifier: Identifier<'a>, assign: term!(=), return_type: ReturnType<'a>, - arguments: Braced>, + arguments: Parenthesized>, semi_colon: term!(;), }), /// Parses `[attributes]? 
callback interface identifier ( : inheritance )? { members };` @@ -120,7 +131,7 @@ ast_types! { interface: term!(interface), identifier: Identifier<'a>, inheritance: Option>, - members: Parenthesized>, + members: Braced>, semi_colon: term!(;), }), /// Parses `[attributes]? interface identifier ( : inheritance )? { members };` @@ -129,7 +140,7 @@ ast_types! { interface: term!(interface), identifier: Identifier<'a>, inheritance: Option>, - members: Parenthesized>, + members: Braced>, semi_colon: term!(;), }), /// Parses `[attributes]? interface mixin identifier { members };` @@ -138,7 +149,7 @@ ast_types! { interface: term!(interface), mixin: term!(mixin), identifier: Identifier<'a>, - members: Parenthesized>, + members: Braced>, semi_colon: term!(;), }), /// Parses `[attributes]? namespace identifier { members };` @@ -146,7 +157,7 @@ ast_types! { attributes: Option>, namespace: term!(namespace), identifier: Identifier<'a>, - members: Parenthesized>, + members: Braced>, semi_colon: term!(;), }), /// Parses `[attributes]? dictionary identifier ( : inheritance )? { members };` @@ -155,7 +166,7 @@ ast_types! { dictionary: term!(dictionary), identifier: Identifier<'a>, inheritance: Option>, - members: Parenthesized>, + members: Braced>, semi_colon: term!(;), }), /// Parses `[attributes]? partial interface identifier { members };` @@ -164,7 +175,7 @@ ast_types! { partial: term!(partial), interface: term!(interface), identifier: Identifier<'a>, - members: Parenthesized>, + members: Braced>, semi_colon: term!(;), }), /// Parses `[attributes]? partial interface mixin identifier { members };` @@ -174,7 +185,7 @@ ast_types! { interface: term!(interface), mixin: term!(mixin), identifier: Identifier<'a>, - members: Parenthesized>, + members: Braced>, semi_colon: term!(;), }), /// Parses `[attributes]? partial dictionary identifier { members };` @@ -183,7 +194,7 @@ ast_types! { partial: term!(partial), dictionary: term!(dictionary), identifier: Identifier<'a>, - members: Parenthesized>, + members: Braced>, semi_colon: term!(;), }), /// Parses `[attributes]? partial namespace identifier { members };` @@ -192,7 +203,7 @@ ast_types! { partial: term!(partial), namespace: term!(namespace), identifier: Identifier<'a>, - members: Parenthesized>, + members: Braced>, semi_colon: term!(;), }), /// Parses `[attributes]? enum identifier { values };` @@ -200,7 +211,7 @@ ast_types! { attributes: Option>, enum_: term!(enum), identifier: Identifier<'a>, - values: Parenthesized>, + values: Braced>, semi_colon: term!(;), }), /// Parses `[attributes]? typedef attributedtype identifier;` diff --git a/third_party/rust/weedle/src/literal.rs b/third_party/rust/weedle/src/literal.rs index fa573b7446..34d9bb1421 100644 --- a/third_party/rust/weedle/src/literal.rs +++ b/third_party/rust/weedle/src/literal.rs @@ -5,42 +5,33 @@ ast_types! 
{ /// Parses `-?[1-9][0-9]*` #[derive(Copy)] Dec(struct DecLit<'a>( - &'a str = map!( - ws!(recognize!(do_parse!( - opt!(char!('-')) >> - one_of!("123456789") >> - take_while!(|c: char| c.is_ascii_digit()) >> - (()) - ))), - |inner| inner.0 - ), + &'a str = ws!(recognize!(do_parse!( + opt!(char!('-')) >> + one_of!("123456789") >> + take_while!(|c: char| c.is_ascii_digit()) >> + (()) + ))), )), /// Parses `-?0[Xx][0-9A-Fa-f]+)` #[derive(Copy)] Hex(struct HexLit<'a>( - &'a str = map!( - ws!(recognize!(do_parse!( - opt!(char!('-')) >> - char!('0') >> - alt!(char!('x') | char!('X')) >> - take_while!(|c: char| c.is_ascii_hexdigit()) >> - (()) - ))), - |inner| inner.0 - ), + &'a str = ws!(recognize!(do_parse!( + opt!(char!('-')) >> + char!('0') >> + alt!(char!('x') | char!('X')) >> + take_while!(|c: char| c.is_ascii_hexdigit()) >> + (()) + ))), )), /// Parses `-?0[0-7]*` #[derive(Copy)] Oct(struct OctLit<'a>( - &'a str = map!( - ws!(recognize!(do_parse!( - opt!(char!('-')) >> - char!('0') >> - take_while!(|c| '0' <= c && c <= '7') >> - (()) - ))), - |inner| inner.0 - ), + &'a str = ws!(recognize!(do_parse!( + opt!(char!('-')) >> + char!('0') >> + take_while!(|c| '0' <= c && c <= '7') >> + (()) + ))), )), } @@ -53,7 +44,7 @@ ast_types! { char!('"') >> s: take_while!(|c| c != '"') >> char!('"') >> - (s.0) + (s) )), ) @@ -67,6 +58,12 @@ ast_types! { open_bracket: term!(OpenBracket), close_bracket: term!(CloseBracket), }), + /// Represents `{ }` + #[derive(Copy, Default)] + EmptyDictionary(struct EmptyDictionaryLit { + open_brace: term!(OpenBrace), + close_brace: term!(CloseBrace), + }), Float(FloatLit<'a>), Integer(IntegerLit<'a>), Null(term!(null)), @@ -97,50 +94,47 @@ ast_types! { /// Parses `/-?(([0-9]+\.[0-9]*|[0-9]*\.[0-9]+)([Ee][+-]?[0-9]+)?|[0-9]+[Ee][+-]?[0-9]+)/` #[derive(Copy)] Value(struct FloatValueLit<'a>( - &'a str = map!( - ws!(recognize!(do_parse!( - opt!(char!('-')) >> - alt!( - do_parse!( - // (?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+) - alt!( - do_parse!( - take_while1!(|c: char| c.is_ascii_digit()) >> - char!('.') >> - take_while!(|c: char| c.is_ascii_digit()) >> - (()) - ) - | - do_parse!( - take_while!(|c: char| c.is_ascii_digit()) >> - char!('.') >> - take_while1!(|c: char| c.is_ascii_digit()) >> - (()) - ) - ) >> - // (?:[Ee][+-]?[0-9]+)? - opt!(do_parse!( - alt!(char!('e') | char!('E')) >> - opt!(alt!(char!('+') | char!('-'))) >> + &'a str = ws!(recognize!(do_parse!( + opt!(char!('-')) >> + alt!( + do_parse!( + // (?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+) + alt!( + do_parse!( take_while1!(|c: char| c.is_ascii_digit()) >> + char!('.') >> + take_while!(|c: char| c.is_ascii_digit()) >> (()) - )) >> - (()) - ) - | - // [0-9]+[Ee][+-]?[0-9]+ - do_parse!( - take_while1!(|c: char| c.is_ascii_digit()) >> + ) + | + do_parse!( + take_while!(|c: char| c.is_ascii_digit()) >> + char!('.') >> + take_while1!(|c: char| c.is_ascii_digit()) >> + (()) + ) + ) >> + // (?:[Ee][+-]?[0-9]+)? + opt!(do_parse!( alt!(char!('e') | char!('E')) >> opt!(alt!(char!('+') | char!('-'))) >> take_while1!(|c: char| c.is_ascii_digit()) >> (()) - ) - ) >> - (()) - ))), - |inner| inner.0 - ), + )) >> + (()) + ) + | + // [0-9]+[Ee][+-]?[0-9]+ + do_parse!( + take_while1!(|c: char| c.is_ascii_digit()) >> + alt!(char!('e') | char!('E')) >> + opt!(alt!(char!('+') | char!('-'))) >> + take_while1!(|c: char| c.is_ascii_digit()) >> + (()) + ) + ) >> + (()) + ))), )), NegInfinity(term!(-Infinity)), Infinity(term!(Infinity)), @@ -151,8 +145,8 @@ ast_types! 
{ #[cfg(test)] mod test { use super::*; - use term::*; - use Parse; + use crate::term::*; + use crate::Parse; test!(should_parse_integer { "45" => ""; @@ -249,6 +243,22 @@ mod test { StringLit => StringLit("this is first") }); + test!(should_parse_string_with_spaces { r#" " this is a string " "# => + ""; + StringLit => StringLit(" this is a string ") + }); + + test!(should_parse_string_with_comment { r#" "// this is still a string" + "# => + ""; + StringLit => StringLit("// this is still a string") + }); + + test!(should_parse_string_with_multiline_comment { r#" "/*" "*/" "# => + r#""*/" "#; + StringLit => StringLit("/*") + }); + test!(should_parse_null { "null" => ""; Null => Null diff --git a/third_party/rust/weedle/src/macros.rs b/third_party/rust/weedle/src/macros.rs index 454f775168..cb55a1bc23 100644 --- a/third_party/rust/weedle/src/macros.rs +++ b/third_party/rust/weedle/src/macros.rs @@ -1,6 +1,48 @@ +macro_rules! tag { + ($i:expr, $tag: expr) => { + nom::bytes::complete::tag($tag)($i) + }; +} + +macro_rules! take_while { + ($input:expr, $submac:ident!( $($args:tt)* )) => { + $crate::macros::take_while!($input, (|c| $submac!(c, $($args)*))) + }; + ($input:expr, $f:expr) => { + nom::bytes::complete::take_while($f)($input) + }; +} + +macro_rules! take_while1 { + ($input:expr, $submac:ident!( $($args:tt)* )) => { + $crate::macros::take_while1!($input, (|c| $submac!(c, $($args)*))) + }; + ($input:expr, $f:expr) => { + nom::bytes::complete::take_while1($f)($input) + }; +} + +macro_rules! take_until { + ($i:expr, $substr:expr) => { + nom::bytes::complete::take_until($substr)($i) + }; +} + +macro_rules! one_of { + ($i:expr, $inp: expr) => { + nom::character::complete::one_of($inp)($i) + }; +} + +macro_rules! char { + ($i:expr, $c: expr) => { + nom::character::complete::char($c)($i) + }; +} + macro_rules! parser { ($submac:ident!( $($args:tt)* )) => { - fn parse(input: $crate::CompleteStr<'a>) -> $crate::IResult<$crate::CompleteStr<'a>, Self> { + fn parse(input: &'a str) -> $crate::IResult<&'a str, Self> { $submac!(input, $($args)*) } }; @@ -150,7 +192,7 @@ macro_rules! __ast_tuple_struct { pub struct $name<$($maybe_a)*>(pub $inner); impl<'a> $crate::Parse<'a> for $name<$($maybe_a)*> { - fn parse(input: $crate::CompleteStr<'a>) -> $crate::IResult<$crate::CompleteStr<'a>, Self> { + fn parse(input: &'a str) -> $crate::IResult<&'a str, Self> { use $crate::nom::lib::std::result::Result::*; match $submac!(input, $($args)*) { @@ -251,8 +293,7 @@ macro_rules! __ast_struct { { $($prev:tt)* } { $field:ident : $type:ty = marker, $($rest:tt)* } ) => ({ - let $field = $crate::std::default::Default::default(); - + let $field = ::std::default::Default::default(); __ast_struct! { @build_parser { $($prev)* $field } @@ -289,7 +330,7 @@ macro_rules! __ast_struct { } impl<'a> $crate::Parse<'a> for $name { - fn parse(input: $crate::CompleteStr<'a>) -> $crate::IResult<$crate::CompleteStr<'a>, Self> { + fn parse(input: &'a str) -> $crate::IResult<&'a str, Self> { __ast_struct! { @build_parser { input, } @@ -317,7 +358,7 @@ macro_rules! __ast_struct { } impl<'a> $crate::Parse<'a> for $name<'a> { - fn parse(input: $crate::CompleteStr<'a>) -> $crate::IResult<$crate::CompleteStr<'a>, Self> { + fn parse(input: &'a str) -> $crate::IResult<&'a str, Self> { __ast_struct! { @build_parser { input, } @@ -345,7 +386,7 @@ macro_rules! 
__ast_struct { } impl<'a, $($generics),+> $crate::Parse<'a> for $name<$($generics),+> where $($bounds)+ { - fn parse(input: $crate::CompleteStr<'a>) -> $crate::IResult<$crate::CompleteStr<'a>, Self> { + fn parse(input: &'a str) -> $crate::IResult<&'a str, Self> { __ast_struct! { @build_parser { input, } @@ -528,22 +569,22 @@ macro_rules! test { (err $name:ident { $raw:expr => $typ:ty }) => { #[test] fn $name() { - <$typ>::parse($crate::nom::types::CompleteStr($raw)).unwrap_err(); + <$typ>::parse($raw).unwrap_err(); } }; ($name:ident { $raw:expr => $rem:expr; $typ:ty => $val:expr }) => { #[test] fn $name() { - let (rem, parsed) = <$typ>::parse($crate::nom::types::CompleteStr($raw)).unwrap(); - assert_eq!(rem, $crate::nom::types::CompleteStr($rem)); + let (rem, parsed) = <$typ>::parse($raw).unwrap(); + assert_eq!(rem, $rem); assert_eq!(parsed, $val); } }; ($name:ident { $raw:expr => $rem:expr; $typ:ty; $($body:tt)* }) => { #[test] fn $name() { - let (_rem, _parsed) = <$typ>::parse($crate::nom::types::CompleteStr($raw)).unwrap(); - assert_eq!(_rem, $crate::nom::types::CompleteStr($rem)); + let (_rem, _parsed) = <$typ>::parse($raw).unwrap(); + assert_eq!(_rem, $rem); test!(@arg _parsed $($body)*); } }; @@ -557,11 +598,10 @@ macro_rules! test_variants { $( mod $variant { use $crate::types::*; - use $crate::nom::types::CompleteStr; #[test] fn should_parse() { - let (rem, parsed) = $struct_::parse(CompleteStr($value)).unwrap(); - assert_eq!(rem, CompleteStr("")); + let (rem, parsed) = $struct_::parse($value).unwrap(); + assert_eq!(rem, ""); match parsed { $struct_::$variant(_) => {}, _ => { panic!("Failed to parse"); } diff --git a/third_party/rust/weedle/src/mixin.rs b/third_party/rust/weedle/src/mixin.rs index a8b305f549..dcb40d50c5 100644 --- a/third_party/rust/weedle/src/mixin.rs +++ b/third_party/rust/weedle/src/mixin.rs @@ -1,8 +1,8 @@ -use argument::ArgumentList; -use attribute::ExtendedAttributeList; -use common::{Braced, Identifier}; -use interface::{ConstMember, StringifierMember}; -use types::{AttributedType, ReturnType}; +use crate::argument::ArgumentList; +use crate::attribute::ExtendedAttributeList; +use crate::common::{Identifier, Parenthesized}; +use crate::interface::{ConstMember, StringifierMember}; +use crate::types::{AttributedType, ReturnType}; /// Parses the members declarations of a mixin pub type MixinMembers<'a> = Vec>; @@ -19,7 +19,7 @@ ast_types! { stringifier: Option, return_type: ReturnType<'a>, identifier: Option>, - args: Braced>, + args: Parenthesized>, semi_colon: term!(;), }), /// Parses `[attributes]? stringifier? readonly? attribute attributedtype identifier;` @@ -39,7 +39,7 @@ ast_types! { #[cfg(test)] mod test { use super::*; - use Parse; + use crate::Parse; test!(should_parse_attribute_mixin_member { "stringifier readonly attribute short name;" => ""; diff --git a/third_party/rust/weedle/src/namespace.rs b/third_party/rust/weedle/src/namespace.rs index a022973e0f..ed28573218 100644 --- a/third_party/rust/weedle/src/namespace.rs +++ b/third_party/rust/weedle/src/namespace.rs @@ -1,7 +1,7 @@ -use argument::ArgumentList; -use attribute::ExtendedAttributeList; -use common::{Braced, Identifier}; -use types::{AttributedType, ReturnType}; +use crate::argument::ArgumentList; +use crate::attribute::ExtendedAttributeList; +use crate::common::{Identifier, Parenthesized}; +use crate::types::{AttributedType, ReturnType}; /// Parses namespace members declaration pub type NamespaceMembers<'a> = Vec>; @@ -16,7 +16,7 @@ ast_types! 
{ attributes: Option>, return_type: ReturnType<'a>, identifier: Option>, - args: Braced>, + args: Parenthesized>, semi_colon: term!(;), }), /// Parses `[attribute]? readonly attributetype type identifier;` @@ -34,7 +34,7 @@ ast_types! { #[cfg(test)] mod test { use super::*; - use Parse; + use crate::Parse; test!(should_parse_attribute_namespace_member { "readonly attribute short name;" => ""; diff --git a/third_party/rust/weedle/src/term.rs b/third_party/rust/weedle/src/term.rs index 7d7d432e7b..008a538f0e 100644 --- a/third_party/rust/weedle/src/term.rs +++ b/third_party/rust/weedle/src/term.rs @@ -21,11 +21,11 @@ macro_rules! ident_tag ( match tag!($i, $tok) { Err(e) => Err(e), Ok((i, o)) => { + use nom::{character::is_alphanumeric, Err as NomErr, error::ErrorKind}; let mut res = Ok((i, o)); if let Some(&c) = i.as_bytes().first() { - use $crate::nom::{Context, Err, ErrorKind, is_alphanumeric}; if is_alphanumeric(c) || c == b'_' || c == b'-' { - res = Err(Err::Error(Context::Code($i, ErrorKind::Tag::))); + res = Err(NomErr::Error(($i, ErrorKind::Tag))); } } res @@ -53,11 +53,11 @@ macro_rules! generate_terms_for_names { } generate_terms! { - /// Represents the terminal symbol `{` - OpenParen => "{", + /// Represents the terminal symbol `(` + OpenParen => "(", - /// Represents the terminal symbol `}` - CloseParen => "}", + /// Represents the terminal symbol `)` + CloseParen => ")", /// Represents the terminal symbol `[` OpenBracket => "[", @@ -65,11 +65,11 @@ generate_terms! { /// Represents the terminal symbol `]` CloseBracket => "]", - /// Represents the terminal symbol `(` - OpenBrace => "(", + /// Represents the terminal symbol `{` + OpenBrace => "{", - /// Represents the terminal symbol `)` - CloseBrace => ")", + /// Represents the terminal symbol `}` + CloseBrace => "}", /// Represents the terminal symbol `,` Comma => ",", @@ -300,6 +300,9 @@ generate_terms_for_names! { /// Represents the terminal symbol `legacycaller` LegacyCaller => "legacycaller", + + /// Represents the terminal symbol `constructor` + Constructor => "constructor", } #[macro_export] @@ -550,6 +553,9 @@ macro_rules! 
term { (legacycaller) => { $crate::term::LegacyCaller }; + (constructor) => { + $crate::term::Constructor + }; } #[cfg(test)] @@ -559,41 +565,40 @@ mod test { $( mod $m { use super::super::$typ; - use Parse; - use nom::types::CompleteStr; + use crate::Parse; #[test] fn should_parse() { - let (rem, parsed) = $typ::parse(CompleteStr(concat!($string))).unwrap(); - assert_eq!(rem, CompleteStr("")); + let (rem, parsed) = $typ::parse(concat!($string)).unwrap(); + assert_eq!(rem, ""); assert_eq!(parsed, $typ); } #[test] fn should_parse_with_preceding_spaces() { - let (rem, parsed) = $typ::parse(CompleteStr(concat!(" ", $string))).unwrap(); - assert_eq!(rem, CompleteStr("")); + let (rem, parsed) = $typ::parse(concat!(" ", $string)).unwrap(); + assert_eq!(rem, ""); assert_eq!(parsed, $typ); } #[test] fn should_parse_with_succeeding_spaces() { - let (rem, parsed) = $typ::parse(CompleteStr(concat!($string, " "))).unwrap(); - assert_eq!(rem, CompleteStr("")); + let (rem, parsed) = $typ::parse(concat!($string, " ")).unwrap(); + assert_eq!(rem, ""); assert_eq!(parsed, $typ); } #[test] fn should_parse_with_surrounding_spaces() { - let (rem, parsed) = $typ::parse(CompleteStr(concat!(" ", $string, " "))).unwrap(); - assert_eq!(rem, CompleteStr("")); + let (rem, parsed) = $typ::parse(concat!(" ", $string, " ")).unwrap(); + assert_eq!(rem, ""); assert_eq!(parsed, $typ); } #[test] fn should_parse_if_anything_next() { - let (rem, parsed) = $typ::parse(CompleteStr(concat!($string, " anything"))).unwrap(); - assert_eq!(rem, CompleteStr("anything")); + let (rem, parsed) = $typ::parse(concat!($string, " anything")).unwrap(); + assert_eq!(rem, "anything"); assert_eq!(parsed, $typ); } } @@ -602,12 +607,12 @@ mod test { } generate_tests![ - openparen, OpenParen, "{"; - closeparen, CloseParen, "}"; + openparen, OpenParen, "("; + closeparen, CloseParen, ")"; openbracket, OpenBracket, "["; closebracket, CloseBracket, "]"; - openbrace, OpenBrace, "("; - closebrace, CloseBrace, ")"; + openbrace, OpenBrace, "{"; + closebrace, CloseBrace, "}"; comma, Comma, ","; minus, Minus, "-"; dot, Dot, "."; @@ -680,5 +685,6 @@ mod test { error, Error, "Error"; implements, Implements, "implements"; legacycaller, LegacyCaller, "legacycaller"; + constructor, Constructor, "constructor"; ]; } diff --git a/third_party/rust/weedle/src/types.rs b/third_party/rust/weedle/src/types.rs index 5c261401d8..8ee7e176f9 100644 --- a/third_party/rust/weedle/src/types.rs +++ b/third_party/rust/weedle/src/types.rs @@ -1,10 +1,10 @@ -use attribute::ExtendedAttributeList; -use common::{Braced, Generics, Identifier, Punctuated}; -use term; -use Parse; +use crate::attribute::ExtendedAttributeList; +use crate::common::{Generics, Identifier, Parenthesized, Punctuated}; +use crate::term; +use crate::Parse; /// Parses a union of types -pub type UnionType<'a> = Braced, term!(or)>>; +pub type UnionType<'a> = Parenthesized, term!(or)>>; ast_types! { /// Parses either single type or a union type @@ -133,7 +133,7 @@ ast_types! { /// Parses one of the member of a union type enum UnionMemberType<'a> { - Single(NonAnyType<'a>), + Single(AttributedNonAnyType<'a>), Union(MayBeNull>), } @@ -158,6 +158,12 @@ ast_types! { attributes: Option>, type_: Type<'a>, } + + /// Parses `[attributes]? 
type` where the type is a single non-any type + struct AttributedNonAnyType<'a> { + attributes: Option>, + type_: NonAnyType<'a>, + } } #[cfg(test)] @@ -166,13 +172,13 @@ mod test { test!(should_parse_may_be_null { "short" => ""; - MayBeNull<::types::IntegerType>; + MayBeNull; q_mark.is_none(); }); test!(should_parse_nullable { "short?" => ""; - MayBeNull<::types::IntegerType>; + MayBeNull; q_mark.is_some(); }); @@ -231,7 +237,7 @@ mod test { test_variants!( UnionMemberType { Single == "byte", - Union == "(byte or byte)" + Union == "([Clamp] unsigned long or byte)" } ); @@ -326,6 +332,49 @@ mod test { test!(should_parse_type_as_identifier { "DOMStringMap" => // if type is not parsed as identifier, it is parsed as `DOMString` and 'Map' is left ""; - ::types::Type; + crate::types::Type; }); + + #[test] + fn should_parse_union_member_type_attributed_union() { + use crate::types::UnionMemberType; + let (rem, parsed) = UnionMemberType::parse("([Clamp] byte or [Named] byte)").unwrap(); + assert_eq!(rem, ""); + match parsed { + UnionMemberType::Union(MayBeNull { + type_: + Parenthesized { + body: Punctuated { list, .. }, + .. + }, + .. + }) => { + assert_eq!(list.len(), 2); + + match list[0] { + UnionMemberType::Single(AttributedNonAnyType { ref attributes, .. }) => { + assert!(attributes.is_some()); + } + + _ => { + panic!("Failed to parse list[0] attributes"); + } + }; + + match list[1] { + UnionMemberType::Single(AttributedNonAnyType { ref attributes, .. }) => { + assert!(attributes.is_some()); + } + + _ => { + panic!("Failed to parse list[1] attributes"); + } + }; + } + + _ => { + panic!("Failed to parse"); + } + } + } } diff --git a/third_party/rust/weedle/src/whitespace.rs b/third_party/rust/weedle/src/whitespace.rs index 5ba7b80adf..e6bbf0a61f 100644 --- a/third_party/rust/weedle/src/whitespace.rs +++ b/third_party/rust/weedle/src/whitespace.rs @@ -1,25 +1,18 @@ -use {CompleteStr, IResult}; +use crate::IResult; -pub fn sp(input: CompleteStr) -> IResult { +pub(crate) fn sp(input: &str) -> IResult<&str, &str> { recognize!( input, - many0!( - alt!( - do_parse!(tag!("//") >> take_until!("\n") >> char!('\n') >> (())) - | - map!( - take_while1!(|c| c == '\t' || c == '\n' || c == '\r' || c == ' '), - |_| () - ) - | - do_parse!( - tag!("/*") >> - take_until!("*/") >> - tag!("*/") >> - (()) - ) - ) - ) + many0!(alt!( + // ignores line comments + do_parse!(tag!("//") >> take_until!("\n") >> char!('\n') >> (())) + | + // ignores whitespace + map!(take_while1!(|c| c == '\t' || c == '\n' || c == '\r' || c == ' '), |_| ()) + | + // ignores block comments + do_parse!(tag!("/*") >> take_until!("*/") >> tag!("*/") >> (())) + )) ) } @@ -27,18 +20,12 @@ pub fn sp(input: CompleteStr) -> IResult { macro_rules! 
ws ( ($i:expr, $($args:tt)*) => ({ use $crate::whitespace::sp; - use $crate::nom::Convert; - use $crate::nom::Err; - use $crate::nom::lib::std::result::Result::*; - match sep!($i, sp, $($args)*) { - Err(e) => Err(e), - Ok((i1, o)) => { - match (sp)(i1) { - Err(e) => Err(Err::convert(e)), - Ok((i2, _)) => Ok((i2, o)) - } - } - } + do_parse!($i, + sp >> + s: $($args)* >> + sp >> + (s) + ) }); ); diff --git a/third_party/rust/weedle/tests/defs/html.webidl b/third_party/rust/weedle/tests/defs/html.webidl index 2b20bad4a4..83e009aac5 100644 --- a/third_party/rust/weedle/tests/defs/html.webidl +++ b/third_party/rust/weedle/tests/defs/html.webidl @@ -124,7 +124,7 @@ interface mixin HTMLOrSVGElement { attribute DOMString nonce; [CEReactions] attribute long tabIndex; - void focus(optional FocusOptions options); + void focus(optional FocusOptions options = {}); void blur(); }; HTMLElement includes HTMLOrSVGElement; @@ -613,7 +613,7 @@ interface TimeRanges { }; [Exposed=Window, - Constructor(DOMString type, optional TrackEventInit eventInitDict)] + Constructor(DOMString type, optional TrackEventInit eventInitDict = {})] interface TrackEvent : Event { readonly attribute (VideoTrack or AudioTrack or TextTrack)? track; }; @@ -1063,8 +1063,8 @@ interface HTMLTemplateElement : HTMLElement { HTMLConstructor] interface HTMLSlotElement : HTMLElement { [CEReactions] attribute DOMString name; - sequence assignedNodes(optional AssignedNodesOptions options); - sequence assignedElements(optional AssignedNodesOptions options); + sequence assignedNodes(optional AssignedNodesOptions options = {}); + sequence assignedElements(optional AssignedNodesOptions options = {}); }; dictionary AssignedNodesOptions { @@ -1142,7 +1142,7 @@ interface mixin CanvasTransform { [NewObject] DOMMatrix getTransform(); void setTransform(unrestricted double a, unrestricted double b, unrestricted double c, unrestricted double d, unrestricted double e, unrestricted double f); - void setTransform(optional DOMMatrix2DInit transform); + void setTransform(optional DOMMatrix2DInit transform = {}); void resetTransform(); }; @@ -1285,7 +1285,7 @@ interface CanvasGradient { [Exposed=(Window,Worker)] interface CanvasPattern { // opaque object - void setTransform(optional DOMMatrix2DInit transform); + void setTransform(optional DOMMatrix2DInit transform = {}); }; [Exposed=Window] @@ -1320,7 +1320,7 @@ interface ImageData { [Constructor(optional (Path2D or DOMString) path), Exposed=(Window,Worker)] interface Path2D { - void addPath(Path2D path, optional DOMMatrix2DInit transform); + void addPath(Path2D path, optional DOMMatrix2DInit transform = {}); }; Path2D includes CanvasPath; @@ -1351,7 +1351,7 @@ interface OffscreenCanvas : EventTarget { OffscreenRenderingContext? 
getContext(OffscreenRenderingContextId contextId, optional any options = null); ImageBitmap transferToImageBitmap(); - Promise convertToBlob(optional ImageEncodeOptions options); + Promise convertToBlob(optional ImageEncodeOptions options = {}); }; [Exposed=(Window,Worker)] @@ -1376,7 +1376,7 @@ OffscreenCanvasRenderingContext2D includes CanvasPath; [Exposed=Window] interface CustomElementRegistry { - [CEReactions] void define(DOMString name, Function constructor, optional ElementDefinitionOptions options); + [CEReactions] void define(DOMString name, Function constructor, optional ElementDefinitionOptions options = {}); any get(DOMString name); Promise whenDefined(DOMString name); [CEReactions] void upgrade(Node root); @@ -1435,7 +1435,7 @@ interface DataTransferItem { callback FunctionStringCallback = void (DOMString data); [Exposed=Window, - Constructor(DOMString type, optional DragEventInit eventInitDict)] + Constructor(DOMString type, optional DragEventInit eventInitDict = {})] interface DragEvent : MouseEvent { readonly attribute DataTransfer? dataTransfer; }; @@ -1542,7 +1542,7 @@ interface Location { // but see also additional creation steps and overridden in }; [Exposed=Window, - Constructor(DOMString type, optional PopStateEventInit eventInitDict)] + Constructor(DOMString type, optional PopStateEventInit eventInitDict = {})] interface PopStateEvent : Event { readonly attribute any state; }; @@ -1552,7 +1552,7 @@ dictionary PopStateEventInit : EventInit { }; [Exposed=Window, - Constructor(DOMString type, optional HashChangeEventInit eventInitDict)] + Constructor(DOMString type, optional HashChangeEventInit eventInitDict = {})] interface HashChangeEvent : Event { readonly attribute USVString oldURL; readonly attribute USVString newURL; @@ -1564,7 +1564,7 @@ dictionary HashChangeEventInit : EventInit { }; [Exposed=Window, - Constructor(DOMString type, optional PageTransitionEventInit eventInitDict)] + Constructor(DOMString type, optional PageTransitionEventInit eventInitDict = {})] interface PageTransitionEvent : Event { readonly attribute boolean persisted; }; @@ -1610,7 +1610,7 @@ interface mixin NavigatorOnLine { readonly attribute boolean onLine; }; -[Constructor(DOMString type, optional ErrorEventInit eventInitDict), Exposed=(Window,Worker)] +[Constructor(DOMString type, optional ErrorEventInit eventInitDict = {}), Exposed=(Window,Worker)] interface ErrorEvent : Event { readonly attribute DOMString message; readonly attribute USVString filename; @@ -1756,8 +1756,8 @@ interface mixin WindowOrWorkerGlobalScope { void clearInterval(optional long handle = 0); // ImageBitmap - Promise createImageBitmap(ImageBitmapSource image, optional ImageBitmapOptions options); - Promise createImageBitmap(ImageBitmapSource image, long sx, long sy, long sw, long sh, optional ImageBitmapOptions options); + Promise createImageBitmap(ImageBitmapSource image, optional ImageBitmapOptions options = {}); + Promise createImageBitmap(ImageBitmapSource image, long sx, long sy, long sw, long sh, optional ImageBitmapOptions options = {}); }; Window includes WindowOrWorkerGlobalScope; WorkerGlobalScope includes WindowOrWorkerGlobalScope; @@ -1872,7 +1872,7 @@ dictionary ImageBitmapOptions { ResizeQuality resizeQuality = "low"; }; -[Constructor(DOMString type, optional MessageEventInit eventInitDict), Exposed=(Window,Worker,AudioWorklet)] +[Constructor(DOMString type, optional MessageEventInit eventInitDict = {}), Exposed=(Window,Worker,AudioWorklet)] interface MessageEvent : Event { readonly attribute any data; 
readonly attribute USVString origin; @@ -1893,7 +1893,7 @@ dictionary MessageEventInit : EventInit { typedef (WindowProxy or MessagePort or ServiceWorker) MessageEventSource; -[Constructor(USVString url, optional EventSourceInit eventSourceInitDict), Exposed=(Window,Worker)] +[Constructor(USVString url, optional EventSourceInit eventSourceInitDict = {}), Exposed=(Window,Worker)] interface EventSource : EventTarget { readonly attribute USVString url; readonly attribute boolean withCredentials; @@ -1945,7 +1945,7 @@ interface WebSocket : EventTarget { void send(ArrayBufferView data); }; -[Constructor(DOMString type, optional CloseEventInit eventInitDict), Exposed=(Window,Worker)] +[Constructor(DOMString type, optional CloseEventInit eventInitDict = {}), Exposed=(Window,Worker)] interface CloseEvent : Event { readonly attribute boolean wasClean; readonly attribute unsigned short code; @@ -2024,7 +2024,7 @@ interface mixin AbstractWorker { attribute EventHandler onerror; }; -[Constructor(USVString scriptURL, optional WorkerOptions options), Exposed=(Window,Worker)] +[Constructor(USVString scriptURL, optional WorkerOptions options = {}), Exposed=(Window,Worker)] interface Worker : EventTarget { void terminate(); @@ -2043,7 +2043,7 @@ enum WorkerType { "classic", "module" }; Worker includes AbstractWorker; -[Constructor(USVString scriptURL, optional (DOMString or WorkerOptions) options), +[Constructor(USVString scriptURL, optional (DOMString or WorkerOptions) options = {}), Exposed=(Window,Worker)] interface SharedWorker : EventTarget { readonly attribute MessagePort port; @@ -2095,7 +2095,7 @@ interface mixin WindowLocalStorage { Window includes WindowLocalStorage; [Exposed=Window, - Constructor(DOMString type, optional StorageEventInit eventInitDict)] + Constructor(DOMString type, optional StorageEventInit eventInitDict = {})] interface StorageEvent : Event { readonly attribute DOMString? key; readonly attribute DOMString? oldValue; diff --git a/third_party/rust/weedle/tests/defs/interface-constructor.webidl b/third_party/rust/weedle/tests/defs/interface-constructor.webidl new file mode 100644 index 0000000000..93cb28a4af --- /dev/null +++ b/third_party/rust/weedle/tests/defs/interface-constructor.webidl @@ -0,0 +1,4 @@ +interface InterfaceWithConstructor { + [Throws] + constructor(long a); +}; diff --git a/third_party/rust/weedle/tests/defs/mediacapture-streams.webidl b/third_party/rust/weedle/tests/defs/mediacapture-streams.webidl new file mode 100644 index 0000000000..5780cfb34e --- /dev/null +++ b/third_party/rust/weedle/tests/defs/mediacapture-streams.webidl @@ -0,0 +1,246 @@ +[Exposed=Window, + Constructor, + Constructor(MediaStream stream), + Constructor(sequence tracks)] +interface MediaStream : EventTarget { + readonly attribute DOMString id; + sequence getAudioTracks(); + sequence getVideoTracks(); + sequence getTracks(); + MediaStreamTrack? 
getTrackById(DOMString trackId); + void addTrack(MediaStreamTrack track); + void removeTrack(MediaStreamTrack track); + MediaStream clone(); + readonly attribute boolean active; + attribute EventHandler onaddtrack; + attribute EventHandler onremovetrack; +}; + +[Exposed=Window] +interface MediaStreamTrack : EventTarget { + readonly attribute DOMString kind; + readonly attribute DOMString id; + readonly attribute DOMString label; + attribute boolean enabled; + readonly attribute boolean muted; + attribute EventHandler onmute; + attribute EventHandler onunmute; + readonly attribute MediaStreamTrackState readyState; + attribute EventHandler onended; + MediaStreamTrack clone(); + void stop(); + MediaTrackCapabilities getCapabilities(); + MediaTrackConstraints getConstraints(); + MediaTrackSettings getSettings(); + Promise applyConstraints(optional MediaTrackConstraints constraints = {}); +}; + +enum MediaStreamTrackState { + "live", + "ended" +}; + +dictionary MediaTrackSupportedConstraints { + boolean width = true; + boolean height = true; + boolean aspectRatio = true; + boolean frameRate = true; + boolean facingMode = true; + boolean resizeMode = true; + boolean volume = true; + boolean sampleRate = true; + boolean sampleSize = true; + boolean echoCancellation = true; + boolean autoGainControl = true; + boolean noiseSuppression = true; + boolean latency = true; + boolean channelCount = true; + boolean deviceId = true; + boolean groupId = true; +}; + +dictionary MediaTrackCapabilities { + ULongRange width; + ULongRange height; + DoubleRange aspectRatio; + DoubleRange frameRate; + sequence facingMode; + sequence resizeMode; + DoubleRange volume; + ULongRange sampleRate; + ULongRange sampleSize; + sequence echoCancellation; + sequence autoGainControl; + sequence noiseSuppression; + DoubleRange latency; + ULongRange channelCount; + DOMString deviceId; + DOMString groupId; +}; + +dictionary MediaTrackConstraints : MediaTrackConstraintSet { + sequence advanced; +}; + +dictionary MediaTrackConstraintSet { + ConstrainULong width; + ConstrainULong height; + ConstrainDouble aspectRatio; + ConstrainDouble frameRate; + ConstrainDOMString facingMode; + ConstrainDOMString resizeMode; + ConstrainDouble volume; + ConstrainULong sampleRate; + ConstrainULong sampleSize; + ConstrainBoolean echoCancellation; + ConstrainBoolean autoGainControl; + ConstrainBoolean noiseSuppression; + ConstrainDouble latency; + ConstrainULong channelCount; + ConstrainDOMString deviceId; + ConstrainDOMString groupId; +}; + +dictionary MediaTrackSettings { + long width; + long height; + double aspectRatio; + double frameRate; + DOMString facingMode; + DOMString resizeMode; + double volume; + long sampleRate; + long sampleSize; + boolean echoCancellation; + boolean autoGainControl; + boolean noiseSuppression; + double latency; + long channelCount; + DOMString deviceId; + DOMString groupId; +}; + +enum VideoFacingModeEnum { + "user", + "environment", + "left", + "right" +}; + +enum VideoResizeModeEnum { + "none", + "crop-and-scale" +}; + +[Exposed=Window, + Constructor(DOMString type, MediaStreamTrackEventInit eventInitDict)] +interface MediaStreamTrackEvent : Event { + [SameObject] + readonly attribute MediaStreamTrack track; +}; + +dictionary MediaStreamTrackEventInit : EventInit { + required MediaStreamTrack track; +}; + +partial interface Navigator { + [SameObject, SecureContext] + readonly attribute MediaDevices mediaDevices; +}; + +[Exposed=Window, SecureContext] +interface MediaDevices : EventTarget { + attribute EventHandler 
ondevicechange; + Promise> enumerateDevices(); +}; + +[Exposed=Window, SecureContext] +interface MediaDeviceInfo { + readonly attribute DOMString deviceId; + readonly attribute MediaDeviceKind kind; + readonly attribute DOMString label; + readonly attribute DOMString groupId; + [Default] object toJSON(); +}; + +enum MediaDeviceKind { + "audioinput", + "audiooutput", + "videoinput" +}; + +[Exposed=Window] interface InputDeviceInfo : MediaDeviceInfo { + MediaTrackCapabilities getCapabilities(); +}; + +partial interface Navigator { + [SecureContext] + void getUserMedia(MediaStreamConstraints constraints, NavigatorUserMediaSuccessCallback successCallback, NavigatorUserMediaErrorCallback errorCallback); +}; + +partial interface MediaDevices { + MediaTrackSupportedConstraints getSupportedConstraints(); + Promise getUserMedia(optional MediaStreamConstraints constraints = {}); +}; + +dictionary MediaStreamConstraints { + (boolean or MediaTrackConstraints) video = false; + (boolean or MediaTrackConstraints) audio = false; +}; + +callback NavigatorUserMediaSuccessCallback = void (MediaStream stream); + +callback NavigatorUserMediaErrorCallback = void (MediaStreamError error); + +typedef object MediaStreamError; + +dictionary DoubleRange { + double max; + double min; +}; + +dictionary ConstrainDoubleRange : DoubleRange { + double exact; + double ideal; +}; + +dictionary ULongRange { + [Clamp] unsigned long max; + [Clamp] unsigned long min; +}; + +dictionary ConstrainULongRange : ULongRange { + [Clamp] unsigned long exact; + [Clamp] unsigned long ideal; +}; + +dictionary ConstrainBooleanParameters { + boolean exact; + boolean ideal; +}; + +dictionary ConstrainDOMStringParameters { + (DOMString or sequence) exact; + (DOMString or sequence) ideal; +}; + +typedef ([Clamp] unsigned long or ConstrainULongRange) ConstrainULong; + +typedef (double or ConstrainDoubleRange) ConstrainDouble; + +typedef (boolean or ConstrainBooleanParameters) ConstrainBoolean; + +typedef (DOMString or sequence or ConstrainDOMStringParameters) ConstrainDOMString; + +dictionary Capabilities { +}; + +dictionary Settings { +}; + +dictionary ConstraintSet { +}; + +dictionary Constraints : ConstraintSet { + sequence advanced; +}; diff --git a/third_party/rust/weedle/tests/webidl.rs b/third_party/rust/weedle/tests/webidl.rs index 89dbffca5e..7aaf815218 100644 --- a/third_party/rust/weedle/tests/webidl.rs +++ b/third_party/rust/weedle/tests/webidl.rs @@ -3,6 +3,8 @@ extern crate weedle; use std::fs; use std::io::Read; +use weedle::*; + fn read_file(path: &str) -> String { let mut file = fs::File::open(path).unwrap(); let mut file_content = String::new(); @@ -25,3 +27,73 @@ fn should_parse_html_webidl() { assert_eq!(parsed.len(), 325); } + +#[test] +fn should_parse_mediacapture_streams_webidl() { + let content = read_file("./tests/defs/mediacapture-streams.webidl"); + let parsed = weedle::parse(&content).unwrap(); + + assert_eq!(parsed.len(), 37); +} + +#[test] +fn interface_constructor() { + let content = read_file("./tests/defs/interface-constructor.webidl"); + let mut parsed = weedle::parse(&content).unwrap(); + + assert_eq!(parsed.len(), 1); + + let definition = parsed.pop().unwrap(); + + match definition { + Definition::Interface(mut interface) => { + assert!(interface.attributes.is_none()); + assert_eq!(interface.interface, term!(interface)); + assert_eq!(interface.identifier.0, "InterfaceWithConstructor"); + assert_eq!(interface.inheritance, None); + + assert_eq!(interface.members.body.len(), 1); + + let body = 
interface.members.body.pop().unwrap(); + + match body { + interface::InterfaceMember::Constructor(constructor) => { + let mut attributes = constructor.attributes.unwrap().body.list; + assert_eq!(attributes.len(), 1); + let attribute = attributes.pop().unwrap(); + + match attribute { + attribute::ExtendedAttribute::NoArgs(attribute) => { + assert_eq!((attribute.0).0, "Throws"); + } + _ => unreachable!(), + } + + let mut args = constructor.args.body.list; + assert_eq!(args.len(), 1); + let arg = args.pop().unwrap(); + + match arg { + argument::Argument::Single(arg) => { + assert!(arg.attributes.is_none()); + assert!(arg.optional.is_none()); + assert!(arg.type_.attributes.is_none()); + + match arg.type_.type_ { + types::Type::Single(types::SingleType::NonAny( + types::NonAnyType::Integer(_), + )) => {} + _ => unreachable!(), + } + } + _ => unreachable!(), + }; + + assert_eq!(constructor.constructor, term::Constructor); + } + _ => unreachable!(), + } + } + _ => unreachable!(), + } +}
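
The new `interface-constructor.webidl` fixture and the `interface_constructor` test above exercise the constructor-member support added in this weedle update. Below is a minimal sketch, not part of the vendored patch, of how a consumer would drive the same API, assuming the weedle 0.11 crate as updated here; the `Point` interface and its fields are made up for illustration.

```rust
// Sketch only -- assumes the vendored weedle 0.11 as patched above.
fn main() {
    // With the nom 5 migration, weedle::parse takes a plain &str
    // (no CompleteStr wrapper as in weedle 0.8).
    let defs = weedle::parse(
        "
        interface Point {
            constructor(double x, double y);
            readonly attribute double x;
            readonly attribute double y;
        };
        ",
    )
    .unwrap();

    for def in &defs {
        if let weedle::Definition::Interface(interface) = def {
            // `identifier.0` is the raw &str slice of the interface name.
            println!("interface {}", interface.identifier.0);
            for member in &interface.members.body {
                // Constructor members are parsed via the ConstructorInterfaceMember
                // variant introduced in this update.
                if let weedle::interface::InterfaceMember::Constructor(ctor) = member {
                    println!("  constructor with {} argument(s)", ctor.args.body.list.len());
                }
            }
        }
    }
}
```

As declared in the updated `lib.rs`, `parse` and the `Parse` trait now operate directly on `&'a str`, and parse failures are reported as `Err<(&str, ErrorKind)>` rather than nom 4's `Err<CompleteStr, u32>`.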