diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 0fc28da..81cd5af 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -19,7 +19,6 @@ jobs:
         - 4.05.0
         - 4.04.2
         - 4.03.0
-        - 4.02.3

     steps:
     - uses: actions/checkout@v2
diff --git a/Makefile b/Makefile
index 3f4c0c2..183a70b 100644
--- a/Makefile
+++ b/Makefile
@@ -97,8 +97,6 @@ package-docs : check-doc-prereqs docs

 .PHONY : check-doc-prereqs
 check-doc-prereqs :
-	@test $(OCAML_VERSION) -ne 402 \
-	|| (echo "\nocamldoc is broken in 4.02" && false)
 	@ocamlfind query lwt.unix > /dev/null 2> /dev/null \
 	|| (echo "\nLwt not installed" && false)
 	@ocamlfind query lambdasoup > /dev/null 2> /dev/null \
diff --git a/markup-lwt.opam b/markup-lwt.opam
index cf9b92e..21e5ac2 100644
--- a/markup-lwt.opam
+++ b/markup-lwt.opam
@@ -16,7 +16,7 @@ depends: [
   "dune" {>= "2.7.0"}
   "lwt"
   "markup"
-  "ocaml"
+  "ocaml" {>= "4.03.0"}
 ]

 build: [
diff --git a/markup.opam b/markup.opam
index d49407a..073f164 100644
--- a/markup.opam
+++ b/markup.opam
@@ -13,7 +13,7 @@ dev-repo: "git+https://github.com/aantron/markup.ml.git"
 depends: [
   "dune" {>= "2.7.0"}
-  "ocaml" {>= "4.02.0"}
+  "ocaml" {>= "4.03.0"}
   "uchar"
   "uutf" {>= "1.0.0"}
diff --git a/src/common.ml b/src/common.ml
index 9c12379..cbf60a4 100644
--- a/src/common.ml
+++ b/src/common.ml
@@ -1,19 +1,6 @@
 (* This file is part of Markup.ml, released under the MIT license. See
    LICENSE.md for details, or visit https://github.com/aantron/markup.ml. *)

-(* Aliases for reducing the number of deprecation warnings. *)
-module String =
-struct
-  include String
-  let lowercase = lowercase [@ocaml.warning "-3"]
-end
-
-module Char =
-struct
-  include Char
-  let lowercase = lowercase [@ocaml.warning "-3"]
-end
-
 type 'a cont = 'a -> unit
 type 'a cps = exn cont -> 'a cont -> unit
diff --git a/src/detect.ml b/src/detect.ml
index 383f407..9fe9fec 100644
--- a/src/detect.ml
+++ b/src/detect.ml
@@ -53,7 +53,7 @@ let guess_family_xml source throw k =

 (* 5.2 in the Encoding Candidate Recommendation. *)
 let normalize_name for_html s =
-  match String.lowercase (trim_string s) with
+  match String.lowercase_ascii (trim_string s) with
   | "unicode-1-1-utf-8"
   | "utf-8"
   | "utf8" -> "utf-8"
@@ -223,7 +223,7 @@ let meta_tag_prescan =
     let rec iterate () =
       next source throw (fun () -> k "") (function
         | c when c = quote -> k (Buffer.contents buffer)
-        | c -> add_utf_8 buffer (Char.code (Char.lowercase c)); iterate ())
+        | c -> add_utf_8 buffer (Char.code (Char.lowercase_ascii c)); iterate ())
     in
     iterate ()
   in
@@ -237,7 +237,7 @@ let meta_tag_prescan =
           push source c;
           k (Buffer.contents buffer)
         | c ->
-          add_utf_8 buffer (Char.code (Char.lowercase c));
+          add_utf_8 buffer (Char.code (Char.lowercase_ascii c));
           iterate ())
     in
     iterate ()
@@ -249,7 +249,7 @@ let meta_tag_prescan =
     next source throw (fun () -> k None) begin function
       | 'c' ->
         next_n 6 source throw begin fun l ->
-          match List.map Char.lowercase l with
+          match List.map Char.lowercase_ascii l with
           | ['h'; 'a'; 'r'; 's'; 'e'; 't'] ->
             skip_whitespace source throw (fun () ->
               next source throw (fun () -> k None) begin function
@@ -316,7 +316,7 @@ let meta_tag_prescan =
           k (Buffer.contents buffer)

         | Some c ->
-          add_utf_8 buffer (Char.code (Char.lowercase c));
+          add_utf_8 buffer (Char.code (Char.lowercase_ascii c));
           iterate ()
       end
     in
@@ -463,7 +463,7 @@ let meta_tag_prescan =
       | 'm' ->
         peek_n 5 source throw (fun l ->
-          match List.map Char.lowercase l with
+          match List.map Char.lowercase_ascii l with
          | ['m'; 'e'; 't'; 'a'; c] when is_whitespace c || c = '/' ->
            next_n 4 source throw (fun _ ->
              process_meta_tag scan)
diff --git a/src/html_parser.ml b/src/html_parser.ml
index ed7c4b4..16dd3fd 100644
--- a/src/html_parser.ml
+++ b/src/html_parser.ml
@@ -240,7 +240,7 @@ struct
          Lowercase the element name given by the user before analysis by the
          parser, to match this convention. [String.lowercase] is acceptable
          here because the API assumes the string [element] is in UTF-8. *)
-      k (`Fragment (String.lowercase element), None)
+      k (`Fragment (String.lowercase_ascii element), None)
     | Some (`Document as c) -> k (c, None)
     | None -> detect tokens throw k)
     (fun (detected_context, deciding_token) ->
@@ -2825,7 +2825,7 @@ let parse requested_context report (tokens, set_tokenizer_state, set_foreign) =
   | l, `End {name} -> (fun mode' ->
     match Stack.current_element open_elements with
-    | Some {element_name = _, name'} when String.lowercase name' = name ->
+    | Some {element_name = _, name'} when String.lowercase_ascii name' = name ->
       mode' ()
     | _ ->
       report l (`Unmatched_end_tag name) !throw (fun () ->
@@ -2834,7 +2834,7 @@ let parse requested_context report (tokens, set_tokenizer_state, set_foreign) =
         let rec scan = function
           | [] -> mode ()
          | {element_name = ns, name'}::_
-            when String.lowercase name' = name ->
+            when String.lowercase_ascii name' = name ->
            close_element ~ns l name mode
          | {element_name = `HTML, _}::_ -> force_html ()
          | _::rest -> scan rest
diff --git a/src/markup.ml b/src/markup.ml
index 3a6dddc..04ba209 100644
--- a/src/markup.ml
+++ b/src/markup.ml
@@ -27,21 +27,11 @@ struct
     | None -> raise Not_synchronous
     | Some v -> v

-  (* Used in to_cps to avoid the need for a match .. with | exception ..
-     expression, which would break compatibility with OCaml < 4.02. Flambda
-     seems to optimizes the allocation of these results away completely. There
-     is a small performance penalty when not using Flambda. *)
-  type 'a result = Value of 'a | Exn of exn
-
   let to_cps f =
    fun throw k ->
-    let result =
-      try Value (f ())
-      with exn -> Exn exn
-    in
-    match result with
-    | Value v -> k v
-    | Exn exn -> throw exn
+    match f () with
+    | v -> k v
+    | exception exn -> throw exn
 end
diff --git a/src/stream_io.ml b/src/stream_io.ml
index e89f728..c83cb7b 100644
--- a/src/stream_io.ml
+++ b/src/stream_io.ml
@@ -1,7 +1,6 @@
 (* This file is part of Markup.ml, released under the MIT license. See
    LICENSE.md for details, or visit https://github.com/aantron/markup.ml. *)

-open Common
 open Kstream

 let state_fold f initial =
diff --git a/src/xml_tokenizer.ml b/src/xml_tokenizer.ml
index 298e0da..27d5b61 100644
--- a/src/xml_tokenizer.ml
+++ b/src/xml_tokenizer.ml
@@ -382,7 +382,7 @@ let tokenize report resolve_reference (input, get_location) =
   and target_state () =
     next' pi finish_pi begin function
       | _, c when is_whitespace c ->
-        if String.lowercase (Buffer.contents target_buffer) = "xml" then
+        if String.lowercase_ascii (Buffer.contents target_buffer) = "xml" then
          xml_declaration_state ()
        else
          text_state ()
@@ -422,7 +422,7 @@ let tokenize report resolve_reference (input, get_location) =
        report l (`Bad_token (" k None)
      else
-        if String.lowercase (Buffer.contents target_buffer) = "xml" then
+        if String.lowercase_ascii (Buffer.contents target_buffer) = "xml" then
          finish_xml ()
        else
          k (Some
@@ -438,7 +438,7 @@ let tokenize report resolve_reference (input, get_location) =
       scan [] l
     in

-    let matches s (_, name, _) = String.lowercase name = s in
+    let matches s (_, name, _) = String.lowercase_ascii name = s in

     let version_valid s =
       String.length s = 3 &&
@@ -498,7 +498,7 @@ let tokenize report resolve_reference (input, get_location) =
            report l (`Bad_token (value, xml, "must be 'yes' or 'no'"))
              !throw (fun () ->
-              match String.lowercase value with
+              match String.lowercase_ascii value with
              | "yes" -> k (Some true)
              | "no" -> k (Some false)
              | _ -> k None))
diff --git a/test/test_detect.ml b/test/test_detect.ml
index 12094ac..80f4977 100644
--- a/test/test_detect.ml
+++ b/test/test_detect.ml
@@ -4,7 +4,6 @@

 open OUnit2
 open Test_support
-open Markup__Common
 open Markup__Kstream
 open Markup__Stream_io
 open Markup__Detect
diff --git a/test/test_utility.ml b/test/test_utility.ml
index 06198ae..d9c3a0f 100644
--- a/test/test_utility.ml
+++ b/test/test_utility.ml
@@ -4,7 +4,6 @@

 open OUnit2
 open Test_support
-open Markup__Common
 open! Markup

 module Kstream = Markup__Kstream
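
Note on the simplified [to_cps] in src/markup.ml: it relies on OCaml's exception
cases in match expressions, available since 4.02 (the removed comment mentions
this), so it is safe under the new 4.03 lower bound. Below is a minimal
standalone sketch of the idea, not taken from the Markup.ml sources; the
continuation names are illustrative only.

(* Dispatch on whether [f ()] returned normally or raised, without first
   wrapping the outcome in an intermediate Value/Exn variant as the old
   code did. *)
let to_cps f = fun throw k ->
  match f () with
  | v -> k v                    (* normal return: success continuation *)
  | exception exn -> throw exn  (* raised exception: error continuation *)

let () =
  let on_error exn = print_endline (Printexc.to_string exn) in
  let on_value n = print_endline (string_of_int n) in
  to_cps (fun () -> 2 * 3) on_error on_value;   (* prints 6 *)
  to_cps (fun () -> 1 / 0) on_error on_value    (* prints Division_by_zero *)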