From 53cafa9f3d0c8be33821fc7338b1da97e91d9cc6 Mon Sep 17 00:00:00 2001
From: Benau
Date: Wed, 25 Aug 2021 04:32:50 +0800
Subject: [PATCH] Convert .tgs with go libraries (and cgo) (telegram) (#1569)

This commit adds support for go/cgo tgs conversion when building with
`-tags cgo`. The default binaries are still "pure" Go and use the old way
of converting.

* Move lottie_convert.py conversion code to its own file
* Add optional libtgsconverter
* Update vendor
* Apply suggestions from code review
* Update bridge/helper/libtgsconverter.go

Co-authored-by: Wim
---
bridge/helper/helper.go | 66 - bridge/helper/libtgsconverter.go | 34 + bridge/helper/lottie_convert.go | 89 + bridge/telegram/handlers.go | 18 +- bridge/telegram/telegram.go | 8 +- go.mod | 1 + go.sum | 10 + vendor/github.com/Benau/go_rlottie/LICENSE | 24 + vendor/github.com/Benau/go_rlottie/README.md | 1 + .../binding_c_lottieanimation_capi.cpp | 284 + vendor/github.com/Benau/go_rlottie/config.h | 10 + .../Benau/go_rlottie/generate_from_rlottie.py | 122 + vendor/github.com/Benau/go_rlottie/go.mod | 1 + .../github.com/Benau/go_rlottie/go_rlottie.go | 56 + .../go_rlottie/lottie_lottieanimation.cpp | 457 + .../go_rlottie/lottie_lottiefiltermodel.h | 435 + .../Benau/go_rlottie/lottie_lottieitem.cpp | 1491 ++++ .../Benau/go_rlottie/lottie_lottieitem.h | 626 ++ .../go_rlottie/lottie_lottieitem_capi.cpp | 339 + .../Benau/go_rlottie/lottie_lottiekeypath.cpp | 86 + .../Benau/go_rlottie/lottie_lottiekeypath.h | 53 + .../Benau/go_rlottie/lottie_lottieloader.cpp | 169 + .../Benau/go_rlottie/lottie_lottiemodel.cpp | 390 + .../Benau/go_rlottie/lottie_lottiemodel.h | 1148 +++ .../Benau/go_rlottie/lottie_lottieparser.cpp | 2390 ++++++ .../go_rlottie/lottie_lottieproxymodel.cpp | 0 .../go_rlottie/lottie_rapidjson_allocators.h | 284 + .../lottie_rapidjson_cursorstreamwrapper.h | 78 + .../go_rlottie/lottie_rapidjson_document.h | 2732 ++++++ .../lottie_rapidjson_encodedstream.h | 299 + .../go_rlottie/lottie_rapidjson_encodings.h | 716 ++ .../go_rlottie/lottie_rapidjson_error_en.h | 74 + .../go_rlottie/lottie_rapidjson_error_error.h | 161 + .../lottie_rapidjson_filereadstream.h | 99 + .../lottie_rapidjson_filewritestream.h | 104 + .../Benau/go_rlottie/lottie_rapidjson_fwd.h | 151 + .../lottie_rapidjson_internal_biginteger.h | 290 + .../lottie_rapidjson_internal_clzll.h | 71 + .../lottie_rapidjson_internal_diyfp.h | 257 + .../lottie_rapidjson_internal_dtoa.h | 245 + .../lottie_rapidjson_internal_ieee754.h | 78 + .../lottie_rapidjson_internal_itoa.h | 308 + .../lottie_rapidjson_internal_meta.h | 186 + .../lottie_rapidjson_internal_pow10.h | 55 + .../lottie_rapidjson_internal_regex.h | 739 ++ .../lottie_rapidjson_internal_stack.h | 232 + .../lottie_rapidjson_internal_strfunc.h | 69 + .../lottie_rapidjson_internal_strtod.h | 290 + .../lottie_rapidjson_internal_swap.h | 46 + .../lottie_rapidjson_istreamwrapper.h | 128 + .../lottie_rapidjson_memorybuffer.h | 70 + .../lottie_rapidjson_memorystream.h | 71 + .../lottie_rapidjson_msinttypes_inttypes.h | 316 + .../lottie_rapidjson_msinttypes_stdint.h | 300 + .../lottie_rapidjson_ostreamwrapper.h | 81 + .../go_rlottie/lottie_rapidjson_pointer.h | 1415 ++++ .../lottie_rapidjson_prettywriter.h | 277 + .../go_rlottie/lottie_rapidjson_rapidjson.h | 692 ++ .../go_rlottie/lottie_rapidjson_reader.h | 2244 +++++ .../go_rlottie/lottie_rapidjson_schema.h | 2496 ++++++ .../go_rlottie/lottie_rapidjson_stream.h | 223 + .../lottie_rapidjson_stringbuffer.h | 121 + .../go_rlottie/lottie_rapidjson_writer.h | 710 ++
vendor/github.com/Benau/go_rlottie/rlottie.h | 525 ++ .../Benau/go_rlottie/rlottie_capi.h | 299 + .../Benau/go_rlottie/rlottiecommon.h | 231 + .../go_rlottie/vector_freetype_v_ft_math.cpp | 461 + .../go_rlottie/vector_freetype_v_ft_math.h | 438 + .../vector_freetype_v_ft_raster.cpp | 1423 ++++ .../go_rlottie/vector_freetype_v_ft_raster.h | 607 ++ .../vector_freetype_v_ft_stroker.cpp | 1936 +++++ .../go_rlottie/vector_freetype_v_ft_stroker.h | 319 + .../go_rlottie/vector_freetype_v_ft_types.h | 160 + .../vector_pixman_pixman-arm-neon-asm.S | 500 ++ .../vector_pixman_pixman-arm-neon-asm.h | 1126 +++ .../Benau/go_rlottie/vector_stb_stb_image.cpp | 59 + .../Benau/go_rlottie/vector_stb_stb_image.h | 7509 +++++++++++++++++ .../Benau/go_rlottie/vector_varenaalloc.cpp | 166 + .../Benau/go_rlottie/vector_varenaalloc.h | 232 + .../Benau/go_rlottie/vector_vbezier.cpp | 135 + .../Benau/go_rlottie/vector_vbezier.h | 139 + .../Benau/go_rlottie/vector_vbitmap.cpp | 219 + .../Benau/go_rlottie/vector_vbitmap.h | 94 + .../Benau/go_rlottie/vector_vbrush.cpp | 69 + .../Benau/go_rlottie/vector_vbrush.h | 92 + .../Benau/go_rlottie/vector_vcowptr.h | 126 + .../Benau/go_rlottie/vector_vdasher.cpp | 254 + .../Benau/go_rlottie/vector_vdasher.h | 65 + .../Benau/go_rlottie/vector_vdebug.cpp | 758 ++ .../Benau/go_rlottie/vector_vdebug.h | 187 + .../Benau/go_rlottie/vector_vdrawable.cpp | 130 + .../Benau/go_rlottie/vector_vdrawable.h | 93 + .../Benau/go_rlottie/vector_vdrawhelper.cpp | 767 ++ .../Benau/go_rlottie/vector_vdrawhelper.h | 270 + .../go_rlottie/vector_vdrawhelper_common.cpp | 190 + .../go_rlottie/vector_vdrawhelper_neon.cpp | 34 + .../go_rlottie/vector_vdrawhelper_sse2.cpp | 261 + .../Benau/go_rlottie/vector_velapsedtimer.cpp | 50 + .../Benau/go_rlottie/vector_velapsedtimer.h | 41 + .../Benau/go_rlottie/vector_vglobal.h | 302 + .../Benau/go_rlottie/vector_vimageloader.cpp | 220 + .../Benau/go_rlottie/vector_vimageloader.h | 26 + .../Benau/go_rlottie/vector_vinterpolator.cpp | 124 + .../Benau/go_rlottie/vector_vinterpolator.h | 88 + .../Benau/go_rlottie/vector_vline.h | 97 + .../Benau/go_rlottie/vector_vmatrix.cpp | 684 ++ .../Benau/go_rlottie/vector_vmatrix.h | 116 + .../Benau/go_rlottie/vector_vpainter.cpp | 172 + .../Benau/go_rlottie/vector_vpainter.h | 60 + .../Benau/go_rlottie/vector_vpath.cpp | 709 ++ .../Benau/go_rlottie/vector_vpath.h | 285 + .../Benau/go_rlottie/vector_vpathmesure.cpp | 67 + .../Benau/go_rlottie/vector_vpathmesure.h | 44 + .../Benau/go_rlottie/vector_vpoint.h | 210 + .../Benau/go_rlottie/vector_vraster.cpp | 563 ++ .../Benau/go_rlottie/vector_vraster.h | 50 + .../Benau/go_rlottie/vector_vrect.cpp | 68 + .../Benau/go_rlottie/vector_vrect.h | 172 + .../Benau/go_rlottie/vector_vrle.cpp | 748 ++ .../github.com/Benau/go_rlottie/vector_vrle.h | 121 + .../Benau/go_rlottie/vector_vsharedptr.h | 123 + .../Benau/go_rlottie/vector_vstackallocator.h | 156 + .../Benau/go_rlottie/vector_vtaskqueue.h | 87 + vendor/github.com/Benau/tgsconverter/LICENSE | 24 + .../tgsconverter/libtgsconverter/apng.go | 51 + .../Benau/tgsconverter/libtgsconverter/gif.go | 81 + .../libtgsconverter/imagewriter.go | 40 + .../Benau/tgsconverter/libtgsconverter/lib.go | 160 + .../Benau/tgsconverter/libtgsconverter/png.go | 30 + .../libtgsconverter/quantize_bucket.go | 119 + .../libtgsconverter/quantize_mediancut.go | 209 + .../tgsconverter/libtgsconverter/webp.go | 39 + .../go-decimal-to-rational/.travis.yml | 10 + .../av-elier/go-decimal-to-rational/LICENSE | 21 + .../av-elier/go-decimal-to-rational/README.md | 54 + 
.../av-elier/go-decimal-to-rational/frac.go | 44 + vendor/github.com/kettek/apng/LICENSE | 30 + vendor/github.com/kettek/apng/README.md | 114 + vendor/github.com/kettek/apng/apng.go | 13 + vendor/github.com/kettek/apng/frame.go | 47 + vendor/github.com/kettek/apng/paeth.go | 71 + vendor/github.com/kettek/apng/reader.go | 1141 +++ vendor/github.com/kettek/apng/writer.go | 713 ++ .../sizeofint/webpanimation/.gitignore | 1 + .../sizeofint/webpanimation/LICENSE | 27 + .../sizeofint/webpanimation/capi.go | 362 + .../sizeofint/webpanimation/config.h | 4 + .../sizeofint/webpanimation/dec_alpha_dec.c | 232 + .../sizeofint/webpanimation/dec_alphai_dec.h | 54 + .../sizeofint/webpanimation/dec_buffer_dec.c | 312 + .../sizeofint/webpanimation/dec_common_dec.h | 54 + .../sizeofint/webpanimation/dec_frame_dec.c | 803 ++ .../sizeofint/webpanimation/dec_idec_dec.c | 908 ++ .../sizeofint/webpanimation/dec_io_dec.c | 644 ++ .../sizeofint/webpanimation/dec_quant_dec.c | 115 + .../sizeofint/webpanimation/dec_tree_dec.c | 537 ++ .../sizeofint/webpanimation/dec_vp8_dec.c | 722 ++ .../sizeofint/webpanimation/dec_vp8_dec.h | 185 + .../sizeofint/webpanimation/dec_vp8i_dec.h | 319 + .../sizeofint/webpanimation/dec_vp8l_dec.c | 1738 ++++ .../sizeofint/webpanimation/dec_vp8li_dec.h | 135 + .../sizeofint/webpanimation/dec_webp_dec.c | 845 ++ .../sizeofint/webpanimation/dec_webpi_dec.h | 133 + .../webpanimation/demux_anim_decode.c | 457 + .../sizeofint/webpanimation/demux_demux.c | 972 +++ .../webpanimation/dsp_alpha_processing.c | 480 ++ .../dsp_alpha_processing_mips_dsp_r2.c | 228 + .../webpanimation/dsp_alpha_processing_neon.c | 191 + .../webpanimation/dsp_alpha_processing_sse2.c | 365 + .../dsp_alpha_processing_sse41.c | 92 + .../sizeofint/webpanimation/dsp_common_sse2.h | 194 + .../webpanimation/dsp_common_sse41.h | 132 + .../sizeofint/webpanimation/dsp_cost.c | 411 + .../sizeofint/webpanimation/dsp_cost_mips32.c | 154 + .../webpanimation/dsp_cost_mips_dsp_r2.c | 107 + .../sizeofint/webpanimation/dsp_cost_neon.c | 122 + .../sizeofint/webpanimation/dsp_cost_sse2.c | 119 + .../sizeofint/webpanimation/dsp_cpu.c | 252 + .../sizeofint/webpanimation/dsp_dec.c | 887 ++ .../webpanimation/dsp_dec_clip_tables.c | 369 + .../sizeofint/webpanimation/dsp_dec_mips32.c | 587 ++ .../webpanimation/dsp_dec_mips_dsp_r2.c | 994 +++ .../sizeofint/webpanimation/dsp_dec_msa.c | 1020 +++ .../sizeofint/webpanimation/dsp_dec_neon.c | 1663 ++++ .../sizeofint/webpanimation/dsp_dec_sse2.c | 1227 +++ .../sizeofint/webpanimation/dsp_dec_sse41.c | 46 + .../sizeofint/webpanimation/dsp_dsp.h | 686 ++ .../sizeofint/webpanimation/dsp_enc.c | 830 ++ .../sizeofint/webpanimation/dsp_enc_mips32.c | 677 ++ .../webpanimation/dsp_enc_mips_dsp_r2.c | 1517 ++++ .../sizeofint/webpanimation/dsp_enc_msa.c | 896 ++ .../sizeofint/webpanimation/dsp_enc_neon.c | 938 ++ .../sizeofint/webpanimation/dsp_enc_sse2.c | 1381 +++ .../sizeofint/webpanimation/dsp_enc_sse41.c | 339 + .../sizeofint/webpanimation/dsp_filters.c | 287 + .../webpanimation/dsp_filters_mips_dsp_r2.c | 402 + .../sizeofint/webpanimation/dsp_filters_msa.c | 202 + .../webpanimation/dsp_filters_neon.c | 329 + .../webpanimation/dsp_filters_sse2.c | 335 + .../sizeofint/webpanimation/dsp_lossless.c | 660 ++ .../sizeofint/webpanimation/dsp_lossless.h | 244 + .../webpanimation/dsp_lossless_common.h | 191 + .../webpanimation/dsp_lossless_enc.c | 929 ++ .../webpanimation/dsp_lossless_enc_mips32.c | 397 + .../dsp_lossless_enc_mips_dsp_r2.c | 281 + .../webpanimation/dsp_lossless_enc_msa.c | 148 + 
.../webpanimation/dsp_lossless_enc_neon.c | 144 + .../webpanimation/dsp_lossless_enc_sse2.c | 696 ++ .../webpanimation/dsp_lossless_enc_sse41.c | 148 + .../webpanimation/dsp_lossless_mips_dsp_r2.c | 696 ++ .../webpanimation/dsp_lossless_msa.c | 356 + .../webpanimation/dsp_lossless_neon.c | 641 ++ .../webpanimation/dsp_lossless_sse2.c | 708 ++ .../sizeofint/webpanimation/dsp_mips_macro.h | 200 + .../sizeofint/webpanimation/dsp_msa_macro.h | 1392 +++ .../sizeofint/webpanimation/dsp_neon.h | 101 + .../sizeofint/webpanimation/dsp_quant.h | 85 + .../sizeofint/webpanimation/dsp_rescaler.c | 250 + .../webpanimation/dsp_rescaler_mips32.c | 295 + .../webpanimation/dsp_rescaler_mips_dsp_r2.c | 314 + .../webpanimation/dsp_rescaler_msa.c | 443 + .../webpanimation/dsp_rescaler_neon.c | 192 + .../webpanimation/dsp_rescaler_sse2.c | 366 + .../sizeofint/webpanimation/dsp_ssim.c | 159 + .../sizeofint/webpanimation/dsp_ssim_sse2.c | 165 + .../sizeofint/webpanimation/dsp_upsampling.c | 327 + .../dsp_upsampling_mips_dsp_r2.c | 291 + .../webpanimation/dsp_upsampling_msa.c | 688 ++ .../webpanimation/dsp_upsampling_neon.c | 285 + .../webpanimation/dsp_upsampling_sse2.c | 267 + .../webpanimation/dsp_upsampling_sse41.c | 239 + .../sizeofint/webpanimation/dsp_yuv.c | 308 + .../sizeofint/webpanimation/dsp_yuv.h | 210 + .../sizeofint/webpanimation/dsp_yuv_mips32.c | 103 + .../webpanimation/dsp_yuv_mips_dsp_r2.c | 134 + .../sizeofint/webpanimation/dsp_yuv_neon.c | 288 + .../sizeofint/webpanimation/dsp_yuv_sse2.c | 874 ++ .../sizeofint/webpanimation/dsp_yuv_sse41.c | 613 ++ .../sizeofint/webpanimation/enc_alpha_enc.c | 443 + .../webpanimation/enc_analysis_enc.c | 475 ++ .../enc_backward_references_cost_enc.c | 790 ++ .../enc_backward_references_enc.c | 1030 +++ .../enc_backward_references_enc.h | 240 + .../sizeofint/webpanimation/enc_config_enc.c | 157 + .../sizeofint/webpanimation/enc_cost_enc.c | 342 + .../sizeofint/webpanimation/enc_cost_enc.h | 82 + .../sizeofint/webpanimation/enc_filter_enc.c | 235 + .../sizeofint/webpanimation/enc_frame_enc.c | 894 ++ .../webpanimation/enc_histogram_enc.c | 1250 +++ .../webpanimation/enc_histogram_enc.h | 128 + .../webpanimation/enc_iterator_enc.c | 459 + .../webpanimation/enc_near_lossless_enc.c | 151 + .../webpanimation/enc_picture_csp_enc.c | 1210 +++ .../sizeofint/webpanimation/enc_picture_enc.c | 296 + .../webpanimation/enc_picture_psnr_enc.c | 258 + .../webpanimation/enc_picture_rescale_enc.c | 309 + .../webpanimation/enc_picture_tools_enc.c | 273 + .../webpanimation/enc_predictor_enc.c | 772 ++ .../sizeofint/webpanimation/enc_quant_enc.c | 1372 +++ .../sizeofint/webpanimation/enc_syntax_enc.c | 388 + .../sizeofint/webpanimation/enc_token_enc.c | 262 + .../sizeofint/webpanimation/enc_tree_enc.c | 504 ++ .../sizeofint/webpanimation/enc_vp8i_enc.h | 518 ++ .../sizeofint/webpanimation/enc_vp8l_enc.c | 1930 +++++ .../sizeofint/webpanimation/enc_vp8li_enc.h | 118 + .../sizeofint/webpanimation/enc_webp_enc.c | 410 + .../webpanimation/generate_from_libwebp.py | 73 + .../github.com/sizeofint/webpanimation/go.mod | 3 + .../sizeofint/webpanimation/mux_anim_encode.c | 1581 ++++ .../sizeofint/webpanimation/mux_animi.h | 43 + .../sizeofint/webpanimation/mux_muxedit.c | 657 ++ .../sizeofint/webpanimation/mux_muxi.h | 234 + .../sizeofint/webpanimation/mux_muxinternal.c | 548 ++ .../sizeofint/webpanimation/mux_muxread.c | 556 ++ .../sizeofint/webpanimation/readme.md | 11 + .../utils_bit_reader_inl_utils.h | 195 + .../webpanimation/utils_bit_reader_utils.c | 298 + 
.../webpanimation/utils_bit_reader_utils.h | 194 + .../webpanimation/utils_bit_writer_utils.c | 347 + .../webpanimation/utils_bit_writer_utils.h | 154 + .../webpanimation/utils_color_cache_utils.c | 49 + .../webpanimation/utils_color_cache_utils.h | 89 + .../webpanimation/utils_endian_inl_utils.h | 93 + .../webpanimation/utils_filters_utils.c | 76 + .../webpanimation/utils_filters_utils.h | 32 + .../utils_huffman_encode_utils.c | 417 + .../utils_huffman_encode_utils.h | 60 + .../webpanimation/utils_huffman_utils.c | 235 + .../webpanimation/utils_huffman_utils.h | 90 + .../utils_quant_levels_dec_utils.c | 291 + .../utils_quant_levels_dec_utils.h | 35 + .../webpanimation/utils_quant_levels_utils.c | 140 + .../webpanimation/utils_quant_levels_utils.h | 36 + .../webpanimation/utils_random_utils.c | 43 + .../webpanimation/utils_random_utils.h | 63 + .../webpanimation/utils_rescaler_utils.c | 148 + .../webpanimation/utils_rescaler_utils.h | 101 + .../webpanimation/utils_thread_utils.c | 369 + .../webpanimation/utils_thread_utils.h | 90 + .../sizeofint/webpanimation/utils_utils.c | 335 + .../sizeofint/webpanimation/utils_utils.h | 178 + .../sizeofint/webpanimation/webp_decode.h | 503 ++ .../sizeofint/webpanimation/webp_demux.h | 363 + .../sizeofint/webpanimation/webp_encode.h | 552 ++ .../webpanimation/webp_format_constants.h | 87 + .../sizeofint/webpanimation/webp_mux.h | 530 ++ .../sizeofint/webpanimation/webp_mux_types.h | 98 + .../sizeofint/webpanimation/webp_types.h | 68 + .../sizeofint/webpanimation/webpanimation.go | 109 + vendor/modules.txt | 11 + 310 files changed, 121526 insertions(+), 85 deletions(-) create mode 100644 bridge/helper/libtgsconverter.go create mode 100644 bridge/helper/lottie_convert.go create mode 100644 vendor/github.com/Benau/go_rlottie/LICENSE create mode 100644 vendor/github.com/Benau/go_rlottie/README.md create mode 100644 vendor/github.com/Benau/go_rlottie/binding_c_lottieanimation_capi.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/config.h create mode 100644 vendor/github.com/Benau/go_rlottie/generate_from_rlottie.py create mode 100644 vendor/github.com/Benau/go_rlottie/go.mod create mode 100644 vendor/github.com/Benau/go_rlottie/go_rlottie.go create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_lottieanimation.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_lottiefiltermodel.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_lottieitem.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_lottieitem.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_lottieitem_capi.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_lottiekeypath.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_lottiekeypath.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_lottieloader.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_lottiemodel.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_lottiemodel.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_lottieparser.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_lottieproxymodel.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_allocators.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_cursorstreamwrapper.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_document.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_encodedstream.h create mode 100644 
vendor/github.com/Benau/go_rlottie/lottie_rapidjson_encodings.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_error_en.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_error_error.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_filereadstream.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_filewritestream.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_fwd.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_biginteger.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_clzll.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_diyfp.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_dtoa.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_ieee754.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_itoa.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_meta.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_pow10.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_regex.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_stack.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_strfunc.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_strtod.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_swap.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_istreamwrapper.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_memorybuffer.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_memorystream.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_msinttypes_inttypes.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_msinttypes_stdint.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_ostreamwrapper.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_pointer.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_prettywriter.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_rapidjson.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_reader.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_schema.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_stream.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_stringbuffer.h create mode 100644 vendor/github.com/Benau/go_rlottie/lottie_rapidjson_writer.h create mode 100644 vendor/github.com/Benau/go_rlottie/rlottie.h create mode 100644 vendor/github.com/Benau/go_rlottie/rlottie_capi.h create mode 100644 vendor/github.com/Benau/go_rlottie/rlottiecommon.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_math.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_math.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_raster.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_raster.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_stroker.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_stroker.h create mode 100644 
vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_types.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_pixman_pixman-arm-neon-asm.S create mode 100644 vendor/github.com/Benau/go_rlottie/vector_pixman_pixman-arm-neon-asm.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_stb_stb_image.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/vector_stb_stb_image.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_varenaalloc.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/vector_varenaalloc.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vbezier.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vbezier.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vbitmap.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vbitmap.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vbrush.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vbrush.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vcowptr.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vdasher.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vdasher.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vdebug.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vdebug.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vdrawable.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vdrawable.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vdrawhelper.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vdrawhelper.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vdrawhelper_common.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vdrawhelper_neon.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vdrawhelper_sse2.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/vector_velapsedtimer.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/vector_velapsedtimer.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vglobal.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vimageloader.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vimageloader.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vinterpolator.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vinterpolator.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vline.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vmatrix.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vmatrix.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vpainter.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vpainter.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vpath.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vpath.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vpathmesure.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vpathmesure.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vpoint.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vraster.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vraster.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vrect.cpp create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vrect.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vrle.cpp create mode 100644 
vendor/github.com/Benau/go_rlottie/vector_vrle.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vsharedptr.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vstackallocator.h create mode 100644 vendor/github.com/Benau/go_rlottie/vector_vtaskqueue.h create mode 100644 vendor/github.com/Benau/tgsconverter/LICENSE create mode 100644 vendor/github.com/Benau/tgsconverter/libtgsconverter/apng.go create mode 100644 vendor/github.com/Benau/tgsconverter/libtgsconverter/gif.go create mode 100644 vendor/github.com/Benau/tgsconverter/libtgsconverter/imagewriter.go create mode 100644 vendor/github.com/Benau/tgsconverter/libtgsconverter/lib.go create mode 100644 vendor/github.com/Benau/tgsconverter/libtgsconverter/png.go create mode 100644 vendor/github.com/Benau/tgsconverter/libtgsconverter/quantize_bucket.go create mode 100644 vendor/github.com/Benau/tgsconverter/libtgsconverter/quantize_mediancut.go create mode 100644 vendor/github.com/Benau/tgsconverter/libtgsconverter/webp.go create mode 100644 vendor/github.com/av-elier/go-decimal-to-rational/.travis.yml create mode 100644 vendor/github.com/av-elier/go-decimal-to-rational/LICENSE create mode 100644 vendor/github.com/av-elier/go-decimal-to-rational/README.md create mode 100644 vendor/github.com/av-elier/go-decimal-to-rational/frac.go create mode 100644 vendor/github.com/kettek/apng/LICENSE create mode 100644 vendor/github.com/kettek/apng/README.md create mode 100644 vendor/github.com/kettek/apng/apng.go create mode 100644 vendor/github.com/kettek/apng/frame.go create mode 100644 vendor/github.com/kettek/apng/paeth.go create mode 100644 vendor/github.com/kettek/apng/reader.go create mode 100644 vendor/github.com/kettek/apng/writer.go create mode 100644 vendor/github.com/sizeofint/webpanimation/.gitignore create mode 100644 vendor/github.com/sizeofint/webpanimation/LICENSE create mode 100644 vendor/github.com/sizeofint/webpanimation/capi.go create mode 100644 vendor/github.com/sizeofint/webpanimation/config.h create mode 100644 vendor/github.com/sizeofint/webpanimation/dec_alpha_dec.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dec_alphai_dec.h create mode 100644 vendor/github.com/sizeofint/webpanimation/dec_buffer_dec.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dec_common_dec.h create mode 100644 vendor/github.com/sizeofint/webpanimation/dec_frame_dec.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dec_idec_dec.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dec_io_dec.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dec_quant_dec.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dec_tree_dec.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dec_vp8_dec.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dec_vp8_dec.h create mode 100644 vendor/github.com/sizeofint/webpanimation/dec_vp8i_dec.h create mode 100644 vendor/github.com/sizeofint/webpanimation/dec_vp8l_dec.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dec_vp8li_dec.h create mode 100644 vendor/github.com/sizeofint/webpanimation/dec_webp_dec.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dec_webpi_dec.h create mode 100644 vendor/github.com/sizeofint/webpanimation/demux_anim_decode.c create mode 100644 vendor/github.com/sizeofint/webpanimation/demux_demux.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_alpha_processing.c create mode 100644 
vendor/github.com/sizeofint/webpanimation/dsp_alpha_processing_mips_dsp_r2.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_alpha_processing_neon.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_alpha_processing_sse2.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_alpha_processing_sse41.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_common_sse2.h create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_common_sse41.h create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_cost.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_cost_mips32.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_cost_mips_dsp_r2.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_cost_neon.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_cost_sse2.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_cpu.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_dec.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_dec_clip_tables.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_dec_mips32.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_dec_mips_dsp_r2.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_dec_msa.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_dec_neon.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_dec_sse2.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_dec_sse41.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_dsp.h create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_enc.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_enc_mips32.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_enc_mips_dsp_r2.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_enc_msa.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_enc_neon.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_enc_sse2.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_enc_sse41.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_filters.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_filters_mips_dsp_r2.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_filters_msa.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_filters_neon.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_filters_sse2.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_lossless.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_lossless.h create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_lossless_common.h create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc_mips32.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc_mips_dsp_r2.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc_msa.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc_neon.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc_sse2.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc_sse41.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_lossless_mips_dsp_r2.c create mode 100644 
vendor/github.com/sizeofint/webpanimation/dsp_lossless_msa.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_lossless_neon.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_lossless_sse2.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_mips_macro.h create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_msa_macro.h create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_neon.h create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_quant.h create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_rescaler.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_rescaler_mips32.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_rescaler_mips_dsp_r2.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_rescaler_msa.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_rescaler_neon.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_rescaler_sse2.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_ssim.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_ssim_sse2.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_upsampling.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_upsampling_mips_dsp_r2.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_upsampling_msa.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_upsampling_neon.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_upsampling_sse2.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_upsampling_sse41.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_yuv.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_yuv.h create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_yuv_mips32.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_yuv_mips_dsp_r2.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_yuv_neon.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_yuv_sse2.c create mode 100644 vendor/github.com/sizeofint/webpanimation/dsp_yuv_sse41.c create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_alpha_enc.c create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_analysis_enc.c create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_backward_references_cost_enc.c create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_backward_references_enc.c create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_backward_references_enc.h create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_config_enc.c create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_cost_enc.c create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_cost_enc.h create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_filter_enc.c create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_frame_enc.c create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_histogram_enc.c create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_histogram_enc.h create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_iterator_enc.c create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_near_lossless_enc.c create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_picture_csp_enc.c create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_picture_enc.c create mode 100644 
vendor/github.com/sizeofint/webpanimation/enc_picture_psnr_enc.c create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_picture_rescale_enc.c create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_picture_tools_enc.c create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_predictor_enc.c create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_quant_enc.c create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_syntax_enc.c create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_token_enc.c create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_tree_enc.c create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_vp8i_enc.h create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_vp8l_enc.c create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_vp8li_enc.h create mode 100644 vendor/github.com/sizeofint/webpanimation/enc_webp_enc.c create mode 100644 vendor/github.com/sizeofint/webpanimation/generate_from_libwebp.py create mode 100644 vendor/github.com/sizeofint/webpanimation/go.mod create mode 100644 vendor/github.com/sizeofint/webpanimation/mux_anim_encode.c create mode 100644 vendor/github.com/sizeofint/webpanimation/mux_animi.h create mode 100644 vendor/github.com/sizeofint/webpanimation/mux_muxedit.c create mode 100644 vendor/github.com/sizeofint/webpanimation/mux_muxi.h create mode 100644 vendor/github.com/sizeofint/webpanimation/mux_muxinternal.c create mode 100644 vendor/github.com/sizeofint/webpanimation/mux_muxread.c create mode 100644 vendor/github.com/sizeofint/webpanimation/readme.md create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_bit_reader_inl_utils.h create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_bit_reader_utils.c create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_bit_reader_utils.h create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_bit_writer_utils.c create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_bit_writer_utils.h create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_color_cache_utils.c create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_color_cache_utils.h create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_endian_inl_utils.h create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_filters_utils.c create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_filters_utils.h create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_huffman_encode_utils.c create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_huffman_encode_utils.h create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_huffman_utils.c create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_huffman_utils.h create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_quant_levels_dec_utils.c create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_quant_levels_dec_utils.h create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_quant_levels_utils.c create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_quant_levels_utils.h create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_random_utils.c create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_random_utils.h create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_rescaler_utils.c create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_rescaler_utils.h create mode 100644 
vendor/github.com/sizeofint/webpanimation/utils_thread_utils.c create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_thread_utils.h create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_utils.c create mode 100644 vendor/github.com/sizeofint/webpanimation/utils_utils.h create mode 100644 vendor/github.com/sizeofint/webpanimation/webp_decode.h create mode 100644 vendor/github.com/sizeofint/webpanimation/webp_demux.h create mode 100644 vendor/github.com/sizeofint/webpanimation/webp_encode.h create mode 100644 vendor/github.com/sizeofint/webpanimation/webp_format_constants.h create mode 100644 vendor/github.com/sizeofint/webpanimation/webp_mux.h create mode 100644 vendor/github.com/sizeofint/webpanimation/webp_mux_types.h create mode 100644 vendor/github.com/sizeofint/webpanimation/webp_types.h create mode 100644 vendor/github.com/sizeofint/webpanimation/webpanimation.go diff --git a/bridge/helper/helper.go b/bridge/helper/helper.go index 1bdd8a40..581a2c43 100644 --- a/bridge/helper/helper.go +++ b/bridge/helper/helper.go @@ -5,10 +5,7 @@ import ( "fmt" "image/png" "io" - "io/ioutil" "net/http" - "os" - "os/exec" "regexp" "strings" "time" @@ -239,66 +236,3 @@ func ConvertWebPToPNG(data *[]byte) error { *data = w.Bytes() return nil } - -// CanConvertTgsToX Checks whether the external command necessary for ConvertTgsToX works. -func CanConvertTgsToX() error { - // We depend on the fact that `lottie_convert.py --help` has exit status 0. - // Hyrum's Law predicted this, and Murphy's Law predicts that this will break eventually. - // However, there is no alternative like `lottie_convert.py --is-properly-installed` - cmd := exec.Command("lottie_convert.py", "--help") - return cmd.Run() -} - -// ConvertTgsToWebP convert input data (which should be tgs format) to WebP format -// This relies on an external command, which is ugly, but works. -func ConvertTgsToX(data *[]byte, outputFormat string, logger *logrus.Entry) error { - // lottie can't handle input from a pipe, so write to a temporary file: - tmpInFile, err := ioutil.TempFile(os.TempDir(), "matterbridge-lottie-input-*.tgs") - if err != nil { - return err - } - tmpInFileName := tmpInFile.Name() - defer func() { - if removeErr := os.Remove(tmpInFileName); removeErr != nil { - logger.Errorf("Could not delete temporary (input) file %s: %v", tmpInFileName, removeErr) - } - }() - // lottie can handle writing to a pipe, but there is no way to do that platform-independently. - // "/dev/stdout" won't work on Windows, and "-" upsets Cairo for some reason. So we need another file: - tmpOutFile, err := ioutil.TempFile(os.TempDir(), "matterbridge-lottie-output-*.data") - if err != nil { - return err - } - tmpOutFileName := tmpOutFile.Name() - defer func() { - if removeErr := os.Remove(tmpOutFileName); removeErr != nil { - logger.Errorf("Could not delete temporary (output) file %s: %v", tmpOutFileName, removeErr) - } - }() - - if _, writeErr := tmpInFile.Write(*data); writeErr != nil { - return writeErr - } - // Must close before calling lottie to avoid data races: - if closeErr := tmpInFile.Close(); closeErr != nil { - return closeErr - } - - // Call lottie to transform: - cmd := exec.Command("lottie_convert.py", "--input-format", "lottie", "--output-format", outputFormat, tmpInFileName, tmpOutFileName) - cmd.Stdout = nil - cmd.Stderr = nil - // NB: lottie writes progress into to stderr in all cases. - _, stderr := cmd.Output() - if stderr != nil { - // 'stderr' already contains some parts of Stderr, because it was set to 'nil'. 
-		return stderr
-	}
-	dataContents, err := ioutil.ReadFile(tmpOutFileName)
-	if err != nil {
-		return err
-	}
-
-	*data = dataContents
-	return nil
-}
diff --git a/bridge/helper/libtgsconverter.go b/bridge/helper/libtgsconverter.go
new file mode 100644
index 00000000..7181da90
--- /dev/null
+++ b/bridge/helper/libtgsconverter.go
@@ -0,0 +1,34 @@
+// +build cgo
+
+package helper
+
+import (
+	"fmt"
+	"github.com/Benau/tgsconverter/libtgsconverter"
+	"github.com/sirupsen/logrus"
+)
+
+func CanConvertTgsToX() error {
+	return nil
+}
+
+// ConvertTgsToX converts input data (which should be in tgs format) to any format supported by libtgsconverter
+func ConvertTgsToX(data *[]byte, outputFormat string, logger *logrus.Entry) error {
+	options := libtgsconverter.NewConverterOptions()
+	options.SetExtension(outputFormat)
+	blob, err := libtgsconverter.ImportFromData(*data, options)
+	if err != nil {
+		return fmt.Errorf("failed to run libtgsconverter.ImportFromData: %s", err.Error())
+	}
+
+	*data = blob
+	return nil
+}
+
+func SupportsFormat(format string) bool {
+	return libtgsconverter.SupportsExtension(format)
+}
+
+func LottieBackend() string {
+	return "libtgsconverter"
+}
diff --git a/bridge/helper/lottie_convert.go b/bridge/helper/lottie_convert.go
new file mode 100644
index 00000000..c6ae0a7a
--- /dev/null
+++ b/bridge/helper/lottie_convert.go
@@ -0,0 +1,86 @@
+// +build !cgo
+
+package helper
+
+import (
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"github.com/sirupsen/logrus"
+)
+
+// CanConvertTgsToX checks whether the external command necessary for ConvertTgsToX works.
+func CanConvertTgsToX() error {
+	// We depend on the fact that `lottie_convert.py --help` has exit status 0.
+	// Hyrum's Law predicted this, and Murphy's Law predicts that this will break eventually.
+	// However, there is no alternative like `lottie_convert.py --is-properly-installed`
+	cmd := exec.Command("lottie_convert.py", "--help")
+	return cmd.Run()
+}
+
+// ConvertTgsToX converts input data (which should be in tgs format) to the requested output format.
+// This relies on an external command, which is ugly, but works.
+func ConvertTgsToX(data *[]byte, outputFormat string, logger *logrus.Entry) error {
+	// lottie can't handle input from a pipe, so write to a temporary file:
+	tmpInFile, err := ioutil.TempFile(os.TempDir(), "matterbridge-lottie-input-*.tgs")
+	if err != nil {
+		return err
+	}
+	tmpInFileName := tmpInFile.Name()
+	defer func() {
+		if removeErr := os.Remove(tmpInFileName); removeErr != nil {
+			logger.Errorf("Could not delete temporary (input) file %s: %v", tmpInFileName, removeErr)
+		}
+	}()
+	// lottie can handle writing to a pipe, but there is no way to do that platform-independently.
+	// "/dev/stdout" won't work on Windows, and "-" upsets Cairo for some reason. So we need another file:
+	tmpOutFile, err := ioutil.TempFile(os.TempDir(), "matterbridge-lottie-output-*.data")
+	if err != nil {
+		return err
+	}
+	tmpOutFileName := tmpOutFile.Name()
+	defer func() {
+		if removeErr := os.Remove(tmpOutFileName); removeErr != nil {
+			logger.Errorf("Could not delete temporary (output) file %s: %v", tmpOutFileName, removeErr)
+		}
+	}()
+
+	if _, writeErr := tmpInFile.Write(*data); writeErr != nil {
+		return writeErr
+	}
+	// Must close before calling lottie to avoid data races:
+	if closeErr := tmpInFile.Close(); closeErr != nil {
+		return closeErr
+	}
+
+	// Call lottie to transform:
+	cmd := exec.Command("lottie_convert.py", "--input-format", "lottie", "--output-format", outputFormat, tmpInFileName, tmpOutFileName)
+	cmd.Stdout = nil
+	cmd.Stderr = nil
+	// NB: lottie writes progress info to stderr in all cases.
+	_, stderr := cmd.Output()
+	if stderr != nil {
+		// Because cmd.Stderr was left nil, the error returned by Output() already includes the captured stderr.
+		return stderr
+	}
+	dataContents, err := ioutil.ReadFile(tmpOutFileName)
+	if err != nil {
+		return err
+	}
+
+	*data = dataContents
+	return nil
+}
+
+func SupportsFormat(format string) bool {
+	switch format {
+	case "png", "webp":
+		return true
+	default:
+		return false
+	}
+}
+
+func LottieBackend() string {
+	return "lottie_convert.py"
+}
diff --git a/bridge/telegram/handlers.go b/bridge/telegram/handlers.go
index a93c71bb..84881f7f 100644
--- a/bridge/telegram/handlers.go
+++ b/bridge/telegram/handlers.go
@@ -220,20 +220,10 @@ func (b *Btelegram) handleDownloadAvatar(userid int, channel string) {
 }
 
 func (b *Btelegram) maybeConvertTgs(name *string, data *[]byte) {
-	var format string
-	switch b.GetString("MediaConvertTgs") {
-	case FormatWebp:
-		b.Log.Debugf("Tgs to WebP conversion enabled, converting %v", name)
-		format = FormatWebp
-	case FormatPng:
-		// The WebP to PNG converter can't handle animated webp files yet,
-		// and I'm not going to write a path for x/image/webp.
-		// The error message would be:
-		// conversion failed: webp: non-Alpha VP8X is not implemented
-		// So instead, we tell lottie to directly go to PNG.
-		b.Log.Debugf("Tgs to PNG conversion enabled, converting %v", name)
-		format = FormatPng
-	default:
+	format := b.GetString("MediaConvertTgs")
+	if helper.SupportsFormat(format) {
+		b.Log.Debugf("Format supported by %s, converting %v", helper.LottieBackend(), name)
+	} else {
 		// Otherwise, no conversion was requested. Trying to run the usual webp
 		// converter would fail, because '.tgs.webp' is actually a gzipped JSON
 		// file, and has nothing to do with WebP.
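The two helper files above expose the same four functions, and the build decides which implementation is compiled in: `go build -tags cgo` selects the libtgsconverter backend, while a plain `go build` keeps the pure-Go binary that shells out to lottie_convert.py. Callers such as the Telegram bridge below are identical either way. A minimal sketch of driving that API outside the bridge (the sticker file paths are illustrative, not part of the patch):

	package main

	import (
		"io/ioutil"
		"log"

		"github.com/42wim/matterbridge/bridge/helper"
		"github.com/sirupsen/logrus"
	)

	func main() {
		// With -tags cgo this is a no-op; otherwise it probes for lottie_convert.py.
		if err := helper.CanConvertTgsToX(); err != nil {
			log.Fatalf("%s is unavailable: %v", helper.LottieBackend(), err)
		}
		if !helper.SupportsFormat("webp") {
			log.Fatalf("%s cannot produce webp", helper.LottieBackend())
		}
		data, err := ioutil.ReadFile("sticker.tgs") // illustrative input path
		if err != nil {
			log.Fatal(err)
		}
		// ConvertTgsToX replaces the buffer contents in place on success.
		if err := helper.ConvertTgsToX(&data, "webp", logrus.NewEntry(logrus.New())); err != nil {
			log.Fatal(err)
		}
		if err := ioutil.WriteFile("sticker.webp", data, 0644); err != nil {
			log.Fatal(err)
		}
	}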
diff --git a/bridge/telegram/telegram.go b/bridge/telegram/telegram.go index 0f08a45b..199a76ab 100644 --- a/bridge/telegram/telegram.go +++ b/bridge/telegram/telegram.go @@ -17,8 +17,6 @@ const ( HTMLFormat = "HTML" HTMLNick = "htmlnick" MarkdownV2 = "MarkdownV2" - FormatPng = "png" - FormatWebp = "webp" ) type Btelegram struct { @@ -32,10 +30,10 @@ func New(cfg *bridge.Config) bridge.Bridger { if tgsConvertFormat != "" { err := helper.CanConvertTgsToX() if err != nil { - log.Fatalf("Telegram bridge configured to convert .tgs files to '%s', but lottie does not appear to work:\n%#v", tgsConvertFormat, err) + log.Fatalf("Telegram bridge configured to convert .tgs files to '%s', but %s does not appear to work:\n%#v", tgsConvertFormat, helper.LottieBackend(), err) } - if tgsConvertFormat != FormatPng && tgsConvertFormat != FormatWebp { - log.Fatalf("Telegram bridge configured to convert .tgs files to '%s', but only '%s' and '%s' are supported.", FormatPng, FormatWebp, tgsConvertFormat) + if !helper.SupportsFormat(tgsConvertFormat) { + log.Fatalf("Telegram bridge configured to convert .tgs files to '%s', but %s doesn't support it.", tgsConvertFormat, helper.LottieBackend()) } } return &Btelegram{Config: cfg, avatarMap: make(map[string]string)} diff --git a/go.mod b/go.mod index f1802fd0..143c3e40 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/42wim/matterbridge require ( github.com/42wim/go-gitter v0.0.0-20170828205020-017310c2d557 github.com/Baozisoftware/qrcode-terminal-go v0.0.0-20170407111555-c0650d8dff0f + github.com/Benau/tgsconverter v0.0.0-20210809170556-99f4a4f6337f github.com/Jeffail/gabs v1.4.0 // indirect github.com/Philipp15b/go-steam v1.0.1-0.20200727090957-6ae9b3c0a560 github.com/Rhymen/go-whatsapp v0.1.2-0.20210615184944-2b8a3e9b8aa2 diff --git a/go.sum b/go.sum index a3f9b9ee..04ecbc45 100644 --- a/go.sum +++ b/go.sum @@ -56,6 +56,10 @@ github.com/Azure/azure-sdk-for-go v26.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9mo github.com/Azure/go-autorest v11.5.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Baozisoftware/qrcode-terminal-go v0.0.0-20170407111555-c0650d8dff0f h1:2dk3eOnYllh+wUOuDhOoC2vUVoJF/5z478ryJ+wzEII= github.com/Baozisoftware/qrcode-terminal-go v0.0.0-20170407111555-c0650d8dff0f/go.mod h1:4a58ifQTEe2uwwsaqbh3i2un5/CBPg+At/qHpt18Tmk= +github.com/Benau/go_rlottie v0.0.0-20210807002906-98c1b2421989 h1:+wrfJITuBoQOE6ST4k3c4EortNVQXVhfAbwt0M/j0+Y= +github.com/Benau/go_rlottie v0.0.0-20210807002906-98c1b2421989/go.mod h1:aDWSWjsayFyGTvHZH3v4ijGXEBe51xcEkAK+NUWeOeo= +github.com/Benau/tgsconverter v0.0.0-20210809170556-99f4a4f6337f h1:aUkwZDEMJIGRcWlSDifSLoKG37UCOH/DPeG52/xwois= +github.com/Benau/tgsconverter v0.0.0-20210809170556-99f4a4f6337f/go.mod h1:AQiQKKI/YIIctvDt3hI3c1S05/JXMM7v/sQcRd0paVE= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -118,6 +122,8 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV github.com/armon/go-metrics v0.3.4/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= 
+github.com/av-elier/go-decimal-to-rational v0.0.0-20191127152832-89e6aad02ecf h1:csfEAyvOG4/498Q4SyF48ysFqQC9ESj3o8ppRtg+Rog= +github.com/av-elier/go-decimal-to-rational v0.0.0-20191127152832-89e6aad02ecf/go.mod h1:POPnOeaYF7U9o3PjLTb9icRfEOxjBNLRXh9BLximJGM= github.com/avct/uasurfer v0.0.0-20191028135549-26b5daa857f1/go.mod h1:noBAuukeYOXa0aXGqxr24tADqkwDO2KRD15FsuaZ5a8= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.19.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= @@ -501,6 +507,8 @@ github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4 github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U= github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw= github.com/kataras/pio v0.0.0-20190103105442-ea782b38602d/go.mod h1:NV88laa9UiiDuX9AhMbDPkGYSPugBOV6yTZB1l2K9Z0= +github.com/kettek/apng v0.0.0-20191108220231-414630eed80f h1:dnCYnTSltLuPMfc7dMrkz2uBUcEf/OFBR8yRh3oRT98= +github.com/kettek/apng v0.0.0-20191108220231-414630eed80f/go.mod h1:x78/VRQYKuCftMWS0uK5e+F5RJ7S4gSlESRWI0Prl6Q= github.com/keybase/go-keybase-chat-bot v0.0.0-20200505163032-5cacf52379da h1:LK+8uBG3kNikj664cjFt88RBmuGmonxkXv2rUVfbqz4= github.com/keybase/go-keybase-chat-bot v0.0.0-20200505163032-5cacf52379da/go.mod h1:xJA+X9ZVyT/irGldcb7q1XnJBq5F9s5H9h2L44Y+poY= github.com/keybase/go-ps v0.0.0-20190827175125-91aafc93ba19/go.mod h1:hY+WOq6m2FpbvyrI93sMaypsttvaIL5nhVR92dTMUcQ= @@ -838,6 +846,8 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sizeofint/webpanimation v0.0.0-20210809145948-1d2b32119882 h1:A7o8tOERTtpD/poS+2VoassCjXpjHn916luXbf5QKD0= +github.com/sizeofint/webpanimation v0.0.0-20210809145948-1d2b32119882/go.mod h1:5IwJoz9Pw7JsrCN4/skkxUtSWT7myuUPLhCgv6Q5vvQ= github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0= github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M= github.com/slack-go/slack v0.9.3 h1:H1UwldF1zWQakjaSymbHMgG3Pg1BiClez/a7JRLdxKc= diff --git a/vendor/github.com/Benau/go_rlottie/LICENSE b/vendor/github.com/Benau/go_rlottie/LICENSE new file mode 100644 index 00000000..86fd0417 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/LICENSE @@ -0,0 +1,24 @@ +The MIT License + +Copyright (c) 2021, (see AUTHORS) + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/Benau/go_rlottie/README.md b/vendor/github.com/Benau/go_rlottie/README.md new file mode 100644 index 00000000..68fbb245 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/README.md @@ -0,0 +1 @@ +Go binding for https://github.com/Samsung/rlottie, example at https://github.com/Benau/tgsconverter diff --git a/vendor/github.com/Benau/go_rlottie/binding_c_lottieanimation_capi.cpp b/vendor/github.com/Benau/go_rlottie/binding_c_lottieanimation_capi.cpp new file mode 100644 index 00000000..9dc851e9 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/binding_c_lottieanimation_capi.cpp @@ -0,0 +1,284 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+#include "rlottie.h"
+#include "rlottie_capi.h"
+#include "vector_vdebug.h"
+
+using namespace rlottie;
+
+extern "C" {
+#include <string.h>
+#include <stdarg.h>
+
+struct Lottie_Animation_S
+{
+    std::unique_ptr<Animation> mAnimation;
+    std::future<Surface>       mRenderTask;
+    uint32_t                  *mBufferRef;
+    LOTMarkerList             *mMarkerList;
+};
+
+RLOTTIE_API Lottie_Animation_S *lottie_animation_from_file(const char *path)
+{
+    if (auto animation = Animation::loadFromFile(path) ) {
+        Lottie_Animation_S *handle = new Lottie_Animation_S();
+        handle->mAnimation = std::move(animation);
+        return handle;
+    } else {
+        return nullptr;
+    }
+}
+
+RLOTTIE_API Lottie_Animation_S *lottie_animation_from_data(const char *data, const char *key, const char *resourcePath)
+{
+    if (auto animation = Animation::loadFromData(data, key, resourcePath) ) {
+        Lottie_Animation_S *handle = new Lottie_Animation_S();
+        handle->mAnimation = std::move(animation);
+        return handle;
+    } else {
+        return nullptr;
+    }
+}
+
+RLOTTIE_API void lottie_animation_destroy(Lottie_Animation_S *animation)
+{
+    if (animation) {
+        if (animation->mMarkerList) {
+            for(size_t i = 0; i < animation->mMarkerList->size; i++) {
+                if (animation->mMarkerList->ptr[i].name) free(animation->mMarkerList->ptr[i].name);
+            }
+            delete[] animation->mMarkerList->ptr;
+            delete animation->mMarkerList;
+        }
+
+        if (animation->mRenderTask.valid()) {
+            animation->mRenderTask.get();
+        }
+        animation->mAnimation = nullptr;
+        delete animation;
+    }
+}
+
+RLOTTIE_API void lottie_animation_get_size(const Lottie_Animation_S *animation, size_t *width, size_t *height)
+{
+    if (!animation) return;
+
+    animation->mAnimation->size(*width, *height);
+}
+
+RLOTTIE_API double lottie_animation_get_duration(const Lottie_Animation_S *animation)
+{
+    if (!animation) return 0;
+
+    return animation->mAnimation->duration();
+}
+
+RLOTTIE_API size_t lottie_animation_get_totalframe(const Lottie_Animation_S *animation)
+{
+    if (!animation) return 0;
+
+    return animation->mAnimation->totalFrame();
+}
+
+
+RLOTTIE_API double lottie_animation_get_framerate(const Lottie_Animation_S *animation)
+{
+    if (!animation) return 0;
+
+    return animation->mAnimation->frameRate();
+}
+
+RLOTTIE_API const LOTLayerNode * lottie_animation_render_tree(Lottie_Animation_S *animation, size_t frame_num, size_t width, size_t height)
+{
+    if (!animation) return nullptr;
+
+    return animation->mAnimation->renderTree(frame_num, width, height);
+}
+
+RLOTTIE_API size_t
+lottie_animation_get_frame_at_pos(const Lottie_Animation_S *animation, float pos)
+{
+    if (!animation) return 0;
+
+    return animation->mAnimation->frameAtPos(pos);
+}
+
+RLOTTIE_API void
+lottie_animation_render(Lottie_Animation_S *animation,
+                        size_t frame_number,
+                        uint32_t *buffer,
+                        size_t width,
+                        size_t height,
+                        size_t bytes_per_line)
+{
+    if (!animation) return;
+
+    rlottie::Surface surface(buffer, width, height, bytes_per_line);
+    animation->mAnimation->renderSync(frame_number, surface);
+}
+
+RLOTTIE_API void
+lottie_animation_render_async(Lottie_Animation_S *animation,
+                              size_t frame_number,
+                              uint32_t *buffer,
+                              size_t width,
+                              size_t height,
+                              size_t bytes_per_line)
+{
+    if (!animation) return;
+
+    rlottie::Surface surface(buffer, width, height, bytes_per_line);
+    animation->mRenderTask = animation->mAnimation->render(frame_number, surface);
+    animation->mBufferRef = buffer;
+}
+
+RLOTTIE_API uint32_t *
+lottie_animation_render_flush(Lottie_Animation_S *animation)
+{
+    if (!animation) return nullptr;
+
+    if (animation->mRenderTask.valid()) {
+        animation->mRenderTask.get();
+    }
+
+    return animation->mBufferRef;
+}
+
+RLOTTIE_API void
+lottie_animation_property_override(Lottie_Animation_S *animation,
+                                   const Lottie_Animation_Property type,
+                                   const char *keypath,
+                                   ...)
+{
+    va_list prop;
+    va_start(prop, keypath);
+    const int arg_count = [type](){
+        switch (type) {
+        case LOTTIE_ANIMATION_PROPERTY_FILLCOLOR:
+        case LOTTIE_ANIMATION_PROPERTY_STROKECOLOR:
+            return 3;
+        case LOTTIE_ANIMATION_PROPERTY_FILLOPACITY:
+        case LOTTIE_ANIMATION_PROPERTY_STROKEOPACITY:
+        case LOTTIE_ANIMATION_PROPERTY_STROKEWIDTH:
+        case LOTTIE_ANIMATION_PROPERTY_TR_ROTATION:
+            return 1;
+        case LOTTIE_ANIMATION_PROPERTY_TR_POSITION:
+        case LOTTIE_ANIMATION_PROPERTY_TR_SCALE:
+            return 2;
+        default:
+            return 0;
+        }
+    }();
+    double v[3] = {0};
+    for (int i = 0; i < arg_count ; i++) {
+        v[i] = va_arg(prop, double);
+    }
+    va_end(prop);
+
+    switch(type) {
+    case LOTTIE_ANIMATION_PROPERTY_FILLCOLOR: {
+        double r = v[0];
+        double g = v[1];
+        double b = v[2];
+        if (r > 1 || r < 0 || g > 1 || g < 0 || b > 1 || b < 0) break;
+        animation->mAnimation->setValue<rlottie::Property::FillColor>(keypath, rlottie::Color(r, g, b));
+        break;
+    }
+    case LOTTIE_ANIMATION_PROPERTY_FILLOPACITY: {
+        double opacity = v[0];
+        if (opacity > 100 || opacity < 0) break;
+        animation->mAnimation->setValue<rlottie::Property::FillOpacity>(keypath, (float)opacity);
+        break;
+    }
+    case LOTTIE_ANIMATION_PROPERTY_STROKECOLOR: {
+        double r = v[0];
+        double g = v[1];
+        double b = v[2];
+        if (r > 1 || r < 0 || g > 1 || g < 0 || b > 1 || b < 0) break;
+        animation->mAnimation->setValue<rlottie::Property::StrokeColor>(keypath, rlottie::Color(r, g, b));
+        break;
+    }
+    case LOTTIE_ANIMATION_PROPERTY_STROKEOPACITY: {
+        double opacity = v[0];
+        if (opacity > 100 || opacity < 0) break;
+        animation->mAnimation->setValue<rlottie::Property::StrokeOpacity>(keypath, (float)opacity);
+        break;
+    }
+    case LOTTIE_ANIMATION_PROPERTY_STROKEWIDTH: {
+        double width = v[0];
+        if (width < 0) break;
+        animation->mAnimation->setValue<rlottie::Property::StrokeWidth>(keypath, (float)width);
+        break;
+    }
+    case LOTTIE_ANIMATION_PROPERTY_TR_POSITION: {
+        double x = v[0];
+        double y = v[1];
+        animation->mAnimation->setValue<rlottie::Property::TrPosition>(keypath, rlottie::Point((float)x, (float)y));
+        break;
+    }
+    case LOTTIE_ANIMATION_PROPERTY_TR_SCALE: {
+        double w = v[0];
+        double h = v[1];
+        animation->mAnimation->setValue<rlottie::Property::TrScale>(keypath, rlottie::Size((float)w, (float)h));
+        break;
+    }
+    case LOTTIE_ANIMATION_PROPERTY_TR_ROTATION: {
+        double r = v[0];
+        animation->mAnimation->setValue<rlottie::Property::TrRotation>(keypath, (float)r);
+        break;
+    }
+    case LOTTIE_ANIMATION_PROPERTY_TR_ANCHOR:
+    case LOTTIE_ANIMATION_PROPERTY_TR_OPACITY:
+        //@TODO handle property update.
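+        // (These two transform properties are accepted but, per the TODO
+        // above, no override is applied; control falls through to the break.)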
+        break;
+    }
+}
+
+RLOTTIE_API const LOTMarkerList*
+lottie_animation_get_markerlist(Lottie_Animation_S *animation)
+{
+    if (!animation) return nullptr;
+
+    auto markers = animation->mAnimation->markers();
+    if (markers.size() == 0) return nullptr;
+    if (animation->mMarkerList) return (const LOTMarkerList*)animation->mMarkerList;
+
+    animation->mMarkerList = new LOTMarkerList();
+    animation->mMarkerList->size = markers.size();
+    animation->mMarkerList->ptr = new LOTMarker[markers.size()]();
+
+    for(size_t i = 0; i < markers.size(); i++) {
+        animation->mMarkerList->ptr[i].name = strdup(std::get<0>(markers[i]).c_str());
+        animation->mMarkerList->ptr[i].startframe = std::get<1>(markers[i]);
+        animation->mMarkerList->ptr[i].endframe = std::get<2>(markers[i]);
+    }
+    return (const LOTMarkerList*)animation->mMarkerList;
+}
+
+RLOTTIE_API void
+lottie_configure_model_cache_size(size_t cacheSize)
+{
+    rlottie::configureModelCacheSize(cacheSize);
+}
+
+}
diff --git a/vendor/github.com/Benau/go_rlottie/config.h b/vendor/github.com/Benau/go_rlottie/config.h
new file mode 100644
index 00000000..21b16ee4
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/config.h
@@ -0,0 +1,10 @@
+#ifndef GO_RLOTTIE_HPP
+#define GO_RLOTTIE_HPP
+#ifndef __APPLE__
+#ifdef __ARM_NEON__
+#define USE_ARM_NEON
+#endif
+#endif
+#define LOTTIE_THREAD_SUPPORT
+#define LOTTIE_CACHE_SUPPORT
+#endif
diff --git a/vendor/github.com/Benau/go_rlottie/generate_from_rlottie.py b/vendor/github.com/Benau/go_rlottie/generate_from_rlottie.py
new file mode 100644
index 00000000..6c756b17
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/generate_from_rlottie.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python3
+# ./generate_from_rlottie.py /path/to/clean/rlottie/src/ /path/to/clean/rlottie/inc/
+import glob
+import os
+import re
+import sys
+
+FILE_KEYS = {}
+
+def get_closest_local_header(header):
+    for full_path, local in FILE_KEYS.items():
+        if os.path.basename(full_path) == header:
+            return local
+    return ''
+
+def fix_headers(code_text):
+    out = ''
+    has_neon = False
+    for line in code_text:
+        # Special fixes
+        if line == '#include <vpoint.h>':
+            line = '#include "vpoint.h"'
+        if line == '#include <vsharedptr.h>':
+            line = '#include "vsharedptr.h"'
+        if line == '#include <vglobal.h>':
+            line = '#include "vglobal.h"'
+        if line == '#include <vrect.h>':
+            line = '#include "vrect.h"'
+        # ARM on apple fixes
+        if '__ARM_NEON__' in line:
+            has_neon = True
+            line = line.replace('__ARM_NEON__', 'USE_ARM_NEON')
+        header_file = re.match('#include\s+["]([^"]+)["].*', line)
+        # regex to search for <, > too
+        #header_file = re.match('#include\s+[<"]([^>"]+)[>"].*', line)
+        if header_file:
+            header = header_file.groups()[0]
+            abs_header = os.path.abspath(header)
+            header_exists = os.path.exists(abs_header)
+            if header_exists and abs_header in FILE_KEYS:
+                out += '#include "' + FILE_KEYS[abs_header] + '"\n'
+            else:
+                local = get_closest_local_header(header)
+                if local != '':
+                    out += '#include "' + local + '"\n'
+                else:
+                    out += line + '\n'
+        else:
+            out += line + '\n'
+    if has_neon:
+        out = '#include "config.h"\n' + out
+    return out
+
+if len(sys.argv) < 2:
+    print('usage: ./generate_from_rlottie.py /path/to/clean/rlottie/src/ /path/to/clean/rlottie/inc/')
+    os._exit(1)
+
+code = ['.c', '.s', '.S', '.sx', 'cc', 'cpp', 'cpp' ]
+header = ['.h', '.hh', '.hpp', '.hxx' ]
+
+# Remove old files
+files = os.listdir('.')
+for file in files:
+    if file.endswith(tuple(code)) or file.endswith(tuple(header)):
+        os.remove(os.path.join('.', file))
+
+paths = []
+it = iter(sys.argv)
+next(it, None)
+for argv in it:
+    
paths.append(argv) + +for path in paths: + for file in glob.iglob(path + '/**', recursive=True): + # Ignore msvc config.h and wasm file + if file.endswith('config.h') or 'wasm' in file: + continue + if file.endswith(tuple(code)) or file.endswith(tuple(header)): + key = os.path.abspath(file) + val = file.replace(path, '').replace('/', '_') + FILE_KEYS[key] = val + +header_check = [] +for full_path, local in FILE_KEYS.items(): + header_file = os.path.basename(full_path) + if header_file.endswith(tuple(code)): + continue + if not header_file in header_check: + header_check.append(header_file) + else: + print('WARNING: ' + header_file + ' has multiple reference in subdirectories') + +cur_dir = os.path.abspath('.') +for full_path, local in FILE_KEYS.items(): + os.chdir(os.path.dirname(full_path)) + with open(full_path) as code: + code_text = code.read().splitlines() + code.close() + fixed = fix_headers(code_text) + os.chdir(cur_dir) + local_file = open(local, "w") + local_file.write(fixed) + local_file.close() + +# Write config.h +config = '#ifndef GO_RLOTTIE_HPP\n#define GO_RLOTTIE_HPP\n' +# ARM on apple won't compile +config += '#ifndef __APPLE__\n#ifdef __ARM_NEON__\n#define USE_ARM_NEON\n#endif\n#endif\n' +config += '#define LOTTIE_THREAD_SUPPORT\n#define LOTTIE_CACHE_SUPPORT\n' +config += '#endif\n' +config_file = open('config.h', "w") +config_file.write(config) +config_file.close() + +# Fix vector_pixman_pixman-arm-neon-asm.S +with open('vector_pixman_pixman-arm-neon-asm.S') as code: + assembly = code.read() +code.close() +assembly = '#include "config.h"\n#ifdef USE_ARM_NEON\n' + assembly + '#endif\n' +fixed_assembly = open('vector_pixman_pixman-arm-neon-asm.S', "w") +fixed_assembly.write(assembly) +fixed_assembly.close() diff --git a/vendor/github.com/Benau/go_rlottie/go.mod b/vendor/github.com/Benau/go_rlottie/go.mod new file mode 100644 index 00000000..a2af3cf5 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/go.mod @@ -0,0 +1 @@ +module github.com/Benau/go_rlottie diff --git a/vendor/github.com/Benau/go_rlottie/go_rlottie.go b/vendor/github.com/Benau/go_rlottie/go_rlottie.go new file mode 100644 index 00000000..c53392e3 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/go_rlottie.go @@ -0,0 +1,56 @@ +package go_rlottie + +/* +#cgo !windows LDFLAGS: -lm +#cgo windows CFLAGS: -DRLOTTIE_BUILD=0 +#cgo windows CXXFLAGS: -DRLOTTIE_BUILD=0 +#cgo CXXFLAGS: -std=c++14 -fno-exceptions -fno-asynchronous-unwind-tables -fno-rtti -Wall -fvisibility=hidden -Wnon-virtual-dtor -Woverloaded-virtual -Wno-unused-parameter +#include "rlottie_capi.h" +void lottie_configure_model_cache_size(size_t cacheSize); +*/ +import "C" +import "unsafe" + +type Lottie_Animation *C.Lottie_Animation + +func LottieConfigureModelCacheSize(size uint) { + C.lottie_configure_model_cache_size(C.size_t(size)) +} + +func LottieAnimationFromData(data string, key string, resource_path string) Lottie_Animation { + var animation Lottie_Animation + animation = C.lottie_animation_from_data(C.CString(data), C.CString(key), C.CString(resource_path)) + return animation +} + +func LottieAnimationDestroy(animation Lottie_Animation) { + C.lottie_animation_destroy(animation) +} + +func LottieAnimationGetSize(animation Lottie_Animation) (uint, uint) { + var width C.size_t + var height C.size_t + C.lottie_animation_get_size(animation, &width, &height) + return uint(width), uint(height) +} + +func LottieAnimationGetTotalframe(animation Lottie_Animation) uint { + return uint(C.lottie_animation_get_totalframe(animation)) +} + +func 
LottieAnimationGetFramerate(animation Lottie_Animation) float64 { + return float64(C.lottie_animation_get_framerate(animation)) +} + +func LottieAnimationGetFrameAtPos(animation Lottie_Animation, pos float32) uint { + return uint(C.lottie_animation_get_frame_at_pos(animation, C.float(pos))) +} + +func LottieAnimationGetDuration(animation Lottie_Animation) float64 { + return float64(C.lottie_animation_get_duration(animation)) +} + +func LottieAnimationRender(animation Lottie_Animation, frame_num uint, buffer []byte, width uint, height uint, bytes_per_line uint) { + var ptr *C.uint32_t = (*C.uint32_t)(unsafe.Pointer(&buffer[0])); + C.lottie_animation_render(animation, C.size_t(frame_num), ptr, C.size_t(width), C.size_t(height), C.size_t(bytes_per_line)) +} diff --git a/vendor/github.com/Benau/go_rlottie/lottie_lottieanimation.cpp b/vendor/github.com/Benau/go_rlottie/lottie_lottieanimation.cpp new file mode 100644 index 00000000..341489b3 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_lottieanimation.cpp @@ -0,0 +1,457 @@ +#include "config.h" +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+#include "config.h"
+#include "lottie_lottieitem.h"
+#include "lottie_lottiemodel.h"
+#include "rlottie.h"
+
+#include <fstream>
+
+using namespace rlottie;
+using namespace rlottie::internal;
+
+RLOTTIE_API void rlottie::configureModelCacheSize(size_t cacheSize)
+{
+    internal::model::configureModelCacheSize(cacheSize);
+}
+
+struct RenderTask {
+    RenderTask() { receiver = sender.get_future(); }
+    std::promise<Surface> sender;
+    std::future<Surface>  receiver;
+    AnimationImpl *       playerImpl{nullptr};
+    size_t                frameNo{0};
+    Surface               surface;
+    bool                  keepAspectRatio{true};
+};
+using SharedRenderTask = std::shared_ptr<RenderTask>;
+
+class AnimationImpl {
+public:
+    void    init(std::shared_ptr<model::Composition> composition);
+    bool    update(size_t frameNo, const VSize &size, bool keepAspectRatio);
+    VSize   size() const { return mModel->size(); }
+    double  duration() const { return mModel->duration(); }
+    double  frameRate() const { return mModel->frameRate(); }
+    size_t  totalFrame() const { return mModel->totalFrame(); }
+    size_t  frameAtPos(double pos) const { return mModel->frameAtPos(pos); }
+    Surface render(size_t frameNo, const Surface &surface,
+                   bool keepAspectRatio);
+    std::future<Surface> renderAsync(size_t frameNo, Surface &&surface,
+                                     bool keepAspectRatio);
+    const LOTLayerNode * renderTree(size_t frameNo, const VSize &size);
+
+    const LayerInfoList &layerInfoList() const
+    {
+        if (mLayerList.empty()) {
+            mLayerList = mModel->layerInfoList();
+        }
+        return mLayerList;
+    }
+    const MarkerList &markers() const { return mModel->markers(); }
+    void              setValue(const std::string &keypath, LOTVariant &&value);
+    void              removeFilter(const std::string &keypath, Property prop);
+
+private:
+    mutable LayerInfoList                  mLayerList;
+    model::Composition *                   mModel;
+    SharedRenderTask                       mTask;
+    std::atomic<bool>                      mRenderInProgress;
+    std::unique_ptr<renderer::Composition> mRenderer{nullptr};
+};
+
+void AnimationImpl::setValue(const std::string &keypath, LOTVariant &&value)
+{
+    if (keypath.empty()) return;
+    mRenderer->setValue(keypath, value);
+}
+
+const LOTLayerNode *AnimationImpl::renderTree(size_t frameNo, const VSize &size)
+{
+    if (update(frameNo, size, true)) {
+        mRenderer->buildRenderTree();
+    }
+    return mRenderer->renderTree();
+}
+
+bool AnimationImpl::update(size_t frameNo, const VSize &size,
+                           bool keepAspectRatio)
+{
+    frameNo += mModel->startFrame();
+
+    if (frameNo > mModel->endFrame()) frameNo = mModel->endFrame();
+
+    if (frameNo < mModel->startFrame()) frameNo = mModel->startFrame();
+
+    return mRenderer->update(int(frameNo), size, keepAspectRatio);
+}
+
+Surface AnimationImpl::render(size_t frameNo, const Surface &surface,
+                              bool keepAspectRatio)
+{
+    bool renderInProgress = mRenderInProgress.load();
+    if (renderInProgress) {
+        vCritical << "Already Rendering Scheduled for this Animation";
+        return surface;
+    }
+
+    mRenderInProgress.store(true);
+    update(
+        frameNo,
+        VSize(int(surface.drawRegionWidth()), int(surface.drawRegionHeight())),
+        keepAspectRatio);
+    mRenderer->render(surface);
+    mRenderInProgress.store(false);
+
+    return surface;
+}
+
+void AnimationImpl::init(std::shared_ptr<model::Composition> composition)
+{
+    mModel = composition.get();
+    mRenderer = std::make_unique<renderer::Composition>(composition);
+    mRenderInProgress = false;
+}
+
+#ifdef LOTTIE_THREAD_SUPPORT
+
+#include <thread>
+#include "vector_vtaskqueue.h"
+
+/*
+ * Implement a task stealing scheduler to perform render task
+ * As each player draws into its own buffer we can delegate this
+ * task to a slave thread. 
The scheduler creates a threadpool depending
+ * on the number of cores available in the system and does a simple fair
+ * scheduling by assigning the task in a round-robin fashion. Each thread
+ * in the threadpool has its own queue. Once it finishes all the tasks on its
+ * own queue it goes through the rest of the queues and looks for a task; if it
+ * finds one it steals the task from that queue and executes it. If it couldn't
+ * find one then it just waits for a new task on its own queue.
+ */
+class RenderTaskScheduler {
+    const unsigned           _count{std::thread::hardware_concurrency()};
+    std::vector<std::thread> _threads;
+    std::vector<TaskQueue<SharedRenderTask>> _q{_count};
+    std::atomic<unsigned>    _index{0};
+
+    void run(unsigned i)
+    {
+        while (true) {
+            bool             success = false;
+            SharedRenderTask task;
+            for (unsigned n = 0; n != _count * 2; ++n) {
+                if (_q[(i + n) % _count].try_pop(task)) {
+                    success = true;
+                    break;
+                }
+            }
+            if (!success && !_q[i].pop(task)) break;
+
+            auto result = task->playerImpl->render(task->frameNo, task->surface,
+                                                   task->keepAspectRatio);
+            task->sender.set_value(result);
+        }
+    }
+
+    RenderTaskScheduler()
+    {
+        for (unsigned n = 0; n != _count; ++n) {
+            _threads.emplace_back([&, n] { run(n); });
+        }
+    }
+
+public:
+    static RenderTaskScheduler &instance()
+    {
+        static RenderTaskScheduler singleton;
+        return singleton;
+    }
+
+    ~RenderTaskScheduler()
+    {
+        for (auto &e : _q) e.done();
+
+        for (auto &e : _threads) e.join();
+    }
+
+    std::future<Surface> process(SharedRenderTask task)
+    {
+        auto receiver = std::move(task->receiver);
+        auto i = _index++;
+
+        for (unsigned n = 0; n != _count; ++n) {
+            if (_q[(i + n) % _count].try_push(std::move(task))) return receiver;
+        }
+
+        if (_count > 0) {
+            _q[i % _count].push(std::move(task));
+        }
+
+        return receiver;
+    }
+};
+
+#else
+class RenderTaskScheduler {
+public:
+    static RenderTaskScheduler &instance()
+    {
+        static RenderTaskScheduler singleton;
+        return singleton;
+    }
+
+    std::future<Surface> process(SharedRenderTask task)
+    {
+        auto result = task->playerImpl->render(task->frameNo, task->surface,
+                                               task->keepAspectRatio);
+        task->sender.set_value(result);
+        return std::move(task->receiver);
+    }
+};
+#endif
+
+std::future<Surface> AnimationImpl::renderAsync(size_t    frameNo,
+                                                Surface &&surface,
+                                                bool      keepAspectRatio)
+{
+    if (!mTask) {
+        mTask = std::make_shared<RenderTask>();
+    } else {
+        mTask->sender = std::promise<Surface>();
+        mTask->receiver = mTask->sender.get_future();
+    }
+    mTask->playerImpl = this;
+    mTask->frameNo = frameNo;
+    mTask->surface = std::move(surface);
+    mTask->keepAspectRatio = keepAspectRatio;
+
+    return RenderTaskScheduler::instance().process(mTask);
+}
+
+/**
+ * \brief Brief about the Api.
+ * Description about the setFilePath Api
+ * @param path add the details
+ */
+std::unique_ptr<Animation> Animation::loadFromData(
+    std::string jsonData, const std::string &key,
+    const std::string &resourcePath, bool cachePolicy)
+{
+    if (jsonData.empty()) {
+        vWarning << "json data is empty";
+        return nullptr;
+    }
+
+    auto composition = model::loadFromData(std::move(jsonData), key,
+                                           resourcePath, cachePolicy);
+    if (composition) {
+        auto animation = std::unique_ptr<Animation>(new Animation);
+        animation->d->init(std::move(composition));
+        return animation;
+    }
+
+    return nullptr;
+}
+
+std::unique_ptr<Animation> Animation::loadFromData(std::string jsonData,
+                                                   std::string resourcePath,
+                                                   ColorFilter filter)
+{
+    if (jsonData.empty()) {
+        vWarning << "json data is empty";
+        return nullptr;
+    }
+
+    auto composition = model::loadFromData(
+        std::move(jsonData), std::move(resourcePath), std::move(filter));
+    if (composition) {
+        auto animation = std::unique_ptr<Animation>(new Animation);
+        animation->d->init(std::move(composition));
+        return animation;
+    }
+    return nullptr;
+}
+
+std::unique_ptr<Animation> Animation::loadFromFile(const std::string &path,
+                                                   bool cachePolicy)
+{
+    if (path.empty()) {
+        vWarning << "File path is empty";
+        return nullptr;
+    }
+
+    auto composition = model::loadFromFile(path, cachePolicy);
+    if (composition) {
+        auto animation = std::unique_ptr<Animation>(new Animation);
+        animation->d->init(std::move(composition));
+        return animation;
+    }
+    return nullptr;
+}
+
+void Animation::size(size_t &width, size_t &height) const
+{
+    VSize sz = d->size();
+
+    width = sz.width();
+    height = sz.height();
+}
+
+double Animation::duration() const
+{
+    return d->duration();
+}
+
+double Animation::frameRate() const
+{
+    return d->frameRate();
+}
+
+size_t Animation::totalFrame() const
+{
+    return d->totalFrame();
+}
+
+size_t Animation::frameAtPos(double pos)
+{
+    return d->frameAtPos(pos);
+}
+
+const LOTLayerNode *Animation::renderTree(size_t frameNo, size_t width,
+                                          size_t height) const
+{
+    return d->renderTree(frameNo, VSize(int(width), int(height)));
+}
+
+std::future<Surface> Animation::render(size_t frameNo, Surface surface,
+                                       bool keepAspectRatio)
+{
+    return d->renderAsync(frameNo, std::move(surface), keepAspectRatio);
+}
+
+void Animation::renderSync(size_t frameNo, Surface surface,
+                           bool keepAspectRatio)
+{
+    d->render(frameNo, surface, keepAspectRatio);
+}
+
+const LayerInfoList &Animation::layers() const
+{
+    return d->layerInfoList();
+}
+
+const MarkerList &Animation::markers() const
+{
+    return d->markers();
+}
+
+void Animation::setValue(Color_Type, Property prop, const std::string &keypath,
+                         Color value)
+{
+    d->setValue(keypath,
+                LOTVariant(prop, [value](const FrameInfo &) { return value; }));
+}
+
+void Animation::setValue(Float_Type, Property prop, const std::string &keypath,
+                         float value)
+{
+    d->setValue(keypath,
+                LOTVariant(prop, [value](const FrameInfo &) { return value; }));
+}
+
+void Animation::setValue(Size_Type, Property prop, const std::string &keypath,
+                         Size value)
+{
+    d->setValue(keypath,
+                LOTVariant(prop, [value](const FrameInfo &) { return value; }));
+}
+
+void Animation::setValue(Point_Type, Property prop, const std::string &keypath,
+                         Point value)
+{
+    d->setValue(keypath,
+                LOTVariant(prop, [value](const FrameInfo &) { return value; }));
+}
+
+void Animation::setValue(Color_Type, Property prop, const std::string &keypath,
+                         std::function<Color(const FrameInfo &)> &&value)
+{
+    d->setValue(keypath, LOTVariant(prop, value));
+}
+
+void Animation::setValue(Float_Type, Property prop, const std::string &keypath,
+                         std::function<float(const FrameInfo &)>
 &&value)
+{
+    d->setValue(keypath, LOTVariant(prop, value));
+}
+
+void Animation::setValue(Size_Type, Property prop, const std::string &keypath,
+                         std::function<Size(const FrameInfo &)> &&value)
+{
+    d->setValue(keypath, LOTVariant(prop, value));
+}
+
+void Animation::setValue(Point_Type, Property prop, const std::string &keypath,
+                         std::function<Point(const FrameInfo &)> &&value)
+{
+    d->setValue(keypath, LOTVariant(prop, value));
+}
+
+Animation::~Animation() = default;
+Animation::Animation() : d(std::make_unique<AnimationImpl>()) {}
+
+Surface::Surface(uint32_t *buffer, size_t width, size_t height,
+                 size_t bytesPerLine)
+    : mBuffer(buffer),
+      mWidth(width),
+      mHeight(height),
+      mBytesPerLine(bytesPerLine)
+{
+    mDrawArea.w = mWidth;
+    mDrawArea.h = mHeight;
+}
+
+void Surface::setDrawRegion(size_t x, size_t y, size_t width, size_t height)
+{
+    if ((x + width > mWidth) || (y + height > mHeight)) return;
+
+    mDrawArea.x = x;
+    mDrawArea.y = y;
+    mDrawArea.w = width;
+    mDrawArea.h = height;
+}
+
+#ifdef LOTTIE_LOGGING_SUPPORT
+void initLogging()
+{
+#if defined(USE_ARM_NEON)
+    set_log_level(LogLevel::OFF);
+#else
+    initialize(GuaranteedLogger(), "/tmp/", "rlottie", 1);
+    set_log_level(LogLevel::INFO);
+#endif
+}
+
+V_CONSTRUCTOR_FUNCTION(initLogging)
+#endif
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_lottiefiltermodel.h b/vendor/github.com/Benau/go_rlottie/lottie_lottiefiltermodel.h
new file mode 100644
index 00000000..130de590
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_lottiefiltermodel.h
@@ -0,0 +1,435 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef LOTTIEFILTERMODEL_H
+#define LOTTIEFILTERMODEL_H
+
+#include <algorithm>
+#include <bitset>
+#include <cassert>
+#include "lottie_lottiemodel.h"
+#include "rlottie.h"
+
+using namespace rlottie::internal;
+// Naive way to implement std::variant
+// refactor it when we move to c++17
+// users should make sure proper combination
+// of id and value are passed while creating the object.
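+// (The tag member below records which std::function member of the union is
+// currently constructed; Copy/Move/Destroy switch on it to run the matching
+// placement-new or explicit destructor call.)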
+class LOTVariant {
+public:
+    using ValueFunc = std::function<float(const rlottie::FrameInfo&)>;
+    using ColorFunc = std::function<rlottie::Color(const rlottie::FrameInfo&)>;
+    using PointFunc = std::function<rlottie::Point(const rlottie::FrameInfo&)>;
+    using SizeFunc = std::function<rlottie::Size(const rlottie::FrameInfo&)>;
+
+    LOTVariant(rlottie::Property prop, const ValueFunc& v)
+        : mPropery(prop), mTag(Value)
+    {
+        construct(impl.valueFunc, v);
+    }
+
+    LOTVariant(rlottie::Property prop, ValueFunc&& v)
+        : mPropery(prop), mTag(Value)
+    {
+        moveConstruct(impl.valueFunc, std::move(v));
+    }
+
+    LOTVariant(rlottie::Property prop, const ColorFunc& v)
+        : mPropery(prop), mTag(Color)
+    {
+        construct(impl.colorFunc, v);
+    }
+
+    LOTVariant(rlottie::Property prop, ColorFunc&& v)
+        : mPropery(prop), mTag(Color)
+    {
+        moveConstruct(impl.colorFunc, std::move(v));
+    }
+
+    LOTVariant(rlottie::Property prop, const PointFunc& v)
+        : mPropery(prop), mTag(Point)
+    {
+        construct(impl.pointFunc, v);
+    }
+
+    LOTVariant(rlottie::Property prop, PointFunc&& v)
+        : mPropery(prop), mTag(Point)
+    {
+        moveConstruct(impl.pointFunc, std::move(v));
+    }
+
+    LOTVariant(rlottie::Property prop, const SizeFunc& v)
+        : mPropery(prop), mTag(Size)
+    {
+        construct(impl.sizeFunc, v);
+    }
+
+    LOTVariant(rlottie::Property prop, SizeFunc&& v)
+        : mPropery(prop), mTag(Size)
+    {
+        moveConstruct(impl.sizeFunc, std::move(v));
+    }
+
+    rlottie::Property property() const { return mPropery; }
+
+    const ColorFunc& color() const
+    {
+        assert(mTag == Color);
+        return impl.colorFunc;
+    }
+
+    const ValueFunc& value() const
+    {
+        assert(mTag == Value);
+        return impl.valueFunc;
+    }
+
+    const PointFunc& point() const
+    {
+        assert(mTag == Point);
+        return impl.pointFunc;
+    }
+
+    const SizeFunc& size() const
+    {
+        assert(mTag == Size);
+        return impl.sizeFunc;
+    }
+
+    LOTVariant() = default;
+    ~LOTVariant() noexcept { Destroy(); }
+    LOTVariant(const LOTVariant& other) { Copy(other); }
+    LOTVariant(LOTVariant&& other) noexcept { Move(std::move(other)); }
+    LOTVariant& operator=(LOTVariant&& other)
+    {
+        Destroy();
+        Move(std::move(other));
+        return *this;
+    }
+    LOTVariant& operator=(const LOTVariant& other)
+    {
+        Destroy();
+        Copy(other);
+        return *this;
+    }
+
+private:
+    template <typename T>
+    void construct(T& member, const T& val)
+    {
+        new (&member) T(val);
+    }
+
+    template <typename T>
+    void moveConstruct(T& member, T&& val)
+    {
+        new (&member) T(std::move(val));
+    }
+
+    void Move(LOTVariant&& other)
+    {
+        switch (other.mTag) {
+        case Type::Value:
+            moveConstruct(impl.valueFunc, std::move(other.impl.valueFunc));
+            break;
+        case Type::Color:
+            moveConstruct(impl.colorFunc, std::move(other.impl.colorFunc));
+            break;
+        case Type::Point:
+            moveConstruct(impl.pointFunc, std::move(other.impl.pointFunc));
+            break;
+        case Type::Size:
+            moveConstruct(impl.sizeFunc, std::move(other.impl.sizeFunc));
+            break;
+        default:
+            break;
+        }
+        mTag = other.mTag;
+        mPropery = other.mPropery;
+        other.mTag = MonoState;
+    }
+
+    void Copy(const LOTVariant& other)
+    {
+        switch (other.mTag) {
+        case Type::Value:
+            construct(impl.valueFunc, other.impl.valueFunc);
+            break;
+        case Type::Color:
+            construct(impl.colorFunc, other.impl.colorFunc);
+            break;
+        case Type::Point:
+            construct(impl.pointFunc, other.impl.pointFunc);
+            break;
+        case Type::Size:
+            construct(impl.sizeFunc, other.impl.sizeFunc);
+            break;
+        default:
+            break;
+        }
+        mTag = other.mTag;
+        mPropery = other.mPropery;
+    }
+
+    void Destroy()
+    {
+        switch (mTag) {
+        case MonoState: {
+            break;
+        }
+        case Value: {
+            impl.valueFunc.~ValueFunc();
+            break;
+        }
+        case Color: {
+            impl.colorFunc.~ColorFunc();
+            break;
+        }
+        case Point: {
+            impl.pointFunc.~PointFunc();
+            break;
+        }
+        case Size: {
+            
impl.sizeFunc.~SizeFunc();
+            break;
+        }
+        }
+    }
+
+    enum Type { MonoState, Value, Color, Point, Size };
+    rlottie::Property mPropery;
+    Type              mTag{MonoState};
+    union details {
+        ColorFunc colorFunc;
+        ValueFunc valueFunc;
+        PointFunc pointFunc;
+        SizeFunc  sizeFunc;
+        details() {}
+        ~details() noexcept {}
+    } impl;
+};
+
+namespace rlottie {
+
+namespace internal {
+
+namespace model {
+
+class FilterData {
+public:
+    void addValue(LOTVariant& value)
+    {
+        uint index = static_cast<uint>(value.property());
+        if (mBitset.test(index)) {
+            std::replace_if(mFilters.begin(), mFilters.end(),
+                            [&value](const LOTVariant& e) {
+                                return e.property() == value.property();
+                            },
+                            value);
+        } else {
+            mBitset.set(index);
+            mFilters.push_back(value);
+        }
+    }
+
+    void removeValue(LOTVariant& value)
+    {
+        uint index = static_cast<uint>(value.property());
+        if (mBitset.test(index)) {
+            mBitset.reset(index);
+            mFilters.erase(std::remove_if(mFilters.begin(), mFilters.end(),
+                                          [&value](const LOTVariant& e) {
+                                              return e.property() ==
+                                                     value.property();
+                                          }),
+                           mFilters.end());
+        }
+    }
+    bool hasFilter(rlottie::Property prop) const
+    {
+        return mBitset.test(static_cast<uint>(prop));
+    }
+    model::Color color(rlottie::Property prop, int frame) const
+    {
+        rlottie::FrameInfo info(frame);
+        rlottie::Color     col = data(prop).color()(info);
+        return model::Color(col.r(), col.g(), col.b());
+    }
+    VPointF point(rlottie::Property prop, int frame) const
+    {
+        rlottie::FrameInfo info(frame);
+        rlottie::Point     pt = data(prop).point()(info);
+        return VPointF(pt.x(), pt.y());
+    }
+    VSize scale(rlottie::Property prop, int frame) const
+    {
+        rlottie::FrameInfo info(frame);
+        rlottie::Size      sz = data(prop).size()(info);
+        return VSize(sz.w(), sz.h());
+    }
+    float opacity(rlottie::Property prop, int frame) const
+    {
+        rlottie::FrameInfo info(frame);
+        float              val = data(prop).value()(info);
+        return val / 100;
+    }
+    float value(rlottie::Property prop, int frame) const
+    {
+        rlottie::FrameInfo info(frame);
+        return data(prop).value()(info);
+    }
+
+private:
+    const LOTVariant& data(rlottie::Property prop) const
+    {
+        auto result = std::find_if(
+            mFilters.begin(), mFilters.end(),
+            [prop](const LOTVariant& e) { return e.property() == prop; });
+        return *result;
+    }
+    std::bitset<32>         mBitset{0};
+    std::vector<LOTVariant> mFilters;
+};
+
+template <typename T>
+struct FilterBase
+{
+    FilterBase(T *model): model_(model){}
+
+    const char* name() const { return model_->name(); }
+
+    FilterData* filter() {
+        if (!filterData_) filterData_ = std::make_unique<FilterData>();
+        return filterData_.get();
+    }
+
+    const FilterData * filter() const { return filterData_.get(); }
+    const T* model() const { return model_;}
+
+    bool hasFilter(rlottie::Property prop) const {
+        return filterData_ ? 
filterData_->hasFilter(prop)
+                           : false;
+    }
+
+    T*                          model_{nullptr};
+    std::unique_ptr<FilterData> filterData_{nullptr};
+};
+
+
+template <typename T>
+class Filter : public FilterBase<T> {
+public:
+    Filter(T* model): FilterBase<T>(model){}
+    model::Color color(int frame) const
+    {
+        if (this->hasFilter(rlottie::Property::StrokeColor)) {
+            return this->filter()->color(rlottie::Property::StrokeColor, frame);
+        }
+        return this->model()->color(frame);
+    }
+    float opacity(int frame) const
+    {
+        if (this->hasFilter(rlottie::Property::StrokeOpacity)) {
+            return this->filter()->opacity(rlottie::Property::StrokeOpacity, frame);
+        }
+        return this->model()->opacity(frame);
+    }
+
+    float strokeWidth(int frame) const
+    {
+        if (this->hasFilter(rlottie::Property::StrokeWidth)) {
+            return this->filter()->value(rlottie::Property::StrokeWidth, frame);
+        }
+        return this->model()->strokeWidth(frame);
+    }
+
+    float     miterLimit() const { return this->model()->miterLimit(); }
+    CapStyle  capStyle() const { return this->model()->capStyle(); }
+    JoinStyle joinStyle() const { return this->model()->joinStyle(); }
+    bool      hasDashInfo() const { return this->model()->hasDashInfo(); }
+    void      getDashInfo(int frameNo, std::vector<float>& result) const
+    {
+        return this->model()->getDashInfo(frameNo, result);
+    }
+};
+
+
+template <>
+class Filter<model::Fill>: public FilterBase<model::Fill>
+{
+public:
+    Filter(model::Fill* model) : FilterBase<model::Fill>(model) {}
+
+    model::Color color(int frame) const
+    {
+        if (this->hasFilter(rlottie::Property::FillColor)) {
+            return this->filter()->color(rlottie::Property::FillColor, frame);
+        }
+        return this->model()->color(frame);
+    }
+
+    float opacity(int frame) const
+    {
+        if (this->hasFilter(rlottie::Property::FillOpacity)) {
+            return this->filter()->opacity(rlottie::Property::FillOpacity, frame);
+        }
+        return this->model()->opacity(frame);
+    }
+
+    FillRule fillRule() const { return this->model()->fillRule(); }
+};
+
+template <>
+class Filter<model::Group> : public FilterBase<model::Group>
+{
+public:
+    Filter(model::Group* model = nullptr) : FilterBase<model::Group>(model) {}
+
+    bool hasModel() const { return this->model() ? true : false; }
+
+    model::Transform* transform() const { return this->model() ? this->model()->mTransform : nullptr; }
+    VMatrix matrix(int frame) const
+    {
+        VMatrix mS, mR, mT;
+        if (this->hasFilter(rlottie::Property::TrScale)) {
+            VSize s = this->filter()->scale(rlottie::Property::TrScale, frame);
+            mS.scale(s.width() / 100.0, s.height() / 100.0);
+        }
+        if (this->hasFilter(rlottie::Property::TrRotation)) {
+            mR.rotate(this->filter()->value(rlottie::Property::TrRotation, frame));
+        }
+        if (this->hasFilter(rlottie::Property::TrPosition)) {
+            mT.translate(this->filter()->point(rlottie::Property::TrPosition, frame));
+        }
+
+        return this->model()->mTransform->matrix(frame) * mS * mR * mT;
+    }
+};
+
+
+} // namespace model
+
+} // namespace internal
+
+} // namespace rlottie
+
+#endif // LOTTIEFILTERMODEL_H
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_lottieitem.cpp b/vendor/github.com/Benau/go_rlottie/lottie_lottieitem.cpp
new file mode 100644
index 00000000..99f80044
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_lottieitem.cpp
@@ -0,0 +1,1491 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "lottie_lottieitem.h"
+#include <algorithm>
+#include <cmath>
+#include <iterator>
+#include "lottie_lottiekeypath.h"
+#include "vector_vbitmap.h"
+#include "vector_vpainter.h"
+#include "vector_vraster.h"
+
+/* Lottie Layer Rules
+ * 1. time stretch is pre calculated and applied to all the properties of the
+ * lottie layer model and all its children
+ * 2. The frame property could be reversed using the time-reverse layer
+ * property in AE, which means (start frame > endFrame)
+ * 3.
+ */
+
+static bool transformProp(rlottie::Property prop)
+{
+    switch (prop) {
+    case rlottie::Property::TrAnchor:
+    case rlottie::Property::TrScale:
+    case rlottie::Property::TrOpacity:
+    case rlottie::Property::TrPosition:
+    case rlottie::Property::TrRotation:
+        return true;
+    default:
+        return false;
+    }
+}
+static bool fillProp(rlottie::Property prop)
+{
+    switch (prop) {
+    case rlottie::Property::FillColor:
+    case rlottie::Property::FillOpacity:
+        return true;
+    default:
+        return false;
+    }
+}
+
+static bool strokeProp(rlottie::Property prop)
+{
+    switch (prop) {
+    case rlottie::Property::StrokeColor:
+    case rlottie::Property::StrokeOpacity:
+    case rlottie::Property::StrokeWidth:
+        return true;
+    default:
+        return false;
+    }
+}
+
+static renderer::Layer *createLayerItem(model::Layer *layerData,
+                                        VArenaAlloc * allocator)
+{
+    switch (layerData->mLayerType) {
+    case model::Layer::Type::Precomp: {
+        return allocator->make<renderer::CompLayer>(layerData, allocator);
+    }
+    case model::Layer::Type::Solid: {
+        return allocator->make<renderer::SolidLayer>(layerData);
+    }
+    case model::Layer::Type::Shape: {
+        return allocator->make<renderer::ShapeLayer>(layerData, allocator);
+    }
+    case model::Layer::Type::Null: {
+        return allocator->make<renderer::NullLayer>(layerData);
+    }
+    case model::Layer::Type::Image: {
+        return allocator->make<renderer::ImageLayer>(layerData);
+    }
+    default:
+        return nullptr;
+        break;
+    }
+}
+
+renderer::Composition::Composition(std::shared_ptr<model::Composition> model)
+    : mCurFrameNo(-1)
+{
+    mModel = std::move(model);
+    mRootLayer = createLayerItem(mModel->mRootLayer, &mAllocator);
+    mRootLayer->setComplexContent(false);
+    mViewSize = mModel->size();
+}
+
+void renderer::Composition::setValue(const std::string &keypath,
+                                     LOTVariant &value)
+{
+    LOTKeyPath key(keypath);
+    mRootLayer->resolveKeyPath(key, 0, value);
+}
+
+bool renderer::Composition::update(int frameNo, const VSize &size,
+                                   bool keepAspectRatio)
+{
+    // check if cached frame is same as requested frame.
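+    // (the cached result is reusable only when the requested size and the
+    // keep-aspect-ratio flag are unchanged as well, per the test below)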
+    if ((mViewSize == size) && (mCurFrameNo == frameNo) &&
+        (mKeepAspectRatio == keepAspectRatio))
+        return false;
+
+    mViewSize = size;
+    mCurFrameNo = frameNo;
+    mKeepAspectRatio = keepAspectRatio;
+
+    /*
+     * if viewbox doesn't scale exactly to the viewport
+     * we scale the viewbox keeping AspectRatioPreserved and then align the
+     * viewbox to the viewport using AlignCenter rule.
+     */
+    VMatrix m;
+    VSize   viewPort = mViewSize;
+    VSize   viewBox = mModel->size();
+    float   sx = float(viewPort.width()) / viewBox.width();
+    float   sy = float(viewPort.height()) / viewBox.height();
+    if (mKeepAspectRatio) {
+        float scale = std::min(sx, sy);
+        float tx = (viewPort.width() - viewBox.width() * scale) * 0.5f;
+        float ty = (viewPort.height() - viewBox.height() * scale) * 0.5f;
+        m.translate(tx, ty).scale(scale, scale);
+    } else {
+        m.scale(sx, sy);
+    }
+    mRootLayer->update(frameNo, m, 1.0);
+    return true;
+}
+
+bool renderer::Composition::render(const rlottie::Surface &surface)
+{
+    mSurface.reset(reinterpret_cast<uchar *>(surface.buffer()),
+                   uint(surface.width()), uint(surface.height()),
+                   uint(surface.bytesPerLine()),
+                   VBitmap::Format::ARGB32_Premultiplied);
+
+    /* schedule all preprocess task for this frame at once.
+     */
+    VRect clip(0, 0, int(surface.drawRegionWidth()),
+               int(surface.drawRegionHeight()));
+    mRootLayer->preprocess(clip);
+
+    VPainter painter(&mSurface);
+    // set sub surface area for drawing.
+    painter.setDrawRegion(
+        VRect(int(surface.drawRegionPosX()), int(surface.drawRegionPosY()),
+              int(surface.drawRegionWidth()), int(surface.drawRegionHeight())));
+    mRootLayer->render(&painter, {}, {}, mSurfaceCache);
+    painter.end();
+    return true;
+}
+
+void renderer::Mask::update(int frameNo, const VMatrix &parentMatrix,
+                            float /*parentAlpha*/, const DirtyFlag &flag)
+{
+    bool dirtyPath = false;
+
+    if (flag.testFlag(DirtyFlagBit::None) && mData->isStatic()) return;
+
+    if (mData->mShape.isStatic()) {
+        if (mLocalPath.empty()) {
+            dirtyPath = true;
+            mData->mShape.value(frameNo, mLocalPath);
+        }
+    } else {
+        dirtyPath = true;
+        mData->mShape.value(frameNo, mLocalPath);
+    }
+    /* mask item doesn't inherit opacity */
+    mCombinedAlpha = mData->opacity(frameNo);
+
+    if ( flag.testFlag(DirtyFlagBit::Matrix) || dirtyPath ) {
+        mFinalPath.clone(mLocalPath);
+        mFinalPath.transform(parentMatrix);
+        mRasterRequest = true;
+    }
+}
+
+VRle renderer::Mask::rle()
+{
+    if (!vCompare(mCombinedAlpha, 1.0f)) {
+        VRle obj = mRasterizer.rle();
+        obj *= uchar(mCombinedAlpha * 255);
+        return obj;
+    } else {
+        return mRasterizer.rle();
+    }
+}
+
+void renderer::Mask::preprocess(const VRect &clip)
+{
+    if (mRasterRequest)
+        mRasterizer.rasterize(mFinalPath, FillRule::Winding, clip);
+}
+
+void renderer::Layer::render(VPainter *painter, const VRle &inheritMask,
+                             const VRle &matteRle, SurfaceCache &)
+{
+    auto renderlist = renderList();
+
+    if (renderlist.empty()) return;
+
+    VRle mask;
+    if (mLayerMask) {
+        mask = mLayerMask->maskRle(painter->clipBoundingRect());
+        if (!inheritMask.empty()) mask = mask & inheritMask;
+        // if resulting mask is empty then return.
+        if (mask.empty()) return;
+    } else {
+        mask = inheritMask;
+    }
+
+    for (auto &i : renderlist) {
+        painter->setBrush(i->mBrush);
+        VRle rle = i->rle();
+        if (matteRle.empty()) {
+            if (mask.empty()) {
+                // no mask no matte
+                painter->drawRle(VPoint(), rle);
+            } else {
+                // only mask
+                painter->drawRle(rle, mask);
+            }
+
+        } else {
+            if (!mask.empty()) rle = rle & mask;
+
+            if (rle.empty()) continue;
+            if (matteType() == model::MatteType::AlphaInv) {
+                rle = rle - matteRle;
+                painter->drawRle(VPoint(), rle);
+            } else {
+                // render with matteRle as clip.
+                painter->drawRle(rle, matteRle);
+            }
+        }
+    }
+}
+
+void renderer::LayerMask::preprocess(const VRect &clip)
+{
+    for (auto &i : mMasks) {
+        i.preprocess(clip);
+    }
+}
+
+renderer::LayerMask::LayerMask(model::Layer *layerData)
+{
+    if (!layerData->mExtra) return;
+
+    mMasks.reserve(layerData->mExtra->mMasks.size());
+
+    for (auto &i : layerData->mExtra->mMasks) {
+        mMasks.emplace_back(i);
+        mStatic &= i->isStatic();
+    }
+}
+
+void renderer::LayerMask::update(int frameNo, const VMatrix &parentMatrix,
+                                 float parentAlpha, const DirtyFlag &flag)
+{
+    if (flag.testFlag(DirtyFlagBit::None) && isStatic()) return;
+
+    for (auto &i : mMasks) {
+        i.update(frameNo, parentMatrix, parentAlpha, flag);
+    }
+    mDirty = true;
+}
+
+VRle renderer::LayerMask::maskRle(const VRect &clipRect)
+{
+    if (!mDirty) return mRle;
+
+    VRle rle;
+    for (auto &e : mMasks) {
+        const auto cur = [&]() {
+            if (e.inverted())
+                return clipRect - e.rle();
+            else
+                return e.rle();
+        }();
+
+        switch (e.maskMode()) {
+        case model::Mask::Mode::Add: {
+            rle = rle + cur;
+            break;
+        }
+        case model::Mask::Mode::Substarct: {
+            if (rle.empty() && !clipRect.empty())
+                rle = clipRect - cur;
+            else
+                rle = rle - cur;
+            break;
+        }
+        case model::Mask::Mode::Intersect: {
+            if (rle.empty() && !clipRect.empty())
+                rle = clipRect & cur;
+            else
+                rle = rle & cur;
+            break;
+        }
+        case model::Mask::Mode::Difference: {
+            rle = rle ^ cur;
+            break;
+        }
+        default:
+            break;
+        }
+    }
+
+    if (!rle.empty() && !rle.unique()) {
+        mRle.clone(rle);
+    } else {
+        mRle = rle;
+    }
+    mDirty = false;
+    return mRle;
+}
+
+renderer::Layer::Layer(model::Layer *layerData) : mLayerData(layerData)
+{
+    if (mLayerData->mHasMask)
+        mLayerMask = std::make_unique<renderer::LayerMask>(mLayerData);
+}
+
+bool renderer::Layer::resolveKeyPath(LOTKeyPath &keyPath, uint depth,
+                                     LOTVariant &value)
+{
+    if (!keyPath.matches(name(), depth)) {
+        return false;
+    }
+
+    if (!keyPath.skip(name())) {
+        if (keyPath.fullyResolvesTo(name(), depth) &&
+            transformProp(value.property())) {
+            //@TODO handle property update.
+        }
+    }
+    return true;
+}
+
+bool renderer::ShapeLayer::resolveKeyPath(LOTKeyPath &keyPath, uint depth,
+                                          LOTVariant &value)
+{
+    if (renderer::Layer::resolveKeyPath(keyPath, depth, value)) {
+        if (keyPath.propagate(name(), depth)) {
+            uint newDepth = keyPath.nextDepth(name(), depth);
+            mRoot->resolveKeyPath(keyPath, newDepth, value);
+        }
+        return true;
+    }
+    return false;
+}
+
+bool renderer::CompLayer::resolveKeyPath(LOTKeyPath &keyPath, uint depth,
+                                         LOTVariant &value)
+{
+    if (renderer::Layer::resolveKeyPath(keyPath, depth, value)) {
+        if (keyPath.propagate(name(), depth)) {
+            uint newDepth = keyPath.nextDepth(name(), depth);
+            for (const auto &layer : mLayers) {
+                layer->resolveKeyPath(keyPath, newDepth, value);
+            }
+        }
+        return true;
+    }
+    return false;
+}
+
+void renderer::Layer::update(int frameNumber, const VMatrix &parentMatrix,
+                             float parentAlpha)
+{
+    mFrameNo = frameNumber;
+    // 1. 
check if the layer is part of the current frame
+    if (!visible()) return;
+
+    float alpha = parentAlpha * opacity(frameNo());
+    if (vIsZero(alpha)) {
+        mCombinedAlpha = 0;
+        return;
+    }
+
+    // 2. calculate the parent matrix and alpha
+    VMatrix m = matrix(frameNo());
+    m *= parentMatrix;
+
+    // 3. update the dirty flag based on the change
+    if (mCombinedMatrix != m) {
+        mDirtyFlag |= DirtyFlagBit::Matrix;
+        mCombinedMatrix = m;
+    }
+
+    if (!vCompare(mCombinedAlpha, alpha)) {
+        mDirtyFlag |= DirtyFlagBit::Alpha;
+        mCombinedAlpha = alpha;
+    }
+
+    // 4. update the mask
+    if (mLayerMask) {
+        mLayerMask->update(frameNo(), mCombinedMatrix, mCombinedAlpha,
+                           mDirtyFlag);
+    }
+
+    // 5. if no parent property change and layer is static then nothing to do.
+    if (!mLayerData->precompLayer() && flag().testFlag(DirtyFlagBit::None) &&
+        isStatic())
+        return;
+
+    // 6. update the content of the layer
+    updateContent();
+
+    // 7. reset the dirty flag
+    mDirtyFlag = DirtyFlagBit::None;
+}
+
+VMatrix renderer::Layer::matrix(int frameNo) const
+{
+    return mParentLayer
+               ? (mLayerData->matrix(frameNo) * mParentLayer->matrix(frameNo))
+               : mLayerData->matrix(frameNo);
+}
+
+bool renderer::Layer::visible() const
+{
+    return (frameNo() >= mLayerData->inFrame() &&
+            frameNo() < mLayerData->outFrame());
+}
+
+void renderer::Layer::preprocess(const VRect &clip)
+{
+    // layer doesn't contribute to the frame
+    if (skipRendering()) return;
+
+    // preprocess layer masks
+    if (mLayerMask) mLayerMask->preprocess(clip);
+
+    preprocessStage(clip);
+}
+
+renderer::CompLayer::CompLayer(model::Layer *layerModel, VArenaAlloc *allocator)
+    : renderer::Layer(layerModel)
+{
+    if (!mLayerData->mChildren.empty())
+        mLayers.reserve(mLayerData->mChildren.size());
+
+    // 1. keep the layer in back-to-front order.
+    // as lottie model keeps the data in front-to-back order.
+    for (auto it = mLayerData->mChildren.crbegin();
+         it != mLayerData->mChildren.rend(); ++it) {
+        auto model = static_cast<model::Layer *>(*it);
+        auto item = createLayerItem(model, allocator);
+        if (item) mLayers.push_back(item);
+    }
+
+    // 2. update parent layer
+    for (const auto &layer : mLayers) {
+        int id = layer->parentId();
+        if (id >= 0) {
+            auto search =
+                std::find_if(mLayers.begin(), mLayers.end(),
+                             [id](const auto &val) { return val->id() == id; });
+            if (search != mLayers.end()) layer->setParentLayer(*search);
+        }
+    }
+
+    // 4. 
check if it's a nested composition
+    if (!layerModel->layerSize().empty()) {
+        mClipper = std::make_unique<renderer::Clipper>(layerModel->layerSize());
+    }
+
+    if (mLayers.size() > 1) setComplexContent(true);
+}
+
+void renderer::CompLayer::render(VPainter *painter, const VRle &inheritMask,
+                                 const VRle &matteRle, SurfaceCache &cache)
+{
+    if (vIsZero(combinedAlpha())) return;
+
+    if (vCompare(combinedAlpha(), 1.0)) {
+        renderHelper(painter, inheritMask, matteRle, cache);
+    } else {
+        if (complexContent()) {
+            VSize    size = painter->clipBoundingRect().size();
+            VPainter srcPainter;
+            VBitmap  srcBitmap = cache.make_surface(size.width(), size.height());
+            srcPainter.begin(&srcBitmap);
+            renderHelper(&srcPainter, inheritMask, matteRle, cache);
+            srcPainter.end();
+            painter->drawBitmap(VPoint(), srcBitmap,
+                                uchar(combinedAlpha() * 255.0f));
+            cache.release_surface(srcBitmap);
+        } else {
+            renderHelper(painter, inheritMask, matteRle, cache);
+        }
+    }
+}
+
+void renderer::CompLayer::renderHelper(VPainter *    painter,
+                                       const VRle &  inheritMask,
+                                       const VRle &  matteRle,
+                                       SurfaceCache &cache)
+{
+    VRle mask;
+    if (mLayerMask) {
+        mask = mLayerMask->maskRle(painter->clipBoundingRect());
+        if (!inheritMask.empty()) mask = mask & inheritMask;
+        // if resulting mask is empty then return.
+        if (mask.empty()) return;
+    } else {
+        mask = inheritMask;
+    }
+
+    if (mClipper) {
+        mask = mClipper->rle(mask);
+        if (mask.empty()) return;
+    }
+
+    renderer::Layer *matte = nullptr;
+    for (const auto &layer : mLayers) {
+        if (layer->hasMatte()) {
+            matte = layer;
+        } else {
+            if (layer->visible()) {
+                if (matte) {
+                    if (matte->visible())
+                        renderMatteLayer(painter, mask, matteRle, matte, layer,
+                                         cache);
+                } else {
+                    layer->render(painter, mask, matteRle, cache);
+                }
+            }
+            matte = nullptr;
+        }
+    }
+}
+
+void renderer::CompLayer::renderMatteLayer(VPainter *painter, const VRle &mask,
+                                           const VRle &     matteRle,
+                                           renderer::Layer *layer,
+                                           renderer::Layer *src,
+                                           SurfaceCache &   cache)
+{
+    VSize size = painter->clipBoundingRect().size();
+    // Decide if we can use fast matte.
+    // 1. draw src layer to matte buffer
+    VPainter srcPainter;
+    VBitmap  srcBitmap = cache.make_surface(size.width(), size.height());
+    srcPainter.begin(&srcBitmap);
+    src->render(&srcPainter, mask, matteRle, cache);
+    srcPainter.end();
+
+    // 2. draw layer to layer buffer
+    VPainter layerPainter;
+    VBitmap  layerBitmap = cache.make_surface(size.width(), size.height());
+    layerPainter.begin(&layerBitmap);
+    layer->render(&layerPainter, mask, matteRle, cache);
+
+    // 2.1 update composition mode
+    switch (layer->matteType()) {
+    case model::MatteType::Alpha:
+    case model::MatteType::Luma: {
+        layerPainter.setBlendMode(BlendMode::DestIn);
+        break;
+    }
+    case model::MatteType::AlphaInv:
+    case model::MatteType::LumaInv: {
+        layerPainter.setBlendMode(BlendMode::DestOut);
+        break;
+    }
+    default:
+        break;
+    }
+
+    // 2.2 update srcBuffer if the matte is luma type
+    if (layer->matteType() == model::MatteType::Luma ||
+        layer->matteType() == model::MatteType::LumaInv) {
+        srcBitmap.updateLuma();
+    }
+
+    auto clip = layerPainter.clipBoundingRect();
+
+    // if the layer has only one renderer then use it as the clip rect
+    // when blending 2 buffer and copy back to final buffer to avoid
+    // unnecessary pixel processing.
+    if (layer->renderList().size() == 1)
+    {
+        clip = layer->renderList()[0]->rle().boundingRect();
+    }
+
+    // 2.3 draw src buffer as mask
+    layerPainter.drawBitmap(clip, srcBitmap, clip);
+    layerPainter.end();
+    // 3. 
draw the result buffer into painter + painter->drawBitmap(clip, layerBitmap, clip); + + cache.release_surface(srcBitmap); + cache.release_surface(layerBitmap); +} + +void renderer::Clipper::update(const VMatrix &matrix) +{ + mPath.reset(); + mPath.addRect(VRectF(0, 0, mSize.width(), mSize.height())); + mPath.transform(matrix); + mRasterRequest = true; +} + +void renderer::Clipper::preprocess(const VRect &clip) +{ + if (mRasterRequest) mRasterizer.rasterize(mPath, FillRule::Winding, clip); + + mRasterRequest = false; +} + +VRle renderer::Clipper::rle(const VRle &mask) +{ + if (mask.empty()) return mRasterizer.rle(); + + mMaskedRle.clone(mask); + mMaskedRle &= mRasterizer.rle(); + return mMaskedRle; +} + +void renderer::CompLayer::updateContent() +{ + if (mClipper && flag().testFlag(DirtyFlagBit::Matrix)) { + mClipper->update(combinedMatrix()); + } + int mappedFrame = mLayerData->timeRemap(frameNo()); + float alpha = combinedAlpha(); + if (complexContent()) alpha = 1; + for (const auto &layer : mLayers) { + layer->update(mappedFrame, combinedMatrix(), alpha); + } +} + +void renderer::CompLayer::preprocessStage(const VRect &clip) +{ + // if layer has clipper + if (mClipper) mClipper->preprocess(clip); + + renderer::Layer *matte = nullptr; + for (const auto &layer : mLayers) { + if (layer->hasMatte()) { + matte = layer; + } else { + if (layer->visible()) { + if (matte) { + if (matte->visible()) { + layer->preprocess(clip); + matte->preprocess(clip); + } + } else { + layer->preprocess(clip); + } + } + matte = nullptr; + } + } +} + +renderer::SolidLayer::SolidLayer(model::Layer *layerData) + : renderer::Layer(layerData) +{ + mDrawableList = &mRenderNode; +} + +void renderer::SolidLayer::updateContent() +{ + if (flag() & DirtyFlagBit::Matrix) { + mPath.reset(); + mPath.addRect(VRectF(0, 0, mLayerData->layerSize().width(), + mLayerData->layerSize().height())); + mPath.transform(combinedMatrix()); + mRenderNode.mFlag |= VDrawable::DirtyState::Path; + mRenderNode.mPath = mPath; + } + if (flag() & DirtyFlagBit::Alpha) { + model::Color color = mLayerData->solidColor(); + VBrush brush(color.toColor(combinedAlpha())); + mRenderNode.setBrush(brush); + mRenderNode.mFlag |= VDrawable::DirtyState::Brush; + } +} + +void renderer::SolidLayer::preprocessStage(const VRect &clip) +{ + mRenderNode.preprocess(clip); +} + +renderer::DrawableList renderer::SolidLayer::renderList() +{ + if (skipRendering()) return {}; + + return {&mDrawableList, 1}; +} + +renderer::ImageLayer::ImageLayer(model::Layer *layerData) + : renderer::Layer(layerData) +{ + mDrawableList = &mRenderNode; + + if (!mLayerData->asset()) return; + + mTexture.mBitmap = mLayerData->asset()->bitmap(); + VBrush brush(&mTexture); + mRenderNode.setBrush(brush); +} + +void renderer::ImageLayer::updateContent() +{ + if (!mLayerData->asset()) return; + + if (flag() & DirtyFlagBit::Matrix) { + mPath.reset(); + mPath.addRect(VRectF(0, 0, mLayerData->asset()->mWidth, + mLayerData->asset()->mHeight)); + mPath.transform(combinedMatrix()); + mRenderNode.mFlag |= VDrawable::DirtyState::Path; + mRenderNode.mPath = mPath; + mTexture.mMatrix = combinedMatrix(); + } + + if (flag() & DirtyFlagBit::Alpha) { + mTexture.mAlpha = int(combinedAlpha() * 255); + } +} + +void renderer::ImageLayer::preprocessStage(const VRect &clip) +{ + mRenderNode.preprocess(clip); +} + +renderer::DrawableList renderer::ImageLayer::renderList() +{ + if (skipRendering()) return {}; + + return {&mDrawableList, 1}; +} + +renderer::NullLayer::NullLayer(model::Layer *layerData) + : 
renderer::Layer(layerData) +{ +} +void renderer::NullLayer::updateContent() {} + +static renderer::Object *createContentItem(model::Object *contentData, + VArenaAlloc * allocator) +{ + switch (contentData->type()) { + case model::Object::Type::Group: { + return allocator->make( + static_cast(contentData), allocator); + } + case model::Object::Type::Rect: { + return allocator->make( + static_cast(contentData)); + } + case model::Object::Type::Ellipse: { + return allocator->make( + static_cast(contentData)); + } + case model::Object::Type::Path: { + return allocator->make( + static_cast(contentData)); + } + case model::Object::Type::Polystar: { + return allocator->make( + static_cast(contentData)); + } + case model::Object::Type::Fill: { + return allocator->make( + static_cast(contentData)); + } + case model::Object::Type::GFill: { + return allocator->make( + static_cast(contentData)); + } + case model::Object::Type::Stroke: { + return allocator->make( + static_cast(contentData)); + } + case model::Object::Type::GStroke: { + return allocator->make( + static_cast(contentData)); + } + case model::Object::Type::Repeater: { + return allocator->make( + static_cast(contentData), allocator); + } + case model::Object::Type::Trim: { + return allocator->make( + static_cast(contentData)); + } + default: + return nullptr; + break; + } +} + +renderer::ShapeLayer::ShapeLayer(model::Layer *layerData, + VArenaAlloc * allocator) + : renderer::Layer(layerData), + mRoot(allocator->make(nullptr, allocator)) +{ + mRoot->addChildren(layerData, allocator); + + std::vector list; + mRoot->processPaintItems(list); + + if (layerData->hasPathOperator()) { + list.clear(); + mRoot->processTrimItems(list); + } +} + +void renderer::ShapeLayer::updateContent() +{ + mRoot->update(frameNo(), combinedMatrix(), combinedAlpha(), flag()); + + if (mLayerData->hasPathOperator()) { + mRoot->applyTrim(); + } +} + +void renderer::ShapeLayer::preprocessStage(const VRect &clip) +{ + mDrawableList.clear(); + mRoot->renderList(mDrawableList); + + for (auto &drawable : mDrawableList) drawable->preprocess(clip); +} + +renderer::DrawableList renderer::ShapeLayer::renderList() +{ + if (skipRendering()) return {}; + + mDrawableList.clear(); + mRoot->renderList(mDrawableList); + + if (mDrawableList.empty()) return {}; + + return {mDrawableList.data(), mDrawableList.size()}; +} + +bool renderer::Group::resolveKeyPath(LOTKeyPath &keyPath, uint depth, + LOTVariant &value) +{ + if (!keyPath.skip(name())) { + if (!keyPath.matches(mModel.name(), depth)) { + return false; + } + + if (!keyPath.skip(mModel.name())) { + if (keyPath.fullyResolvesTo(mModel.name(), depth) && + transformProp(value.property())) { + mModel.filter()->addValue(value); + } + } + } + + if (keyPath.propagate(name(), depth)) { + uint newDepth = keyPath.nextDepth(name(), depth); + for (auto &child : mContents) { + child->resolveKeyPath(keyPath, newDepth, value); + } + } + return true; +} + +bool renderer::Fill::resolveKeyPath(LOTKeyPath &keyPath, uint depth, + LOTVariant &value) +{ + if (!keyPath.matches(mModel.name(), depth)) { + return false; + } + + if (keyPath.fullyResolvesTo(mModel.name(), depth) && + fillProp(value.property())) { + mModel.filter()->addValue(value); + return true; + } + return false; +} + +bool renderer::Stroke::resolveKeyPath(LOTKeyPath &keyPath, uint depth, + LOTVariant &value) +{ + if (!keyPath.matches(mModel.name(), depth)) { + return false; + } + + if (keyPath.fullyResolvesTo(mModel.name(), depth) && + strokeProp(value.property())) { + 
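// A successful match records the override on this stroke's filter
+        // model; later property reads consult the filter before the raw
+        // keyframe data, which is how runtime overrides take effect. A
+        // caller typically lands here via the public keypath API, e.g.
+        // (illustrative keypath, sketch of the public API only):
+        //   animation->setValue<rlottie::Property::StrokeColor>(
+        //       "**", rlottie::Color(1, 0, 0));
+        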
mModel.filter()->addValue(value); + return true; + } + return false; +} + +renderer::Group::Group(model::Group *data, VArenaAlloc *allocator) + : mModel(data) +{ + addChildren(data, allocator); +} + +void renderer::Group::addChildren(model::Group *data, VArenaAlloc *allocator) +{ + if (!data) return; + + if (!data->mChildren.empty()) mContents.reserve(data->mChildren.size()); + + // keep the content in back-to-front order. + // as lottie model keeps it in front-to-back order. + for (auto it = data->mChildren.crbegin(); it != data->mChildren.rend(); + ++it) { + auto content = createContentItem(*it, allocator); + if (content) { + mContents.push_back(content); + } + } +} + +void renderer::Group::update(int frameNo, const VMatrix &parentMatrix, + float parentAlpha, const DirtyFlag &flag) +{ + DirtyFlag newFlag = flag; + float alpha; + + if (mModel.hasModel() && mModel.transform()) { + VMatrix m = mModel.matrix(frameNo); + + m *= parentMatrix; + if (!(flag & DirtyFlagBit::Matrix) && !mModel.transform()->isStatic() && + (m != mMatrix)) { + newFlag |= DirtyFlagBit::Matrix; + } + + mMatrix = m; + + alpha = parentAlpha * mModel.transform()->opacity(frameNo); + if (!vCompare(alpha, parentAlpha)) { + newFlag |= DirtyFlagBit::Alpha; + } + } else { + mMatrix = parentMatrix; + alpha = parentAlpha; + } + + for (const auto &content : mContents) { + content->update(frameNo, matrix(), alpha, newFlag); + } +} + +void renderer::Group::applyTrim() +{ + for (auto i = mContents.rbegin(); i != mContents.rend(); ++i) { + auto content = (*i); + switch (content->type()) { + case renderer::Object::Type::Trim: { + static_cast(content)->update(); + break; + } + case renderer::Object::Type::Group: { + static_cast(content)->applyTrim(); + break; + } + default: + break; + } + } +} + +void renderer::Group::renderList(std::vector &list) +{ + for (const auto &content : mContents) { + content->renderList(list); + } +} + +void renderer::Group::processPaintItems(std::vector &list) +{ + size_t curOpCount = list.size(); + for (auto i = mContents.rbegin(); i != mContents.rend(); ++i) { + auto content = (*i); + switch (content->type()) { + case renderer::Object::Type::Shape: { + auto pathItem = static_cast(content); + pathItem->setParent(this); + list.push_back(pathItem); + break; + } + case renderer::Object::Type::Paint: { + static_cast(content)->addPathItems(list, + curOpCount); + break; + } + case renderer::Object::Type::Group: { + static_cast(content)->processPaintItems(list); + break; + } + default: + break; + } + } +} + +void renderer::Group::processTrimItems(std::vector &list) +{ + size_t curOpCount = list.size(); + for (auto i = mContents.rbegin(); i != mContents.rend(); ++i) { + auto content = (*i); + + switch (content->type()) { + case renderer::Object::Type::Shape: { + list.push_back(static_cast(content)); + break; + } + case renderer::Object::Type::Trim: { + static_cast(content)->addPathItems(list, + curOpCount); + break; + } + case renderer::Object::Type::Group: { + static_cast(content)->processTrimItems(list); + break; + } + default: + break; + } + } +} + +/* + * renderer::Shape uses 2 path objects for path object reuse. + * mLocalPath - keeps track of the local path of the item before + * applying path operation and transformation. 
+ * mTemp - keeps a reference to the mLocalPath and can be updated by the
+ *         path operation objects (trim, merge path).
+ * We update the DirtyPath flag if the path needs to be updated again
+ * because the local path, the matrix, or some path operation that
+ * affects the final path has changed.
+ * The PaintObject queries the dirty flag to check if it needs to compute the
+ * final path again and calls the finalPath() API to do the same.
+ * The finalPath() API passes a result object so that we keep only one copy of
+ * the path object in the paintItem (for memory efficiency).
+ * NOTE: As path objects are COW objects we have to be
+ * careful about the refcount so that we don't generate a deep copy while
+ * modifying the path objects.
+ */
+void renderer::Shape::update(int frameNo, const VMatrix &, float,
+                             const DirtyFlag &flag)
+{
+    mDirtyPath = false;
+
+    // 1. update the local path if needed
+    if (hasChanged(frameNo)) {
+        // lose the reference to mLocalPath, if any,
+        // from the last frame update.
+        mTemp = VPath();
+
+        updatePath(mLocalPath, frameNo);
+        mDirtyPath = true;
+    }
+    // 2. keep a reference path in temp in case there is some
+    // path operation like trim which will update the path.
+    // we don't want to update the local path.
+    mTemp = mLocalPath;
+
+    // 3. mark the path dirty if the matrix has changed.
+    if (flag & DirtyFlagBit::Matrix) {
+        mDirtyPath = true;
+    }
+}
+
+void renderer::Shape::finalPath(VPath &result)
+{
+    result.addPath(mTemp, static_cast<renderer::Group *>(parent())->matrix());
+}
+
+renderer::Rect::Rect(model::Rect *data)
+    : renderer::Shape(data->isStatic()), mData(data)
+{
+}
+
+void renderer::Rect::updatePath(VPath &path, int frameNo)
+{
+    VPointF pos = mData->mPos.value(frameNo);
+    VPointF size = mData->mSize.value(frameNo);
+    float roundness = mData->roundness(frameNo);
+    VRectF r(pos.x() - size.x() / 2, pos.y() - size.y() / 2, size.x(),
+             size.y());
+
+    path.reset();
+    path.addRoundRect(r, roundness, mData->direction());
+}
+
+renderer::Ellipse::Ellipse(model::Ellipse *data)
+    : renderer::Shape(data->isStatic()), mData(data)
+{
+}
+
+void renderer::Ellipse::updatePath(VPath &path, int frameNo)
+{
+    VPointF pos = mData->mPos.value(frameNo);
+    VPointF size = mData->mSize.value(frameNo);
+    VRectF r(pos.x() - size.x() / 2, pos.y() - size.y() / 2, size.x(),
+             size.y());
+
+    path.reset();
+    path.addOval(r, mData->direction());
+}
+
+renderer::Path::Path(model::Path *data)
+    : renderer::Shape(data->isStatic()), mData(data)
+{
+}
+
+void renderer::Path::updatePath(VPath &path, int frameNo)
+{
+    mData->mShape.value(frameNo, path);
+}
+
+renderer::Polystar::Polystar(model::Polystar *data)
+    : renderer::Shape(data->isStatic()), mData(data)
+{
+}
+
+void renderer::Polystar::updatePath(VPath &path, int frameNo)
+{
+    VPointF pos = mData->mPos.value(frameNo);
+    float points = mData->mPointCount.value(frameNo);
+    float innerRadius = mData->mInnerRadius.value(frameNo);
+    float outerRadius = mData->mOuterRadius.value(frameNo);
+    float innerRoundness = mData->mInnerRoundness.value(frameNo);
+    float outerRoundness = mData->mOuterRoundness.value(frameNo);
+    float rotation = mData->mRotation.value(frameNo);
+
+    path.reset();
+    VMatrix m;
+
+    if (mData->mPolyType == model::Polystar::PolyType::Star) {
+        path.addPolystar(points, innerRadius, outerRadius, innerRoundness,
+                         outerRoundness, 0.0, 0.0, 0.0, mData->direction());
+    } else {
+        path.addPolygon(points, outerRadius, outerRoundness, 0.0, 0.0, 0.0,
+                        mData->direction());
+    }
+
+    m.translate(pos.x(), pos.y()).rotate(rotation);
+    m.rotate(rotation);
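+    // The star/polygon above is generated around the origin (the centre
+    // arguments passed to addPolystar()/addPolygon() are 0.0), so the
+    // keyframed position and rotation are applied here by mapping the
+    // path through m.
+    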
path.transform(m); +} + +/* + * PaintData Node handling + * + */ +renderer::Paint::Paint(bool staticContent) : mStaticContent(staticContent) {} + +void renderer::Paint::update(int frameNo, const VMatrix &parentMatrix, + float parentAlpha, const DirtyFlag & /*flag*/) +{ + mRenderNodeUpdate = true; + mContentToRender = updateContent(frameNo, parentMatrix, parentAlpha); +} + +void renderer::Paint::updateRenderNode() +{ + bool dirty = false; + for (auto &i : mPathItems) { + if (i->dirty()) { + dirty = true; + break; + } + } + + if (dirty) { + mPath.reset(); + for (const auto &i : mPathItems) { + i->finalPath(mPath); + } + mDrawable.setPath(mPath); + } else { + if (mDrawable.mFlag & VDrawable::DirtyState::Path) + mDrawable.mPath = mPath; + } +} + +void renderer::Paint::renderList(std::vector &list) +{ + if (mRenderNodeUpdate) { + updateRenderNode(); + mRenderNodeUpdate = false; + } + + // Q: Why we even update the final path if we don't have content + // to render ? + // Ans: We update the render nodes because we will loose the + // dirty path information at end of this frame. + // so if we return early without updating the final path. + // in the subsequent frame when we have content to render but + // we may not able to update our final path properly as we + // don't know what paths got changed in between. + if (mContentToRender) list.push_back(&mDrawable); +} + +void renderer::Paint::addPathItems(std::vector &list, + size_t startOffset) +{ + std::copy(list.begin() + startOffset, list.end(), + back_inserter(mPathItems)); +} + +renderer::Fill::Fill(model::Fill *data) + : renderer::Paint(data->isStatic()), mModel(data) +{ + mDrawable.setName(mModel.name()); +} + +bool renderer::Fill::updateContent(int frameNo, const VMatrix &, float alpha) +{ + auto combinedAlpha = alpha * mModel.opacity(frameNo); + auto color = mModel.color(frameNo).toColor(combinedAlpha); + + VBrush brush(color); + mDrawable.setBrush(brush); + mDrawable.setFillRule(mModel.fillRule()); + + return !color.isTransparent(); +} + +renderer::GradientFill::GradientFill(model::GradientFill *data) + : renderer::Paint(data->isStatic()), mData(data) +{ + mDrawable.setName(mData->name()); +} + +bool renderer::GradientFill::updateContent(int frameNo, const VMatrix &matrix, + float alpha) +{ + float combinedAlpha = alpha * mData->opacity(frameNo); + + mData->update(mGradient, frameNo); + mGradient->setAlpha(combinedAlpha); + mGradient->mMatrix = matrix; + mDrawable.setBrush(VBrush(mGradient.get())); + mDrawable.setFillRule(mData->fillRule()); + + return !vIsZero(combinedAlpha); +} + +renderer::Stroke::Stroke(model::Stroke *data) + : renderer::Paint(data->isStatic()), mModel(data) +{ + mDrawable.setName(mModel.name()); + if (mModel.hasDashInfo()) { + mDrawable.setType(VDrawable::Type::StrokeWithDash); + } else { + mDrawable.setType(VDrawable::Type::Stroke); + } +} + +static vthread_local std::vector Dash_Vector; + +bool renderer::Stroke::updateContent(int frameNo, const VMatrix &matrix, + float alpha) +{ + auto combinedAlpha = alpha * mModel.opacity(frameNo); + auto color = mModel.color(frameNo).toColor(combinedAlpha); + + VBrush brush(color); + mDrawable.setBrush(brush); + float scale = matrix.scale(); + mDrawable.setStrokeInfo(mModel.capStyle(), mModel.joinStyle(), + mModel.miterLimit(), + mModel.strokeWidth(frameNo) * scale); + + if (mModel.hasDashInfo()) { + Dash_Vector.clear(); + mModel.getDashInfo(frameNo, Dash_Vector); + if (!Dash_Vector.empty()) { + for (auto &elm : Dash_Vector) elm *= scale; + mDrawable.setDashInfo(Dash_Vector); + } 
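+        // Dash_Vector above is a function-scope vthread_local scratch
+        // buffer: the clear()-and-refill pattern avoids a per-frame
+        // allocation while keeping concurrent updates of different
+        // animations on different threads safe.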
+ } + + return !color.isTransparent(); +} + +renderer::GradientStroke::GradientStroke(model::GradientStroke *data) + : renderer::Paint(data->isStatic()), mData(data) +{ + mDrawable.setName(mData->name()); + if (mData->hasDashInfo()) { + mDrawable.setType(VDrawable::Type::StrokeWithDash); + } else { + mDrawable.setType(VDrawable::Type::Stroke); + } +} + +bool renderer::GradientStroke::updateContent(int frameNo, const VMatrix &matrix, + float alpha) +{ + float combinedAlpha = alpha * mData->opacity(frameNo); + + mData->update(mGradient, frameNo); + mGradient->setAlpha(combinedAlpha); + mGradient->mMatrix = matrix; + auto scale = mGradient->mMatrix.scale(); + mDrawable.setBrush(VBrush(mGradient.get())); + mDrawable.setStrokeInfo(mData->capStyle(), mData->joinStyle(), + mData->miterLimit(), mData->width(frameNo) * scale); + + if (mData->hasDashInfo()) { + Dash_Vector.clear(); + mData->getDashInfo(frameNo, Dash_Vector); + if (!Dash_Vector.empty()) { + for (auto &elm : Dash_Vector) elm *= scale; + mDrawable.setDashInfo(Dash_Vector); + } + } + + return !vIsZero(combinedAlpha); +} + +void renderer::Trim::update(int frameNo, const VMatrix & /*parentMatrix*/, + float /*parentAlpha*/, const DirtyFlag & /*flag*/) +{ + mDirty = false; + + if (mCache.mFrameNo == frameNo) return; + + model::Trim::Segment segment = mData->segment(frameNo); + + if (!(vCompare(mCache.mSegment.start, segment.start) && + vCompare(mCache.mSegment.end, segment.end))) { + mDirty = true; + mCache.mSegment = segment; + } + mCache.mFrameNo = frameNo; +} + +void renderer::Trim::update() +{ + // when both path and trim are not dirty + if (!(mDirty || pathDirty())) return; + + if (vCompare(mCache.mSegment.start, mCache.mSegment.end)) { + for (auto &i : mPathItems) { + i->updatePath(VPath()); + } + return; + } + + if (vCompare(std::fabs(mCache.mSegment.start - mCache.mSegment.end), 1)) { + for (auto &i : mPathItems) { + i->updatePath(i->localPath()); + } + return; + } + + if (mData->type() == model::Trim::TrimType::Simultaneously) { + for (auto &i : mPathItems) { + mPathMesure.setRange(mCache.mSegment.start, mCache.mSegment.end); + i->updatePath(mPathMesure.trim(i->localPath())); + } + } else { // model::Trim::TrimType::Individually + float totalLength = 0.0; + for (auto &i : mPathItems) { + totalLength += i->localPath().length(); + } + float start = totalLength * mCache.mSegment.start; + float end = totalLength * mCache.mSegment.end; + + if (start < end) { + float curLen = 0.0; + for (auto &i : mPathItems) { + if (curLen > end) { + // update with empty path. + i->updatePath(VPath()); + continue; + } + float len = i->localPath().length(); + + if (curLen < start && curLen + len < start) { + curLen += len; + // update with empty path. + i->updatePath(VPath()); + continue; + } else if (start <= curLen && end >= curLen + len) { + // inside segment + curLen += len; + continue; + } else { + float local_start = start > curLen ? start - curLen : 0; + local_start /= len; + float local_end = curLen + len < end ? 
len : end - curLen; + local_end /= len; + mPathMesure.setRange(local_start, local_end); + i->updatePath(mPathMesure.trim(i->localPath())); + curLen += len; + } + } + } + } +} + +void renderer::Trim::addPathItems(std::vector &list, + size_t startOffset) +{ + std::copy(list.begin() + startOffset, list.end(), + back_inserter(mPathItems)); +} + +renderer::Repeater::Repeater(model::Repeater *data, VArenaAlloc *allocator) + : mRepeaterData(data) +{ + assert(mRepeaterData->content()); + + mCopies = mRepeaterData->maxCopies(); + + for (int i = 0; i < mCopies; i++) { + auto content = allocator->make( + mRepeaterData->content(), allocator); + // content->setParent(this); + mContents.push_back(content); + } +} + +void renderer::Repeater::update(int frameNo, const VMatrix &parentMatrix, + float parentAlpha, const DirtyFlag &flag) +{ + DirtyFlag newFlag = flag; + + float copies = mRepeaterData->copies(frameNo); + int visibleCopies = int(copies); + + if (visibleCopies == 0) { + mHidden = true; + return; + } + + mHidden = false; + + if (!mRepeaterData->isStatic()) newFlag |= DirtyFlagBit::Matrix; + + float offset = mRepeaterData->offset(frameNo); + float startOpacity = mRepeaterData->mTransform.startOpacity(frameNo); + float endOpacity = mRepeaterData->mTransform.endOpacity(frameNo); + + newFlag |= DirtyFlagBit::Alpha; + + for (int i = 0; i < mCopies; ++i) { + float newAlpha = + parentAlpha * lerp(startOpacity, endOpacity, i / copies); + + // hide rest of the copies , @TODO find a better solution. + if (i >= visibleCopies) newAlpha = 0; + + VMatrix result = mRepeaterData->mTransform.matrix(frameNo, i + offset) * + parentMatrix; + mContents[i]->update(frameNo, result, newAlpha, newFlag); + } +} + +void renderer::Repeater::renderList(std::vector &list) +{ + if (mHidden) return; + return renderer::Group::renderList(list); +} diff --git a/vendor/github.com/Benau/go_rlottie/lottie_lottieitem.h b/vendor/github.com/Benau/go_rlottie/lottie_lottieitem.h new file mode 100644 index 00000000..e1276113 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_lottieitem.h @@ -0,0 +1,626 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef LOTTIEITEM_H +#define LOTTIEITEM_H + +#include +#include + +#include "lottie_lottiekeypath.h" +#include "lottie_lottiefiltermodel.h" +#include "rlottie.h" +#include "rlottiecommon.h" +#include "vector_varenaalloc.h" +#include "vector_vdrawable.h" +#include "vector_vmatrix.h" +#include "vector_vpainter.h" +#include "vector_vpath.h" +#include "vector_vpathmesure.h" +#include "vector_vpoint.h" + +V_USE_NAMESPACE + +namespace rlottie { + +namespace internal { + +template +class VSpan { +public: + using reference = T &; + using pointer = T *; + using const_pointer = T const *; + using const_reference = T const &; + using index_type = size_t; + + using iterator = pointer; + using const_iterator = const_pointer; + + VSpan() = default; + VSpan(pointer data, index_type size) : _data(data), _size(size) {} + + constexpr pointer data() const noexcept { return _data; } + constexpr index_type size() const noexcept { return _size; } + constexpr bool empty() const noexcept { return size() == 0; } + constexpr iterator begin() const noexcept { return data(); } + constexpr iterator end() const noexcept { return data() + size(); } + constexpr const_iterator cbegin() const noexcept { return data(); } + constexpr const_iterator cend() const noexcept { return data() + size(); } + constexpr reference operator[](index_type idx) const + { + return *(data() + idx); + } + +private: + pointer _data{nullptr}; + index_type _size{0}; +}; + +namespace renderer { + +using DrawableList = VSpan; + +enum class DirtyFlagBit : uchar { + None = 0x00, + Matrix = 0x01, + Alpha = 0x02, + All = (Matrix | Alpha) +}; +typedef vFlag DirtyFlag; + +class SurfaceCache { +public: + SurfaceCache() { mCache.reserve(10); } + + VBitmap make_surface( + size_t width, size_t height, + VBitmap::Format format = VBitmap::Format::ARGB32_Premultiplied) + { + if (mCache.empty()) return {width, height, format}; + + auto surface = mCache.back(); + surface.reset(width, height, format); + + mCache.pop_back(); + return surface; + } + + void release_surface(VBitmap &surface) { mCache.push_back(surface); } + +private: + std::vector mCache; +}; + +class Drawable final : public VDrawable { +public: + void sync(); + +public: + std::unique_ptr mCNode{nullptr}; + + ~Drawable() noexcept + { + if (mCNode && mCNode->mGradient.stopPtr) + free(mCNode->mGradient.stopPtr); + } +}; + +struct CApiData { + CApiData(); + LOTLayerNode mLayer; + std::vector mMasks; + std::vector mLayers; + std::vector mCNodeList; +}; + +class Clipper { +public: + explicit Clipper(VSize size) : mSize(size) {} + void update(const VMatrix &matrix); + void preprocess(const VRect &clip); + VRle rle(const VRle &mask); + +public: + VSize mSize; + VPath mPath; + VRle mMaskedRle; + VRasterizer mRasterizer; + bool mRasterRequest{false}; +}; + +class Mask { +public: + explicit Mask(model::Mask *data) : mData(data) {} + void update(int frameNo, const VMatrix &parentMatrix, float parentAlpha, + const DirtyFlag &flag); + model::Mask::Mode maskMode() const { return mData->mMode; } + VRle rle(); + void preprocess(const VRect &clip); + bool inverted() const { return mData->mInv; } +public: + model::Mask *mData{nullptr}; + VPath mLocalPath; + VPath mFinalPath; + VRasterizer mRasterizer; + float mCombinedAlpha{0}; + bool mRasterRequest{false}; +}; + +/* + * Handels mask property of a layer item + */ +class LayerMask { +public: + explicit LayerMask(model::Layer *layerData); + void update(int frameNo, const VMatrix &parentMatrix, float parentAlpha, + const DirtyFlag &flag); + bool isStatic() const 
{ return mStatic; } + VRle maskRle(const VRect &clipRect); + void preprocess(const VRect &clip); + +public: + std::vector mMasks; + VRle mRle; + bool mStatic{true}; + bool mDirty{true}; +}; + +class Layer; + +class Composition { +public: + explicit Composition(std::shared_ptr composition); + bool update(int frameNo, const VSize &size, bool keepAspectRatio); + VSize size() const { return mViewSize; } + void buildRenderTree(); + const LOTLayerNode *renderTree() const; + bool render(const rlottie::Surface &surface); + void setValue(const std::string &keypath, LOTVariant &value); + +private: + SurfaceCache mSurfaceCache; + VBitmap mSurface; + VMatrix mScaleMatrix; + VSize mViewSize; + std::shared_ptr mModel; + Layer * mRootLayer{nullptr}; + VArenaAlloc mAllocator{2048}; + int mCurFrameNo; + bool mKeepAspectRatio{true}; +}; + +class Layer { +public: + virtual ~Layer() = default; + Layer &operator=(Layer &&) noexcept = delete; + Layer(model::Layer *layerData); + int id() const { return mLayerData->id(); } + int parentId() const { return mLayerData->parentId(); } + void setParentLayer(Layer *parent) { mParentLayer = parent; } + void setComplexContent(bool value) { mComplexContent = value; } + bool complexContent() const { return mComplexContent; } + virtual void update(int frameNo, const VMatrix &parentMatrix, + float parentAlpha); + VMatrix matrix(int frameNo) const; + void preprocess(const VRect &clip); + virtual DrawableList renderList() { return {}; } + virtual void render(VPainter *painter, const VRle &mask, + const VRle &matteRle, SurfaceCache &cache); + bool hasMatte() + { + if (mLayerData->mMatteType == model::MatteType::None) return false; + return true; + } + model::MatteType matteType() const { return mLayerData->mMatteType; } + bool visible() const; + virtual void buildLayerNode(); + LOTLayerNode & clayer() { return mCApiData->mLayer; } + std::vector &clayers() { return mCApiData->mLayers; } + std::vector & cmasks() { return mCApiData->mMasks; } + std::vector & cnodes() { return mCApiData->mCNodeList; } + const char * name() const { return mLayerData->name(); } + virtual bool resolveKeyPath(LOTKeyPath &keyPath, uint depth, + LOTVariant &value); + +protected: + virtual void preprocessStage(const VRect &clip) = 0; + virtual void updateContent() = 0; + inline VMatrix combinedMatrix() const { return mCombinedMatrix; } + inline int frameNo() const { return mFrameNo; } + inline float combinedAlpha() const { return mCombinedAlpha; } + inline bool isStatic() const { return mLayerData->isStatic(); } + float opacity(int frameNo) const { return mLayerData->opacity(frameNo); } + inline DirtyFlag flag() const { return mDirtyFlag; } + bool skipRendering() const + { + return (!visible() || vIsZero(combinedAlpha())); + } + +protected: + std::unique_ptr mLayerMask; + model::Layer * mLayerData{nullptr}; + Layer * mParentLayer{nullptr}; + VMatrix mCombinedMatrix; + float mCombinedAlpha{0.0}; + int mFrameNo{-1}; + DirtyFlag mDirtyFlag{DirtyFlagBit::All}; + bool mComplexContent{false}; + std::unique_ptr mCApiData; +}; + +class CompLayer final : public Layer { +public: + explicit CompLayer(model::Layer *layerData, VArenaAlloc *allocator); + + void render(VPainter *painter, const VRle &mask, const VRle &matteRle, + SurfaceCache &cache) final; + void buildLayerNode() final; + bool resolveKeyPath(LOTKeyPath &keyPath, uint depth, + LOTVariant &value) override; + +protected: + void preprocessStage(const VRect &clip) final; + void updateContent() final; + +private: + void renderHelper(VPainter *painter, const 
VRle &mask, const VRle &matteRle, + SurfaceCache &cache); + void renderMatteLayer(VPainter *painter, const VRle &inheritMask, + const VRle &matteRle, Layer *layer, Layer *src, + SurfaceCache &cache); + +private: + std::vector mLayers; + std::unique_ptr mClipper; +}; + +class SolidLayer final : public Layer { +public: + explicit SolidLayer(model::Layer *layerData); + void buildLayerNode() final; + DrawableList renderList() final; + +protected: + void preprocessStage(const VRect &clip) final; + void updateContent() final; + +private: + Drawable mRenderNode; + VPath mPath; + VDrawable *mDrawableList{nullptr}; // to work with the Span api +}; + +class Group; + +class ShapeLayer final : public Layer { +public: + explicit ShapeLayer(model::Layer *layerData, VArenaAlloc *allocator); + DrawableList renderList() final; + void buildLayerNode() final; + bool resolveKeyPath(LOTKeyPath &keyPath, uint depth, + LOTVariant &value) override; + +protected: + void preprocessStage(const VRect &clip) final; + void updateContent() final; + std::vector mDrawableList; + Group * mRoot{nullptr}; +}; + +class NullLayer final : public Layer { +public: + explicit NullLayer(model::Layer *layerData); + +protected: + void preprocessStage(const VRect &) final {} + void updateContent() final; +}; + +class ImageLayer final : public Layer { +public: + explicit ImageLayer(model::Layer *layerData); + void buildLayerNode() final; + DrawableList renderList() final; + +protected: + void preprocessStage(const VRect &clip) final; + void updateContent() final; + +private: + Drawable mRenderNode; + VTexture mTexture; + VPath mPath; + VDrawable *mDrawableList{nullptr}; // to work with the Span api +}; + +class Object { +public: + enum class Type : uchar { Unknown, Group, Shape, Paint, Trim }; + virtual ~Object() = default; + Object & operator=(Object &&) noexcept = delete; + virtual void update(int frameNo, const VMatrix &parentMatrix, + float parentAlpha, const DirtyFlag &flag) = 0; + virtual void renderList(std::vector &) {} + virtual bool resolveKeyPath(LOTKeyPath &, uint, LOTVariant &) + { + return false; + } + virtual Object::Type type() const { return Object::Type::Unknown; } +}; + +class Shape; +class Group : public Object { +public: + Group() = default; + explicit Group(model::Group *data, VArenaAlloc *allocator); + void addChildren(model::Group *data, VArenaAlloc *allocator); + void update(int frameNo, const VMatrix &parentMatrix, float parentAlpha, + const DirtyFlag &flag) override; + void applyTrim(); + void processTrimItems(std::vector &list); + void processPaintItems(std::vector &list); + void renderList(std::vector &list) override; + Object::Type type() const final { return Object::Type::Group; } + const VMatrix &matrix() const { return mMatrix; } + const char * name() const + { + static const char *TAG = "__"; + return mModel.hasModel() ? 
mModel.name() : TAG; + } + bool resolveKeyPath(LOTKeyPath &keyPath, uint depth, + LOTVariant &value) override; + +protected: + std::vector mContents; + VMatrix mMatrix; + +private: + model::Filter mModel; +}; + +class Shape : public Object { +public: + Shape(bool staticPath) : mStaticPath(staticPath) {} + void update(int frameNo, const VMatrix &parentMatrix, float parentAlpha, + const DirtyFlag &flag) final; + Object::Type type() const final { return Object::Type::Shape; } + bool dirty() const { return mDirtyPath; } + const VPath &localPath() const { return mTemp; } + void finalPath(VPath &result); + void updatePath(const VPath &path) + { + mTemp = path; + mDirtyPath = true; + } + bool staticPath() const { return mStaticPath; } + void setParent(Group *parent) { mParent = parent; } + Group *parent() const { return mParent; } + +protected: + virtual void updatePath(VPath &path, int frameNo) = 0; + virtual bool hasChanged(int prevFrame, int curFrame) = 0; + +private: + bool hasChanged(int frameNo) + { + int prevFrame = mFrameNo; + mFrameNo = frameNo; + if (prevFrame == -1) return true; + if (mStaticPath || (prevFrame == frameNo)) return false; + return hasChanged(prevFrame, frameNo); + } + Group *mParent{nullptr}; + VPath mLocalPath; + VPath mTemp; + int mFrameNo{-1}; + bool mDirtyPath{true}; + bool mStaticPath; +}; + +class Rect final : public Shape { +public: + explicit Rect(model::Rect *data); + +protected: + void updatePath(VPath &path, int frameNo) final; + model::Rect *mData{nullptr}; + + bool hasChanged(int prevFrame, int curFrame) final + { + return (mData->mPos.changed(prevFrame, curFrame) || + mData->mSize.changed(prevFrame, curFrame) || + mData->roundnessChanged(prevFrame, curFrame)); + } +}; + +class Ellipse final : public Shape { +public: + explicit Ellipse(model::Ellipse *data); + +private: + void updatePath(VPath &path, int frameNo) final; + model::Ellipse *mData{nullptr}; + bool hasChanged(int prevFrame, int curFrame) final + { + return (mData->mPos.changed(prevFrame, curFrame) || + mData->mSize.changed(prevFrame, curFrame)); + } +}; + +class Path final : public Shape { +public: + explicit Path(model::Path *data); + +private: + void updatePath(VPath &path, int frameNo) final; + model::Path *mData{nullptr}; + bool hasChanged(int prevFrame, int curFrame) final + { + return mData->mShape.changed(prevFrame, curFrame); + } +}; + +class Polystar final : public Shape { +public: + explicit Polystar(model::Polystar *data); + +private: + void updatePath(VPath &path, int frameNo) final; + model::Polystar *mData{nullptr}; + + bool hasChanged(int prevFrame, int curFrame) final + { + return (mData->mPos.changed(prevFrame, curFrame) || + mData->mPointCount.changed(prevFrame, curFrame) || + mData->mInnerRadius.changed(prevFrame, curFrame) || + mData->mOuterRadius.changed(prevFrame, curFrame) || + mData->mInnerRoundness.changed(prevFrame, curFrame) || + mData->mOuterRoundness.changed(prevFrame, curFrame) || + mData->mRotation.changed(prevFrame, curFrame)); + } +}; + +class Paint : public Object { +public: + Paint(bool staticContent); + void addPathItems(std::vector &list, size_t startOffset); + void update(int frameNo, const VMatrix &parentMatrix, float parentAlpha, + const DirtyFlag &flag) override; + void renderList(std::vector &list) final; + Object::Type type() const final { return Object::Type::Paint; } + +protected: + virtual bool updateContent(int frameNo, const VMatrix &matrix, + float alpha) = 0; + +private: + void updateRenderNode(); + +protected: + std::vector mPathItems; + Drawable 
mDrawable; + VPath mPath; + DirtyFlag mFlag; + bool mStaticContent; + bool mRenderNodeUpdate{true}; + bool mContentToRender{true}; +}; + +class Fill final : public Paint { +public: + explicit Fill(model::Fill *data); + +protected: + bool updateContent(int frameNo, const VMatrix &matrix, float alpha) final; + bool resolveKeyPath(LOTKeyPath &keyPath, uint depth, + LOTVariant &value) final; + +private: + model::Filter mModel; +}; + +class GradientFill final : public Paint { +public: + explicit GradientFill(model::GradientFill *data); + +protected: + bool updateContent(int frameNo, const VMatrix &matrix, float alpha) final; + +private: + model::GradientFill * mData{nullptr}; + std::unique_ptr mGradient; +}; + +class Stroke : public Paint { +public: + explicit Stroke(model::Stroke *data); + +protected: + bool updateContent(int frameNo, const VMatrix &matrix, float alpha) final; + bool resolveKeyPath(LOTKeyPath &keyPath, uint depth, + LOTVariant &value) final; + +private: + model::Filter mModel; +}; + +class GradientStroke final : public Paint { +public: + explicit GradientStroke(model::GradientStroke *data); + +protected: + bool updateContent(int frameNo, const VMatrix &matrix, float alpha) final; + +private: + model::GradientStroke * mData{nullptr}; + std::unique_ptr mGradient; +}; + +class Trim final : public Object { +public: + explicit Trim(model::Trim *data) : mData(data) {} + void update(int frameNo, const VMatrix &parentMatrix, float parentAlpha, + const DirtyFlag &flag) final; + Object::Type type() const final { return Object::Type::Trim; } + void update(); + void addPathItems(std::vector &list, size_t startOffset); + +private: + bool pathDirty() const + { + for (auto &i : mPathItems) { + if (i->dirty()) return true; + } + return false; + } + struct Cache { + int mFrameNo{-1}; + model::Trim::Segment mSegment{}; + }; + Cache mCache; + std::vector mPathItems; + model::Trim * mData{nullptr}; + VPathMesure mPathMesure; + bool mDirty{true}; +}; + +class Repeater final : public Group { +public: + explicit Repeater(model::Repeater *data, VArenaAlloc *allocator); + void update(int frameNo, const VMatrix &parentMatrix, float parentAlpha, + const DirtyFlag &flag) final; + void renderList(std::vector &list) final; + +private: + model::Repeater *mRepeaterData{nullptr}; + bool mHidden{false}; + int mCopies{0}; +}; + +} // namespace renderer + +} // namespace internal + +} // namespace rlottie + +#endif // LOTTIEITEM_H diff --git a/vendor/github.com/Benau/go_rlottie/lottie_lottieitem_capi.cpp b/vendor/github.com/Benau/go_rlottie/lottie_lottieitem_capi.cpp new file mode 100644 index 00000000..8ab8f842 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_lottieitem_capi.cpp @@ -0,0 +1,339 @@ +/* + * Implements LottieItem functions needed + * to support renderTree() api. + * Moving all those implementation to its own + * file make clear separation as well easy of + * maintenance. 
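+ * (This is the tree that C API consumers walk after calling
+ * lottie_animation_render_tree(); see rlottie_capi.h.)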
+ */ + +#include "lottie_lottieitem.h" +#include "vector_vdasher.h" + +using namespace rlottie::internal; + +renderer::CApiData::CApiData() +{ + mLayer.mMaskList.ptr = nullptr; + mLayer.mMaskList.size = 0; + mLayer.mLayerList.ptr = nullptr; + mLayer.mLayerList.size = 0; + mLayer.mNodeList.ptr = nullptr; + mLayer.mNodeList.size = 0; + mLayer.mMatte = MatteNone; + mLayer.mVisible = 0; + mLayer.mAlpha = 255; + mLayer.mClipPath.ptPtr = nullptr; + mLayer.mClipPath.elmPtr = nullptr; + mLayer.mClipPath.ptCount = 0; + mLayer.mClipPath.elmCount = 0; + mLayer.keypath = nullptr; +} + +void renderer::Composition::buildRenderTree() +{ + mRootLayer->buildLayerNode(); +} + +const LOTLayerNode *renderer::Composition::renderTree() const +{ + return &mRootLayer->clayer(); +} + +void renderer::CompLayer::buildLayerNode() +{ + renderer::Layer::buildLayerNode(); + if (mClipper) { + const auto &elm = mClipper->mPath.elements(); + const auto &pts = mClipper->mPath.points(); + auto ptPtr = reinterpret_cast(pts.data()); + auto elmPtr = reinterpret_cast(elm.data()); + clayer().mClipPath.ptPtr = ptPtr; + clayer().mClipPath.elmPtr = elmPtr; + clayer().mClipPath.ptCount = 2 * pts.size(); + clayer().mClipPath.elmCount = elm.size(); + } + if (mLayers.size() != clayers().size()) { + for (const auto &layer : mLayers) { + layer->buildLayerNode(); + clayers().push_back(&layer->clayer()); + } + clayer().mLayerList.ptr = clayers().data(); + clayer().mLayerList.size = clayers().size(); + } else { + for (const auto &layer : mLayers) { + layer->buildLayerNode(); + } + } +} + +void renderer::ShapeLayer::buildLayerNode() +{ + renderer::Layer::buildLayerNode(); + + auto renderlist = renderList(); + + cnodes().clear(); + for (auto &i : renderlist) { + auto lotDrawable = static_cast(i); + lotDrawable->sync(); + cnodes().push_back(lotDrawable->mCNode.get()); + } + clayer().mNodeList.ptr = cnodes().data(); + clayer().mNodeList.size = cnodes().size(); +} + +void renderer::Layer::buildLayerNode() +{ + if (!mCApiData) { + mCApiData = std::make_unique(); + clayer().keypath = name(); + } + if (complexContent()) clayer().mAlpha = uchar(combinedAlpha() * 255.f); + clayer().mVisible = visible(); + // update matte + if (hasMatte()) { + switch (mLayerData->mMatteType) { + case model::MatteType::Alpha: + clayer().mMatte = MatteAlpha; + break; + case model::MatteType::AlphaInv: + clayer().mMatte = MatteAlphaInv; + break; + case model::MatteType::Luma: + clayer().mMatte = MatteLuma; + break; + case model::MatteType::LumaInv: + clayer().mMatte = MatteLumaInv; + break; + default: + clayer().mMatte = MatteNone; + break; + } + } + if (mLayerMask) { + cmasks().clear(); + cmasks().resize(mLayerMask->mMasks.size()); + size_t i = 0; + for (const auto &mask : mLayerMask->mMasks) { + auto & cNode = cmasks()[i++]; + const auto &elm = mask.mFinalPath.elements(); + const auto &pts = mask.mFinalPath.points(); + auto ptPtr = reinterpret_cast(pts.data()); + auto elmPtr = reinterpret_cast(elm.data()); + cNode.mPath.ptPtr = ptPtr; + cNode.mPath.ptCount = 2 * pts.size(); + cNode.mPath.elmPtr = elmPtr; + cNode.mPath.elmCount = elm.size(); + cNode.mAlpha = uchar(mask.mCombinedAlpha * 255.0f); + switch (mask.maskMode()) { + case model::Mask::Mode::Add: + cNode.mMode = MaskAdd; + break; + case model::Mask::Mode::Substarct: + cNode.mMode = MaskSubstract; + break; + case model::Mask::Mode::Intersect: + cNode.mMode = MaskIntersect; + break; + case model::Mask::Mode::Difference: + cNode.mMode = MaskDifference; + break; + default: + cNode.mMode = MaskAdd; + break; + } + } + 
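// Hand the rebuilt mask nodes to the C tree. The raw pointers stay
+        // valid because the vector behind cmasks() is owned by this
+        // layer's CApiData, which lives as long as the render tree.
+        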
clayer().mMaskList.ptr = cmasks().data(); + clayer().mMaskList.size = cmasks().size(); + } +} + +void renderer::SolidLayer::buildLayerNode() +{ + renderer::Layer::buildLayerNode(); + + auto renderlist = renderList(); + + cnodes().clear(); + for (auto &i : renderlist) { + auto lotDrawable = static_cast(i); + lotDrawable->sync(); + cnodes().push_back(lotDrawable->mCNode.get()); + } + clayer().mNodeList.ptr = cnodes().data(); + clayer().mNodeList.size = cnodes().size(); +} + +void renderer::ImageLayer::buildLayerNode() +{ + renderer::Layer::buildLayerNode(); + + auto renderlist = renderList(); + + cnodes().clear(); + for (auto &i : renderlist) { + auto lotDrawable = static_cast(i); + lotDrawable->sync(); + + lotDrawable->mCNode->mImageInfo.data = + lotDrawable->mBrush.mTexture->mBitmap.data(); + lotDrawable->mCNode->mImageInfo.width = + int(lotDrawable->mBrush.mTexture->mBitmap.width()); + lotDrawable->mCNode->mImageInfo.height = + int(lotDrawable->mBrush.mTexture->mBitmap.height()); + + lotDrawable->mCNode->mImageInfo.mMatrix.m11 = combinedMatrix().m_11(); + lotDrawable->mCNode->mImageInfo.mMatrix.m12 = combinedMatrix().m_12(); + lotDrawable->mCNode->mImageInfo.mMatrix.m13 = combinedMatrix().m_13(); + + lotDrawable->mCNode->mImageInfo.mMatrix.m21 = combinedMatrix().m_21(); + lotDrawable->mCNode->mImageInfo.mMatrix.m22 = combinedMatrix().m_22(); + lotDrawable->mCNode->mImageInfo.mMatrix.m23 = combinedMatrix().m_23(); + + lotDrawable->mCNode->mImageInfo.mMatrix.m31 = combinedMatrix().m_tx(); + lotDrawable->mCNode->mImageInfo.mMatrix.m32 = combinedMatrix().m_ty(); + lotDrawable->mCNode->mImageInfo.mMatrix.m33 = combinedMatrix().m_33(); + + // Alpha calculation already combined. + lotDrawable->mCNode->mImageInfo.mAlpha = + uchar(lotDrawable->mBrush.mTexture->mAlpha); + + cnodes().push_back(lotDrawable->mCNode.get()); + } + clayer().mNodeList.ptr = cnodes().data(); + clayer().mNodeList.size = cnodes().size(); +} + +static void updateGStops(LOTNode *n, const VGradient *grad) +{ + if (grad->mStops.size() != n->mGradient.stopCount) { + if (n->mGradient.stopCount) free(n->mGradient.stopPtr); + n->mGradient.stopCount = grad->mStops.size(); + n->mGradient.stopPtr = (LOTGradientStop *)malloc( + n->mGradient.stopCount * sizeof(LOTGradientStop)); + } + + LOTGradientStop *ptr = n->mGradient.stopPtr; + for (const auto &i : grad->mStops) { + ptr->pos = i.first; + ptr->a = uchar(i.second.alpha() * grad->alpha()); + ptr->r = i.second.red(); + ptr->g = i.second.green(); + ptr->b = i.second.blue(); + ptr++; + } +} + +void renderer::Drawable::sync() +{ + if (!mCNode) { + mCNode = std::make_unique(); + mCNode->mGradient.stopPtr = nullptr; + mCNode->mGradient.stopCount = 0; + } + + mCNode->mFlag = ChangeFlagNone; + if (mFlag & DirtyState::None) return; + + if (mFlag & DirtyState::Path) { + applyDashOp(); + const std::vector &elm = mPath.elements(); + const std::vector & pts = mPath.points(); + const float *ptPtr = reinterpret_cast(pts.data()); + const char * elmPtr = reinterpret_cast(elm.data()); + mCNode->mPath.elmPtr = elmPtr; + mCNode->mPath.elmCount = elm.size(); + mCNode->mPath.ptPtr = ptPtr; + mCNode->mPath.ptCount = 2 * pts.size(); + mCNode->mFlag |= ChangeFlagPath; + mCNode->keypath = name(); + } + + if (mStrokeInfo) { + mCNode->mStroke.width = mStrokeInfo->width; + mCNode->mStroke.miterLimit = mStrokeInfo->miterLimit; + mCNode->mStroke.enable = 1; + + switch (mStrokeInfo->cap) { + case CapStyle::Flat: + mCNode->mStroke.cap = LOTCapStyle::CapFlat; + break; + case CapStyle::Square: + mCNode->mStroke.cap = 
LOTCapStyle::CapSquare; + break; + case CapStyle::Round: + mCNode->mStroke.cap = LOTCapStyle::CapRound; + break; + } + + switch (mStrokeInfo->join) { + case JoinStyle::Miter: + mCNode->mStroke.join = LOTJoinStyle::JoinMiter; + break; + case JoinStyle::Bevel: + mCNode->mStroke.join = LOTJoinStyle::JoinBevel; + break; + case JoinStyle::Round: + mCNode->mStroke.join = LOTJoinStyle::JoinRound; + break; + default: + mCNode->mStroke.join = LOTJoinStyle::JoinMiter; + break; + } + } else { + mCNode->mStroke.enable = 0; + } + + switch (mFillRule) { + case FillRule::EvenOdd: + mCNode->mFillRule = LOTFillRule::FillEvenOdd; + break; + default: + mCNode->mFillRule = LOTFillRule::FillWinding; + break; + } + + switch (mBrush.type()) { + case VBrush::Type::Solid: + mCNode->mBrushType = LOTBrushType::BrushSolid; + mCNode->mColor.r = mBrush.mColor.r; + mCNode->mColor.g = mBrush.mColor.g; + mCNode->mColor.b = mBrush.mColor.b; + mCNode->mColor.a = mBrush.mColor.a; + break; + case VBrush::Type::LinearGradient: { + mCNode->mBrushType = LOTBrushType::BrushGradient; + mCNode->mGradient.type = LOTGradientType::GradientLinear; + VPointF s = mBrush.mGradient->mMatrix.map( + {mBrush.mGradient->linear.x1, mBrush.mGradient->linear.y1}); + VPointF e = mBrush.mGradient->mMatrix.map( + {mBrush.mGradient->linear.x2, mBrush.mGradient->linear.y2}); + mCNode->mGradient.start.x = s.x(); + mCNode->mGradient.start.y = s.y(); + mCNode->mGradient.end.x = e.x(); + mCNode->mGradient.end.y = e.y(); + updateGStops(mCNode.get(), mBrush.mGradient); + break; + } + case VBrush::Type::RadialGradient: { + mCNode->mBrushType = LOTBrushType::BrushGradient; + mCNode->mGradient.type = LOTGradientType::GradientRadial; + VPointF c = mBrush.mGradient->mMatrix.map( + {mBrush.mGradient->radial.cx, mBrush.mGradient->radial.cy}); + VPointF f = mBrush.mGradient->mMatrix.map( + {mBrush.mGradient->radial.fx, mBrush.mGradient->radial.fy}); + mCNode->mGradient.center.x = c.x(); + mCNode->mGradient.center.y = c.y(); + mCNode->mGradient.focal.x = f.x(); + mCNode->mGradient.focal.y = f.y(); + + float scale = mBrush.mGradient->mMatrix.scale(); + mCNode->mGradient.cradius = mBrush.mGradient->radial.cradius * scale; + mCNode->mGradient.fradius = mBrush.mGradient->radial.fradius * scale; + updateGStops(mCNode.get(), mBrush.mGradient); + break; + } + default: + break; + } +} diff --git a/vendor/github.com/Benau/go_rlottie/lottie_lottiekeypath.cpp b/vendor/github.com/Benau/go_rlottie/lottie_lottiekeypath.cpp new file mode 100644 index 00000000..4fd090e9 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_lottiekeypath.cpp @@ -0,0 +1,86 @@ +#include "lottie_lottiekeypath.h" + +#include + +LOTKeyPath::LOTKeyPath(const std::string &keyPath) +{ + std::stringstream ss(keyPath); + std::string item; + + while (getline(ss, item, '.')) { + mKeys.push_back(item); + } +} + +bool LOTKeyPath::matches(const std::string &key, uint depth) +{ + if (skip(key)) { + // This is an object we programatically create. + return true; + } + if (depth > size()) { + return false; + } + if ((mKeys[depth] == key) || (mKeys[depth] == "*") || + (mKeys[depth] == "**")) { + return true; + } + return false; +} + +uint LOTKeyPath::nextDepth(const std::string key, uint depth) +{ + if (skip(key)) { + // If it's a container then we added programatically and it isn't a part + // of the keypath. + return depth; + } + if (mKeys[depth] != "**") { + // If it's not a globstar then it is part of the keypath. + return depth + 1; + } + if (depth == size()) { + // The last key is a globstar. 
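+        // e.g. with a keypath like "content.**" every key reached below
+        // "content" keeps resolving at this same depth, so the trailing
+        // globstar can swallow an arbitrarily deep subtree.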
+ return depth; + } + if (mKeys[depth + 1] == key) { + // We are a globstar and the next key is our current key so consume + // both. + return depth + 2; + } + return depth; +} + +bool LOTKeyPath::fullyResolvesTo(const std::string key, uint depth) +{ + if (depth > mKeys.size()) { + return false; + } + + bool isLastDepth = (depth == size()); + + if (!isGlobstar(depth)) { + bool matches = (mKeys[depth] == key) || isGlob(depth); + return (isLastDepth || (depth == size() - 1 && endsWithGlobstar())) && + matches; + } + + bool isGlobstarButNextKeyMatches = !isLastDepth && mKeys[depth + 1] == key; + if (isGlobstarButNextKeyMatches) { + return depth == size() - 1 || + (depth == size() - 2 && endsWithGlobstar()); + } + + if (isLastDepth) { + return true; + } + + if (depth + 1 < size()) { + // We are a globstar but there is more than 1 key after the globstar we + // we can't fully match. + return false; + } + // Return whether the next key (which we now know is the last one) is the + // same as the current key. + return mKeys[depth + 1] == key; +} diff --git a/vendor/github.com/Benau/go_rlottie/lottie_lottiekeypath.h b/vendor/github.com/Benau/go_rlottie/lottie_lottiekeypath.h new file mode 100644 index 00000000..c2fd8511 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_lottiekeypath.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef LOTTIEKEYPATH_H +#define LOTTIEKEYPATH_H + +#include +#include +#include "vector_vglobal.h" + +class LOTKeyPath { +public: + LOTKeyPath(const std::string &keyPath); + bool matches(const std::string &key, uint depth); + uint nextDepth(const std::string key, uint depth); + bool fullyResolvesTo(const std::string key, uint depth); + + bool propagate(const std::string key, uint depth) + { + return skip(key) ? 
true : (depth < size()) || (mKeys[depth] == "**"); + } + bool skip(const std::string &key) const { return key == "__"; } + +private: + bool isGlobstar(uint depth) const { return mKeys[depth] == "**"; } + bool isGlob(uint depth) const { return mKeys[depth] == "*"; } + bool endsWithGlobstar() const { return mKeys.back() == "**"; } + size_t size() const { return mKeys.size() - 1; } + +private: + std::vector mKeys; +}; + +#endif // LOTTIEKEYPATH_H diff --git a/vendor/github.com/Benau/go_rlottie/lottie_lottieloader.cpp b/vendor/github.com/Benau/go_rlottie/lottie_lottieloader.cpp new file mode 100644 index 00000000..cfcd8f84 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_lottieloader.cpp @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include + +#include "lottie_lottiemodel.h" + +using namespace rlottie::internal; + +#ifdef LOTTIE_CACHE_SUPPORT + +#include +#include + +class ModelCache { +public: + static ModelCache &instance() + { + static ModelCache singleton; + return singleton; + } + std::shared_ptr find(const std::string &key) + { + std::lock_guard guard(mMutex); + + if (!mcacheSize) return nullptr; + + auto search = mHash.find(key); + + return (search != mHash.end()) ? 
search->second : nullptr; + } + void add(const std::string &key, std::shared_ptr value) + { + std::lock_guard guard(mMutex); + + if (!mcacheSize) return; + + //@TODO just remove the 1st element + // not the best of LRU logic + if (mcacheSize == mHash.size()) mHash.erase(mHash.cbegin()); + + mHash[key] = std::move(value); + } + + void configureCacheSize(size_t cacheSize) + { + std::lock_guard guard(mMutex); + mcacheSize = cacheSize; + + if (!mcacheSize) mHash.clear(); + } + +private: + ModelCache() = default; + + std::unordered_map> mHash; + std::mutex mMutex; + size_t mcacheSize{10}; +}; + +#else + +class ModelCache { +public: + static ModelCache &instance() + { + static ModelCache singleton; + return singleton; + } + std::shared_ptr find(const std::string &) + { + return nullptr; + } + void add(const std::string &, std::shared_ptr) {} + void configureCacheSize(size_t) {} +}; + +#endif + +static std::string dirname(const std::string &path) +{ + const char *ptr = strrchr(path.c_str(), '/'); +#ifdef _WIN32 + if (ptr) ptr = strrchr(ptr + 1, '\\'); +#endif + int len = int(ptr + 1 - path.c_str()); // +1 to include '/' + return std::string(path, 0, len); +} + +void model::configureModelCacheSize(size_t cacheSize) +{ + ModelCache::instance().configureCacheSize(cacheSize); +} + +std::shared_ptr model::loadFromFile(const std::string &path, + bool cachePolicy) +{ + if (cachePolicy) { + auto obj = ModelCache::instance().find(path); + if (obj) return obj; + } + + std::ifstream f; + f.open(path); + + if (!f.is_open()) { + vCritical << "failed to open file = " << path.c_str(); + return {}; + } else { + std::string content; + + std::getline(f, content, '\0'); + f.close(); + + if (content.empty()) return {}; + + auto obj = internal::model::parse(const_cast(content.c_str()), + dirname(path)); + + if (obj && cachePolicy) ModelCache::instance().add(path, obj); + + return obj; + } +} + +std::shared_ptr model::loadFromData( + std::string jsonData, const std::string &key, std::string resourcePath, + bool cachePolicy) +{ + if (cachePolicy) { + auto obj = ModelCache::instance().find(key); + if (obj) return obj; + } + + auto obj = internal::model::parse(const_cast(jsonData.c_str()), + std::move(resourcePath)); + + if (obj && cachePolicy) ModelCache::instance().add(key, obj); + + return obj; +} + +std::shared_ptr model::loadFromData( + std::string jsonData, std::string resourcePath, model::ColorFilter filter) +{ + return internal::model::parse(const_cast(jsonData.c_str()), + std::move(resourcePath), std::move(filter)); +} diff --git a/vendor/github.com/Benau/go_rlottie/lottie_lottiemodel.cpp b/vendor/github.com/Benau/go_rlottie/lottie_lottiemodel.cpp new file mode 100644 index 00000000..0389e8aa --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_lottiemodel.cpp @@ -0,0 +1,390 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "lottie_lottiemodel.h"
+#include <cmath>
+#include <iterator>
+#include <utility>
+#include "vector_vimageloader.h"
+#include "vector_vline.h"
+
+using namespace rlottie::internal;
+
+/*
+ * We process the repeater objects in the children list
+ * by iterating from back to front. When we find a repeater object
+ * we remove the objects from the start till the repeater object and then
+ * place them under a new shape group object which we add as a child to the
+ * repeater object.
+ * Then we visit the children of the newly created shape group object to
+ * process the remaining repeater objects (when the children list contains
+ * more than one repeater).
+ *
+ */
+class LottieRepeaterProcesser {
+public:
+    void visitChildren(model::Group *obj)
+    {
+        for (auto i = obj->mChildren.rbegin(); i != obj->mChildren.rend();
+             ++i) {
+            auto child = (*i);
+            if (child->type() == model::Object::Type::Repeater) {
+                model::Repeater *repeater =
+                    static_cast<model::Repeater *>(child);
+                // check if this repeater is already processed
+                // can happen if the layer is an asset and referenced by
+                // multiple layers.
+                if (repeater->processed()) continue;
+
+                repeater->markProcessed();
+
+                auto content = repeater->content();
+                // 1. increment the reverse iterator to point to the
+                //    object before the repeater
+                ++i;
+                // 2. move all the children till the repeater to the group
+                std::move(obj->mChildren.begin(), i.base(),
+                          back_inserter(content->mChildren));
+                // 3. erase the objects from the original children list
+                obj->mChildren.erase(obj->mChildren.begin(), i.base());
+
+                // 4. visit the newly created group to process the remaining
+                //    repeater objects.
+                visitChildren(content);
+                // 5. 
exit the loop as the current iterators are invalid + break; + } + visit(child); + } + } + + void visit(model::Object *obj) + { + switch (obj->type()) { + case model::Object::Type::Group: + case model::Object::Type::Layer: { + visitChildren(static_cast(obj)); + break; + } + default: + break; + } + } +}; + +class LottieUpdateStatVisitor { + model::Composition::Stats *stat; + +public: + explicit LottieUpdateStatVisitor(model::Composition::Stats *s) : stat(s) {} + void visitChildren(model::Group *obj) + { + for (const auto &child : obj->mChildren) { + if (child) visit(child); + } + } + void visitLayer(model::Layer *layer) + { + switch (layer->mLayerType) { + case model::Layer::Type::Precomp: + stat->precompLayerCount++; + break; + case model::Layer::Type::Null: + stat->nullLayerCount++; + break; + case model::Layer::Type::Shape: + stat->shapeLayerCount++; + break; + case model::Layer::Type::Solid: + stat->solidLayerCount++; + break; + case model::Layer::Type::Image: + stat->imageLayerCount++; + break; + default: + break; + } + visitChildren(layer); + } + void visit(model::Object *obj) + { + switch (obj->type()) { + case model::Object::Type::Layer: { + visitLayer(static_cast(obj)); + break; + } + case model::Object::Type::Repeater: { + visitChildren(static_cast(obj)->content()); + break; + } + case model::Object::Type::Group: { + visitChildren(static_cast(obj)); + break; + } + default: + break; + } + } +}; + +void model::Composition::processRepeaterObjects() +{ + LottieRepeaterProcesser visitor; + visitor.visit(mRootLayer); +} + +void model::Composition::updateStats() +{ + LottieUpdateStatVisitor visitor(&mStats); + visitor.visit(mRootLayer); +} + +VMatrix model::Repeater::Transform::matrix(int frameNo, float multiplier) const +{ + VPointF scale = mScale.value(frameNo) / 100.f; + scale.setX(std::pow(scale.x(), multiplier)); + scale.setY(std::pow(scale.y(), multiplier)); + VMatrix m; + m.translate(mPosition.value(frameNo) * multiplier) + .translate(mAnchor.value(frameNo)) + .scale(scale) + .rotate(mRotation.value(frameNo) * multiplier) + .translate(-mAnchor.value(frameNo)); + + return m; +} + +VMatrix model::Transform::Data::matrix(int frameNo, bool autoOrient) const +{ + VMatrix m; + VPointF position; + if (mExtra && mExtra->mSeparate) { + position.setX(mExtra->mSeparateX.value(frameNo)); + position.setY(mExtra->mSeparateY.value(frameNo)); + } else { + position = mPosition.value(frameNo); + } + + float angle = autoOrient ? mPosition.angle(frameNo) : 0; + if (mExtra && mExtra->m3DData) { + m.translate(position) + .rotate(mExtra->m3DRz.value(frameNo) + angle) + .rotate(mExtra->m3DRy.value(frameNo), VMatrix::Axis::Y) + .rotate(mExtra->m3DRx.value(frameNo), VMatrix::Axis::X) + .scale(mScale.value(frameNo) / 100.f) + .translate(-mAnchor.value(frameNo)); + } else { + m.translate(position) + .rotate(mRotation.value(frameNo) + angle) + .scale(mScale.value(frameNo) / 100.f) + .translate(-mAnchor.value(frameNo)); + } + return m; +} + +void model::Dash::getDashInfo(int frameNo, std::vector &result) const +{ + result.clear(); + + if (mData.size() <= 1) return; + + if (result.capacity() < mData.size()) result.reserve(mData.size() + 1); + + for (const auto &elm : mData) result.push_back(elm.value(frameNo)); + + // if the size is even then we are missing last + // gap information which is same as the last dash value + // copy it from the last dash value. + // NOTE: last value is the offset and last-1 is the last dash value. + auto size = result.size(); + if ((size % 2) == 0) { + // copy offset value to end. 
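+        // e.g. {10, 5, 20, 2} (dash, gap, dash, offset) becomes
+        // {10, 5, 20, 20, 2}: the last dash value doubles as the missing
+        // gap while the offset stays at the end.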
+ result.push_back(result.back()); + // copy dash value to gap. + result[size - 1] = result[size - 2]; + } +} + +/** + * Both the color stops and opacity stops are in the same array. + * There are {@link #colorPoints} colors sequentially as: + * [ + * ..., + * position, + * red, + * green, + * blue, + * ... + * ] + * + * The remainder of the array is the opacity stops sequentially as: + * [ + * ..., + * position, + * opacity, + * ... + * ] + */ +void model::Gradient::populate(VGradientStops &stops, int frameNo) +{ + model::Gradient::Data gradData = mGradient.value(frameNo); + auto size = gradData.mGradient.size(); + float * ptr = gradData.mGradient.data(); + int colorPoints = mColorPoints; + if (colorPoints == -1) { // for legacy bodymovin (ref: lottie-android) + colorPoints = int(size / 4); + } + auto opacityArraySize = size - colorPoints * 4; + float *opacityPtr = ptr + (colorPoints * 4); + stops.clear(); + size_t j = 0; + for (int i = 0; i < colorPoints; i++) { + float colorStop = ptr[0]; + model::Color color = model::Color(ptr[1], ptr[2], ptr[3]); + if (opacityArraySize) { + if (j == opacityArraySize) { + // already reached the end + float stop1 = opacityPtr[j - 4]; + float op1 = opacityPtr[j - 3]; + float stop2 = opacityPtr[j - 2]; + float op2 = opacityPtr[j - 1]; + if (colorStop > stop2) { + stops.push_back( + std::make_pair(colorStop, color.toColor(op2))); + } else { + float progress = (colorStop - stop1) / (stop2 - stop1); + float opacity = op1 + progress * (op2 - op1); + stops.push_back( + std::make_pair(colorStop, color.toColor(opacity))); + } + continue; + } + for (; j < opacityArraySize; j += 2) { + float opacityStop = opacityPtr[j]; + if (opacityStop < colorStop) { + // add a color using opacity stop + stops.push_back(std::make_pair( + opacityStop, color.toColor(opacityPtr[j + 1]))); + continue; + } + // add a color using color stop + if (j == 0) { + stops.push_back(std::make_pair( + colorStop, color.toColor(opacityPtr[j + 1]))); + } else { + float progress = (colorStop - opacityPtr[j - 2]) / + (opacityPtr[j] - opacityPtr[j - 2]); + float opacity = + opacityPtr[j - 1] + + progress * (opacityPtr[j + 1] - opacityPtr[j - 1]); + stops.push_back( + std::make_pair(colorStop, color.toColor(opacity))); + } + j += 2; + break; + } + } else { + stops.push_back(std::make_pair(colorStop, color.toColor())); + } + ptr += 4; + } +} + +void model::Gradient::update(std::unique_ptr &grad, int frameNo) +{ + bool init = false; + if (!grad) { + if (mGradientType == 1) + grad = std::make_unique(VGradient::Type::Linear); + else + grad = std::make_unique(VGradient::Type::Radial); + grad->mSpread = VGradient::Spread::Pad; + init = true; + } + + if (!mGradient.isStatic() || init) { + populate(grad->mStops, frameNo); + } + + if (mGradientType == 1) { // linear gradient + VPointF start = mStartPoint.value(frameNo); + VPointF end = mEndPoint.value(frameNo); + grad->linear.x1 = start.x(); + grad->linear.y1 = start.y(); + grad->linear.x2 = end.x(); + grad->linear.y2 = end.y(); + } else { // radial gradient + VPointF start = mStartPoint.value(frameNo); + VPointF end = mEndPoint.value(frameNo); + grad->radial.cx = start.x(); + grad->radial.cy = start.y(); + grad->radial.cradius = + VLine::length(start.x(), start.y(), end.x(), end.y()); + /* + * Focal point is the point lives in highlight length distance from + * center along the line (start, end) and rotated by highlight angle. 
+         * The calculation below first finds the angle of the (start, end)
+         * line, adds the highlight rotation angle to it, and then retrieves
+         * the focal point using the circle equation from the center, the
+         * angle and the distance.
+         */
+        float progress = mHighlightLength.value(frameNo) / 100.0f;
+        if (vCompare(progress, 1.0f)) progress = 0.99f;
+        float startAngle = VLine(start, end).angle();
+        float highlightAngle = mHighlightAngle.value(frameNo);
+        static constexpr float K_PI = 3.1415926f;
+        float angle = (startAngle + highlightAngle) * (K_PI / 180.0f);
+        grad->radial.fx =
+            grad->radial.cx + std::cos(angle) * progress * grad->radial.cradius;
+        grad->radial.fy =
+            grad->radial.cy + std::sin(angle) * progress * grad->radial.cradius;
+        // Lottie doesn't have any focal radius concept.
+        grad->radial.fradius = 0;
+    }
+}
+
+void model::Asset::loadImageData(std::string data)
+{
+    if (!data.empty())
+        mBitmap = VImageLoader::instance().load(data.c_str(), data.length());
+}
+
+void model::Asset::loadImagePath(std::string path)
+{
+    if (!path.empty()) mBitmap = VImageLoader::instance().load(path.c_str());
+}
+
+std::vector<LayerInfo> model::Composition::layerInfoList() const
+{
+    if (!mRootLayer || mRootLayer->mChildren.empty()) return {};
+
+    std::vector<LayerInfo> result;
+
+    result.reserve(mRootLayer->mChildren.size());
+
+    for (auto it : mRootLayer->mChildren) {
+        auto layer = static_cast<model::Layer *>(it);
+        result.emplace_back(layer->name(), layer->mInFrame, layer->mOutFrame);
+    }
+
+    return result;
+}
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_lottiemodel.h b/vendor/github.com/Benau/go_rlottie/lottie_lottiemodel.h
new file mode 100644
index 00000000..defec500
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_lottiemodel.h
@@ -0,0 +1,1148 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */ + +#ifndef LOTModel_H +#define LOTModel_H + +#include +#include +#include +#include +#include +#include +#include +#include "vector_varenaalloc.h" +#include "vector_vbezier.h" +#include "vector_vbrush.h" +#include "vector_vinterpolator.h" +#include "vector_vmatrix.h" +#include "vector_vpath.h" +#include "vector_vpoint.h" +#include "vector_vrect.h" + +V_USE_NAMESPACE + +namespace rlottie { + +namespace internal { + +using Marker = std::tuple; + +using LayerInfo = Marker; + +template +inline T lerp(const T &start, const T &end, float t) +{ + return start + t * (end - start); +} + +namespace model { + +enum class MatteType : uchar { None = 0, Alpha = 1, AlphaInv, Luma, LumaInv }; + +enum class BlendMode : uchar { + Normal = 0, + Multiply = 1, + Screen = 2, + OverLay = 3 +}; + +class Color { +public: + Color() = default; + Color(float red, float green, float blue) : r(red), g(green), b(blue) {} + VColor toColor(float a = 1) + { + return VColor(uchar(255 * r), uchar(255 * g), uchar(255 * b), + uchar(255 * a)); + } + friend inline Color operator+(const Color &c1, const Color &c2); + friend inline Color operator-(const Color &c1, const Color &c2); + +public: + float r{1}; + float g{1}; + float b{1}; +}; + +inline Color operator-(const Color &c1, const Color &c2) +{ + return Color(c1.r - c2.r, c1.g - c2.g, c1.b - c2.b); +} +inline Color operator+(const Color &c1, const Color &c2) +{ + return Color(c1.r + c2.r, c1.g + c2.g, c1.b + c2.b); +} + +inline const Color operator*(const Color &c, float m) +{ + return Color(c.r * m, c.g * m, c.b * m); +} + +inline const Color operator*(float m, const Color &c) +{ + return Color(c.r * m, c.g * m, c.b * m); +} + +struct PathData { + std::vector mPoints; + bool mClosed = false; /* "c" */ + void reserve(size_t size) { mPoints.reserve(mPoints.size() + size); } + static void lerp(const PathData &start, const PathData &end, float t, + VPath &result) + { + result.reset(); + // test for empty animation data. 
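+        // (either keyframe may carry an empty point list; result was
+        // reset above, so we simply return an empty path.)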
+ if (start.mPoints.empty() || end.mPoints.empty()) + { + return; + } + auto size = std::min(start.mPoints.size(), end.mPoints.size()); + /* reserve exact memory requirement at once + * ptSize = size + 1(size + close) + * elmSize = size/3 cubic + 1 move + 1 close + */ + result.reserve(size + 1, size / 3 + 2); + result.moveTo(start.mPoints[0] + + t * (end.mPoints[0] - start.mPoints[0])); + for (size_t i = 1; i < size; i += 3) { + result.cubicTo( + start.mPoints[i] + t * (end.mPoints[i] - start.mPoints[i]), + start.mPoints[i + 1] + + t * (end.mPoints[i + 1] - start.mPoints[i + 1]), + start.mPoints[i + 2] + + t * (end.mPoints[i + 2] - start.mPoints[i + 2])); + } + if (start.mClosed) result.close(); + } + void toPath(VPath &path) const + { + path.reset(); + + if (mPoints.empty()) return; + + auto size = mPoints.size(); + auto points = mPoints.data(); + /* reserve exact memory requirement at once + * ptSize = size + 1(size + close) + * elmSize = size/3 cubic + 1 move + 1 close + */ + path.reserve(size + 1, size / 3 + 2); + path.moveTo(points[0]); + for (size_t i = 1; i < size; i += 3) { + path.cubicTo(points[i], points[i + 1], points[i + 2]); + } + if (mClosed) path.close(); + } +}; + +template +struct Value { + T start_; + T end_; + T at(float t) const { return lerp(start_, end_, t); } + float angle(float) const { return 0; } + void cache() {} +}; + +struct Position; + +template +struct Value { + T start_; + T end_; + T inTangent_; + T outTangent_; + float length_{0}; + bool hasTangent_{false}; + + void cache() + { + if (hasTangent_) { + inTangent_ = end_ + inTangent_; + outTangent_ = start_ + outTangent_; + length_ = VBezier::fromPoints(start_, outTangent_, inTangent_, end_) + .length(); + if (vIsZero(length_)) { + // this segment has zero length. + // so disable expensive path computaion. + hasTangent_ = false; + } + } + } + + T at(float t) const + { + if (hasTangent_) { + /* + * position along the path calcualated + * using bezier at progress length (t * bezlen) + */ + VBezier b = + VBezier::fromPoints(start_, outTangent_, inTangent_, end_); + return b.pointAt(b.tAtLength(t * length_, length_)); + } + return lerp(start_, end_, t); + } + + float angle(float t) const + { + if (hasTangent_) { + VBezier b = + VBezier::fromPoints(start_, outTangent_, inTangent_, end_); + return b.angleAt(b.tAtLength(t * length_, length_)); + } + return 0; + } +}; + +template +class KeyFrames { +public: + struct Frame { + float progress(int frameNo) const + { + return interpolator_ ? 
interpolator_->value((frameNo - start_) / + (end_ - start_)) + : 0; + } + T value(int frameNo) const { return value_.at(progress(frameNo)); } + float angle(int frameNo) const + { + return value_.angle(progress(frameNo)); + } + + float start_{0}; + float end_{0}; + VInterpolator *interpolator_{nullptr}; + Value value_; + }; + + T value(int frameNo) const + { + if (frames_.front().start_ >= frameNo) + return frames_.front().value_.start_; + if (frames_.back().end_ <= frameNo) return frames_.back().value_.end_; + + for (const auto &keyFrame : frames_) { + if (frameNo >= keyFrame.start_ && frameNo < keyFrame.end_) + return keyFrame.value(frameNo); + } + return {}; + } + + float angle(int frameNo) const + { + if ((frames_.front().start_ >= frameNo) || + (frames_.back().end_ <= frameNo)) + return 0; + + for (const auto &frame : frames_) { + if (frameNo >= frame.start_ && frameNo < frame.end_) + return frame.angle(frameNo); + } + return 0; + } + + bool changed(int prevFrame, int curFrame) const + { + auto first = frames_.front().start_; + auto last = frames_.back().end_; + + return !((first > prevFrame && first > curFrame) || + (last < prevFrame && last < curFrame)); + } + void cache() + { + for (auto &e : frames_) e.value_.cache(); + } + +public: + std::vector frames_; +}; + +template +class Property { +public: + using Animation = KeyFrames; + + Property() { construct(impl_.value_, {}); } + explicit Property(T value) { construct(impl_.value_, std::move(value)); } + + const Animation &animation() const { return *(impl_.animation_.get()); } + const T & value() const { return impl_.value_; } + + Animation &animation() + { + if (isValue_) { + destroy(); + construct(impl_.animation_, std::make_unique()); + isValue_ = false; + } + return *(impl_.animation_.get()); + } + + T &value() + { + assert(isValue_); + return impl_.value_; + } + + Property(Property &&other) noexcept + { + if (!other.isValue_) { + construct(impl_.animation_, std::move(other.impl_.animation_)); + isValue_ = false; + } else { + construct(impl_.value_, std::move(other.impl_.value_)); + isValue_ = true; + } + } + // delete special member functions + Property(const Property &) = delete; + Property &operator=(const Property &) = delete; + Property &operator=(Property &&) = delete; + + ~Property() { destroy(); } + + bool isStatic() const { return isValue_; } + + T value(int frameNo) const + { + return isStatic() ? value() : animation().value(frameNo); + } + + // special function only for type T=PathData + template + auto value(int frameNo, VPath &path) const -> + typename std::enable_if_t::value, void> + { + if (isStatic()) { + value().toPath(path); + } else { + const auto &vec = animation().frames_; + if (vec.front().start_ >= frameNo) + return vec.front().value_.start_.toPath(path); + if (vec.back().end_ <= frameNo) + return vec.back().value_.end_.toPath(path); + + for (const auto &keyFrame : vec) { + if (frameNo >= keyFrame.start_ && frameNo < keyFrame.end_) { + T::lerp(keyFrame.value_.start_, keyFrame.value_.end_, + keyFrame.progress(frameNo), path); + } + } + } + } + + float angle(int frameNo) const + { + return isStatic() ? 0 : animation().angle(frameNo); + } + + bool changed(int prevFrame, int curFrame) const + { + return isStatic() ? 
false : animation().changed(prevFrame, curFrame); + } + void cache() + { + if (!isStatic()) animation().cache(); + } + +private: + template + void construct(Tp &member, Tp &&val) + { + new (&member) Tp(std::move(val)); + } + + void destroy() + { + if (isValue_) { + impl_.value_.~T(); + } else { + using std::unique_ptr; + impl_.animation_.~unique_ptr(); + } + } + union details { + std::unique_ptr animation_; + T value_; + details(){}; + details(const details &) = delete; + details(details &&) = delete; + details &operator=(details &&) = delete; + details &operator=(const details &) = delete; + ~details() noexcept {}; + } impl_; + bool isValue_{true}; +}; + +class Path; +struct PathData; +struct Dash { + std::vector> mData; + bool empty() const { return mData.empty(); } + size_t size() const { return mData.size(); } + bool isStatic() const + { + for (const auto &elm : mData) + if (!elm.isStatic()) return false; + return true; + } + void getDashInfo(int frameNo, std::vector &result) const; +}; + +class Mask { +public: + enum class Mode { None, Add, Substarct, Intersect, Difference }; + float opacity(int frameNo) const + { + return mOpacity.value(frameNo) / 100.0f; + } + bool isStatic() const { return mIsStatic; } + +public: + Property mShape; + Property mOpacity{100}; + bool mInv{false}; + bool mIsStatic{true}; + Mask::Mode mMode; +}; + +class Object { +public: + enum class Type : unsigned char { + Composition = 1, + Layer, + Group, + Transform, + Fill, + Stroke, + GFill, + GStroke, + Rect, + Ellipse, + Path, + Polystar, + Trim, + Repeater, + RoundedCorner + }; + + explicit Object(Object::Type type) : mPtr(nullptr) + { + mData._type = type; + mData._static = true; + mData._shortString = true; + mData._hidden = false; + } + ~Object() noexcept + { + if (!shortString() && mPtr) free(mPtr); + } + Object(const Object &) = delete; + Object &operator=(const Object &) = delete; + + void setStatic(bool value) { mData._static = value; } + bool isStatic() const { return mData._static; } + bool hidden() const { return mData._hidden; } + void setHidden(bool value) { mData._hidden = value; } + void setType(Object::Type type) { mData._type = type; } + Object::Type type() const { return mData._type; } + void setName(const char *name) + { + if (name) { + auto len = strlen(name); + if (len < maxShortStringLength) { + setShortString(true); + strncpy(mData._buffer, name, len + 1); + } else { + setShortString(false); + mPtr = strdup(name); + } + } + } + const char *name() const { return shortString() ? 
mData._buffer : mPtr; } + +private: + static constexpr unsigned char maxShortStringLength = 14; + void setShortString(bool value) { mData._shortString = value; } + bool shortString() const { return mData._shortString; } + struct Data { + char _buffer[maxShortStringLength]; + Object::Type _type; + bool _static : 1; + bool _hidden : 1; + bool _shortString : 1; + }; + union { + Data mData; + char *mPtr{nullptr}; + }; +}; + +struct Asset { + enum class Type : unsigned char { Precomp, Image, Char }; + bool isStatic() const { return mStatic; } + void setStatic(bool value) { mStatic = value; } + VBitmap bitmap() const { return mBitmap; } + void loadImageData(std::string data); + void loadImagePath(std::string Path); + Type mAssetType{Type::Precomp}; + bool mStatic{true}; + std::string mRefId; // ref id + std::vector mLayers; + // image asset data + int mWidth{0}; + int mHeight{0}; + VBitmap mBitmap; +}; + +class Layer; + +class Composition : public Object { +public: + Composition() : Object(Object::Type::Composition) {} + std::vector layerInfoList() const; + const std::vector &markers() const { return mMarkers; } + double duration() const + { + return frameDuration() / frameRate(); // in second + } + size_t frameAtPos(double pos) const + { + if (pos < 0) pos = 0; + if (pos > 1) pos = 1; + return size_t(round(pos * frameDuration())); + } + long frameAtTime(double timeInSec) const + { + return long(frameAtPos(timeInSec / duration())); + } + size_t totalFrame() const { return mEndFrame - mStartFrame; } + long frameDuration() const { return mEndFrame - mStartFrame - 1; } + float frameRate() const { return mFrameRate; } + size_t startFrame() const { return mStartFrame; } + size_t endFrame() const { return mEndFrame; } + VSize size() const { return mSize; } + void processRepeaterObjects(); + void updateStats(); + +public: + struct Stats { + uint16_t precompLayerCount{0}; + uint16_t solidLayerCount{0}; + uint16_t shapeLayerCount{0}; + uint16_t imageLayerCount{0}; + uint16_t nullLayerCount{0}; + }; + +public: + std::string mVersion; + VSize mSize; + long mStartFrame{0}; + long mEndFrame{0}; + float mFrameRate{60}; + BlendMode mBlendMode{BlendMode::Normal}; + Layer * mRootLayer{nullptr}; + std::unordered_map mAssets; + + std::vector mMarkers; + VArenaAlloc mArenaAlloc{2048}; + Stats mStats; +}; + +class Transform : public Object { +public: + struct Data { + struct Extra { + Property m3DRx{0}; + Property m3DRy{0}; + Property m3DRz{0}; + Property mSeparateX{0}; + Property mSeparateY{0}; + bool mSeparate{false}; + bool m3DData{false}; + }; + VMatrix matrix(int frameNo, bool autoOrient = false) const; + float opacity(int frameNo) const + { + return mOpacity.value(frameNo) / 100.0f; + } + void createExtraData() + { + if (!mExtra) mExtra = std::make_unique(); + } + Property mRotation{0}; /* "r" */ + Property mScale{{100, 100}}; /* "s" */ + Property mPosition; /* "p" */ + Property mAnchor; /* "a" */ + Property mOpacity{100}; /* "o" */ + std::unique_ptr mExtra; + }; + + Transform() : Object(Object::Type::Transform) {} + void set(Transform::Data *data, bool staticFlag) + { + setStatic(staticFlag); + if (isStatic()) { + new (&impl.mStaticData) + StaticData(data->matrix(0), data->opacity(0)); + } else { + impl.mData = data; + } + } + VMatrix matrix(int frameNo, bool autoOrient = false) const + { + if (isStatic()) return impl.mStaticData.mMatrix; + return impl.mData->matrix(frameNo, autoOrient); + } + float opacity(int frameNo) const + { + if (isStatic()) return impl.mStaticData.mOpacity; + return 
impl.mData->opacity(frameNo); + } + Transform(const Transform &) = delete; + Transform(Transform &&) = delete; + Transform &operator=(Transform &) = delete; + Transform &operator=(Transform &&) = delete; + ~Transform() noexcept { destroy(); } + +private: + void destroy() + { + if (isStatic()) { + impl.mStaticData.~StaticData(); + } + } + struct StaticData { + StaticData(VMatrix &&m, float opacity) + : mOpacity(opacity), mMatrix(std::move(m)) + { + } + float mOpacity; + VMatrix mMatrix; + }; + union details { + Data * mData{nullptr}; + StaticData mStaticData; + details(){}; + details(const details &) = delete; + details(details &&) = delete; + details &operator=(details &&) = delete; + details &operator=(const details &) = delete; + ~details() noexcept {}; + } impl; +}; + +class Group : public Object { +public: + Group() : Object(Object::Type::Group) {} + explicit Group(Object::Type type) : Object(type) {} + +public: + std::vector mChildren; + Transform * mTransform{nullptr}; +}; + +class Layer : public Group { +public: + enum class Type : uchar { + Precomp = 0, + Solid = 1, + Image = 2, + Null = 3, + Shape = 4, + Text = 5 + }; + Layer() : Group(Object::Type::Layer) {} + bool hasRoundedCorner() const noexcept { return mHasRoundedCorner; } + bool hasPathOperator() const noexcept { return mHasPathOperator; } + bool hasGradient() const noexcept { return mHasGradient; } + bool hasMask() const noexcept { return mHasMask; } + bool hasRepeater() const noexcept { return mHasRepeater; } + int id() const noexcept { return mId; } + int parentId() const noexcept { return mParentId; } + bool hasParent() const noexcept { return mParentId != -1; } + int inFrame() const noexcept { return mInFrame; } + int outFrame() const noexcept { return mOutFrame; } + int startFrame() const noexcept { return mStartFrame; } + Color solidColor() const noexcept + { + return mExtra ? mExtra->mSolidColor : Color(); + } + bool autoOrient() const noexcept { return mAutoOrient; } + int timeRemap(int frameNo) const; + VSize layerSize() const { return mLayerSize; } + bool precompLayer() const { return mLayerType == Type::Precomp; } + VMatrix matrix(int frameNo) const + { + return mTransform ? mTransform->matrix(frameNo, autoOrient()) + : VMatrix{}; + } + float opacity(int frameNo) const + { + return mTransform ? mTransform->opacity(frameNo) : 1.0f; + } + Asset *asset() const { return mExtra ? mExtra->mAsset : nullptr; } + struct Extra { + Color mSolidColor; + std::string mPreCompRefId; + Property mTimeRemap; /* "tm" */ + Composition * mCompRef{nullptr}; + Asset * mAsset{nullptr}; + std::vector mMasks; + }; + + Layer::Extra *extra() + { + if (!mExtra) mExtra = std::make_unique(); + return mExtra.get(); + } + +public: + MatteType mMatteType{MatteType::None}; + Type mLayerType{Layer::Type::Null}; + BlendMode mBlendMode{BlendMode::Normal}; + bool mHasRoundedCorner{false}; + bool mHasPathOperator{false}; + bool mHasMask{false}; + bool mHasRepeater{false}; + bool mHasGradient{false}; + bool mAutoOrient{false}; + VSize mLayerSize; + int mParentId{-1}; // Lottie the id of the parent in the composition + int mId{-1}; // Lottie the group id used for parenting. 
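+    // layer time stretch factor ("sr" in the json); timeRemap() divides
+    // the remapped frame number by it.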
+    float mTimeStreatch{1.0f};
+    int   mInFrame{0};
+    int   mOutFrame{0};
+    int   mStartFrame{0};
+    std::unique_ptr<Extra> mExtra{nullptr};
+};
+
+/**
+ * TimeRemap carries its value in the time domain (in seconds).
+ * To get the proper mapping we first get the mapped time at the current
+ * frame number, then convert that mapped time back to a frame number using
+ * the composition timeline. E.g. at frame 10 a mapped time of 0.5 (500 ms)
+ * converts to frame number 30 if the frame rate is 60, or to frame
+ * number 15 if the frame rate is 30.
+ */
+inline int Layer::timeRemap(int frameNo) const
+{
+    /*
+     * only consider startFrame() when there is no timeRemap.
+     * when a layer has a timeRemap, bodymovin updates the startFrame()
+     * of all child layers, so we don't have to take care of it.
+     */
+    if (!mExtra || mExtra->mTimeRemap.isStatic())
+        frameNo = frameNo - startFrame();
+    else
+        frameNo =
+            mExtra->mCompRef->frameAtTime(mExtra->mTimeRemap.value(frameNo));
+    /* Apply the time stretch if there is any.
+     * Time stretch is just a factor by which the animation speeds up or
+     * slows down with respect to the overall animation. The time stretch
+     * factor is already applied to the layer's inFrame and outFrame.
+     * @TODO need to find out whether time stretch also affects the in and
+     * out frames of the child layers or not. */
+    return int(frameNo / mTimeStreatch);
+}
+
+class Stroke : public Object {
+public:
+    Stroke() : Object(Object::Type::Stroke) {}
+    Color color(int frameNo) const { return mColor.value(frameNo); }
+    float opacity(int frameNo) const
+    {
+        return mOpacity.value(frameNo) / 100.0f;
+    }
+    float strokeWidth(int frameNo) const { return mWidth.value(frameNo); }
+    CapStyle  capStyle() const { return mCapStyle; }
+    JoinStyle joinStyle() const { return mJoinStyle; }
+    float     miterLimit() const { return mMiterLimit; }
+    bool      hasDashInfo() const { return !mDash.empty(); }
+    void      getDashInfo(int frameNo, std::vector<float> &result) const
+    {
+        return mDash.getDashInfo(frameNo, result);
+    }
+
+public:
+    Property<Color> mColor;                       /* "c" */
+    Property<float> mOpacity{100};                /* "o" */
+    Property<float> mWidth{0};                    /* "w" */
+    CapStyle        mCapStyle{CapStyle::Flat};    /* "lc" */
+    JoinStyle       mJoinStyle{JoinStyle::Miter}; /* "lj" */
+    float           mMiterLimit{0};               /* "ml" */
+    Dash            mDash;
+    bool            mEnabled{true};               /* "fillEnabled" */
+};
+
+class Gradient : public Object {
+public:
+    class Data {
+    public:
+        friend inline Gradient::Data operator+(const Gradient::Data &g1,
+                                               const Gradient::Data &g2);
+        friend inline Gradient::Data operator-(const Gradient::Data &g1,
+                                               const Gradient::Data &g2);
+        friend inline Gradient::Data operator*(float                 m,
+                                               const Gradient::Data &g);
+
+    public:
+        std::vector<float> mGradient;
+    };
+    explicit Gradient(Object::Type type) : Object(type) {}
+    inline float opacity(int frameNo) const
+    {
+        return mOpacity.value(frameNo) / 100.0f;
+    }
+    void update(std::unique_ptr<VGradient> &grad, int frameNo);
+
+private:
+    void populate(VGradientStops &stops, int frameNo);
+
+public:
+    int                      mGradientType{1};    /* "t" Linear=1 , Radial = 2*/
+    Property<VPointF>        mStartPoint;         /* "s" */
+    Property<VPointF>        mEndPoint;           /* "e" */
+    Property<float>          mHighlightLength{0}; /* "h" */
+    Property<float>          mHighlightAngle{0};  /* "a" */
+    Property<float>          mOpacity{100};       /* "o" */
+    Property<Gradient::Data> mGradient;           /* "g" */
+    int                      mColorPoints{-1};
+    bool                     mEnabled{true};      /* "fillEnabled" */
+};
+
+class GradientStroke : public Gradient {
+public:
+    GradientStroke() : Gradient(Object::Type::GStroke) {}
+    float width(int frameNo) const { return mWidth.value(frameNo); }
+    CapStyle capStyle() const { return mCapStyle; }
+    JoinStyle
joinStyle() const { return mJoinStyle; } + float miterLimit() const { return mMiterLimit; } + bool hasDashInfo() const { return !mDash.empty(); } + void getDashInfo(int frameNo, std::vector &result) const + { + return mDash.getDashInfo(frameNo, result); + } + +public: + Property mWidth; /* "w" */ + CapStyle mCapStyle{CapStyle::Flat}; /* "lc" */ + JoinStyle mJoinStyle{JoinStyle::Miter}; /* "lj" */ + float mMiterLimit{0}; /* "ml" */ + Dash mDash; +}; + +class GradientFill : public Gradient { +public: + GradientFill() : Gradient(Object::Type::GFill) {} + FillRule fillRule() const { return mFillRule; } + +public: + FillRule mFillRule{FillRule::Winding}; /* "r" */ +}; + +class Fill : public Object { +public: + Fill() : Object(Object::Type::Fill) {} + Color color(int frameNo) const { return mColor.value(frameNo); } + float opacity(int frameNo) const + { + return mOpacity.value(frameNo) / 100.0f; + } + FillRule fillRule() const { return mFillRule; } + +public: + FillRule mFillRule{FillRule::Winding}; /* "r" */ + bool mEnabled{true}; /* "fillEnabled" */ + Property mColor; /* "c" */ + Property mOpacity{100}; /* "o" */ +}; + +class Shape : public Object { +public: + explicit Shape(Object::Type type) : Object(type) {} + VPath::Direction direction() + { + return (mDirection == 3) ? VPath::Direction::CCW : VPath::Direction::CW; + } + +public: + int mDirection{1}; +}; + +class Path : public Shape { +public: + Path() : Shape(Object::Type::Path) {} + +public: + Property mShape; +}; + +class RoundedCorner : public Object { +public: + RoundedCorner() : Object(Object::Type::RoundedCorner) {} + float radius(int frameNo) const { return mRadius.value(frameNo);} +public: + Property mRadius{0}; +}; + +class Rect : public Shape { +public: + Rect() : Shape(Object::Type::Rect) {} + float roundness(int frameNo) + { + return mRoundedCorner ? mRoundedCorner->radius(frameNo) : + mRound.value(frameNo); + } + + bool roundnessChanged(int prevFrame, int curFrame) + { + return mRoundedCorner ? mRoundedCorner->mRadius.changed(prevFrame, curFrame) : + mRound.changed(prevFrame, curFrame); + } +public: + RoundedCorner* mRoundedCorner{nullptr}; + Property mPos; + Property mSize; + Property mRound{0}; +}; + +class Ellipse : public Shape { +public: + Ellipse() : Shape(Object::Type::Ellipse) {} + +public: + Property mPos; + Property mSize; +}; + +class Polystar : public Shape { +public: + enum class PolyType { Star = 1, Polygon = 2 }; + Polystar() : Shape(Object::Type::Polystar) {} + +public: + Polystar::PolyType mPolyType{PolyType::Polygon}; + Property mPos; + Property mPointCount{0}; + Property mInnerRadius{0}; + Property mOuterRadius{0}; + Property mInnerRoundness{0}; + Property mOuterRoundness{0}; + Property mRotation{0}; +}; + +class Repeater : public Object { +public: + struct Transform { + VMatrix matrix(int frameNo, float multiplier) const; + float startOpacity(int frameNo) const + { + return mStartOpacity.value(frameNo) / 100; + } + float endOpacity(int frameNo) const + { + return mEndOpacity.value(frameNo) / 100; + } + bool isStatic() const + { + return mRotation.isStatic() && mScale.isStatic() && + mPosition.isStatic() && mAnchor.isStatic() && + mStartOpacity.isStatic() && mEndOpacity.isStatic(); + } + Property mRotation{0}; /* "r" */ + Property mScale{{100, 100}}; /* "s" */ + Property mPosition; /* "p" */ + Property mAnchor; /* "a" */ + Property mStartOpacity{100}; /* "so" */ + Property mEndOpacity{100}; /* "eo" */ + }; + Repeater() : Object(Object::Type::Repeater) {} + Group *content() const { return mContent ? 
mContent : nullptr; } + void setContent(Group *content) { mContent = content; } + int maxCopies() const { return int(mMaxCopies); } + float copies(int frameNo) const { return mCopies.value(frameNo); } + float offset(int frameNo) const { return mOffset.value(frameNo); } + bool processed() const { return mProcessed; } + void markProcessed() { mProcessed = true; } + +public: + Group * mContent{nullptr}; + Transform mTransform; + Property mCopies{0}; + Property mOffset{0}; + float mMaxCopies{0.0}; + bool mProcessed{false}; +}; + +class Trim : public Object { +public: + struct Segment { + float start{0}; + float end{0}; + Segment() = default; + explicit Segment(float s, float e) : start(s), end(e) {} + }; + enum class TrimType { Simultaneously, Individually }; + Trim() : Object(Object::Type::Trim) {} + /* + * if start > end vector trims the path as a loop ( 2 segment) + * if start < end vector trims the path without loop ( 1 segment). + * if no offset then there is no loop. + */ + Segment segment(int frameNo) const + { + float start = mStart.value(frameNo) / 100.0f; + float end = mEnd.value(frameNo) / 100.0f; + float offset = std::fmod(mOffset.value(frameNo), 360.0f) / 360.0f; + + float diff = std::abs(start - end); + if (vCompare(diff, 0.0f)) return Segment(0, 0); + if (vCompare(diff, 1.0f)) return Segment(0, 1); + + if (offset > 0) { + start += offset; + end += offset; + if (start <= 1 && end <= 1) { + return noloop(start, end); + } else if (start > 1 && end > 1) { + return noloop(start - 1, end - 1); + } else { + return (start > 1) ? loop(start - 1, end) + : loop(start, end - 1); + } + } else { + start += offset; + end += offset; + if (start >= 0 && end >= 0) { + return noloop(start, end); + } else if (start < 0 && end < 0) { + return noloop(1 + start, 1 + end); + } else { + return (start < 0) ? 
loop(1 + start, end) + : loop(start, 1 + end); + } + } + } + Trim::TrimType type() const { return mTrimType; } + +private: + Segment noloop(float start, float end) const + { + assert(start >= 0); + assert(end >= 0); + Segment s; + s.start = std::min(start, end); + s.end = std::max(start, end); + return s; + } + Segment loop(float start, float end) const + { + assert(start >= 0); + assert(end >= 0); + Segment s; + s.start = std::max(start, end); + s.end = std::min(start, end); + return s; + } + +public: + Property mStart{0}; + Property mEnd{0}; + Property mOffset{0}; + Trim::TrimType mTrimType{TrimType::Simultaneously}; +}; + +inline Gradient::Data operator+(const Gradient::Data &g1, + const Gradient::Data &g2) +{ + if (g1.mGradient.size() != g2.mGradient.size()) return g1; + + Gradient::Data newG; + newG.mGradient = g1.mGradient; + + auto g2It = g2.mGradient.begin(); + for (auto &i : newG.mGradient) { + i = i + *g2It; + g2It++; + } + + return newG; +} + +inline Gradient::Data operator-(const Gradient::Data &g1, + const Gradient::Data &g2) +{ + if (g1.mGradient.size() != g2.mGradient.size()) return g1; + Gradient::Data newG; + newG.mGradient = g1.mGradient; + + auto g2It = g2.mGradient.begin(); + for (auto &i : newG.mGradient) { + i = i - *g2It; + g2It++; + } + + return newG; +} + +inline Gradient::Data operator*(float m, const Gradient::Data &g) +{ + Gradient::Data newG; + newG.mGradient = g.mGradient; + + for (auto &i : newG.mGradient) { + i = i * m; + } + return newG; +} + +using ColorFilter = std::function; + +void configureModelCacheSize(size_t cacheSize); + +std::shared_ptr loadFromFile(const std::string &filePath, + bool cachePolicy); + +std::shared_ptr loadFromData(std::string jsonData, + const std::string &key, + std::string resourcePath, + bool cachePolicy); + +std::shared_ptr loadFromData(std::string jsonData, + std::string resourcePath, + ColorFilter filter); + +std::shared_ptr parse(char *str, std::string dir_path, + ColorFilter filter = {}); + +} // namespace model + +} // namespace internal + +} // namespace rlottie + +#endif // LOTModel_H diff --git a/vendor/github.com/Benau/go_rlottie/lottie_lottieparser.cpp b/vendor/github.com/Benau/go_rlottie/lottie_lottieparser.cpp new file mode 100644 index 00000000..91839d41 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_lottieparser.cpp @@ -0,0 +1,2390 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +//#define DEBUG_PARSER + +// This parser implements JSON token-by-token parsing with an API that is +// more direct; we don't have to create handler object and +// callbacks. Instead, we retrieve values from the JSON stream by calling +// GetInt(), GetDouble(), GetString() and GetBool(), traverse into structures +// by calling EnterObject() and EnterArray(), and skip over unwanted data by +// calling SkipValue(). As we know the lottie file structure this way will be +// the efficient way of parsing the file. +// +// If you aren't sure of what's next in the JSON data, you can use PeekType() +// and PeekValue() to look ahead to the next object before reading it. +// +// If you call the wrong retrieval method--e.g. GetInt when the next JSON token +// is not an int, EnterObject or EnterArray when there isn't actually an object +// or array to read--the stream parsing will end immediately and no more data +// will be delivered. +// +// After calling EnterObject, you retrieve keys via NextObjectKey() and values +// via the normal getters. When NextObjectKey() returns null, you have exited +// the object, or you can call SkipObject() to skip to the end of the object +// immediately. If you fetch the entire object (i.e. NextObjectKey() returned +// null), you should not call SkipObject(). +// +// After calling EnterArray(), you must alternate between calling +// NextArrayValue() to see if the array has more data, and then retrieving +// values via the normal getters. You can call SkipArray() to skip to the end of +// the array immediately. If you fetch the entire array (i.e. NextArrayValue() +// returned null), you should not call SkipArray(). +// +// This parser uses in-situ strings, so the JSON buffer will be altered during +// the parse. + +#include + +#include "lottie_lottiemodel.h" +#include "lottie_rapidjson_document.h" + +RAPIDJSON_DIAG_PUSH +#ifdef __GNUC__ +RAPIDJSON_DIAG_OFF(effc++) +#endif + +using namespace rapidjson; + +using namespace rlottie::internal; + +class LookaheadParserHandler { +public: + bool Null() + { + st_ = kHasNull; + v_.SetNull(); + return true; + } + bool Bool(bool b) + { + st_ = kHasBool; + v_.SetBool(b); + return true; + } + bool Int(int i) + { + st_ = kHasNumber; + v_.SetInt(i); + return true; + } + bool Uint(unsigned u) + { + st_ = kHasNumber; + v_.SetUint(u); + return true; + } + bool Int64(int64_t i) + { + st_ = kHasNumber; + v_.SetInt64(i); + return true; + } + bool Uint64(int64_t u) + { + st_ = kHasNumber; + v_.SetUint64(u); + return true; + } + bool Double(double d) + { + st_ = kHasNumber; + v_.SetDouble(d); + return true; + } + bool RawNumber(const char *, SizeType, bool) { return false; } + bool String(const char *str, SizeType length, bool) + { + st_ = kHasString; + v_.SetString(str, length); + return true; + } + bool StartObject() + { + st_ = kEnteringObject; + return true; + } + bool Key(const char *str, SizeType length, bool) + { + st_ = kHasKey; + v_.SetString(str, length); + return true; + } + bool EndObject(SizeType) + { + st_ = kExitingObject; + return true; + } + bool StartArray() + { + st_ = kEnteringArray; + return true; + } + bool EndArray(SizeType) + { + st_ = kExitingArray; + return true; + } + + void Error() + { + st_ = kError; + } +protected: + explicit LookaheadParserHandler(char *str); + +protected: + enum LookaheadParsingState { + kInit, + kError, + kHasNull, + kHasBool, + kHasNumber, + kHasString, + kHasKey, + kEnteringObject, + kExitingObject, + kEnteringArray, + kExitingArray + }; + + Value v_; + LookaheadParsingState st_; + 
Reader r_; + InsituStringStream ss_; + + static const int parseFlags = kParseDefaultFlags | kParseInsituFlag; +}; + +class LottieParserImpl : public LookaheadParserHandler { +public: + LottieParserImpl(char *str, std::string dir_path, model::ColorFilter filter) + : LookaheadParserHandler(str), + mColorFilter(std::move(filter)), + mDirPath(std::move(dir_path)) + { + } + bool VerifyType(); + bool ParseNext(); + +public: + VArenaAlloc &allocator() { return compRef->mArenaAlloc; } + bool EnterObject(); + bool EnterArray(); + const char * NextObjectKey(); + bool NextArrayValue(); + int GetInt(); + double GetDouble(); + const char * GetString(); + std::string GetStringObject(); + bool GetBool(); + void GetNull(); + + void SkipObject(); + void SkipArray(); + void SkipValue(); + Value *PeekValue(); + int PeekType() const; + bool IsValid() { return st_ != kError; } + + void Skip(const char *key); + model::BlendMode getBlendMode(); + CapStyle getLineCap(); + JoinStyle getLineJoin(); + FillRule getFillRule(); + model::Trim::TrimType getTrimType(); + model::MatteType getMatteType(); + model::Layer::Type getLayerType(); + + std::shared_ptr composition() const + { + return mComposition; + } + void parseComposition(); + void parseMarkers(); + void parseMarker(); + void parseAssets(model::Composition *comp); + model::Asset * parseAsset(); + void parseLayers(model::Composition *comp); + model::Layer * parseLayer(); + void parseMaskProperty(model::Layer *layer); + void parseShapesAttr(model::Layer *layer); + void parseObject(model::Group *parent); + model::Mask * parseMaskObject(); + model::Object * parseObjectTypeAttr(); + model::Object * parseGroupObject(); + model::Rect * parseRectObject(); + model::RoundedCorner * parseRoundedCorner(); + void updateRoundedCorner(model::Group *parent, model::RoundedCorner *rc); + + model::Ellipse * parseEllipseObject(); + model::Path * parseShapeObject(); + model::Polystar *parsePolystarObject(); + + model::Transform * parseTransformObject(bool ddd = false); + model::Fill * parseFillObject(); + model::GradientFill * parseGFillObject(); + model::Stroke * parseStrokeObject(); + model::GradientStroke *parseGStrokeObject(); + model::Trim * parseTrimObject(); + model::Repeater * parseReapeaterObject(); + + void parseGradientProperty(model::Gradient *gradient, const char *key); + + VPointF parseInperpolatorPoint(); + + void getValue(VPointF &pt); + void getValue(float &fval); + void getValue(model::Color &color); + void getValue(int &ival); + void getValue(model::PathData &shape); + void getValue(model::Gradient::Data &gradient); + void getValue(std::vector &v); + void getValue(model::Repeater::Transform &); + + template + bool parseKeyFrameValue(const char *, model::Value &) + { + return false; + } + + template + bool parseKeyFrameValue(const char * key, + model::Value &value); + template + void parseKeyFrame(model::KeyFrames &obj); + template + void parseProperty(model::Property &obj); + template + void parsePropertyHelper(model::Property &obj); + + void parseShapeProperty(model::Property &obj); + void parseDashProperty(model::Dash &dash); + + VInterpolator *interpolator(VPointF, VPointF, std::string); + + model::Color toColor(const char *str); + + void resolveLayerRefs(); + void parsePathInfo(); + +private: + model::ColorFilter mColorFilter; + struct { + std::vector mInPoint; /* "i" */ + std::vector mOutPoint; /* "o" */ + std::vector mVertices; /* "v" */ + std::vector mResult; + bool mClosed{false}; + + void convert() + { + // shape data could be empty. 
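+            // (a missing "v", "i" or "o" array leaves the corresponding
+            // vector empty, so drop any stale result and bail out.)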
+ if (mInPoint.empty() || mOutPoint.empty() || mVertices.empty()) { + mResult.clear(); + return; + } + + /* + * Convert the AE shape format to + * list of bazier curves + * The final structure will be Move +size*Cubic + Cubic (if the path + * is closed one) + */ + if (mInPoint.size() != mOutPoint.size() || + mInPoint.size() != mVertices.size()) { + mResult.clear(); + } else { + auto size = mVertices.size(); + mResult.push_back(mVertices[0]); + for (size_t i = 1; i < size; i++) { + mResult.push_back( + mVertices[i - 1] + + mOutPoint[i - 1]); // CP1 = start + outTangent + mResult.push_back(mVertices[i] + + mInPoint[i]); // CP2 = end + inTangent + mResult.push_back(mVertices[i]); // end point + } + + if (mClosed) { + mResult.push_back( + mVertices[size - 1] + + mOutPoint[size - 1]); // CP1 = start + outTangent + mResult.push_back(mVertices[0] + + mInPoint[0]); // CP2 = end + inTangent + mResult.push_back(mVertices[0]); // end point + } + } + } + void reset() + { + mInPoint.clear(); + mOutPoint.clear(); + mVertices.clear(); + mResult.clear(); + mClosed = false; + } + void updatePath(VPath &out) + { + if (mResult.empty()) return; + + auto size = mResult.size(); + auto points = mResult.data(); + /* reserve exact memory requirement at once + * ptSize = size + 1(size + close) + * elmSize = size/3 cubic + 1 move + 1 close + */ + out.reserve(size + 1, size / 3 + 2); + out.moveTo(points[0]); + for (size_t i = 1; i < size; i += 3) { + out.cubicTo(points[i], points[i + 1], points[i + 2]); + } + if (mClosed) out.close(); + } + } mPathInfo; + +protected: + std::unordered_map mInterpolatorCache; + std::shared_ptr mComposition; + model::Composition * compRef{nullptr}; + model::Layer * curLayerRef{nullptr}; + std::vector mLayersToUpdate; + std::string mDirPath; + void SkipOut(int depth); +}; + +LookaheadParserHandler::LookaheadParserHandler(char *str) + : v_(), st_(kInit), ss_(str) +{ + r_.IterativeParseInit(); +} + +bool LottieParserImpl::VerifyType() +{ + /* Verify the media type is lottie json. + Could add more strict check. */ + return ParseNext(); +} + +bool LottieParserImpl::ParseNext() +{ + if (r_.HasParseError()) { + st_ = kError; + return false; + } + + if (!r_.IterativeParseNext(ss_, *this)) { + vCritical << "Lottie file parsing error"; + st_ = kError; + return false; + } + return true; +} + +bool LottieParserImpl::EnterObject() +{ + if (st_ != kEnteringObject) { + st_ = kError; + return false; + } + + ParseNext(); + return true; +} + +bool LottieParserImpl::EnterArray() +{ + if (st_ != kEnteringArray) { + st_ = kError; + return false; + } + + ParseNext(); + return true; +} + +const char *LottieParserImpl::NextObjectKey() +{ + if (st_ == kHasKey) { + const char *result = v_.GetString(); + ParseNext(); + return result; + } + + /* SPECIAL CASE + * The parser works with a prdefined rule that it will be only + * while (NextObjectKey()) for each object but in case of our nested group + * object we can call multiple time NextObjectKey() while exiting the object + * so ignore those and don't put parser in the error state. 
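+     * e.g. every parse routine drains an object with
+     *     while (const char *key = NextObjectKey()) { ... }
+     * and the extra calls made while unwinding nested groups land in the
+     * two states tolerated below.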
+ * */ + if (st_ == kExitingArray || st_ == kEnteringObject) { + // #ifdef DEBUG_PARSER + // vDebug<<"Object: Exiting nested loop"; + // #endif + return nullptr; + } + + if (st_ != kExitingObject) { + st_ = kError; + return nullptr; + } + + ParseNext(); + return nullptr; +} + +bool LottieParserImpl::NextArrayValue() +{ + if (st_ == kExitingArray) { + ParseNext(); + return false; + } + + /* SPECIAL CASE + * same as NextObjectKey() + */ + if (st_ == kExitingObject) { + return false; + } + + if (st_ == kError || st_ == kHasKey) { + st_ = kError; + return false; + } + + return true; +} + +int LottieParserImpl::GetInt() +{ + if (st_ != kHasNumber || !v_.IsInt()) { + st_ = kError; + return 0; + } + + int result = v_.GetInt(); + ParseNext(); + return result; +} + +double LottieParserImpl::GetDouble() +{ + if (st_ != kHasNumber) { + st_ = kError; + return 0.; + } + + double result = v_.GetDouble(); + ParseNext(); + return result; +} + +bool LottieParserImpl::GetBool() +{ + if (st_ != kHasBool) { + st_ = kError; + return false; + } + + bool result = v_.GetBool(); + ParseNext(); + return result; +} + +void LottieParserImpl::GetNull() +{ + if (st_ != kHasNull) { + st_ = kError; + return; + } + + ParseNext(); +} + +const char *LottieParserImpl::GetString() +{ + if (st_ != kHasString) { + st_ = kError; + return nullptr; + } + + const char *result = v_.GetString(); + ParseNext(); + return result; +} + +std::string LottieParserImpl::GetStringObject() +{ + auto str = GetString(); + + if (str) { + return std::string(str); + } + + return {}; +} + +void LottieParserImpl::SkipOut(int depth) +{ + do { + if (st_ == kEnteringArray || st_ == kEnteringObject) { + ++depth; + } else if (st_ == kExitingArray || st_ == kExitingObject) { + --depth; + } else if (st_ == kError) { + return; + } + + ParseNext(); + } while (depth > 0); +} + +void LottieParserImpl::SkipValue() +{ + SkipOut(0); +} + +void LottieParserImpl::SkipArray() +{ + SkipOut(1); +} + +void LottieParserImpl::SkipObject() +{ + SkipOut(1); +} + +Value *LottieParserImpl::PeekValue() +{ + if (st_ >= kHasNull && st_ <= kHasKey) { + return &v_; + } + + return nullptr; +} + +// returns a rapidjson::Type, or -1 for no value (at end of +// object/array) +int LottieParserImpl::PeekType() const +{ + if (st_ >= kHasNull && st_ <= kHasKey) { + return v_.GetType(); + } + + if (st_ == kEnteringArray) { + return kArrayType; + } + + if (st_ == kEnteringObject) { + return kObjectType; + } + + return -1; +} + +void LottieParserImpl::Skip(const char * /*key*/) +{ + if (PeekType() == kArrayType) { + EnterArray(); + SkipArray(); + } else if (PeekType() == kObjectType) { + EnterObject(); + SkipObject(); + } else { + SkipValue(); + } +} + +model::BlendMode LottieParserImpl::getBlendMode() +{ + auto mode = model::BlendMode::Normal; + + switch (GetInt()) { + case 1: + mode = model::BlendMode::Multiply; + break; + case 2: + mode = model::BlendMode::Screen; + break; + case 3: + mode = model::BlendMode::OverLay; + break; + default: + break; + } + return mode; +} + +void LottieParserImpl::resolveLayerRefs() +{ + for (const auto &layer : mLayersToUpdate) { + auto search = compRef->mAssets.find(layer->extra()->mPreCompRefId); + if (search != compRef->mAssets.end()) { + if (layer->mLayerType == model::Layer::Type::Image) { + layer->extra()->mAsset = search->second; + } else if (layer->mLayerType == model::Layer::Type::Precomp) { + layer->mChildren = search->second->mLayers; + layer->setStatic(layer->isStatic() && + search->second->isStatic()); + } + } + } +} + +void 
LottieParserImpl::parseComposition() +{ + EnterObject(); + std::shared_ptr sharedComposition = + std::make_shared(); + model::Composition *comp = sharedComposition.get(); + compRef = comp; + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "v")) { + comp->mVersion = GetStringObject(); + } else if (0 == strcmp(key, "w")) { + comp->mSize.setWidth(GetInt()); + } else if (0 == strcmp(key, "h")) { + comp->mSize.setHeight(GetInt()); + } else if (0 == strcmp(key, "ip")) { + comp->mStartFrame = GetDouble(); + } else if (0 == strcmp(key, "op")) { + comp->mEndFrame = GetDouble(); + } else if (0 == strcmp(key, "fr")) { + comp->mFrameRate = GetDouble(); + } else if (0 == strcmp(key, "assets")) { + parseAssets(comp); + } else if (0 == strcmp(key, "layers")) { + parseLayers(comp); + } else if (0 == strcmp(key, "markers")) { + parseMarkers(); + } else { +#ifdef DEBUG_PARSER + vWarning << "Composition Attribute Skipped : " << key; +#endif + Skip(key); + } + } + + if (comp->mVersion.empty() || !comp->mRootLayer) { + // don't have a valid bodymovin header + return; + } + if (comp->mStartFrame > comp->mEndFrame) { + // reveresed animation? missing data? + return; + } + if (!IsValid()) { + return; + } + + resolveLayerRefs(); + comp->setStatic(comp->mRootLayer->isStatic()); + comp->mRootLayer->mInFrame = comp->mStartFrame; + comp->mRootLayer->mOutFrame = comp->mEndFrame; + + mComposition = sharedComposition; +} + +void LottieParserImpl::parseMarker() +{ + EnterObject(); + std::string comment; + int timeframe{0}; + int duration{0}; + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "cm")) { + comment = GetStringObject(); + } else if (0 == strcmp(key, "tm")) { + timeframe = GetDouble(); + } else if (0 == strcmp(key, "dr")) { + duration = GetDouble(); + + } else { +#ifdef DEBUG_PARSER + vWarning << "Marker Attribute Skipped : " << key; +#endif + Skip(key); + } + } + compRef->mMarkers.emplace_back(std::move(comment), timeframe, + timeframe + duration); +} + +void LottieParserImpl::parseMarkers() +{ + EnterArray(); + while (NextArrayValue()) { + parseMarker(); + } + // update the precomp layers with the actual layer object +} + +void LottieParserImpl::parseAssets(model::Composition *composition) +{ + EnterArray(); + while (NextArrayValue()) { + auto asset = parseAsset(); + composition->mAssets[asset->mRefId] = asset; + } + // update the precomp layers with the actual layer object +} + +static constexpr const unsigned char B64index[256] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 62, 63, 62, 62, 63, 52, 53, 54, 55, 56, 57, + 58, 59, 60, 61, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, + 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 0, 0, 0, 0, 63, 0, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, + 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51}; + +std::string b64decode(const char *data, const size_t len) +{ + auto p = reinterpret_cast(data); + int pad = len > 0 && (len % 4 || p[len - 1] == '='); + const size_t L = ((len + 3) / 4 - pad) * 4; + std::string str(L / 4 * 3 + pad, '\0'); + + for (size_t i = 0, j = 0; i < L; i += 4) { + int n = B64index[p[i]] << 18 | B64index[p[i + 1]] << 12 | + B64index[p[i + 2]] << 6 | B64index[p[i + 3]]; + str[j++] = n >> 16; + str[j++] = n >> 8 & 0xFF; + str[j++] = n & 0xFF; + } + if (pad) { + int n = B64index[p[L]] << 18 | B64index[p[L + 1]] << 12; + str[str.size() - 1] = n >> 16; + + if (len > L + 2 && p[L + 2] != '=') 
{ + n |= B64index[p[L + 2]] << 6; + str.push_back(n >> 8 & 0xFF); + } + } + return str; +} + +static std::string convertFromBase64(const std::string &str) +{ + // usual header look like "data:image/png;base64," + // so need to skip till ','. + size_t startIndex = str.find(",", 0); + startIndex += 1; // skip "," + size_t length = str.length() - startIndex; + + const char *b64Data = str.c_str() + startIndex; + + return b64decode(b64Data, length); +} + +/* + * std::to_string() function is missing in VS2017 + * so this is workaround for windows build + */ +#include +template +static std::string toString(const T &value) +{ + std::ostringstream os; + os << value; + return os.str(); +} + +/* + * https://github.com/airbnb/lottie-web/blob/master/docs/json/layers/shape.json + * + */ +model::Asset *LottieParserImpl::parseAsset() +{ + auto asset = allocator().make(); + std::string filename; + std::string relativePath; + bool embededResource = false; + EnterObject(); + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "w")) { + asset->mWidth = GetInt(); + } else if (0 == strcmp(key, "h")) { + asset->mHeight = GetInt(); + } else if (0 == strcmp(key, "p")) { /* image name */ + asset->mAssetType = model::Asset::Type::Image; + filename = GetStringObject(); + } else if (0 == strcmp(key, "u")) { /* relative image path */ + relativePath = GetStringObject(); + } else if (0 == strcmp(key, "e")) { /* relative image path */ + embededResource = GetInt(); + } else if (0 == strcmp(key, "id")) { /* reference id*/ + if (PeekType() == kStringType) { + asset->mRefId = GetStringObject(); + } else { + asset->mRefId = toString(GetInt()); + } + } else if (0 == strcmp(key, "layers")) { + asset->mAssetType = model::Asset::Type::Precomp; + EnterArray(); + bool staticFlag = true; + while (NextArrayValue()) { + auto layer = parseLayer(); + if (layer) { + staticFlag = staticFlag && layer->isStatic(); + asset->mLayers.push_back(layer); + } + } + asset->setStatic(staticFlag); + } else { +#ifdef DEBUG_PARSER + vWarning << "Asset Attribute Skipped : " << key; +#endif + Skip(key); + } + } + + if (asset->mAssetType == model::Asset::Type::Image) { + if (embededResource) { + // embeder resource should start with "data:" + if (filename.compare(0, 5, "data:") == 0) { + asset->loadImageData(convertFromBase64(filename)); + } + } else { + asset->loadImagePath(mDirPath + relativePath + filename); + } + } + + return asset; +} + +void LottieParserImpl::parseLayers(model::Composition *comp) +{ + comp->mRootLayer = allocator().make(); + comp->mRootLayer->mLayerType = model::Layer::Type::Precomp; + comp->mRootLayer->setName("__"); + bool staticFlag = true; + EnterArray(); + while (NextArrayValue()) { + auto layer = parseLayer(); + if (layer) { + staticFlag = staticFlag && layer->isStatic(); + comp->mRootLayer->mChildren.push_back(layer); + } + } + comp->mRootLayer->setStatic(staticFlag); +} + +model::Color LottieParserImpl::toColor(const char *str) +{ + if (!str) return {}; + + model::Color color; + auto len = strlen(str); + + // some resource has empty color string + // return a default color for those cases. 
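+    // (only the 7 character "#rrggbb" form is parsed; anything else
+    // falls back to the default-constructed Color, i.e. white.)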
+ if (len != 7 || str[0] != '#') return color; + + char tmp[3] = {'\0', '\0', '\0'}; + tmp[0] = str[1]; + tmp[1] = str[2]; + color.r = std::strtol(tmp, nullptr, 16) / 255.0f; + + tmp[0] = str[3]; + tmp[1] = str[4]; + color.g = std::strtol(tmp, nullptr, 16) / 255.0f; + + tmp[0] = str[5]; + tmp[1] = str[6]; + color.b = std::strtol(tmp, nullptr, 16) / 255.0f; + + return color; +} + +model::MatteType LottieParserImpl::getMatteType() +{ + switch (GetInt()) { + case 1: + return model::MatteType::Alpha; + break; + case 2: + return model::MatteType::AlphaInv; + break; + case 3: + return model::MatteType::Luma; + break; + case 4: + return model::MatteType::LumaInv; + break; + default: + return model::MatteType::None; + break; + } +} + +model::Layer::Type LottieParserImpl::getLayerType() +{ + switch (GetInt()) { + case 0: + return model::Layer::Type::Precomp; + break; + case 1: + return model::Layer::Type::Solid; + break; + case 2: + return model::Layer::Type::Image; + break; + case 3: + return model::Layer::Type::Null; + break; + case 4: + return model::Layer::Type::Shape; + break; + case 5: + return model::Layer::Type::Text; + break; + default: + return model::Layer::Type::Null; + break; + } +} + +/* + * https://github.com/airbnb/lottie-web/blob/master/docs/json/layers/shape.json + * + */ +model::Layer *LottieParserImpl::parseLayer() +{ + model::Layer *layer = allocator().make(); + curLayerRef = layer; + bool ddd = true; + EnterObject(); + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "ty")) { /* Type of layer*/ + layer->mLayerType = getLayerType(); + } else if (0 == strcmp(key, "nm")) { /*Layer name*/ + layer->setName(GetString()); + } else if (0 == strcmp(key, "ind")) { /*Layer index in AE. Used for + parenting and expressions.*/ + layer->mId = GetInt(); + } else if (0 == strcmp(key, "ddd")) { /*3d layer */ + ddd = GetInt(); + } else if (0 == + strcmp(key, + "parent")) { /*Layer Parent. 
Uses "ind" of parent.*/ + layer->mParentId = GetInt(); + } else if (0 == strcmp(key, "refId")) { /*preComp Layer reference id*/ + layer->extra()->mPreCompRefId = GetStringObject(); + layer->mHasGradient = true; + mLayersToUpdate.push_back(layer); + } else if (0 == strcmp(key, "sr")) { // "Layer Time Stretching" + layer->mTimeStreatch = GetDouble(); + } else if (0 == strcmp(key, "tm")) { // time remapping + parseProperty(layer->extra()->mTimeRemap); + } else if (0 == strcmp(key, "ip")) { + layer->mInFrame = std::lround(GetDouble()); + } else if (0 == strcmp(key, "op")) { + layer->mOutFrame = std::lround(GetDouble()); + } else if (0 == strcmp(key, "st")) { + layer->mStartFrame = GetDouble(); + } else if (0 == strcmp(key, "bm")) { + layer->mBlendMode = getBlendMode(); + } else if (0 == strcmp(key, "ks")) { + EnterObject(); + layer->mTransform = parseTransformObject(ddd); + } else if (0 == strcmp(key, "shapes")) { + parseShapesAttr(layer); + } else if (0 == strcmp(key, "w")) { + layer->mLayerSize.setWidth(GetInt()); + } else if (0 == strcmp(key, "h")) { + layer->mLayerSize.setHeight(GetInt()); + } else if (0 == strcmp(key, "sw")) { + layer->mLayerSize.setWidth(GetInt()); + } else if (0 == strcmp(key, "sh")) { + layer->mLayerSize.setHeight(GetInt()); + } else if (0 == strcmp(key, "sc")) { + layer->extra()->mSolidColor = toColor(GetString()); + } else if (0 == strcmp(key, "tt")) { + layer->mMatteType = getMatteType(); + } else if (0 == strcmp(key, "hasMask")) { + layer->mHasMask = GetBool(); + } else if (0 == strcmp(key, "masksProperties")) { + parseMaskProperty(layer); + } else if (0 == strcmp(key, "ao")) { + layer->mAutoOrient = GetInt(); + } else if (0 == strcmp(key, "hd")) { + layer->setHidden(GetBool()); + } else { +#ifdef DEBUG_PARSER + vWarning << "Layer Attribute Skipped : " << key; +#endif + Skip(key); + } + } + + if (!layer->mTransform) { + // not a valid layer + return nullptr; + } + + // make sure layer data is not corrupted. + if (layer->hasParent() && (layer->id() == layer->parentId())) + return nullptr; + + if (layer->mExtra) layer->mExtra->mCompRef = compRef; + + if (layer->hidden()) { + // if layer is hidden, only data that is usefull is its + // transform matrix(when it is a parent of some other layer) + // so force it to be a Null Layer and release all resource. 
+ layer->setStatic(layer->mTransform->isStatic()); + layer->mLayerType = model::Layer::Type::Null; + layer->mChildren = {}; + return layer; + } + + // update the static property of layer + bool staticFlag = true; + for (const auto &child : layer->mChildren) { + staticFlag &= child->isStatic(); + } + + if (layer->hasMask() && layer->mExtra) { + for (const auto &mask : layer->mExtra->mMasks) { + staticFlag &= mask->isStatic(); + } + } + + layer->setStatic(staticFlag && layer->mTransform->isStatic()); + + return layer; +} + +void LottieParserImpl::parseMaskProperty(model::Layer *layer) +{ + EnterArray(); + while (NextArrayValue()) { + layer->extra()->mMasks.push_back(parseMaskObject()); + } +} + +model::Mask *LottieParserImpl::parseMaskObject() +{ + auto obj = allocator().make(); + + EnterObject(); + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "inv")) { + obj->mInv = GetBool(); + } else if (0 == strcmp(key, "mode")) { + const char *str = GetString(); + if (!str) { + obj->mMode = model::Mask::Mode::None; + continue; + } + switch (str[0]) { + case 'n': + obj->mMode = model::Mask::Mode::None; + break; + case 'a': + obj->mMode = model::Mask::Mode::Add; + break; + case 's': + obj->mMode = model::Mask::Mode::Substarct; + break; + case 'i': + obj->mMode = model::Mask::Mode::Intersect; + break; + case 'f': + obj->mMode = model::Mask::Mode::Difference; + break; + default: + obj->mMode = model::Mask::Mode::None; + break; + } + } else if (0 == strcmp(key, "pt")) { + parseShapeProperty(obj->mShape); + } else if (0 == strcmp(key, "o")) { + parseProperty(obj->mOpacity); + } else { + Skip(key); + } + } + obj->mIsStatic = obj->mShape.isStatic() && obj->mOpacity.isStatic(); + return obj; +} + +void LottieParserImpl::parseShapesAttr(model::Layer *layer) +{ + EnterArray(); + while (NextArrayValue()) { + parseObject(layer); + } +} + +model::Object *LottieParserImpl::parseObjectTypeAttr() +{ + const char *type = GetString(); + if (0 == strcmp(type, "gr")) { + return parseGroupObject(); + } else if (0 == strcmp(type, "rc")) { + return parseRectObject(); + } else if (0 == strcmp(type, "rd")) { + curLayerRef->mHasRoundedCorner = true; + return parseRoundedCorner(); + } else if (0 == strcmp(type, "el")) { + return parseEllipseObject(); + } else if (0 == strcmp(type, "tr")) { + return parseTransformObject(); + } else if (0 == strcmp(type, "fl")) { + return parseFillObject(); + } else if (0 == strcmp(type, "st")) { + return parseStrokeObject(); + } else if (0 == strcmp(type, "gf")) { + curLayerRef->mHasGradient = true; + return parseGFillObject(); + } else if (0 == strcmp(type, "gs")) { + curLayerRef->mHasGradient = true; + return parseGStrokeObject(); + } else if (0 == strcmp(type, "sh")) { + return parseShapeObject(); + } else if (0 == strcmp(type, "sr")) { + return parsePolystarObject(); + } else if (0 == strcmp(type, "tm")) { + curLayerRef->mHasPathOperator = true; + return parseTrimObject(); + } else if (0 == strcmp(type, "rp")) { + curLayerRef->mHasRepeater = true; + return parseReapeaterObject(); + } else if (0 == strcmp(type, "mm")) { + vWarning << "Merge Path is not supported yet"; + return nullptr; + } else { +#ifdef DEBUG_PARSER + vDebug << "The Object Type not yet handled = " << type; +#endif + return nullptr; + } +} + +void LottieParserImpl::parseObject(model::Group *parent) +{ + EnterObject(); + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "ty")) { + auto child = parseObjectTypeAttr(); + if (child && !child->hidden()) { + if (child->type() == 
model::Object::Type::RoundedCorner) { + updateRoundedCorner(parent, static_cast<model::RoundedCorner *>(child)); + } + parent->mChildren.push_back(child); + } + } else { + Skip(key); + } + } +} + +void LottieParserImpl::updateRoundedCorner(model::Group *group, model::RoundedCorner *rc) +{ + for (auto &e : group->mChildren) + { + if (e->type() == model::Object::Type::Rect) { + static_cast<model::Rect *>(e)->mRoundedCorner = rc; + if (!rc->isStatic()) { + e->setStatic(false); + group->setStatic(false); + //@TODO need to propagate. + } + } else if (e->type() == model::Object::Type::Group) { + updateRoundedCorner(static_cast<model::Group *>(e), rc); + } + } +} + +model::Object *LottieParserImpl::parseGroupObject() +{ + auto group = allocator().make<model::Group>(); + + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "nm")) { + group->setName(GetString()); + } else if (0 == strcmp(key, "it")) { + EnterArray(); + while (NextArrayValue()) { + parseObject(group); + } + if (!group->mChildren.empty() + && group->mChildren.back()->type() + == model::Object::Type::Transform) { + group->mTransform = + static_cast<model::Transform *>(group->mChildren.back()); + group->mChildren.pop_back(); + } + } else { + Skip(key); + } + } + bool staticFlag = true; + for (const auto &child : group->mChildren) { + staticFlag &= child->isStatic(); + } + + if (group->mTransform) { + group->setStatic(staticFlag && group->mTransform->isStatic()); + } + + return group; +} + +/* + * https://github.com/airbnb/lottie-web/blob/master/docs/json/shapes/rect.json + */ +model::Rect *LottieParserImpl::parseRectObject() +{ + auto obj = allocator().make<model::Rect>(); + + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "nm")) { + obj->setName(GetString()); + } else if (0 == strcmp(key, "p")) { + parseProperty(obj->mPos); + } else if (0 == strcmp(key, "s")) { + parseProperty(obj->mSize); + } else if (0 == strcmp(key, "r")) { + parseProperty(obj->mRound); + } else if (0 == strcmp(key, "d")) { + obj->mDirection = GetInt(); + } else if (0 == strcmp(key, "hd")) { + obj->setHidden(GetBool()); + } else { + Skip(key); + } + } + obj->setStatic(obj->mPos.isStatic() && obj->mSize.isStatic() && + obj->mRound.isStatic()); + return obj; +} + +/* + * https://github.com/airbnb/lottie-web/blob/master/docs/json/shapes/rect.json + */ +model::RoundedCorner *LottieParserImpl::parseRoundedCorner() +{ + auto obj = allocator().make<model::RoundedCorner>(); + + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "nm")) { + obj->setName(GetString()); + } else if (0 == strcmp(key, "r")) { + parseProperty(obj->mRadius); + } else if (0 == strcmp(key, "hd")) { + obj->setHidden(GetBool()); + } else { + Skip(key); + } + } + obj->setStatic(obj->mRadius.isStatic()); + return obj; +} + +/* + * https://github.com/airbnb/lottie-web/blob/master/docs/json/shapes/ellipse.json + */ +model::Ellipse *LottieParserImpl::parseEllipseObject() +{ + auto obj = allocator().make<model::Ellipse>(); + + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "nm")) { + obj->setName(GetString()); + } else if (0 == strcmp(key, "p")) { + parseProperty(obj->mPos); + } else if (0 == strcmp(key, "s")) { + parseProperty(obj->mSize); + } else if (0 == strcmp(key, "d")) { + obj->mDirection = GetInt(); + } else if (0 == strcmp(key, "hd")) { + obj->setHidden(GetBool()); + } else { + Skip(key); + } + } + obj->setStatic(obj->mPos.isStatic() && obj->mSize.isStatic()); + return obj; +} + +/* + * https://github.com/airbnb/lottie-web/blob/master/docs/json/shapes/shape.json + */ +model::Path *LottieParserImpl::parseShapeObject() +{ + auto obj = allocator().make<model::Path>(); + + while
(const char *key = NextObjectKey()) { + if (0 == strcmp(key, "nm")) { + obj->setName(GetString()); + } else if (0 == strcmp(key, "ks")) { + parseShapeProperty(obj->mShape); + } else if (0 == strcmp(key, "d")) { + obj->mDirection = GetInt(); + } else if (0 == strcmp(key, "hd")) { + obj->setHidden(GetBool()); + } else { +#ifdef DEBUG_PARSER + vDebug << "Shape property ignored :" << key; +#endif + Skip(key); + } + } + obj->setStatic(obj->mShape.isStatic()); + + return obj; +} + +/* + * https://github.com/airbnb/lottie-web/blob/master/docs/json/shapes/star.json + */ +model::Polystar *LottieParserImpl::parsePolystarObject() +{ + auto obj = allocator().make(); + + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "nm")) { + obj->setName(GetString()); + } else if (0 == strcmp(key, "p")) { + parseProperty(obj->mPos); + } else if (0 == strcmp(key, "pt")) { + parseProperty(obj->mPointCount); + } else if (0 == strcmp(key, "ir")) { + parseProperty(obj->mInnerRadius); + } else if (0 == strcmp(key, "is")) { + parseProperty(obj->mInnerRoundness); + } else if (0 == strcmp(key, "or")) { + parseProperty(obj->mOuterRadius); + } else if (0 == strcmp(key, "os")) { + parseProperty(obj->mOuterRoundness); + } else if (0 == strcmp(key, "r")) { + parseProperty(obj->mRotation); + } else if (0 == strcmp(key, "sy")) { + int starType = GetInt(); + if (starType == 1) obj->mPolyType = model::Polystar::PolyType::Star; + if (starType == 2) + obj->mPolyType = model::Polystar::PolyType::Polygon; + } else if (0 == strcmp(key, "d")) { + obj->mDirection = GetInt(); + } else if (0 == strcmp(key, "hd")) { + obj->setHidden(GetBool()); + } else { +#ifdef DEBUG_PARSER + vDebug << "Polystar property ignored :" << key; +#endif + Skip(key); + } + } + obj->setStatic( + obj->mPos.isStatic() && obj->mPointCount.isStatic() && + obj->mInnerRadius.isStatic() && obj->mInnerRoundness.isStatic() && + obj->mOuterRadius.isStatic() && obj->mOuterRoundness.isStatic() && + obj->mRotation.isStatic()); + + return obj; +} + +model::Trim::TrimType LottieParserImpl::getTrimType() +{ + switch (GetInt()) { + case 1: + return model::Trim::TrimType::Simultaneously; + break; + case 2: + return model::Trim::TrimType::Individually; + break; + default: + Error(); + return model::Trim::TrimType::Simultaneously; + break; + } +} + +/* + * https://github.com/airbnb/lottie-web/blob/master/docs/json/shapes/trim.json + */ +model::Trim *LottieParserImpl::parseTrimObject() +{ + auto obj = allocator().make(); + + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "nm")) { + obj->setName(GetString()); + } else if (0 == strcmp(key, "s")) { + parseProperty(obj->mStart); + } else if (0 == strcmp(key, "e")) { + parseProperty(obj->mEnd); + } else if (0 == strcmp(key, "o")) { + parseProperty(obj->mOffset); + } else if (0 == strcmp(key, "m")) { + obj->mTrimType = getTrimType(); + } else if (0 == strcmp(key, "hd")) { + obj->setHidden(GetBool()); + } else { +#ifdef DEBUG_PARSER + vDebug << "Trim property ignored :" << key; +#endif + Skip(key); + } + } + obj->setStatic(obj->mStart.isStatic() && obj->mEnd.isStatic() && + obj->mOffset.isStatic()); + return obj; +} + +void LottieParserImpl::getValue(model::Repeater::Transform &obj) +{ + EnterObject(); + + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "a")) { + parseProperty(obj.mAnchor); + } else if (0 == strcmp(key, "p")) { + parseProperty(obj.mPosition); + } else if (0 == strcmp(key, "r")) { + parseProperty(obj.mRotation); + } else if (0 == strcmp(key, "s")) { + 
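+ // "so"/"eo" below are the opacities of the first and the last copy; + // opacity for the copies in between is interpolated across the repeat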
parseProperty(obj.mScale); + } else if (0 == strcmp(key, "so")) { + parseProperty(obj.mStartOpacity); + } else if (0 == strcmp(key, "eo")) { + parseProperty(obj.mEndOpacity); + } else { + Skip(key); + } + } +} + +model::Repeater *LottieParserImpl::parseReapeaterObject() +{ + auto obj = allocator().make(); + + obj->setContent(allocator().make()); + + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "nm")) { + obj->setName(GetString()); + } else if (0 == strcmp(key, "c")) { + parseProperty(obj->mCopies); + float maxCopy = 0.0; + if (!obj->mCopies.isStatic()) { + for (auto &keyFrame : obj->mCopies.animation().frames_) { + if (maxCopy < keyFrame.value_.start_) + maxCopy = keyFrame.value_.start_; + if (maxCopy < keyFrame.value_.end_) + maxCopy = keyFrame.value_.end_; + } + } else { + maxCopy = obj->mCopies.value(); + } + obj->mMaxCopies = maxCopy; + } else if (0 == strcmp(key, "o")) { + parseProperty(obj->mOffset); + } else if (0 == strcmp(key, "tr")) { + getValue(obj->mTransform); + } else if (0 == strcmp(key, "hd")) { + obj->setHidden(GetBool()); + } else { +#ifdef DEBUG_PARSER + vDebug << "Repeater property ignored :" << key; +#endif + Skip(key); + } + } + obj->setStatic(obj->mCopies.isStatic() && obj->mOffset.isStatic() && + obj->mTransform.isStatic()); + + return obj; +} + +/* + * https://github.com/airbnb/lottie-web/blob/master/docs/json/shapes/transform.json + */ +model::Transform *LottieParserImpl::parseTransformObject(bool ddd) +{ + auto objT = allocator().make(); + + auto obj = allocator().make(); + if (ddd) { + obj->createExtraData(); + obj->mExtra->m3DData = true; + } + + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "nm")) { + objT->setName(GetString()); + } else if (0 == strcmp(key, "a")) { + parseProperty(obj->mAnchor); + } else if (0 == strcmp(key, "p")) { + EnterObject(); + bool separate = false; + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "k")) { + parsePropertyHelper(obj->mPosition); + } else if (0 == strcmp(key, "s")) { + obj->createExtraData(); + obj->mExtra->mSeparate = GetBool(); + separate = true; + } else if (separate && (0 == strcmp(key, "x"))) { + parseProperty(obj->mExtra->mSeparateX); + } else if (separate && (0 == strcmp(key, "y"))) { + parseProperty(obj->mExtra->mSeparateY); + } else { + Skip(key); + } + } + } else if (0 == strcmp(key, "r")) { + parseProperty(obj->mRotation); + } else if (0 == strcmp(key, "s")) { + parseProperty(obj->mScale); + } else if (0 == strcmp(key, "o")) { + parseProperty(obj->mOpacity); + } else if (0 == strcmp(key, "hd")) { + objT->setHidden(GetBool()); + } else if (0 == strcmp(key, "rx")) { + if (!obj->mExtra) return nullptr; + parseProperty(obj->mExtra->m3DRx); + } else if (0 == strcmp(key, "ry")) { + if (!obj->mExtra) return nullptr; + parseProperty(obj->mExtra->m3DRy); + } else if (0 == strcmp(key, "rz")) { + if (!obj->mExtra) return nullptr; + parseProperty(obj->mExtra->m3DRz); + } else { + Skip(key); + } + } + bool isStatic = obj->mAnchor.isStatic() && obj->mPosition.isStatic() && + obj->mRotation.isStatic() && obj->mScale.isStatic() && + obj->mOpacity.isStatic(); + if (obj->mExtra) { + isStatic = isStatic && obj->mExtra->m3DRx.isStatic() && + obj->mExtra->m3DRy.isStatic() && + obj->mExtra->m3DRz.isStatic() && + obj->mExtra->mSeparateX.isStatic() && + obj->mExtra->mSeparateY.isStatic(); + } + + objT->set(obj, isStatic); + + return objT; +} + +/* + * https://github.com/airbnb/lottie-web/blob/master/docs/json/shapes/fill.json + */ +model::Fill 
*LottieParserImpl::parseFillObject() +{ + auto obj = allocator().make(); + + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "nm")) { + obj->setName(GetString()); + } else if (0 == strcmp(key, "c")) { + parseProperty(obj->mColor); + } else if (0 == strcmp(key, "o")) { + parseProperty(obj->mOpacity); + } else if (0 == strcmp(key, "fillEnabled")) { + obj->mEnabled = GetBool(); + } else if (0 == strcmp(key, "r")) { + obj->mFillRule = getFillRule(); + } else if (0 == strcmp(key, "hd")) { + obj->setHidden(GetBool()); + } else { +#ifdef DEBUG_PARSER + vWarning << "Fill property skipped = " << key; +#endif + Skip(key); + } + } + obj->setStatic(obj->mColor.isStatic() && obj->mOpacity.isStatic()); + + return obj; +} + +/* + * https://github.com/airbnb/lottie-web/blob/master/docs/json/helpers/lineCap.json + */ +CapStyle LottieParserImpl::getLineCap() +{ + switch (GetInt()) { + case 1: + return CapStyle::Flat; + break; + case 2: + return CapStyle::Round; + break; + default: + return CapStyle::Square; + break; + } +} + +FillRule LottieParserImpl::getFillRule() +{ + switch (GetInt()) { + case 1: + return FillRule::Winding; + break; + case 2: + return FillRule::EvenOdd; + break; + default: + return FillRule::Winding; + break; + } +} + +/* + * https://github.com/airbnb/lottie-web/blob/master/docs/json/helpers/lineJoin.json + */ +JoinStyle LottieParserImpl::getLineJoin() +{ + switch (GetInt()) { + case 1: + return JoinStyle::Miter; + break; + case 2: + return JoinStyle::Round; + break; + default: + return JoinStyle::Bevel; + break; + } +} + +/* + * https://github.com/airbnb/lottie-web/blob/master/docs/json/shapes/stroke.json + */ +model::Stroke *LottieParserImpl::parseStrokeObject() +{ + auto obj = allocator().make(); + + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "nm")) { + obj->setName(GetString()); + } else if (0 == strcmp(key, "c")) { + parseProperty(obj->mColor); + } else if (0 == strcmp(key, "o")) { + parseProperty(obj->mOpacity); + } else if (0 == strcmp(key, "w")) { + parseProperty(obj->mWidth); + } else if (0 == strcmp(key, "fillEnabled")) { + obj->mEnabled = GetBool(); + } else if (0 == strcmp(key, "lc")) { + obj->mCapStyle = getLineCap(); + } else if (0 == strcmp(key, "lj")) { + obj->mJoinStyle = getLineJoin(); + } else if (0 == strcmp(key, "ml")) { + obj->mMiterLimit = GetDouble(); + } else if (0 == strcmp(key, "d")) { + parseDashProperty(obj->mDash); + } else if (0 == strcmp(key, "hd")) { + obj->setHidden(GetBool()); + } else { +#ifdef DEBUG_PARSER + vWarning << "Stroke property skipped = " << key; +#endif + Skip(key); + } + } + obj->setStatic(obj->mColor.isStatic() && obj->mOpacity.isStatic() && + obj->mWidth.isStatic() && obj->mDash.isStatic()); + return obj; +} + +void LottieParserImpl::parseGradientProperty(model::Gradient *obj, + const char * key) +{ + if (0 == strcmp(key, "t")) { + obj->mGradientType = GetInt(); + } else if (0 == strcmp(key, "o")) { + parseProperty(obj->mOpacity); + } else if (0 == strcmp(key, "s")) { + parseProperty(obj->mStartPoint); + } else if (0 == strcmp(key, "e")) { + parseProperty(obj->mEndPoint); + } else if (0 == strcmp(key, "h")) { + parseProperty(obj->mHighlightLength); + } else if (0 == strcmp(key, "a")) { + parseProperty(obj->mHighlightAngle); + } else if (0 == strcmp(key, "g")) { + EnterObject(); + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "k")) { + parseProperty(obj->mGradient); + } else if (0 == strcmp(key, "p")) { + obj->mColorPoints = GetInt(); + } else { + Skip(nullptr); + } + } + } 
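+ // ("g" above supplies the stop data: "p" is the color-stop count and "k" + // the flattened stops, [pos, r, g, b] per stop, with any alpha stops + // appended after the color stops)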
else if (0 == strcmp(key, "hd")) { + obj->setHidden(GetBool()); + } else { +#ifdef DEBUG_PARSER + vWarning << "Gradient property skipped = " << key; +#endif + Skip(key); + } + obj->setStatic( + obj->mOpacity.isStatic() && obj->mStartPoint.isStatic() && + obj->mEndPoint.isStatic() && obj->mHighlightAngle.isStatic() && + obj->mHighlightLength.isStatic() && obj->mGradient.isStatic()); +} + +/* + * https://github.com/airbnb/lottie-web/blob/master/docs/json/shapes/gfill.json + */ +model::GradientFill *LottieParserImpl::parseGFillObject() +{ + auto obj = allocator().make(); + + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "nm")) { + obj->setName(GetString()); + } else if (0 == strcmp(key, "r")) { + obj->mFillRule = getFillRule(); + } else { + parseGradientProperty(obj, key); + } + } + return obj; +} + +void LottieParserImpl::parseDashProperty(model::Dash &dash) +{ + EnterArray(); + while (NextArrayValue()) { + EnterObject(); + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "v")) { + dash.mData.emplace_back(); + parseProperty(dash.mData.back()); + } else { + Skip(key); + } + } + } +} + +/* + * https://github.com/airbnb/lottie-web/blob/master/docs/json/shapes/gstroke.json + */ +model::GradientStroke *LottieParserImpl::parseGStrokeObject() +{ + auto obj = allocator().make(); + + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "nm")) { + obj->setName(GetString()); + } else if (0 == strcmp(key, "w")) { + parseProperty(obj->mWidth); + } else if (0 == strcmp(key, "lc")) { + obj->mCapStyle = getLineCap(); + } else if (0 == strcmp(key, "lj")) { + obj->mJoinStyle = getLineJoin(); + } else if (0 == strcmp(key, "ml")) { + obj->mMiterLimit = GetDouble(); + } else if (0 == strcmp(key, "d")) { + parseDashProperty(obj->mDash); + } else { + parseGradientProperty(obj, key); + } + } + + obj->setStatic(obj->isStatic() && obj->mWidth.isStatic() && + obj->mDash.isStatic()); + return obj; +} + +void LottieParserImpl::getValue(std::vector &v) +{ + EnterArray(); + while (NextArrayValue()) { + EnterArray(); + VPointF pt; + getValue(pt); + v.push_back(pt); + } +} + +void LottieParserImpl::getValue(VPointF &pt) +{ + float val[4] = {0.f}; + int i = 0; + + if (PeekType() == kArrayType) EnterArray(); + + while (NextArrayValue()) { + const auto value = GetDouble(); + if (i < 4) { + val[i++] = value; + } + } + pt.setX(val[0]); + pt.setY(val[1]); +} + +void LottieParserImpl::getValue(float &val) +{ + if (PeekType() == kArrayType) { + EnterArray(); + if (NextArrayValue()) val = GetDouble(); + // discard rest + while (NextArrayValue()) { + GetDouble(); + } + } else if (PeekType() == kNumberType) { + val = GetDouble(); + } else { + Error(); + } +} + +void LottieParserImpl::getValue(model::Color &color) +{ + float val[4] = {0.f}; + int i = 0; + if (PeekType() == kArrayType) EnterArray(); + + while (NextArrayValue()) { + const auto value = GetDouble(); + if (i < 4) { + val[i++] = value; + } + } + + if (mColorFilter) mColorFilter(val[0], val[1], val[2]); + + color.r = val[0]; + color.g = val[1]; + color.b = val[2]; +} + +void LottieParserImpl::getValue(model::Gradient::Data &grad) +{ + if (PeekType() == kArrayType) EnterArray(); + + while (NextArrayValue()) { + grad.mGradient.push_back(GetDouble()); + } +} + +void LottieParserImpl::getValue(int &val) +{ + if (PeekType() == kArrayType) { + EnterArray(); + while (NextArrayValue()) { + val = GetInt(); + } + } else if (PeekType() == kNumberType) { + val = GetInt(); + } else { + Error(); + } +} + +void 
LottieParserImpl::parsePathInfo() +{ + mPathInfo.reset(); + + /* + * The shape object could be wrapped by an array + * if it's part of the keyframe object + */ + bool arrayWrapper = (PeekType() == kArrayType); + if (arrayWrapper) EnterArray(); + + EnterObject(); + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "i")) { + getValue(mPathInfo.mInPoint); + } else if (0 == strcmp(key, "o")) { + getValue(mPathInfo.mOutPoint); + } else if (0 == strcmp(key, "v")) { + getValue(mPathInfo.mVertices); + } else if (0 == strcmp(key, "c")) { + mPathInfo.mClosed = GetBool(); + } else { + Error(); + Skip(nullptr); + } + } + // exit properly from the array + if (arrayWrapper) NextArrayValue(); + + mPathInfo.convert(); +} + +void LottieParserImpl::getValue(model::PathData &obj) +{ + parsePathInfo(); + obj.mPoints = mPathInfo.mResult; + obj.mClosed = mPathInfo.mClosed; +} + +VPointF LottieParserImpl::parseInperpolatorPoint() +{ + VPointF cp; + EnterObject(); + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "x")) { + getValue(cp.rx()); + } + if (0 == strcmp(key, "y")) { + getValue(cp.ry()); + } + } + return cp; +} + +template <typename T> +bool LottieParserImpl::parseKeyFrameValue( + const char *key, model::Value<T> &value) +{ + if (0 == strcmp(key, "ti")) { + value.hasTangent_ = true; + getValue(value.inTangent_); + } else if (0 == strcmp(key, "to")) { + value.hasTangent_ = true; + getValue(value.outTangent_); + } else { + return false; + } + return true; +} + +VInterpolator *LottieParserImpl::interpolator(VPointF inTangent, + VPointF outTangent, + std::string key) +{ + if (key.empty()) { + std::array<char, 20> temp; + snprintf(temp.data(), temp.size(), "%.2f_%.2f_%.2f_%.2f", inTangent.x(), + inTangent.y(), outTangent.x(), outTangent.y()); + key = temp.data(); + } + + auto search = mInterpolatorCache.find(key); + + if (search != mInterpolatorCache.end()) { + return search->second; + } + + auto obj = allocator().make<VInterpolator>(outTangent, inTangent); + mInterpolatorCache[std::move(key)] = obj; + return obj; +} + +/* + * https://github.com/airbnb/lottie-web/blob/master/docs/json/properties/multiDimensionalKeyframed.json + */ +template <typename T> +void LottieParserImpl::parseKeyFrame(model::KeyFrames<T> &obj) +{ + struct ParsedField { + std::string interpolatorKey; + bool interpolator{false}; + bool value{false}; + bool hold{false}; + bool noEndValue{true}; + }; + + EnterObject(); + ParsedField parsed; + typename model::KeyFrames<T>::Frame keyframe; + VPointF inTangent; + VPointF outTangent; + + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "i")) { + parsed.interpolator = true; + inTangent = parseInperpolatorPoint(); + } else if (0 == strcmp(key, "o")) { + outTangent = parseInperpolatorPoint(); + } else if (0 == strcmp(key, "t")) { + keyframe.start_ = GetDouble(); + } else if (0 == strcmp(key, "s")) { + parsed.value = true; + getValue(keyframe.value_.start_); + continue; + } else if (0 == strcmp(key, "e")) { + parsed.noEndValue = false; + getValue(keyframe.value_.end_); + continue; + } else if (0 == strcmp(key, "n")) { + if (PeekType() == kStringType) { + parsed.interpolatorKey = GetStringObject(); + } else { + EnterArray(); + while (NextArrayValue()) { + if (parsed.interpolatorKey.empty()) { + parsed.interpolatorKey = GetStringObject(); + } else { + // skip rest of the string + Skip(nullptr); + } + } + } + continue; + } else if (parseKeyFrameValue(key, keyframe.value_)) { + continue; + } else if (0 == strcmp(key, "h")) { + parsed.hold = GetInt(); + continue; + } else { +#ifdef DEBUG_PARSER + vDebug << "key
frame property skipped = " << key; +#endif + Skip(key); + } + } + + auto &list = obj.frames_; + if (!list.empty()) { + // update the endFrame value of current keyframe + list.back().end_ = keyframe.start_; + // if no end value provided, copy start value to previous frame + if (parsed.value && parsed.noEndValue) { + list.back().value_.end_ = keyframe.value_.start_; + } + } + + if (parsed.hold) { + keyframe.value_.end_ = keyframe.value_.start_; + keyframe.end_ = keyframe.start_; + list.push_back(std::move(keyframe)); + } else if (parsed.interpolator) { + keyframe.interpolator_ = interpolator( + inTangent, outTangent, std::move(parsed.interpolatorKey)); + list.push_back(std::move(keyframe)); + } else { + // it's the last frame, discard it. + } +} + +/* + * https://github.com/airbnb/lottie-web/blob/master/docs/json/properties/shapeKeyframed.json + */ + +/* + * https://github.com/airbnb/lottie-web/blob/master/docs/json/properties/shape.json + */ +void LottieParserImpl::parseShapeProperty(model::Property<model::PathData> &obj) +{ + EnterObject(); + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "k")) { + if (PeekType() == kArrayType) { + EnterArray(); + while (NextArrayValue()) { + parseKeyFrame(obj.animation()); + } + } else { + if (!obj.isStatic()) { + st_ = kError; + return; + } + getValue(obj.value()); + } + } else { +#ifdef DEBUG_PARSER + vDebug << "shape property ignored = " << key; +#endif + Skip(nullptr); + } + } + obj.cache(); +} + +template <typename T, typename Tag> +void LottieParserImpl::parsePropertyHelper(model::Property<T, Tag> &obj) +{ + if (PeekType() == kNumberType) { + if (!obj.isStatic()) { + st_ = kError; + return; + } + /*single value property with no animation*/ + getValue(obj.value()); + } else { + EnterArray(); + while (NextArrayValue()) { + /* property with keyframe info*/ + if (PeekType() == kObjectType) { + parseKeyFrame(obj.animation()); + } else { + /* Read before modifying.
+ * as there is no way of knowing whether the + * value of the array is an array of numbers + * or an array of objects without entering the array; + * that's why this hack is here + */ + if (!obj.isStatic()) { + st_ = kError; + return; + } + /*multi value property with no animation*/ + getValue(obj.value()); + /*break here as we already reached end of array*/ + break; + } + } + obj.cache(); + } +} + +/* + * https://github.com/airbnb/lottie-web/tree/master/docs/json/properties + */ +template <typename T> +void LottieParserImpl::parseProperty(model::Property<T> &obj) +{ + EnterObject(); + while (const char *key = NextObjectKey()) { + if (0 == strcmp(key, "k")) { + parsePropertyHelper(obj); + } else { + Skip(key); + } + } +} + +#ifdef LOTTIE_DUMP_TREE_SUPPORT + +class ObjectInspector { +public: + void visit(model::Composition *obj, std::string level) + { + vDebug << " { " << level << "Composition:: a: " << !obj->isStatic() + << ", v: " << obj->mVersion << ", stFm: " << obj->startFrame() + << ", endFm: " << obj->endFrame() + << ", W: " << obj->size().width() + << ", H: " << obj->size().height() << "\n"; + level.append("\t"); + visit(obj->mRootLayer, level); + level.erase(level.end() - 1, level.end()); + vDebug << " } " << level << "Composition End\n"; + } + void visit(model::Layer *obj, std::string level) + { + vDebug << level << "{ " << layerType(obj->mLayerType) + << ", name: " << obj->name() << ", id:" << obj->mId + << " Pid:" << obj->mParentId << ", a:" << !obj->isStatic() + << ", " << matteType(obj->mMatteType) + << ", mask:" << obj->hasMask() << ", inFm:" << obj->mInFrame + << ", outFm:" << obj->mOutFrame << ", stFm:" << obj->mStartFrame + << ", ts:" << obj->mTimeStreatch << ", ao:" << obj->autoOrient() + << ", W:" << obj->layerSize().width() + << ", H:" << obj->layerSize().height(); + + if (obj->mLayerType == model::Layer::Type::Image) + vDebug << level << "\t{ " + << "ImageInfo:" + << " W :" << obj->extra()->mAsset->mWidth + << ", H :" << obj->extra()->mAsset->mHeight << " }" + << "\n"; + else { + vDebug << level; + } + visitChildren(static_cast<model::Group *>(obj), level); + vDebug << level << "} " << layerType(obj->mLayerType).c_str() + << ", id: " << obj->mId << "\n"; + } + void visitChildren(model::Group *obj, std::string level) + { + level.append("\t"); + for (const auto &child : obj->mChildren) visit(child, level); + if (obj->mTransform) visit(obj->mTransform, level); + } + + void visit(model::Object *obj, std::string level) + { + switch (obj->type()) { + case model::Object::Type::Repeater: { + auto r = static_cast<model::Repeater *>(obj); + vDebug << level << "{ Repeater: name: " << obj->name() + << " , a:" << !obj->isStatic() + << ", copies:" << r->maxCopies() + << ", offset:" << r->offset(0); + visitChildren(r->mContent, level); + vDebug << level << "} Repeater"; + break; + } + case model::Object::Type::Group: { + vDebug << level << "{ Group: name: " << obj->name() + << " , a:" << !obj->isStatic(); + visitChildren(static_cast<model::Group *>(obj), level); + vDebug << level << "} Group"; + break; + } + case model::Object::Type::Layer: { + visit(static_cast<model::Layer *>(obj), level); + break; + } + case model::Object::Type::Trim: { + vDebug << level << "{ Trim: name: " << obj->name() + << " , a:" << !obj->isStatic() << " }"; + break; + } + case model::Object::Type::Rect: { + vDebug << level << "{ Rect: name: " << obj->name() + << " , a:" << !obj->isStatic() << " }"; + break; + } + case model::Object::Type::RoundedCorner: { + vDebug << level << "{ RoundedCorner: name: " << obj->name() + << " , a:" << !obj->isStatic() << " }"; + break; + } + case
model::Object::Type::Ellipse: { + vDebug << level << "{ Ellipse: name: " << obj->name() + << " , a:" << !obj->isStatic() << " }"; + break; + } + case model::Object::Type::Path: { + vDebug << level << "{ Shape: name: " << obj->name() + << " , a:" << !obj->isStatic() << " }"; + break; + } + case model::Object::Type::Polystar: { + vDebug << level << "{ Polystar: name: " << obj->name() + << " , a:" << !obj->isStatic() << " }"; + break; + } + case model::Object::Type::Transform: { + vDebug << level << "{ Transform: name: " << obj->name() + << " , a: " << !obj->isStatic() << " }"; + break; + } + case model::Object::Type::Stroke: { + vDebug << level << "{ Stroke: name: " << obj->name() + << " , a:" << !obj->isStatic() << " }"; + break; + } + case model::Object::Type::GStroke: { + vDebug << level << "{ GStroke: name: " << obj->name() + << " , a:" << !obj->isStatic() << " }"; + break; + } + case model::Object::Type::Fill: { + vDebug << level << "{ Fill: name: " << obj->name() + << " , a:" << !obj->isStatic() << " }"; + break; + } + case model::Object::Type::GFill: { + auto f = static_cast<model::GradientFill *>(obj); + vDebug << level << "{ GFill: name: " << obj->name() + << " , a:" << !f->isStatic() << ", ty:" << f->mGradientType + << ", s:" << f->mStartPoint.value(0) + << ", e:" << f->mEndPoint.value(0) << " }"; + break; + } + default: + break; + } + } + + std::string matteType(model::MatteType type) + { + switch (type) { + case model::MatteType::None: + return "Matte::None"; + break; + case model::MatteType::Alpha: + return "Matte::Alpha"; + break; + case model::MatteType::AlphaInv: + return "Matte::AlphaInv"; + break; + case model::MatteType::Luma: + return "Matte::Luma"; + break; + case model::MatteType::LumaInv: + return "Matte::LumaInv"; + break; + default: + return "Matte::Unknown"; + break; + } + } + std::string layerType(model::Layer::Type type) + { + switch (type) { + case model::Layer::Type::Precomp: + return "Layer::Precomp"; + break; + case model::Layer::Type::Null: + return "Layer::Null"; + break; + case model::Layer::Type::Shape: + return "Layer::Shape"; + break; + case model::Layer::Type::Solid: + return "Layer::Solid"; + break; + case model::Layer::Type::Image: + return "Layer::Image"; + break; + case model::Layer::Type::Text: + return "Layer::Text"; + break; + default: + return "Layer::Unknown"; + break; + } + } +}; + +#endif + +std::shared_ptr<model::Composition> model::parse(char * str, + std::string dir_path, + model::ColorFilter filter) +{ + LottieParserImpl obj(str, std::move(dir_path), std::move(filter)); + + if (obj.VerifyType()) { + obj.parseComposition(); + auto composition = obj.composition(); + if (composition) { + composition->processRepeaterObjects(); + composition->updateStats(); + +#ifdef LOTTIE_DUMP_TREE_SUPPORT + ObjectInspector inspector; + inspector.visit(composition.get(), ""); +#endif + + return composition; + } + } + + vWarning << "Input data is not Lottie format!"; + return {}; +} + +RAPIDJSON_DIAG_POP diff --git a/vendor/github.com/Benau/go_rlottie/lottie_lottieproxymodel.cpp b/vendor/github.com/Benau/go_rlottie/lottie_lottieproxymodel.cpp new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_allocators.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_allocators.h new file mode 100644 index 00000000..20109a19 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_allocators.h @@ -0,0 +1,284 @@ +// Tencent is pleased to support the open source community by making RapidJSON available.
+// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_ALLOCATORS_H_ +#define RAPIDJSON_ALLOCATORS_H_ + +#include "lottie_rapidjson_rapidjson.h" + +RAPIDJSON_NAMESPACE_BEGIN + +/////////////////////////////////////////////////////////////////////////////// +// Allocator + +/*! \class rapidjson::Allocator + \brief Concept for allocating, resizing and freeing memory block. + + Note that Malloc() and Realloc() are non-static but Free() is static. + + So if an allocator need to support Free(), it needs to put its pointer in + the header of memory block. + +\code +concept Allocator { + static const bool kNeedFree; //!< Whether this allocator needs to call Free(). + + // Allocate a memory block. + // \param size of the memory block in bytes. + // \returns pointer to the memory block. + void* Malloc(size_t size); + + // Resize a memory block. + // \param originalPtr The pointer to current memory block. Null pointer is permitted. + // \param originalSize The current size in bytes. (Design issue: since some allocator may not book-keep this, explicitly pass to it can save memory.) + // \param newSize the new size in bytes. + void* Realloc(void* originalPtr, size_t originalSize, size_t newSize); + + // Free a memory block. + // \param pointer to the memory block. Null pointer is permitted. + static void Free(void *ptr); +}; +\endcode +*/ + + +/*! \def RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY + \ingroup RAPIDJSON_CONFIG + \brief User-defined kDefaultChunkCapacity definition. + + User can define this as any \c size that is a power of 2. +*/ + +#ifndef RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY +#define RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY (64 * 1024) +#endif + + +/////////////////////////////////////////////////////////////////////////////// +// CrtAllocator + +//! C-runtime library allocator. +/*! This class is just wrapper for standard C library memory routines. + \note implements Allocator concept +*/ +class CrtAllocator { +public: + static const bool kNeedFree = true; + void* Malloc(size_t size) { + if (size) // behavior of malloc(0) is implementation defined. + return RAPIDJSON_MALLOC(size); + else + return NULL; // standardize to returning NULL. + } + void* Realloc(void* originalPtr, size_t originalSize, size_t newSize) { + (void)originalSize; + if (newSize == 0) { + RAPIDJSON_FREE(originalPtr); + return NULL; + } + return RAPIDJSON_REALLOC(originalPtr, newSize); + } + static void Free(void *ptr) { RAPIDJSON_FREE(ptr); } +}; + +/////////////////////////////////////////////////////////////////////////////// +// MemoryPoolAllocator + +//! Default memory allocator used by the parser and DOM. +/*! This allocator allocate memory blocks from pre-allocated memory chunks. + + It does not free memory blocks. And Realloc() only allocate new memory. + + The memory chunks are allocated by BaseAllocator, which is CrtAllocator by default. + + User may also supply a buffer as the first chunk. 
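+ + For illustration, a minimal usage sketch (assuming the defaulted CrtAllocator base): + \code + char buffer[1024]; + MemoryPoolAllocator<> a(buffer, sizeof(buffer)); // user buffer becomes the first chunk + void* p = a.Malloc(256); // served from the user buffer, no heap allocation + \endcode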
+ + If the user-buffer is full then additional chunks are allocated by BaseAllocator. + + The user-buffer is not deallocated by this allocator. + + \tparam BaseAllocator the allocator type for allocating memory chunks. Default is CrtAllocator. + \note implements Allocator concept +*/ +template +class MemoryPoolAllocator { +public: + static const bool kNeedFree = false; //!< Tell users that no need to call Free() with this allocator. (concept Allocator) + + //! Constructor with chunkSize. + /*! \param chunkSize The size of memory chunk. The default is kDefaultChunkSize. + \param baseAllocator The allocator for allocating memory chunks. + */ + MemoryPoolAllocator(size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) : + chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(0), baseAllocator_(baseAllocator), ownBaseAllocator_(0) + { + } + + //! Constructor with user-supplied buffer. + /*! The user buffer will be used firstly. When it is full, memory pool allocates new chunk with chunk size. + + The user buffer will not be deallocated when this allocator is destructed. + + \param buffer User supplied buffer. + \param size Size of the buffer in bytes. It must at least larger than sizeof(ChunkHeader). + \param chunkSize The size of memory chunk. The default is kDefaultChunkSize. + \param baseAllocator The allocator for allocating memory chunks. + */ + MemoryPoolAllocator(void *buffer, size_t size, size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) : + chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(buffer), baseAllocator_(baseAllocator), ownBaseAllocator_(0) + { + RAPIDJSON_ASSERT(buffer != 0); + RAPIDJSON_ASSERT(size > sizeof(ChunkHeader)); + chunkHead_ = reinterpret_cast(buffer); + chunkHead_->capacity = size - sizeof(ChunkHeader); + chunkHead_->size = 0; + chunkHead_->next = 0; + } + + //! Destructor. + /*! This deallocates all memory chunks, excluding the user-supplied buffer. + */ + ~MemoryPoolAllocator() { + Clear(); + RAPIDJSON_DELETE(ownBaseAllocator_); + } + + //! Deallocates all memory chunks, excluding the user-supplied buffer. + void Clear() { + while (chunkHead_ && chunkHead_ != userBuffer_) { + ChunkHeader* next = chunkHead_->next; + baseAllocator_->Free(chunkHead_); + chunkHead_ = next; + } + if (chunkHead_ && chunkHead_ == userBuffer_) + chunkHead_->size = 0; // Clear user buffer + } + + //! Computes the total capacity of allocated memory chunks. + /*! \return total capacity in bytes. + */ + size_t Capacity() const { + size_t capacity = 0; + for (ChunkHeader* c = chunkHead_; c != 0; c = c->next) + capacity += c->capacity; + return capacity; + } + + //! Computes the memory blocks allocated. + /*! \return total used bytes. + */ + size_t Size() const { + size_t size = 0; + for (ChunkHeader* c = chunkHead_; c != 0; c = c->next) + size += c->size; + return size; + } + + //! Allocates a memory block. (concept Allocator) + void* Malloc(size_t size) { + if (!size) + return NULL; + + size = RAPIDJSON_ALIGN(size); + if (chunkHead_ == 0 || chunkHead_->size + size > chunkHead_->capacity) + if (!AddChunk(chunk_capacity_ > size ? chunk_capacity_ : size)) + return NULL; + + void *buffer = reinterpret_cast(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size; + chunkHead_->size += size; + return buffer; + } + + //! 
Resizes a memory block (concept Allocator) + void* Realloc(void* originalPtr, size_t originalSize, size_t newSize) { + if (originalPtr == 0) + return Malloc(newSize); + + if (newSize == 0) + return NULL; + + originalSize = RAPIDJSON_ALIGN(originalSize); + newSize = RAPIDJSON_ALIGN(newSize); + + // Do not shrink if new size is smaller than original + if (originalSize >= newSize) + return originalPtr; + + // Simply expand it if it is the last allocation and there is sufficient space + if (originalPtr == reinterpret_cast(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size - originalSize) { + size_t increment = static_cast(newSize - originalSize); + if (chunkHead_->size + increment <= chunkHead_->capacity) { + chunkHead_->size += increment; + return originalPtr; + } + } + + // Realloc process: allocate and copy memory, do not free original buffer. + if (void* newBuffer = Malloc(newSize)) { + if (originalSize) + std::memcpy(newBuffer, originalPtr, originalSize); + return newBuffer; + } + else + return NULL; + } + + //! Frees a memory block (concept Allocator) + static void Free(void *ptr) { (void)ptr; } // Do nothing + +private: + //! Copy constructor is not permitted. + MemoryPoolAllocator(const MemoryPoolAllocator& rhs) /* = delete */; + //! Copy assignment operator is not permitted. + MemoryPoolAllocator& operator=(const MemoryPoolAllocator& rhs) /* = delete */; + + //! Creates a new chunk. + /*! \param capacity Capacity of the chunk in bytes. + \return true if success. + */ + bool AddChunk(size_t capacity) { + if (!baseAllocator_) + ownBaseAllocator_ = baseAllocator_ = RAPIDJSON_NEW(BaseAllocator)(); + if (ChunkHeader* chunk = reinterpret_cast(baseAllocator_->Malloc(RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + capacity))) { + chunk->capacity = capacity; + chunk->size = 0; + chunk->next = chunkHead_; + chunkHead_ = chunk; + return true; + } + else + return false; + } + + static const int kDefaultChunkCapacity = RAPIDJSON_ALLOCATOR_DEFAULT_CHUNK_CAPACITY; //!< Default chunk capacity. + + //! Chunk header for perpending to each chunk. + /*! Chunks are stored as a singly linked list. + */ + struct ChunkHeader { + size_t capacity; //!< Capacity of the chunk in bytes (excluding the header itself). + size_t size; //!< Current size of allocated memory in bytes. + ChunkHeader *next; //!< Next chunk in the linked list. + }; + + ChunkHeader *chunkHead_; //!< Head of the chunk linked-list. Only the head chunk serves allocation. + size_t chunk_capacity_; //!< The minimum capacity of chunk when they are allocated. + void *userBuffer_; //!< User supplied buffer. + BaseAllocator* baseAllocator_; //!< base allocator for allocating memory chunks. + BaseAllocator* ownBaseAllocator_; //!< base allocator created by this object. +}; + +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_ENCODINGS_H_ diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_cursorstreamwrapper.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_cursorstreamwrapper.h new file mode 100644 index 00000000..36d8e4c6 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_cursorstreamwrapper.h @@ -0,0 +1,78 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. 
You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_CURSORSTREAMWRAPPER_H_ +#define RAPIDJSON_CURSORSTREAMWRAPPER_H_ + +#include "lottie_rapidjson_stream.h" + +#if defined(__GNUC__) +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(effc++) +#endif + +#if defined(_MSC_VER) && _MSC_VER <= 1800 +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(4702) // unreachable code +RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated +#endif + +RAPIDJSON_NAMESPACE_BEGIN + + +//! Cursor stream wrapper for counting line and column number if error exists. +/*! + \tparam InputStream Any stream that implements Stream Concept +*/ +template > +class CursorStreamWrapper : public GenericStreamWrapper { +public: + typedef typename Encoding::Ch Ch; + + CursorStreamWrapper(InputStream& is): + GenericStreamWrapper(is), line_(1), col_(0) {} + + // counting line and column number + Ch Take() { + Ch ch = this->is_.Take(); + if(ch == '\n') { + line_ ++; + col_ = 0; + } else { + col_ ++; + } + return ch; + } + + //! Get the error line number, if error exists. + size_t GetLine() const { return line_; } + //! Get the error column number, if error exists. + size_t GetColumn() const { return col_; } + +private: + size_t line_; //!< Current Line + size_t col_; //!< Current Column +}; + +#if defined(_MSC_VER) && _MSC_VER <= 1800 +RAPIDJSON_DIAG_POP +#endif + +#if defined(__GNUC__) +RAPIDJSON_DIAG_POP +#endif + +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_CURSORSTREAMWRAPPER_H_ diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_document.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_document.h new file mode 100644 index 00000000..09cbf454 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_document.h @@ -0,0 +1,2732 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_DOCUMENT_H_ +#define RAPIDJSON_DOCUMENT_H_ + +/*! 
\file document.h */ + +#include "lottie_rapidjson_reader.h" +#include "lottie_rapidjson_internal_meta.h" +#include "lottie_rapidjson_internal_strfunc.h" +#include "lottie_rapidjson_memorystream.h" +#include "lottie_rapidjson_encodedstream.h" +#include <new> // placement new +#include <limits> +#ifdef __cpp_lib_three_way_comparison +#include <compare> +#endif + +RAPIDJSON_DIAG_PUSH +#ifdef __clang__ +RAPIDJSON_DIAG_OFF(padded) +RAPIDJSON_DIAG_OFF(switch-enum) +RAPIDJSON_DIAG_OFF(c++98-compat) +#elif defined(_MSC_VER) +RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant +RAPIDJSON_DIAG_OFF(4244) // conversion from kXxxFlags to 'uint16_t', possible loss of data +#endif + +#ifdef __GNUC__ +RAPIDJSON_DIAG_OFF(effc++) +#endif // __GNUC__ + +#ifndef RAPIDJSON_NOMEMBERITERATORCLASS +#include <iterator> // std::random_access_iterator_tag +#endif + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS +#include <utility> // std::move +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +// Forward declaration. +template <typename Encoding, typename Allocator> +class GenericValue; + +template <typename Encoding, typename Allocator, typename StackAllocator> +class GenericDocument; + +/*! \def RAPIDJSON_DEFAULT_ALLOCATOR + \ingroup RAPIDJSON_CONFIG + \brief Allows choosing the default allocator. + + User can define this to use CrtAllocator or MemoryPoolAllocator. +*/ +#ifndef RAPIDJSON_DEFAULT_ALLOCATOR +#define RAPIDJSON_DEFAULT_ALLOCATOR MemoryPoolAllocator<CrtAllocator> +#endif + +/*! \def RAPIDJSON_DEFAULT_STACK_ALLOCATOR + \ingroup RAPIDJSON_CONFIG + \brief Allows choosing the default stack allocator for Document. + + User can define this to use CrtAllocator or MemoryPoolAllocator. +*/ +#ifndef RAPIDJSON_DEFAULT_STACK_ALLOCATOR +#define RAPIDJSON_DEFAULT_STACK_ALLOCATOR CrtAllocator +#endif + +/*! \def RAPIDJSON_VALUE_DEFAULT_OBJECT_CAPACITY + \ingroup RAPIDJSON_CONFIG + \brief User-defined kDefaultObjectCapacity value. + + User can define this as any natural number. +*/ +#ifndef RAPIDJSON_VALUE_DEFAULT_OBJECT_CAPACITY +// number of objects that rapidjson::Value allocates memory for by default +#define RAPIDJSON_VALUE_DEFAULT_OBJECT_CAPACITY 16 +#endif + +/*! \def RAPIDJSON_VALUE_DEFAULT_ARRAY_CAPACITY + \ingroup RAPIDJSON_CONFIG + \brief User-defined kDefaultArrayCapacity value. + + User can define this as any natural number. +*/ +#ifndef RAPIDJSON_VALUE_DEFAULT_ARRAY_CAPACITY +// number of array elements that rapidjson::Value allocates memory for by default +#define RAPIDJSON_VALUE_DEFAULT_ARRAY_CAPACITY 16 +#endif + +//! Name-value pair in a JSON object value. +/*! + This class was internal to GenericValue. It used to be an inner struct. + But a compiler (IBM XL C/C++ for AIX) was reported to have problems with that, so it was moved to a namespace-scope struct. + https://code.google.com/p/rapidjson/issues/detail?id=64 +*/ +template <typename Encoding, typename Allocator> +class GenericMember { +public: + GenericValue<Encoding, Allocator> name; //!< name of member (must be a string) + GenericValue<Encoding, Allocator> value; //!< value of member. + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + //! Move constructor in C++11 + GenericMember(GenericMember&& rhs) RAPIDJSON_NOEXCEPT + : name(std::move(rhs.name)), + value(std::move(rhs.value)) + { + } + + //! Move assignment in C++11 + GenericMember& operator=(GenericMember&& rhs) RAPIDJSON_NOEXCEPT { + return *this = static_cast<GenericMember&>(rhs); + } +#endif + + //! Assignment with move semantics. + /*! \param rhs Source of the assignment. Its name and value will become a null value after assignment. + */ + GenericMember& operator=(GenericMember& rhs) RAPIDJSON_NOEXCEPT { + if (RAPIDJSON_LIKELY(this != &rhs)) { + name = rhs.name; + value = rhs.value; + } + return *this; + } + + // swap() for std::sort() and other potential use in STL.
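+ // (swapping exchanges the two members' underlying data in place, + // so std::sort() over members never deep-copies names or values)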
+ friend inline void swap(GenericMember& a, GenericMember& b) RAPIDJSON_NOEXCEPT { + a.name.Swap(b.name); + a.value.Swap(b.value); + } + +private: + //! Copy constructor is not permitted. + GenericMember(const GenericMember& rhs); +}; + +/////////////////////////////////////////////////////////////////////////////// +// GenericMemberIterator + +#ifndef RAPIDJSON_NOMEMBERITERATORCLASS + +//! (Constant) member iterator for a JSON object value +/*! + \tparam Const Is this a constant iterator? + \tparam Encoding Encoding of the value. (Even non-string values need to have the same encoding in a document) + \tparam Allocator Allocator type for allocating memory of object, array and string. + + This class implements a Random Access Iterator for GenericMember elements + of a GenericValue, see ISO/IEC 14882:2003(E) C++ standard, 24.1 [lib.iterator.requirements]. + + \note This iterator implementation is mainly intended to avoid implicit + conversions from iterator values to \c NULL, + e.g. from GenericValue::FindMember. + + \note Define \c RAPIDJSON_NOMEMBERITERATORCLASS to fall back to a + pointer-based implementation, if your platform doesn't provide + the C++ header. + + \see GenericMember, GenericValue::MemberIterator, GenericValue::ConstMemberIterator + */ +template +class GenericMemberIterator { + + friend class GenericValue; + template friend class GenericMemberIterator; + + typedef GenericMember PlainType; + typedef typename internal::MaybeAddConst::Type ValueType; + +public: + //! Iterator type itself + typedef GenericMemberIterator Iterator; + //! Constant iterator type + typedef GenericMemberIterator ConstIterator; + //! Non-constant iterator type + typedef GenericMemberIterator NonConstIterator; + + /** \name std::iterator_traits support */ + //@{ + typedef ValueType value_type; + typedef ValueType * pointer; + typedef ValueType & reference; + typedef std::ptrdiff_t difference_type; + typedef std::random_access_iterator_tag iterator_category; + //@} + + //! Pointer to (const) GenericMember + typedef pointer Pointer; + //! Reference to (const) GenericMember + typedef reference Reference; + //! Signed integer type (e.g. \c ptrdiff_t) + typedef difference_type DifferenceType; + + //! Default constructor (singular value) + /*! Creates an iterator pointing to no element. + \note All operations, except for comparisons, are undefined on such values. + */ + GenericMemberIterator() : ptr_() {} + + //! Iterator conversions to more const + /*! + \param it (Non-const) iterator to copy from + + Allows the creation of an iterator from another GenericMemberIterator + that is "less const". Especially, creating a non-constant iterator + from a constant iterator are disabled: + \li const -> non-const (not ok) + \li const -> const (ok) + \li non-const -> const (ok) + \li non-const -> non-const (ok) + + \note If the \c Const template parameter is already \c false, this + constructor effectively defines a regular copy-constructor. + Otherwise, the copy constructor is implicitly defined. + */ + GenericMemberIterator(const NonConstIterator & it) : ptr_(it.ptr_) {} + Iterator& operator=(const NonConstIterator & it) { ptr_ = it.ptr_; return *this; } + + //! @name stepping + //@{ + Iterator& operator++(){ ++ptr_; return *this; } + Iterator& operator--(){ --ptr_; return *this; } + Iterator operator++(int){ Iterator old(*this); ++ptr_; return old; } + Iterator operator--(int){ Iterator old(*this); --ptr_; return old; } + //@} + + //! 
@name increment/decrement + //@{ + Iterator operator+(DifferenceType n) const { return Iterator(ptr_+n); } + Iterator operator-(DifferenceType n) const { return Iterator(ptr_-n); } + + Iterator& operator+=(DifferenceType n) { ptr_+=n; return *this; } + Iterator& operator-=(DifferenceType n) { ptr_-=n; return *this; } + //@} + + //! @name relations + //@{ + template bool operator==(const GenericMemberIterator& that) const { return ptr_ == that.ptr_; } + template bool operator!=(const GenericMemberIterator& that) const { return ptr_ != that.ptr_; } + template bool operator<=(const GenericMemberIterator& that) const { return ptr_ <= that.ptr_; } + template bool operator>=(const GenericMemberIterator& that) const { return ptr_ >= that.ptr_; } + template bool operator< (const GenericMemberIterator& that) const { return ptr_ < that.ptr_; } + template bool operator> (const GenericMemberIterator& that) const { return ptr_ > that.ptr_; } + +#ifdef __cpp_lib_three_way_comparison + template std::strong_ordering operator<=>(const GenericMemberIterator& that) const { return ptr_ <=> that.ptr_; } +#endif + //@} + + //! @name dereference + //@{ + Reference operator*() const { return *ptr_; } + Pointer operator->() const { return ptr_; } + Reference operator[](DifferenceType n) const { return ptr_[n]; } + //@} + + //! Distance + DifferenceType operator-(ConstIterator that) const { return ptr_-that.ptr_; } + +private: + //! Internal constructor from plain pointer + explicit GenericMemberIterator(Pointer p) : ptr_(p) {} + + Pointer ptr_; //!< raw pointer +}; + +#else // RAPIDJSON_NOMEMBERITERATORCLASS + +// class-based member iterator implementation disabled, use plain pointers + +template +class GenericMemberIterator; + +//! non-const GenericMemberIterator +template +class GenericMemberIterator { + //! use plain pointer as iterator type + typedef GenericMember* Iterator; +}; +//! const GenericMemberIterator +template +class GenericMemberIterator { + //! use plain const pointer as iterator type + typedef const GenericMember* Iterator; +}; + +#endif // RAPIDJSON_NOMEMBERITERATORCLASS + +/////////////////////////////////////////////////////////////////////////////// +// GenericStringRef + +//! Reference to a constant string (not taking a copy) +/*! + \tparam CharType character type of the string + + This helper class is used to automatically infer constant string + references for string literals, especially from \c const \b (!) + character arrays. + + The main use is for creating JSON string values without copying the + source string via an \ref Allocator. This requires that the referenced + string pointers have a sufficient lifetime, which exceeds the lifetime + of the associated GenericValue. + + \b Example + \code + Value v("foo"); // ok, no need to copy & calculate length + const char foo[] = "foo"; + v.SetString(foo); // ok + + const char* bar = foo; + // Value x(bar); // not ok, can't rely on bar's lifetime + Value x(StringRef(bar)); // lifetime explicitly guaranteed by user + Value y(StringRef(bar, 3)); // ok, explicitly pass length + \endcode + + \see StringRef, GenericValue::SetString +*/ +template +struct GenericStringRef { + typedef CharType Ch; //!< character type of the string + + //! Create string reference from \c const character array +#ifndef __clang__ // -Wdocumentation + /*! + This constructor implicitly creates a constant string reference from + a \c const character array. 
It has better performance than + \ref StringRef(const CharType*) by inferring the string \ref length + from the array length, and also supports strings containing null + characters. + + \tparam N length of the string, automatically inferred + + \param str Constant character array, lifetime assumed to be longer + than the use of the string in e.g. a GenericValue + + \post \ref s == str + + \note Constant complexity. + \note There is a hidden, private overload to disallow references to + non-const character arrays to be created via this constructor. + By this, e.g. function-scope arrays used to be filled via + \c snprintf are excluded from consideration. + In such cases, the referenced string should be \b copied to the + GenericValue instead. + */ +#endif + template + GenericStringRef(const CharType (&str)[N]) RAPIDJSON_NOEXCEPT + : s(str), length(N-1) {} + + //! Explicitly create string reference from \c const character pointer +#ifndef __clang__ // -Wdocumentation + /*! + This constructor can be used to \b explicitly create a reference to + a constant string pointer. + + \see StringRef(const CharType*) + + \param str Constant character pointer, lifetime assumed to be longer + than the use of the string in e.g. a GenericValue + + \post \ref s == str + + \note There is a hidden, private overload to disallow references to + non-const character arrays to be created via this constructor. + By this, e.g. function-scope arrays used to be filled via + \c snprintf are excluded from consideration. + In such cases, the referenced string should be \b copied to the + GenericValue instead. + */ +#endif + explicit GenericStringRef(const CharType* str) + : s(str), length(NotNullStrLen(str)) {} + + //! Create constant string reference from pointer and length +#ifndef __clang__ // -Wdocumentation + /*! \param str constant string, lifetime assumed to be longer than the use of the string in e.g. a GenericValue + \param len length of the string, excluding the trailing NULL terminator + + \post \ref s == str && \ref length == len + \note Constant complexity. + */ +#endif + GenericStringRef(const CharType* str, SizeType len) + : s(RAPIDJSON_LIKELY(str) ? str : emptyString), length(len) { RAPIDJSON_ASSERT(str != 0 || len == 0u); } + + GenericStringRef(const GenericStringRef& rhs) : s(rhs.s), length(rhs.length) {} + + //! implicit conversion to plain CharType pointer + operator const Ch *() const { return s; } + + const Ch* const s; //!< plain CharType pointer + const SizeType length; //!< length of the string (excluding the trailing NULL terminator) + +private: + SizeType NotNullStrLen(const CharType* str) { + RAPIDJSON_ASSERT(str != 0); + return internal::StrLen(str); + } + + /// Empty string - used when passing in a NULL pointer + static const Ch emptyString[]; + + //! Disallow construction from non-const array + template + GenericStringRef(CharType (&str)[N]) /* = delete */; + //! Copy assignment operator not permitted - immutable type + GenericStringRef& operator=(const GenericStringRef& rhs) /* = delete */; +}; + +template +const CharType GenericStringRef::emptyString[] = { CharType() }; + +//! Mark a character pointer as constant string +/*! Mark a plain character pointer as a "string literal". This function + can be used to avoid copying a character string to be referenced as a + value in a JSON GenericValue object, if the string's lifetime is known + to be valid long enough. 
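+
+    A minimal usage sketch (for illustration; assumes the pointed-to
+    storage outlives \c v):
+    \code
+    const char* buf = "telegram";
+    Value v(StringRef(buf)); // wraps the pointer without copying
+    \endcode
+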
+    \tparam CharType Character type of the string
+    \param str Constant string, lifetime assumed to be longer than the use of the string in e.g. a GenericValue
+    \return GenericStringRef string reference object
+    \relatesalso GenericStringRef
+
+    \see GenericValue::GenericValue(StringRefType), GenericValue::operator=(StringRefType), GenericValue::SetString(StringRefType), GenericValue::PushBack(StringRefType, Allocator&), GenericValue::AddMember
+*/
+template<typename CharType>
+inline GenericStringRef<CharType> StringRef(const CharType* str) {
+    return GenericStringRef<CharType>(str);
+}
+
+//! Mark a character pointer as constant string
+/*! Mark a plain character pointer as a "string literal". This function
+    can be used to avoid copying a character string to be referenced as a
+    value in a JSON GenericValue object, if the string's lifetime is known
+    to be valid long enough.
+
+    This version has better performance with supplied length, and also
+    supports strings containing null characters.
+
+    \tparam CharType character type of the string
+    \param str Constant string, lifetime assumed to be longer than the use of the string in e.g. a GenericValue
+    \param length The length of source string.
+    \return GenericStringRef string reference object
+    \relatesalso GenericStringRef
+*/
+template<typename CharType>
+inline GenericStringRef<CharType> StringRef(const CharType* str, size_t length) {
+    return GenericStringRef<CharType>(str, SizeType(length));
+}
+
+#if RAPIDJSON_HAS_STDSTRING
+//! Mark a string object as constant string
+/*! Mark a string object (e.g. \c std::string) as a "string literal".
+    This function can be used to avoid copying a string to be referenced as a
+    value in a JSON GenericValue object, if the string's lifetime is known
+    to be valid long enough.
+
+    \tparam CharType character type of the string
+    \param str Constant string, lifetime assumed to be longer than the use of the string in e.g. a GenericValue
+    \return GenericStringRef string reference object
+    \relatesalso GenericStringRef
+    \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING.
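+
+    A minimal sketch, assuming \c RAPIDJSON_HAS_STDSTRING is defined and
+    \c name outlives the value referring to it:
+    \code
+    std::string name = "sticker";
+    Value v(StringRef(name)); // references name's buffer, no copy
+    \endcode
+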
+*/ +template +inline GenericStringRef StringRef(const std::basic_string& str) { + return GenericStringRef(str.data(), SizeType(str.size())); +} +#endif + +/////////////////////////////////////////////////////////////////////////////// +// GenericValue type traits +namespace internal { + +template +struct IsGenericValueImpl : FalseType {}; + +// select candidates according to nested encoding and allocator types +template struct IsGenericValueImpl::Type, typename Void::Type> + : IsBaseOf, T>::Type {}; + +// helper to match arbitrary GenericValue instantiations, including derived classes +template struct IsGenericValue : IsGenericValueImpl::Type {}; + +} // namespace internal + +/////////////////////////////////////////////////////////////////////////////// +// TypeHelper + +namespace internal { + +template +struct TypeHelper {}; + +template +struct TypeHelper { + static bool Is(const ValueType& v) { return v.IsBool(); } + static bool Get(const ValueType& v) { return v.GetBool(); } + static ValueType& Set(ValueType& v, bool data) { return v.SetBool(data); } + static ValueType& Set(ValueType& v, bool data, typename ValueType::AllocatorType&) { return v.SetBool(data); } +}; + +template +struct TypeHelper { + static bool Is(const ValueType& v) { return v.IsInt(); } + static int Get(const ValueType& v) { return v.GetInt(); } + static ValueType& Set(ValueType& v, int data) { return v.SetInt(data); } + static ValueType& Set(ValueType& v, int data, typename ValueType::AllocatorType&) { return v.SetInt(data); } +}; + +template +struct TypeHelper { + static bool Is(const ValueType& v) { return v.IsUint(); } + static unsigned Get(const ValueType& v) { return v.GetUint(); } + static ValueType& Set(ValueType& v, unsigned data) { return v.SetUint(data); } + static ValueType& Set(ValueType& v, unsigned data, typename ValueType::AllocatorType&) { return v.SetUint(data); } +}; + +#ifdef _MSC_VER +RAPIDJSON_STATIC_ASSERT(sizeof(long) == sizeof(int)); +template +struct TypeHelper { + static bool Is(const ValueType& v) { return v.IsInt(); } + static long Get(const ValueType& v) { return v.GetInt(); } + static ValueType& Set(ValueType& v, long data) { return v.SetInt(data); } + static ValueType& Set(ValueType& v, long data, typename ValueType::AllocatorType&) { return v.SetInt(data); } +}; + +RAPIDJSON_STATIC_ASSERT(sizeof(unsigned long) == sizeof(unsigned)); +template +struct TypeHelper { + static bool Is(const ValueType& v) { return v.IsUint(); } + static unsigned long Get(const ValueType& v) { return v.GetUint(); } + static ValueType& Set(ValueType& v, unsigned long data) { return v.SetUint(data); } + static ValueType& Set(ValueType& v, unsigned long data, typename ValueType::AllocatorType&) { return v.SetUint(data); } +}; +#endif + +template +struct TypeHelper { + static bool Is(const ValueType& v) { return v.IsInt64(); } + static int64_t Get(const ValueType& v) { return v.GetInt64(); } + static ValueType& Set(ValueType& v, int64_t data) { return v.SetInt64(data); } + static ValueType& Set(ValueType& v, int64_t data, typename ValueType::AllocatorType&) { return v.SetInt64(data); } +}; + +template +struct TypeHelper { + static bool Is(const ValueType& v) { return v.IsUint64(); } + static uint64_t Get(const ValueType& v) { return v.GetUint64(); } + static ValueType& Set(ValueType& v, uint64_t data) { return v.SetUint64(data); } + static ValueType& Set(ValueType& v, uint64_t data, typename ValueType::AllocatorType&) { return v.SetUint64(data); } +}; + +template +struct TypeHelper { + static bool Is(const 
ValueType& v) { return v.IsDouble(); } + static double Get(const ValueType& v) { return v.GetDouble(); } + static ValueType& Set(ValueType& v, double data) { return v.SetDouble(data); } + static ValueType& Set(ValueType& v, double data, typename ValueType::AllocatorType&) { return v.SetDouble(data); } +}; + +template +struct TypeHelper { + static bool Is(const ValueType& v) { return v.IsFloat(); } + static float Get(const ValueType& v) { return v.GetFloat(); } + static ValueType& Set(ValueType& v, float data) { return v.SetFloat(data); } + static ValueType& Set(ValueType& v, float data, typename ValueType::AllocatorType&) { return v.SetFloat(data); } +}; + +template +struct TypeHelper { + typedef const typename ValueType::Ch* StringType; + static bool Is(const ValueType& v) { return v.IsString(); } + static StringType Get(const ValueType& v) { return v.GetString(); } + static ValueType& Set(ValueType& v, const StringType data) { return v.SetString(typename ValueType::StringRefType(data)); } + static ValueType& Set(ValueType& v, const StringType data, typename ValueType::AllocatorType& a) { return v.SetString(data, a); } +}; + +#if RAPIDJSON_HAS_STDSTRING +template +struct TypeHelper > { + typedef std::basic_string StringType; + static bool Is(const ValueType& v) { return v.IsString(); } + static StringType Get(const ValueType& v) { return StringType(v.GetString(), v.GetStringLength()); } + static ValueType& Set(ValueType& v, const StringType& data, typename ValueType::AllocatorType& a) { return v.SetString(data, a); } +}; +#endif + +template +struct TypeHelper { + typedef typename ValueType::Array ArrayType; + static bool Is(const ValueType& v) { return v.IsArray(); } + static ArrayType Get(ValueType& v) { return v.GetArray(); } + static ValueType& Set(ValueType& v, ArrayType data) { return v = data; } + static ValueType& Set(ValueType& v, ArrayType data, typename ValueType::AllocatorType&) { return v = data; } +}; + +template +struct TypeHelper { + typedef typename ValueType::ConstArray ArrayType; + static bool Is(const ValueType& v) { return v.IsArray(); } + static ArrayType Get(const ValueType& v) { return v.GetArray(); } +}; + +template +struct TypeHelper { + typedef typename ValueType::Object ObjectType; + static bool Is(const ValueType& v) { return v.IsObject(); } + static ObjectType Get(ValueType& v) { return v.GetObject(); } + static ValueType& Set(ValueType& v, ObjectType data) { return v = data; } + static ValueType& Set(ValueType& v, ObjectType data, typename ValueType::AllocatorType&) { return v = data; } +}; + +template +struct TypeHelper { + typedef typename ValueType::ConstObject ObjectType; + static bool Is(const ValueType& v) { return v.IsObject(); } + static ObjectType Get(const ValueType& v) { return v.GetObject(); } +}; + +} // namespace internal + +// Forward declarations +template class GenericArray; +template class GenericObject; + +/////////////////////////////////////////////////////////////////////////////// +// GenericValue + +//! Represents a JSON value. Use Value for UTF8 encoding and default allocator. +/*! + A JSON value can be one of 7 types. This class is a variant type supporting + these types. + + Use the Value if UTF8 and default allocator + + \tparam Encoding Encoding of the value. (Even non-string values need to have the same encoding in a document) + \tparam Allocator Allocator type for allocating memory of object, array and string. +*/ +template +class GenericValue { +public: + //! Name-value pair in an object. 
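+
+    /* The TypeHelper specializations above back the generic member templates
+       Is<T>() and Get<T>() on GenericValue. A minimal sketch of how they
+       surface to callers, assuming a parsed document d with a numeric
+       member "fps":
+
+           if (d["fps"].Is<int>()) {
+               int fps = d["fps"].Get<int>(); // dispatches via TypeHelper<Value, int>
+               (void)fps;
+           }
+    */
+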
+ typedef GenericMember Member; + typedef Encoding EncodingType; //!< Encoding type from template parameter. + typedef Allocator AllocatorType; //!< Allocator type from template parameter. + typedef typename Encoding::Ch Ch; //!< Character type derived from Encoding. + typedef GenericStringRef StringRefType; //!< Reference to a constant string + typedef typename GenericMemberIterator::Iterator MemberIterator; //!< Member iterator for iterating in object. + typedef typename GenericMemberIterator::Iterator ConstMemberIterator; //!< Constant member iterator for iterating in object. + typedef GenericValue* ValueIterator; //!< Value iterator for iterating in array. + typedef const GenericValue* ConstValueIterator; //!< Constant value iterator for iterating in array. + typedef GenericValue ValueType; //!< Value type of itself. + typedef GenericArray Array; + typedef GenericArray ConstArray; + typedef GenericObject Object; + typedef GenericObject ConstObject; + + //!@name Constructors and destructor. + //@{ + + //! Default constructor creates a null value. + GenericValue() RAPIDJSON_NOEXCEPT : data_() { data_.f.flags = kNullFlag; } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + //! Move constructor in C++11 + GenericValue(GenericValue&& rhs) RAPIDJSON_NOEXCEPT : data_(rhs.data_) { + rhs.data_.f.flags = kNullFlag; // give up contents + } +#endif + +private: + //! Copy constructor is not permitted. + GenericValue(const GenericValue& rhs); + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + //! Moving from a GenericDocument is not permitted. + template + GenericValue(GenericDocument&& rhs); + + //! Move assignment from a GenericDocument is not permitted. + template + GenericValue& operator=(GenericDocument&& rhs); +#endif + +public: + + //! Constructor with JSON value type. + /*! This creates a Value of specified type with default content. + \param type Type of the value. + \note Default content for number is zero. + */ + explicit GenericValue(Type type) RAPIDJSON_NOEXCEPT : data_() { + static const uint16_t defaultFlags[] = { + kNullFlag, kFalseFlag, kTrueFlag, kObjectFlag, kArrayFlag, kShortStringFlag, + kNumberAnyFlag + }; + RAPIDJSON_NOEXCEPT_ASSERT(type >= kNullType && type <= kNumberType); + data_.f.flags = defaultFlags[type]; + + // Use ShortString to store empty string. + if (type == kStringType) + data_.ss.SetLength(0); + } + + //! Explicit copy constructor (with allocator) + /*! Creates a copy of a Value by using the given Allocator + \tparam SourceAllocator allocator of \c rhs + \param rhs Value to copy from (read-only) + \param allocator Allocator for allocating copied elements and buffers. Commonly use GenericDocument::GetAllocator(). + \param copyConstStrings Force copying of constant strings (e.g. 
referencing an in-situ buffer) + \see CopyFrom() + */ + template + GenericValue(const GenericValue& rhs, Allocator& allocator, bool copyConstStrings = false) { + switch (rhs.GetType()) { + case kObjectType: { + SizeType count = rhs.data_.o.size; + Member* lm = reinterpret_cast(allocator.Malloc(count * sizeof(Member))); + const typename GenericValue::Member* rm = rhs.GetMembersPointer(); + for (SizeType i = 0; i < count; i++) { + new (&lm[i].name) GenericValue(rm[i].name, allocator, copyConstStrings); + new (&lm[i].value) GenericValue(rm[i].value, allocator, copyConstStrings); + } + data_.f.flags = kObjectFlag; + data_.o.size = data_.o.capacity = count; + SetMembersPointer(lm); + } + break; + case kArrayType: { + SizeType count = rhs.data_.a.size; + GenericValue* le = reinterpret_cast(allocator.Malloc(count * sizeof(GenericValue))); + const GenericValue* re = rhs.GetElementsPointer(); + for (SizeType i = 0; i < count; i++) + new (&le[i]) GenericValue(re[i], allocator, copyConstStrings); + data_.f.flags = kArrayFlag; + data_.a.size = data_.a.capacity = count; + SetElementsPointer(le); + } + break; + case kStringType: + if (rhs.data_.f.flags == kConstStringFlag && !copyConstStrings) { + data_.f.flags = rhs.data_.f.flags; + data_ = *reinterpret_cast(&rhs.data_); + } + else + SetStringRaw(StringRef(rhs.GetString(), rhs.GetStringLength()), allocator); + break; + default: + data_.f.flags = rhs.data_.f.flags; + data_ = *reinterpret_cast(&rhs.data_); + break; + } + } + + //! Constructor for boolean value. + /*! \param b Boolean value + \note This constructor is limited to \em real boolean values and rejects + implicitly converted types like arbitrary pointers. Use an explicit cast + to \c bool, if you want to construct a boolean JSON value in such cases. + */ +#ifndef RAPIDJSON_DOXYGEN_RUNNING // hide SFINAE from Doxygen + template + explicit GenericValue(T b, RAPIDJSON_ENABLEIF((internal::IsSame))) RAPIDJSON_NOEXCEPT // See #472 +#else + explicit GenericValue(bool b) RAPIDJSON_NOEXCEPT +#endif + : data_() { + // safe-guard against failing SFINAE + RAPIDJSON_STATIC_ASSERT((internal::IsSame::Value)); + data_.f.flags = b ? kTrueFlag : kFalseFlag; + } + + //! Constructor for int value. + explicit GenericValue(int i) RAPIDJSON_NOEXCEPT : data_() { + data_.n.i64 = i; + data_.f.flags = (i >= 0) ? (kNumberIntFlag | kUintFlag | kUint64Flag) : kNumberIntFlag; + } + + //! Constructor for unsigned value. + explicit GenericValue(unsigned u) RAPIDJSON_NOEXCEPT : data_() { + data_.n.u64 = u; + data_.f.flags = (u & 0x80000000) ? kNumberUintFlag : (kNumberUintFlag | kIntFlag | kInt64Flag); + } + + //! Constructor for int64_t value. + explicit GenericValue(int64_t i64) RAPIDJSON_NOEXCEPT : data_() { + data_.n.i64 = i64; + data_.f.flags = kNumberInt64Flag; + if (i64 >= 0) { + data_.f.flags |= kNumberUint64Flag; + if (!(static_cast(i64) & RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x00000000))) + data_.f.flags |= kUintFlag; + if (!(static_cast(i64) & RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x80000000))) + data_.f.flags |= kIntFlag; + } + else if (i64 >= static_cast(RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x80000000))) + data_.f.flags |= kIntFlag; + } + + //! Constructor for uint64_t value. 
+ explicit GenericValue(uint64_t u64) RAPIDJSON_NOEXCEPT : data_() { + data_.n.u64 = u64; + data_.f.flags = kNumberUint64Flag; + if (!(u64 & RAPIDJSON_UINT64_C2(0x80000000, 0x00000000))) + data_.f.flags |= kInt64Flag; + if (!(u64 & RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x00000000))) + data_.f.flags |= kUintFlag; + if (!(u64 & RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x80000000))) + data_.f.flags |= kIntFlag; + } + + //! Constructor for double value. + explicit GenericValue(double d) RAPIDJSON_NOEXCEPT : data_() { data_.n.d = d; data_.f.flags = kNumberDoubleFlag; } + + //! Constructor for float value. + explicit GenericValue(float f) RAPIDJSON_NOEXCEPT : data_() { data_.n.d = static_cast(f); data_.f.flags = kNumberDoubleFlag; } + + //! Constructor for constant string (i.e. do not make a copy of string) + GenericValue(const Ch* s, SizeType length) RAPIDJSON_NOEXCEPT : data_() { SetStringRaw(StringRef(s, length)); } + + //! Constructor for constant string (i.e. do not make a copy of string) + explicit GenericValue(StringRefType s) RAPIDJSON_NOEXCEPT : data_() { SetStringRaw(s); } + + //! Constructor for copy-string (i.e. do make a copy of string) + GenericValue(const Ch* s, SizeType length, Allocator& allocator) : data_() { SetStringRaw(StringRef(s, length), allocator); } + + //! Constructor for copy-string (i.e. do make a copy of string) + GenericValue(const Ch*s, Allocator& allocator) : data_() { SetStringRaw(StringRef(s), allocator); } + +#if RAPIDJSON_HAS_STDSTRING + //! Constructor for copy-string from a string object (i.e. do make a copy of string) + /*! \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING. + */ + GenericValue(const std::basic_string& s, Allocator& allocator) : data_() { SetStringRaw(StringRef(s), allocator); } +#endif + + //! Constructor for Array. + /*! + \param a An array obtained by \c GetArray(). + \note \c Array is always pass-by-value. + \note the source array is moved into this value and the sourec array becomes empty. + */ + GenericValue(Array a) RAPIDJSON_NOEXCEPT : data_(a.value_.data_) { + a.value_.data_ = Data(); + a.value_.data_.f.flags = kArrayFlag; + } + + //! Constructor for Object. + /*! + \param o An object obtained by \c GetObject(). + \note \c Object is always pass-by-value. + \note the source object is moved into this value and the sourec object becomes empty. + */ + GenericValue(Object o) RAPIDJSON_NOEXCEPT : data_(o.value_.data_) { + o.value_.data_ = Data(); + o.value_.data_.f.flags = kObjectFlag; + } + + //! Destructor. + /*! Need to destruct elements of array, members of object, or copy-string. + */ + ~GenericValue() { + if (Allocator::kNeedFree) { // Shortcut by Allocator's trait + switch(data_.f.flags) { + case kArrayFlag: + { + GenericValue* e = GetElementsPointer(); + for (GenericValue* v = e; v != e + data_.a.size; ++v) + v->~GenericValue(); + Allocator::Free(e); + } + break; + + case kObjectFlag: + for (MemberIterator m = MemberBegin(); m != MemberEnd(); ++m) + m->~Member(); + Allocator::Free(GetMembersPointer()); + break; + + case kCopyStringFlag: + Allocator::Free(const_cast(GetStringPointer())); + break; + + default: + break; // Do nothing for other types. + } + } + } + + //@} + + //!@name Assignment operators + //@{ + + //! Assignment with move semantics. + /*! \param rhs Source of the assignment. It will become a null value after assignment. 
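+
+        A minimal sketch of the transfer, with \ref CopyFrom() shown for
+        contrast (assumes an allocator \c a, e.g. from the owning document):
+        \code
+        Value src(42), dst;
+        dst = src;             // dst becomes 42, src becomes null
+        Value copy;
+        copy.CopyFrom(dst, a); // deep copy; dst is left intact
+        \endcode
+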
+ */ + GenericValue& operator=(GenericValue& rhs) RAPIDJSON_NOEXCEPT { + if (RAPIDJSON_LIKELY(this != &rhs)) { + this->~GenericValue(); + RawAssign(rhs); + } + return *this; + } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + //! Move assignment in C++11 + GenericValue& operator=(GenericValue&& rhs) RAPIDJSON_NOEXCEPT { + return *this = rhs.Move(); + } +#endif + + //! Assignment of constant string reference (no copy) + /*! \param str Constant string reference to be assigned + \note This overload is needed to avoid clashes with the generic primitive type assignment overload below. + \see GenericStringRef, operator=(T) + */ + GenericValue& operator=(StringRefType str) RAPIDJSON_NOEXCEPT { + GenericValue s(str); + return *this = s; + } + + //! Assignment with primitive types. + /*! \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t + \param value The value to be assigned. + + \note The source type \c T explicitly disallows all pointer types, + especially (\c const) \ref Ch*. This helps avoiding implicitly + referencing character strings with insufficient lifetime, use + \ref SetString(const Ch*, Allocator&) (for copying) or + \ref StringRef() (to explicitly mark the pointer as constant) instead. + All other pointer types would implicitly convert to \c bool, + use \ref SetBool() instead. + */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::IsPointer), (GenericValue&)) + operator=(T value) { + GenericValue v(value); + return *this = v; + } + + //! Deep-copy assignment from Value + /*! Assigns a \b copy of the Value to the current Value object + \tparam SourceAllocator Allocator type of \c rhs + \param rhs Value to copy from (read-only) + \param allocator Allocator to use for copying + \param copyConstStrings Force copying of constant strings (e.g. referencing an in-situ buffer) + */ + template + GenericValue& CopyFrom(const GenericValue& rhs, Allocator& allocator, bool copyConstStrings = false) { + RAPIDJSON_ASSERT(static_cast(this) != static_cast(&rhs)); + this->~GenericValue(); + new (this) GenericValue(rhs, allocator, copyConstStrings); + return *this; + } + + //! Exchange the contents of this value with those of other. + /*! + \param other Another value. + \note Constant complexity. + */ + GenericValue& Swap(GenericValue& other) RAPIDJSON_NOEXCEPT { + GenericValue temp; + temp.RawAssign(*this); + RawAssign(other); + other.RawAssign(temp); + return *this; + } + + //! free-standing swap function helper + /*! + Helper function to enable support for common swap implementation pattern based on \c std::swap: + \code + void swap(MyClass& a, MyClass& b) { + using std::swap; + swap(a.value, b.value); + // ... + } + \endcode + \see Swap() + */ + friend inline void swap(GenericValue& a, GenericValue& b) RAPIDJSON_NOEXCEPT { a.Swap(b); } + + //! Prepare Value for move semantics + /*! \return *this */ + GenericValue& Move() RAPIDJSON_NOEXCEPT { return *this; } + //@} + + //!@name Equal-to and not-equal-to operators + //@{ + //! Equal-to operator + /*! + \note If an object contains duplicated named member, comparing equality with any object is always \c false. + \note Complexity is quadratic in Object's member number and linear for the rest (number of all values in the subtree and total lengths of all strings). 
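+
+        A minimal sketch (comparison is by type and content, not identity):
+        \code
+        Value x(1), y(1), z(2.0);
+        bool eq = (x == y); // true: equal numbers
+        bool ne = (x != z); // true: 1 != 2.0
+        \endcode
+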
+ */ + template + bool operator==(const GenericValue& rhs) const { + typedef GenericValue RhsType; + if (GetType() != rhs.GetType()) + return false; + + switch (GetType()) { + case kObjectType: // Warning: O(n^2) inner-loop + if (data_.o.size != rhs.data_.o.size) + return false; + for (ConstMemberIterator lhsMemberItr = MemberBegin(); lhsMemberItr != MemberEnd(); ++lhsMemberItr) { + typename RhsType::ConstMemberIterator rhsMemberItr = rhs.FindMember(lhsMemberItr->name); + if (rhsMemberItr == rhs.MemberEnd() || lhsMemberItr->value != rhsMemberItr->value) + return false; + } + return true; + + case kArrayType: + if (data_.a.size != rhs.data_.a.size) + return false; + for (SizeType i = 0; i < data_.a.size; i++) + if ((*this)[i] != rhs[i]) + return false; + return true; + + case kStringType: + return StringEqual(rhs); + + case kNumberType: + if (IsDouble() || rhs.IsDouble()) { + double a = GetDouble(); // May convert from integer to double. + double b = rhs.GetDouble(); // Ditto + return a >= b && a <= b; // Prevent -Wfloat-equal + } + else + return data_.n.u64 == rhs.data_.n.u64; + + default: + return true; + } + } + + //! Equal-to operator with const C-string pointer + bool operator==(const Ch* rhs) const { return *this == GenericValue(StringRef(rhs)); } + +#if RAPIDJSON_HAS_STDSTRING + //! Equal-to operator with string object + /*! \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING. + */ + bool operator==(const std::basic_string& rhs) const { return *this == GenericValue(StringRef(rhs)); } +#endif + + //! Equal-to operator with primitive types + /*! \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c double, \c true, \c false + */ + template RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr,internal::IsGenericValue >), (bool)) operator==(const T& rhs) const { return *this == GenericValue(rhs); } + + //! Not-equal-to operator + /*! \return !(*this == rhs) + */ + template + bool operator!=(const GenericValue& rhs) const { return !(*this == rhs); } + + //! Not-equal-to operator with const C-string pointer + bool operator!=(const Ch* rhs) const { return !(*this == rhs); } + + //! Not-equal-to operator with arbitrary types + /*! \return !(*this == rhs) + */ + template RAPIDJSON_DISABLEIF_RETURN((internal::IsGenericValue), (bool)) operator!=(const T& rhs) const { return !(*this == rhs); } + + //! Equal-to operator with arbitrary types (symmetric version) + /*! \return (rhs == lhs) + */ + template friend RAPIDJSON_DISABLEIF_RETURN((internal::IsGenericValue), (bool)) operator==(const T& lhs, const GenericValue& rhs) { return rhs == lhs; } + + //! Not-Equal-to operator with arbitrary types (symmetric version) + /*! 
\return !(rhs == lhs) + */ + template friend RAPIDJSON_DISABLEIF_RETURN((internal::IsGenericValue), (bool)) operator!=(const T& lhs, const GenericValue& rhs) { return !(rhs == lhs); } + //@} + + //!@name Type + //@{ + + Type GetType() const { return static_cast(data_.f.flags & kTypeMask); } + bool IsNull() const { return data_.f.flags == kNullFlag; } + bool IsFalse() const { return data_.f.flags == kFalseFlag; } + bool IsTrue() const { return data_.f.flags == kTrueFlag; } + bool IsBool() const { return (data_.f.flags & kBoolFlag) != 0; } + bool IsObject() const { return data_.f.flags == kObjectFlag; } + bool IsArray() const { return data_.f.flags == kArrayFlag; } + bool IsNumber() const { return (data_.f.flags & kNumberFlag) != 0; } + bool IsInt() const { return (data_.f.flags & kIntFlag) != 0; } + bool IsUint() const { return (data_.f.flags & kUintFlag) != 0; } + bool IsInt64() const { return (data_.f.flags & kInt64Flag) != 0; } + bool IsUint64() const { return (data_.f.flags & kUint64Flag) != 0; } + bool IsDouble() const { return (data_.f.flags & kDoubleFlag) != 0; } + bool IsString() const { return (data_.f.flags & kStringFlag) != 0; } + + // Checks whether a number can be losslessly converted to a double. + bool IsLosslessDouble() const { + if (!IsNumber()) return false; + if (IsUint64()) { + uint64_t u = GetUint64(); + volatile double d = static_cast(u); + return (d >= 0.0) + && (d < static_cast((std::numeric_limits::max)())) + && (u == static_cast(d)); + } + if (IsInt64()) { + int64_t i = GetInt64(); + volatile double d = static_cast(i); + return (d >= static_cast((std::numeric_limits::min)())) + && (d < static_cast((std::numeric_limits::max)())) + && (i == static_cast(d)); + } + return true; // double, int, uint are always lossless + } + + // Checks whether a number is a float (possible lossy). + bool IsFloat() const { + if ((data_.f.flags & kDoubleFlag) == 0) + return false; + double d = GetDouble(); + return d >= -3.4028234e38 && d <= 3.4028234e38; + } + // Checks whether a number can be losslessly converted to a float. + bool IsLosslessFloat() const { + if (!IsNumber()) return false; + double a = GetDouble(); + if (a < static_cast(-(std::numeric_limits::max)()) + || a > static_cast((std::numeric_limits::max)())) + return false; + double b = static_cast(static_cast(a)); + return a >= b && a <= b; // Prevent -Wfloat-equal + } + + //@} + + //!@name Null + //@{ + + GenericValue& SetNull() { this->~GenericValue(); new (this) GenericValue(); return *this; } + + //@} + + //!@name Bool + //@{ + + bool GetBool() const { RAPIDJSON_ASSERT(IsBool()); return data_.f.flags == kTrueFlag; } + //!< Set boolean value + /*! \post IsBool() == true */ + GenericValue& SetBool(bool b) { this->~GenericValue(); new (this) GenericValue(b); return *this; } + + //@} + + //!@name Object + //@{ + + //! Set this value as an empty object. + /*! \post IsObject() == true */ + GenericValue& SetObject() { this->~GenericValue(); new (this) GenericValue(kObjectType); return *this; } + + //! Get the number of members in the object. + SizeType MemberCount() const { RAPIDJSON_ASSERT(IsObject()); return data_.o.size; } + + //! Get the capacity of object. + SizeType MemberCapacity() const { RAPIDJSON_ASSERT(IsObject()); return data_.o.capacity; } + + //! Check whether the object is empty. + bool ObjectEmpty() const { RAPIDJSON_ASSERT(IsObject()); return data_.o.size == 0; } + + //! Get a value from an object associated with the name. + /*! 
\pre IsObject() == true + \tparam T Either \c Ch or \c const \c Ch (template used for disambiguation with \ref operator[](SizeType)) + \note In version 0.1x, if the member is not found, this function returns a null value. This makes issue 7. + Since 0.2, if the name is not correct, it will assert. + If user is unsure whether a member exists, user should use HasMember() first. + A better approach is to use FindMember(). + \note Linear time complexity. + */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::NotExpr::Type, Ch> >),(GenericValue&)) operator[](T* name) { + GenericValue n(StringRef(name)); + return (*this)[n]; + } + template + RAPIDJSON_DISABLEIF_RETURN((internal::NotExpr::Type, Ch> >),(const GenericValue&)) operator[](T* name) const { return const_cast(*this)[name]; } + + //! Get a value from an object associated with the name. + /*! \pre IsObject() == true + \tparam SourceAllocator Allocator of the \c name value + + \note Compared to \ref operator[](T*), this version is faster because it does not need a StrLen(). + And it can also handle strings with embedded null characters. + + \note Linear time complexity. + */ + template + GenericValue& operator[](const GenericValue& name) { + MemberIterator member = FindMember(name); + if (member != MemberEnd()) + return member->value; + else { + RAPIDJSON_ASSERT(false); // see above note + + // This will generate -Wexit-time-destructors in clang + // static GenericValue NullValue; + // return NullValue; + + // Use static buffer and placement-new to prevent destruction + static char buffer[sizeof(GenericValue)]; + return *new (buffer) GenericValue(); + } + } + template + const GenericValue& operator[](const GenericValue& name) const { return const_cast(*this)[name]; } + +#if RAPIDJSON_HAS_STDSTRING + //! Get a value from an object associated with name (string object). + GenericValue& operator[](const std::basic_string& name) { return (*this)[GenericValue(StringRef(name))]; } + const GenericValue& operator[](const std::basic_string& name) const { return (*this)[GenericValue(StringRef(name))]; } +#endif + + //! Const member iterator + /*! \pre IsObject() == true */ + ConstMemberIterator MemberBegin() const { RAPIDJSON_ASSERT(IsObject()); return ConstMemberIterator(GetMembersPointer()); } + //! Const \em past-the-end member iterator + /*! \pre IsObject() == true */ + ConstMemberIterator MemberEnd() const { RAPIDJSON_ASSERT(IsObject()); return ConstMemberIterator(GetMembersPointer() + data_.o.size); } + //! Member iterator + /*! \pre IsObject() == true */ + MemberIterator MemberBegin() { RAPIDJSON_ASSERT(IsObject()); return MemberIterator(GetMembersPointer()); } + //! \em Past-the-end member iterator + /*! \pre IsObject() == true */ + MemberIterator MemberEnd() { RAPIDJSON_ASSERT(IsObject()); return MemberIterator(GetMembersPointer() + data_.o.size); } + + //! Request the object to have enough capacity to store members. + /*! \param newCapacity The capacity that the object at least need to have. + \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \note Linear time complexity. 
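+
+        A minimal sketch, assuming the member count is known up front and
+        \c d is the owning \c Document:
+        \code
+        Value obj(kObjectType);
+        obj.MemberReserve(2, d.GetAllocator()); // avoid regrowth
+        obj.AddMember("w", 512, d.GetAllocator());
+        obj.AddMember("h", 512, d.GetAllocator());
+        \endcode
+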
+ */ + GenericValue& MemberReserve(SizeType newCapacity, Allocator &allocator) { + RAPIDJSON_ASSERT(IsObject()); + if (newCapacity > data_.o.capacity) { + SetMembersPointer(reinterpret_cast(allocator.Realloc(GetMembersPointer(), data_.o.capacity * sizeof(Member), newCapacity * sizeof(Member)))); + data_.o.capacity = newCapacity; + } + return *this; + } + + //! Check whether a member exists in the object. + /*! + \param name Member name to be searched. + \pre IsObject() == true + \return Whether a member with that name exists. + \note It is better to use FindMember() directly if you need the obtain the value as well. + \note Linear time complexity. + */ + bool HasMember(const Ch* name) const { return FindMember(name) != MemberEnd(); } + +#if RAPIDJSON_HAS_STDSTRING + //! Check whether a member exists in the object with string object. + /*! + \param name Member name to be searched. + \pre IsObject() == true + \return Whether a member with that name exists. + \note It is better to use FindMember() directly if you need the obtain the value as well. + \note Linear time complexity. + */ + bool HasMember(const std::basic_string& name) const { return FindMember(name) != MemberEnd(); } +#endif + + //! Check whether a member exists in the object with GenericValue name. + /*! + This version is faster because it does not need a StrLen(). It can also handle string with null character. + \param name Member name to be searched. + \pre IsObject() == true + \return Whether a member with that name exists. + \note It is better to use FindMember() directly if you need the obtain the value as well. + \note Linear time complexity. + */ + template + bool HasMember(const GenericValue& name) const { return FindMember(name) != MemberEnd(); } + + //! Find member by name. + /*! + \param name Member name to be searched. + \pre IsObject() == true + \return Iterator to member, if it exists. + Otherwise returns \ref MemberEnd(). + + \note Earlier versions of Rapidjson returned a \c NULL pointer, in case + the requested member doesn't exist. For consistency with e.g. + \c std::map, this has been changed to MemberEnd() now. + \note Linear time complexity. + */ + MemberIterator FindMember(const Ch* name) { + GenericValue n(StringRef(name)); + return FindMember(n); + } + + ConstMemberIterator FindMember(const Ch* name) const { return const_cast(*this).FindMember(name); } + + //! Find member by name. + /*! + This version is faster because it does not need a StrLen(). It can also handle string with null character. + \param name Member name to be searched. + \pre IsObject() == true + \return Iterator to member, if it exists. + Otherwise returns \ref MemberEnd(). + + \note Earlier versions of Rapidjson returned a \c NULL pointer, in case + the requested member doesn't exist. For consistency with e.g. + \c std::map, this has been changed to MemberEnd() now. + \note Linear time complexity. + */ + template + MemberIterator FindMember(const GenericValue& name) { + RAPIDJSON_ASSERT(IsObject()); + RAPIDJSON_ASSERT(name.IsString()); + MemberIterator member = MemberBegin(); + for ( ; member != MemberEnd(); ++member) + if (name.StringEqual(member->name)) + break; + return member; + } + template ConstMemberIterator FindMember(const GenericValue& name) const { return const_cast(*this).FindMember(name); } + +#if RAPIDJSON_HAS_STDSTRING + //! Find member by string object name. + /*! + \param name Member name to be searched. + \pre IsObject() == true + \return Iterator to member, if it exists. + Otherwise returns \ref MemberEnd(). 
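+
+        A minimal lookup sketch, assuming a parsed \c Document \c d:
+        \code
+        std::string key = "width";
+        Value::ConstMemberIterator it = d.FindMember(key);
+        if (it != d.MemberEnd()) {
+            int w = it->value.GetInt();
+            (void)w;
+        }
+        \endcode
+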
+ */ + MemberIterator FindMember(const std::basic_string& name) { return FindMember(GenericValue(StringRef(name))); } + ConstMemberIterator FindMember(const std::basic_string& name) const { return FindMember(GenericValue(StringRef(name))); } +#endif + + //! Add a member (name-value pair) to the object. + /*! \param name A string value as name of member. + \param value Value of any type. + \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \note The ownership of \c name and \c value will be transferred to this object on success. + \pre IsObject() && name.IsString() + \post name.IsNull() && value.IsNull() + \note Amortized Constant time complexity. + */ + GenericValue& AddMember(GenericValue& name, GenericValue& value, Allocator& allocator) { + RAPIDJSON_ASSERT(IsObject()); + RAPIDJSON_ASSERT(name.IsString()); + + ObjectData& o = data_.o; + if (o.size >= o.capacity) + MemberReserve(o.capacity == 0 ? kDefaultObjectCapacity : (o.capacity + (o.capacity + 1) / 2), allocator); + Member* members = GetMembersPointer(); + members[o.size].name.RawAssign(name); + members[o.size].value.RawAssign(value); + o.size++; + return *this; + } + + //! Add a constant string value as member (name-value pair) to the object. + /*! \param name A string value as name of member. + \param value constant string reference as value of member. + \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \pre IsObject() + \note This overload is needed to avoid clashes with the generic primitive type AddMember(GenericValue&,T,Allocator&) overload below. + \note Amortized Constant time complexity. + */ + GenericValue& AddMember(GenericValue& name, StringRefType value, Allocator& allocator) { + GenericValue v(value); + return AddMember(name, v, allocator); + } + +#if RAPIDJSON_HAS_STDSTRING + //! Add a string object as member (name-value pair) to the object. + /*! \param name A string value as name of member. + \param value constant string reference as value of member. + \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \pre IsObject() + \note This overload is needed to avoid clashes with the generic primitive type AddMember(GenericValue&,T,Allocator&) overload below. + \note Amortized Constant time complexity. + */ + GenericValue& AddMember(GenericValue& name, std::basic_string& value, Allocator& allocator) { + GenericValue v(value, allocator); + return AddMember(name, v, allocator); + } +#endif + + //! Add any primitive value as member (name-value pair) to the object. + /*! \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t + \param name A string value as name of member. + \param value Value of primitive type \c T as value of member + \param allocator Allocator for reallocating memory. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \pre IsObject() + + \note The source type \c T explicitly disallows all pointer types, + especially (\c const) \ref Ch*. This helps avoiding implicitly + referencing character strings with insufficient lifetime, use + \ref AddMember(StringRefType, GenericValue&, Allocator&) or \ref + AddMember(StringRefType, StringRefType, Allocator&). 
+ All other pointer types would implicitly convert to \c bool, + use an explicit cast instead, if needed. + \note Amortized Constant time complexity. + */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (GenericValue&)) + AddMember(GenericValue& name, T value, Allocator& allocator) { + GenericValue v(value); + return AddMember(name, v, allocator); + } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + GenericValue& AddMember(GenericValue&& name, GenericValue&& value, Allocator& allocator) { + return AddMember(name, value, allocator); + } + GenericValue& AddMember(GenericValue&& name, GenericValue& value, Allocator& allocator) { + return AddMember(name, value, allocator); + } + GenericValue& AddMember(GenericValue& name, GenericValue&& value, Allocator& allocator) { + return AddMember(name, value, allocator); + } + GenericValue& AddMember(StringRefType name, GenericValue&& value, Allocator& allocator) { + GenericValue n(name); + return AddMember(n, value, allocator); + } +#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS + + + //! Add a member (name-value pair) to the object. + /*! \param name A constant string reference as name of member. + \param value Value of any type. + \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \note The ownership of \c value will be transferred to this object on success. + \pre IsObject() + \post value.IsNull() + \note Amortized Constant time complexity. + */ + GenericValue& AddMember(StringRefType name, GenericValue& value, Allocator& allocator) { + GenericValue n(name); + return AddMember(n, value, allocator); + } + + //! Add a constant string value as member (name-value pair) to the object. + /*! \param name A constant string reference as name of member. + \param value constant string reference as value of member. + \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \pre IsObject() + \note This overload is needed to avoid clashes with the generic primitive type AddMember(StringRefType,T,Allocator&) overload below. + \note Amortized Constant time complexity. + */ + GenericValue& AddMember(StringRefType name, StringRefType value, Allocator& allocator) { + GenericValue v(value); + return AddMember(name, v, allocator); + } + + //! Add any primitive value as member (name-value pair) to the object. + /*! \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t + \param name A constant string reference as name of member. + \param value Value of primitive type \c T as value of member + \param allocator Allocator for reallocating memory. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \pre IsObject() + + \note The source type \c T explicitly disallows all pointer types, + especially (\c const) \ref Ch*. This helps avoiding implicitly + referencing character strings with insufficient lifetime, use + \ref AddMember(StringRefType, GenericValue&, Allocator&) or \ref + AddMember(StringRefType, StringRefType, Allocator&). + All other pointer types would implicitly convert to \c bool, + use an explicit cast instead, if needed. + \note Amortized Constant time complexity. 
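+
+        A minimal sketch, assuming an allocator \c a:
+        \code
+        Value o(kObjectType);
+        Value key("frame", a);   // name copied into the allocator
+        o.AddMember(key, 60, a); // key is consumed (left null)
+        \endcode
+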
+ */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (GenericValue&)) + AddMember(StringRefType name, T value, Allocator& allocator) { + GenericValue n(name); + return AddMember(n, value, allocator); + } + + //! Remove all members in the object. + /*! This function do not deallocate memory in the object, i.e. the capacity is unchanged. + \note Linear time complexity. + */ + void RemoveAllMembers() { + RAPIDJSON_ASSERT(IsObject()); + for (MemberIterator m = MemberBegin(); m != MemberEnd(); ++m) + m->~Member(); + data_.o.size = 0; + } + + //! Remove a member in object by its name. + /*! \param name Name of member to be removed. + \return Whether the member existed. + \note This function may reorder the object members. Use \ref + EraseMember(ConstMemberIterator) if you need to preserve the + relative order of the remaining members. + \note Linear time complexity. + */ + bool RemoveMember(const Ch* name) { + GenericValue n(StringRef(name)); + return RemoveMember(n); + } + +#if RAPIDJSON_HAS_STDSTRING + bool RemoveMember(const std::basic_string& name) { return RemoveMember(GenericValue(StringRef(name))); } +#endif + + template + bool RemoveMember(const GenericValue& name) { + MemberIterator m = FindMember(name); + if (m != MemberEnd()) { + RemoveMember(m); + return true; + } + else + return false; + } + + //! Remove a member in object by iterator. + /*! \param m member iterator (obtained by FindMember() or MemberBegin()). + \return the new iterator after removal. + \note This function may reorder the object members. Use \ref + EraseMember(ConstMemberIterator) if you need to preserve the + relative order of the remaining members. + \note Constant time complexity. + */ + MemberIterator RemoveMember(MemberIterator m) { + RAPIDJSON_ASSERT(IsObject()); + RAPIDJSON_ASSERT(data_.o.size > 0); + RAPIDJSON_ASSERT(GetMembersPointer() != 0); + RAPIDJSON_ASSERT(m >= MemberBegin() && m < MemberEnd()); + + MemberIterator last(GetMembersPointer() + (data_.o.size - 1)); + if (data_.o.size > 1 && m != last) + *m = *last; // Move the last one to this place + else + m->~Member(); // Only one left, just destroy + --data_.o.size; + return m; + } + + //! Remove a member from an object by iterator. + /*! \param pos iterator to the member to remove + \pre IsObject() == true && \ref MemberBegin() <= \c pos < \ref MemberEnd() + \return Iterator following the removed element. + If the iterator \c pos refers to the last element, the \ref MemberEnd() iterator is returned. + \note This function preserves the relative order of the remaining object + members. If you do not need this, use the more efficient \ref RemoveMember(MemberIterator). + \note Linear time complexity. + */ + MemberIterator EraseMember(ConstMemberIterator pos) { + return EraseMember(pos, pos +1); + } + + //! Remove members in the range [first, last) from an object. + /*! \param first iterator to the first member to remove + \param last iterator following the last member to remove + \pre IsObject() == true && \ref MemberBegin() <= \c first <= \c last <= \ref MemberEnd() + \return Iterator following the last removed element. + \note This function preserves the relative order of the remaining object + members. + \note Linear time complexity. 
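+
+        A minimal sketch, assuming \c d is an object holding at least the
+        members to be erased:
+        \code
+        // Drop everything before "keep", preserving the order of the rest:
+        Value::MemberIterator keep = d.FindMember("keep");
+        d.EraseMember(d.MemberBegin(), keep);
+        \endcode
+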
+ */ + MemberIterator EraseMember(ConstMemberIterator first, ConstMemberIterator last) { + RAPIDJSON_ASSERT(IsObject()); + RAPIDJSON_ASSERT(data_.o.size > 0); + RAPIDJSON_ASSERT(GetMembersPointer() != 0); + RAPIDJSON_ASSERT(first >= MemberBegin()); + RAPIDJSON_ASSERT(first <= last); + RAPIDJSON_ASSERT(last <= MemberEnd()); + + MemberIterator pos = MemberBegin() + (first - MemberBegin()); + for (MemberIterator itr = pos; itr != last; ++itr) + itr->~Member(); + std::memmove(static_cast(&*pos), &*last, static_cast(MemberEnd() - last) * sizeof(Member)); + data_.o.size -= static_cast(last - first); + return pos; + } + + //! Erase a member in object by its name. + /*! \param name Name of member to be removed. + \return Whether the member existed. + \note Linear time complexity. + */ + bool EraseMember(const Ch* name) { + GenericValue n(StringRef(name)); + return EraseMember(n); + } + +#if RAPIDJSON_HAS_STDSTRING + bool EraseMember(const std::basic_string& name) { return EraseMember(GenericValue(StringRef(name))); } +#endif + + template + bool EraseMember(const GenericValue& name) { + MemberIterator m = FindMember(name); + if (m != MemberEnd()) { + EraseMember(m); + return true; + } + else + return false; + } + + Object GetObject() { RAPIDJSON_ASSERT(IsObject()); return Object(*this); } + ConstObject GetObject() const { RAPIDJSON_ASSERT(IsObject()); return ConstObject(*this); } + + //@} + + //!@name Array + //@{ + + //! Set this value as an empty array. + /*! \post IsArray == true */ + GenericValue& SetArray() { this->~GenericValue(); new (this) GenericValue(kArrayType); return *this; } + + //! Get the number of elements in array. + SizeType Size() const { RAPIDJSON_ASSERT(IsArray()); return data_.a.size; } + + //! Get the capacity of array. + SizeType Capacity() const { RAPIDJSON_ASSERT(IsArray()); return data_.a.capacity; } + + //! Check whether the array is empty. + bool Empty() const { RAPIDJSON_ASSERT(IsArray()); return data_.a.size == 0; } + + //! Remove all elements in the array. + /*! This function do not deallocate memory in the array, i.e. the capacity is unchanged. + \note Linear time complexity. + */ + void Clear() { + RAPIDJSON_ASSERT(IsArray()); + GenericValue* e = GetElementsPointer(); + for (GenericValue* v = e; v != e + data_.a.size; ++v) + v->~GenericValue(); + data_.a.size = 0; + } + + //! Get an element from array by index. + /*! \pre IsArray() == true + \param index Zero-based index of element. + \see operator[](T*) + */ + GenericValue& operator[](SizeType index) { + RAPIDJSON_ASSERT(IsArray()); + RAPIDJSON_ASSERT(index < data_.a.size); + return GetElementsPointer()[index]; + } + const GenericValue& operator[](SizeType index) const { return const_cast(*this)[index]; } + + //! Element iterator + /*! \pre IsArray() == true */ + ValueIterator Begin() { RAPIDJSON_ASSERT(IsArray()); return GetElementsPointer(); } + //! \em Past-the-end element iterator + /*! \pre IsArray() == true */ + ValueIterator End() { RAPIDJSON_ASSERT(IsArray()); return GetElementsPointer() + data_.a.size; } + //! Constant element iterator + /*! \pre IsArray() == true */ + ConstValueIterator Begin() const { return const_cast(*this).Begin(); } + //! Constant \em past-the-end element iterator + /*! \pre IsArray() == true */ + ConstValueIterator End() const { return const_cast(*this).End(); } + + //! Request the array to have enough capacity to store elements. + /*! \param newCapacity The capacity that the array at least need to have. + \param allocator Allocator for reallocating memory. 
It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \note Linear time complexity. + */ + GenericValue& Reserve(SizeType newCapacity, Allocator &allocator) { + RAPIDJSON_ASSERT(IsArray()); + if (newCapacity > data_.a.capacity) { + SetElementsPointer(reinterpret_cast(allocator.Realloc(GetElementsPointer(), data_.a.capacity * sizeof(GenericValue), newCapacity * sizeof(GenericValue)))); + data_.a.capacity = newCapacity; + } + return *this; + } + + //! Append a GenericValue at the end of the array. + /*! \param value Value to be appended. + \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \pre IsArray() == true + \post value.IsNull() == true + \return The value itself for fluent API. + \note The ownership of \c value will be transferred to this array on success. + \note If the number of elements to be appended is known, calls Reserve() once first may be more efficient. + \note Amortized constant time complexity. + */ + GenericValue& PushBack(GenericValue& value, Allocator& allocator) { + RAPIDJSON_ASSERT(IsArray()); + if (data_.a.size >= data_.a.capacity) + Reserve(data_.a.capacity == 0 ? kDefaultArrayCapacity : (data_.a.capacity + (data_.a.capacity + 1) / 2), allocator); + GetElementsPointer()[data_.a.size++].RawAssign(value); + return *this; + } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + GenericValue& PushBack(GenericValue&& value, Allocator& allocator) { + return PushBack(value, allocator); + } +#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS + + //! Append a constant string reference at the end of the array. + /*! \param value Constant string reference to be appended. + \param allocator Allocator for reallocating memory. It must be the same one used previously. Commonly use GenericDocument::GetAllocator(). + \pre IsArray() == true + \return The value itself for fluent API. + \note If the number of elements to be appended is known, calls Reserve() once first may be more efficient. + \note Amortized constant time complexity. + \see GenericStringRef + */ + GenericValue& PushBack(StringRefType value, Allocator& allocator) { + return (*this).template PushBack(value, allocator); + } + + //! Append a primitive value at the end of the array. + /*! \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t + \param value Value of primitive type T to be appended. + \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \pre IsArray() == true + \return The value itself for fluent API. + \note If the number of elements to be appended is known, calls Reserve() once first may be more efficient. + + \note The source type \c T explicitly disallows all pointer types, + especially (\c const) \ref Ch*. This helps avoiding implicitly + referencing character strings with insufficient lifetime, use + \ref PushBack(GenericValue&, Allocator&) or \ref + PushBack(StringRefType, Allocator&). + All other pointer types would implicitly convert to \c bool, + use an explicit cast instead, if needed. + \note Amortized constant time complexity. + */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (GenericValue&)) + PushBack(T value, Allocator& allocator) { + GenericValue v(value); + return PushBack(v, allocator); + } + + //! Remove the last element in the array. + /*! + \note Constant time complexity. 
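+
+        A minimal sketch pairing \ref PushBack() with PopBack(), assuming an
+        allocator \c a:
+        \code
+        Value arr(kArrayType);
+        arr.PushBack(1, a).PushBack(2, a); // fluent API
+        arr.PopBack();                     // arr is now [1]
+        \endcode
+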
+     */
+    GenericValue& PopBack() {
+        RAPIDJSON_ASSERT(IsArray());
+        RAPIDJSON_ASSERT(!Empty());
+        GetElementsPointer()[--data_.a.size].~GenericValue();
+        return *this;
+    }
+
+    //! Remove an element of array by iterator.
+    /*!
+        \param pos iterator to the element to remove
+        \pre IsArray() == true && \ref Begin() <= \c pos < \ref End()
+        \return Iterator following the removed element. If the iterator \c pos refers to the last element, the End() iterator is returned.
+        \note Linear time complexity.
+    */
+    ValueIterator Erase(ConstValueIterator pos) {
+        return Erase(pos, pos + 1);
+    }
+
+    //! Remove elements in the range [first, last) of the array.
+    /*!
+        \param first iterator to the first element to remove
+        \param last iterator following the last element to remove
+        \pre IsArray() == true && \ref Begin() <= \c first <= \c last <= \ref End()
+        \return Iterator following the last removed element.
+        \note Linear time complexity.
+    */
+    ValueIterator Erase(ConstValueIterator first, ConstValueIterator last) {
+        RAPIDJSON_ASSERT(IsArray());
+        RAPIDJSON_ASSERT(data_.a.size > 0);
+        RAPIDJSON_ASSERT(GetElementsPointer() != 0);
+        RAPIDJSON_ASSERT(first >= Begin());
+        RAPIDJSON_ASSERT(first <= last);
+        RAPIDJSON_ASSERT(last <= End());
+        ValueIterator pos = Begin() + (first - Begin());
+        for (ValueIterator itr = pos; itr != last; ++itr)
+            itr->~GenericValue();
+        std::memmove(static_cast<void*>(pos), last, static_cast<size_t>(End() - last) * sizeof(GenericValue));
+        data_.a.size -= static_cast<SizeType>(last - first);
+        return pos;
+    }
+
+    Array GetArray() { RAPIDJSON_ASSERT(IsArray()); return Array(*this); }
+    ConstArray GetArray() const { RAPIDJSON_ASSERT(IsArray()); return ConstArray(*this); }
+
+    //@}
+
+    //!@name Number
+    //@{
+
+    int GetInt() const { RAPIDJSON_ASSERT(data_.f.flags & kIntFlag); return data_.n.i.i; }
+    unsigned GetUint() const { RAPIDJSON_ASSERT(data_.f.flags & kUintFlag); return data_.n.u.u; }
+    int64_t GetInt64() const { RAPIDJSON_ASSERT(data_.f.flags & kInt64Flag); return data_.n.i64; }
+    uint64_t GetUint64() const { RAPIDJSON_ASSERT(data_.f.flags & kUint64Flag); return data_.n.u64; }
+
+    //! Get the value as double type.
+    /*! \note If the value is 64-bit integer type, it may lose precision. Use \c IsLosslessDouble() to check whether the conversion is lossless.
+    */
+    double GetDouble() const {
+        RAPIDJSON_ASSERT(IsNumber());
+        if ((data_.f.flags & kDoubleFlag) != 0) return data_.n.d;   // exact type, no conversion.
+        if ((data_.f.flags & kIntFlag) != 0)    return data_.n.i.i; // int -> double
+        if ((data_.f.flags & kUintFlag) != 0)   return data_.n.u.u; // unsigned -> double
+        if ((data_.f.flags & kInt64Flag) != 0)  return static_cast<double>(data_.n.i64); // int64_t -> double (may lose precision)
+        RAPIDJSON_ASSERT((data_.f.flags & kUint64Flag) != 0); return static_cast<double>(data_.n.u64); // uint64_t -> double (may lose precision)
+    }
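+
+    // Editorial sketch of the widening rules above (assumes Value v(42)):
+    //   v.IsInt()     -> true
+    //   v.IsDouble()  -> false   (42 is stored with kNumberIntFlag, not as a double)
+    //   v.GetDouble() -> 42.0    (converted through the int -> double path)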
+
+    //! Get the value as float type.
+    /*! \note If the value is 64-bit integer type, it may lose precision. Use \c IsLosslessFloat() to check whether the conversion is lossless.
+    */
+    float GetFloat() const {
+        return static_cast<float>(GetDouble());
+    }
+
+    GenericValue& SetInt(int i) { this->~GenericValue(); new (this) GenericValue(i); return *this; }
+    GenericValue& SetUint(unsigned u) { this->~GenericValue(); new (this) GenericValue(u); return *this; }
+    GenericValue& SetInt64(int64_t i64) { this->~GenericValue(); new (this) GenericValue(i64); return *this; }
+    GenericValue& SetUint64(uint64_t u64) { this->~GenericValue(); new (this) GenericValue(u64); return *this; }
+    GenericValue& SetDouble(double d) { this->~GenericValue(); new (this) GenericValue(d); return *this; }
+    GenericValue& SetFloat(float f) { this->~GenericValue(); new (this) GenericValue(static_cast<double>(f)); return *this; }
+
+    //@}
+
+    //!@name String
+    //@{
+
+    const Ch* GetString() const { RAPIDJSON_ASSERT(IsString()); return (data_.f.flags & kInlineStrFlag) ? data_.ss.str : GetStringPointer(); }
+
+    //! Get the length of string.
+    /*! Since rapidjson permits "\\u0000" in the json string, strlen(v.GetString()) may not equal v.GetStringLength().
+    */
+    SizeType GetStringLength() const { RAPIDJSON_ASSERT(IsString()); return ((data_.f.flags & kInlineStrFlag) ? (data_.ss.GetLength()) : data_.s.length); }
+
+    //! Set this value as a string without copying the source string.
+    /*! This version has better performance with a supplied length, and also supports strings containing the null character.
+        \param s source string pointer.
+        \param length The length of source string, excluding the trailing null terminator.
+        \return The value itself for fluent API.
+        \post IsString() == true && GetString() == s && GetStringLength() == length
+        \see SetString(StringRefType)
+    */
+    GenericValue& SetString(const Ch* s, SizeType length) { return SetString(StringRef(s, length)); }
+
+    //! Set this value as a string without copying the source string.
+    /*! \param s source string reference
+        \return The value itself for fluent API.
+        \post IsString() == true && GetString() == s && GetStringLength() == s.length
+    */
+    GenericValue& SetString(StringRefType s) { this->~GenericValue(); SetStringRaw(s); return *this; }
+
+    //! Set this value as a string by copying from the source string.
+    /*! This version has better performance with a supplied length, and also supports strings containing the null character.
+        \param s source string.
+        \param length The length of source string, excluding the trailing null terminator.
+        \param allocator Allocator for allocating copied buffer. Commonly use GenericDocument::GetAllocator().
+        \return The value itself for fluent API.
+        \post IsString() == true && GetString() != s && strcmp(GetString(),s) == 0 && GetStringLength() == length
+    */
+    GenericValue& SetString(const Ch* s, SizeType length, Allocator& allocator) { return SetString(StringRef(s, length), allocator); }
+
+    //! Set this value as a string by copying from the source string.
+    /*! \param s source string.
+        \param allocator Allocator for allocating copied buffer. Commonly use GenericDocument::GetAllocator().
+        \return The value itself for fluent API.
+        \post IsString() == true && GetString() != s && strcmp(GetString(),s) == 0 && GetStringLength() == length
+    */
+    GenericValue& SetString(const Ch* s, Allocator& allocator) { return SetString(StringRef(s), allocator); }
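+
+    // Editorial sketch of the ownership difference between the overloads
+    // above (assumes a Document d in scope for its allocator):
+    //   Value v;
+    //   v.SetString("static text");                  // stores a reference only;
+    //                                                // the buffer must outlive v
+    //   std::string tmp("transient");
+    //   v.SetString(tmp.c_str(), d.GetAllocator());  // copies, so tmp may be destroyed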
+
+    //! Set this value as a string by copying from the source string.
+    /*! \param s source string reference
+        \param allocator Allocator for allocating copied buffer. Commonly use GenericDocument::GetAllocator().
+        \return The value itself for fluent API.
+        \post IsString() == true && GetString() != s.s && strcmp(GetString(),s) == 0 && GetStringLength() == length
+    */
+    GenericValue& SetString(StringRefType s, Allocator& allocator) { this->~GenericValue(); SetStringRaw(s, allocator); return *this; }
+
+#if RAPIDJSON_HAS_STDSTRING
+    //! Set this value as a string by copying from the source string.
+    /*! \param s source string.
+        \param allocator Allocator for allocating copied buffer. Commonly use GenericDocument::GetAllocator().
+        \return The value itself for fluent API.
+        \post IsString() == true && GetString() != s.data() && strcmp(GetString(),s.data()) == 0 && GetStringLength() == s.size()
+        \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING.
+    */
+    GenericValue& SetString(const std::basic_string<Ch>& s, Allocator& allocator) { return SetString(StringRef(s), allocator); }
+#endif
+
+    //@}
+
+    //!@name Array
+    //@{
+
+    //! Templated version for checking whether this value is type T.
+    /*!
+        \tparam T Either \c bool, \c int, \c unsigned, \c int64_t, \c uint64_t, \c double, \c float, \c const \c char*, \c std::basic_string<Ch>
+    */
+    template <typename T>
+    bool Is() const { return internal::TypeHelper<ValueType, T>::Is(*this); }
+
+    template <typename T>
+    T Get() const { return internal::TypeHelper<ValueType, T>::Get(*this); }
+
+    template <typename T>
+    T Get() { return internal::TypeHelper<ValueType, T>::Get(*this); }
+
+    template <typename T>
+    ValueType& Set(const T& data) { return internal::TypeHelper<ValueType, T>::Set(*this, data); }
+
+    template <typename T>
+    ValueType& Set(const T& data, AllocatorType& allocator) { return internal::TypeHelper<ValueType, T>::Set(*this, data, allocator); }
+
+    //@}
+
+    //! Generate events of this value to a Handler.
+    /*! This function adopts the GoF visitor pattern.
+        Typical usage is to output this JSON value as JSON text via Writer, which is a Handler.
+        It can also be used to deep clone this value via GenericDocument, which is also a Handler.
+        \tparam Handler type of handler.
+        \param handler An object implementing concept Handler.
+    */
+    template <typename Handler>
+    bool Accept(Handler& handler) const {
+        switch(GetType()) {
+        case kNullType:     return handler.Null();
+        case kFalseType:    return handler.Bool(false);
+        case kTrueType:     return handler.Bool(true);
+
+        case kObjectType:
+            if (RAPIDJSON_UNLIKELY(!handler.StartObject()))
+                return false;
+            for (ConstMemberIterator m = MemberBegin(); m != MemberEnd(); ++m) {
+                RAPIDJSON_ASSERT(m->name.IsString()); // User may change the type of name by MemberIterator.
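+                // (Editorial note: each member emits one Key event for the name,
+                // followed by the events of its value; the third Key argument
+                // tells the handler whether it must copy the string.)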
+ if (RAPIDJSON_UNLIKELY(!handler.Key(m->name.GetString(), m->name.GetStringLength(), (m->name.data_.f.flags & kCopyFlag) != 0))) + return false; + if (RAPIDJSON_UNLIKELY(!m->value.Accept(handler))) + return false; + } + return handler.EndObject(data_.o.size); + + case kArrayType: + if (RAPIDJSON_UNLIKELY(!handler.StartArray())) + return false; + for (const GenericValue* v = Begin(); v != End(); ++v) + if (RAPIDJSON_UNLIKELY(!v->Accept(handler))) + return false; + return handler.EndArray(data_.a.size); + + case kStringType: + return handler.String(GetString(), GetStringLength(), (data_.f.flags & kCopyFlag) != 0); + + default: + RAPIDJSON_ASSERT(GetType() == kNumberType); + if (IsDouble()) return handler.Double(data_.n.d); + else if (IsInt()) return handler.Int(data_.n.i.i); + else if (IsUint()) return handler.Uint(data_.n.u.u); + else if (IsInt64()) return handler.Int64(data_.n.i64); + else return handler.Uint64(data_.n.u64); + } + } + +private: + template friend class GenericValue; + template friend class GenericDocument; + + enum { + kBoolFlag = 0x0008, + kNumberFlag = 0x0010, + kIntFlag = 0x0020, + kUintFlag = 0x0040, + kInt64Flag = 0x0080, + kUint64Flag = 0x0100, + kDoubleFlag = 0x0200, + kStringFlag = 0x0400, + kCopyFlag = 0x0800, + kInlineStrFlag = 0x1000, + + // Initial flags of different types. + kNullFlag = kNullType, + kTrueFlag = kTrueType | kBoolFlag, + kFalseFlag = kFalseType | kBoolFlag, + kNumberIntFlag = kNumberType | kNumberFlag | kIntFlag | kInt64Flag, + kNumberUintFlag = kNumberType | kNumberFlag | kUintFlag | kUint64Flag | kInt64Flag, + kNumberInt64Flag = kNumberType | kNumberFlag | kInt64Flag, + kNumberUint64Flag = kNumberType | kNumberFlag | kUint64Flag, + kNumberDoubleFlag = kNumberType | kNumberFlag | kDoubleFlag, + kNumberAnyFlag = kNumberType | kNumberFlag | kIntFlag | kInt64Flag | kUintFlag | kUint64Flag | kDoubleFlag, + kConstStringFlag = kStringType | kStringFlag, + kCopyStringFlag = kStringType | kStringFlag | kCopyFlag, + kShortStringFlag = kStringType | kStringFlag | kCopyFlag | kInlineStrFlag, + kObjectFlag = kObjectType, + kArrayFlag = kArrayType, + + kTypeMask = 0x07 + }; + + static const SizeType kDefaultArrayCapacity = RAPIDJSON_VALUE_DEFAULT_ARRAY_CAPACITY; + static const SizeType kDefaultObjectCapacity = RAPIDJSON_VALUE_DEFAULT_OBJECT_CAPACITY; + + struct Flag { +#if RAPIDJSON_48BITPOINTER_OPTIMIZATION + char payload[sizeof(SizeType) * 2 + 6]; // 2 x SizeType + lower 48-bit pointer +#elif RAPIDJSON_64BIT + char payload[sizeof(SizeType) * 2 + sizeof(void*) + 6]; // 6 padding bytes +#else + char payload[sizeof(SizeType) * 2 + sizeof(void*) + 2]; // 2 padding bytes +#endif + uint16_t flags; + }; + + struct String { + SizeType length; + SizeType hashcode; //!< reserved + const Ch* str; + }; // 12 bytes in 32-bit mode, 16 bytes in 64-bit mode + + // implementation detail: ShortString can represent zero-terminated strings up to MaxSize chars + // (excluding the terminating zero) and store a value to determine the length of the contained + // string in the last character str[LenPos] by storing "MaxSize - length" there. If the string + // to store has the maximal length of MaxSize then str[LenPos] will be 0 and therefore act as + // the string terminator as well. For getting the string length back from that value just use + // "MaxSize - str[LenPos]". + // This allows to store 13-chars strings in 32-bit mode, 21-chars strings in 64-bit mode, + // 13-chars strings for RAPIDJSON_48BITPOINTER_OPTIMIZATION=1 inline (for `UTF8`-encoded strings). 
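+    // Editorial worked example of the sizes quoted above: the Flag payload is
+    // 2*sizeof(SizeType) + sizeof(void*) + 6 = 22 bytes on a typical 64-bit
+    // build, so MaxChars = 22 and MaxSize = 21 for char; in 32-bit mode it is
+    // 2*4 + 4 + 2 = 14 bytes, hence the 13-char inline limit.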
+ struct ShortString { + enum { MaxChars = sizeof(static_cast(0)->payload) / sizeof(Ch), MaxSize = MaxChars - 1, LenPos = MaxSize }; + Ch str[MaxChars]; + + inline static bool Usable(SizeType len) { return (MaxSize >= len); } + inline void SetLength(SizeType len) { str[LenPos] = static_cast(MaxSize - len); } + inline SizeType GetLength() const { return static_cast(MaxSize - str[LenPos]); } + }; // at most as many bytes as "String" above => 12 bytes in 32-bit mode, 16 bytes in 64-bit mode + + // By using proper binary layout, retrieval of different integer types do not need conversions. + union Number { +#if RAPIDJSON_ENDIAN == RAPIDJSON_LITTLEENDIAN + struct I { + int i; + char padding[4]; + }i; + struct U { + unsigned u; + char padding2[4]; + }u; +#else + struct I { + char padding[4]; + int i; + }i; + struct U { + char padding2[4]; + unsigned u; + }u; +#endif + int64_t i64; + uint64_t u64; + double d; + }; // 8 bytes + + struct ObjectData { + SizeType size; + SizeType capacity; + Member* members; + }; // 12 bytes in 32-bit mode, 16 bytes in 64-bit mode + + struct ArrayData { + SizeType size; + SizeType capacity; + GenericValue* elements; + }; // 12 bytes in 32-bit mode, 16 bytes in 64-bit mode + + union Data { + String s; + ShortString ss; + Number n; + ObjectData o; + ArrayData a; + Flag f; + }; // 16 bytes in 32-bit mode, 24 bytes in 64-bit mode, 16 bytes in 64-bit with RAPIDJSON_48BITPOINTER_OPTIMIZATION + + RAPIDJSON_FORCEINLINE const Ch* GetStringPointer() const { return RAPIDJSON_GETPOINTER(Ch, data_.s.str); } + RAPIDJSON_FORCEINLINE const Ch* SetStringPointer(const Ch* str) { return RAPIDJSON_SETPOINTER(Ch, data_.s.str, str); } + RAPIDJSON_FORCEINLINE GenericValue* GetElementsPointer() const { return RAPIDJSON_GETPOINTER(GenericValue, data_.a.elements); } + RAPIDJSON_FORCEINLINE GenericValue* SetElementsPointer(GenericValue* elements) { return RAPIDJSON_SETPOINTER(GenericValue, data_.a.elements, elements); } + RAPIDJSON_FORCEINLINE Member* GetMembersPointer() const { return RAPIDJSON_GETPOINTER(Member, data_.o.members); } + RAPIDJSON_FORCEINLINE Member* SetMembersPointer(Member* members) { return RAPIDJSON_SETPOINTER(Member, data_.o.members, members); } + + // Initialize this value as array with initial data, without calling destructor. + void SetArrayRaw(GenericValue* values, SizeType count, Allocator& allocator) { + data_.f.flags = kArrayFlag; + if (count) { + GenericValue* e = static_cast(allocator.Malloc(count * sizeof(GenericValue))); + SetElementsPointer(e); + std::memcpy(static_cast(e), values, count * sizeof(GenericValue)); + } + else + SetElementsPointer(0); + data_.a.size = data_.a.capacity = count; + } + + //! Initialize this value as object with initial data, without calling destructor. + void SetObjectRaw(Member* members, SizeType count, Allocator& allocator) { + data_.f.flags = kObjectFlag; + if (count) { + Member* m = static_cast(allocator.Malloc(count * sizeof(Member))); + SetMembersPointer(m); + std::memcpy(static_cast(m), members, count * sizeof(Member)); + } + else + SetMembersPointer(0); + data_.o.size = data_.o.capacity = count; + } + + //! Initialize this value as constant string, without calling destructor. + void SetStringRaw(StringRefType s) RAPIDJSON_NOEXCEPT { + data_.f.flags = kConstStringFlag; + SetStringPointer(s); + data_.s.length = s.length; + } + + //! Initialize this value as copy string with initial data, without calling destructor. 
+    void SetStringRaw(StringRefType s, Allocator& allocator) {
+        Ch* str = 0;
+        if (ShortString::Usable(s.length)) {
+            data_.f.flags = kShortStringFlag;
+            data_.ss.SetLength(s.length);
+            str = data_.ss.str;
+        } else {
+            data_.f.flags = kCopyStringFlag;
+            data_.s.length = s.length;
+            str = static_cast<Ch*>(allocator.Malloc((s.length + 1) * sizeof(Ch)));
+            SetStringPointer(str);
+        }
+        std::memcpy(str, s, s.length * sizeof(Ch));
+        str[s.length] = '\0';
+    }
+
+    //! Assignment without calling destructor
+    void RawAssign(GenericValue& rhs) RAPIDJSON_NOEXCEPT {
+        data_ = rhs.data_;
+        // data_.f.flags = rhs.data_.f.flags;
+        rhs.data_.f.flags = kNullFlag;
+    }
+
+    template <typename SourceAllocator>
+    bool StringEqual(const GenericValue<Encoding, SourceAllocator>& rhs) const {
+        RAPIDJSON_ASSERT(IsString());
+        RAPIDJSON_ASSERT(rhs.IsString());
+
+        const SizeType len1 = GetStringLength();
+        const SizeType len2 = rhs.GetStringLength();
+        if(len1 != len2) { return false; }
+
+        const Ch* const str1 = GetString();
+        const Ch* const str2 = rhs.GetString();
+        if(str1 == str2) { return true; } // fast path for constant string
+
+        return (std::memcmp(str1, str2, sizeof(Ch) * len1) == 0);
+    }
+
+    Data data_;
+};
+
+//! GenericValue with UTF8 encoding
+typedef GenericValue<UTF8<> > Value;
+
+///////////////////////////////////////////////////////////////////////////////
+// GenericDocument
+
+//! A document for parsing JSON text as DOM.
+/*!
+    \note implements Handler concept
+    \tparam Encoding Encoding for both parsing and string storage.
+    \tparam Allocator Allocator for allocating memory for the DOM
+    \tparam StackAllocator Allocator for allocating memory for stack during parsing.
+    \warning Although GenericDocument inherits from GenericValue, the API does \b not provide any virtual functions, especially no virtual destructor. To avoid memory leaks, do not \c delete a GenericDocument object via a pointer to a GenericValue.
+*/
+template <typename Encoding, typename Allocator = RAPIDJSON_DEFAULT_ALLOCATOR, typename StackAllocator = RAPIDJSON_DEFAULT_STACK_ALLOCATOR>
+class GenericDocument : public GenericValue<Encoding, Allocator> {
+public:
+    typedef typename Encoding::Ch Ch;                    //!< Character type derived from Encoding.
+    typedef GenericValue<Encoding, Allocator> ValueType; //!< Value type of the document.
+    typedef Allocator AllocatorType;                     //!< Allocator type from template parameter.
+
+    //! Constructor
+    /*! Creates an empty document of the specified type.
+        \param type             Mandatory type of object to create.
+        \param allocator        Optional allocator for allocating memory.
+        \param stackCapacity    Optional initial capacity of stack in bytes.
+        \param stackAllocator   Optional allocator for allocating memory for stack.
+    */
+    explicit GenericDocument(Type type, Allocator* allocator = 0, size_t stackCapacity = kDefaultStackCapacity, StackAllocator* stackAllocator = 0) :
+        GenericValue<Encoding, Allocator>(type), allocator_(allocator), ownAllocator_(0), stack_(stackAllocator, stackCapacity), parseResult_()
+    {
+        if (!allocator_)
+            ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)();
+    }
+
+    //! Constructor
+    /*! Creates an empty document whose type is Null.
+        \param allocator        Optional allocator for allocating memory.
+        \param stackCapacity    Optional initial capacity of stack in bytes.
+        \param stackAllocator   Optional allocator for allocating memory for stack.
+    */
+    GenericDocument(Allocator* allocator = 0, size_t stackCapacity = kDefaultStackCapacity, StackAllocator* stackAllocator = 0) :
+        allocator_(allocator), ownAllocator_(0), stack_(stackAllocator, stackCapacity), parseResult_()
+    {
+        if (!allocator_)
+            ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)();
+    }
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+    //!
Move constructor in C++11 + GenericDocument(GenericDocument&& rhs) RAPIDJSON_NOEXCEPT + : ValueType(std::forward(rhs)), // explicit cast to avoid prohibited move from Document + allocator_(rhs.allocator_), + ownAllocator_(rhs.ownAllocator_), + stack_(std::move(rhs.stack_)), + parseResult_(rhs.parseResult_) + { + rhs.allocator_ = 0; + rhs.ownAllocator_ = 0; + rhs.parseResult_ = ParseResult(); + } +#endif + + ~GenericDocument() { + Destroy(); + } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + //! Move assignment in C++11 + GenericDocument& operator=(GenericDocument&& rhs) RAPIDJSON_NOEXCEPT + { + // The cast to ValueType is necessary here, because otherwise it would + // attempt to call GenericValue's templated assignment operator. + ValueType::operator=(std::forward(rhs)); + + // Calling the destructor here would prematurely call stack_'s destructor + Destroy(); + + allocator_ = rhs.allocator_; + ownAllocator_ = rhs.ownAllocator_; + stack_ = std::move(rhs.stack_); + parseResult_ = rhs.parseResult_; + + rhs.allocator_ = 0; + rhs.ownAllocator_ = 0; + rhs.parseResult_ = ParseResult(); + + return *this; + } +#endif + + //! Exchange the contents of this document with those of another. + /*! + \param rhs Another document. + \note Constant complexity. + \see GenericValue::Swap + */ + GenericDocument& Swap(GenericDocument& rhs) RAPIDJSON_NOEXCEPT { + ValueType::Swap(rhs); + stack_.Swap(rhs.stack_); + internal::Swap(allocator_, rhs.allocator_); + internal::Swap(ownAllocator_, rhs.ownAllocator_); + internal::Swap(parseResult_, rhs.parseResult_); + return *this; + } + + // Allow Swap with ValueType. + // Refer to Effective C++ 3rd Edition/Item 33: Avoid hiding inherited names. + using ValueType::Swap; + + //! free-standing swap function helper + /*! + Helper function to enable support for common swap implementation pattern based on \c std::swap: + \code + void swap(MyClass& a, MyClass& b) { + using std::swap; + swap(a.doc, b.doc); + // ... + } + \endcode + \see Swap() + */ + friend inline void swap(GenericDocument& a, GenericDocument& b) RAPIDJSON_NOEXCEPT { a.Swap(b); } + + //! Populate this document by a generator which produces SAX events. + /*! \tparam Generator A functor with bool f(Handler) prototype. + \param g Generator functor which sends SAX events to the parameter. + \return The document itself for fluent API. + */ + template + GenericDocument& Populate(Generator& g) { + ClearStackOnExit scope(*this); + if (g(*this)) { + RAPIDJSON_ASSERT(stack_.GetSize() == sizeof(ValueType)); // Got one and only one root object + ValueType::operator=(*stack_.template Pop(1));// Move value from stack to document + } + return *this; + } + + //!@name Parse from stream + //!@{ + + //! Parse JSON text from an input stream (with Encoding conversion) + /*! \tparam parseFlags Combination of \ref ParseFlag. + \tparam SourceEncoding Encoding of input stream + \tparam InputStream Type of input stream, implementing Stream concept + \param is Input stream to be parsed. + \return The document itself for fluent API. + */ + template + GenericDocument& ParseStream(InputStream& is) { + GenericReader reader( + stack_.HasAllocator() ? &stack_.GetAllocator() : 0); + ClearStackOnExit scope(*this); + parseResult_ = reader.template Parse(is, *this); + if (parseResult_) { + RAPIDJSON_ASSERT(stack_.GetSize() == sizeof(ValueType)); // Got one and only one root object + ValueType::operator=(*stack_.template Pop(1));// Move value from stack to document + } + return *this; + } + + //! Parse JSON text from an input stream + /*! 
\tparam parseFlags Combination of \ref ParseFlag. + \tparam InputStream Type of input stream, implementing Stream concept + \param is Input stream to be parsed. + \return The document itself for fluent API. + */ + template + GenericDocument& ParseStream(InputStream& is) { + return ParseStream(is); + } + + //! Parse JSON text from an input stream (with \ref kParseDefaultFlags) + /*! \tparam InputStream Type of input stream, implementing Stream concept + \param is Input stream to be parsed. + \return The document itself for fluent API. + */ + template + GenericDocument& ParseStream(InputStream& is) { + return ParseStream(is); + } + //!@} + + //!@name Parse in-place from mutable string + //!@{ + + //! Parse JSON text from a mutable string + /*! \tparam parseFlags Combination of \ref ParseFlag. + \param str Mutable zero-terminated string to be parsed. + \return The document itself for fluent API. + */ + template + GenericDocument& ParseInsitu(Ch* str) { + GenericInsituStringStream s(str); + return ParseStream(s); + } + + //! Parse JSON text from a mutable string (with \ref kParseDefaultFlags) + /*! \param str Mutable zero-terminated string to be parsed. + \return The document itself for fluent API. + */ + GenericDocument& ParseInsitu(Ch* str) { + return ParseInsitu(str); + } + //!@} + + //!@name Parse from read-only string + //!@{ + + //! Parse JSON text from a read-only string (with Encoding conversion) + /*! \tparam parseFlags Combination of \ref ParseFlag (must not contain \ref kParseInsituFlag). + \tparam SourceEncoding Transcoding from input Encoding + \param str Read-only zero-terminated string to be parsed. + */ + template + GenericDocument& Parse(const typename SourceEncoding::Ch* str) { + RAPIDJSON_ASSERT(!(parseFlags & kParseInsituFlag)); + GenericStringStream s(str); + return ParseStream(s); + } + + //! Parse JSON text from a read-only string + /*! \tparam parseFlags Combination of \ref ParseFlag (must not contain \ref kParseInsituFlag). + \param str Read-only zero-terminated string to be parsed. + */ + template + GenericDocument& Parse(const Ch* str) { + return Parse(str); + } + + //! Parse JSON text from a read-only string (with \ref kParseDefaultFlags) + /*! \param str Read-only zero-terminated string to be parsed. + */ + GenericDocument& Parse(const Ch* str) { + return Parse(str); + } + + template + GenericDocument& Parse(const typename SourceEncoding::Ch* str, size_t length) { + RAPIDJSON_ASSERT(!(parseFlags & kParseInsituFlag)); + MemoryStream ms(reinterpret_cast(str), length * sizeof(typename SourceEncoding::Ch)); + EncodedInputStream is(ms); + ParseStream(is); + return *this; + } + + template + GenericDocument& Parse(const Ch* str, size_t length) { + return Parse(str, length); + } + + GenericDocument& Parse(const Ch* str, size_t length) { + return Parse(str, length); + } + +#if RAPIDJSON_HAS_STDSTRING + template + GenericDocument& Parse(const std::basic_string& str) { + // c_str() is constant complexity according to standard. Should be faster than Parse(const char*, size_t) + return Parse(str.c_str()); + } + + template + GenericDocument& Parse(const std::basic_string& str) { + return Parse(str.c_str()); + } + + GenericDocument& Parse(const std::basic_string& str) { + return Parse(str); + } +#endif // RAPIDJSON_HAS_STDSTRING + + //!@} + + //!@name Handling parse errors + //!@{ + + //! Whether a parse error has occurred in the last parsing. + bool HasParseError() const { return parseResult_.IsError(); } + + //! Get the \ref ParseErrorCode of last parsing. 
+ ParseErrorCode GetParseError() const { return parseResult_.Code(); } + + //! Get the position of last parsing error in input, 0 otherwise. + size_t GetErrorOffset() const { return parseResult_.Offset(); } + + //! Implicit conversion to get the last parse result +#ifndef __clang // -Wdocumentation + /*! \return \ref ParseResult of the last parse operation + + \code + Document doc; + ParseResult ok = doc.Parse(json); + if (!ok) + printf( "JSON parse error: %s (%u)\n", GetParseError_En(ok.Code()), ok.Offset()); + \endcode + */ +#endif + operator ParseResult() const { return parseResult_; } + //!@} + + //! Get the allocator of this document. + Allocator& GetAllocator() { + RAPIDJSON_ASSERT(allocator_); + return *allocator_; + } + + //! Get the capacity of stack in bytes. + size_t GetStackCapacity() const { return stack_.GetCapacity(); } + +private: + // clear stack on any exit from ParseStream, e.g. due to exception + struct ClearStackOnExit { + explicit ClearStackOnExit(GenericDocument& d) : d_(d) {} + ~ClearStackOnExit() { d_.ClearStack(); } + private: + ClearStackOnExit(const ClearStackOnExit&); + ClearStackOnExit& operator=(const ClearStackOnExit&); + GenericDocument& d_; + }; + + // callers of the following private Handler functions + // template friend class GenericReader; // for parsing + template friend class GenericValue; // for deep copying + +public: + // Implementation of Handler + bool Null() { new (stack_.template Push()) ValueType(); return true; } + bool Bool(bool b) { new (stack_.template Push()) ValueType(b); return true; } + bool Int(int i) { new (stack_.template Push()) ValueType(i); return true; } + bool Uint(unsigned i) { new (stack_.template Push()) ValueType(i); return true; } + bool Int64(int64_t i) { new (stack_.template Push()) ValueType(i); return true; } + bool Uint64(uint64_t i) { new (stack_.template Push()) ValueType(i); return true; } + bool Double(double d) { new (stack_.template Push()) ValueType(d); return true; } + + bool RawNumber(const Ch* str, SizeType length, bool copy) { + if (copy) + new (stack_.template Push()) ValueType(str, length, GetAllocator()); + else + new (stack_.template Push()) ValueType(str, length); + return true; + } + + bool String(const Ch* str, SizeType length, bool copy) { + if (copy) + new (stack_.template Push()) ValueType(str, length, GetAllocator()); + else + new (stack_.template Push()) ValueType(str, length); + return true; + } + + bool StartObject() { new (stack_.template Push()) ValueType(kObjectType); return true; } + + bool Key(const Ch* str, SizeType length, bool copy) { return String(str, length, copy); } + + bool EndObject(SizeType memberCount) { + typename ValueType::Member* members = stack_.template Pop(memberCount); + stack_.template Top()->SetObjectRaw(members, memberCount, GetAllocator()); + return true; + } + + bool StartArray() { new (stack_.template Push()) ValueType(kArrayType); return true; } + + bool EndArray(SizeType elementCount) { + ValueType* elements = stack_.template Pop(elementCount); + stack_.template Top()->SetArrayRaw(elements, elementCount, GetAllocator()); + return true; + } + +private: + //! Prohibit copying + GenericDocument(const GenericDocument&); + //! 
Prohibit assignment + GenericDocument& operator=(const GenericDocument&); + + void ClearStack() { + if (Allocator::kNeedFree) + while (stack_.GetSize() > 0) // Here assumes all elements in stack array are GenericValue (Member is actually 2 GenericValue objects) + (stack_.template Pop(1))->~ValueType(); + else + stack_.Clear(); + stack_.ShrinkToFit(); + } + + void Destroy() { + RAPIDJSON_DELETE(ownAllocator_); + } + + static const size_t kDefaultStackCapacity = 1024; + Allocator* allocator_; + Allocator* ownAllocator_; + internal::Stack stack_; + ParseResult parseResult_; +}; + +//! GenericDocument with UTF8 encoding +typedef GenericDocument > Document; + + +//! Helper class for accessing Value of array type. +/*! + Instance of this helper class is obtained by \c GenericValue::GetArray(). + In addition to all APIs for array type, it provides range-based for loop if \c RAPIDJSON_HAS_CXX11_RANGE_FOR=1. +*/ +template +class GenericArray { +public: + typedef GenericArray ConstArray; + typedef GenericArray Array; + typedef ValueT PlainType; + typedef typename internal::MaybeAddConst::Type ValueType; + typedef ValueType* ValueIterator; // This may be const or non-const iterator + typedef const ValueT* ConstValueIterator; + typedef typename ValueType::AllocatorType AllocatorType; + typedef typename ValueType::StringRefType StringRefType; + + template + friend class GenericValue; + + GenericArray(const GenericArray& rhs) : value_(rhs.value_) {} + GenericArray& operator=(const GenericArray& rhs) { value_ = rhs.value_; return *this; } + ~GenericArray() {} + + SizeType Size() const { return value_.Size(); } + SizeType Capacity() const { return value_.Capacity(); } + bool Empty() const { return value_.Empty(); } + void Clear() const { value_.Clear(); } + ValueType& operator[](SizeType index) const { return value_[index]; } + ValueIterator Begin() const { return value_.Begin(); } + ValueIterator End() const { return value_.End(); } + GenericArray Reserve(SizeType newCapacity, AllocatorType &allocator) const { value_.Reserve(newCapacity, allocator); return *this; } + GenericArray PushBack(ValueType& value, AllocatorType& allocator) const { value_.PushBack(value, allocator); return *this; } +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + GenericArray PushBack(ValueType&& value, AllocatorType& allocator) const { value_.PushBack(value, allocator); return *this; } +#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS + GenericArray PushBack(StringRefType value, AllocatorType& allocator) const { value_.PushBack(value, allocator); return *this; } + template RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (const GenericArray&)) PushBack(T value, AllocatorType& allocator) const { value_.PushBack(value, allocator); return *this; } + GenericArray PopBack() const { value_.PopBack(); return *this; } + ValueIterator Erase(ConstValueIterator pos) const { return value_.Erase(pos); } + ValueIterator Erase(ConstValueIterator first, ConstValueIterator last) const { return value_.Erase(first, last); } + +#if RAPIDJSON_HAS_CXX11_RANGE_FOR + ValueIterator begin() const { return value_.Begin(); } + ValueIterator end() const { return value_.End(); } +#endif + +private: + GenericArray(); + GenericArray(ValueType& value) : value_(value) {} + ValueType& value_; +}; + +//! Helper class for accessing Value of object type. +/*! + Instance of this helper class is obtained by \c GenericValue::GetObject(). + In addition to all APIs for array type, it provides range-based for loop if \c RAPIDJSON_HAS_CXX11_RANGE_FOR=1. 
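+
+    Editorial usage sketch (assumes a parsed \c Document \c d whose root is an
+    object, and \c RAPIDJSON_HAS_CXX11_RANGE_FOR=1):
+    \code
+    for (auto& m : d.GetObject())
+        printf("member: %s\n", m.name.GetString());
+    \endcode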
+*/ +template +class GenericObject { +public: + typedef GenericObject ConstObject; + typedef GenericObject Object; + typedef ValueT PlainType; + typedef typename internal::MaybeAddConst::Type ValueType; + typedef GenericMemberIterator MemberIterator; // This may be const or non-const iterator + typedef GenericMemberIterator ConstMemberIterator; + typedef typename ValueType::AllocatorType AllocatorType; + typedef typename ValueType::StringRefType StringRefType; + typedef typename ValueType::EncodingType EncodingType; + typedef typename ValueType::Ch Ch; + + template + friend class GenericValue; + + GenericObject(const GenericObject& rhs) : value_(rhs.value_) {} + GenericObject& operator=(const GenericObject& rhs) { value_ = rhs.value_; return *this; } + ~GenericObject() {} + + SizeType MemberCount() const { return value_.MemberCount(); } + SizeType MemberCapacity() const { return value_.MemberCapacity(); } + bool ObjectEmpty() const { return value_.ObjectEmpty(); } + template ValueType& operator[](T* name) const { return value_[name]; } + template ValueType& operator[](const GenericValue& name) const { return value_[name]; } +#if RAPIDJSON_HAS_STDSTRING + ValueType& operator[](const std::basic_string& name) const { return value_[name]; } +#endif + MemberIterator MemberBegin() const { return value_.MemberBegin(); } + MemberIterator MemberEnd() const { return value_.MemberEnd(); } + GenericObject MemberReserve(SizeType newCapacity, AllocatorType &allocator) const { value_.MemberReserve(newCapacity, allocator); return *this; } + bool HasMember(const Ch* name) const { return value_.HasMember(name); } +#if RAPIDJSON_HAS_STDSTRING + bool HasMember(const std::basic_string& name) const { return value_.HasMember(name); } +#endif + template bool HasMember(const GenericValue& name) const { return value_.HasMember(name); } + MemberIterator FindMember(const Ch* name) const { return value_.FindMember(name); } + template MemberIterator FindMember(const GenericValue& name) const { return value_.FindMember(name); } +#if RAPIDJSON_HAS_STDSTRING + MemberIterator FindMember(const std::basic_string& name) const { return value_.FindMember(name); } +#endif + GenericObject AddMember(ValueType& name, ValueType& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } + GenericObject AddMember(ValueType& name, StringRefType value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } +#if RAPIDJSON_HAS_STDSTRING + GenericObject AddMember(ValueType& name, std::basic_string& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } +#endif + template RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (ValueType&)) AddMember(ValueType& name, T value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + GenericObject AddMember(ValueType&& name, ValueType&& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } + GenericObject AddMember(ValueType&& name, ValueType& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } + GenericObject AddMember(ValueType& name, ValueType&& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } + GenericObject AddMember(StringRefType name, ValueType&& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return 
*this; } +#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS + GenericObject AddMember(StringRefType name, ValueType& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } + GenericObject AddMember(StringRefType name, StringRefType value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } + template RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (GenericObject)) AddMember(StringRefType name, T value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } + void RemoveAllMembers() { value_.RemoveAllMembers(); } + bool RemoveMember(const Ch* name) const { return value_.RemoveMember(name); } +#if RAPIDJSON_HAS_STDSTRING + bool RemoveMember(const std::basic_string& name) const { return value_.RemoveMember(name); } +#endif + template bool RemoveMember(const GenericValue& name) const { return value_.RemoveMember(name); } + MemberIterator RemoveMember(MemberIterator m) const { return value_.RemoveMember(m); } + MemberIterator EraseMember(ConstMemberIterator pos) const { return value_.EraseMember(pos); } + MemberIterator EraseMember(ConstMemberIterator first, ConstMemberIterator last) const { return value_.EraseMember(first, last); } + bool EraseMember(const Ch* name) const { return value_.EraseMember(name); } +#if RAPIDJSON_HAS_STDSTRING + bool EraseMember(const std::basic_string& name) const { return EraseMember(ValueType(StringRef(name))); } +#endif + template bool EraseMember(const GenericValue& name) const { return value_.EraseMember(name); } + +#if RAPIDJSON_HAS_CXX11_RANGE_FOR + MemberIterator begin() const { return value_.MemberBegin(); } + MemberIterator end() const { return value_.MemberEnd(); } +#endif + +private: + GenericObject(); + GenericObject(ValueType& value) : value_(value) {} + ValueType& value_; +}; + +RAPIDJSON_NAMESPACE_END +RAPIDJSON_DIAG_POP + +#endif // RAPIDJSON_DOCUMENT_H_ diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_encodedstream.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_encodedstream.h new file mode 100644 index 00000000..98dc96dc --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_encodedstream.h @@ -0,0 +1,299 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_ENCODEDSTREAM_H_ +#define RAPIDJSON_ENCODEDSTREAM_H_ + +#include "lottie_rapidjson_stream.h" +#include "lottie_rapidjson_memorystream.h" + +#ifdef __GNUC__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(effc++) +#endif + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(padded) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +//! Input byte stream wrapper with a statically bound encoding. +/*! + \tparam Encoding The interpretation of encoding of the stream. Either UTF8, UTF16LE, UTF16BE, UTF32LE, UTF32BE. 
+ \tparam InputByteStream Type of input byte stream. For example, FileReadStream. +*/ +template +class EncodedInputStream { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); +public: + typedef typename Encoding::Ch Ch; + + EncodedInputStream(InputByteStream& is) : is_(is) { + current_ = Encoding::TakeBOM(is_); + } + + Ch Peek() const { return current_; } + Ch Take() { Ch c = current_; current_ = Encoding::Take(is_); return c; } + size_t Tell() const { return is_.Tell(); } + + // Not implemented + void Put(Ch) { RAPIDJSON_ASSERT(false); } + void Flush() { RAPIDJSON_ASSERT(false); } + Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } + +private: + EncodedInputStream(const EncodedInputStream&); + EncodedInputStream& operator=(const EncodedInputStream&); + + InputByteStream& is_; + Ch current_; +}; + +//! Specialized for UTF8 MemoryStream. +template <> +class EncodedInputStream, MemoryStream> { +public: + typedef UTF8<>::Ch Ch; + + EncodedInputStream(MemoryStream& is) : is_(is) { + if (static_cast(is_.Peek()) == 0xEFu) is_.Take(); + if (static_cast(is_.Peek()) == 0xBBu) is_.Take(); + if (static_cast(is_.Peek()) == 0xBFu) is_.Take(); + } + Ch Peek() const { return is_.Peek(); } + Ch Take() { return is_.Take(); } + size_t Tell() const { return is_.Tell(); } + + // Not implemented + void Put(Ch) {} + void Flush() {} + Ch* PutBegin() { return 0; } + size_t PutEnd(Ch*) { return 0; } + + MemoryStream& is_; + +private: + EncodedInputStream(const EncodedInputStream&); + EncodedInputStream& operator=(const EncodedInputStream&); +}; + +//! Output byte stream wrapper with statically bound encoding. +/*! + \tparam Encoding The interpretation of encoding of the stream. Either UTF8, UTF16LE, UTF16BE, UTF32LE, UTF32BE. + \tparam OutputByteStream Type of input byte stream. For example, FileWriteStream. +*/ +template +class EncodedOutputStream { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); +public: + typedef typename Encoding::Ch Ch; + + EncodedOutputStream(OutputByteStream& os, bool putBOM = true) : os_(os) { + if (putBOM) + Encoding::PutBOM(os_); + } + + void Put(Ch c) { Encoding::Put(os_, c); } + void Flush() { os_.Flush(); } + + // Not implemented + Ch Peek() const { RAPIDJSON_ASSERT(false); return 0;} + Ch Take() { RAPIDJSON_ASSERT(false); return 0;} + size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; } + Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } + +private: + EncodedOutputStream(const EncodedOutputStream&); + EncodedOutputStream& operator=(const EncodedOutputStream&); + + OutputByteStream& os_; +}; + +#define RAPIDJSON_ENCODINGS_FUNC(x) UTF8::x, UTF16LE::x, UTF16BE::x, UTF32LE::x, UTF32BE::x + +//! Input stream wrapper with dynamically bound encoding and automatic encoding detection. +/*! + \tparam CharType Type of character for reading. + \tparam InputByteStream type of input byte stream to be wrapped. +*/ +template +class AutoUTFInputStream { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); +public: + typedef CharType Ch; + + //! Constructor. + /*! + \param is input stream to be wrapped. + \param type UTF encoding type if it is not detected from the stream. 
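+
+        Editorial usage sketch (assumes \c bis is a byte stream such as a
+        \c FileReadStream over an already opened file):
+        \code
+        AutoUTFInputStream<unsigned, FileReadStream> eis(bis); // BOM / RFC 4627 detection
+        Document d;
+        d.ParseStream<0, AutoUTF<unsigned> >(eis);
+        \endcode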
+ */ + AutoUTFInputStream(InputByteStream& is, UTFType type = kUTF8) : is_(&is), type_(type), hasBOM_(false) { + RAPIDJSON_ASSERT(type >= kUTF8 && type <= kUTF32BE); + DetectType(); + static const TakeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Take) }; + takeFunc_ = f[type_]; + current_ = takeFunc_(*is_); + } + + UTFType GetType() const { return type_; } + bool HasBOM() const { return hasBOM_; } + + Ch Peek() const { return current_; } + Ch Take() { Ch c = current_; current_ = takeFunc_(*is_); return c; } + size_t Tell() const { return is_->Tell(); } + + // Not implemented + void Put(Ch) { RAPIDJSON_ASSERT(false); } + void Flush() { RAPIDJSON_ASSERT(false); } + Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } + +private: + AutoUTFInputStream(const AutoUTFInputStream&); + AutoUTFInputStream& operator=(const AutoUTFInputStream&); + + // Detect encoding type with BOM or RFC 4627 + void DetectType() { + // BOM (Byte Order Mark): + // 00 00 FE FF UTF-32BE + // FF FE 00 00 UTF-32LE + // FE FF UTF-16BE + // FF FE UTF-16LE + // EF BB BF UTF-8 + + const unsigned char* c = reinterpret_cast(is_->Peek4()); + if (!c) + return; + + unsigned bom = static_cast(c[0] | (c[1] << 8) | (c[2] << 16) | (c[3] << 24)); + hasBOM_ = false; + if (bom == 0xFFFE0000) { type_ = kUTF32BE; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); is_->Take(); } + else if (bom == 0x0000FEFF) { type_ = kUTF32LE; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); is_->Take(); } + else if ((bom & 0xFFFF) == 0xFFFE) { type_ = kUTF16BE; hasBOM_ = true; is_->Take(); is_->Take(); } + else if ((bom & 0xFFFF) == 0xFEFF) { type_ = kUTF16LE; hasBOM_ = true; is_->Take(); is_->Take(); } + else if ((bom & 0xFFFFFF) == 0xBFBBEF) { type_ = kUTF8; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); } + + // RFC 4627: Section 3 + // "Since the first two characters of a JSON text will always be ASCII + // characters [RFC0020], it is possible to determine whether an octet + // stream is UTF-8, UTF-16 (BE or LE), or UTF-32 (BE or LE) by looking + // at the pattern of nulls in the first four octets." + // 00 00 00 xx UTF-32BE + // 00 xx 00 xx UTF-16BE + // xx 00 00 00 UTF-32LE + // xx 00 xx 00 UTF-16LE + // xx xx xx xx UTF-8 + + if (!hasBOM_) { + int pattern = (c[0] ? 1 : 0) | (c[1] ? 2 : 0) | (c[2] ? 4 : 0) | (c[3] ? 8 : 0); + switch (pattern) { + case 0x08: type_ = kUTF32BE; break; + case 0x0A: type_ = kUTF16BE; break; + case 0x01: type_ = kUTF32LE; break; + case 0x05: type_ = kUTF16LE; break; + case 0x0F: type_ = kUTF8; break; + default: break; // Use type defined by user. + } + } + + // Runtime check whether the size of character type is sufficient. It only perform checks with assertion. + if (type_ == kUTF16LE || type_ == kUTF16BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 2); + if (type_ == kUTF32LE || type_ == kUTF32BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 4); + } + + typedef Ch (*TakeFunc)(InputByteStream& is); + InputByteStream* is_; + UTFType type_; + Ch current_; + TakeFunc takeFunc_; + bool hasBOM_; +}; + +//! Output stream wrapper with dynamically bound encoding and automatic encoding detection. +/*! + \tparam CharType Type of character for writing. + \tparam OutputByteStream type of output byte stream to be wrapped. +*/ +template +class AutoUTFOutputStream { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); +public: + typedef CharType Ch; + + //! Constructor. + /*! + \param os output stream to be wrapped. + \param type UTF encoding type. 
+ \param putBOM Whether to write BOM at the beginning of the stream. + */ + AutoUTFOutputStream(OutputByteStream& os, UTFType type, bool putBOM) : os_(&os), type_(type) { + RAPIDJSON_ASSERT(type >= kUTF8 && type <= kUTF32BE); + + // Runtime check whether the size of character type is sufficient. It only perform checks with assertion. + if (type_ == kUTF16LE || type_ == kUTF16BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 2); + if (type_ == kUTF32LE || type_ == kUTF32BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 4); + + static const PutFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Put) }; + putFunc_ = f[type_]; + + if (putBOM) + PutBOM(); + } + + UTFType GetType() const { return type_; } + + void Put(Ch c) { putFunc_(*os_, c); } + void Flush() { os_->Flush(); } + + // Not implemented + Ch Peek() const { RAPIDJSON_ASSERT(false); return 0;} + Ch Take() { RAPIDJSON_ASSERT(false); return 0;} + size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; } + Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } + +private: + AutoUTFOutputStream(const AutoUTFOutputStream&); + AutoUTFOutputStream& operator=(const AutoUTFOutputStream&); + + void PutBOM() { + typedef void (*PutBOMFunc)(OutputByteStream&); + static const PutBOMFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(PutBOM) }; + f[type_](*os_); + } + + typedef void (*PutFunc)(OutputByteStream&, Ch); + + OutputByteStream* os_; + UTFType type_; + PutFunc putFunc_; +}; + +#undef RAPIDJSON_ENCODINGS_FUNC + +RAPIDJSON_NAMESPACE_END + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + +#ifdef __GNUC__ +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_FILESTREAM_H_ diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_encodings.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_encodings.h new file mode 100644 index 00000000..52089823 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_encodings.h @@ -0,0 +1,716 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_ENCODINGS_H_ +#define RAPIDJSON_ENCODINGS_H_ + +#include "lottie_rapidjson_rapidjson.h" + +#if defined(_MSC_VER) && !defined(__clang__) +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(4244) // conversion from 'type1' to 'type2', possible loss of data +RAPIDJSON_DIAG_OFF(4702) // unreachable code +#elif defined(__GNUC__) +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(effc++) +RAPIDJSON_DIAG_OFF(overflow) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +/////////////////////////////////////////////////////////////////////////////// +// Encoding + +/*! \class rapidjson::Encoding + \brief Concept for encoding of Unicode characters. + +\code +concept Encoding { + typename Ch; //! Type of character. A "character" is actually a code unit in unicode's definition. + + enum { supportUnicode = 1 }; // or 0 if not supporting unicode + + //! \brief Encode a Unicode codepoint to an output stream. 
+ //! \param os Output stream. + //! \param codepoint An unicode codepoint, ranging from 0x0 to 0x10FFFF inclusively. + template + static void Encode(OutputStream& os, unsigned codepoint); + + //! \brief Decode a Unicode codepoint from an input stream. + //! \param is Input stream. + //! \param codepoint Output of the unicode codepoint. + //! \return true if a valid codepoint can be decoded from the stream. + template + static bool Decode(InputStream& is, unsigned* codepoint); + + //! \brief Validate one Unicode codepoint from an encoded stream. + //! \param is Input stream to obtain codepoint. + //! \param os Output for copying one codepoint. + //! \return true if it is valid. + //! \note This function just validating and copying the codepoint without actually decode it. + template + static bool Validate(InputStream& is, OutputStream& os); + + // The following functions are deal with byte streams. + + //! Take a character from input byte stream, skip BOM if exist. + template + static CharType TakeBOM(InputByteStream& is); + + //! Take a character from input byte stream. + template + static Ch Take(InputByteStream& is); + + //! Put BOM to output byte stream. + template + static void PutBOM(OutputByteStream& os); + + //! Put a character to output byte stream. + template + static void Put(OutputByteStream& os, Ch c); +}; +\endcode +*/ + +/////////////////////////////////////////////////////////////////////////////// +// UTF8 + +//! UTF-8 encoding. +/*! http://en.wikipedia.org/wiki/UTF-8 + http://tools.ietf.org/html/rfc3629 + \tparam CharType Code unit for storing 8-bit UTF-8 data. Default is char. + \note implements Encoding concept +*/ +template +struct UTF8 { + typedef CharType Ch; + + enum { supportUnicode = 1 }; + + template + static void Encode(OutputStream& os, unsigned codepoint) { + if (codepoint <= 0x7F) + os.Put(static_cast(codepoint & 0xFF)); + else if (codepoint <= 0x7FF) { + os.Put(static_cast(0xC0 | ((codepoint >> 6) & 0xFF))); + os.Put(static_cast(0x80 | ((codepoint & 0x3F)))); + } + else if (codepoint <= 0xFFFF) { + os.Put(static_cast(0xE0 | ((codepoint >> 12) & 0xFF))); + os.Put(static_cast(0x80 | ((codepoint >> 6) & 0x3F))); + os.Put(static_cast(0x80 | (codepoint & 0x3F))); + } + else { + RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); + os.Put(static_cast(0xF0 | ((codepoint >> 18) & 0xFF))); + os.Put(static_cast(0x80 | ((codepoint >> 12) & 0x3F))); + os.Put(static_cast(0x80 | ((codepoint >> 6) & 0x3F))); + os.Put(static_cast(0x80 | (codepoint & 0x3F))); + } + } + + template + static void EncodeUnsafe(OutputStream& os, unsigned codepoint) { + if (codepoint <= 0x7F) + PutUnsafe(os, static_cast(codepoint & 0xFF)); + else if (codepoint <= 0x7FF) { + PutUnsafe(os, static_cast(0xC0 | ((codepoint >> 6) & 0xFF))); + PutUnsafe(os, static_cast(0x80 | ((codepoint & 0x3F)))); + } + else if (codepoint <= 0xFFFF) { + PutUnsafe(os, static_cast(0xE0 | ((codepoint >> 12) & 0xFF))); + PutUnsafe(os, static_cast(0x80 | ((codepoint >> 6) & 0x3F))); + PutUnsafe(os, static_cast(0x80 | (codepoint & 0x3F))); + } + else { + RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); + PutUnsafe(os, static_cast(0xF0 | ((codepoint >> 18) & 0xFF))); + PutUnsafe(os, static_cast(0x80 | ((codepoint >> 12) & 0x3F))); + PutUnsafe(os, static_cast(0x80 | ((codepoint >> 6) & 0x3F))); + PutUnsafe(os, static_cast(0x80 | (codepoint & 0x3F))); + } + } + + template + static bool Decode(InputStream& is, unsigned* codepoint) { +#define RAPIDJSON_COPY() c = is.Take(); *codepoint = (*codepoint << 6) | (static_cast(c) & 0x3Fu) +#define 
RAPIDJSON_TRANS(mask) result &= ((GetRange(static_cast(c)) & mask) != 0) +#define RAPIDJSON_TAIL() RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x70) + typename InputStream::Ch c = is.Take(); + if (!(c & 0x80)) { + *codepoint = static_cast(c); + return true; + } + + unsigned char type = GetRange(static_cast(c)); + if (type >= 32) { + *codepoint = 0; + } else { + *codepoint = (0xFFu >> type) & static_cast(c); + } + bool result = true; + switch (type) { + case 2: RAPIDJSON_TAIL(); return result; + case 3: RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result; + case 4: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x50); RAPIDJSON_TAIL(); return result; + case 5: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x10); RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result; + case 6: RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result; + case 10: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x20); RAPIDJSON_TAIL(); return result; + case 11: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x60); RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result; + default: return false; + } +#undef RAPIDJSON_COPY +#undef RAPIDJSON_TRANS +#undef RAPIDJSON_TAIL + } + + template + static bool Validate(InputStream& is, OutputStream& os) { +#define RAPIDJSON_COPY() os.Put(c = is.Take()) +#define RAPIDJSON_TRANS(mask) result &= ((GetRange(static_cast(c)) & mask) != 0) +#define RAPIDJSON_TAIL() RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x70) + Ch c; + RAPIDJSON_COPY(); + if (!(c & 0x80)) + return true; + + bool result = true; + switch (GetRange(static_cast(c))) { + case 2: RAPIDJSON_TAIL(); return result; + case 3: RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result; + case 4: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x50); RAPIDJSON_TAIL(); return result; + case 5: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x10); RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result; + case 6: RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result; + case 10: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x20); RAPIDJSON_TAIL(); return result; + case 11: RAPIDJSON_COPY(); RAPIDJSON_TRANS(0x60); RAPIDJSON_TAIL(); RAPIDJSON_TAIL(); return result; + default: return false; + } +#undef RAPIDJSON_COPY +#undef RAPIDJSON_TRANS +#undef RAPIDJSON_TAIL + } + + static unsigned char GetRange(unsigned char c) { + // Referring to DFA of http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ + // With new mapping 1 -> 0x10, 7 -> 0x20, 9 -> 0x40, such that AND operation can test multiple types. 
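+    // Editorial worked example: for U+00E9 ('e' with acute, bytes C3 A9),
+    // GetRange(0xC3) yields 2 (two-byte lead), so Decode() runs one
+    // RAPIDJSON_TAIL step; the continuation byte 0xA9 maps to range 0x20,
+    // and 0x20 & 0x70 != 0, so the sequence is accepted.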
+        static const unsigned char type[] = {
+            0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+            0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+            0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+            0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+            0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,
+            0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,
+            0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
+            0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
+            8,8,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
+            10,3,3,3,3,3,3,3,3,3,3,3,3,4,3,3, 11,6,6,6,5,8,8,8,8,8,8,8,8,8,8,8,
+        };
+        return type[c];
+    }
+
+    template <typename InputByteStream>
+    static CharType TakeBOM(InputByteStream& is) {
+        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
+        typename InputByteStream::Ch c = Take(is);
+        if (static_cast<unsigned char>(c) != 0xEFu) return c;
+        c = is.Take();
+        if (static_cast<unsigned char>(c) != 0xBBu) return c;
+        c = is.Take();
+        if (static_cast<unsigned char>(c) != 0xBFu) return c;
+        c = is.Take();
+        return c;
+    }
+
+    template <typename InputByteStream>
+    static Ch Take(InputByteStream& is) {
+        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
+        return static_cast<Ch>(is.Take());
+    }
+
+    template <typename OutputByteStream>
+    static void PutBOM(OutputByteStream& os) {
+        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
+        os.Put(static_cast<typename OutputByteStream::Ch>(0xEFu));
+        os.Put(static_cast<typename OutputByteStream::Ch>(0xBBu));
+        os.Put(static_cast<typename OutputByteStream::Ch>(0xBFu));
+    }
+
+    template <typename OutputByteStream>
+    static void Put(OutputByteStream& os, Ch c) {
+        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
+        os.Put(static_cast<typename OutputByteStream::Ch>(c));
+    }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// UTF16
+
+//! UTF-16 encoding.
+/*! http://en.wikipedia.org/wiki/UTF-16
+    http://tools.ietf.org/html/rfc2781
+    \tparam CharType Type for storing 16-bit UTF-16 data. Default is wchar_t. C++11 may use char16_t instead.
+    \note implements Encoding concept
+
+    \note For in-memory access, there is no need to be concerned about endianness. The code units and code points are represented by the CPU's endianness.
+    For streaming, use UTF16LE and UTF16BE, which handle endianness.
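+
+    Editorial worked example: U+1F600 is encoded as the surrogate pair
+    0xD83D 0xDE00, since v = 0x1F600 - 0x10000 = 0xF600, and then
+    (v >> 10) | 0xD800 = 0xD83D while (v & 0x3FF) | 0xDC00 = 0xDE00.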
+*/ +template +struct UTF16 { + typedef CharType Ch; + RAPIDJSON_STATIC_ASSERT(sizeof(Ch) >= 2); + + enum { supportUnicode = 1 }; + + template + static void Encode(OutputStream& os, unsigned codepoint) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2); + if (codepoint <= 0xFFFF) { + RAPIDJSON_ASSERT(codepoint < 0xD800 || codepoint > 0xDFFF); // Code point itself cannot be surrogate pair + os.Put(static_cast(codepoint)); + } + else { + RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); + unsigned v = codepoint - 0x10000; + os.Put(static_cast((v >> 10) | 0xD800)); + os.Put(static_cast((v & 0x3FF) | 0xDC00)); + } + } + + + template + static void EncodeUnsafe(OutputStream& os, unsigned codepoint) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2); + if (codepoint <= 0xFFFF) { + RAPIDJSON_ASSERT(codepoint < 0xD800 || codepoint > 0xDFFF); // Code point itself cannot be surrogate pair + PutUnsafe(os, static_cast(codepoint)); + } + else { + RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); + unsigned v = codepoint - 0x10000; + PutUnsafe(os, static_cast((v >> 10) | 0xD800)); + PutUnsafe(os, static_cast((v & 0x3FF) | 0xDC00)); + } + } + + template + static bool Decode(InputStream& is, unsigned* codepoint) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 2); + typename InputStream::Ch c = is.Take(); + if (c < 0xD800 || c > 0xDFFF) { + *codepoint = static_cast(c); + return true; + } + else if (c <= 0xDBFF) { + *codepoint = (static_cast(c) & 0x3FF) << 10; + c = is.Take(); + *codepoint |= (static_cast(c) & 0x3FF); + *codepoint += 0x10000; + return c >= 0xDC00 && c <= 0xDFFF; + } + return false; + } + + template + static bool Validate(InputStream& is, OutputStream& os) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 2); + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2); + typename InputStream::Ch c; + os.Put(static_cast(c = is.Take())); + if (c < 0xD800 || c > 0xDFFF) + return true; + else if (c <= 0xDBFF) { + os.Put(c = is.Take()); + return c >= 0xDC00 && c <= 0xDFFF; + } + return false; + } +}; + +//! UTF-16 little endian encoding. +template +struct UTF16LE : UTF16 { + template + static CharType TakeBOM(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + CharType c = Take(is); + return static_cast(c) == 0xFEFFu ? Take(is) : c; + } + + template + static CharType Take(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + unsigned c = static_cast(is.Take()); + c |= static_cast(static_cast(is.Take())) << 8; + return static_cast(c); + } + + template + static void PutBOM(OutputByteStream& os) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast(0xFFu)); + os.Put(static_cast(0xFEu)); + } + + template + static void Put(OutputByteStream& os, CharType c) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast(static_cast(c) & 0xFFu)); + os.Put(static_cast((static_cast(c) >> 8) & 0xFFu)); + } +}; + +//! UTF-16 big endian encoding. +template +struct UTF16BE : UTF16 { + template + static CharType TakeBOM(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + CharType c = Take(is); + return static_cast(c) == 0xFEFFu ? 
Take(is) : c; + } + + template + static CharType Take(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + unsigned c = static_cast(static_cast(is.Take())) << 8; + c |= static_cast(static_cast(is.Take())); + return static_cast(c); + } + + template + static void PutBOM(OutputByteStream& os) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast(0xFEu)); + os.Put(static_cast(0xFFu)); + } + + template + static void Put(OutputByteStream& os, CharType c) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast((static_cast(c) >> 8) & 0xFFu)); + os.Put(static_cast(static_cast(c) & 0xFFu)); + } +}; + +/////////////////////////////////////////////////////////////////////////////// +// UTF32 + +//! UTF-32 encoding. +/*! http://en.wikipedia.org/wiki/UTF-32 + \tparam CharType Type for storing 32-bit UTF-32 data. Default is unsigned. C++11 may use char32_t instead. + \note implements Encoding concept + + \note For in-memory access, no need to concern endianness. The code units and code points are represented by CPU's endianness. + For streaming, use UTF32LE and UTF32BE, which handle endianness. +*/ +template +struct UTF32 { + typedef CharType Ch; + RAPIDJSON_STATIC_ASSERT(sizeof(Ch) >= 4); + + enum { supportUnicode = 1 }; + + template + static void Encode(OutputStream& os, unsigned codepoint) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 4); + RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); + os.Put(codepoint); + } + + template + static void EncodeUnsafe(OutputStream& os, unsigned codepoint) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 4); + RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); + PutUnsafe(os, codepoint); + } + + template + static bool Decode(InputStream& is, unsigned* codepoint) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 4); + Ch c = is.Take(); + *codepoint = c; + return c <= 0x10FFFF; + } + + template + static bool Validate(InputStream& is, OutputStream& os) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 4); + Ch c; + os.Put(c = is.Take()); + return c <= 0x10FFFF; + } +}; + +//! UTF-32 little endian enocoding. +template +struct UTF32LE : UTF32 { + template + static CharType TakeBOM(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + CharType c = Take(is); + return static_cast(c) == 0x0000FEFFu ? Take(is) : c; + } + + template + static CharType Take(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + unsigned c = static_cast(is.Take()); + c |= static_cast(static_cast(is.Take())) << 8; + c |= static_cast(static_cast(is.Take())) << 16; + c |= static_cast(static_cast(is.Take())) << 24; + return static_cast(c); + } + + template + static void PutBOM(OutputByteStream& os) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast(0xFFu)); + os.Put(static_cast(0xFEu)); + os.Put(static_cast(0x00u)); + os.Put(static_cast(0x00u)); + } + + template + static void Put(OutputByteStream& os, CharType c) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast(c & 0xFFu)); + os.Put(static_cast((c >> 8) & 0xFFu)); + os.Put(static_cast((c >> 16) & 0xFFu)); + os.Put(static_cast((c >> 24) & 0xFFu)); + } +}; + +//! UTF-32 big endian encoding. 
+template +struct UTF32BE : UTF32 { + template + static CharType TakeBOM(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + CharType c = Take(is); + return static_cast(c) == 0x0000FEFFu ? Take(is) : c; + } + + template + static CharType Take(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + unsigned c = static_cast(static_cast(is.Take())) << 24; + c |= static_cast(static_cast(is.Take())) << 16; + c |= static_cast(static_cast(is.Take())) << 8; + c |= static_cast(static_cast(is.Take())); + return static_cast(c); + } + + template + static void PutBOM(OutputByteStream& os) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast(0x00u)); + os.Put(static_cast(0x00u)); + os.Put(static_cast(0xFEu)); + os.Put(static_cast(0xFFu)); + } + + template + static void Put(OutputByteStream& os, CharType c) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast((c >> 24) & 0xFFu)); + os.Put(static_cast((c >> 16) & 0xFFu)); + os.Put(static_cast((c >> 8) & 0xFFu)); + os.Put(static_cast(c & 0xFFu)); + } +}; + +/////////////////////////////////////////////////////////////////////////////// +// ASCII + +//! ASCII encoding. +/*! http://en.wikipedia.org/wiki/ASCII + \tparam CharType Code unit for storing 7-bit ASCII data. Default is char. + \note implements Encoding concept +*/ +template +struct ASCII { + typedef CharType Ch; + + enum { supportUnicode = 0 }; + + template + static void Encode(OutputStream& os, unsigned codepoint) { + RAPIDJSON_ASSERT(codepoint <= 0x7F); + os.Put(static_cast(codepoint & 0xFF)); + } + + template + static void EncodeUnsafe(OutputStream& os, unsigned codepoint) { + RAPIDJSON_ASSERT(codepoint <= 0x7F); + PutUnsafe(os, static_cast(codepoint & 0xFF)); + } + + template + static bool Decode(InputStream& is, unsigned* codepoint) { + uint8_t c = static_cast(is.Take()); + *codepoint = c; + return c <= 0X7F; + } + + template + static bool Validate(InputStream& is, OutputStream& os) { + uint8_t c = static_cast(is.Take()); + os.Put(static_cast(c)); + return c <= 0x7F; + } + + template + static CharType TakeBOM(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + uint8_t c = static_cast(Take(is)); + return static_cast(c); + } + + template + static Ch Take(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + return static_cast(is.Take()); + } + + template + static void PutBOM(OutputByteStream& os) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + (void)os; + } + + template + static void Put(OutputByteStream& os, Ch c) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast(c)); + } +}; + +/////////////////////////////////////////////////////////////////////////////// +// AutoUTF + +//! Runtime-specified UTF encoding type of a stream. +enum UTFType { + kUTF8 = 0, //!< UTF-8. + kUTF16LE = 1, //!< UTF-16 little endian. + kUTF16BE = 2, //!< UTF-16 big endian. + kUTF32LE = 3, //!< UTF-32 little endian. + kUTF32BE = 4 //!< UTF-32 big endian. +}; + +//! Dynamically select encoding according to stream's runtime-specified UTF encoding type. +/*! \note This class can be used with AutoUTFInputtStream and AutoUTFOutputStream, which provides GetType(). 
+*/ +template +struct AutoUTF { + typedef CharType Ch; + + enum { supportUnicode = 1 }; + +#define RAPIDJSON_ENCODINGS_FUNC(x) UTF8::x, UTF16LE::x, UTF16BE::x, UTF32LE::x, UTF32BE::x + + template + static RAPIDJSON_FORCEINLINE void Encode(OutputStream& os, unsigned codepoint) { + typedef void (*EncodeFunc)(OutputStream&, unsigned); + static const EncodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Encode) }; + (*f[os.GetType()])(os, codepoint); + } + + template + static RAPIDJSON_FORCEINLINE void EncodeUnsafe(OutputStream& os, unsigned codepoint) { + typedef void (*EncodeFunc)(OutputStream&, unsigned); + static const EncodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(EncodeUnsafe) }; + (*f[os.GetType()])(os, codepoint); + } + + template + static RAPIDJSON_FORCEINLINE bool Decode(InputStream& is, unsigned* codepoint) { + typedef bool (*DecodeFunc)(InputStream&, unsigned*); + static const DecodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Decode) }; + return (*f[is.GetType()])(is, codepoint); + } + + template + static RAPIDJSON_FORCEINLINE bool Validate(InputStream& is, OutputStream& os) { + typedef bool (*ValidateFunc)(InputStream&, OutputStream&); + static const ValidateFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Validate) }; + return (*f[is.GetType()])(is, os); + } + +#undef RAPIDJSON_ENCODINGS_FUNC +}; + +/////////////////////////////////////////////////////////////////////////////// +// Transcoder + +//! Encoding conversion. +template +struct Transcoder { + //! Take one Unicode codepoint from source encoding, convert it to target encoding and put it to the output stream. + template + static RAPIDJSON_FORCEINLINE bool Transcode(InputStream& is, OutputStream& os) { + unsigned codepoint; + if (!SourceEncoding::Decode(is, &codepoint)) + return false; + TargetEncoding::Encode(os, codepoint); + return true; + } + + template + static RAPIDJSON_FORCEINLINE bool TranscodeUnsafe(InputStream& is, OutputStream& os) { + unsigned codepoint; + if (!SourceEncoding::Decode(is, &codepoint)) + return false; + TargetEncoding::EncodeUnsafe(os, codepoint); + return true; + } + + //! Validate one Unicode codepoint from an encoded stream. + template + static RAPIDJSON_FORCEINLINE bool Validate(InputStream& is, OutputStream& os) { + return Transcode(is, os); // Since source/target encoding is different, must transcode. + } +}; + +// Forward declaration. +template +inline void PutUnsafe(Stream& stream, typename Stream::Ch c); + +//! Specialization of Transcoder with same source and target encoding. +template +struct Transcoder { + template + static RAPIDJSON_FORCEINLINE bool Transcode(InputStream& is, OutputStream& os) { + os.Put(is.Take()); // Just copy one code unit. This semantic is different from primary template class. + return true; + } + + template + static RAPIDJSON_FORCEINLINE bool TranscodeUnsafe(InputStream& is, OutputStream& os) { + PutUnsafe(os, is.Take()); // Just copy one code unit. This semantic is different from primary template class. 
+ return true; + } + + template + static RAPIDJSON_FORCEINLINE bool Validate(InputStream& is, OutputStream& os) { + return Encoding::Validate(is, os); // source/target encoding are the same + } +}; + +RAPIDJSON_NAMESPACE_END + +#if defined(__GNUC__) || (defined(_MSC_VER) && !defined(__clang__)) +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_ENCODINGS_H_ diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_error_en.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_error_en.h new file mode 100644 index 00000000..e81ed92d --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_error_en.h @@ -0,0 +1,74 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_ERROR_EN_H_ +#define RAPIDJSON_ERROR_EN_H_ + +#include "lottie_rapidjson_error_error.h" + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(switch-enum) +RAPIDJSON_DIAG_OFF(covered-switch-default) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +//! Maps error code of parsing into error message. +/*! + \ingroup RAPIDJSON_ERRORS + \param parseErrorCode Error code obtained in parsing. + \return the error message. + \note User can make a copy of this function for localization. + Using switch-case is safer for future modification of error codes. 
+*/ +inline const RAPIDJSON_ERROR_CHARTYPE* GetParseError_En(ParseErrorCode parseErrorCode) { + switch (parseErrorCode) { + case kParseErrorNone: return RAPIDJSON_ERROR_STRING("No error."); + + case kParseErrorDocumentEmpty: return RAPIDJSON_ERROR_STRING("The document is empty."); + case kParseErrorDocumentRootNotSingular: return RAPIDJSON_ERROR_STRING("The document root must not be followed by other values."); + + case kParseErrorValueInvalid: return RAPIDJSON_ERROR_STRING("Invalid value."); + + case kParseErrorObjectMissName: return RAPIDJSON_ERROR_STRING("Missing a name for object member."); + case kParseErrorObjectMissColon: return RAPIDJSON_ERROR_STRING("Missing a colon after a name of object member."); + case kParseErrorObjectMissCommaOrCurlyBracket: return RAPIDJSON_ERROR_STRING("Missing a comma or '}' after an object member."); + + case kParseErrorArrayMissCommaOrSquareBracket: return RAPIDJSON_ERROR_STRING("Missing a comma or ']' after an array element."); + + case kParseErrorStringUnicodeEscapeInvalidHex: return RAPIDJSON_ERROR_STRING("Incorrect hex digit after \\u escape in string."); + case kParseErrorStringUnicodeSurrogateInvalid: return RAPIDJSON_ERROR_STRING("The surrogate pair in string is invalid."); + case kParseErrorStringEscapeInvalid: return RAPIDJSON_ERROR_STRING("Invalid escape character in string."); + case kParseErrorStringMissQuotationMark: return RAPIDJSON_ERROR_STRING("Missing a closing quotation mark in string."); + case kParseErrorStringInvalidEncoding: return RAPIDJSON_ERROR_STRING("Invalid encoding in string."); + + case kParseErrorNumberTooBig: return RAPIDJSON_ERROR_STRING("Number too big to be stored in double."); + case kParseErrorNumberMissFraction: return RAPIDJSON_ERROR_STRING("Miss fraction part in number."); + case kParseErrorNumberMissExponent: return RAPIDJSON_ERROR_STRING("Miss exponent in number."); + + case kParseErrorTermination: return RAPIDJSON_ERROR_STRING("Terminate parsing due to Handler error."); + case kParseErrorUnspecificSyntaxError: return RAPIDJSON_ERROR_STRING("Unspecific syntax error."); + + default: return RAPIDJSON_ERROR_STRING("Unknown error."); + } +} + +RAPIDJSON_NAMESPACE_END + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_ERROR_EN_H_ diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_error_error.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_error_error.h new file mode 100644 index 00000000..034675e5 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_error_error.h @@ -0,0 +1,161 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_ERROR_ERROR_H_ +#define RAPIDJSON_ERROR_ERROR_H_ + +#include "lottie_rapidjson_rapidjson.h" + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(padded) +#endif + +/*! \file error.h */ + +/*! 
\defgroup RAPIDJSON_ERRORS RapidJSON error handling */ + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_ERROR_CHARTYPE + +//! Character type of error messages. +/*! \ingroup RAPIDJSON_ERRORS + The default character type is \c char. + On Windows, user can define this macro as \c TCHAR for supporting both + unicode/non-unicode settings. +*/ +#ifndef RAPIDJSON_ERROR_CHARTYPE +#define RAPIDJSON_ERROR_CHARTYPE char +#endif + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_ERROR_STRING + +//! Macro for converting string literial to \ref RAPIDJSON_ERROR_CHARTYPE[]. +/*! \ingroup RAPIDJSON_ERRORS + By default this conversion macro does nothing. + On Windows, user can define this macro as \c _T(x) for supporting both + unicode/non-unicode settings. +*/ +#ifndef RAPIDJSON_ERROR_STRING +#define RAPIDJSON_ERROR_STRING(x) x +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +/////////////////////////////////////////////////////////////////////////////// +// ParseErrorCode + +//! Error code of parsing. +/*! \ingroup RAPIDJSON_ERRORS + \see GenericReader::Parse, GenericReader::GetParseErrorCode +*/ +enum ParseErrorCode { + kParseErrorNone = 0, //!< No error. + + kParseErrorDocumentEmpty, //!< The document is empty. + kParseErrorDocumentRootNotSingular, //!< The document root must not follow by other values. + + kParseErrorValueInvalid, //!< Invalid value. + + kParseErrorObjectMissName, //!< Missing a name for object member. + kParseErrorObjectMissColon, //!< Missing a colon after a name of object member. + kParseErrorObjectMissCommaOrCurlyBracket, //!< Missing a comma or '}' after an object member. + + kParseErrorArrayMissCommaOrSquareBracket, //!< Missing a comma or ']' after an array element. + + kParseErrorStringUnicodeEscapeInvalidHex, //!< Incorrect hex digit after \\u escape in string. + kParseErrorStringUnicodeSurrogateInvalid, //!< The surrogate pair in string is invalid. + kParseErrorStringEscapeInvalid, //!< Invalid escape character in string. + kParseErrorStringMissQuotationMark, //!< Missing a closing quotation mark in string. + kParseErrorStringInvalidEncoding, //!< Invalid encoding in string. + + kParseErrorNumberTooBig, //!< Number too big to be stored in double. + kParseErrorNumberMissFraction, //!< Miss fraction part in number. + kParseErrorNumberMissExponent, //!< Miss exponent in number. + + kParseErrorTermination, //!< Parsing was terminated. + kParseErrorUnspecificSyntaxError //!< Unspecific syntax error. +}; + +//! Result of parsing (wraps ParseErrorCode) +/*! + \ingroup RAPIDJSON_ERRORS + \code + Document doc; + ParseResult ok = doc.Parse("[42]"); + if (!ok) { + fprintf(stderr, "JSON parse error: %s (%u)", + GetParseError_En(ok.Code()), ok.Offset()); + exit(EXIT_FAILURE); + } + \endcode + \see GenericReader::Parse, GenericDocument::Parse +*/ +struct ParseResult { + //!! Unspecified boolean type + typedef bool (ParseResult::*BooleanType)() const; +public: + //! Default constructor, no error. + ParseResult() : code_(kParseErrorNone), offset_(0) {} + //! Constructor to set an error. + ParseResult(ParseErrorCode code, size_t offset) : code_(code), offset_(offset) {} + + //! Get the error code. + ParseErrorCode Code() const { return code_; } + //! Get the error offset, if \ref IsError(), 0 otherwise. + size_t Offset() const { return offset_; } + + //! Explicit conversion to \c bool, returns \c true, iff !\ref IsError(). + operator BooleanType() const { return !IsError() ? 
&ParseResult::IsError : NULL; } + //! Whether the result is an error. + bool IsError() const { return code_ != kParseErrorNone; } + + bool operator==(const ParseResult& that) const { return code_ == that.code_; } + bool operator==(ParseErrorCode code) const { return code_ == code; } + friend bool operator==(ParseErrorCode code, const ParseResult & err) { return code == err.code_; } + + bool operator!=(const ParseResult& that) const { return !(*this == that); } + bool operator!=(ParseErrorCode code) const { return !(*this == code); } + friend bool operator!=(ParseErrorCode code, const ParseResult & err) { return err != code; } + + //! Reset error code. + void Clear() { Set(kParseErrorNone); } + //! Update error code and offset. + void Set(ParseErrorCode code, size_t offset = 0) { code_ = code; offset_ = offset; } + +private: + ParseErrorCode code_; + size_t offset_; +}; + +//! Function pointer type of GetParseError(). +/*! \ingroup RAPIDJSON_ERRORS + + This is the prototype for \c GetParseError_X(), where \c X is a locale. + User can dynamically change locale in runtime, e.g.: +\code + GetParseErrorFunc GetParseError = GetParseError_En; // or whatever + const RAPIDJSON_ERROR_CHARTYPE* s = GetParseError(document.GetParseErrorCode()); +\endcode +*/ +typedef const RAPIDJSON_ERROR_CHARTYPE* (*GetParseErrorFunc)(ParseErrorCode); + +RAPIDJSON_NAMESPACE_END + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_ERROR_ERROR_H_ diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_filereadstream.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_filereadstream.h new file mode 100644 index 00000000..f46a7545 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_filereadstream.h @@ -0,0 +1,99 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_FILEREADSTREAM_H_ +#define RAPIDJSON_FILEREADSTREAM_H_ + +#include "lottie_rapidjson_stream.h" +#include + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(padded) +RAPIDJSON_DIAG_OFF(unreachable-code) +RAPIDJSON_DIAG_OFF(missing-noreturn) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +//! File byte stream for input using fread(). +/*! + \note implements Stream concept +*/ +class FileReadStream { +public: + typedef char Ch; //!< Character type (byte). + + //! Constructor. + /*! + \param fp File pointer opened for read. + \param buffer user-supplied buffer. + \param bufferSize size of buffer in bytes. Must >=4 bytes. 
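+
+        A usage sketch (editor's illustration, not upstream documentation;
+        error handling omitted):
+        \code
+        std::FILE* fp = std::fopen("anim.json", "rb");
+        char buf[4096];
+        FileReadStream is(fp, buf, sizeof(buf));
+        // e.g. feed the stream to a parser: Document d; d.ParseStream(is);
+        std::fclose(fp);
+        \endcode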
+ */ + FileReadStream(std::FILE* fp, char* buffer, size_t bufferSize) : fp_(fp), buffer_(buffer), bufferSize_(bufferSize), bufferLast_(0), current_(buffer_), readCount_(0), count_(0), eof_(false) { + RAPIDJSON_ASSERT(fp_ != 0); + RAPIDJSON_ASSERT(bufferSize >= 4); + Read(); + } + + Ch Peek() const { return *current_; } + Ch Take() { Ch c = *current_; Read(); return c; } + size_t Tell() const { return count_ + static_cast(current_ - buffer_); } + + // Not implemented + void Put(Ch) { RAPIDJSON_ASSERT(false); } + void Flush() { RAPIDJSON_ASSERT(false); } + Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } + + // For encoding detection only. + const Ch* Peek4() const { + return (current_ + 4 - !eof_ <= bufferLast_) ? current_ : 0; + } + +private: + void Read() { + if (current_ < bufferLast_) + ++current_; + else if (!eof_) { + count_ += readCount_; + readCount_ = std::fread(buffer_, 1, bufferSize_, fp_); + bufferLast_ = buffer_ + readCount_ - 1; + current_ = buffer_; + + if (readCount_ < bufferSize_) { + buffer_[readCount_] = '\0'; + ++bufferLast_; + eof_ = true; + } + } + } + + std::FILE* fp_; + Ch *buffer_; + size_t bufferSize_; + Ch *bufferLast_; + Ch *current_; + size_t readCount_; + size_t count_; //!< Number of characters read + bool eof_; +}; + +RAPIDJSON_NAMESPACE_END + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_FILESTREAM_H_ diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_filewritestream.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_filewritestream.h new file mode 100644 index 00000000..0a3408ee --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_filewritestream.h @@ -0,0 +1,104 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_FILEWRITESTREAM_H_ +#define RAPIDJSON_FILEWRITESTREAM_H_ + +#include "lottie_rapidjson_stream.h" +#include + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(unreachable-code) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +//! Wrapper of C file stream for output using fwrite(). +/*! + \note implements Stream concept +*/ +class FileWriteStream { +public: + typedef char Ch; //!< Character type. Only support char. 
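+
+    // [editor's note] Typical use, as a sketch (the buffer size is this
+    // example's choice, not a requirement of the class):
+    //
+    //   char wbuf[65536];
+    //   FileWriteStream os(fp, wbuf, sizeof(wbuf));
+    //   Writer<FileWriteStream> writer(os);
+    //   document.Accept(writer);   // serialize a Document to fp
+    //   os.Flush();                // Put() flushes only when the buffer fills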
+ + FileWriteStream(std::FILE* fp, char* buffer, size_t bufferSize) : fp_(fp), buffer_(buffer), bufferEnd_(buffer + bufferSize), current_(buffer_) { + RAPIDJSON_ASSERT(fp_ != 0); + } + + void Put(char c) { + if (current_ >= bufferEnd_) + Flush(); + + *current_++ = c; + } + + void PutN(char c, size_t n) { + size_t avail = static_cast(bufferEnd_ - current_); + while (n > avail) { + std::memset(current_, c, avail); + current_ += avail; + Flush(); + n -= avail; + avail = static_cast(bufferEnd_ - current_); + } + + if (n > 0) { + std::memset(current_, c, n); + current_ += n; + } + } + + void Flush() { + if (current_ != buffer_) { + size_t result = std::fwrite(buffer_, 1, static_cast(current_ - buffer_), fp_); + if (result < static_cast(current_ - buffer_)) { + // failure deliberately ignored at this time + // added to avoid warn_unused_result build errors + } + current_ = buffer_; + } + } + + // Not implemented + char Peek() const { RAPIDJSON_ASSERT(false); return 0; } + char Take() { RAPIDJSON_ASSERT(false); return 0; } + size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; } + char* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + size_t PutEnd(char*) { RAPIDJSON_ASSERT(false); return 0; } + +private: + // Prohibit copy constructor & assignment operator. + FileWriteStream(const FileWriteStream&); + FileWriteStream& operator=(const FileWriteStream&); + + std::FILE* fp_; + char *buffer_; + char *bufferEnd_; + char *current_; +}; + +//! Implement specialized version of PutN() with memset() for better performance. +template<> +inline void PutN(FileWriteStream& stream, char c, size_t n) { + stream.PutN(c, n); +} + +RAPIDJSON_NAMESPACE_END + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_FILESTREAM_H_ diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_fwd.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_fwd.h new file mode 100644 index 00000000..eee2ef8b --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_fwd.h @@ -0,0 +1,151 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
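+
+// [editor's note] Illustrative sketch, not part of upstream RapidJSON: this
+// forward-declaration header lets user headers mention RapidJSON types without
+// pulling in the full template definitions, e.g.
+//
+//   #include "lottie_rapidjson_fwd.h"
+//   void ApplyLottieJson(rapidjson::Document& doc);  // hypothetical function;
+//                                                    // reference use only
+//
+// The translation unit defining such a function then includes the full
+// lottie_rapidjson_document.h.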
+
+#ifndef RAPIDJSON_FWD_H_
+#define RAPIDJSON_FWD_H_
+
+#include "lottie_rapidjson_rapidjson.h"
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+// encodings.h
+
+template<typename CharType> struct UTF8;
+template<typename CharType> struct UTF16;
+template<typename CharType> struct UTF16BE;
+template<typename CharType> struct UTF16LE;
+template<typename CharType> struct UTF32;
+template<typename CharType> struct UTF32BE;
+template<typename CharType> struct UTF32LE;
+template<typename CharType> struct ASCII;
+template<typename CharType> struct AutoUTF;
+
+template<typename SourceEncoding, typename TargetEncoding>
+struct Transcoder;
+
+// allocators.h
+
+class CrtAllocator;
+
+template <typename BaseAllocator>
+class MemoryPoolAllocator;
+
+// stream.h
+
+template <typename Encoding>
+struct GenericStringStream;
+
+typedef GenericStringStream<UTF8<char> > StringStream;
+
+template <typename Encoding>
+struct GenericInsituStringStream;
+
+typedef GenericInsituStringStream<UTF8<char> > InsituStringStream;
+
+// stringbuffer.h
+
+template <typename Encoding, typename Allocator>
+class GenericStringBuffer;
+
+typedef GenericStringBuffer<UTF8<char>, CrtAllocator> StringBuffer;
+
+// filereadstream.h
+
+class FileReadStream;
+
+// filewritestream.h
+
+class FileWriteStream;
+
+// memorybuffer.h
+
+template <typename Allocator>
+struct GenericMemoryBuffer;
+
+typedef GenericMemoryBuffer<CrtAllocator> MemoryBuffer;
+
+// memorystream.h
+
+struct MemoryStream;
+
+// reader.h
+
+template<typename Encoding, typename Derived>
+struct BaseReaderHandler;
+
+template <typename SourceEncoding, typename TargetEncoding, typename StackAllocator>
+class GenericReader;
+
+typedef GenericReader<UTF8<char>, UTF8<char>, CrtAllocator> Reader;
+
+// writer.h
+
+template<typename OutputStream, typename SourceEncoding, typename TargetEncoding, typename StackAllocator, unsigned writeFlags>
+class Writer;
+
+// prettywriter.h
+
+template<typename OutputStream, typename SourceEncoding, typename TargetEncoding, typename StackAllocator, unsigned writeFlags>
+class PrettyWriter;
+
+// document.h
+
+template <typename Encoding, typename Allocator>
+class GenericMember;
+
+template <bool Const, typename Encoding, typename Allocator>
+class GenericMemberIterator;
+
+template<typename CharType>
+struct GenericStringRef;
+
+template <typename Encoding, typename Allocator>
+class GenericValue;
+
+typedef GenericValue<UTF8<char>, MemoryPoolAllocator<CrtAllocator> > Value;
+
+template <typename Encoding, typename Allocator, typename StackAllocator>
+class GenericDocument;
+
+typedef GenericDocument<UTF8<char>, MemoryPoolAllocator<CrtAllocator>, CrtAllocator> Document;
+
+// pointer.h
+
+template <typename ValueType, typename Allocator>
+class GenericPointer;
+
+typedef GenericPointer<Value, CrtAllocator> Pointer;
+
+// schema.h
+
+template <typename SchemaDocumentType>
+class IGenericRemoteSchemaDocumentProvider;
+
+template <typename ValueT, typename Allocator>
+class GenericSchemaDocument;
+
+typedef GenericSchemaDocument<Value, CrtAllocator> SchemaDocument;
+typedef IGenericRemoteSchemaDocumentProvider<SchemaDocument> IRemoteSchemaDocumentProvider;
+
+template <
+    typename SchemaDocumentType,
+    typename OutputHandler,
+    typename StateAllocator>
+class GenericSchemaValidator;
+
+typedef GenericSchemaValidator<SchemaDocument, BaseReaderHandler<UTF8<char>, void>, CrtAllocator> SchemaValidator;
+
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_RAPIDJSONFWD_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_biginteger.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_biginteger.h
new file mode 100644
index 00000000..1d5db026
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_biginteger.h
@@ -0,0 +1,290 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
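+
+// [editor's note] BigInteger is a fixed-capacity, multi-word unsigned integer
+// used so decimal significands can be compared exactly rather than through
+// lossy doubles. A small sketch of the operators defined below:
+//
+//   internal::BigInteger b("9007199254740993", 16);  // 2^53 + 1: exact here,
+//                                                    // not representable in double
+//   b *= 5u;   // operator*=(uint32_t): per-digit multiply with carry
+//   b <<= 1;   // operator<<=: bit shift, so the two lines multiply by 10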
+ +#ifndef RAPIDJSON_BIGINTEGER_H_ +#define RAPIDJSON_BIGINTEGER_H_ + +#include "lottie_rapidjson_rapidjson.h" + +#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) && defined(_M_AMD64) +#include // for _umul128 +#pragma intrinsic(_umul128) +#endif + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +class BigInteger { +public: + typedef uint64_t Type; + + BigInteger(const BigInteger& rhs) : count_(rhs.count_) { + std::memcpy(digits_, rhs.digits_, count_ * sizeof(Type)); + } + + explicit BigInteger(uint64_t u) : count_(1) { + digits_[0] = u; + } + + BigInteger(const char* decimals, size_t length) : count_(1) { + RAPIDJSON_ASSERT(length > 0); + digits_[0] = 0; + size_t i = 0; + const size_t kMaxDigitPerIteration = 19; // 2^64 = 18446744073709551616 > 10^19 + while (length >= kMaxDigitPerIteration) { + AppendDecimal64(decimals + i, decimals + i + kMaxDigitPerIteration); + length -= kMaxDigitPerIteration; + i += kMaxDigitPerIteration; + } + + if (length > 0) + AppendDecimal64(decimals + i, decimals + i + length); + } + + BigInteger& operator=(const BigInteger &rhs) + { + if (this != &rhs) { + count_ = rhs.count_; + std::memcpy(digits_, rhs.digits_, count_ * sizeof(Type)); + } + return *this; + } + + BigInteger& operator=(uint64_t u) { + digits_[0] = u; + count_ = 1; + return *this; + } + + BigInteger& operator+=(uint64_t u) { + Type backup = digits_[0]; + digits_[0] += u; + for (size_t i = 0; i < count_ - 1; i++) { + if (digits_[i] >= backup) + return *this; // no carry + backup = digits_[i + 1]; + digits_[i + 1] += 1; + } + + // Last carry + if (digits_[count_ - 1] < backup) + PushBack(1); + + return *this; + } + + BigInteger& operator*=(uint64_t u) { + if (u == 0) return *this = 0; + if (u == 1) return *this; + if (*this == 1) return *this = u; + + uint64_t k = 0; + for (size_t i = 0; i < count_; i++) { + uint64_t hi; + digits_[i] = MulAdd64(digits_[i], u, k, &hi); + k = hi; + } + + if (k > 0) + PushBack(k); + + return *this; + } + + BigInteger& operator*=(uint32_t u) { + if (u == 0) return *this = 0; + if (u == 1) return *this; + if (*this == 1) return *this = u; + + uint64_t k = 0; + for (size_t i = 0; i < count_; i++) { + const uint64_t c = digits_[i] >> 32; + const uint64_t d = digits_[i] & 0xFFFFFFFF; + const uint64_t uc = u * c; + const uint64_t ud = u * d; + const uint64_t p0 = ud + k; + const uint64_t p1 = uc + (p0 >> 32); + digits_[i] = (p0 & 0xFFFFFFFF) | (p1 << 32); + k = p1 >> 32; + } + + if (k > 0) + PushBack(k); + + return *this; + } + + BigInteger& operator<<=(size_t shift) { + if (IsZero() || shift == 0) return *this; + + size_t offset = shift / kTypeBit; + size_t interShift = shift % kTypeBit; + RAPIDJSON_ASSERT(count_ + offset <= kCapacity); + + if (interShift == 0) { + std::memmove(digits_ + offset, digits_, count_ * sizeof(Type)); + count_ += offset; + } + else { + digits_[count_] = 0; + for (size_t i = count_; i > 0; i--) + digits_[i + offset] = (digits_[i] << interShift) | (digits_[i - 1] >> (kTypeBit - interShift)); + digits_[offset] = digits_[0] << interShift; + count_ += offset; + if (digits_[count_]) + count_++; + } + + std::memset(digits_, 0, offset * sizeof(Type)); + + return *this; + } + + bool operator==(const BigInteger& rhs) const { + return count_ == rhs.count_ && std::memcmp(digits_, rhs.digits_, count_ * sizeof(Type)) == 0; + } + + bool operator==(const Type rhs) const { + return count_ == 1 && digits_[0] == rhs; + } + + BigInteger& MultiplyPow5(unsigned exp) { + static const uint32_t kPow5[12] = { + 5, + 5 * 5, + 5 * 5 * 5, + 5 * 5 * 5 * 5, + 5 * 5 * 5 
* 5 * 5, + 5 * 5 * 5 * 5 * 5 * 5, + 5 * 5 * 5 * 5 * 5 * 5 * 5, + 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, + 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, + 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, + 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, + 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 + }; + if (exp == 0) return *this; + for (; exp >= 27; exp -= 27) *this *= RAPIDJSON_UINT64_C2(0X6765C793, 0XFA10079D); // 5^27 + for (; exp >= 13; exp -= 13) *this *= static_cast(1220703125u); // 5^13 + if (exp > 0) *this *= kPow5[exp - 1]; + return *this; + } + + // Compute absolute difference of this and rhs. + // Assume this != rhs + bool Difference(const BigInteger& rhs, BigInteger* out) const { + int cmp = Compare(rhs); + RAPIDJSON_ASSERT(cmp != 0); + const BigInteger *a, *b; // Makes a > b + bool ret; + if (cmp < 0) { a = &rhs; b = this; ret = true; } + else { a = this; b = &rhs; ret = false; } + + Type borrow = 0; + for (size_t i = 0; i < a->count_; i++) { + Type d = a->digits_[i] - borrow; + if (i < b->count_) + d -= b->digits_[i]; + borrow = (d > a->digits_[i]) ? 1 : 0; + out->digits_[i] = d; + if (d != 0) + out->count_ = i + 1; + } + + return ret; + } + + int Compare(const BigInteger& rhs) const { + if (count_ != rhs.count_) + return count_ < rhs.count_ ? -1 : 1; + + for (size_t i = count_; i-- > 0;) + if (digits_[i] != rhs.digits_[i]) + return digits_[i] < rhs.digits_[i] ? -1 : 1; + + return 0; + } + + size_t GetCount() const { return count_; } + Type GetDigit(size_t index) const { RAPIDJSON_ASSERT(index < count_); return digits_[index]; } + bool IsZero() const { return count_ == 1 && digits_[0] == 0; } + +private: + void AppendDecimal64(const char* begin, const char* end) { + uint64_t u = ParseUint64(begin, end); + if (IsZero()) + *this = u; + else { + unsigned exp = static_cast(end - begin); + (MultiplyPow5(exp) <<= exp) += u; // *this = *this * 10^exp + u + } + } + + void PushBack(Type digit) { + RAPIDJSON_ASSERT(count_ < kCapacity); + digits_[count_++] = digit; + } + + static uint64_t ParseUint64(const char* begin, const char* end) { + uint64_t r = 0; + for (const char* p = begin; p != end; ++p) { + RAPIDJSON_ASSERT(*p >= '0' && *p <= '9'); + r = r * 10u + static_cast(*p - '0'); + } + return r; + } + + // Assume a * b + k < 2^128 + static uint64_t MulAdd64(uint64_t a, uint64_t b, uint64_t k, uint64_t* outHigh) { +#if defined(_MSC_VER) && defined(_M_AMD64) + uint64_t low = _umul128(a, b, outHigh) + k; + if (low < k) + (*outHigh)++; + return low; +#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__) + __extension__ typedef unsigned __int128 uint128; + uint128 p = static_cast(a) * static_cast(b); + p += k; + *outHigh = static_cast(p >> 64); + return static_cast(p); +#else + const uint64_t a0 = a & 0xFFFFFFFF, a1 = a >> 32, b0 = b & 0xFFFFFFFF, b1 = b >> 32; + uint64_t x0 = a0 * b0, x1 = a0 * b1, x2 = a1 * b0, x3 = a1 * b1; + x1 += (x0 >> 32); // can't give carry + x1 += x2; + if (x1 < x2) + x3 += (static_cast(1) << 32); + uint64_t lo = (x1 << 32) + (x0 & 0xFFFFFFFF); + uint64_t hi = x3 + (x1 >> 32); + + lo += k; + if (lo < k) + hi++; + *outHigh = hi; + return lo; +#endif + } + + static const size_t kBitCount = 3328; // 64bit * 54 > 10^1000 + static const size_t kCapacity = kBitCount / sizeof(Type); + static const size_t kTypeBit = sizeof(Type) * 8; + + Type digits_[kCapacity]; + size_t count_; +}; + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_BIGINTEGER_H_ diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_clzll.h 
b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_clzll.h
new file mode 100644
index 00000000..b43ab88d
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_clzll.h
@@ -0,0 +1,71 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_CLZLL_H_
+#define RAPIDJSON_CLZLL_H_
+
+#include "lottie_rapidjson_rapidjson.h"
+
+#if defined(_MSC_VER) && !defined(UNDER_CE)
+#include <intrin.h>
+#if defined(_WIN64)
+#pragma intrinsic(_BitScanReverse64)
+#else
+#pragma intrinsic(_BitScanReverse)
+#endif
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+inline uint32_t clzll(uint64_t x) {
+    // Passing 0 to __builtin_clzll is UB in GCC and results in an
+    // infinite loop in the software implementation.
+    RAPIDJSON_ASSERT(x != 0);
+
+#if defined(_MSC_VER) && !defined(UNDER_CE)
+    unsigned long r = 0;
+#if defined(_WIN64)
+    _BitScanReverse64(&r, x);
+#else
+    // Scan the high 32 bits.
+    if (_BitScanReverse(&r, static_cast<uint32_t>(x >> 32)))
+        return 63 - (r + 32);
+
+    // Scan the low 32 bits.
+    _BitScanReverse(&r, static_cast<uint32_t>(x & 0xFFFFFFFF));
+#endif // _WIN64
+
+    return 63 - r;
+#elif (defined(__GNUC__) && __GNUC__ >= 4) || RAPIDJSON_HAS_BUILTIN(__builtin_clzll)
+    // __builtin_clzll wrapper
+    return static_cast<uint32_t>(__builtin_clzll(x));
+#else
+    // naive version
+    uint32_t r = 0;
+    while (!(x & (static_cast<uint64_t>(1) << 63))) {
+        x <<= 1;
+        ++r;
+    }
+
+    return r;
+#endif // _MSC_VER
+}
+
+#define RAPIDJSON_CLZLL RAPIDJSON_NAMESPACE::internal::clzll
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_CLZLL_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_diyfp.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_diyfp.h
new file mode 100644
index 00000000..349a1003
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_diyfp.h
@@ -0,0 +1,257 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+// This is a C++ header-only implementation of Grisu2 algorithm from the publication:
+// Loitsch, Florian. "Printing floating-point numbers quickly and accurately with
+// integers." ACM Sigplan Notices 45.6 (2010): 233-243.
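+
+// [editor's note] A DiyFp ("do-it-yourself floating point", after the paper
+// above) is a bare significand/exponent pair whose value is f * 2^e, with no
+// sign bit, hidden bit or rounding of its own. A worked example of the double
+// conversion implemented below:
+//
+//   DiyFp v(1.5);   // IEEE-754 1.5 has significand bits 0x8000000000000 and
+//                   // biased exponent 1023, so f = 0x18000000000000 (hidden
+//                   // bit added) and e = -52; 0x18000000000000 * 2^-52 == 1.5
+//   DiyFp n = v.Normalize();   // shifts f until bit 63 is set, adjusting e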
+ +#ifndef RAPIDJSON_DIYFP_H_ +#define RAPIDJSON_DIYFP_H_ + +#include "lottie_rapidjson_rapidjson.h" +#include "lottie_rapidjson_internal_clzll.h" +#include + +#if defined(_MSC_VER) && defined(_M_AMD64) && !defined(__INTEL_COMPILER) +#include +#pragma intrinsic(_umul128) +#endif + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +#ifdef __GNUC__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(effc++) +#endif + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(padded) +#endif + +struct DiyFp { + DiyFp() : f(), e() {} + + DiyFp(uint64_t fp, int exp) : f(fp), e(exp) {} + + explicit DiyFp(double d) { + union { + double d; + uint64_t u64; + } u = { d }; + + int biased_e = static_cast((u.u64 & kDpExponentMask) >> kDpSignificandSize); + uint64_t significand = (u.u64 & kDpSignificandMask); + if (biased_e != 0) { + f = significand + kDpHiddenBit; + e = biased_e - kDpExponentBias; + } + else { + f = significand; + e = kDpMinExponent + 1; + } + } + + DiyFp operator-(const DiyFp& rhs) const { + return DiyFp(f - rhs.f, e); + } + + DiyFp operator*(const DiyFp& rhs) const { +#if defined(_MSC_VER) && defined(_M_AMD64) + uint64_t h; + uint64_t l = _umul128(f, rhs.f, &h); + if (l & (uint64_t(1) << 63)) // rounding + h++; + return DiyFp(h, e + rhs.e + 64); +#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__) + __extension__ typedef unsigned __int128 uint128; + uint128 p = static_cast(f) * static_cast(rhs.f); + uint64_t h = static_cast(p >> 64); + uint64_t l = static_cast(p); + if (l & (uint64_t(1) << 63)) // rounding + h++; + return DiyFp(h, e + rhs.e + 64); +#else + const uint64_t M32 = 0xFFFFFFFF; + const uint64_t a = f >> 32; + const uint64_t b = f & M32; + const uint64_t c = rhs.f >> 32; + const uint64_t d = rhs.f & M32; + const uint64_t ac = a * c; + const uint64_t bc = b * c; + const uint64_t ad = a * d; + const uint64_t bd = b * d; + uint64_t tmp = (bd >> 32) + (ad & M32) + (bc & M32); + tmp += 1U << 31; /// mult_round + return DiyFp(ac + (ad >> 32) + (bc >> 32) + (tmp >> 32), e + rhs.e + 64); +#endif + } + + DiyFp Normalize() const { + int s = static_cast(clzll(f)); + return DiyFp(f << s, e - s); + } + + DiyFp NormalizeBoundary() const { + DiyFp res = *this; + while (!(res.f & (kDpHiddenBit << 1))) { + res.f <<= 1; + res.e--; + } + res.f <<= (kDiySignificandSize - kDpSignificandSize - 2); + res.e = res.e - (kDiySignificandSize - kDpSignificandSize - 2); + return res; + } + + void NormalizedBoundaries(DiyFp* minus, DiyFp* plus) const { + DiyFp pl = DiyFp((f << 1) + 1, e - 1).NormalizeBoundary(); + DiyFp mi = (f == kDpHiddenBit) ? DiyFp((f << 2) - 1, e - 2) : DiyFp((f << 1) - 1, e - 1); + mi.f <<= mi.e - pl.e; + mi.e = pl.e; + *plus = pl; + *minus = mi; + } + + double ToDouble() const { + union { + double d; + uint64_t u64; + }u; + RAPIDJSON_ASSERT(f <= kDpHiddenBit + kDpSignificandMask); + if (e < kDpDenormalExponent) { + // Underflow. + return 0.0; + } + if (e >= kDpMaxExponent) { + // Overflow. + return std::numeric_limits::infinity(); + } + const uint64_t be = (e == kDpDenormalExponent && (f & kDpHiddenBit) == 0) ? 
0 : + static_cast(e + kDpExponentBias); + u.u64 = (f & kDpSignificandMask) | (be << kDpSignificandSize); + return u.d; + } + + static const int kDiySignificandSize = 64; + static const int kDpSignificandSize = 52; + static const int kDpExponentBias = 0x3FF + kDpSignificandSize; + static const int kDpMaxExponent = 0x7FF - kDpExponentBias; + static const int kDpMinExponent = -kDpExponentBias; + static const int kDpDenormalExponent = -kDpExponentBias + 1; + static const uint64_t kDpExponentMask = RAPIDJSON_UINT64_C2(0x7FF00000, 0x00000000); + static const uint64_t kDpSignificandMask = RAPIDJSON_UINT64_C2(0x000FFFFF, 0xFFFFFFFF); + static const uint64_t kDpHiddenBit = RAPIDJSON_UINT64_C2(0x00100000, 0x00000000); + + uint64_t f; + int e; +}; + +inline DiyFp GetCachedPowerByIndex(size_t index) { + // 10^-348, 10^-340, ..., 10^340 + static const uint64_t kCachedPowers_F[] = { + RAPIDJSON_UINT64_C2(0xfa8fd5a0, 0x081c0288), RAPIDJSON_UINT64_C2(0xbaaee17f, 0xa23ebf76), + RAPIDJSON_UINT64_C2(0x8b16fb20, 0x3055ac76), RAPIDJSON_UINT64_C2(0xcf42894a, 0x5dce35ea), + RAPIDJSON_UINT64_C2(0x9a6bb0aa, 0x55653b2d), RAPIDJSON_UINT64_C2(0xe61acf03, 0x3d1a45df), + RAPIDJSON_UINT64_C2(0xab70fe17, 0xc79ac6ca), RAPIDJSON_UINT64_C2(0xff77b1fc, 0xbebcdc4f), + RAPIDJSON_UINT64_C2(0xbe5691ef, 0x416bd60c), RAPIDJSON_UINT64_C2(0x8dd01fad, 0x907ffc3c), + RAPIDJSON_UINT64_C2(0xd3515c28, 0x31559a83), RAPIDJSON_UINT64_C2(0x9d71ac8f, 0xada6c9b5), + RAPIDJSON_UINT64_C2(0xea9c2277, 0x23ee8bcb), RAPIDJSON_UINT64_C2(0xaecc4991, 0x4078536d), + RAPIDJSON_UINT64_C2(0x823c1279, 0x5db6ce57), RAPIDJSON_UINT64_C2(0xc2109436, 0x4dfb5637), + RAPIDJSON_UINT64_C2(0x9096ea6f, 0x3848984f), RAPIDJSON_UINT64_C2(0xd77485cb, 0x25823ac7), + RAPIDJSON_UINT64_C2(0xa086cfcd, 0x97bf97f4), RAPIDJSON_UINT64_C2(0xef340a98, 0x172aace5), + RAPIDJSON_UINT64_C2(0xb23867fb, 0x2a35b28e), RAPIDJSON_UINT64_C2(0x84c8d4df, 0xd2c63f3b), + RAPIDJSON_UINT64_C2(0xc5dd4427, 0x1ad3cdba), RAPIDJSON_UINT64_C2(0x936b9fce, 0xbb25c996), + RAPIDJSON_UINT64_C2(0xdbac6c24, 0x7d62a584), RAPIDJSON_UINT64_C2(0xa3ab6658, 0x0d5fdaf6), + RAPIDJSON_UINT64_C2(0xf3e2f893, 0xdec3f126), RAPIDJSON_UINT64_C2(0xb5b5ada8, 0xaaff80b8), + RAPIDJSON_UINT64_C2(0x87625f05, 0x6c7c4a8b), RAPIDJSON_UINT64_C2(0xc9bcff60, 0x34c13053), + RAPIDJSON_UINT64_C2(0x964e858c, 0x91ba2655), RAPIDJSON_UINT64_C2(0xdff97724, 0x70297ebd), + RAPIDJSON_UINT64_C2(0xa6dfbd9f, 0xb8e5b88f), RAPIDJSON_UINT64_C2(0xf8a95fcf, 0x88747d94), + RAPIDJSON_UINT64_C2(0xb9447093, 0x8fa89bcf), RAPIDJSON_UINT64_C2(0x8a08f0f8, 0xbf0f156b), + RAPIDJSON_UINT64_C2(0xcdb02555, 0x653131b6), RAPIDJSON_UINT64_C2(0x993fe2c6, 0xd07b7fac), + RAPIDJSON_UINT64_C2(0xe45c10c4, 0x2a2b3b06), RAPIDJSON_UINT64_C2(0xaa242499, 0x697392d3), + RAPIDJSON_UINT64_C2(0xfd87b5f2, 0x8300ca0e), RAPIDJSON_UINT64_C2(0xbce50864, 0x92111aeb), + RAPIDJSON_UINT64_C2(0x8cbccc09, 0x6f5088cc), RAPIDJSON_UINT64_C2(0xd1b71758, 0xe219652c), + RAPIDJSON_UINT64_C2(0x9c400000, 0x00000000), RAPIDJSON_UINT64_C2(0xe8d4a510, 0x00000000), + RAPIDJSON_UINT64_C2(0xad78ebc5, 0xac620000), RAPIDJSON_UINT64_C2(0x813f3978, 0xf8940984), + RAPIDJSON_UINT64_C2(0xc097ce7b, 0xc90715b3), RAPIDJSON_UINT64_C2(0x8f7e32ce, 0x7bea5c70), + RAPIDJSON_UINT64_C2(0xd5d238a4, 0xabe98068), RAPIDJSON_UINT64_C2(0x9f4f2726, 0x179a2245), + RAPIDJSON_UINT64_C2(0xed63a231, 0xd4c4fb27), RAPIDJSON_UINT64_C2(0xb0de6538, 0x8cc8ada8), + RAPIDJSON_UINT64_C2(0x83c7088e, 0x1aab65db), RAPIDJSON_UINT64_C2(0xc45d1df9, 0x42711d9a), + RAPIDJSON_UINT64_C2(0x924d692c, 0xa61be758), RAPIDJSON_UINT64_C2(0xda01ee64, 0x1a708dea), + 
RAPIDJSON_UINT64_C2(0xa26da399, 0x9aef774a), RAPIDJSON_UINT64_C2(0xf209787b, 0xb47d6b85), + RAPIDJSON_UINT64_C2(0xb454e4a1, 0x79dd1877), RAPIDJSON_UINT64_C2(0x865b8692, 0x5b9bc5c2), + RAPIDJSON_UINT64_C2(0xc83553c5, 0xc8965d3d), RAPIDJSON_UINT64_C2(0x952ab45c, 0xfa97a0b3), + RAPIDJSON_UINT64_C2(0xde469fbd, 0x99a05fe3), RAPIDJSON_UINT64_C2(0xa59bc234, 0xdb398c25), + RAPIDJSON_UINT64_C2(0xf6c69a72, 0xa3989f5c), RAPIDJSON_UINT64_C2(0xb7dcbf53, 0x54e9bece), + RAPIDJSON_UINT64_C2(0x88fcf317, 0xf22241e2), RAPIDJSON_UINT64_C2(0xcc20ce9b, 0xd35c78a5), + RAPIDJSON_UINT64_C2(0x98165af3, 0x7b2153df), RAPIDJSON_UINT64_C2(0xe2a0b5dc, 0x971f303a), + RAPIDJSON_UINT64_C2(0xa8d9d153, 0x5ce3b396), RAPIDJSON_UINT64_C2(0xfb9b7cd9, 0xa4a7443c), + RAPIDJSON_UINT64_C2(0xbb764c4c, 0xa7a44410), RAPIDJSON_UINT64_C2(0x8bab8eef, 0xb6409c1a), + RAPIDJSON_UINT64_C2(0xd01fef10, 0xa657842c), RAPIDJSON_UINT64_C2(0x9b10a4e5, 0xe9913129), + RAPIDJSON_UINT64_C2(0xe7109bfb, 0xa19c0c9d), RAPIDJSON_UINT64_C2(0xac2820d9, 0x623bf429), + RAPIDJSON_UINT64_C2(0x80444b5e, 0x7aa7cf85), RAPIDJSON_UINT64_C2(0xbf21e440, 0x03acdd2d), + RAPIDJSON_UINT64_C2(0x8e679c2f, 0x5e44ff8f), RAPIDJSON_UINT64_C2(0xd433179d, 0x9c8cb841), + RAPIDJSON_UINT64_C2(0x9e19db92, 0xb4e31ba9), RAPIDJSON_UINT64_C2(0xeb96bf6e, 0xbadf77d9), + RAPIDJSON_UINT64_C2(0xaf87023b, 0x9bf0ee6b) + }; + static const int16_t kCachedPowers_E[] = { + -1220, -1193, -1166, -1140, -1113, -1087, -1060, -1034, -1007, -980, + -954, -927, -901, -874, -847, -821, -794, -768, -741, -715, + -688, -661, -635, -608, -582, -555, -529, -502, -475, -449, + -422, -396, -369, -343, -316, -289, -263, -236, -210, -183, + -157, -130, -103, -77, -50, -24, 3, 30, 56, 83, + 109, 136, 162, 189, 216, 242, 269, 295, 322, 348, + 375, 402, 428, 455, 481, 508, 534, 561, 588, 614, + 641, 667, 694, 720, 747, 774, 800, 827, 853, 880, + 907, 933, 960, 986, 1013, 1039, 1066 + }; + RAPIDJSON_ASSERT(index < 87); + return DiyFp(kCachedPowers_F[index], kCachedPowers_E[index]); +} + +inline DiyFp GetCachedPower(int e, int* K) { + + //int k = static_cast(ceil((-61 - e) * 0.30102999566398114)) + 374; + double dk = (-61 - e) * 0.30102999566398114 + 347; // dk must be positive, so can do ceiling in positive + int k = static_cast(dk); + if (dk - k > 0.0) + k++; + + unsigned index = static_cast((k >> 3) + 1); + *K = -(-348 + static_cast(index << 3)); // decimal exponent no need lookup table + + return GetCachedPowerByIndex(index); +} + +inline DiyFp GetCachedPower10(int exp, int *outExp) { + RAPIDJSON_ASSERT(exp >= -348); + unsigned index = static_cast(exp + 348) / 8u; + *outExp = -348 + static_cast(index) * 8; + return GetCachedPowerByIndex(index); +} + +#ifdef __GNUC__ +RAPIDJSON_DIAG_POP +#endif + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +RAPIDJSON_DIAG_OFF(padded) +#endif + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_DIYFP_H_ diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_dtoa.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_dtoa.h new file mode 100644 index 00000000..c3be702c --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_dtoa.h @@ -0,0 +1,245 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. 
You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +// This is a C++ header-only implementation of Grisu2 algorithm from the publication: +// Loitsch, Florian. "Printing floating-point numbers quickly and accurately with +// integers." ACM Sigplan Notices 45.6 (2010): 233-243. + +#ifndef RAPIDJSON_DTOA_ +#define RAPIDJSON_DTOA_ + +#include "lottie_rapidjson_internal_itoa.h" +#include "lottie_rapidjson_internal_diyfp.h" +#include "lottie_rapidjson_internal_ieee754.h" + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +#ifdef __GNUC__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(effc++) +RAPIDJSON_DIAG_OFF(array-bounds) // some gcc versions generate wrong warnings https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59124 +#endif + +inline void GrisuRound(char* buffer, int len, uint64_t delta, uint64_t rest, uint64_t ten_kappa, uint64_t wp_w) { + while (rest < wp_w && delta - rest >= ten_kappa && + (rest + ten_kappa < wp_w || /// closer + wp_w - rest > rest + ten_kappa - wp_w)) { + buffer[len - 1]--; + rest += ten_kappa; + } +} + +inline int CountDecimalDigit32(uint32_t n) { + // Simple pure C++ implementation was faster than __builtin_clz version in this situation. + if (n < 10) return 1; + if (n < 100) return 2; + if (n < 1000) return 3; + if (n < 10000) return 4; + if (n < 100000) return 5; + if (n < 1000000) return 6; + if (n < 10000000) return 7; + if (n < 100000000) return 8; + // Will not reach 10 digits in DigitGen() + //if (n < 1000000000) return 9; + //return 10; + return 9; +} + +inline void DigitGen(const DiyFp& W, const DiyFp& Mp, uint64_t delta, char* buffer, int* len, int* K) { + static const uint32_t kPow10[] = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 }; + const DiyFp one(uint64_t(1) << -Mp.e, Mp.e); + const DiyFp wp_w = Mp - W; + uint32_t p1 = static_cast(Mp.f >> -one.e); + uint64_t p2 = Mp.f & (one.f - 1); + int kappa = CountDecimalDigit32(p1); // kappa in [0, 9] + *len = 0; + + while (kappa > 0) { + uint32_t d = 0; + switch (kappa) { + case 9: d = p1 / 100000000; p1 %= 100000000; break; + case 8: d = p1 / 10000000; p1 %= 10000000; break; + case 7: d = p1 / 1000000; p1 %= 1000000; break; + case 6: d = p1 / 100000; p1 %= 100000; break; + case 5: d = p1 / 10000; p1 %= 10000; break; + case 4: d = p1 / 1000; p1 %= 1000; break; + case 3: d = p1 / 100; p1 %= 100; break; + case 2: d = p1 / 10; p1 %= 10; break; + case 1: d = p1; p1 = 0; break; + default:; + } + if (d || *len) + buffer[(*len)++] = static_cast('0' + static_cast(d)); + kappa--; + uint64_t tmp = (static_cast(p1) << -one.e) + p2; + if (tmp <= delta) { + *K += kappa; + GrisuRound(buffer, *len, delta, tmp, static_cast(kPow10[kappa]) << -one.e, wp_w.f); + return; + } + } + + // kappa = 0 + for (;;) { + p2 *= 10; + delta *= 10; + char d = static_cast(p2 >> -one.e); + if (d || *len) + buffer[(*len)++] = static_cast('0' + d); + p2 &= one.f - 1; + kappa--; + if (p2 < delta) { + *K += kappa; + int index = -kappa; + GrisuRound(buffer, *len, delta, p2, one.f, wp_w.f * (index < 9 ? 
kPow10[index] : 0)); + return; + } + } +} + +inline void Grisu2(double value, char* buffer, int* length, int* K) { + const DiyFp v(value); + DiyFp w_m, w_p; + v.NormalizedBoundaries(&w_m, &w_p); + + const DiyFp c_mk = GetCachedPower(w_p.e, K); + const DiyFp W = v.Normalize() * c_mk; + DiyFp Wp = w_p * c_mk; + DiyFp Wm = w_m * c_mk; + Wm.f++; + Wp.f--; + DigitGen(W, Wp, Wp.f - Wm.f, buffer, length, K); +} + +inline char* WriteExponent(int K, char* buffer) { + if (K < 0) { + *buffer++ = '-'; + K = -K; + } + + if (K >= 100) { + *buffer++ = static_cast('0' + static_cast(K / 100)); + K %= 100; + const char* d = GetDigitsLut() + K * 2; + *buffer++ = d[0]; + *buffer++ = d[1]; + } + else if (K >= 10) { + const char* d = GetDigitsLut() + K * 2; + *buffer++ = d[0]; + *buffer++ = d[1]; + } + else + *buffer++ = static_cast('0' + static_cast(K)); + + return buffer; +} + +inline char* Prettify(char* buffer, int length, int k, int maxDecimalPlaces) { + const int kk = length + k; // 10^(kk-1) <= v < 10^kk + + if (0 <= k && kk <= 21) { + // 1234e7 -> 12340000000 + for (int i = length; i < kk; i++) + buffer[i] = '0'; + buffer[kk] = '.'; + buffer[kk + 1] = '0'; + return &buffer[kk + 2]; + } + else if (0 < kk && kk <= 21) { + // 1234e-2 -> 12.34 + std::memmove(&buffer[kk + 1], &buffer[kk], static_cast(length - kk)); + buffer[kk] = '.'; + if (0 > k + maxDecimalPlaces) { + // When maxDecimalPlaces = 2, 1.2345 -> 1.23, 1.102 -> 1.1 + // Remove extra trailing zeros (at least one) after truncation. + for (int i = kk + maxDecimalPlaces; i > kk + 1; i--) + if (buffer[i] != '0') + return &buffer[i + 1]; + return &buffer[kk + 2]; // Reserve one zero + } + else + return &buffer[length + 1]; + } + else if (-6 < kk && kk <= 0) { + // 1234e-6 -> 0.001234 + const int offset = 2 - kk; + std::memmove(&buffer[offset], &buffer[0], static_cast(length)); + buffer[0] = '0'; + buffer[1] = '.'; + for (int i = 2; i < offset; i++) + buffer[i] = '0'; + if (length - kk > maxDecimalPlaces) { + // When maxDecimalPlaces = 2, 0.123 -> 0.12, 0.102 -> 0.1 + // Remove extra trailing zeros (at least one) after truncation. 
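+            // Illustrative: in this branch the decimals start at buffer[2]
+            // ("0." occupies indices 0..1), so the m-th decimal place sits at
+            // buffer[m + 1]; with maxDecimalPlaces = 2 the scan below starts at buffer[3].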
+            for (int i = maxDecimalPlaces + 1; i > 2; i--)
+                if (buffer[i] != '0')
+                    return &buffer[i + 1];
+            return &buffer[3]; // Reserve one zero
+        }
+        else
+            return &buffer[length + offset];
+    }
+    else if (kk < -maxDecimalPlaces) {
+        // Truncate to zero
+        buffer[0] = '0';
+        buffer[1] = '.';
+        buffer[2] = '0';
+        return &buffer[3];
+    }
+    else if (length == 1) {
+        // 1e30
+        buffer[1] = 'e';
+        return WriteExponent(kk - 1, &buffer[2]);
+    }
+    else {
+        // 1234e30 -> 1.234e33
+        std::memmove(&buffer[2], &buffer[1], static_cast<size_t>(length - 1));
+        buffer[1] = '.';
+        buffer[length + 1] = 'e';
+        return WriteExponent(kk - 1, &buffer[0 + length + 2]);
+    }
+}
+
+inline char* dtoa(double value, char* buffer, int maxDecimalPlaces = 324) {
+    RAPIDJSON_ASSERT(maxDecimalPlaces >= 1);
+    Double d(value);
+    if (d.IsZero()) {
+        if (d.Sign())
+            *buffer++ = '-';     // -0.0, Issue #289
+        buffer[0] = '0';
+        buffer[1] = '.';
+        buffer[2] = '0';
+        return &buffer[3];
+    }
+    else {
+        if (value < 0) {
+            *buffer++ = '-';
+            value = -value;
+        }
+        int length, K;
+        Grisu2(value, buffer, &length, &K);
+        return Prettify(buffer, length, K, maxDecimalPlaces);
+    }
+}
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_POP
+#endif
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_DTOA_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_ieee754.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_ieee754.h
new file mode 100644
index 00000000..bb004743
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_ieee754.h
@@ -0,0 +1,78 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_IEEE754_
+#define RAPIDJSON_IEEE754_
+
+#include "lottie_rapidjson_rapidjson.h"
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+class Double {
+public:
+    Double() {}
+    Double(double d) : d_(d) {}
+    Double(uint64_t u) : u_(u) {}
+
+    double Value() const { return d_; }
+    uint64_t Uint64Value() const { return u_; }
+
+    double NextPositiveDouble() const {
+        RAPIDJSON_ASSERT(!Sign());
+        return Double(u_ + 1).Value();
+    }
+
+    bool Sign() const { return (u_ & kSignMask) != 0; }
+    uint64_t Significand() const { return u_ & kSignificandMask; }
+    int Exponent() const { return static_cast<int>(((u_ & kExponentMask) >> kSignificandSize) - kExponentBias); }
+
+    bool IsNan() const { return (u_ & kExponentMask) == kExponentMask && Significand() != 0; }
+    bool IsInf() const { return (u_ & kExponentMask) == kExponentMask && Significand() == 0; }
+    bool IsNanOrInf() const { return (u_ & kExponentMask) == kExponentMask; }
+    bool IsNormal() const { return (u_ & kExponentMask) != 0 || Significand() == 0; }
+    bool IsZero() const { return (u_ & (kExponentMask | kSignificandMask)) == 0; }
+
+    uint64_t IntegerSignificand() const { return IsNormal() ? Significand() | kHiddenBit : Significand(); }
+    int IntegerExponent() const { return (IsNormal() ?
Exponent() : kDenormalExponent) - kSignificandSize; } + uint64_t ToBias() const { return (u_ & kSignMask) ? ~u_ + 1 : u_ | kSignMask; } + + static int EffectiveSignificandSize(int order) { + if (order >= -1021) + return 53; + else if (order <= -1074) + return 0; + else + return order + 1074; + } + +private: + static const int kSignificandSize = 52; + static const int kExponentBias = 0x3FF; + static const int kDenormalExponent = 1 - kExponentBias; + static const uint64_t kSignMask = RAPIDJSON_UINT64_C2(0x80000000, 0x00000000); + static const uint64_t kExponentMask = RAPIDJSON_UINT64_C2(0x7FF00000, 0x00000000); + static const uint64_t kSignificandMask = RAPIDJSON_UINT64_C2(0x000FFFFF, 0xFFFFFFFF); + static const uint64_t kHiddenBit = RAPIDJSON_UINT64_C2(0x00100000, 0x00000000); + + union { + double d_; + uint64_t u_; + }; +}; + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_IEEE754_ diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_itoa.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_itoa.h new file mode 100644 index 00000000..e63b1308 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_itoa.h @@ -0,0 +1,308 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
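As a quick sketch of how the Double wrapper above decomposes an IEEE-754 value (illustrative only; it assumes the default rapidjson namespace and that the vendored header resolves on the include path):

    #include <cstdio>
    #include "lottie_rapidjson_internal_ieee754.h"

    int main() {
        rapidjson::internal::Double d(6.25);  // 6.25 == 1.5625 * 2^2
        std::printf("sign=%d exponent=%d significand=0x%llx\n",
                    d.Sign() ? 1 : 0, d.Exponent(),
                    static_cast<unsigned long long>(d.Significand()));
        // IntegerSignificand() re-adds the hidden bit for normal values.
        return 0;
    }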
+ +#ifndef RAPIDJSON_ITOA_ +#define RAPIDJSON_ITOA_ + +#include "lottie_rapidjson_rapidjson.h" + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +inline const char* GetDigitsLut() { + static const char cDigitsLut[200] = { + '0','0','0','1','0','2','0','3','0','4','0','5','0','6','0','7','0','8','0','9', + '1','0','1','1','1','2','1','3','1','4','1','5','1','6','1','7','1','8','1','9', + '2','0','2','1','2','2','2','3','2','4','2','5','2','6','2','7','2','8','2','9', + '3','0','3','1','3','2','3','3','3','4','3','5','3','6','3','7','3','8','3','9', + '4','0','4','1','4','2','4','3','4','4','4','5','4','6','4','7','4','8','4','9', + '5','0','5','1','5','2','5','3','5','4','5','5','5','6','5','7','5','8','5','9', + '6','0','6','1','6','2','6','3','6','4','6','5','6','6','6','7','6','8','6','9', + '7','0','7','1','7','2','7','3','7','4','7','5','7','6','7','7','7','8','7','9', + '8','0','8','1','8','2','8','3','8','4','8','5','8','6','8','7','8','8','8','9', + '9','0','9','1','9','2','9','3','9','4','9','5','9','6','9','7','9','8','9','9' + }; + return cDigitsLut; +} + +inline char* u32toa(uint32_t value, char* buffer) { + RAPIDJSON_ASSERT(buffer != 0); + + const char* cDigitsLut = GetDigitsLut(); + + if (value < 10000) { + const uint32_t d1 = (value / 100) << 1; + const uint32_t d2 = (value % 100) << 1; + + if (value >= 1000) + *buffer++ = cDigitsLut[d1]; + if (value >= 100) + *buffer++ = cDigitsLut[d1 + 1]; + if (value >= 10) + *buffer++ = cDigitsLut[d2]; + *buffer++ = cDigitsLut[d2 + 1]; + } + else if (value < 100000000) { + // value = bbbbcccc + const uint32_t b = value / 10000; + const uint32_t c = value % 10000; + + const uint32_t d1 = (b / 100) << 1; + const uint32_t d2 = (b % 100) << 1; + + const uint32_t d3 = (c / 100) << 1; + const uint32_t d4 = (c % 100) << 1; + + if (value >= 10000000) + *buffer++ = cDigitsLut[d1]; + if (value >= 1000000) + *buffer++ = cDigitsLut[d1 + 1]; + if (value >= 100000) + *buffer++ = cDigitsLut[d2]; + *buffer++ = cDigitsLut[d2 + 1]; + + *buffer++ = cDigitsLut[d3]; + *buffer++ = cDigitsLut[d3 + 1]; + *buffer++ = cDigitsLut[d4]; + *buffer++ = cDigitsLut[d4 + 1]; + } + else { + // value = aabbbbcccc in decimal + + const uint32_t a = value / 100000000; // 1 to 42 + value %= 100000000; + + if (a >= 10) { + const unsigned i = a << 1; + *buffer++ = cDigitsLut[i]; + *buffer++ = cDigitsLut[i + 1]; + } + else + *buffer++ = static_cast('0' + static_cast(a)); + + const uint32_t b = value / 10000; // 0 to 9999 + const uint32_t c = value % 10000; // 0 to 9999 + + const uint32_t d1 = (b / 100) << 1; + const uint32_t d2 = (b % 100) << 1; + + const uint32_t d3 = (c / 100) << 1; + const uint32_t d4 = (c % 100) << 1; + + *buffer++ = cDigitsLut[d1]; + *buffer++ = cDigitsLut[d1 + 1]; + *buffer++ = cDigitsLut[d2]; + *buffer++ = cDigitsLut[d2 + 1]; + *buffer++ = cDigitsLut[d3]; + *buffer++ = cDigitsLut[d3 + 1]; + *buffer++ = cDigitsLut[d4]; + *buffer++ = cDigitsLut[d4 + 1]; + } + return buffer; +} + +inline char* i32toa(int32_t value, char* buffer) { + RAPIDJSON_ASSERT(buffer != 0); + uint32_t u = static_cast(value); + if (value < 0) { + *buffer++ = '-'; + u = ~u + 1; + } + + return u32toa(u, buffer); +} + +inline char* u64toa(uint64_t value, char* buffer) { + RAPIDJSON_ASSERT(buffer != 0); + const char* cDigitsLut = GetDigitsLut(); + const uint64_t kTen8 = 100000000; + const uint64_t kTen9 = kTen8 * 10; + const uint64_t kTen10 = kTen8 * 100; + const uint64_t kTen11 = kTen8 * 1000; + const uint64_t kTen12 = kTen8 * 10000; + const uint64_t kTen13 = kTen8 * 100000; + const 
uint64_t kTen14 = kTen8 * 1000000; + const uint64_t kTen15 = kTen8 * 10000000; + const uint64_t kTen16 = kTen8 * kTen8; + + if (value < kTen8) { + uint32_t v = static_cast(value); + if (v < 10000) { + const uint32_t d1 = (v / 100) << 1; + const uint32_t d2 = (v % 100) << 1; + + if (v >= 1000) + *buffer++ = cDigitsLut[d1]; + if (v >= 100) + *buffer++ = cDigitsLut[d1 + 1]; + if (v >= 10) + *buffer++ = cDigitsLut[d2]; + *buffer++ = cDigitsLut[d2 + 1]; + } + else { + // value = bbbbcccc + const uint32_t b = v / 10000; + const uint32_t c = v % 10000; + + const uint32_t d1 = (b / 100) << 1; + const uint32_t d2 = (b % 100) << 1; + + const uint32_t d3 = (c / 100) << 1; + const uint32_t d4 = (c % 100) << 1; + + if (value >= 10000000) + *buffer++ = cDigitsLut[d1]; + if (value >= 1000000) + *buffer++ = cDigitsLut[d1 + 1]; + if (value >= 100000) + *buffer++ = cDigitsLut[d2]; + *buffer++ = cDigitsLut[d2 + 1]; + + *buffer++ = cDigitsLut[d3]; + *buffer++ = cDigitsLut[d3 + 1]; + *buffer++ = cDigitsLut[d4]; + *buffer++ = cDigitsLut[d4 + 1]; + } + } + else if (value < kTen16) { + const uint32_t v0 = static_cast(value / kTen8); + const uint32_t v1 = static_cast(value % kTen8); + + const uint32_t b0 = v0 / 10000; + const uint32_t c0 = v0 % 10000; + + const uint32_t d1 = (b0 / 100) << 1; + const uint32_t d2 = (b0 % 100) << 1; + + const uint32_t d3 = (c0 / 100) << 1; + const uint32_t d4 = (c0 % 100) << 1; + + const uint32_t b1 = v1 / 10000; + const uint32_t c1 = v1 % 10000; + + const uint32_t d5 = (b1 / 100) << 1; + const uint32_t d6 = (b1 % 100) << 1; + + const uint32_t d7 = (c1 / 100) << 1; + const uint32_t d8 = (c1 % 100) << 1; + + if (value >= kTen15) + *buffer++ = cDigitsLut[d1]; + if (value >= kTen14) + *buffer++ = cDigitsLut[d1 + 1]; + if (value >= kTen13) + *buffer++ = cDigitsLut[d2]; + if (value >= kTen12) + *buffer++ = cDigitsLut[d2 + 1]; + if (value >= kTen11) + *buffer++ = cDigitsLut[d3]; + if (value >= kTen10) + *buffer++ = cDigitsLut[d3 + 1]; + if (value >= kTen9) + *buffer++ = cDigitsLut[d4]; + + *buffer++ = cDigitsLut[d4 + 1]; + *buffer++ = cDigitsLut[d5]; + *buffer++ = cDigitsLut[d5 + 1]; + *buffer++ = cDigitsLut[d6]; + *buffer++ = cDigitsLut[d6 + 1]; + *buffer++ = cDigitsLut[d7]; + *buffer++ = cDigitsLut[d7 + 1]; + *buffer++ = cDigitsLut[d8]; + *buffer++ = cDigitsLut[d8 + 1]; + } + else { + const uint32_t a = static_cast(value / kTen16); // 1 to 1844 + value %= kTen16; + + if (a < 10) + *buffer++ = static_cast('0' + static_cast(a)); + else if (a < 100) { + const uint32_t i = a << 1; + *buffer++ = cDigitsLut[i]; + *buffer++ = cDigitsLut[i + 1]; + } + else if (a < 1000) { + *buffer++ = static_cast('0' + static_cast(a / 100)); + + const uint32_t i = (a % 100) << 1; + *buffer++ = cDigitsLut[i]; + *buffer++ = cDigitsLut[i + 1]; + } + else { + const uint32_t i = (a / 100) << 1; + const uint32_t j = (a % 100) << 1; + *buffer++ = cDigitsLut[i]; + *buffer++ = cDigitsLut[i + 1]; + *buffer++ = cDigitsLut[j]; + *buffer++ = cDigitsLut[j + 1]; + } + + const uint32_t v0 = static_cast(value / kTen8); + const uint32_t v1 = static_cast(value % kTen8); + + const uint32_t b0 = v0 / 10000; + const uint32_t c0 = v0 % 10000; + + const uint32_t d1 = (b0 / 100) << 1; + const uint32_t d2 = (b0 % 100) << 1; + + const uint32_t d3 = (c0 / 100) << 1; + const uint32_t d4 = (c0 % 100) << 1; + + const uint32_t b1 = v1 / 10000; + const uint32_t c1 = v1 % 10000; + + const uint32_t d5 = (b1 / 100) << 1; + const uint32_t d6 = (b1 % 100) << 1; + + const uint32_t d7 = (c1 / 100) << 1; + const uint32_t d8 = (c1 % 100) << 1; + + 
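+        // Illustrative: from here the low 16 digits (value % kTen16) are emitted
+        // as eight two-digit LUT lookups, with no leading-zero suppression.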
*buffer++ = cDigitsLut[d1]; + *buffer++ = cDigitsLut[d1 + 1]; + *buffer++ = cDigitsLut[d2]; + *buffer++ = cDigitsLut[d2 + 1]; + *buffer++ = cDigitsLut[d3]; + *buffer++ = cDigitsLut[d3 + 1]; + *buffer++ = cDigitsLut[d4]; + *buffer++ = cDigitsLut[d4 + 1]; + *buffer++ = cDigitsLut[d5]; + *buffer++ = cDigitsLut[d5 + 1]; + *buffer++ = cDigitsLut[d6]; + *buffer++ = cDigitsLut[d6 + 1]; + *buffer++ = cDigitsLut[d7]; + *buffer++ = cDigitsLut[d7 + 1]; + *buffer++ = cDigitsLut[d8]; + *buffer++ = cDigitsLut[d8 + 1]; + } + + return buffer; +} + +inline char* i64toa(int64_t value, char* buffer) { + RAPIDJSON_ASSERT(buffer != 0); + uint64_t u = static_cast(value); + if (value < 0) { + *buffer++ = '-'; + u = ~u + 1; + } + + return u64toa(u, buffer); +} + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_ITOA_ diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_meta.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_meta.h new file mode 100644 index 00000000..dfc590e6 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_meta.h @@ -0,0 +1,186 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
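For reference, a minimal use of the integer writers vendored above (illustrative; note that they return one-past-the-end and do not NUL-terminate):

    #include <cstdio>
    #include "lottie_rapidjson_internal_itoa.h"

    int main() {
        char buf[21];  // a 64-bit value has at most 20 digits, plus NUL
        char* end = rapidjson::internal::u64toa(18446744073709551615ULL, buf);
        *end = '\0';   // caller terminates the string
        std::printf("%s\n", buf);
        return 0;
    }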
+
+#ifndef RAPIDJSON_INTERNAL_META_H_
+#define RAPIDJSON_INTERNAL_META_H_
+
+#include "lottie_rapidjson_rapidjson.h"
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(effc++)
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(6334)
+#endif
+
+#if RAPIDJSON_HAS_CXX11_TYPETRAITS
+#include <type_traits>
+#endif
+
+//@cond RAPIDJSON_INTERNAL
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+// Helper to wrap/convert arbitrary types to void, useful for arbitrary type matching
+template <typename T> struct Void { typedef void Type; };
+
+///////////////////////////////////////////////////////////////////////////////
+// BoolType, TrueType, FalseType
+//
+template <bool Cond> struct BoolType {
+    static const bool Value = Cond;
+    typedef BoolType Type;
+};
+typedef BoolType<true> TrueType;
+typedef BoolType<false> FalseType;
+
+
+///////////////////////////////////////////////////////////////////////////////
+// SelectIf, BoolExpr, NotExpr, AndExpr, OrExpr
+//
+
+template <bool C> struct SelectIfImpl { template <typename T1, typename T2> struct Apply { typedef T1 Type; }; };
+template <> struct SelectIfImpl<false> { template <typename T1, typename T2> struct Apply { typedef T2 Type; }; };
+template <bool C, typename T1, typename T2> struct SelectIfCond : SelectIfImpl<C>::template Apply<T1, T2> {};
+template <typename C, typename T1, typename T2> struct SelectIf : SelectIfCond<C::Value, T1, T2> {};
+
+template <bool Cond1, bool Cond2> struct AndExprCond : FalseType {};
+template <> struct AndExprCond<true, true> : TrueType {};
+template <bool Cond1, bool Cond2> struct OrExprCond : TrueType {};
+template <> struct OrExprCond<false, false> : FalseType {};
+
+template <typename C> struct BoolExpr : SelectIf<C, TrueType, FalseType>::Type {};
+template <typename C> struct NotExpr : SelectIf<C, FalseType, TrueType>::Type {};
+template <typename C1, typename C2> struct AndExpr : AndExprCond<C1::Value, C2::Value>::Type {};
+template <typename C1, typename C2> struct OrExpr : OrExprCond<C1::Value, C2::Value>::Type {};
+
+
+///////////////////////////////////////////////////////////////////////////////
+// AddConst, MaybeAddConst, RemoveConst
+template <typename T> struct AddConst { typedef const T Type; };
+template <bool Constify, typename T> struct MaybeAddConst : SelectIfCond<Constify, const T, T> {};
+template <typename T> struct RemoveConst { typedef T Type; };
+template <typename T> struct RemoveConst<const T> { typedef T Type; };
+
+
+///////////////////////////////////////////////////////////////////////////////
+// IsSame, IsConst, IsMoreConst, IsPointer
+//
+template <typename T, typename U> struct IsSame : FalseType {};
+template <typename T> struct IsSame<T, T> : TrueType {};
+
+template <typename T> struct IsConst : FalseType {};
+template <typename T> struct IsConst<const T> : TrueType {};
+
+template <typename CT, typename T>
+struct IsMoreConst
+    : AndExpr<IsSame<typename RemoveConst<CT>::Type, typename RemoveConst<T>::Type>,
+              BoolType<IsConst<CT>::Value >= IsConst<T>::Value> >::Type {};
+
+template <typename T> struct IsPointer : FalseType {};
+template <typename T> struct IsPointer<T*> : TrueType {};
+
+///////////////////////////////////////////////////////////////////////////////
+// IsBaseOf
+//
+#if RAPIDJSON_HAS_CXX11_TYPETRAITS
+
+template <typename B, typename D> struct IsBaseOf
+    : BoolType< ::std::is_base_of<B, D>::value> {};
+
+#else // simplified version adopted from Boost
+
+template <typename B, typename D> struct IsBaseOfImpl {
+    RAPIDJSON_STATIC_ASSERT(sizeof(B) != 0);
+    RAPIDJSON_STATIC_ASSERT(sizeof(D) != 0);
+
+    typedef char (&Yes)[1];
+    typedef char (&No) [2];
+
+    template <typename T>
+    static Yes Check(const D*, T);
+    static No Check(const B*, int);
+
+    struct Host {
+        operator const B*() const;
+        operator const D*();
+    };
+
+    enum { Value = (sizeof(Check(Host(), 0)) == sizeof(Yes)) };
+};
+
+template <typename B, typename D> struct IsBaseOf
+    : OrExpr<IsSame<B, D>, BoolExpr<IsBaseOfImpl<B, D> > >::Type {};
+
+#endif // RAPIDJSON_HAS_CXX11_TYPETRAITS
+
+
+//////////////////////////////////////////////////////////////////////////
+// EnableIf / DisableIf
+//
+template <bool Condition, typename T = void> struct EnableIfCond { typedef T Type; };
+template <typename T> struct EnableIfCond<false, T> { /* empty */ };
+
+template <bool Condition, typename T = void> struct DisableIfCond { typedef T Type; };
+template <typename T> struct DisableIfCond<true, T> { /* empty */ };
+
+template <typename Condition, typename T = void>
+struct EnableIf : EnableIfCond<Condition::Value, T> {};
+
+template <typename Condition, typename T = void>
+struct DisableIf : DisableIfCond<Condition::Value, T> {};
+
+// SFINAE helpers
+struct SfinaeTag {};
+template <typename T> struct RemoveSfinaeTag;
+template <typename T> struct RemoveSfinaeTag<SfinaeTag&(*)(T)> { typedef T Type; };
+
+#define RAPIDJSON_REMOVEFPTR_(type) \
+    typename ::RAPIDJSON_NAMESPACE::internal::RemoveSfinaeTag \
+        < ::RAPIDJSON_NAMESPACE::internal::SfinaeTag&(*) type>::Type
+
+#define RAPIDJSON_ENABLEIF(cond) \
+    typename ::RAPIDJSON_NAMESPACE::internal::EnableIf \
+        <RAPIDJSON_REMOVEFPTR_(cond)>::Type * = NULL
+
+#define RAPIDJSON_DISABLEIF(cond) \
+    typename ::RAPIDJSON_NAMESPACE::internal::DisableIf \
+        <RAPIDJSON_REMOVEFPTR_(cond)>::Type * = NULL
+
+#define RAPIDJSON_ENABLEIF_RETURN(cond,returntype) \
+    typename ::RAPIDJSON_NAMESPACE::internal::EnableIf \
+        <RAPIDJSON_REMOVEFPTR_(cond), RAPIDJSON_REMOVEFPTR_(returntype)>::Type
+
+#define RAPIDJSON_DISABLEIF_RETURN(cond,returntype) \
+    typename ::RAPIDJSON_NAMESPACE::internal::DisableIf \
+        <RAPIDJSON_REMOVEFPTR_(cond), RAPIDJSON_REMOVEFPTR_(returntype)>::Type
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+//@endcond
+
+#if defined(_MSC_VER) && !defined(__clang__)
+RAPIDJSON_DIAG_POP
+#endif
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_INTERNAL_META_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_pow10.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_pow10.h
new file mode 100644
index 00000000..18fae6fb
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_pow10.h
@@ -0,0 +1,55 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_POW10_
+#define RAPIDJSON_POW10_
+
+#include "lottie_rapidjson_rapidjson.h"
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+//! Computes integer powers of 10 in double (10.0^n).
+/*! This function uses a lookup table for fast and accurate results.
+    \param n non-negative exponent. Must <= 308.
+ \return 10.0^n +*/ +inline double Pow10(int n) { + static const double e[] = { // 1e-0...1e308: 309 * 8 bytes = 2472 bytes + 1e+0, + 1e+1, 1e+2, 1e+3, 1e+4, 1e+5, 1e+6, 1e+7, 1e+8, 1e+9, 1e+10, 1e+11, 1e+12, 1e+13, 1e+14, 1e+15, 1e+16, 1e+17, 1e+18, 1e+19, 1e+20, + 1e+21, 1e+22, 1e+23, 1e+24, 1e+25, 1e+26, 1e+27, 1e+28, 1e+29, 1e+30, 1e+31, 1e+32, 1e+33, 1e+34, 1e+35, 1e+36, 1e+37, 1e+38, 1e+39, 1e+40, + 1e+41, 1e+42, 1e+43, 1e+44, 1e+45, 1e+46, 1e+47, 1e+48, 1e+49, 1e+50, 1e+51, 1e+52, 1e+53, 1e+54, 1e+55, 1e+56, 1e+57, 1e+58, 1e+59, 1e+60, + 1e+61, 1e+62, 1e+63, 1e+64, 1e+65, 1e+66, 1e+67, 1e+68, 1e+69, 1e+70, 1e+71, 1e+72, 1e+73, 1e+74, 1e+75, 1e+76, 1e+77, 1e+78, 1e+79, 1e+80, + 1e+81, 1e+82, 1e+83, 1e+84, 1e+85, 1e+86, 1e+87, 1e+88, 1e+89, 1e+90, 1e+91, 1e+92, 1e+93, 1e+94, 1e+95, 1e+96, 1e+97, 1e+98, 1e+99, 1e+100, + 1e+101,1e+102,1e+103,1e+104,1e+105,1e+106,1e+107,1e+108,1e+109,1e+110,1e+111,1e+112,1e+113,1e+114,1e+115,1e+116,1e+117,1e+118,1e+119,1e+120, + 1e+121,1e+122,1e+123,1e+124,1e+125,1e+126,1e+127,1e+128,1e+129,1e+130,1e+131,1e+132,1e+133,1e+134,1e+135,1e+136,1e+137,1e+138,1e+139,1e+140, + 1e+141,1e+142,1e+143,1e+144,1e+145,1e+146,1e+147,1e+148,1e+149,1e+150,1e+151,1e+152,1e+153,1e+154,1e+155,1e+156,1e+157,1e+158,1e+159,1e+160, + 1e+161,1e+162,1e+163,1e+164,1e+165,1e+166,1e+167,1e+168,1e+169,1e+170,1e+171,1e+172,1e+173,1e+174,1e+175,1e+176,1e+177,1e+178,1e+179,1e+180, + 1e+181,1e+182,1e+183,1e+184,1e+185,1e+186,1e+187,1e+188,1e+189,1e+190,1e+191,1e+192,1e+193,1e+194,1e+195,1e+196,1e+197,1e+198,1e+199,1e+200, + 1e+201,1e+202,1e+203,1e+204,1e+205,1e+206,1e+207,1e+208,1e+209,1e+210,1e+211,1e+212,1e+213,1e+214,1e+215,1e+216,1e+217,1e+218,1e+219,1e+220, + 1e+221,1e+222,1e+223,1e+224,1e+225,1e+226,1e+227,1e+228,1e+229,1e+230,1e+231,1e+232,1e+233,1e+234,1e+235,1e+236,1e+237,1e+238,1e+239,1e+240, + 1e+241,1e+242,1e+243,1e+244,1e+245,1e+246,1e+247,1e+248,1e+249,1e+250,1e+251,1e+252,1e+253,1e+254,1e+255,1e+256,1e+257,1e+258,1e+259,1e+260, + 1e+261,1e+262,1e+263,1e+264,1e+265,1e+266,1e+267,1e+268,1e+269,1e+270,1e+271,1e+272,1e+273,1e+274,1e+275,1e+276,1e+277,1e+278,1e+279,1e+280, + 1e+281,1e+282,1e+283,1e+284,1e+285,1e+286,1e+287,1e+288,1e+289,1e+290,1e+291,1e+292,1e+293,1e+294,1e+295,1e+296,1e+297,1e+298,1e+299,1e+300, + 1e+301,1e+302,1e+303,1e+304,1e+305,1e+306,1e+307,1e+308 + }; + RAPIDJSON_ASSERT(n >= 0 && n <= 308); + return e[n]; +} + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_POW10_ diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_regex.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_regex.h new file mode 100644 index 00000000..422971b4 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_regex.h @@ -0,0 +1,739 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
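A small usage note for the Pow10 table above (illustrative): it returns the same double the corresponding literal would produce, with no runtime pow() drift.

    double x = rapidjson::internal::Pow10(17);  // identical to the literal 1e+17
    // The assert above restricts n to [0, 308]; negative powers are handled by
    // callers dividing instead (see FastPath in the strtod header later on).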
+ +#ifndef RAPIDJSON_INTERNAL_REGEX_H_ +#define RAPIDJSON_INTERNAL_REGEX_H_ + +#include "lottie_rapidjson_allocators.h" +#include "lottie_rapidjson_stream.h" +#include "lottie_rapidjson_internal_stack.h" + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(padded) +RAPIDJSON_DIAG_OFF(switch-enum) +#elif defined(_MSC_VER) +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated +#endif + +#ifdef __GNUC__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(effc++) +#endif + +#ifndef RAPIDJSON_REGEX_VERBOSE +#define RAPIDJSON_REGEX_VERBOSE 0 +#endif + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +/////////////////////////////////////////////////////////////////////////////// +// DecodedStream + +template +class DecodedStream { +public: + DecodedStream(SourceStream& ss) : ss_(ss), codepoint_() { Decode(); } + unsigned Peek() { return codepoint_; } + unsigned Take() { + unsigned c = codepoint_; + if (c) // No further decoding when '\0' + Decode(); + return c; + } + +private: + void Decode() { + if (!Encoding::Decode(ss_, &codepoint_)) + codepoint_ = 0; + } + + SourceStream& ss_; + unsigned codepoint_; +}; + +/////////////////////////////////////////////////////////////////////////////// +// GenericRegex + +static const SizeType kRegexInvalidState = ~SizeType(0); //!< Represents an invalid index in GenericRegex::State::out, out1 +static const SizeType kRegexInvalidRange = ~SizeType(0); + +template +class GenericRegexSearch; + +//! Regular expression engine with subset of ECMAscript grammar. +/*! + Supported regular expression syntax: + - \c ab Concatenation + - \c a|b Alternation + - \c a? Zero or one + - \c a* Zero or more + - \c a+ One or more + - \c a{3} Exactly 3 times + - \c a{3,} At least 3 times + - \c a{3,5} 3 to 5 times + - \c (ab) Grouping + - \c ^a At the beginning + - \c a$ At the end + - \c . Any character + - \c [abc] Character classes + - \c [a-c] Character class range + - \c [a-z0-9_] Character class combination + - \c [^abc] Negated character classes + - \c [^a-c] Negated character class range + - \c [\b] Backspace (U+0008) + - \c \\| \\\\ ... Escape characters + - \c \\f Form feed (U+000C) + - \c \\n Line feed (U+000A) + - \c \\r Carriage return (U+000D) + - \c \\t Tab (U+0009) + - \c \\v Vertical tab (U+000B) + + \note This is a Thompson NFA engine, implemented with reference to + Cox, Russ. "Regular Expression Matching Can Be Simple And Fast (but is slow in Java, Perl, PHP, Python, Ruby,...).", + https://swtch.com/~rsc/regexp/regexp1.html +*/ +template +class GenericRegex { +public: + typedef Encoding EncodingType; + typedef typename Encoding::Ch Ch; + template friend class GenericRegexSearch; + + GenericRegex(const Ch* source, Allocator* allocator = 0) : + ownAllocator_(allocator ? 0 : RAPIDJSON_NEW(Allocator)()), allocator_(allocator ? allocator : ownAllocator_), + states_(allocator_, 256), ranges_(allocator_, 256), root_(kRegexInvalidState), stateCount_(), rangeCount_(), + anchorBegin_(), anchorEnd_() + { + GenericStringStream ss(source); + DecodedStream, Encoding> ds(ss); + Parse(ds); + } + + ~GenericRegex() + { + RAPIDJSON_DELETE(ownAllocator_); + } + + bool IsValid() const { + return root_ != kRegexInvalidState; + } + +private: + enum Operator { + kZeroOrOne, + kZeroOrMore, + kOneOrMore, + kConcatenation, + kAlternation, + kLeftParenthesis + }; + + static const unsigned kAnyCharacterClass = 0xFFFFFFFF; //!< For '.' 
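+    // Usage sketch (illustrative, not part of the vendored file):
+    //   GenericRegex<UTF8<> > re("a[b-d]+e?");
+    //   GenericRegexSearch<GenericRegex<UTF8<> > > rs(re);
+    //   rs.Search("xxabcde");  // true: unanchored substring search
+    //   rs.Match("abce");      // true: must match the whole input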
+ static const unsigned kRangeCharacterClass = 0xFFFFFFFE; + static const unsigned kRangeNegationFlag = 0x80000000; + + struct Range { + unsigned start; // + unsigned end; + SizeType next; + }; + + struct State { + SizeType out; //!< Equals to kInvalid for matching state + SizeType out1; //!< Equals to non-kInvalid for split + SizeType rangeStart; + unsigned codepoint; + }; + + struct Frag { + Frag(SizeType s, SizeType o, SizeType m) : start(s), out(o), minIndex(m) {} + SizeType start; + SizeType out; //!< link-list of all output states + SizeType minIndex; + }; + + State& GetState(SizeType index) { + RAPIDJSON_ASSERT(index < stateCount_); + return states_.template Bottom()[index]; + } + + const State& GetState(SizeType index) const { + RAPIDJSON_ASSERT(index < stateCount_); + return states_.template Bottom()[index]; + } + + Range& GetRange(SizeType index) { + RAPIDJSON_ASSERT(index < rangeCount_); + return ranges_.template Bottom()[index]; + } + + const Range& GetRange(SizeType index) const { + RAPIDJSON_ASSERT(index < rangeCount_); + return ranges_.template Bottom()[index]; + } + + template + void Parse(DecodedStream& ds) { + Stack operandStack(allocator_, 256); // Frag + Stack operatorStack(allocator_, 256); // Operator + Stack atomCountStack(allocator_, 256); // unsigned (Atom per parenthesis) + + *atomCountStack.template Push() = 0; + + unsigned codepoint; + while (ds.Peek() != 0) { + switch (codepoint = ds.Take()) { + case '^': + anchorBegin_ = true; + break; + + case '$': + anchorEnd_ = true; + break; + + case '|': + while (!operatorStack.Empty() && *operatorStack.template Top() < kAlternation) + if (!Eval(operandStack, *operatorStack.template Pop(1))) + return; + *operatorStack.template Push() = kAlternation; + *atomCountStack.template Top() = 0; + break; + + case '(': + *operatorStack.template Push() = kLeftParenthesis; + *atomCountStack.template Push() = 0; + break; + + case ')': + while (!operatorStack.Empty() && *operatorStack.template Top() != kLeftParenthesis) + if (!Eval(operandStack, *operatorStack.template Pop(1))) + return; + if (operatorStack.Empty()) + return; + operatorStack.template Pop(1); + atomCountStack.template Pop(1); + ImplicitConcatenation(atomCountStack, operatorStack); + break; + + case '?': + if (!Eval(operandStack, kZeroOrOne)) + return; + break; + + case '*': + if (!Eval(operandStack, kZeroOrMore)) + return; + break; + + case '+': + if (!Eval(operandStack, kOneOrMore)) + return; + break; + + case '{': + { + unsigned n, m; + if (!ParseUnsigned(ds, &n)) + return; + + if (ds.Peek() == ',') { + ds.Take(); + if (ds.Peek() == '}') + m = kInfinityQuantifier; + else if (!ParseUnsigned(ds, &m) || m < n) + return; + } + else + m = n; + + if (!EvalQuantifier(operandStack, n, m) || ds.Peek() != '}') + return; + ds.Take(); + } + break; + + case '.': + PushOperand(operandStack, kAnyCharacterClass); + ImplicitConcatenation(atomCountStack, operatorStack); + break; + + case '[': + { + SizeType range; + if (!ParseRange(ds, &range)) + return; + SizeType s = NewState(kRegexInvalidState, kRegexInvalidState, kRangeCharacterClass); + GetState(s).rangeStart = range; + *operandStack.template Push() = Frag(s, s, s); + } + ImplicitConcatenation(atomCountStack, operatorStack); + break; + + case '\\': // Escape character + if (!CharacterEscape(ds, &codepoint)) + return; // Unsupported escape character + // fall through to default + RAPIDJSON_DELIBERATE_FALLTHROUGH; + + default: // Pattern character + PushOperand(operandStack, codepoint); + ImplicitConcatenation(atomCountStack, 
operatorStack); + } + } + + while (!operatorStack.Empty()) + if (!Eval(operandStack, *operatorStack.template Pop(1))) + return; + + // Link the operand to matching state. + if (operandStack.GetSize() == sizeof(Frag)) { + Frag* e = operandStack.template Pop(1); + Patch(e->out, NewState(kRegexInvalidState, kRegexInvalidState, 0)); + root_ = e->start; + +#if RAPIDJSON_REGEX_VERBOSE + printf("root: %d\n", root_); + for (SizeType i = 0; i < stateCount_ ; i++) { + State& s = GetState(i); + printf("[%2d] out: %2d out1: %2d c: '%c'\n", i, s.out, s.out1, (char)s.codepoint); + } + printf("\n"); +#endif + } + } + + SizeType NewState(SizeType out, SizeType out1, unsigned codepoint) { + State* s = states_.template Push(); + s->out = out; + s->out1 = out1; + s->codepoint = codepoint; + s->rangeStart = kRegexInvalidRange; + return stateCount_++; + } + + void PushOperand(Stack& operandStack, unsigned codepoint) { + SizeType s = NewState(kRegexInvalidState, kRegexInvalidState, codepoint); + *operandStack.template Push() = Frag(s, s, s); + } + + void ImplicitConcatenation(Stack& atomCountStack, Stack& operatorStack) { + if (*atomCountStack.template Top()) + *operatorStack.template Push() = kConcatenation; + (*atomCountStack.template Top())++; + } + + SizeType Append(SizeType l1, SizeType l2) { + SizeType old = l1; + while (GetState(l1).out != kRegexInvalidState) + l1 = GetState(l1).out; + GetState(l1).out = l2; + return old; + } + + void Patch(SizeType l, SizeType s) { + for (SizeType next; l != kRegexInvalidState; l = next) { + next = GetState(l).out; + GetState(l).out = s; + } + } + + bool Eval(Stack& operandStack, Operator op) { + switch (op) { + case kConcatenation: + RAPIDJSON_ASSERT(operandStack.GetSize() >= sizeof(Frag) * 2); + { + Frag e2 = *operandStack.template Pop(1); + Frag e1 = *operandStack.template Pop(1); + Patch(e1.out, e2.start); + *operandStack.template Push() = Frag(e1.start, e2.out, Min(e1.minIndex, e2.minIndex)); + } + return true; + + case kAlternation: + if (operandStack.GetSize() >= sizeof(Frag) * 2) { + Frag e2 = *operandStack.template Pop(1); + Frag e1 = *operandStack.template Pop(1); + SizeType s = NewState(e1.start, e2.start, 0); + *operandStack.template Push() = Frag(s, Append(e1.out, e2.out), Min(e1.minIndex, e2.minIndex)); + return true; + } + return false; + + case kZeroOrOne: + if (operandStack.GetSize() >= sizeof(Frag)) { + Frag e = *operandStack.template Pop(1); + SizeType s = NewState(kRegexInvalidState, e.start, 0); + *operandStack.template Push() = Frag(s, Append(e.out, s), e.minIndex); + return true; + } + return false; + + case kZeroOrMore: + if (operandStack.GetSize() >= sizeof(Frag)) { + Frag e = *operandStack.template Pop(1); + SizeType s = NewState(kRegexInvalidState, e.start, 0); + Patch(e.out, s); + *operandStack.template Push() = Frag(s, s, e.minIndex); + return true; + } + return false; + + case kOneOrMore: + if (operandStack.GetSize() >= sizeof(Frag)) { + Frag e = *operandStack.template Pop(1); + SizeType s = NewState(kRegexInvalidState, e.start, 0); + Patch(e.out, s); + *operandStack.template Push() = Frag(e.start, s, e.minIndex); + return true; + } + return false; + + default: + // syntax error (e.g. 
unclosed kLeftParenthesis) + return false; + } + } + + bool EvalQuantifier(Stack& operandStack, unsigned n, unsigned m) { + RAPIDJSON_ASSERT(n <= m); + RAPIDJSON_ASSERT(operandStack.GetSize() >= sizeof(Frag)); + + if (n == 0) { + if (m == 0) // a{0} not support + return false; + else if (m == kInfinityQuantifier) + Eval(operandStack, kZeroOrMore); // a{0,} -> a* + else { + Eval(operandStack, kZeroOrOne); // a{0,5} -> a? + for (unsigned i = 0; i < m - 1; i++) + CloneTopOperand(operandStack); // a{0,5} -> a? a? a? a? a? + for (unsigned i = 0; i < m - 1; i++) + Eval(operandStack, kConcatenation); // a{0,5} -> a?a?a?a?a? + } + return true; + } + + for (unsigned i = 0; i < n - 1; i++) // a{3} -> a a a + CloneTopOperand(operandStack); + + if (m == kInfinityQuantifier) + Eval(operandStack, kOneOrMore); // a{3,} -> a a a+ + else if (m > n) { + CloneTopOperand(operandStack); // a{3,5} -> a a a a + Eval(operandStack, kZeroOrOne); // a{3,5} -> a a a a? + for (unsigned i = n; i < m - 1; i++) + CloneTopOperand(operandStack); // a{3,5} -> a a a a? a? + for (unsigned i = n; i < m; i++) + Eval(operandStack, kConcatenation); // a{3,5} -> a a aa?a? + } + + for (unsigned i = 0; i < n - 1; i++) + Eval(operandStack, kConcatenation); // a{3} -> aaa, a{3,} -> aaa+, a{3.5} -> aaaa?a? + + return true; + } + + static SizeType Min(SizeType a, SizeType b) { return a < b ? a : b; } + + void CloneTopOperand(Stack& operandStack) { + const Frag src = *operandStack.template Top(); // Copy constructor to prevent invalidation + SizeType count = stateCount_ - src.minIndex; // Assumes top operand contains states in [src->minIndex, stateCount_) + State* s = states_.template Push(count); + memcpy(s, &GetState(src.minIndex), count * sizeof(State)); + for (SizeType j = 0; j < count; j++) { + if (s[j].out != kRegexInvalidState) + s[j].out += count; + if (s[j].out1 != kRegexInvalidState) + s[j].out1 += count; + } + *operandStack.template Push() = Frag(src.start + count, src.out + count, src.minIndex + count); + stateCount_ += count; + } + + template + bool ParseUnsigned(DecodedStream& ds, unsigned* u) { + unsigned r = 0; + if (ds.Peek() < '0' || ds.Peek() > '9') + return false; + while (ds.Peek() >= '0' && ds.Peek() <= '9') { + if (r >= 429496729 && ds.Peek() > '5') // 2^32 - 1 = 4294967295 + return false; // overflow + r = r * 10 + (ds.Take() - '0'); + } + *u = r; + return true; + } + + template + bool ParseRange(DecodedStream& ds, SizeType* range) { + bool isBegin = true; + bool negate = false; + int step = 0; + SizeType start = kRegexInvalidRange; + SizeType current = kRegexInvalidRange; + unsigned codepoint; + while ((codepoint = ds.Take()) != 0) { + if (isBegin) { + isBegin = false; + if (codepoint == '^') { + negate = true; + continue; + } + } + + switch (codepoint) { + case ']': + if (start == kRegexInvalidRange) + return false; // Error: nothing inside [] + if (step == 2) { // Add trailing '-' + SizeType r = NewRange('-'); + RAPIDJSON_ASSERT(current != kRegexInvalidRange); + GetRange(current).next = r; + } + if (negate) + GetRange(start).start |= kRangeNegationFlag; + *range = start; + return true; + + case '\\': + if (ds.Peek() == 'b') { + ds.Take(); + codepoint = 0x0008; // Escape backspace character + } + else if (!CharacterEscape(ds, &codepoint)) + return false; + // fall through to default + RAPIDJSON_DELIBERATE_FALLTHROUGH; + + default: + switch (step) { + case 1: + if (codepoint == '-') { + step++; + break; + } + // fall through to step 0 for other characters + RAPIDJSON_DELIBERATE_FALLTHROUGH; + + case 0: + { + 
SizeType r = NewRange(codepoint); + if (current != kRegexInvalidRange) + GetRange(current).next = r; + if (start == kRegexInvalidRange) + start = r; + current = r; + } + step = 1; + break; + + default: + RAPIDJSON_ASSERT(step == 2); + GetRange(current).end = codepoint; + step = 0; + } + } + } + return false; + } + + SizeType NewRange(unsigned codepoint) { + Range* r = ranges_.template Push(); + r->start = r->end = codepoint; + r->next = kRegexInvalidRange; + return rangeCount_++; + } + + template + bool CharacterEscape(DecodedStream& ds, unsigned* escapedCodepoint) { + unsigned codepoint; + switch (codepoint = ds.Take()) { + case '^': + case '$': + case '|': + case '(': + case ')': + case '?': + case '*': + case '+': + case '.': + case '[': + case ']': + case '{': + case '}': + case '\\': + *escapedCodepoint = codepoint; return true; + case 'f': *escapedCodepoint = 0x000C; return true; + case 'n': *escapedCodepoint = 0x000A; return true; + case 'r': *escapedCodepoint = 0x000D; return true; + case 't': *escapedCodepoint = 0x0009; return true; + case 'v': *escapedCodepoint = 0x000B; return true; + default: + return false; // Unsupported escape character + } + } + + Allocator* ownAllocator_; + Allocator* allocator_; + Stack states_; + Stack ranges_; + SizeType root_; + SizeType stateCount_; + SizeType rangeCount_; + + static const unsigned kInfinityQuantifier = ~0u; + + // For SearchWithAnchoring() + bool anchorBegin_; + bool anchorEnd_; +}; + +template +class GenericRegexSearch { +public: + typedef typename RegexType::EncodingType Encoding; + typedef typename Encoding::Ch Ch; + + GenericRegexSearch(const RegexType& regex, Allocator* allocator = 0) : + regex_(regex), allocator_(allocator), ownAllocator_(0), + state0_(allocator, 0), state1_(allocator, 0), stateSet_() + { + RAPIDJSON_ASSERT(regex_.IsValid()); + if (!allocator_) + ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)(); + stateSet_ = static_cast(allocator_->Malloc(GetStateSetSize())); + state0_.template Reserve(regex_.stateCount_); + state1_.template Reserve(regex_.stateCount_); + } + + ~GenericRegexSearch() { + Allocator::Free(stateSet_); + RAPIDJSON_DELETE(ownAllocator_); + } + + template + bool Match(InputStream& is) { + return SearchWithAnchoring(is, true, true); + } + + bool Match(const Ch* s) { + GenericStringStream is(s); + return Match(is); + } + + template + bool Search(InputStream& is) { + return SearchWithAnchoring(is, regex_.anchorBegin_, regex_.anchorEnd_); + } + + bool Search(const Ch* s) { + GenericStringStream is(s); + return Search(is); + } + +private: + typedef typename RegexType::State State; + typedef typename RegexType::Range Range; + + template + bool SearchWithAnchoring(InputStream& is, bool anchorBegin, bool anchorEnd) { + DecodedStream ds(is); + + state0_.Clear(); + Stack *current = &state0_, *next = &state1_; + const size_t stateSetSize = GetStateSetSize(); + std::memset(stateSet_, 0, stateSetSize); + + bool matched = AddState(*current, regex_.root_); + unsigned codepoint; + while (!current->Empty() && (codepoint = ds.Take()) != 0) { + std::memset(stateSet_, 0, stateSetSize); + next->Clear(); + matched = false; + for (const SizeType* s = current->template Bottom(); s != current->template End(); ++s) { + const State& sr = regex_.GetState(*s); + if (sr.codepoint == codepoint || + sr.codepoint == RegexType::kAnyCharacterClass || + (sr.codepoint == RegexType::kRangeCharacterClass && MatchRange(sr.rangeStart, codepoint))) + { + matched = AddState(*next, sr.out) || matched; + if (!anchorEnd && matched) + 
return true; + } + if (!anchorBegin) + AddState(*next, regex_.root_); + } + internal::Swap(current, next); + } + + return matched; + } + + size_t GetStateSetSize() const { + return (regex_.stateCount_ + 31) / 32 * 4; + } + + // Return whether the added states is a match state + bool AddState(Stack& l, SizeType index) { + RAPIDJSON_ASSERT(index != kRegexInvalidState); + + const State& s = regex_.GetState(index); + if (s.out1 != kRegexInvalidState) { // Split + bool matched = AddState(l, s.out); + return AddState(l, s.out1) || matched; + } + else if (!(stateSet_[index >> 5] & (1u << (index & 31)))) { + stateSet_[index >> 5] |= (1u << (index & 31)); + *l.template PushUnsafe() = index; + } + return s.out == kRegexInvalidState; // by using PushUnsafe() above, we can ensure s is not validated due to reallocation. + } + + bool MatchRange(SizeType rangeIndex, unsigned codepoint) const { + bool yes = (regex_.GetRange(rangeIndex).start & RegexType::kRangeNegationFlag) == 0; + while (rangeIndex != kRegexInvalidRange) { + const Range& r = regex_.GetRange(rangeIndex); + if (codepoint >= (r.start & ~RegexType::kRangeNegationFlag) && codepoint <= r.end) + return yes; + rangeIndex = r.next; + } + return !yes; + } + + const RegexType& regex_; + Allocator* allocator_; + Allocator* ownAllocator_; + Stack state0_; + Stack state1_; + uint32_t* stateSet_; +}; + +typedef GenericRegex > Regex; +typedef GenericRegexSearch RegexSearch; + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#ifdef __GNUC__ +RAPIDJSON_DIAG_POP +#endif + +#if defined(__clang__) || defined(_MSC_VER) +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_INTERNAL_REGEX_H_ diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_stack.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_stack.h new file mode 100644 index 00000000..173e0fec --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_stack.h @@ -0,0 +1,232 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_INTERNAL_STACK_H_ +#define RAPIDJSON_INTERNAL_STACK_H_ + +#include "lottie_rapidjson_allocators.h" +#include "lottie_rapidjson_internal_swap.h" +#include + +#if defined(__clang__) +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(c++98-compat) +#endif + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +/////////////////////////////////////////////////////////////////////////////// +// Stack + +//! A type-unsafe stack for storing different types of data. +/*! \tparam Allocator Allocator for allocating stack memory. +*/ +template +class Stack { +public: + // Optimization note: Do not allocate memory for stack_ in constructor. + // Do it lazily when first Push() -> Expand() -> Resize(). 
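+    // Usage sketch (illustrative): the stack is type-unsafe, so callers must
+    // pair Push<T>() and Pop<T>() with the same T:
+    //   Stack<CrtAllocator> s(0, 256);  // allocator is created lazily
+    //   *s.Push<int>() = 42;
+    //   int v = *s.Pop<int>(1);         // v == 42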
+ Stack(Allocator* allocator, size_t stackCapacity) : allocator_(allocator), ownAllocator_(0), stack_(0), stackTop_(0), stackEnd_(0), initialCapacity_(stackCapacity) { + } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + Stack(Stack&& rhs) + : allocator_(rhs.allocator_), + ownAllocator_(rhs.ownAllocator_), + stack_(rhs.stack_), + stackTop_(rhs.stackTop_), + stackEnd_(rhs.stackEnd_), + initialCapacity_(rhs.initialCapacity_) + { + rhs.allocator_ = 0; + rhs.ownAllocator_ = 0; + rhs.stack_ = 0; + rhs.stackTop_ = 0; + rhs.stackEnd_ = 0; + rhs.initialCapacity_ = 0; + } +#endif + + ~Stack() { + Destroy(); + } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + Stack& operator=(Stack&& rhs) { + if (&rhs != this) + { + Destroy(); + + allocator_ = rhs.allocator_; + ownAllocator_ = rhs.ownAllocator_; + stack_ = rhs.stack_; + stackTop_ = rhs.stackTop_; + stackEnd_ = rhs.stackEnd_; + initialCapacity_ = rhs.initialCapacity_; + + rhs.allocator_ = 0; + rhs.ownAllocator_ = 0; + rhs.stack_ = 0; + rhs.stackTop_ = 0; + rhs.stackEnd_ = 0; + rhs.initialCapacity_ = 0; + } + return *this; + } +#endif + + void Swap(Stack& rhs) RAPIDJSON_NOEXCEPT { + internal::Swap(allocator_, rhs.allocator_); + internal::Swap(ownAllocator_, rhs.ownAllocator_); + internal::Swap(stack_, rhs.stack_); + internal::Swap(stackTop_, rhs.stackTop_); + internal::Swap(stackEnd_, rhs.stackEnd_); + internal::Swap(initialCapacity_, rhs.initialCapacity_); + } + + void Clear() { stackTop_ = stack_; } + + void ShrinkToFit() { + if (Empty()) { + // If the stack is empty, completely deallocate the memory. + Allocator::Free(stack_); // NOLINT (+clang-analyzer-unix.Malloc) + stack_ = 0; + stackTop_ = 0; + stackEnd_ = 0; + } + else + Resize(GetSize()); + } + + // Optimization note: try to minimize the size of this function for force inline. + // Expansion is run very infrequently, so it is moved to another (probably non-inline) function. 
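+    // Illustrative: Expand() below grows capacity by half each time
+    // (newCapacity += (newCapacity + 1) / 2), e.g. 256 -> 384 -> 576 bytes.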
+ template + RAPIDJSON_FORCEINLINE void Reserve(size_t count = 1) { + // Expand the stack if needed + if (RAPIDJSON_UNLIKELY(static_cast(sizeof(T) * count) > (stackEnd_ - stackTop_))) + Expand(count); + } + + template + RAPIDJSON_FORCEINLINE T* Push(size_t count = 1) { + Reserve(count); + return PushUnsafe(count); + } + + template + RAPIDJSON_FORCEINLINE T* PushUnsafe(size_t count = 1) { + RAPIDJSON_ASSERT(stackTop_); + RAPIDJSON_ASSERT(static_cast(sizeof(T) * count) <= (stackEnd_ - stackTop_)); + T* ret = reinterpret_cast(stackTop_); + stackTop_ += sizeof(T) * count; + return ret; + } + + template + T* Pop(size_t count) { + RAPIDJSON_ASSERT(GetSize() >= count * sizeof(T)); + stackTop_ -= count * sizeof(T); + return reinterpret_cast(stackTop_); + } + + template + T* Top() { + RAPIDJSON_ASSERT(GetSize() >= sizeof(T)); + return reinterpret_cast(stackTop_ - sizeof(T)); + } + + template + const T* Top() const { + RAPIDJSON_ASSERT(GetSize() >= sizeof(T)); + return reinterpret_cast(stackTop_ - sizeof(T)); + } + + template + T* End() { return reinterpret_cast(stackTop_); } + + template + const T* End() const { return reinterpret_cast(stackTop_); } + + template + T* Bottom() { return reinterpret_cast(stack_); } + + template + const T* Bottom() const { return reinterpret_cast(stack_); } + + bool HasAllocator() const { + return allocator_ != 0; + } + + Allocator& GetAllocator() { + RAPIDJSON_ASSERT(allocator_); + return *allocator_; + } + + bool Empty() const { return stackTop_ == stack_; } + size_t GetSize() const { return static_cast(stackTop_ - stack_); } + size_t GetCapacity() const { return static_cast(stackEnd_ - stack_); } + +private: + template + void Expand(size_t count) { + // Only expand the capacity if the current stack exists. Otherwise just create a stack with initial capacity. + size_t newCapacity; + if (stack_ == 0) { + if (!allocator_) + ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)(); + newCapacity = initialCapacity_; + } else { + newCapacity = GetCapacity(); + newCapacity += (newCapacity + 1) / 2; + } + size_t newSize = GetSize() + sizeof(T) * count; + if (newCapacity < newSize) + newCapacity = newSize; + + Resize(newCapacity); + } + + void Resize(size_t newCapacity) { + const size_t size = GetSize(); // Backup the current size + stack_ = static_cast(allocator_->Realloc(stack_, GetCapacity(), newCapacity)); + stackTop_ = stack_ + size; + stackEnd_ = stack_ + newCapacity; + } + + void Destroy() { + Allocator::Free(stack_); + RAPIDJSON_DELETE(ownAllocator_); // Only delete if it is owned by the stack + } + + // Prohibit copy constructor & assignment operator. + Stack(const Stack&); + Stack& operator=(const Stack&); + + Allocator* allocator_; + Allocator* ownAllocator_; + char *stack_; + char *stackTop_; + char *stackEnd_; + size_t initialCapacity_; +}; + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#if defined(__clang__) +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_STACK_H_ diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_strfunc.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_strfunc.h new file mode 100644 index 00000000..7fa0d81e --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_strfunc.h @@ -0,0 +1,69 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. 
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_INTERNAL_STRFUNC_H_
+#define RAPIDJSON_INTERNAL_STRFUNC_H_
+
+#include "lottie_rapidjson_stream.h"
+#include <cwchar>
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+//! Custom strlen() which works on different character types.
+/*! \tparam Ch Character type (e.g. char, wchar_t, short)
+    \param s Null-terminated input string.
+    \return Number of characters in the string.
+    \note This has the same semantics as strlen(), the return value is not number of Unicode codepoints.
+*/
+template <typename Ch>
+inline SizeType StrLen(const Ch* s) {
+    RAPIDJSON_ASSERT(s != 0);
+    const Ch* p = s;
+    while (*p) ++p;
+    return SizeType(p - s);
+}
+
+template <>
+inline SizeType StrLen(const char* s) {
+    return SizeType(std::strlen(s));
+}
+
+template <>
+inline SizeType StrLen(const wchar_t* s) {
+    return SizeType(std::wcslen(s));
+}
+
+//! Returns number of code points in an encoded string.
+template <typename Encoding>
+bool CountStringCodePoint(const typename Encoding::Ch* s, SizeType length, SizeType* outCount) {
+    RAPIDJSON_ASSERT(s != 0);
+    RAPIDJSON_ASSERT(outCount != 0);
+    GenericStringStream<Encoding> is(s);
+    const typename Encoding::Ch* end = s + length;
+    SizeType count = 0;
+    while (is.src_ < end) {
+        unsigned codepoint;
+        if (!Encoding::Decode(is, &codepoint))
+            return false;
+        count++;
+    }
+    *outCount = count;
+    return true;
+}
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_INTERNAL_STRFUNC_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_strtod.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_strtod.h
new file mode 100644
index 00000000..fc1bc5d6
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_strtod.h
@@ -0,0 +1,290 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
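One property worth illustrating before the implementation below: the fast path is exact whenever both operands of the final multiply or divide are exactly representable doubles.

    // 1234.0 and Pow10(2) == 100.0 are both exact doubles, so this divides
    // once and rounds once, giving the correctly rounded result for 12.34:
    double d = rapidjson::internal::FastPath(1234.0, -2);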
+
+#ifndef RAPIDJSON_STRTOD_
+#define RAPIDJSON_STRTOD_
+
+#include "lottie_rapidjson_internal_ieee754.h"
+#include "lottie_rapidjson_internal_biginteger.h"
+#include "lottie_rapidjson_internal_diyfp.h"
+#include "lottie_rapidjson_internal_pow10.h"
+#include <climits>
+#include <locale.h>
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+inline double FastPath(double significand, int exp) {
+    if (exp < -308)
+        return 0.0;
+    else if (exp >= 0)
+        return significand * internal::Pow10(exp);
+    else
+        return significand / internal::Pow10(-exp);
+}
+
+inline double StrtodNormalPrecision(double d, int p) {
+    if (p < -308) {
+        // Prevent expSum < -308, making Pow10(p) = 0
+        d = FastPath(d, -308);
+        d = FastPath(d, p + 308);
+    }
+    else
+        d = FastPath(d, p);
+    return d;
+}
+
+template <typename T>
+inline T Min3(T a, T b, T c) {
+    T m = a;
+    if (m > b) m = b;
+    if (m > c) m = c;
+    return m;
+}
+
+inline int CheckWithinHalfULP(double b, const BigInteger& d, int dExp) {
+    const Double db(b);
+    const uint64_t bInt = db.IntegerSignificand();
+    const int bExp = db.IntegerExponent();
+    const int hExp = bExp - 1;
+
+    int dS_Exp2 = 0, dS_Exp5 = 0, bS_Exp2 = 0, bS_Exp5 = 0, hS_Exp2 = 0, hS_Exp5 = 0;
+
+    // Adjust for decimal exponent
+    if (dExp >= 0) {
+        dS_Exp2 += dExp;
+        dS_Exp5 += dExp;
+    }
+    else {
+        bS_Exp2 -= dExp;
+        bS_Exp5 -= dExp;
+        hS_Exp2 -= dExp;
+        hS_Exp5 -= dExp;
+    }
+
+    // Adjust for binary exponent
+    if (bExp >= 0)
+        bS_Exp2 += bExp;
+    else {
+        dS_Exp2 -= bExp;
+        hS_Exp2 -= bExp;
+    }
+
+    // Adjust for half ulp exponent
+    if (hExp >= 0)
+        hS_Exp2 += hExp;
+    else {
+        dS_Exp2 -= hExp;
+        bS_Exp2 -= hExp;
+    }
+
+    // Remove common power of two factor from all three scaled values
+    int common_Exp2 = Min3(dS_Exp2, bS_Exp2, hS_Exp2);
+    dS_Exp2 -= common_Exp2;
+    bS_Exp2 -= common_Exp2;
+    hS_Exp2 -= common_Exp2;
+
+    BigInteger dS = d;
+    dS.MultiplyPow5(static_cast<unsigned>(dS_Exp5)) <<= static_cast<unsigned>(dS_Exp2);
+
+    BigInteger bS(bInt);
+    bS.MultiplyPow5(static_cast<unsigned>(bS_Exp5)) <<= static_cast<unsigned>(bS_Exp2);
+
+    BigInteger hS(1);
+    hS.MultiplyPow5(static_cast<unsigned>(hS_Exp5)) <<= static_cast<unsigned>(hS_Exp2);
+
+    BigInteger delta(0);
+    dS.Difference(bS, &delta);
+
+    return delta.Compare(hS);
+}
+
+inline bool StrtodFast(double d, int p, double* result) {
+    // Use fast path for string-to-double conversion if possible
+    // see http://www.exploringbinary.com/fast-path-decimal-to-floating-point-conversion/
+    if (p > 22 && p < 22 + 16) {
+        // Fast Path Cases In Disguise
+        d *= internal::Pow10(p - 22);
+        p = 22;
+    }
+
+    if (p >= -22 && p <= 22 && d <= 9007199254740991.0) { // 2^53 - 1
+        *result = FastPath(d, p);
+        return true;
+    }
+    else
+        return false;
+}
+
+// Compute an approximation and see if it is within 1/2 ULP
+inline bool StrtodDiyFp(const char* decimals, int dLen, int dExp, double* result) {
+    uint64_t significand = 0;
+    int i = 0;   // 2^64 - 1 = 18446744073709551615, 1844674407370955161 = 0x1999999999999999
+    for (; i < dLen; i++) {
+        if (significand > RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) ||
+            (significand == RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) && decimals[i] > '5'))
+            break;
+        significand = significand * 10u + static_cast<unsigned>(decimals[i] - '0');
+    }
+
+    if (i < dLen && decimals[i] >= '5') // Rounding
+        significand++;
+
+    int remaining = dLen - i;
+    const int kUlpShift = 3;
+    const int kUlp = 1 << kUlpShift;
+    int64_t error = (remaining == 0) ? 0 : kUlp / 2;
+
+    DiyFp v(significand, 0);
+    v = v.Normalize();
+    error <<= -v.e;
+
+    dExp += remaining;
+
+    int actualExp;
+    DiyFp cachedPower = GetCachedPower10(dExp, &actualExp);
+    if (actualExp != dExp) {
+        static const DiyFp kPow10[] = {
+            DiyFp(RAPIDJSON_UINT64_C2(0xa0000000, 0x00000000), -60),  // 10^1
+            DiyFp(RAPIDJSON_UINT64_C2(0xc8000000, 0x00000000), -57),  // 10^2
+            DiyFp(RAPIDJSON_UINT64_C2(0xfa000000, 0x00000000), -54),  // 10^3
+            DiyFp(RAPIDJSON_UINT64_C2(0x9c400000, 0x00000000), -50),  // 10^4
+            DiyFp(RAPIDJSON_UINT64_C2(0xc3500000, 0x00000000), -47),  // 10^5
+            DiyFp(RAPIDJSON_UINT64_C2(0xf4240000, 0x00000000), -44),  // 10^6
+            DiyFp(RAPIDJSON_UINT64_C2(0x98968000, 0x00000000), -40)   // 10^7
+        };
+        int adjustment = dExp - actualExp;
+        RAPIDJSON_ASSERT(adjustment >= 1 && adjustment < 8);
+        v = v * kPow10[adjustment - 1];
+        if (dLen + adjustment > 19) // has more digits than decimal digits in 64-bit
+            error += kUlp / 2;
+    }
+
+    v = v * cachedPower;
+
+    error += kUlp + (error == 0 ? 0 : 1);
+
+    const int oldExp = v.e;
+    v = v.Normalize();
+    error <<= oldExp - v.e;
+
+    const int effectiveSignificandSize = Double::EffectiveSignificandSize(64 + v.e);
+    int precisionSize = 64 - effectiveSignificandSize;
+    if (precisionSize + kUlpShift >= 64) {
+        int scaleExp = (precisionSize + kUlpShift) - 63;
+        v.f >>= scaleExp;
+        v.e += scaleExp;
+        error = (error >> scaleExp) + 1 + kUlp;
+        precisionSize -= scaleExp;
+    }
+
+    DiyFp rounded(v.f >> precisionSize, v.e + precisionSize);
+    const uint64_t precisionBits = (v.f & ((uint64_t(1) << precisionSize) - 1)) * kUlp;
+    const uint64_t halfWay = (uint64_t(1) << (precisionSize - 1)) * kUlp;
+    if (precisionBits >= halfWay + static_cast<unsigned>(error)) {
+        rounded.f++;
+        if (rounded.f & (DiyFp::kDpHiddenBit << 1)) { // rounding overflows mantissa (issue #340)
+            rounded.f >>= 1;
+            rounded.e++;
+        }
+    }
+
+    *result = rounded.ToDouble();
+
+    return halfWay - static_cast<unsigned>(error) >= precisionBits || precisionBits >= halfWay + static_cast<unsigned>(error);
+}
+
+inline double StrtodBigInteger(double approx, const char* decimals, int dLen, int dExp) {
+    RAPIDJSON_ASSERT(dLen >= 0);
+    const BigInteger dInt(decimals, static_cast<unsigned>(dLen));
+    Double a(approx);
+    int cmp = CheckWithinHalfULP(a.Value(), dInt, dExp);
+    if (cmp < 0)
+        return a.Value();  // within half ULP
+    else if (cmp == 0) {
+        // Round towards even
+        if (a.Significand() & 1)
+            return a.NextPositiveDouble();
+        else
+            return a.Value();
+    }
+    else // adjustment
+        return a.NextPositiveDouble();
+}
+
+inline double StrtodFullPrecision(double d, int p, const char* decimals, size_t length, size_t decimalPosition, int exp) {
+    RAPIDJSON_ASSERT(d >= 0.0);
+    RAPIDJSON_ASSERT(length >= 1);
+
+    double result = 0.0;
+    if (StrtodFast(d, p, &result))
+        return result;
+
+    RAPIDJSON_ASSERT(length <= INT_MAX);
+    int dLen = static_cast<int>(length);
+
+    RAPIDJSON_ASSERT(length >= decimalPosition);
+    RAPIDJSON_ASSERT(length - decimalPosition <= INT_MAX);
+    int dExpAdjust = static_cast<int>(length - decimalPosition);
+
+    RAPIDJSON_ASSERT(exp >= INT_MIN + dExpAdjust);
+    int dExp = exp - dExpAdjust;
+
+    // Make sure length+dExp does not overflow
+    RAPIDJSON_ASSERT(dExp <= INT_MAX - dLen);
+
+    // Trim leading zeros
+    while (dLen > 0 && *decimals == '0') {
+        dLen--;
+        decimals++;
+    }
+
+    // Trim trailing zeros
+    while (dLen > 0 && decimals[dLen - 1] == '0') {
+        dLen--;
+        dExp++;
+    }
+
+    if (dLen == 0) { // Buffer only contains zeros.
+        return 0.0;
+    }
+
+    // Trim right-most digits
+    const int kMaxDecimalDigit = 767 + 1;
+    if (dLen > kMaxDecimalDigit) {
+        dExp += dLen - kMaxDecimalDigit;
+        dLen = kMaxDecimalDigit;
+    }
+
+    // If too small, underflow to zero.
+    // Any x <= 10^-324 is interpreted as zero.
+    if (dLen + dExp <= -324)
+        return 0.0;
+
+    // If too large, overflow to infinity.
+    // Any x >= 10^309 is interpreted as +infinity.
+    if (dLen + dExp > 309)
+        return std::numeric_limits<double>::infinity();
+
+    if (StrtodDiyFp(decimals, dLen, dExp, &result))
+        return result;
+
+    // Use approximation from StrtodDiyFp and make adjustment with BigInteger comparison
+    return StrtodBigInteger(result, decimals, dLen, dExp);
+}
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_STRTOD_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_swap.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_swap.h
new file mode 100644
index 00000000..816fe3cc
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_internal_swap.h
@@ -0,0 +1,46 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_INTERNAL_SWAP_H_
+#define RAPIDJSON_INTERNAL_SWAP_H_
+
+#include "lottie_rapidjson_rapidjson.h"
+
+#if defined(__clang__)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(c++98-compat)
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+//! Custom swap() to avoid dependency on C++ <algorithm> header
+/*! \tparam T Type of the arguments to swap, should be instantiated with primitive C++ types only.
+    \note This has the same semantics as std::swap().
+*/
+template <typename T>
+inline void Swap(T& a, T& b) RAPIDJSON_NOEXCEPT {
+    T tmp = a;
+    a = b;
+    b = tmp;
+}
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#if defined(__clang__)
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_INTERNAL_SWAP_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_istreamwrapper.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_istreamwrapper.h
new file mode 100644
index 00000000..47b279f9
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_istreamwrapper.h
@@ -0,0 +1,128 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
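The three strtod routines above form a ladder: StrtodFast() handles exactly-representable inputs, StrtodDiyFp() computes a 64-bit approximation while tracking its error in 1/8-ULP units, and StrtodBigInteger() arbitrates only when that error bound straddles a rounding boundary. The fast path is exact because doubles represent every integer below 2^53 and every power of ten up to 10^22 without error; a self-contained sketch of that argument:

```cpp
// Sketch of the StrtodFast() exactness argument; no vendored headers needed.
#include <cstdio>

int main() {
    // The parser decomposes "123.456" into d = 123456 and p = -3.
    double d = 123456.0;   // integer significand, exact (< 2^53)
    int    p = -3;         // decimal exponent, within the |p| <= 22 window
    double pow10 = 1000.0; // 10^3 is exactly representable as a double
    (void)p;
    // One IEEE division of two exact values is correctly rounded by definition.
    std::printf("%.17g\n", d / pow10);
    return 0;
}
```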
+
+#ifndef RAPIDJSON_ISTREAMWRAPPER_H_
+#define RAPIDJSON_ISTREAMWRAPPER_H_
+
+#include "lottie_rapidjson_stream.h"
+#include <iosfwd>
+#include <ios>
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(padded)
+#elif defined(_MSC_VER)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(4351) // new behavior: elements of array 'array' will be default initialized
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+//! Wrapper of \c std::basic_istream into RapidJSON's Stream concept.
+/*!
+    The classes that can be wrapped include, but are not limited to:
+
+    - \c std::istringstream
+    - \c std::stringstream
+    - \c std::wistringstream
+    - \c std::wstringstream
+    - \c std::ifstream
+    - \c std::fstream
+    - \c std::wifstream
+    - \c std::wfstream
+
+    \tparam StreamType Class derived from \c std::basic_istream.
+*/
+
+template <typename StreamType>
+class BasicIStreamWrapper {
+public:
+    typedef typename StreamType::char_type Ch;
+
+    //! Constructor.
+    /*!
+        \param stream stream opened for read.
+    */
+    BasicIStreamWrapper(StreamType &stream) : stream_(stream), buffer_(peekBuffer_), bufferSize_(4), bufferLast_(0), current_(buffer_), readCount_(0), count_(0), eof_(false) {
+        Read();
+    }
+
+    //! Constructor.
+    /*!
+        \param stream stream opened for read.
+        \param buffer user-supplied buffer.
+        \param bufferSize size of buffer in bytes. Must be >= 4 bytes.
+    */
+    BasicIStreamWrapper(StreamType &stream, char* buffer, size_t bufferSize) : stream_(stream), buffer_(buffer), bufferSize_(bufferSize), bufferLast_(0), current_(buffer_), readCount_(0), count_(0), eof_(false) {
+        RAPIDJSON_ASSERT(bufferSize >= 4);
+        Read();
+    }
+
+    Ch Peek() const { return *current_; }
+    Ch Take() { Ch c = *current_; Read(); return c; }
+    size_t Tell() const { return count_ + static_cast<size_t>(current_ - buffer_); }
+
+    // Not implemented
+    void Put(Ch) { RAPIDJSON_ASSERT(false); }
+    void Flush() { RAPIDJSON_ASSERT(false); }
+    Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
+    size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
+
+    // For encoding detection only.
+    const Ch* Peek4() const {
+        return (current_ + 4 - !eof_ <= bufferLast_) ? current_ : 0;
+    }
+
+private:
+    BasicIStreamWrapper();
+    BasicIStreamWrapper(const BasicIStreamWrapper&);
+    BasicIStreamWrapper& operator=(const BasicIStreamWrapper&);
+
+    void Read() {
+        if (current_ < bufferLast_)
+            ++current_;
+        else if (!eof_) {
+            count_ += readCount_;
+            readCount_ = bufferSize_;
+            bufferLast_ = buffer_ + readCount_ - 1;
+            current_ = buffer_;
+
+            if (!stream_.read(buffer_, static_cast<std::streamsize>(bufferSize_))) {
+                readCount_ = static_cast<size_t>(stream_.gcount());
+                *(bufferLast_ = buffer_ + readCount_) = '\0';
+                eof_ = true;
+            }
+        }
+    }
+
+    StreamType &stream_;
+    Ch peekBuffer_[4], *buffer_;
+    size_t bufferSize_;
+    Ch *bufferLast_;
+    Ch *current_;
+    size_t readCount_;
+    size_t count_;  //!< Number of characters read
+    bool eof_;
+};
+
+typedef BasicIStreamWrapper<std::istream> IStreamWrapper;
+typedef BasicIStreamWrapper<std::wistream> WIStreamWrapper;
+
+#if defined(__clang__) || defined(_MSC_VER)
+RAPIDJSON_DIAG_POP
+#endif
+
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_ISTREAMWRAPPER_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_memorybuffer.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_memorybuffer.h
new file mode 100644
index 00000000..100a46fd
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_memorybuffer.h
@@ -0,0 +1,70 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_MEMORYBUFFER_H_
+#define RAPIDJSON_MEMORYBUFFER_H_
+
+#include "lottie_rapidjson_stream.h"
+#include "lottie_rapidjson_internal_stack.h"
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+//! Represents an in-memory output byte stream.
+/*!
+    This class is mainly for being wrapped by EncodedOutputStream or AutoUTFOutputStream.
+
+    It is similar to FileWriteBuffer but the destination is an in-memory buffer instead of a file.
+
+    Differences between MemoryBuffer and StringBuffer:
+    1. StringBuffer has Encoding but MemoryBuffer is only a byte buffer.
+    2. StringBuffer::GetString() returns a null-terminated string. MemoryBuffer::GetBuffer() returns a buffer without terminator.
+
+    \tparam Allocator type for allocating memory buffer.
+    \note implements Stream concept
+*/
+template <typename Allocator = CrtAllocator>
+struct GenericMemoryBuffer {
+    typedef char Ch; // byte
+
+    GenericMemoryBuffer(Allocator* allocator = 0, size_t capacity = kDefaultCapacity) : stack_(allocator, capacity) {}
+
+    void Put(Ch c) { *stack_.template Push<Ch>() = c; }
+    void Flush() {}
+
+    void Clear() { stack_.Clear(); }
+    void ShrinkToFit() { stack_.ShrinkToFit(); }
+    Ch* Push(size_t count) { return stack_.template Push<Ch>(count); }
+    void Pop(size_t count) { stack_.template Pop<Ch>(count); }
+
+    const Ch* GetBuffer() const {
+        return stack_.template Bottom<Ch>();
+    }
+
+    size_t GetSize() const { return stack_.GetSize(); }
+
+    static const size_t kDefaultCapacity = 256;
+    mutable internal::Stack<Allocator> stack_;
+};
+
+typedef GenericMemoryBuffer<> MemoryBuffer;
+
+//! Implement specialized version of PutN() with memset() for better performance.
+template<>
+inline void PutN(MemoryBuffer& memoryBuffer, char c, size_t n) {
+    std::memset(memoryBuffer.stack_.Push<char>(n), c, n * sizeof(c));
+}
+
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_MEMORYBUFFER_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_memorystream.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_memorystream.h
new file mode 100644
index 00000000..933319e8
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_memorystream.h
@@ -0,0 +1,71 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
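GenericMemoryBuffer above implements the output side of the Stream concept, so any writer can target it directly. A minimal usage sketch, assuming this commit's flattened header names (stock RapidJSON would use rapidjson/memorybuffer.h and rapidjson/writer.h):

```cpp
// Sketch only; header names follow this commit's vendored naming.
#include <cstdio>
#include "lottie_rapidjson_memorybuffer.h"
#include "lottie_rapidjson_writer.h"

int main() {
    rapidjson::MemoryBuffer buf;
    rapidjson::Writer<rapidjson::MemoryBuffer> writer(buf);
    writer.StartObject();
    writer.Key("w");
    writer.Int(512);
    writer.EndObject();
    // Unlike StringBuffer::GetString(), GetBuffer() has no terminator,
    // so the size must always travel with the pointer.
    std::fwrite(buf.GetBuffer(), 1, buf.GetSize(), stdout);
    return 0;
}
```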
+
+#ifndef RAPIDJSON_MEMORYSTREAM_H_
+#define RAPIDJSON_MEMORYSTREAM_H_
+
+#include "lottie_rapidjson_stream.h"
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(unreachable-code)
+RAPIDJSON_DIAG_OFF(missing-noreturn)
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+//! Represents an in-memory input byte stream.
+/*!
+    This class is mainly for being wrapped by EncodedInputStream or AutoUTFInputStream.
+
+    It is similar to FileReadBuffer but the source is an in-memory buffer instead of a file.
+
+    Differences between MemoryStream and StringStream:
+    1. StringStream has encoding but MemoryStream is a byte stream.
+    2. MemoryStream needs the size of the source buffer, and the buffer does not need to be null-terminated. StringStream assumes a null-terminated string as source.
+    3. MemoryStream supports Peek4() for encoding detection. StringStream is specified with an encoding so it should not have Peek4().
+    \note implements Stream concept
+*/
+struct MemoryStream {
+    typedef char Ch; // byte
+
+    MemoryStream(const Ch *src, size_t size) : src_(src), begin_(src), end_(src + size), size_(size) {}
+
+    Ch Peek() const { return RAPIDJSON_UNLIKELY(src_ == end_) ? '\0' : *src_; }
+    Ch Take() { return RAPIDJSON_UNLIKELY(src_ == end_) ? '\0' : *src_++; }
+    size_t Tell() const { return static_cast<size_t>(src_ - begin_); }
+
+    Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
+    void Put(Ch) { RAPIDJSON_ASSERT(false); }
+    void Flush() { RAPIDJSON_ASSERT(false); }
+    size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
+
+    // For encoding detection only.
+    const Ch* Peek4() const {
+        return Tell() + 4 <= size_ ? src_ : 0;
+    }
+
+    const Ch* src_;     //!< Current read position.
+    const Ch* begin_;   //!< Original head of the string.
+    const Ch* end_;     //!< End of stream.
+    size_t size_;       //!< Size of the stream.
+};
+
+RAPIDJSON_NAMESPACE_END
+
+#ifdef __clang__
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_MEMORYSTREAM_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_msinttypes_inttypes.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_msinttypes_inttypes.h
new file mode 100644
index 00000000..8db2e8e7
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_msinttypes_inttypes.h
@@ -0,0 +1,316 @@
+// ISO C9x compliant inttypes.h for Microsoft Visual Studio
+// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
+//
+// Copyright (c) 2006-2013 Alexander Chemeris
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+//   1. Redistributions of source code must retain the above copyright notice,
+//      this list of conditions and the following disclaimer.
+//
+//   2. Redistributions in binary form must reproduce the above copyright
+//      notice, this list of conditions and the following disclaimer in the
+//      documentation and/or other materials provided with the distribution.
+//
+//   3. Neither the name of the product nor the names of its contributors may
+//      be used to endorse or promote products derived from this software
+//      without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO +// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +/////////////////////////////////////////////////////////////////////////////// + +// The above software in this distribution may have been modified by +// THL A29 Limited ("Tencent Modifications"). +// All Tencent Modifications are Copyright (C) 2015 THL A29 Limited. + +#ifndef _MSC_VER // [ +#error "Use this header only with Microsoft Visual C++ compilers!" +#endif // _MSC_VER ] + +#ifndef _MSC_INTTYPES_H_ // [ +#define _MSC_INTTYPES_H_ + +#if _MSC_VER > 1000 +#pragma once +#endif + +#include "lottie_rapidjson_msinttypes_stdint.h" + +// miloyip: VC supports inttypes.h since VC2013 +#if _MSC_VER >= 1800 +#include +#else + +// 7.8 Format conversion of integer types + +typedef struct { + intmax_t quot; + intmax_t rem; +} imaxdiv_t; + +// 7.8.1 Macros for format specifiers + +#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198 + +// The fprintf macros for signed integers are: +#define PRId8 "d" +#define PRIi8 "i" +#define PRIdLEAST8 "d" +#define PRIiLEAST8 "i" +#define PRIdFAST8 "d" +#define PRIiFAST8 "i" + +#define PRId16 "hd" +#define PRIi16 "hi" +#define PRIdLEAST16 "hd" +#define PRIiLEAST16 "hi" +#define PRIdFAST16 "hd" +#define PRIiFAST16 "hi" + +#define PRId32 "I32d" +#define PRIi32 "I32i" +#define PRIdLEAST32 "I32d" +#define PRIiLEAST32 "I32i" +#define PRIdFAST32 "I32d" +#define PRIiFAST32 "I32i" + +#define PRId64 "I64d" +#define PRIi64 "I64i" +#define PRIdLEAST64 "I64d" +#define PRIiLEAST64 "I64i" +#define PRIdFAST64 "I64d" +#define PRIiFAST64 "I64i" + +#define PRIdMAX "I64d" +#define PRIiMAX "I64i" + +#define PRIdPTR "Id" +#define PRIiPTR "Ii" + +// The fprintf macros for unsigned integers are: +#define PRIo8 "o" +#define PRIu8 "u" +#define PRIx8 "x" +#define PRIX8 "X" +#define PRIoLEAST8 "o" +#define PRIuLEAST8 "u" +#define PRIxLEAST8 "x" +#define PRIXLEAST8 "X" +#define PRIoFAST8 "o" +#define PRIuFAST8 "u" +#define PRIxFAST8 "x" +#define PRIXFAST8 "X" + +#define PRIo16 "ho" +#define PRIu16 "hu" +#define PRIx16 "hx" +#define PRIX16 "hX" +#define PRIoLEAST16 "ho" +#define PRIuLEAST16 "hu" +#define PRIxLEAST16 "hx" +#define PRIXLEAST16 "hX" +#define PRIoFAST16 "ho" +#define PRIuFAST16 "hu" +#define PRIxFAST16 "hx" +#define PRIXFAST16 "hX" + +#define PRIo32 "I32o" +#define PRIu32 "I32u" +#define PRIx32 "I32x" +#define PRIX32 "I32X" +#define PRIoLEAST32 "I32o" +#define PRIuLEAST32 "I32u" +#define PRIxLEAST32 "I32x" +#define PRIXLEAST32 "I32X" +#define PRIoFAST32 "I32o" +#define PRIuFAST32 "I32u" +#define PRIxFAST32 "I32x" +#define PRIXFAST32 "I32X" + +#define PRIo64 "I64o" +#define PRIu64 "I64u" +#define PRIx64 "I64x" +#define PRIX64 "I64X" +#define PRIoLEAST64 "I64o" +#define PRIuLEAST64 "I64u" +#define PRIxLEAST64 "I64x" +#define PRIXLEAST64 "I64X" +#define PRIoFAST64 "I64o" +#define PRIuFAST64 "I64u" +#define PRIxFAST64 "I64x" +#define PRIXFAST64 "I64X" + +#define PRIoMAX "I64o" +#define PRIuMAX "I64u" +#define PRIxMAX "I64x" +#define PRIXMAX "I64X" + +#define PRIoPTR "Io" +#define PRIuPTR "Iu" +#define PRIxPTR "Ix" +#define 
PRIXPTR "IX" + +// The fscanf macros for signed integers are: +#define SCNd8 "d" +#define SCNi8 "i" +#define SCNdLEAST8 "d" +#define SCNiLEAST8 "i" +#define SCNdFAST8 "d" +#define SCNiFAST8 "i" + +#define SCNd16 "hd" +#define SCNi16 "hi" +#define SCNdLEAST16 "hd" +#define SCNiLEAST16 "hi" +#define SCNdFAST16 "hd" +#define SCNiFAST16 "hi" + +#define SCNd32 "ld" +#define SCNi32 "li" +#define SCNdLEAST32 "ld" +#define SCNiLEAST32 "li" +#define SCNdFAST32 "ld" +#define SCNiFAST32 "li" + +#define SCNd64 "I64d" +#define SCNi64 "I64i" +#define SCNdLEAST64 "I64d" +#define SCNiLEAST64 "I64i" +#define SCNdFAST64 "I64d" +#define SCNiFAST64 "I64i" + +#define SCNdMAX "I64d" +#define SCNiMAX "I64i" + +#ifdef _WIN64 // [ +# define SCNdPTR "I64d" +# define SCNiPTR "I64i" +#else // _WIN64 ][ +# define SCNdPTR "ld" +# define SCNiPTR "li" +#endif // _WIN64 ] + +// The fscanf macros for unsigned integers are: +#define SCNo8 "o" +#define SCNu8 "u" +#define SCNx8 "x" +#define SCNX8 "X" +#define SCNoLEAST8 "o" +#define SCNuLEAST8 "u" +#define SCNxLEAST8 "x" +#define SCNXLEAST8 "X" +#define SCNoFAST8 "o" +#define SCNuFAST8 "u" +#define SCNxFAST8 "x" +#define SCNXFAST8 "X" + +#define SCNo16 "ho" +#define SCNu16 "hu" +#define SCNx16 "hx" +#define SCNX16 "hX" +#define SCNoLEAST16 "ho" +#define SCNuLEAST16 "hu" +#define SCNxLEAST16 "hx" +#define SCNXLEAST16 "hX" +#define SCNoFAST16 "ho" +#define SCNuFAST16 "hu" +#define SCNxFAST16 "hx" +#define SCNXFAST16 "hX" + +#define SCNo32 "lo" +#define SCNu32 "lu" +#define SCNx32 "lx" +#define SCNX32 "lX" +#define SCNoLEAST32 "lo" +#define SCNuLEAST32 "lu" +#define SCNxLEAST32 "lx" +#define SCNXLEAST32 "lX" +#define SCNoFAST32 "lo" +#define SCNuFAST32 "lu" +#define SCNxFAST32 "lx" +#define SCNXFAST32 "lX" + +#define SCNo64 "I64o" +#define SCNu64 "I64u" +#define SCNx64 "I64x" +#define SCNX64 "I64X" +#define SCNoLEAST64 "I64o" +#define SCNuLEAST64 "I64u" +#define SCNxLEAST64 "I64x" +#define SCNXLEAST64 "I64X" +#define SCNoFAST64 "I64o" +#define SCNuFAST64 "I64u" +#define SCNxFAST64 "I64x" +#define SCNXFAST64 "I64X" + +#define SCNoMAX "I64o" +#define SCNuMAX "I64u" +#define SCNxMAX "I64x" +#define SCNXMAX "I64X" + +#ifdef _WIN64 // [ +# define SCNoPTR "I64o" +# define SCNuPTR "I64u" +# define SCNxPTR "I64x" +# define SCNXPTR "I64X" +#else // _WIN64 ][ +# define SCNoPTR "lo" +# define SCNuPTR "lu" +# define SCNxPTR "lx" +# define SCNXPTR "lX" +#endif // _WIN64 ] + +#endif // __STDC_FORMAT_MACROS ] + +// 7.8.2 Functions for greatest-width integer types + +// 7.8.2.1 The imaxabs function +#define imaxabs _abs64 + +// 7.8.2.2 The imaxdiv function + +// This is modified version of div() function from Microsoft's div.c found +// in %MSVC.NET%\crt\src\div.c +#ifdef STATIC_IMAXDIV // [ +static +#else // STATIC_IMAXDIV ][ +_inline +#endif // STATIC_IMAXDIV ] +imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom) +{ + imaxdiv_t result; + + result.quot = numer / denom; + result.rem = numer % denom; + + if (numer < 0 && result.rem > 0) { + // did division wrong; must fix up + ++result.quot; + result.rem -= denom; + } + + return result; +} + +// 7.8.2.3 The strtoimax and strtoumax functions +#define strtoimax _strtoi64 +#define strtoumax _strtoui64 + +// 7.8.2.4 The wcstoimax and wcstoumax functions +#define wcstoimax _wcstoi64 +#define wcstoumax _wcstoui64 + +#endif // _MSC_VER >= 1800 + +#endif // _MSC_INTTYPES_H_ ] diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_msinttypes_stdint.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_msinttypes_stdint.h new file mode 
100644 index 00000000..3d4477b9 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_msinttypes_stdint.h @@ -0,0 +1,300 @@ +// ISO C9x compliant stdint.h for Microsoft Visual Studio +// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 +// +// Copyright (c) 2006-2013 Alexander Chemeris +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the product nor the names of its contributors may +// be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +/////////////////////////////////////////////////////////////////////////////// + +// The above software in this distribution may have been modified by +// THL A29 Limited ("Tencent Modifications"). +// All Tencent Modifications are Copyright (C) 2015 THL A29 Limited. + +#ifndef _MSC_VER // [ +#error "Use this header only with Microsoft Visual C++ compilers!" +#endif // _MSC_VER ] + +#ifndef _MSC_STDINT_H_ // [ +#define _MSC_STDINT_H_ + +#if _MSC_VER > 1000 +#pragma once +#endif + +// miloyip: Originally Visual Studio 2010 uses its own stdint.h. However it generates warning with INT64_C(), so change to use this file for vs2010. +#if _MSC_VER >= 1600 // [ +#include + +#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 + +#undef INT8_C +#undef INT16_C +#undef INT32_C +#undef INT64_C +#undef UINT8_C +#undef UINT16_C +#undef UINT32_C +#undef UINT64_C + +// 7.18.4.1 Macros for minimum-width integer constants + +#define INT8_C(val) val##i8 +#define INT16_C(val) val##i16 +#define INT32_C(val) val##i32 +#define INT64_C(val) val##i64 + +#define UINT8_C(val) val##ui8 +#define UINT16_C(val) val##ui16 +#define UINT32_C(val) val##ui32 +#define UINT64_C(val) val##ui64 + +// 7.18.4.2 Macros for greatest-width integer constants +// These #ifndef's are needed to prevent collisions with . +// Check out Issue 9 for the details. 
+#ifndef INTMAX_C // [ +# define INTMAX_C INT64_C +#endif // INTMAX_C ] +#ifndef UINTMAX_C // [ +# define UINTMAX_C UINT64_C +#endif // UINTMAX_C ] + +#endif // __STDC_CONSTANT_MACROS ] + +#else // ] _MSC_VER >= 1700 [ + +#include + +// For Visual Studio 6 in C++ mode and for many Visual Studio versions when +// compiling for ARM we have to wrap include with 'extern "C++" {}' +// or compiler would give many errors like this: +// error C2733: second C linkage of overloaded function 'wmemchr' not allowed +#if defined(__cplusplus) && !defined(_M_ARM) +extern "C" { +#endif +# include +#if defined(__cplusplus) && !defined(_M_ARM) +} +#endif + +// Define _W64 macros to mark types changing their size, like intptr_t. +#ifndef _W64 +# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 +# define _W64 __w64 +# else +# define _W64 +# endif +#endif + + +// 7.18.1 Integer types + +// 7.18.1.1 Exact-width integer types + +// Visual Studio 6 and Embedded Visual C++ 4 doesn't +// realize that, e.g. char has the same size as __int8 +// so we give up on __intX for them. +#if (_MSC_VER < 1300) + typedef signed char int8_t; + typedef signed short int16_t; + typedef signed int int32_t; + typedef unsigned char uint8_t; + typedef unsigned short uint16_t; + typedef unsigned int uint32_t; +#else + typedef signed __int8 int8_t; + typedef signed __int16 int16_t; + typedef signed __int32 int32_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; +#endif +typedef signed __int64 int64_t; +typedef unsigned __int64 uint64_t; + + +// 7.18.1.2 Minimum-width integer types +typedef int8_t int_least8_t; +typedef int16_t int_least16_t; +typedef int32_t int_least32_t; +typedef int64_t int_least64_t; +typedef uint8_t uint_least8_t; +typedef uint16_t uint_least16_t; +typedef uint32_t uint_least32_t; +typedef uint64_t uint_least64_t; + +// 7.18.1.3 Fastest minimum-width integer types +typedef int8_t int_fast8_t; +typedef int16_t int_fast16_t; +typedef int32_t int_fast32_t; +typedef int64_t int_fast64_t; +typedef uint8_t uint_fast8_t; +typedef uint16_t uint_fast16_t; +typedef uint32_t uint_fast32_t; +typedef uint64_t uint_fast64_t; + +// 7.18.1.4 Integer types capable of holding object pointers +#ifdef _WIN64 // [ + typedef signed __int64 intptr_t; + typedef unsigned __int64 uintptr_t; +#else // _WIN64 ][ + typedef _W64 signed int intptr_t; + typedef _W64 unsigned int uintptr_t; +#endif // _WIN64 ] + +// 7.18.1.5 Greatest-width integer types +typedef int64_t intmax_t; +typedef uint64_t uintmax_t; + + +// 7.18.2 Limits of specified-width integer types + +#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 + +// 7.18.2.1 Limits of exact-width integer types +#define INT8_MIN ((int8_t)_I8_MIN) +#define INT8_MAX _I8_MAX +#define INT16_MIN ((int16_t)_I16_MIN) +#define INT16_MAX _I16_MAX +#define INT32_MIN ((int32_t)_I32_MIN) +#define INT32_MAX _I32_MAX +#define INT64_MIN ((int64_t)_I64_MIN) +#define INT64_MAX _I64_MAX +#define UINT8_MAX _UI8_MAX +#define UINT16_MAX _UI16_MAX +#define UINT32_MAX _UI32_MAX +#define UINT64_MAX _UI64_MAX + +// 7.18.2.2 Limits of minimum-width integer types +#define INT_LEAST8_MIN INT8_MIN +#define INT_LEAST8_MAX INT8_MAX +#define INT_LEAST16_MIN INT16_MIN +#define INT_LEAST16_MAX INT16_MAX +#define INT_LEAST32_MIN INT32_MIN +#define INT_LEAST32_MAX INT32_MAX +#define INT_LEAST64_MIN INT64_MIN +#define INT_LEAST64_MAX INT64_MAX +#define UINT_LEAST8_MAX 
UINT8_MAX +#define UINT_LEAST16_MAX UINT16_MAX +#define UINT_LEAST32_MAX UINT32_MAX +#define UINT_LEAST64_MAX UINT64_MAX + +// 7.18.2.3 Limits of fastest minimum-width integer types +#define INT_FAST8_MIN INT8_MIN +#define INT_FAST8_MAX INT8_MAX +#define INT_FAST16_MIN INT16_MIN +#define INT_FAST16_MAX INT16_MAX +#define INT_FAST32_MIN INT32_MIN +#define INT_FAST32_MAX INT32_MAX +#define INT_FAST64_MIN INT64_MIN +#define INT_FAST64_MAX INT64_MAX +#define UINT_FAST8_MAX UINT8_MAX +#define UINT_FAST16_MAX UINT16_MAX +#define UINT_FAST32_MAX UINT32_MAX +#define UINT_FAST64_MAX UINT64_MAX + +// 7.18.2.4 Limits of integer types capable of holding object pointers +#ifdef _WIN64 // [ +# define INTPTR_MIN INT64_MIN +# define INTPTR_MAX INT64_MAX +# define UINTPTR_MAX UINT64_MAX +#else // _WIN64 ][ +# define INTPTR_MIN INT32_MIN +# define INTPTR_MAX INT32_MAX +# define UINTPTR_MAX UINT32_MAX +#endif // _WIN64 ] + +// 7.18.2.5 Limits of greatest-width integer types +#define INTMAX_MIN INT64_MIN +#define INTMAX_MAX INT64_MAX +#define UINTMAX_MAX UINT64_MAX + +// 7.18.3 Limits of other integer types + +#ifdef _WIN64 // [ +# define PTRDIFF_MIN _I64_MIN +# define PTRDIFF_MAX _I64_MAX +#else // _WIN64 ][ +# define PTRDIFF_MIN _I32_MIN +# define PTRDIFF_MAX _I32_MAX +#endif // _WIN64 ] + +#define SIG_ATOMIC_MIN INT_MIN +#define SIG_ATOMIC_MAX INT_MAX + +#ifndef SIZE_MAX // [ +# ifdef _WIN64 // [ +# define SIZE_MAX _UI64_MAX +# else // _WIN64 ][ +# define SIZE_MAX _UI32_MAX +# endif // _WIN64 ] +#endif // SIZE_MAX ] + +// WCHAR_MIN and WCHAR_MAX are also defined in +#ifndef WCHAR_MIN // [ +# define WCHAR_MIN 0 +#endif // WCHAR_MIN ] +#ifndef WCHAR_MAX // [ +# define WCHAR_MAX _UI16_MAX +#endif // WCHAR_MAX ] + +#define WINT_MIN 0 +#define WINT_MAX _UI16_MAX + +#endif // __STDC_LIMIT_MACROS ] + + +// 7.18.4 Limits of other integer types + +#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 + +// 7.18.4.1 Macros for minimum-width integer constants + +#define INT8_C(val) val##i8 +#define INT16_C(val) val##i16 +#define INT32_C(val) val##i32 +#define INT64_C(val) val##i64 + +#define UINT8_C(val) val##ui8 +#define UINT16_C(val) val##ui16 +#define UINT32_C(val) val##ui32 +#define UINT64_C(val) val##ui64 + +// 7.18.4.2 Macros for greatest-width integer constants +// These #ifndef's are needed to prevent collisions with . +// Check out Issue 9 for the details. +#ifndef INTMAX_C // [ +# define INTMAX_C INT64_C +#endif // INTMAX_C ] +#ifndef UINTMAX_C // [ +# define UINTMAX_C UINT64_C +#endif // UINTMAX_C ] + +#endif // __STDC_CONSTANT_MACROS ] + +#endif // _MSC_VER >= 1600 ] + +#endif // _MSC_STDINT_H_ ] diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_ostreamwrapper.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_ostreamwrapper.h new file mode 100644 index 00000000..1a606077 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_ostreamwrapper.h @@ -0,0 +1,81 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. 
You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_OSTREAMWRAPPER_H_
+#define RAPIDJSON_OSTREAMWRAPPER_H_
+
+#include "lottie_rapidjson_stream.h"
+#include <iosfwd>
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(padded)
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+//! Wrapper of \c std::basic_ostream into RapidJSON's Stream concept.
+/*!
+    The classes that can be wrapped include, but are not limited to:
+
+    - \c std::ostringstream
+    - \c std::stringstream
+    - \c std::wostringstream
+    - \c std::wstringstream
+    - \c std::ofstream
+    - \c std::fstream
+    - \c std::wofstream
+    - \c std::wfstream
+
+    \tparam StreamType Class derived from \c std::basic_ostream.
+*/
+
+template <typename StreamType>
+class BasicOStreamWrapper {
+public:
+    typedef typename StreamType::char_type Ch;
+    BasicOStreamWrapper(StreamType& stream) : stream_(stream) {}
+
+    void Put(Ch c) {
+        stream_.put(c);
+    }
+
+    void Flush() {
+        stream_.flush();
+    }
+
+    // Not implemented
+    char Peek() const { RAPIDJSON_ASSERT(false); return 0; }
+    char Take() { RAPIDJSON_ASSERT(false); return 0; }
+    size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; }
+    char* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
+    size_t PutEnd(char*) { RAPIDJSON_ASSERT(false); return 0; }
+
+private:
+    BasicOStreamWrapper(const BasicOStreamWrapper&);
+    BasicOStreamWrapper& operator=(const BasicOStreamWrapper&);
+
+    StreamType& stream_;
+};
+
+typedef BasicOStreamWrapper<std::ostream> OStreamWrapper;
+typedef BasicOStreamWrapper<std::wostream> WOStreamWrapper;
+
+#ifdef __clang__
+RAPIDJSON_DIAG_POP
+#endif
+
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_OSTREAMWRAPPER_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_pointer.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_pointer.h
new file mode 100644
index 00000000..82b25eb9
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_pointer.h
@@ -0,0 +1,1415 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_POINTER_H_
+#define RAPIDJSON_POINTER_H_
+
+#include "lottie_rapidjson_document.h"
+#include "lottie_rapidjson_internal_itoa.h"
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(switch-enum)
+#elif defined(_MSC_VER)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+static const SizeType kPointerInvalidIndex = ~SizeType(0);  //!< Represents an invalid index in GenericPointer::Token
+
+//! Error code of parsing.
+/*!
+    \ingroup RAPIDJSON_ERRORS
+    \see GenericPointer::GenericPointer, GenericPointer::GetParseErrorCode
+*/
+enum PointerParseErrorCode {
+    kPointerParseErrorNone = 0,                     //!< The parse is successful
+
+    kPointerParseErrorTokenMustBeginWithSolidus,    //!< A token must begin with a '/'
+    kPointerParseErrorInvalidEscape,                //!< Invalid escape
+    kPointerParseErrorInvalidPercentEncoding,       //!< Invalid percent encoding in URI fragment
+    kPointerParseErrorCharacterMustPercentEncode    //!< A character must be percent encoded in a URI fragment
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// GenericPointer
+
+//! Represents a JSON Pointer. Use Pointer for UTF8 encoding and default allocator.
+/*!
+    This class implements RFC 6901 "JavaScript Object Notation (JSON) Pointer"
+    (https://tools.ietf.org/html/rfc6901).
+
+    A JSON pointer is for identifying a specific value in a JSON document
+    (GenericDocument). It can simplify coding of DOM tree manipulation, because it
+    can access multiple-level depth of DOM tree with single API call.
+
+    After it parses a string representation (e.g. "/foo/0") or URI fragment
+    representation (e.g. "#/foo/0") into its internal representation (tokens),
+    it can be used to resolve a specific value in multiple documents, or sub-tree
+    of documents.
+
+    Contrary to GenericValue, Pointer can be copy constructed and copy assigned.
+    Apart from assignment, a Pointer cannot be modified after construction.
+
+    Although Pointer is very convenient, please be aware that constructing Pointer
+    involves parsing and dynamic memory allocation. A special constructor with user-
+    supplied tokens eliminates these.
+
+    GenericPointer depends on GenericDocument and GenericValue.
+
+    \tparam ValueType The value type of the DOM tree. E.g. GenericValue<UTF8<> >
+    \tparam Allocator The allocator type for allocating memory for internal representation.
+
+    \note GenericPointer uses same encoding of ValueType.
+    However, Allocator of GenericPointer is independent of Allocator of Value.
+*/
+template <typename ValueType, typename Allocator = CrtAllocator>
+class GenericPointer {
+public:
+    typedef typename ValueType::EncodingType EncodingType;  //!< Encoding type from Value
+    typedef typename ValueType::Ch Ch;                      //!< Character type from Value
+
+    //! A token is the basic unit of internal representation.
+    /*!
+        A JSON pointer string representation "/foo/123" is parsed to two tokens:
+        "foo" and 123. 123 will be represented in both numeric form and string form.
+        They are resolved according to the actual value type (object or array).
+
+        For tokens that are not numbers, or when the numeric value is out of bound
+        (greater than limits of SizeType), they are only treated as string form
+        (i.e. the token's index will be equal to kPointerInvalidIndex).
+
+        This struct is public so that user can create a Pointer without parsing and
+        allocation, using a special constructor.
+    */
+    struct Token {
+        const Ch* name;             //!< Name of the token. It has null character at the end but it can contain null character.
+        SizeType length;            //!< Length of the name.
+        SizeType index;             //!< A valid array index, if it is not equal to kPointerInvalidIndex.
+    };
+
+    //!@name Constructors and destructor.
+    //@{
+
+    //! Default constructor.
+    GenericPointer(Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) {}
+
+    //! Constructor that parses a string or URI fragment representation.
+    /*!
+ \param source A null-terminated, string or URI fragment representation of JSON pointer. + \param allocator User supplied allocator for this pointer. If no allocator is provided, it creates a self-owned one. + */ + explicit GenericPointer(const Ch* source, Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) { + Parse(source, internal::StrLen(source)); + } + +#if RAPIDJSON_HAS_STDSTRING + //! Constructor that parses a string or URI fragment representation. + /*! + \param source A string or URI fragment representation of JSON pointer. + \param allocator User supplied allocator for this pointer. If no allocator is provided, it creates a self-owned one. + \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING. + */ + explicit GenericPointer(const std::basic_string& source, Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) { + Parse(source.c_str(), source.size()); + } +#endif + + //! Constructor that parses a string or URI fragment representation, with length of the source string. + /*! + \param source A string or URI fragment representation of JSON pointer. + \param length Length of source. + \param allocator User supplied allocator for this pointer. If no allocator is provided, it creates a self-owned one. + \note Slightly faster than the overload without length. + */ + GenericPointer(const Ch* source, size_t length, Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) { + Parse(source, length); + } + + //! Constructor with user-supplied tokens. + /*! + This constructor let user supplies const array of tokens. + This prevents the parsing process and eliminates allocation. + This is preferred for memory constrained environments. + + \param tokens An constant array of tokens representing the JSON pointer. + \param tokenCount Number of tokens. + + \b Example + \code + #define NAME(s) { s, sizeof(s) / sizeof(s[0]) - 1, kPointerInvalidIndex } + #define INDEX(i) { #i, sizeof(#i) - 1, i } + + static const Pointer::Token kTokens[] = { NAME("foo"), INDEX(123) }; + static const Pointer p(kTokens, sizeof(kTokens) / sizeof(kTokens[0])); + // Equivalent to static const Pointer p("/foo/123"); + + #undef NAME + #undef INDEX + \endcode + */ + GenericPointer(const Token* tokens, size_t tokenCount) : allocator_(), ownAllocator_(), nameBuffer_(), tokens_(const_cast(tokens)), tokenCount_(tokenCount), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) {} + + //! Copy constructor. + GenericPointer(const GenericPointer& rhs) : allocator_(rhs.allocator_), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) { + *this = rhs; + } + + //! Copy constructor. + GenericPointer(const GenericPointer& rhs, Allocator* allocator) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) { + *this = rhs; + } + + //! Destructor. + ~GenericPointer() { + if (nameBuffer_) // If user-supplied tokens constructor is used, nameBuffer_ is nullptr and tokens_ are not deallocated. + Allocator::Free(tokens_); + RAPIDJSON_DELETE(ownAllocator_); + } + + //! Assignment operator. 
+    GenericPointer& operator=(const GenericPointer& rhs) {
+        if (this != &rhs) {
+            // Do not delete ownAllocator
+            if (nameBuffer_)
+                Allocator::Free(tokens_);
+
+            tokenCount_ = rhs.tokenCount_;
+            parseErrorOffset_ = rhs.parseErrorOffset_;
+            parseErrorCode_ = rhs.parseErrorCode_;
+
+            if (rhs.nameBuffer_)
+                CopyFromRaw(rhs); // Normally parsed tokens.
+            else {
+                tokens_ = rhs.tokens_; // User supplied const tokens.
+                nameBuffer_ = 0;
+            }
+        }
+        return *this;
+    }
+
+    //! Swap the content of this pointer with an other.
+    /*!
+        \param other The pointer to swap with.
+        \note Constant complexity.
+    */
+    GenericPointer& Swap(GenericPointer& other) RAPIDJSON_NOEXCEPT {
+        internal::Swap(allocator_, other.allocator_);
+        internal::Swap(ownAllocator_, other.ownAllocator_);
+        internal::Swap(nameBuffer_, other.nameBuffer_);
+        internal::Swap(tokens_, other.tokens_);
+        internal::Swap(tokenCount_, other.tokenCount_);
+        internal::Swap(parseErrorOffset_, other.parseErrorOffset_);
+        internal::Swap(parseErrorCode_, other.parseErrorCode_);
+        return *this;
+    }
+
+    //! free-standing swap function helper
+    /*!
+        Helper function to enable support for common swap implementation pattern based on \c std::swap:
+        \code
+        void swap(MyClass& a, MyClass& b) {
+            using std::swap;
+            swap(a.pointer, b.pointer);
+            // ...
+        }
+        \endcode
+        \see Swap()
+     */
+    friend inline void swap(GenericPointer& a, GenericPointer& b) RAPIDJSON_NOEXCEPT { a.Swap(b); }
+
+    //@}
+
+    //!@name Append token
+    //@{
+
+    //! Append a token and return a new Pointer
+    /*!
+        \param token Token to be appended.
+        \param allocator Allocator for the newly returned Pointer.
+        \return A new Pointer with appended token.
+    */
+    GenericPointer Append(const Token& token, Allocator* allocator = 0) const {
+        GenericPointer r;
+        r.allocator_ = allocator;
+        Ch *p = r.CopyFromRaw(*this, 1, token.length + 1);
+        std::memcpy(p, token.name, (token.length + 1) * sizeof(Ch));
+        r.tokens_[tokenCount_].name = p;
+        r.tokens_[tokenCount_].length = token.length;
+        r.tokens_[tokenCount_].index = token.index;
+        return r;
+    }
+
+    //! Append a name token with length, and return a new Pointer
+    /*!
+        \param name Name to be appended.
+        \param length Length of name.
+        \param allocator Allocator for the newly returned Pointer.
+        \return A new Pointer with appended token.
+    */
+    GenericPointer Append(const Ch* name, SizeType length, Allocator* allocator = 0) const {
+        Token token = { name, length, kPointerInvalidIndex };
+        return Append(token, allocator);
+    }
+
+    //! Append a name token without length, and return a new Pointer
+    /*!
+        \param name Name (const Ch*) to be appended.
+        \param allocator Allocator for the newly returned Pointer.
+        \return A new Pointer with appended token.
+    */
+    template <typename T>
+    RAPIDJSON_DISABLEIF_RETURN((internal::NotExpr<internal::IsSame<typename internal::RemoveConst<T>::Type, Ch> >), (GenericPointer))
+    Append(T* name, Allocator* allocator = 0) const {
+        return Append(name, internal::StrLen(name), allocator);
+    }
+
+#if RAPIDJSON_HAS_STDSTRING
+    //! Append a name token, and return a new Pointer
+    /*!
+        \param name Name to be appended.
+        \param allocator Allocator for the newly returned Pointer.
+        \return A new Pointer with appended token.
+    */
+    GenericPointer Append(const std::basic_string<Ch>& name, Allocator* allocator = 0) const {
+        return Append(name.c_str(), static_cast<SizeType>(name.size()), allocator);
+    }
+#endif
+
+    //! Append an index token, and return a new Pointer
+    /*!
+        \param index Index to be appended.
+        \param allocator Allocator for the newly returned Pointer.
+        \return A new Pointer with appended token.
+ */ + GenericPointer Append(SizeType index, Allocator* allocator = 0) const { + char buffer[21]; + char* end = sizeof(SizeType) == 4 ? internal::u32toa(index, buffer) : internal::u64toa(index, buffer); + SizeType length = static_cast(end - buffer); + buffer[length] = '\0'; + + if (sizeof(Ch) == 1) { + Token token = { reinterpret_cast(buffer), length, index }; + return Append(token, allocator); + } + else { + Ch name[21]; + for (size_t i = 0; i <= length; i++) + name[i] = static_cast(buffer[i]); + Token token = { name, length, index }; + return Append(token, allocator); + } + } + + //! Append a token by value, and return a new Pointer + /*! + \param token token to be appended. + \param allocator Allocator for the newly return Pointer. + \return A new Pointer with appended token. + */ + GenericPointer Append(const ValueType& token, Allocator* allocator = 0) const { + if (token.IsString()) + return Append(token.GetString(), token.GetStringLength(), allocator); + else { + RAPIDJSON_ASSERT(token.IsUint64()); + RAPIDJSON_ASSERT(token.GetUint64() <= SizeType(~0)); + return Append(static_cast(token.GetUint64()), allocator); + } + } + + //!@name Handling Parse Error + //@{ + + //! Check whether this is a valid pointer. + bool IsValid() const { return parseErrorCode_ == kPointerParseErrorNone; } + + //! Get the parsing error offset in code unit. + size_t GetParseErrorOffset() const { return parseErrorOffset_; } + + //! Get the parsing error code. + PointerParseErrorCode GetParseErrorCode() const { return parseErrorCode_; } + + //@} + + //! Get the allocator of this pointer. + Allocator& GetAllocator() { return *allocator_; } + + //!@name Tokens + //@{ + + //! Get the token array (const version only). + const Token* GetTokens() const { return tokens_; } + + //! Get the number of tokens. + size_t GetTokenCount() const { return tokenCount_; } + + //@} + + //!@name Equality/inequality operators + //@{ + + //! Equality operator. + /*! + \note When any pointers are invalid, always returns false. + */ + bool operator==(const GenericPointer& rhs) const { + if (!IsValid() || !rhs.IsValid() || tokenCount_ != rhs.tokenCount_) + return false; + + for (size_t i = 0; i < tokenCount_; i++) { + if (tokens_[i].index != rhs.tokens_[i].index || + tokens_[i].length != rhs.tokens_[i].length || + (tokens_[i].length != 0 && std::memcmp(tokens_[i].name, rhs.tokens_[i].name, sizeof(Ch)* tokens_[i].length) != 0)) + { + return false; + } + } + + return true; + } + + //! Inequality operator. + /*! + \note When any pointers are invalid, always returns true. + */ + bool operator!=(const GenericPointer& rhs) const { return !(*this == rhs); } + + //! Less than operator. + /*! + \note Invalid pointers are always greater than valid ones. + */ + bool operator<(const GenericPointer& rhs) const { + if (!IsValid()) + return false; + if (!rhs.IsValid()) + return true; + + if (tokenCount_ != rhs.tokenCount_) + return tokenCount_ < rhs.tokenCount_; + + for (size_t i = 0; i < tokenCount_; i++) { + if (tokens_[i].index != rhs.tokens_[i].index) + return tokens_[i].index < rhs.tokens_[i].index; + + if (tokens_[i].length != rhs.tokens_[i].length) + return tokens_[i].length < rhs.tokens_[i].length; + + if (int cmp = std::memcmp(tokens_[i].name, rhs.tokens_[i].name, sizeof(Ch) * tokens_[i].length)) + return cmp < 0; + } + + return false; + } + + //@} + + //!@name Stringify + //@{ + + //! Stringify the pointer into string representation. + /*! + \tparam OutputStream Type of output stream. + \param os The output stream. 
+    */
+    template <typename OutputStream>
+    bool Stringify(OutputStream& os) const {
+        return Stringify<false, OutputStream>(os);
+    }
+
+    //! Stringify the pointer into URI fragment representation.
+    /*!
+        \tparam OutputStream Type of output stream.
+        \param os The output stream.
+    */
+    template <typename OutputStream>
+    bool StringifyUriFragment(OutputStream& os) const {
+        return Stringify<true, OutputStream>(os);
+    }
+
+    //@}
+
+    //!@name Create value
+    //@{
+
+    //! Create a value in a subtree.
+    /*!
+        If the value does not exist, it creates all parent values and a JSON Null value.
+        So it always succeeds and returns the newly created or existing value.
+
+        Bear in mind that it may change types of parents according to tokens, so it
+        potentially removes previously stored values. For example, if a document
+        was an array, and "/foo" is used to create a value, then the document
+        will be changed to an object, and all existing array elements are lost.
+
+        \param root Root value of a DOM subtree to be resolved. It can be any value other than document root.
+        \param allocator Allocator for creating the values if the specified value or its parents do not exist.
+        \param alreadyExist If non-null, it stores whether the resolved value already exists.
+        \return The resolved newly created (a JSON Null value), or already existing value.
+    */
+    ValueType& Create(ValueType& root, typename ValueType::AllocatorType& allocator, bool* alreadyExist = 0) const {
+        RAPIDJSON_ASSERT(IsValid());
+        ValueType* v = &root;
+        bool exist = true;
+        for (const Token *t = tokens_; t != tokens_ + tokenCount_; ++t) {
+            if (v->IsArray() && t->name[0] == '-' && t->length == 1) {
+                v->PushBack(ValueType().Move(), allocator);
+                v = &((*v)[v->Size() - 1]);
+                exist = false;
+            }
+            else {
+                if (t->index == kPointerInvalidIndex) { // must be object name
+                    if (!v->IsObject())
+                        v->SetObject(); // Change to Object
+                }
+                else { // object name or array index
+                    if (!v->IsArray() && !v->IsObject())
+                        v->SetArray(); // Change to Array
+                }
+
+                if (v->IsArray()) {
+                    if (t->index >= v->Size()) {
+                        v->Reserve(t->index + 1, allocator);
+                        while (t->index >= v->Size())
+                            v->PushBack(ValueType().Move(), allocator);
+                        exist = false;
+                    }
+                    v = &((*v)[t->index]);
+                }
+                else {
+                    typename ValueType::MemberIterator m = v->FindMember(GenericValue<EncodingType>(GenericStringRef<Ch>(t->name, t->length)));
+                    if (m == v->MemberEnd()) {
+                        v->AddMember(ValueType(t->name, t->length, allocator).Move(), ValueType().Move(), allocator);
+                        m = v->MemberEnd();
+                        v = &(--m)->value; // Assumes AddMember() appends at the end
+                        exist = false;
+                    }
+                    else
+                        v = &m->value;
+                }
+            }
+        }
+
+        if (alreadyExist)
+            *alreadyExist = exist;
+
+        return *v;
+    }
+
+    //! Creates a value in a document.
+    /*!
+        \param document A document to be resolved.
+        \param alreadyExist If non-null, it stores whether the resolved value already exists.
+        \return The resolved newly created, or already existing value.
+    */
+    template <typename stackAllocator>
+    ValueType& Create(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, bool* alreadyExist = 0) const {
+        return Create(document, document.GetAllocator(), alreadyExist);
+    }
+
+    //@}
+
+    //!@name Query value
+    //@{
+
+    //! Query a value in a subtree.
+    /*!
+        \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root.
+        \param unresolvedTokenIndex If the pointer cannot resolve a token in the pointer, this parameter can obtain the index of unresolved token.
+        \return Pointer to the value if it can be resolved. Otherwise null.
+
+        \note
+        There are only 3 situations when a value cannot be resolved:
+        1. A value in the path is not an array nor object.
+        2.
An object value does not contain the token. + 3. A token is out of range of an array value. + + Use unresolvedTokenIndex to retrieve the token index. + */ + ValueType* Get(ValueType& root, size_t* unresolvedTokenIndex = 0) const { + RAPIDJSON_ASSERT(IsValid()); + ValueType* v = &root; + for (const Token *t = tokens_; t != tokens_ + tokenCount_; ++t) { + switch (v->GetType()) { + case kObjectType: + { + typename ValueType::MemberIterator m = v->FindMember(GenericValue(GenericStringRef(t->name, t->length))); + if (m == v->MemberEnd()) + break; + v = &m->value; + } + continue; + case kArrayType: + if (t->index == kPointerInvalidIndex || t->index >= v->Size()) + break; + v = &((*v)[t->index]); + continue; + default: + break; + } + + // Error: unresolved token + if (unresolvedTokenIndex) + *unresolvedTokenIndex = static_cast(t - tokens_); + return 0; + } + return v; + } + + //! Query a const value in a const subtree. + /*! + \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root. + \return Pointer to the value if it can be resolved. Otherwise null. + */ + const ValueType* Get(const ValueType& root, size_t* unresolvedTokenIndex = 0) const { + return Get(const_cast(root), unresolvedTokenIndex); + } + + //@} + + //!@name Query a value with default + //@{ + + //! Query a value in a subtree with default value. + /*! + Similar to Get(), but if the specified value do not exists, it creates all parents and clone the default value. + So that this function always succeed. + + \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root. + \param defaultValue Default value to be cloned if the value was not exists. + \param allocator Allocator for creating the values if the specified value or its parents are not exist. + \see Create() + */ + ValueType& GetWithDefault(ValueType& root, const ValueType& defaultValue, typename ValueType::AllocatorType& allocator) const { + bool alreadyExist; + ValueType& v = Create(root, allocator, &alreadyExist); + return alreadyExist ? v : v.CopyFrom(defaultValue, allocator); + } + + //! Query a value in a subtree with default null-terminated string. + ValueType& GetWithDefault(ValueType& root, const Ch* defaultValue, typename ValueType::AllocatorType& allocator) const { + bool alreadyExist; + ValueType& v = Create(root, allocator, &alreadyExist); + return alreadyExist ? v : v.SetString(defaultValue, allocator); + } + +#if RAPIDJSON_HAS_STDSTRING + //! Query a value in a subtree with default std::basic_string. + ValueType& GetWithDefault(ValueType& root, const std::basic_string& defaultValue, typename ValueType::AllocatorType& allocator) const { + bool alreadyExist; + ValueType& v = Create(root, allocator, &alreadyExist); + return alreadyExist ? v : v.SetString(defaultValue, allocator); + } +#endif + + //! Query a value in a subtree with default primitive value. + /*! + \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool + */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (ValueType&)) + GetWithDefault(ValueType& root, T defaultValue, typename ValueType::AllocatorType& allocator) const { + return GetWithDefault(root, ValueType(defaultValue).Move(), allocator); + } + + //! Query a value in a document with default value. + template + ValueType& GetWithDefault(GenericDocument& document, const ValueType& defaultValue) const { + return GetWithDefault(document, defaultValue, document.GetAllocator()); + } + + //! 
+
+    //!@name Query a value with default
+    //@{
+
+    //! Query a value in a subtree with default value.
+    /*!
+        Similar to Get(), but if the specified value does not exist, it creates all parents and clones the default value.
+        So this function always succeeds.
+
+        \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root.
+        \param defaultValue Default value to be cloned if the value does not exist.
+        \param allocator Allocator for creating the values if the specified value or its parents do not exist.
+        \see Create()
+    */
+    ValueType& GetWithDefault(ValueType& root, const ValueType& defaultValue, typename ValueType::AllocatorType& allocator) const {
+        bool alreadyExist;
+        ValueType& v = Create(root, allocator, &alreadyExist);
+        return alreadyExist ? v : v.CopyFrom(defaultValue, allocator);
+    }
+
+    //! Query a value in a subtree with default null-terminated string.
+    ValueType& GetWithDefault(ValueType& root, const Ch* defaultValue, typename ValueType::AllocatorType& allocator) const {
+        bool alreadyExist;
+        ValueType& v = Create(root, allocator, &alreadyExist);
+        return alreadyExist ? v : v.SetString(defaultValue, allocator);
+    }
+
+#if RAPIDJSON_HAS_STDSTRING
+    //! Query a value in a subtree with default std::basic_string.
+    ValueType& GetWithDefault(ValueType& root, const std::basic_string<Ch>& defaultValue, typename ValueType::AllocatorType& allocator) const {
+        bool alreadyExist;
+        ValueType& v = Create(root, allocator, &alreadyExist);
+        return alreadyExist ? v : v.SetString(defaultValue, allocator);
+    }
+#endif
+
+    //! Query a value in a subtree with default primitive value.
+    /*!
+        \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool
+    */
+    template <typename T>
+    RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (ValueType&))
+    GetWithDefault(ValueType& root, T defaultValue, typename ValueType::AllocatorType& allocator) const {
+        return GetWithDefault(root, ValueType(defaultValue).Move(), allocator);
+    }
+
+    //! Query a value in a document with default value.
+    template <typename stackAllocator>
+    ValueType& GetWithDefault(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, const ValueType& defaultValue) const {
+        return GetWithDefault(document, defaultValue, document.GetAllocator());
+    }
+
+    //! Query a value in a document with default null-terminated string.
+    template <typename stackAllocator>
+    ValueType& GetWithDefault(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, const Ch* defaultValue) const {
+        return GetWithDefault(document, defaultValue, document.GetAllocator());
+    }
+
+#if RAPIDJSON_HAS_STDSTRING
+    //! Query a value in a document with default std::basic_string.
+    template <typename stackAllocator>
+    ValueType& GetWithDefault(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, const std::basic_string<Ch>& defaultValue) const {
+        return GetWithDefault(document, defaultValue, document.GetAllocator());
+    }
+#endif
+
+    //! Query a value in a document with default primitive value.
+    /*!
+        \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool
+    */
+    template <typename T, typename stackAllocator>
+    RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (ValueType&))
+    GetWithDefault(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, T defaultValue) const {
+        return GetWithDefault(document, defaultValue, document.GetAllocator());
+    }
+
+    //@}
+
+    //!@name Set a value
+    //@{
+
+    //! Set a value in a subtree, with move semantics.
+    /*!
+        It creates all parents if they do not exist or their types do not match the tokens.
+        So this function always succeeds but potentially removes existing values.
+
+        \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root.
+        \param value Value to be set.
+        \param allocator Allocator for creating the values if the specified value or its parents do not exist.
+        \see Create()
+    */
+    ValueType& Set(ValueType& root, ValueType& value, typename ValueType::AllocatorType& allocator) const {
+        return Create(root, allocator) = value;
+    }
+
+    //! Set a value in a subtree, with copy semantics.
+    ValueType& Set(ValueType& root, const ValueType& value, typename ValueType::AllocatorType& allocator) const {
+        return Create(root, allocator).CopyFrom(value, allocator);
+    }
+
+    //! Set a null-terminated string in a subtree.
+    ValueType& Set(ValueType& root, const Ch* value, typename ValueType::AllocatorType& allocator) const {
+        return Create(root, allocator) = ValueType(value, allocator).Move();
+    }
+
+#if RAPIDJSON_HAS_STDSTRING
+    //! Set a std::basic_string in a subtree.
+    ValueType& Set(ValueType& root, const std::basic_string<Ch>& value, typename ValueType::AllocatorType& allocator) const {
+        return Create(root, allocator) = ValueType(value, allocator).Move();
+    }
+#endif
+
+    //! Set a primitive value in a subtree.
+    /*!
+        \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool
+    */
+    template <typename T>
+    RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (ValueType&))
+    Set(ValueType& root, T value, typename ValueType::AllocatorType& allocator) const {
+        return Create(root, allocator) = ValueType(value).Move();
+    }
+
+    //! Set a value in a document, with move semantics.
+    template <typename stackAllocator>
+    ValueType& Set(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, ValueType& value) const {
+        return Create(document) = value;
+    }
+
+    //! Set a value in a document, with copy semantics.
+    template <typename stackAllocator>
+    ValueType& Set(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, const ValueType& value) const {
+        return Create(document).CopyFrom(value, document.GetAllocator());
+    }
+
+    //! Set a null-terminated string in a document.
+    template <typename stackAllocator>
+    ValueType& Set(GenericDocument<EncodingType, typename ValueType::AllocatorType, stackAllocator>& document, const Ch* value) const {
+        return Create(document) = ValueType(value, document.GetAllocator()).Move();
+    }
+
+#if RAPIDJSON_HAS_STDSTRING
+    //! Sets a std::basic_string in a document.
+ template + ValueType& Set(GenericDocument& document, const std::basic_string& value) const { + return Create(document) = ValueType(value, document.GetAllocator()).Move(); + } +#endif + + //! Set a primitive value in a document. + /*! + \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool + */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (ValueType&)) + Set(GenericDocument& document, T value) const { + return Create(document) = value; + } + + //@} + + //!@name Swap a value + //@{ + + //! Swap a value with a value in a subtree. + /*! + It creates all parents if they are not exist or types are different to the tokens. + So this function always succeeds but potentially remove existing values. + + \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root. + \param value Value to be swapped. + \param allocator Allocator for creating the values if the specified value or its parents are not exist. + \see Create() + */ + ValueType& Swap(ValueType& root, ValueType& value, typename ValueType::AllocatorType& allocator) const { + return Create(root, allocator).Swap(value); + } + + //! Swap a value with a value in a document. + template + ValueType& Swap(GenericDocument& document, ValueType& value) const { + return Create(document).Swap(value); + } + + //@} + + //! Erase a value in a subtree. + /*! + \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root. + \return Whether the resolved value is found and erased. + + \note Erasing with an empty pointer \c Pointer(""), i.e. the root, always fail and return false. + */ + bool Erase(ValueType& root) const { + RAPIDJSON_ASSERT(IsValid()); + if (tokenCount_ == 0) // Cannot erase the root + return false; + + ValueType* v = &root; + const Token* last = tokens_ + (tokenCount_ - 1); + for (const Token *t = tokens_; t != last; ++t) { + switch (v->GetType()) { + case kObjectType: + { + typename ValueType::MemberIterator m = v->FindMember(GenericValue(GenericStringRef(t->name, t->length))); + if (m == v->MemberEnd()) + return false; + v = &m->value; + } + break; + case kArrayType: + if (t->index == kPointerInvalidIndex || t->index >= v->Size()) + return false; + v = &((*v)[t->index]); + break; + default: + return false; + } + } + + switch (v->GetType()) { + case kObjectType: + return v->EraseMember(GenericStringRef(last->name, last->length)); + case kArrayType: + if (last->index == kPointerInvalidIndex || last->index >= v->Size()) + return false; + v->Erase(v->Begin() + last->index); + return true; + default: + return false; + } + } + +private: + //! Clone the content from rhs to this. + /*! + \param rhs Source pointer. + \param extraToken Extra tokens to be allocated. + \param extraNameBufferSize Extra name buffer size (in number of Ch) to be allocated. + \return Start of non-occupied name buffer, for storing extra names. + */ + Ch* CopyFromRaw(const GenericPointer& rhs, size_t extraToken = 0, size_t extraNameBufferSize = 0) { + if (!allocator_) // allocator is independently owned. 
+ ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)(); + + size_t nameBufferSize = rhs.tokenCount_; // null terminators for tokens + for (Token *t = rhs.tokens_; t != rhs.tokens_ + rhs.tokenCount_; ++t) + nameBufferSize += t->length; + + tokenCount_ = rhs.tokenCount_ + extraToken; + tokens_ = static_cast(allocator_->Malloc(tokenCount_ * sizeof(Token) + (nameBufferSize + extraNameBufferSize) * sizeof(Ch))); + nameBuffer_ = reinterpret_cast(tokens_ + tokenCount_); + if (rhs.tokenCount_ > 0) { + std::memcpy(tokens_, rhs.tokens_, rhs.tokenCount_ * sizeof(Token)); + } + if (nameBufferSize > 0) { + std::memcpy(nameBuffer_, rhs.nameBuffer_, nameBufferSize * sizeof(Ch)); + } + + // Adjust pointers to name buffer + std::ptrdiff_t diff = nameBuffer_ - rhs.nameBuffer_; + for (Token *t = tokens_; t != tokens_ + rhs.tokenCount_; ++t) + t->name += diff; + + return nameBuffer_ + nameBufferSize; + } + + //! Check whether a character should be percent-encoded. + /*! + According to RFC 3986 2.3 Unreserved Characters. + \param c The character (code unit) to be tested. + */ + bool NeedPercentEncode(Ch c) const { + return !((c >= '0' && c <= '9') || (c >= 'A' && c <='Z') || (c >= 'a' && c <= 'z') || c == '-' || c == '.' || c == '_' || c =='~'); + } + + //! Parse a JSON String or its URI fragment representation into tokens. +#ifndef __clang__ // -Wdocumentation + /*! + \param source Either a JSON Pointer string, or its URI fragment representation. Not need to be null terminated. + \param length Length of the source string. + \note Source cannot be JSON String Representation of JSON Pointer, e.g. In "/\u0000", \u0000 will not be unescaped. + */ +#endif + void Parse(const Ch* source, size_t length) { + RAPIDJSON_ASSERT(source != NULL); + RAPIDJSON_ASSERT(nameBuffer_ == 0); + RAPIDJSON_ASSERT(tokens_ == 0); + + // Create own allocator if user did not supply. 
+ if (!allocator_) + ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)(); + + // Count number of '/' as tokenCount + tokenCount_ = 0; + for (const Ch* s = source; s != source + length; s++) + if (*s == '/') + tokenCount_++; + + Token* token = tokens_ = static_cast(allocator_->Malloc(tokenCount_ * sizeof(Token) + length * sizeof(Ch))); + Ch* name = nameBuffer_ = reinterpret_cast(tokens_ + tokenCount_); + size_t i = 0; + + // Detect if it is a URI fragment + bool uriFragment = false; + if (source[i] == '#') { + uriFragment = true; + i++; + } + + if (i != length && source[i] != '/') { + parseErrorCode_ = kPointerParseErrorTokenMustBeginWithSolidus; + goto error; + } + + while (i < length) { + RAPIDJSON_ASSERT(source[i] == '/'); + i++; // consumes '/' + + token->name = name; + bool isNumber = true; + + while (i < length && source[i] != '/') { + Ch c = source[i]; + if (uriFragment) { + // Decoding percent-encoding for URI fragment + if (c == '%') { + PercentDecodeStream is(&source[i], source + length); + GenericInsituStringStream os(name); + Ch* begin = os.PutBegin(); + if (!Transcoder, EncodingType>().Validate(is, os) || !is.IsValid()) { + parseErrorCode_ = kPointerParseErrorInvalidPercentEncoding; + goto error; + } + size_t len = os.PutEnd(begin); + i += is.Tell() - 1; + if (len == 1) + c = *name; + else { + name += len; + isNumber = false; + i++; + continue; + } + } + else if (NeedPercentEncode(c)) { + parseErrorCode_ = kPointerParseErrorCharacterMustPercentEncode; + goto error; + } + } + + i++; + + // Escaping "~0" -> '~', "~1" -> '/' + if (c == '~') { + if (i < length) { + c = source[i]; + if (c == '0') c = '~'; + else if (c == '1') c = '/'; + else { + parseErrorCode_ = kPointerParseErrorInvalidEscape; + goto error; + } + i++; + } + else { + parseErrorCode_ = kPointerParseErrorInvalidEscape; + goto error; + } + } + + // First check for index: all of characters are digit + if (c < '0' || c > '9') + isNumber = false; + + *name++ = c; + } + token->length = static_cast(name - token->name); + if (token->length == 0) + isNumber = false; + *name++ = '\0'; // Null terminator + + // Second check for index: more than one digit cannot have leading zero + if (isNumber && token->length > 1 && token->name[0] == '0') + isNumber = false; + + // String to SizeType conversion + SizeType n = 0; + if (isNumber) { + for (size_t j = 0; j < token->length; j++) { + SizeType m = n * 10 + static_cast(token->name[j] - '0'); + if (m < n) { // overflow detection + isNumber = false; + break; + } + n = m; + } + } + + token->index = isNumber ? n : kPointerInvalidIndex; + token++; + } + + RAPIDJSON_ASSERT(name <= nameBuffer_ + length); // Should not overflow buffer + parseErrorCode_ = kPointerParseErrorNone; + return; + + error: + Allocator::Free(tokens_); + nameBuffer_ = 0; + tokens_ = 0; + tokenCount_ = 0; + parseErrorOffset_ = i; + return; + } + + //! Stringify to string or URI fragment representation. + /*! + \tparam uriFragment True for stringifying to URI fragment representation. False for string representation. + \tparam OutputStream type of output stream. + \param os The output stream. 
+ */ + template + bool Stringify(OutputStream& os) const { + RAPIDJSON_ASSERT(IsValid()); + + if (uriFragment) + os.Put('#'); + + for (Token *t = tokens_; t != tokens_ + tokenCount_; ++t) { + os.Put('/'); + for (size_t j = 0; j < t->length; j++) { + Ch c = t->name[j]; + if (c == '~') { + os.Put('~'); + os.Put('0'); + } + else if (c == '/') { + os.Put('~'); + os.Put('1'); + } + else if (uriFragment && NeedPercentEncode(c)) { + // Transcode to UTF8 sequence + GenericStringStream source(&t->name[j]); + PercentEncodeStream target(os); + if (!Transcoder >().Validate(source, target)) + return false; + j += source.Tell() - 1; + } + else + os.Put(c); + } + } + return true; + } + + //! A helper stream for decoding a percent-encoded sequence into code unit. + /*! + This stream decodes %XY triplet into code unit (0-255). + If it encounters invalid characters, it sets output code unit as 0 and + mark invalid, and to be checked by IsValid(). + */ + class PercentDecodeStream { + public: + typedef typename ValueType::Ch Ch; + + //! Constructor + /*! + \param source Start of the stream + \param end Past-the-end of the stream. + */ + PercentDecodeStream(const Ch* source, const Ch* end) : src_(source), head_(source), end_(end), valid_(true) {} + + Ch Take() { + if (*src_ != '%' || src_ + 3 > end_) { // %XY triplet + valid_ = false; + return 0; + } + src_++; + Ch c = 0; + for (int j = 0; j < 2; j++) { + c = static_cast(c << 4); + Ch h = *src_; + if (h >= '0' && h <= '9') c = static_cast(c + h - '0'); + else if (h >= 'A' && h <= 'F') c = static_cast(c + h - 'A' + 10); + else if (h >= 'a' && h <= 'f') c = static_cast(c + h - 'a' + 10); + else { + valid_ = false; + return 0; + } + src_++; + } + return c; + } + + size_t Tell() const { return static_cast(src_ - head_); } + bool IsValid() const { return valid_; } + + private: + const Ch* src_; //!< Current read position. + const Ch* head_; //!< Original head of the string. + const Ch* end_; //!< Past-the-end position. + bool valid_; //!< Whether the parsing is valid. + }; + + //! A helper stream to encode character (UTF-8 code unit) into percent-encoded sequence. + template + class PercentEncodeStream { + public: + PercentEncodeStream(OutputStream& os) : os_(os) {} + void Put(char c) { // UTF-8 must be byte + unsigned char u = static_cast(c); + static const char hexDigits[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' }; + os_.Put('%'); + os_.Put(static_cast(hexDigits[u >> 4])); + os_.Put(static_cast(hexDigits[u & 15])); + } + private: + OutputStream& os_; + }; + + Allocator* allocator_; //!< The current allocator. It is either user-supplied or equal to ownAllocator_. + Allocator* ownAllocator_; //!< Allocator owned by this Pointer. + Ch* nameBuffer_; //!< A buffer containing all names in tokens. + Token* tokens_; //!< A list of tokens. + size_t tokenCount_; //!< Number of tokens in tokens_. + size_t parseErrorOffset_; //!< Offset in code unit when parsing fail. + PointerParseErrorCode parseErrorCode_; //!< Parsing error code. +}; + +//! GenericPointer for Value (UTF-8, default allocator). 
+typedef GenericPointer Pointer; + +//!@name Helper functions for GenericPointer +//@{ + +////////////////////////////////////////////////////////////////////////////// + +template +typename T::ValueType& CreateValueByPointer(T& root, const GenericPointer& pointer, typename T::AllocatorType& a) { + return pointer.Create(root, a); +} + +template +typename T::ValueType& CreateValueByPointer(T& root, const CharType(&source)[N], typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).Create(root, a); +} + +// No allocator parameter + +template +typename DocumentType::ValueType& CreateValueByPointer(DocumentType& document, const GenericPointer& pointer) { + return pointer.Create(document); +} + +template +typename DocumentType::ValueType& CreateValueByPointer(DocumentType& document, const CharType(&source)[N]) { + return GenericPointer(source, N - 1).Create(document); +} + +////////////////////////////////////////////////////////////////////////////// + +template +typename T::ValueType* GetValueByPointer(T& root, const GenericPointer& pointer, size_t* unresolvedTokenIndex = 0) { + return pointer.Get(root, unresolvedTokenIndex); +} + +template +const typename T::ValueType* GetValueByPointer(const T& root, const GenericPointer& pointer, size_t* unresolvedTokenIndex = 0) { + return pointer.Get(root, unresolvedTokenIndex); +} + +template +typename T::ValueType* GetValueByPointer(T& root, const CharType (&source)[N], size_t* unresolvedTokenIndex = 0) { + return GenericPointer(source, N - 1).Get(root, unresolvedTokenIndex); +} + +template +const typename T::ValueType* GetValueByPointer(const T& root, const CharType(&source)[N], size_t* unresolvedTokenIndex = 0) { + return GenericPointer(source, N - 1).Get(root, unresolvedTokenIndex); +} + +////////////////////////////////////////////////////////////////////////////// + +template +typename T::ValueType& GetValueByPointerWithDefault(T& root, const GenericPointer& pointer, const typename T::ValueType& defaultValue, typename T::AllocatorType& a) { + return pointer.GetWithDefault(root, defaultValue, a); +} + +template +typename T::ValueType& GetValueByPointerWithDefault(T& root, const GenericPointer& pointer, const typename T::Ch* defaultValue, typename T::AllocatorType& a) { + return pointer.GetWithDefault(root, defaultValue, a); +} + +#if RAPIDJSON_HAS_STDSTRING +template +typename T::ValueType& GetValueByPointerWithDefault(T& root, const GenericPointer& pointer, const std::basic_string& defaultValue, typename T::AllocatorType& a) { + return pointer.GetWithDefault(root, defaultValue, a); +} +#endif + +template +RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (typename T::ValueType&)) +GetValueByPointerWithDefault(T& root, const GenericPointer& pointer, T2 defaultValue, typename T::AllocatorType& a) { + return pointer.GetWithDefault(root, defaultValue, a); +} + +template +typename T::ValueType& GetValueByPointerWithDefault(T& root, const CharType(&source)[N], const typename T::ValueType& defaultValue, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).GetWithDefault(root, defaultValue, a); +} + +template +typename T::ValueType& GetValueByPointerWithDefault(T& root, const CharType(&source)[N], const typename T::Ch* defaultValue, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).GetWithDefault(root, defaultValue, a); +} + +#if RAPIDJSON_HAS_STDSTRING +template +typename T::ValueType& GetValueByPointerWithDefault(T& root, const CharType(&source)[N], const std::basic_string& 
defaultValue, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).GetWithDefault(root, defaultValue, a); +} +#endif + +template +RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (typename T::ValueType&)) +GetValueByPointerWithDefault(T& root, const CharType(&source)[N], T2 defaultValue, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).GetWithDefault(root, defaultValue, a); +} + +// No allocator parameter + +template +typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const GenericPointer& pointer, const typename DocumentType::ValueType& defaultValue) { + return pointer.GetWithDefault(document, defaultValue); +} + +template +typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const GenericPointer& pointer, const typename DocumentType::Ch* defaultValue) { + return pointer.GetWithDefault(document, defaultValue); +} + +#if RAPIDJSON_HAS_STDSTRING +template +typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const GenericPointer& pointer, const std::basic_string& defaultValue) { + return pointer.GetWithDefault(document, defaultValue); +} +#endif + +template +RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (typename DocumentType::ValueType&)) +GetValueByPointerWithDefault(DocumentType& document, const GenericPointer& pointer, T2 defaultValue) { + return pointer.GetWithDefault(document, defaultValue); +} + +template +typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const CharType(&source)[N], const typename DocumentType::ValueType& defaultValue) { + return GenericPointer(source, N - 1).GetWithDefault(document, defaultValue); +} + +template +typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const CharType(&source)[N], const typename DocumentType::Ch* defaultValue) { + return GenericPointer(source, N - 1).GetWithDefault(document, defaultValue); +} + +#if RAPIDJSON_HAS_STDSTRING +template +typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const CharType(&source)[N], const std::basic_string& defaultValue) { + return GenericPointer(source, N - 1).GetWithDefault(document, defaultValue); +} +#endif + +template +RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (typename DocumentType::ValueType&)) +GetValueByPointerWithDefault(DocumentType& document, const CharType(&source)[N], T2 defaultValue) { + return GenericPointer(source, N - 1).GetWithDefault(document, defaultValue); +} + +////////////////////////////////////////////////////////////////////////////// + +template +typename T::ValueType& SetValueByPointer(T& root, const GenericPointer& pointer, typename T::ValueType& value, typename T::AllocatorType& a) { + return pointer.Set(root, value, a); +} + +template +typename T::ValueType& SetValueByPointer(T& root, const GenericPointer& pointer, const typename T::ValueType& value, typename T::AllocatorType& a) { + return pointer.Set(root, value, a); +} + +template +typename T::ValueType& SetValueByPointer(T& root, const GenericPointer& pointer, const typename T::Ch* value, typename T::AllocatorType& a) { + return pointer.Set(root, value, a); +} + +#if RAPIDJSON_HAS_STDSTRING +template +typename T::ValueType& SetValueByPointer(T& root, const GenericPointer& pointer, const std::basic_string& value, typename T::AllocatorType& a) { + return pointer.Set(root, value, a); +} +#endif 
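+
+// Illustrative sketch of the free-function helpers in this section
+// (hypothetical; not part of upstream RapidJSON; assumes the default
+// Document/Value typedefs). The document overloads draw on the document's
+// own allocator, so no allocator argument is needed:
+/*! \code
+    Document d;
+    d.SetObject();
+    SetValueByPointer(d, "/fr", 60);               // primitive-value overload
+    SetValueByPointer(d, Pointer("/v"), "5.5.2");  // null-terminated string overload
+    // Equivalent member-function form: Pointer("/fr").Set(d, 60);
+    \endcode
+*/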
+ +template +RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (typename T::ValueType&)) +SetValueByPointer(T& root, const GenericPointer& pointer, T2 value, typename T::AllocatorType& a) { + return pointer.Set(root, value, a); +} + +template +typename T::ValueType& SetValueByPointer(T& root, const CharType(&source)[N], typename T::ValueType& value, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).Set(root, value, a); +} + +template +typename T::ValueType& SetValueByPointer(T& root, const CharType(&source)[N], const typename T::ValueType& value, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).Set(root, value, a); +} + +template +typename T::ValueType& SetValueByPointer(T& root, const CharType(&source)[N], const typename T::Ch* value, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).Set(root, value, a); +} + +#if RAPIDJSON_HAS_STDSTRING +template +typename T::ValueType& SetValueByPointer(T& root, const CharType(&source)[N], const std::basic_string& value, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).Set(root, value, a); +} +#endif + +template +RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (typename T::ValueType&)) +SetValueByPointer(T& root, const CharType(&source)[N], T2 value, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).Set(root, value, a); +} + +// No allocator parameter + +template +typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const GenericPointer& pointer, typename DocumentType::ValueType& value) { + return pointer.Set(document, value); +} + +template +typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const GenericPointer& pointer, const typename DocumentType::ValueType& value) { + return pointer.Set(document, value); +} + +template +typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const GenericPointer& pointer, const typename DocumentType::Ch* value) { + return pointer.Set(document, value); +} + +#if RAPIDJSON_HAS_STDSTRING +template +typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const GenericPointer& pointer, const std::basic_string& value) { + return pointer.Set(document, value); +} +#endif + +template +RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (typename DocumentType::ValueType&)) +SetValueByPointer(DocumentType& document, const GenericPointer& pointer, T2 value) { + return pointer.Set(document, value); +} + +template +typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const CharType(&source)[N], typename DocumentType::ValueType& value) { + return GenericPointer(source, N - 1).Set(document, value); +} + +template +typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const CharType(&source)[N], const typename DocumentType::ValueType& value) { + return GenericPointer(source, N - 1).Set(document, value); +} + +template +typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const CharType(&source)[N], const typename DocumentType::Ch* value) { + return GenericPointer(source, N - 1).Set(document, value); +} + +#if RAPIDJSON_HAS_STDSTRING +template +typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const CharType(&source)[N], const std::basic_string& value) { + return GenericPointer(source, N - 1).Set(document, value); +} +#endif + +template 
+RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (typename DocumentType::ValueType&)) +SetValueByPointer(DocumentType& document, const CharType(&source)[N], T2 value) { + return GenericPointer(source, N - 1).Set(document, value); +} + +////////////////////////////////////////////////////////////////////////////// + +template +typename T::ValueType& SwapValueByPointer(T& root, const GenericPointer& pointer, typename T::ValueType& value, typename T::AllocatorType& a) { + return pointer.Swap(root, value, a); +} + +template +typename T::ValueType& SwapValueByPointer(T& root, const CharType(&source)[N], typename T::ValueType& value, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).Swap(root, value, a); +} + +template +typename DocumentType::ValueType& SwapValueByPointer(DocumentType& document, const GenericPointer& pointer, typename DocumentType::ValueType& value) { + return pointer.Swap(document, value); +} + +template +typename DocumentType::ValueType& SwapValueByPointer(DocumentType& document, const CharType(&source)[N], typename DocumentType::ValueType& value) { + return GenericPointer(source, N - 1).Swap(document, value); +} + +////////////////////////////////////////////////////////////////////////////// + +template +bool EraseValueByPointer(T& root, const GenericPointer& pointer) { + return pointer.Erase(root); +} + +template +bool EraseValueByPointer(T& root, const CharType(&source)[N]) { + return GenericPointer(source, N - 1).Erase(root); +} + +//@} + +RAPIDJSON_NAMESPACE_END + +#if defined(__clang__) || defined(_MSC_VER) +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_POINTER_H_ diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_prettywriter.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_prettywriter.h new file mode 100644 index 00000000..d353faa7 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_prettywriter.h @@ -0,0 +1,277 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_PRETTYWRITER_H_ +#define RAPIDJSON_PRETTYWRITER_H_ + +#include "lottie_rapidjson_writer.h" + +#ifdef __GNUC__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(effc++) +#endif + +#if defined(__clang__) +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(c++98-compat) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +//! Combination of PrettyWriter format flags. +/*! \see PrettyWriter::SetFormatOptions + */ +enum PrettyFormatOptions { + kFormatDefault = 0, //!< Default pretty formatting. + kFormatSingleLineArray = 1 //!< Format arrays on a single line. +}; + +//! Writer with indentation and spacing. +/*! + \tparam OutputStream Type of output os. + \tparam SourceEncoding Encoding of source string. + \tparam TargetEncoding Encoding of output stream. + \tparam StackAllocator Type of allocator for allocating memory of stack. 
+*/ +template, typename TargetEncoding = UTF8<>, typename StackAllocator = CrtAllocator, unsigned writeFlags = kWriteDefaultFlags> +class PrettyWriter : public Writer { +public: + typedef Writer Base; + typedef typename Base::Ch Ch; + + //! Constructor + /*! \param os Output stream. + \param allocator User supplied allocator. If it is null, it will create a private one. + \param levelDepth Initial capacity of stack. + */ + explicit PrettyWriter(OutputStream& os, StackAllocator* allocator = 0, size_t levelDepth = Base::kDefaultLevelDepth) : + Base(os, allocator, levelDepth), indentChar_(' '), indentCharCount_(4), formatOptions_(kFormatDefault) {} + + + explicit PrettyWriter(StackAllocator* allocator = 0, size_t levelDepth = Base::kDefaultLevelDepth) : + Base(allocator, levelDepth), indentChar_(' '), indentCharCount_(4), formatOptions_(kFormatDefault) {} + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + PrettyWriter(PrettyWriter&& rhs) : + Base(std::forward(rhs)), indentChar_(rhs.indentChar_), indentCharCount_(rhs.indentCharCount_), formatOptions_(rhs.formatOptions_) {} +#endif + + //! Set custom indentation. + /*! \param indentChar Character for indentation. Must be whitespace character (' ', '\\t', '\\n', '\\r'). + \param indentCharCount Number of indent characters for each indentation level. + \note The default indentation is 4 spaces. + */ + PrettyWriter& SetIndent(Ch indentChar, unsigned indentCharCount) { + RAPIDJSON_ASSERT(indentChar == ' ' || indentChar == '\t' || indentChar == '\n' || indentChar == '\r'); + indentChar_ = indentChar; + indentCharCount_ = indentCharCount; + return *this; + } + + //! Set pretty writer formatting options. + /*! \param options Formatting options. + */ + PrettyWriter& SetFormatOptions(PrettyFormatOptions options) { + formatOptions_ = options; + return *this; + } + + /*! @name Implementation of Handler + \see Handler + */ + //@{ + + bool Null() { PrettyPrefix(kNullType); return Base::EndValue(Base::WriteNull()); } + bool Bool(bool b) { PrettyPrefix(b ? 
kTrueType : kFalseType); return Base::EndValue(Base::WriteBool(b)); } + bool Int(int i) { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteInt(i)); } + bool Uint(unsigned u) { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteUint(u)); } + bool Int64(int64_t i64) { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteInt64(i64)); } + bool Uint64(uint64_t u64) { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteUint64(u64)); } + bool Double(double d) { PrettyPrefix(kNumberType); return Base::EndValue(Base::WriteDouble(d)); } + + bool RawNumber(const Ch* str, SizeType length, bool copy = false) { + RAPIDJSON_ASSERT(str != 0); + (void)copy; + PrettyPrefix(kNumberType); + return Base::EndValue(Base::WriteString(str, length)); + } + + bool String(const Ch* str, SizeType length, bool copy = false) { + RAPIDJSON_ASSERT(str != 0); + (void)copy; + PrettyPrefix(kStringType); + return Base::EndValue(Base::WriteString(str, length)); + } + +#if RAPIDJSON_HAS_STDSTRING + bool String(const std::basic_string& str) { + return String(str.data(), SizeType(str.size())); + } +#endif + + bool StartObject() { + PrettyPrefix(kObjectType); + new (Base::level_stack_.template Push()) typename Base::Level(false); + return Base::WriteStartObject(); + } + + bool Key(const Ch* str, SizeType length, bool copy = false) { return String(str, length, copy); } + +#if RAPIDJSON_HAS_STDSTRING + bool Key(const std::basic_string& str) { + return Key(str.data(), SizeType(str.size())); + } +#endif + + bool EndObject(SizeType memberCount = 0) { + (void)memberCount; + RAPIDJSON_ASSERT(Base::level_stack_.GetSize() >= sizeof(typename Base::Level)); // not inside an Object + RAPIDJSON_ASSERT(!Base::level_stack_.template Top()->inArray); // currently inside an Array, not Object + RAPIDJSON_ASSERT(0 == Base::level_stack_.template Top()->valueCount % 2); // Object has a Key without a Value + + bool empty = Base::level_stack_.template Pop(1)->valueCount == 0; + + if (!empty) { + Base::os_->Put('\n'); + WriteIndent(); + } + bool ret = Base::EndValue(Base::WriteEndObject()); + (void)ret; + RAPIDJSON_ASSERT(ret == true); + if (Base::level_stack_.Empty()) // end of json text + Base::Flush(); + return true; + } + + bool StartArray() { + PrettyPrefix(kArrayType); + new (Base::level_stack_.template Push()) typename Base::Level(true); + return Base::WriteStartArray(); + } + + bool EndArray(SizeType memberCount = 0) { + (void)memberCount; + RAPIDJSON_ASSERT(Base::level_stack_.GetSize() >= sizeof(typename Base::Level)); + RAPIDJSON_ASSERT(Base::level_stack_.template Top()->inArray); + bool empty = Base::level_stack_.template Pop(1)->valueCount == 0; + + if (!empty && !(formatOptions_ & kFormatSingleLineArray)) { + Base::os_->Put('\n'); + WriteIndent(); + } + bool ret = Base::EndValue(Base::WriteEndArray()); + (void)ret; + RAPIDJSON_ASSERT(ret == true); + if (Base::level_stack_.Empty()) // end of json text + Base::Flush(); + return true; + } + + //@} + + /*! @name Convenience extensions */ + //@{ + + //! Simpler but slower overload. + bool String(const Ch* str) { return String(str, internal::StrLen(str)); } + bool Key(const Ch* str) { return Key(str, internal::StrLen(str)); } + + //@} + + //! Write a raw JSON value. + /*! + For user to write a stringified JSON as a value. + + \param json A well-formed JSON value. It should not contain null character within [0, length - 1] range. + \param length Length of the json. + \param type Type of the root of json. 
+ \note When using PrettyWriter::RawValue(), the result json may not be indented correctly. + */ + bool RawValue(const Ch* json, size_t length, Type type) { + RAPIDJSON_ASSERT(json != 0); + PrettyPrefix(type); + return Base::EndValue(Base::WriteRawValue(json, length)); + } + +protected: + void PrettyPrefix(Type type) { + (void)type; + if (Base::level_stack_.GetSize() != 0) { // this value is not at root + typename Base::Level* level = Base::level_stack_.template Top(); + + if (level->inArray) { + if (level->valueCount > 0) { + Base::os_->Put(','); // add comma if it is not the first element in array + if (formatOptions_ & kFormatSingleLineArray) + Base::os_->Put(' '); + } + + if (!(formatOptions_ & kFormatSingleLineArray)) { + Base::os_->Put('\n'); + WriteIndent(); + } + } + else { // in object + if (level->valueCount > 0) { + if (level->valueCount % 2 == 0) { + Base::os_->Put(','); + Base::os_->Put('\n'); + } + else { + Base::os_->Put(':'); + Base::os_->Put(' '); + } + } + else + Base::os_->Put('\n'); + + if (level->valueCount % 2 == 0) + WriteIndent(); + } + if (!level->inArray && level->valueCount % 2 == 0) + RAPIDJSON_ASSERT(type == kStringType); // if it's in object, then even number should be a name + level->valueCount++; + } + else { + RAPIDJSON_ASSERT(!Base::hasRoot_); // Should only has one and only one root. + Base::hasRoot_ = true; + } + } + + void WriteIndent() { + size_t count = (Base::level_stack_.GetSize() / sizeof(typename Base::Level)) * indentCharCount_; + PutN(*Base::os_, static_cast(indentChar_), count); + } + + Ch indentChar_; + unsigned indentCharCount_; + PrettyFormatOptions formatOptions_; + +private: + // Prohibit copy constructor & assignment operator. + PrettyWriter(const PrettyWriter&); + PrettyWriter& operator=(const PrettyWriter&); +}; + +RAPIDJSON_NAMESPACE_END + +#if defined(__clang__) +RAPIDJSON_DIAG_POP +#endif + +#ifdef __GNUC__ +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_RAPIDJSON_H_ diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_rapidjson.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_rapidjson.h new file mode 100644 index 00000000..f1fd5ba2 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_rapidjson.h @@ -0,0 +1,692 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_RAPIDJSON_H_ +#define RAPIDJSON_RAPIDJSON_H_ + +/*!\file rapidjson.h + \brief common definitions and configuration + + \see RAPIDJSON_CONFIG + */ + +/*! \defgroup RAPIDJSON_CONFIG RapidJSON configuration + \brief Configuration macros for library features + + Some RapidJSON features are configurable to adapt the library to a wide + variety of platforms, environments and usage scenarios. Most of the + features can be configured in terms of overridden or predefined + preprocessor macros at compile-time. 
+ + Some additional customization is available in the \ref RAPIDJSON_ERRORS APIs. + + \note These macros should be given on the compiler command-line + (where applicable) to avoid inconsistent values when compiling + different translation units of a single application. + */ + +#include // malloc(), realloc(), free(), size_t +#include // memset(), memcpy(), memmove(), memcmp() + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_VERSION_STRING +// +// ALWAYS synchronize the following 3 macros with corresponding variables in /CMakeLists.txt. +// + +//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN +// token stringification +#define RAPIDJSON_STRINGIFY(x) RAPIDJSON_DO_STRINGIFY(x) +#define RAPIDJSON_DO_STRINGIFY(x) #x + +// token concatenation +#define RAPIDJSON_JOIN(X, Y) RAPIDJSON_DO_JOIN(X, Y) +#define RAPIDJSON_DO_JOIN(X, Y) RAPIDJSON_DO_JOIN2(X, Y) +#define RAPIDJSON_DO_JOIN2(X, Y) X##Y +//!@endcond + +/*! \def RAPIDJSON_MAJOR_VERSION + \ingroup RAPIDJSON_CONFIG + \brief Major version of RapidJSON in integer. +*/ +/*! \def RAPIDJSON_MINOR_VERSION + \ingroup RAPIDJSON_CONFIG + \brief Minor version of RapidJSON in integer. +*/ +/*! \def RAPIDJSON_PATCH_VERSION + \ingroup RAPIDJSON_CONFIG + \brief Patch version of RapidJSON in integer. +*/ +/*! \def RAPIDJSON_VERSION_STRING + \ingroup RAPIDJSON_CONFIG + \brief Version of RapidJSON in ".." string format. +*/ +#define RAPIDJSON_MAJOR_VERSION 1 +#define RAPIDJSON_MINOR_VERSION 1 +#define RAPIDJSON_PATCH_VERSION 0 +#define RAPIDJSON_VERSION_STRING \ + RAPIDJSON_STRINGIFY(RAPIDJSON_MAJOR_VERSION.RAPIDJSON_MINOR_VERSION.RAPIDJSON_PATCH_VERSION) + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_NAMESPACE_(BEGIN|END) +/*! \def RAPIDJSON_NAMESPACE + \ingroup RAPIDJSON_CONFIG + \brief provide custom rapidjson namespace + + In order to avoid symbol clashes and/or "One Definition Rule" errors + between multiple inclusions of (different versions of) RapidJSON in + a single binary, users can customize the name of the main RapidJSON + namespace. + + In case of a single nesting level, defining \c RAPIDJSON_NAMESPACE + to a custom name (e.g. \c MyRapidJSON) is sufficient. If multiple + levels are needed, both \ref RAPIDJSON_NAMESPACE_BEGIN and \ref + RAPIDJSON_NAMESPACE_END need to be defined as well: + + \code + // in some .cpp file + #define RAPIDJSON_NAMESPACE my::rapidjson + #define RAPIDJSON_NAMESPACE_BEGIN namespace my { namespace rapidjson { + #define RAPIDJSON_NAMESPACE_END } } + #include "rapidjson/..." + \endcode + + \see rapidjson + */ +/*! \def RAPIDJSON_NAMESPACE_BEGIN + \ingroup RAPIDJSON_CONFIG + \brief provide custom rapidjson namespace (opening expression) + \see RAPIDJSON_NAMESPACE +*/ +/*! \def RAPIDJSON_NAMESPACE_END + \ingroup RAPIDJSON_CONFIG + \brief provide custom rapidjson namespace (closing expression) + \see RAPIDJSON_NAMESPACE +*/ +#ifndef RAPIDJSON_NAMESPACE +#define RAPIDJSON_NAMESPACE rapidjson +#endif +#ifndef RAPIDJSON_NAMESPACE_BEGIN +#define RAPIDJSON_NAMESPACE_BEGIN namespace RAPIDJSON_NAMESPACE { +#endif +#ifndef RAPIDJSON_NAMESPACE_END +#define RAPIDJSON_NAMESPACE_END } +#endif + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_HAS_STDSTRING + +#ifndef RAPIDJSON_HAS_STDSTRING +#ifdef RAPIDJSON_DOXYGEN_RUNNING +#define RAPIDJSON_HAS_STDSTRING 1 // force generation of documentation +#else +#define RAPIDJSON_HAS_STDSTRING 0 // no std::string support by default +#endif +/*! 
\def RAPIDJSON_HAS_STDSTRING + \ingroup RAPIDJSON_CONFIG + \brief Enable RapidJSON support for \c std::string + + By defining this preprocessor symbol to \c 1, several convenience functions for using + \ref rapidjson::GenericValue with \c std::string are enabled, especially + for construction and comparison. + + \hideinitializer +*/ +#endif // !defined(RAPIDJSON_HAS_STDSTRING) + +#if RAPIDJSON_HAS_STDSTRING +#include +#endif // RAPIDJSON_HAS_STDSTRING + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_NO_INT64DEFINE + +/*! \def RAPIDJSON_NO_INT64DEFINE + \ingroup RAPIDJSON_CONFIG + \brief Use external 64-bit integer types. + + RapidJSON requires the 64-bit integer types \c int64_t and \c uint64_t types + to be available at global scope. + + If users have their own definition, define RAPIDJSON_NO_INT64DEFINE to + prevent RapidJSON from defining its own types. +*/ +#ifndef RAPIDJSON_NO_INT64DEFINE +//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN +#if defined(_MSC_VER) && (_MSC_VER < 1800) // Visual Studio 2013 +#include "lottie_rapidjson_msinttypes_stdint.h" +#include "lottie_rapidjson_msinttypes_inttypes.h" +#else +// Other compilers should have this. +#include +#include +#endif +//!@endcond +#ifdef RAPIDJSON_DOXYGEN_RUNNING +#define RAPIDJSON_NO_INT64DEFINE +#endif +#endif // RAPIDJSON_NO_INT64TYPEDEF + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_FORCEINLINE + +#ifndef RAPIDJSON_FORCEINLINE +//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN +#if defined(_MSC_VER) && defined(NDEBUG) +#define RAPIDJSON_FORCEINLINE __forceinline +#elif defined(__GNUC__) && __GNUC__ >= 4 && defined(NDEBUG) +#define RAPIDJSON_FORCEINLINE __attribute__((always_inline)) +#else +#define RAPIDJSON_FORCEINLINE +#endif +//!@endcond +#endif // RAPIDJSON_FORCEINLINE + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_ENDIAN +#define RAPIDJSON_LITTLEENDIAN 0 //!< Little endian machine +#define RAPIDJSON_BIGENDIAN 1 //!< Big endian machine + +//! Endianness of the machine. +/*! + \def RAPIDJSON_ENDIAN + \ingroup RAPIDJSON_CONFIG + + GCC 4.6 provided macro for detecting endianness of the target machine. But other + compilers may not have this. User can define RAPIDJSON_ENDIAN to either + \ref RAPIDJSON_LITTLEENDIAN or \ref RAPIDJSON_BIGENDIAN. + + Default detection implemented with reference to + \li https://gcc.gnu.org/onlinedocs/gcc-4.6.0/cpp/Common-Predefined-Macros.html + \li http://www.boost.org/doc/libs/1_42_0/boost/detail/endian.hpp +*/ +#ifndef RAPIDJSON_ENDIAN +// Detect with GCC 4.6's macro +# ifdef __BYTE_ORDER__ +# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN +# elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN +# else +# error Unknown machine endianness detected. User needs to define RAPIDJSON_ENDIAN. +# endif // __BYTE_ORDER__ +// Detect with GLIBC's endian.h +# elif defined(__GLIBC__) +# include +# if (__BYTE_ORDER == __LITTLE_ENDIAN) +# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN +# elif (__BYTE_ORDER == __BIG_ENDIAN) +# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN +# else +# error Unknown machine endianness detected. User needs to define RAPIDJSON_ENDIAN. 
+# endif // __GLIBC__ +// Detect with _LITTLE_ENDIAN and _BIG_ENDIAN macro +# elif defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN) +# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN +# elif defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN) +# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN +// Detect with architecture macros +# elif defined(__sparc) || defined(__sparc__) || defined(_POWER) || defined(__powerpc__) || defined(__ppc__) || defined(__hpux) || defined(__hppa) || defined(_MIPSEB) || defined(_POWER) || defined(__s390__) +# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN +# elif defined(__i386__) || defined(__alpha__) || defined(__ia64) || defined(__ia64__) || defined(_M_IX86) || defined(_M_IA64) || defined(_M_ALPHA) || defined(__amd64) || defined(__amd64__) || defined(_M_AMD64) || defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || defined(__bfin__) +# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN +# elif defined(_MSC_VER) && (defined(_M_ARM) || defined(_M_ARM64)) +# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN +# elif defined(RAPIDJSON_DOXYGEN_RUNNING) +# define RAPIDJSON_ENDIAN +# else +# error Unknown machine endianness detected. User needs to define RAPIDJSON_ENDIAN. +# endif +#endif // RAPIDJSON_ENDIAN + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_64BIT + +//! Whether using 64-bit architecture +#ifndef RAPIDJSON_64BIT +#if defined(__LP64__) || (defined(__x86_64__) && defined(__ILP32__)) || defined(_WIN64) || defined(__EMSCRIPTEN__) +#define RAPIDJSON_64BIT 1 +#else +#define RAPIDJSON_64BIT 0 +#endif +#endif // RAPIDJSON_64BIT + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_ALIGN + +//! Data alignment of the machine. +/*! \ingroup RAPIDJSON_CONFIG + \param x pointer to align + + Some machines require strict data alignment. The default is 8 bytes. + User can customize by defining the RAPIDJSON_ALIGN function macro. +*/ +#ifndef RAPIDJSON_ALIGN +#define RAPIDJSON_ALIGN(x) (((x) + static_cast(7u)) & ~static_cast(7u)) +#endif + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_UINT64_C2 + +//! Construct a 64-bit literal by a pair of 32-bit integer. +/*! + 64-bit literal with or without ULL suffix is prone to compiler warnings. + UINT64_C() is C macro which cause compilation problems. + Use this macro to define 64-bit constants by a pair of 32-bit integer. +*/ +#ifndef RAPIDJSON_UINT64_C2 +#define RAPIDJSON_UINT64_C2(high32, low32) ((static_cast(high32) << 32) | static_cast(low32)) +#endif + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_48BITPOINTER_OPTIMIZATION + +//! Use only lower 48-bit address for some pointers. +/*! + \ingroup RAPIDJSON_CONFIG + + This optimization uses the fact that current X86-64 architecture only implement lower 48-bit virtual address. + The higher 16-bit can be used for storing other data. + \c GenericValue uses this optimization to reduce its size form 24 bytes to 16 bytes in 64-bit architecture. 
+*/ +#ifndef RAPIDJSON_48BITPOINTER_OPTIMIZATION +#if defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64) +#define RAPIDJSON_48BITPOINTER_OPTIMIZATION 1 +#else +#define RAPIDJSON_48BITPOINTER_OPTIMIZATION 0 +#endif +#endif // RAPIDJSON_48BITPOINTER_OPTIMIZATION + +#if RAPIDJSON_48BITPOINTER_OPTIMIZATION == 1 +#if RAPIDJSON_64BIT != 1 +#error RAPIDJSON_48BITPOINTER_OPTIMIZATION can only be set to 1 when RAPIDJSON_64BIT=1 +#endif +#define RAPIDJSON_SETPOINTER(type, p, x) (p = reinterpret_cast((reinterpret_cast(p) & static_cast(RAPIDJSON_UINT64_C2(0xFFFF0000, 0x00000000))) | reinterpret_cast(reinterpret_cast(x)))) +#define RAPIDJSON_GETPOINTER(type, p) (reinterpret_cast(reinterpret_cast(p) & static_cast(RAPIDJSON_UINT64_C2(0x0000FFFF, 0xFFFFFFFF)))) +#else +#define RAPIDJSON_SETPOINTER(type, p, x) (p = (x)) +#define RAPIDJSON_GETPOINTER(type, p) (p) +#endif + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_SSE2/RAPIDJSON_SSE42/RAPIDJSON_NEON/RAPIDJSON_SIMD + +/*! \def RAPIDJSON_SIMD + \ingroup RAPIDJSON_CONFIG + \brief Enable SSE2/SSE4.2/Neon optimization. + + RapidJSON supports optimized implementations for some parsing operations + based on the SSE2, SSE4.2 or NEon SIMD extensions on modern Intel + or ARM compatible processors. + + To enable these optimizations, three different symbols can be defined; + \code + // Enable SSE2 optimization. + #define RAPIDJSON_SSE2 + + // Enable SSE4.2 optimization. + #define RAPIDJSON_SSE42 + \endcode + + // Enable ARM Neon optimization. + #define RAPIDJSON_NEON + \endcode + + \c RAPIDJSON_SSE42 takes precedence over SSE2, if both are defined. + + If any of these symbols is defined, RapidJSON defines the macro + \c RAPIDJSON_SIMD to indicate the availability of the optimized code. +*/ +#if defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42) \ + || defined(RAPIDJSON_NEON) || defined(RAPIDJSON_DOXYGEN_RUNNING) +#define RAPIDJSON_SIMD +#endif + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_NO_SIZETYPEDEFINE + +#ifndef RAPIDJSON_NO_SIZETYPEDEFINE +/*! \def RAPIDJSON_NO_SIZETYPEDEFINE + \ingroup RAPIDJSON_CONFIG + \brief User-provided \c SizeType definition. + + In order to avoid using 32-bit size types for indexing strings and arrays, + define this preprocessor symbol and provide the type rapidjson::SizeType + before including RapidJSON: + \code + #define RAPIDJSON_NO_SIZETYPEDEFINE + namespace rapidjson { typedef ::std::size_t SizeType; } + #include "rapidjson/..." + \endcode + + \see rapidjson::SizeType +*/ +#ifdef RAPIDJSON_DOXYGEN_RUNNING +#define RAPIDJSON_NO_SIZETYPEDEFINE +#endif +RAPIDJSON_NAMESPACE_BEGIN +//! Size type (for string lengths, array sizes, etc.) +/*! RapidJSON uses 32-bit array/string indices even on 64-bit platforms, + instead of using \c size_t. Users may override the SizeType by defining + \ref RAPIDJSON_NO_SIZETYPEDEFINE. +*/ +typedef unsigned SizeType; +RAPIDJSON_NAMESPACE_END +#endif + +// always import std::size_t to rapidjson namespace +RAPIDJSON_NAMESPACE_BEGIN +using std::size_t; +RAPIDJSON_NAMESPACE_END + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_ASSERT + +//! Assertion. +/*! \ingroup RAPIDJSON_CONFIG + By default, rapidjson uses C \c assert() for internal assertions. + User can override it by defining RAPIDJSON_ASSERT(x) macro. 
+ + \note Parsing errors are handled and can be customized by the + \ref RAPIDJSON_ERRORS APIs. +*/ +#ifndef RAPIDJSON_ASSERT +#include +#define RAPIDJSON_ASSERT(x) assert(x) +#endif // RAPIDJSON_ASSERT + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_STATIC_ASSERT + +// Prefer C++11 static_assert, if available +#ifndef RAPIDJSON_STATIC_ASSERT +#if __cplusplus >= 201103L || ( defined(_MSC_VER) && _MSC_VER >= 1800 ) +#define RAPIDJSON_STATIC_ASSERT(x) \ + static_assert(x, RAPIDJSON_STRINGIFY(x)) +#endif // C++11 +#endif // RAPIDJSON_STATIC_ASSERT + +// Adopt C++03 implementation from boost +#ifndef RAPIDJSON_STATIC_ASSERT +#ifndef __clang__ +//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN +#endif +RAPIDJSON_NAMESPACE_BEGIN +template struct STATIC_ASSERTION_FAILURE; +template <> struct STATIC_ASSERTION_FAILURE { enum { value = 1 }; }; +template struct StaticAssertTest {}; +RAPIDJSON_NAMESPACE_END + +#if defined(__GNUC__) || defined(__clang__) +#define RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE __attribute__((unused)) +#else +#define RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE +#endif +#ifndef __clang__ +//!@endcond +#endif + +/*! \def RAPIDJSON_STATIC_ASSERT + \brief (Internal) macro to check for conditions at compile-time + \param x compile-time condition + \hideinitializer + */ +#define RAPIDJSON_STATIC_ASSERT(x) \ + typedef ::RAPIDJSON_NAMESPACE::StaticAssertTest< \ + sizeof(::RAPIDJSON_NAMESPACE::STATIC_ASSERTION_FAILURE)> \ + RAPIDJSON_JOIN(StaticAssertTypedef, __LINE__) RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE +#endif // RAPIDJSON_STATIC_ASSERT + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_LIKELY, RAPIDJSON_UNLIKELY + +//! Compiler branching hint for expression with high probability to be true. +/*! + \ingroup RAPIDJSON_CONFIG + \param x Boolean expression likely to be true. +*/ +#ifndef RAPIDJSON_LIKELY +#if defined(__GNUC__) || defined(__clang__) +#define RAPIDJSON_LIKELY(x) __builtin_expect(!!(x), 1) +#else +#define RAPIDJSON_LIKELY(x) (x) +#endif +#endif + +//! Compiler branching hint for expression with low probability to be true. +/*! + \ingroup RAPIDJSON_CONFIG + \param x Boolean expression unlikely to be true. 
+*/ +#ifndef RAPIDJSON_UNLIKELY +#if defined(__GNUC__) || defined(__clang__) +#define RAPIDJSON_UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define RAPIDJSON_UNLIKELY(x) (x) +#endif +#endif + +/////////////////////////////////////////////////////////////////////////////// +// Helpers + +//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN + +#define RAPIDJSON_MULTILINEMACRO_BEGIN do { +#define RAPIDJSON_MULTILINEMACRO_END \ +} while((void)0, 0) + +// adopted from Boost +#define RAPIDJSON_VERSION_CODE(x,y,z) \ + (((x)*100000) + ((y)*100) + (z)) + +#if defined(__has_builtin) +#define RAPIDJSON_HAS_BUILTIN(x) __has_builtin(x) +#else +#define RAPIDJSON_HAS_BUILTIN(x) 0 +#endif + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_DIAG_PUSH/POP, RAPIDJSON_DIAG_OFF + +#if defined(__GNUC__) +#define RAPIDJSON_GNUC \ + RAPIDJSON_VERSION_CODE(__GNUC__,__GNUC_MINOR__,__GNUC_PATCHLEVEL__) +#endif + +#if defined(__clang__) || (defined(RAPIDJSON_GNUC) && RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,2,0)) + +#define RAPIDJSON_PRAGMA(x) _Pragma(RAPIDJSON_STRINGIFY(x)) +#define RAPIDJSON_DIAG_PRAGMA(x) RAPIDJSON_PRAGMA(GCC diagnostic x) +#define RAPIDJSON_DIAG_OFF(x) \ + RAPIDJSON_DIAG_PRAGMA(ignored RAPIDJSON_STRINGIFY(RAPIDJSON_JOIN(-W,x))) + +// push/pop support in Clang and GCC>=4.6 +#if defined(__clang__) || (defined(RAPIDJSON_GNUC) && RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0)) +#define RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_PRAGMA(push) +#define RAPIDJSON_DIAG_POP RAPIDJSON_DIAG_PRAGMA(pop) +#else // GCC >= 4.2, < 4.6 +#define RAPIDJSON_DIAG_PUSH /* ignored */ +#define RAPIDJSON_DIAG_POP /* ignored */ +#endif + +#elif defined(_MSC_VER) + +// pragma (MSVC specific) +#define RAPIDJSON_PRAGMA(x) __pragma(x) +#define RAPIDJSON_DIAG_PRAGMA(x) RAPIDJSON_PRAGMA(warning(x)) + +#define RAPIDJSON_DIAG_OFF(x) RAPIDJSON_DIAG_PRAGMA(disable: x) +#define RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_PRAGMA(push) +#define RAPIDJSON_DIAG_POP RAPIDJSON_DIAG_PRAGMA(pop) + +#else + +#define RAPIDJSON_DIAG_OFF(x) /* ignored */ +#define RAPIDJSON_DIAG_PUSH /* ignored */ +#define RAPIDJSON_DIAG_POP /* ignored */ + +#endif // RAPIDJSON_DIAG_* + +/////////////////////////////////////////////////////////////////////////////// +// C++11 features + +#ifndef RAPIDJSON_HAS_CXX11_RVALUE_REFS +#if defined(__clang__) +#if __has_feature(cxx_rvalue_references) && \ + (defined(_MSC_VER) || defined(_LIBCPP_VERSION) || defined(__GLIBCXX__) && __GLIBCXX__ >= 20080306) +#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1 +#else +#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 0 +#endif +#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,3,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \ + (defined(_MSC_VER) && _MSC_VER >= 1600) || \ + (defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x5140 && defined(__GXX_EXPERIMENTAL_CXX0X__)) + +#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1 +#else +#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 0 +#endif +#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS + +#ifndef RAPIDJSON_HAS_CXX11_NOEXCEPT +#if defined(__clang__) +#define RAPIDJSON_HAS_CXX11_NOEXCEPT __has_feature(cxx_noexcept) +#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \ + (defined(_MSC_VER) && _MSC_VER >= 1900) || \ + (defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x5140 && defined(__GXX_EXPERIMENTAL_CXX0X__)) +#define RAPIDJSON_HAS_CXX11_NOEXCEPT 1 +#else +#define RAPIDJSON_HAS_CXX11_NOEXCEPT 0 +#endif +#endif +#if RAPIDJSON_HAS_CXX11_NOEXCEPT +#define 
RAPIDJSON_NOEXCEPT noexcept
+#else
+#define RAPIDJSON_NOEXCEPT /* noexcept */
+#endif // RAPIDJSON_HAS_CXX11_NOEXCEPT
+
+// no automatic detection, yet
+#ifndef RAPIDJSON_HAS_CXX11_TYPETRAITS
+#if (defined(_MSC_VER) && _MSC_VER >= 1700)
+#define RAPIDJSON_HAS_CXX11_TYPETRAITS 1
+#else
+#define RAPIDJSON_HAS_CXX11_TYPETRAITS 0
+#endif
+#endif
+
+#ifndef RAPIDJSON_HAS_CXX11_RANGE_FOR
+#if defined(__clang__)
+#define RAPIDJSON_HAS_CXX11_RANGE_FOR __has_feature(cxx_range_for)
+#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \
+      (defined(_MSC_VER) && _MSC_VER >= 1700) || \
+      (defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x5140 && defined(__GXX_EXPERIMENTAL_CXX0X__))
+#define RAPIDJSON_HAS_CXX11_RANGE_FOR 1
+#else
+#define RAPIDJSON_HAS_CXX11_RANGE_FOR 0
+#endif
+#endif // RAPIDJSON_HAS_CXX11_RANGE_FOR
+
+///////////////////////////////////////////////////////////////////////////////
+// C++17 features
+
+#if defined(__has_cpp_attribute)
+# if __has_cpp_attribute(fallthrough)
+#  define RAPIDJSON_DELIBERATE_FALLTHROUGH [[fallthrough]]
+# else
+#  define RAPIDJSON_DELIBERATE_FALLTHROUGH
+# endif
+#else
+# define RAPIDJSON_DELIBERATE_FALLTHROUGH
+#endif
+
+//!@endcond
+
+//! Assertion (in non-throwing contexts).
+ /*! \ingroup RAPIDJSON_CONFIG
+    Some functions provide a \c noexcept guarantee, if the compiler supports it.
+    In these cases, the \ref RAPIDJSON_ASSERT macro cannot be overridden to
+    throw an exception. This macro adds a separate customization point for
+    such cases.
+
+    Defaults to C \c assert() (as \ref RAPIDJSON_ASSERT), if \c noexcept is
+    supported, and to \ref RAPIDJSON_ASSERT otherwise.
+ */
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_NOEXCEPT_ASSERT
+
+#ifndef RAPIDJSON_NOEXCEPT_ASSERT
+#ifdef RAPIDJSON_ASSERT_THROWS
+#if RAPIDJSON_HAS_CXX11_NOEXCEPT
+#define RAPIDJSON_NOEXCEPT_ASSERT(x)
+#else
+#include <cassert>
+#define RAPIDJSON_NOEXCEPT_ASSERT(x) assert(x)
+#endif // RAPIDJSON_HAS_CXX11_NOEXCEPT
+#else
+#define RAPIDJSON_NOEXCEPT_ASSERT(x) RAPIDJSON_ASSERT(x)
+#endif // RAPIDJSON_ASSERT_THROWS
+#endif // RAPIDJSON_NOEXCEPT_ASSERT
+
+///////////////////////////////////////////////////////////////////////////////
+// malloc/realloc/free
+
+#ifndef RAPIDJSON_MALLOC
+///! customization point for global \c malloc
+#define RAPIDJSON_MALLOC(size) std::malloc(size)
+#endif
+#ifndef RAPIDJSON_REALLOC
+///! customization point for global \c realloc
+#define RAPIDJSON_REALLOC(ptr, new_size) std::realloc(ptr, new_size)
+#endif
+#ifndef RAPIDJSON_FREE
+///! customization point for global \c free
+#define RAPIDJSON_FREE(ptr) std::free(ptr)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// new/delete
+
+#ifndef RAPIDJSON_NEW
+///! customization point for global \c new
+#define RAPIDJSON_NEW(TypeName) new TypeName
+#endif
+#ifndef RAPIDJSON_DELETE
+///! customization point for global \c delete
+#define RAPIDJSON_DELETE(x) delete x
+#endif
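+
+// Illustrative sketch (not part of the upstream header): because of the
+// #ifndef guards above, these hooks can be redirected by defining the macros
+// before including any RapidJSON header, e.g. to account for allocations.
+// `g_rapidjson_bytes` is a hypothetical name used only for this example.
+//
+//   #include <cstdlib>
+//   static size_t g_rapidjson_bytes = 0;
+//   #define RAPIDJSON_MALLOC(size) (g_rapidjson_bytes += (size), std::malloc(size))
+//   #define RAPIDJSON_FREE(ptr)    std::free(ptr)
+//   #include "lottie_rapidjson_rapidjson.h"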
+///////////////////////////////////////////////////////////////////////////////
+// Type
+
+/*! \namespace rapidjson
+    \brief main RapidJSON namespace
+    \see RAPIDJSON_NAMESPACE
+*/
+RAPIDJSON_NAMESPACE_BEGIN
+
+//! Type of JSON value
+enum Type {
+    kNullType = 0,      //!< null
+    kFalseType = 1,     //!< false
+    kTrueType = 2,      //!< true
+    kObjectType = 3,    //!< object
+    kArrayType = 4,     //!< array
+    kStringType = 5,    //!< string
+    kNumberType = 6     //!< number
+};
+
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_RAPIDJSON_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_reader.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_reader.h
new file mode 100644
index 00000000..caa783f9
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_reader.h
@@ -0,0 +1,2244 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_READER_H_
+#define RAPIDJSON_READER_H_
+
+/*! \file reader.h */
+
+#include "lottie_rapidjson_allocators.h"
+#include "lottie_rapidjson_stream.h"
+#include "lottie_rapidjson_encodedstream.h"
+#include "lottie_rapidjson_internal_clzll.h"
+#include "lottie_rapidjson_internal_meta.h"
+#include "lottie_rapidjson_internal_stack.h"
+#include "lottie_rapidjson_internal_strtod.h"
+#include <limits>
+
+#if defined(RAPIDJSON_SIMD) && defined(_MSC_VER)
+#include <intrin.h>
+#pragma intrinsic(_BitScanForward)
+#endif
+#ifdef RAPIDJSON_SSE42
+#include <nmmintrin.h>
+#elif defined(RAPIDJSON_SSE2)
+#include <emmintrin.h>
+#elif defined(RAPIDJSON_NEON)
+#include <arm_neon.h>
+#endif
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(old-style-cast)
+RAPIDJSON_DIAG_OFF(padded)
+RAPIDJSON_DIAG_OFF(switch-enum)
+#elif defined(_MSC_VER)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant
+RAPIDJSON_DIAG_OFF(4702) // unreachable code
+#endif
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(effc++)
+#endif
+
+//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
+#define RAPIDJSON_NOTHING /* deliberately empty */
+#ifndef RAPIDJSON_PARSE_ERROR_EARLY_RETURN
+#define RAPIDJSON_PARSE_ERROR_EARLY_RETURN(value) \
+    RAPIDJSON_MULTILINEMACRO_BEGIN \
+    if (RAPIDJSON_UNLIKELY(HasParseError())) { return value; } \
+    RAPIDJSON_MULTILINEMACRO_END
+#endif
+#define RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID \
+    RAPIDJSON_PARSE_ERROR_EARLY_RETURN(RAPIDJSON_NOTHING)
+//!@endcond
+
+/*! \def RAPIDJSON_PARSE_ERROR_NORETURN
+    \ingroup RAPIDJSON_ERRORS
+    \brief Macro to indicate a parse error.
+    \param parseErrorCode \ref rapidjson::ParseErrorCode of the error
+    \param offset position of the error in JSON input (\c size_t)
+
+    This macro can be used as a customization point for the internal
+    error handling mechanism of RapidJSON.
+
+    A common usage model is to throw an exception instead of requiring the
+    caller to explicitly check the \ref rapidjson::GenericReader::Parse's
+    return value:
+
+    \code
+    #define RAPIDJSON_PARSE_ERROR_NORETURN(parseErrorCode,offset) \
+       throw ParseException(parseErrorCode, #parseErrorCode, offset)
+
+    #include <stdexcept>               // std::runtime_error
+    #include "rapidjson/error/error.h" // rapidjson::ParseResult
+
+    struct ParseException : std::runtime_error, rapidjson::ParseResult {
+      ParseException(rapidjson::ParseErrorCode code, const char* msg, size_t offset)
+        : std::runtime_error(msg), ParseResult(code, offset) {}
+    };
+
+    #include "rapidjson/reader.h"
+    \endcode
+
+    \see RAPIDJSON_PARSE_ERROR, rapidjson::GenericReader::Parse
+ */
+#ifndef RAPIDJSON_PARSE_ERROR_NORETURN
+#define RAPIDJSON_PARSE_ERROR_NORETURN(parseErrorCode, offset) \
+    RAPIDJSON_MULTILINEMACRO_BEGIN \
+    RAPIDJSON_ASSERT(!HasParseError()); /* Error can only be assigned once */ \
+    SetParseError(parseErrorCode, offset); \
+    RAPIDJSON_MULTILINEMACRO_END
+#endif
+
+/*! \def RAPIDJSON_PARSE_ERROR
+    \ingroup RAPIDJSON_ERRORS
+    \brief (Internal) macro to indicate and handle a parse error.
+    \param parseErrorCode \ref rapidjson::ParseErrorCode of the error
+    \param offset position of the error in JSON input (\c size_t)
+
+    Invokes RAPIDJSON_PARSE_ERROR_NORETURN and stops the parsing.
+
+    \see RAPIDJSON_PARSE_ERROR_NORETURN
+    \hideinitializer
+ */
+#ifndef RAPIDJSON_PARSE_ERROR
+#define RAPIDJSON_PARSE_ERROR(parseErrorCode, offset) \
+    RAPIDJSON_MULTILINEMACRO_BEGIN \
+    RAPIDJSON_PARSE_ERROR_NORETURN(parseErrorCode, offset); \
+    RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; \
+    RAPIDJSON_MULTILINEMACRO_END
+#endif
+
+#include "lottie_rapidjson_error_error.h"
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+///////////////////////////////////////////////////////////////////////////////
+// ParseFlag
+
+/*! \def RAPIDJSON_PARSE_DEFAULT_FLAGS
+    \ingroup RAPIDJSON_CONFIG
+    \brief User-defined kParseDefaultFlags definition.
+
+    User can define this as any \c ParseFlag combinations.
+*/
+#ifndef RAPIDJSON_PARSE_DEFAULT_FLAGS
+#define RAPIDJSON_PARSE_DEFAULT_FLAGS kParseNoFlags
+#endif
+
+//! Combination of parse flags
+/*! \see Reader::Parse, Document::Parse, Document::ParseInsitu, Document::ParseStream
+ */
+enum ParseFlag {
+    kParseNoFlags = 0,                  //!< No flags are set.
+    kParseInsituFlag = 1,               //!< In-situ (destructive) parsing.
+    kParseValidateEncodingFlag = 2,     //!< Validate encoding of JSON strings.
+    kParseIterativeFlag = 4,            //!< Iterative (constant complexity in terms of function call stack size) parsing.
+    kParseStopWhenDoneFlag = 8,         //!< After parsing a complete JSON root from stream, stop further processing the rest of stream. When this flag is used, parser will not generate kParseErrorDocumentRootNotSingular error.
+    kParseFullPrecisionFlag = 16,       //!< Parse number in full precision (but slower).
+    kParseCommentsFlag = 32,            //!< Allow one-line (//) and multi-line (/**/) comments.
+    kParseNumbersAsStringsFlag = 64,    //!< Parse all numbers (ints/doubles) as strings.
+    kParseTrailingCommasFlag = 128,     //!< Allow trailing commas at the end of objects and arrays.
+    kParseNanAndInfFlag = 256,          //!< Allow parsing NaN, Inf, Infinity, -Inf and -Infinity as doubles.
+    kParseEscapedApostropheFlag = 512,  //!< Allow escaped apostrophe in strings.
+    kParseDefaultFlags = RAPIDJSON_PARSE_DEFAULT_FLAGS  //!< Default parse flags. Can be customized by defining RAPIDJSON_PARSE_DEFAULT_FLAGS
+};
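+
+// Illustrative sketch (not part of the upstream header): flags combine with
+// bitwise OR and are passed as the template argument of Parse. `MyHandler`
+// is a hypothetical Handler implementation; Reader is the UTF-8 typedef of
+// GenericReader declared later in this header.
+//
+//   rapidjson::Reader reader;
+//   rapidjson::StringStream ss("{\"a\":1,} // trailing comma");
+//   MyHandler handler;
+//   reader.Parse<rapidjson::kParseCommentsFlag |
+//                rapidjson::kParseTrailingCommasFlag>(ss, handler);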
+
+///////////////////////////////////////////////////////////////////////////////
+// Handler
+
+/*! \class rapidjson::Handler
+    \brief Concept for receiving events from GenericReader upon parsing.
+    The functions return true if no error occurs. If they return false,
+    the event publisher should terminate the process.
+\code
+concept Handler {
+    typename Ch;
+
+    bool Null();
+    bool Bool(bool b);
+    bool Int(int i);
+    bool Uint(unsigned i);
+    bool Int64(int64_t i);
+    bool Uint64(uint64_t i);
+    bool Double(double d);
+    /// enabled via kParseNumbersAsStringsFlag, string is not null-terminated (use length)
+    bool RawNumber(const Ch* str, SizeType length, bool copy);
+    bool String(const Ch* str, SizeType length, bool copy);
+    bool StartObject();
+    bool Key(const Ch* str, SizeType length, bool copy);
+    bool EndObject(SizeType memberCount);
+    bool StartArray();
+    bool EndArray(SizeType elementCount);
+};
+\endcode
+*/
+///////////////////////////////////////////////////////////////////////////////
+// BaseReaderHandler
+
+//! Default implementation of Handler.
+/*! This can be used as base class of any reader handler.
+    \note implements Handler concept
+*/
+template<typename Encoding = UTF8<>, typename Derived = void>
+struct BaseReaderHandler {
+    typedef typename Encoding::Ch Ch;
+
+    typedef typename internal::SelectIf<internal::IsSame<Derived, void>, BaseReaderHandler, Derived>::Type Override;
+
+    bool Default() { return true; }
+    bool Null() { return static_cast<Override&>(*this).Default(); }
+    bool Bool(bool) { return static_cast<Override&>(*this).Default(); }
+    bool Int(int) { return static_cast<Override&>(*this).Default(); }
+    bool Uint(unsigned) { return static_cast<Override&>(*this).Default(); }
+    bool Int64(int64_t) { return static_cast<Override&>(*this).Default(); }
+    bool Uint64(uint64_t) { return static_cast<Override&>(*this).Default(); }
+    bool Double(double) { return static_cast<Override&>(*this).Default(); }
+    /// enabled via kParseNumbersAsStringsFlag, string is not null-terminated (use length)
+    bool RawNumber(const Ch* str, SizeType len, bool copy) { return static_cast<Override&>(*this).String(str, len, copy); }
+    bool String(const Ch*, SizeType, bool) { return static_cast<Override&>(*this).Default(); }
+    bool StartObject() { return static_cast<Override&>(*this).Default(); }
+    bool Key(const Ch* str, SizeType len, bool copy) { return static_cast<Override&>(*this).String(str, len, copy); }
+    bool EndObject(SizeType) { return static_cast<Override&>(*this).Default(); }
+    bool StartArray() { return static_cast<Override&>(*this).Default(); }
+    bool EndArray(SizeType) { return static_cast<Override&>(*this).Default(); }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// StreamLocalCopy
+
+namespace internal {
+
+template<typename Stream, int = StreamTraits<Stream>::copyOptimization>
+class StreamLocalCopy;
+
+//! Do copy optimization.
+template<typename Stream>
+class StreamLocalCopy<Stream, 1> {
+public:
+    StreamLocalCopy(Stream& original) : s(original), original_(original) {}
+    ~StreamLocalCopy() { original_ = s; }
+
+    Stream s;
+
+private:
+    StreamLocalCopy& operator=(const StreamLocalCopy&) /* = delete */;
+
+    Stream& original_;
+};
+
+//! Keep reference.
+template<typename Stream>
+class StreamLocalCopy<Stream, 0> {
+public:
+    StreamLocalCopy(Stream& original) : s(original) {}
+
+    Stream& s;
+
+private:
+    StreamLocalCopy& operator=(const StreamLocalCopy&) /* = delete */;
+};
+
+} // namespace internal
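+
+// Illustrative sketch (not part of the upstream header): deriving from
+// BaseReaderHandler means only the events of interest need overriding; the
+// inherited Default() accepts everything else. `KeyCounter` is a hypothetical
+// example type.
+//
+//   struct KeyCounter : BaseReaderHandler<UTF8<>, KeyCounter> {
+//       KeyCounter() : count(0) {}
+//       bool Key(const Ch* /*str*/, SizeType /*len*/, bool /*copy*/) { ++count; return true; }
+//       size_t count;
+//   };
+//
+//   // Usage: Reader reader; StringStream ss(json); KeyCounter h;
+//   //        reader.Parse(ss, h);  // h.count == number of object keys seen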
+
+///////////////////////////////////////////////////////////////////////////////
+// SkipWhitespace
+
+//! Skip the JSON white spaces in a stream.
+/*! \param is An input stream for skipping white spaces.
+    \note This function has SSE2/SSE4.2 specialization.
+*/
+template<typename InputStream>
+void SkipWhitespace(InputStream& is) {
+    internal::StreamLocalCopy<InputStream> copy(is);
+    InputStream& s(copy.s);
+
+    typename InputStream::Ch c;
+    while ((c = s.Peek()) == ' ' || c == '\n' || c == '\r' || c == '\t')
+        s.Take();
+}
+
+inline const char* SkipWhitespace(const char* p, const char* end) {
+    while (p != end && (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t'))
+        ++p;
+    return p;
+}
+
+#ifdef RAPIDJSON_SSE42
+//! Skip whitespace with SSE 4.2 pcmpistrm instruction, testing 16 characters at once.
+inline const char *SkipWhitespace_SIMD(const char* p) {
+    // Fast return for single non-whitespace
+    if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t')
+        ++p;
+    else
+        return p;
+
+    // 16-byte align to the next boundary
+    const char* nextAligned = reinterpret_cast<const char*>((reinterpret_cast<size_t>(p) + 15) & static_cast<size_t>(~15));
+    while (p != nextAligned)
+        if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t')
+            ++p;
+        else
+            return p;
+
+    // The rest of string using SIMD
+    static const char whitespace[16] = " \n\r\t";
+    const __m128i w = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&whitespace[0]));
+
+    for (;; p += 16) {
+        const __m128i s = _mm_load_si128(reinterpret_cast<const __m128i *>(p));
+        const int r = _mm_cmpistri(w, s, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT | _SIDD_NEGATIVE_POLARITY);
+        if (r != 16)    // some characters are non-whitespace
+            return p + r;
+    }
+}
+
+inline const char *SkipWhitespace_SIMD(const char* p, const char* end) {
+    // Fast return for single non-whitespace
+    if (p != end && (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t'))
+        ++p;
+    else
+        return p;
+
+    // The middle of string using SIMD
+    static const char whitespace[16] = " \n\r\t";
+    const __m128i w = _mm_loadu_si128(reinterpret_cast<const __m128i *>(&whitespace[0]));
+
+    for (; p <= end - 16; p += 16) {
+        const __m128i s = _mm_loadu_si128(reinterpret_cast<const __m128i *>(p));
+        const int r = _mm_cmpistri(w, s, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_LEAST_SIGNIFICANT | _SIDD_NEGATIVE_POLARITY);
+        if (r != 16)    // some characters are non-whitespace
+            return p + r;
+    }
+
+    return SkipWhitespace(p, end);
+}
+
+#elif defined(RAPIDJSON_SSE2)
+
+//! Skip whitespace with SSE2 instructions, testing 16 characters at once.
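+//
+// Note: SSE2 lacks the string-compare instruction used above, so this variant
+// broadcasts each whitespace byte (' ', '\n', '\r', '\t') into its own 16-byte
+// vector, compares the input block against all four, ORs the comparison masks,
+// and complements the result of _mm_movemask_epi8 so that set bits mark
+// non-whitespace bytes. A bit-scan-forward over that mask then yields the
+// index of the first non-whitespace character in the block.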
+inline const char *SkipWhitespace_SIMD(const char* p) { + // Fast return for single non-whitespace + if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t') + ++p; + else + return p; + + // 16-byte align to the next boundary + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + while (p != nextAligned) + if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t') + ++p; + else + return p; + + // The rest of string + #define C16(c) { c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c } + static const char whitespaces[4][16] = { C16(' '), C16('\n'), C16('\r'), C16('\t') }; + #undef C16 + + const __m128i w0 = _mm_loadu_si128(reinterpret_cast(&whitespaces[0][0])); + const __m128i w1 = _mm_loadu_si128(reinterpret_cast(&whitespaces[1][0])); + const __m128i w2 = _mm_loadu_si128(reinterpret_cast(&whitespaces[2][0])); + const __m128i w3 = _mm_loadu_si128(reinterpret_cast(&whitespaces[3][0])); + + for (;; p += 16) { + const __m128i s = _mm_load_si128(reinterpret_cast(p)); + __m128i x = _mm_cmpeq_epi8(s, w0); + x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w1)); + x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w2)); + x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w3)); + unsigned short r = static_cast(~_mm_movemask_epi8(x)); + if (r != 0) { // some of characters may be non-whitespace +#ifdef _MSC_VER // Find the index of first non-whitespace + unsigned long offset; + _BitScanForward(&offset, r); + return p + offset; +#else + return p + __builtin_ffs(r) - 1; +#endif + } + } +} + +inline const char *SkipWhitespace_SIMD(const char* p, const char* end) { + // Fast return for single non-whitespace + if (p != end && (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t')) + ++p; + else + return p; + + // The rest of string + #define C16(c) { c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c } + static const char whitespaces[4][16] = { C16(' '), C16('\n'), C16('\r'), C16('\t') }; + #undef C16 + + const __m128i w0 = _mm_loadu_si128(reinterpret_cast(&whitespaces[0][0])); + const __m128i w1 = _mm_loadu_si128(reinterpret_cast(&whitespaces[1][0])); + const __m128i w2 = _mm_loadu_si128(reinterpret_cast(&whitespaces[2][0])); + const __m128i w3 = _mm_loadu_si128(reinterpret_cast(&whitespaces[3][0])); + + for (; p <= end - 16; p += 16) { + const __m128i s = _mm_loadu_si128(reinterpret_cast(p)); + __m128i x = _mm_cmpeq_epi8(s, w0); + x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w1)); + x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w2)); + x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w3)); + unsigned short r = static_cast(~_mm_movemask_epi8(x)); + if (r != 0) { // some of characters may be non-whitespace +#ifdef _MSC_VER // Find the index of first non-whitespace + unsigned long offset; + _BitScanForward(&offset, r); + return p + offset; +#else + return p + __builtin_ffs(r) - 1; +#endif + } + } + + return SkipWhitespace(p, end); +} + +#elif defined(RAPIDJSON_NEON) + +//! Skip whitespace with ARM Neon instructions, testing 16 8-byte characters at once. 
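+//
+// Note: NEON has no movemask equivalent, so this variant negates the
+// whitespace-comparison mask, byte-reverses each 64-bit half (vrev64q_u8),
+// extracts the two halves as 64-bit lanes, and uses clzll so that the count
+// of leading zero bits, divided by 8, gives the index of the first
+// non-whitespace byte within that half.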
+inline const char *SkipWhitespace_SIMD(const char* p) { + // Fast return for single non-whitespace + if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t') + ++p; + else + return p; + + // 16-byte align to the next boundary + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + while (p != nextAligned) + if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t') + ++p; + else + return p; + + const uint8x16_t w0 = vmovq_n_u8(' '); + const uint8x16_t w1 = vmovq_n_u8('\n'); + const uint8x16_t w2 = vmovq_n_u8('\r'); + const uint8x16_t w3 = vmovq_n_u8('\t'); + + for (;; p += 16) { + const uint8x16_t s = vld1q_u8(reinterpret_cast(p)); + uint8x16_t x = vceqq_u8(s, w0); + x = vorrq_u8(x, vceqq_u8(s, w1)); + x = vorrq_u8(x, vceqq_u8(s, w2)); + x = vorrq_u8(x, vceqq_u8(s, w3)); + + x = vmvnq_u8(x); // Negate + x = vrev64q_u8(x); // Rev in 64 + uint64_t low = vgetq_lane_u64(vreinterpretq_u64_u8(x), 0); // extract + uint64_t high = vgetq_lane_u64(vreinterpretq_u64_u8(x), 1); // extract + + if (low == 0) { + if (high != 0) { + uint32_t lz = internal::clzll(high); + return p + 8 + (lz >> 3); + } + } else { + uint32_t lz = internal::clzll(low); + return p + (lz >> 3); + } + } +} + +inline const char *SkipWhitespace_SIMD(const char* p, const char* end) { + // Fast return for single non-whitespace + if (p != end && (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t')) + ++p; + else + return p; + + const uint8x16_t w0 = vmovq_n_u8(' '); + const uint8x16_t w1 = vmovq_n_u8('\n'); + const uint8x16_t w2 = vmovq_n_u8('\r'); + const uint8x16_t w3 = vmovq_n_u8('\t'); + + for (; p <= end - 16; p += 16) { + const uint8x16_t s = vld1q_u8(reinterpret_cast(p)); + uint8x16_t x = vceqq_u8(s, w0); + x = vorrq_u8(x, vceqq_u8(s, w1)); + x = vorrq_u8(x, vceqq_u8(s, w2)); + x = vorrq_u8(x, vceqq_u8(s, w3)); + + x = vmvnq_u8(x); // Negate + x = vrev64q_u8(x); // Rev in 64 + uint64_t low = vgetq_lane_u64(vreinterpretq_u64_u8(x), 0); // extract + uint64_t high = vgetq_lane_u64(vreinterpretq_u64_u8(x), 1); // extract + + if (low == 0) { + if (high != 0) { + uint32_t lz = internal::clzll(high); + return p + 8 + (lz >> 3); + } + } else { + uint32_t lz = internal::clzll(low); + return p + (lz >> 3); + } + } + + return SkipWhitespace(p, end); +} + +#endif // RAPIDJSON_NEON + +#ifdef RAPIDJSON_SIMD +//! Template function specialization for InsituStringStream +template<> inline void SkipWhitespace(InsituStringStream& is) { + is.src_ = const_cast(SkipWhitespace_SIMD(is.src_)); +} + +//! Template function specialization for StringStream +template<> inline void SkipWhitespace(StringStream& is) { + is.src_ = SkipWhitespace_SIMD(is.src_); +} + +template<> inline void SkipWhitespace(EncodedInputStream, MemoryStream>& is) { + is.is_.src_ = SkipWhitespace_SIMD(is.is_.src_, is.is_.end_); +} +#endif // RAPIDJSON_SIMD + +/////////////////////////////////////////////////////////////////////////////// +// GenericReader + +//! SAX-style JSON parser. Use \ref Reader for UTF8 encoding and default allocator. +/*! GenericReader parses JSON text from a stream, and send events synchronously to an + object implementing Handler concept. + + It needs to allocate a stack for storing a single decoded string during + non-destructive parsing. + + For in-situ parsing, the decoded string is directly written to the source + text string, no temporary buffer is required. + + A GenericReader object can be reused for parsing multiple JSON text. + + \tparam SourceEncoding Encoding of the input stream. 
+ \tparam TargetEncoding Encoding of the parse output. + \tparam StackAllocator Allocator type for stack. +*/ +template +class GenericReader { +public: + typedef typename SourceEncoding::Ch Ch; //!< SourceEncoding character type + + //! Constructor. + /*! \param stackAllocator Optional allocator for allocating stack memory. (Only use for non-destructive parsing) + \param stackCapacity stack capacity in bytes for storing a single decoded string. (Only use for non-destructive parsing) + */ + GenericReader(StackAllocator* stackAllocator = 0, size_t stackCapacity = kDefaultStackCapacity) : + stack_(stackAllocator, stackCapacity), parseResult_(), state_(IterativeParsingStartState) {} + + //! Parse JSON text. + /*! \tparam parseFlags Combination of \ref ParseFlag. + \tparam InputStream Type of input stream, implementing Stream concept. + \tparam Handler Type of handler, implementing Handler concept. + \param is Input stream to be parsed. + \param handler The handler to receive events. + \return Whether the parsing is successful. + */ + template + ParseResult Parse(InputStream& is, Handler& handler) { + if (parseFlags & kParseIterativeFlag) + return IterativeParse(is, handler); + + parseResult_.Clear(); + + ClearStackOnExit scope(*this); + + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); + + if (RAPIDJSON_UNLIKELY(is.Peek() == '\0')) { + RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorDocumentEmpty, is.Tell()); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); + } + else { + ParseValue(is, handler); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); + + if (!(parseFlags & kParseStopWhenDoneFlag)) { + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); + + if (RAPIDJSON_UNLIKELY(is.Peek() != '\0')) { + RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorDocumentRootNotSingular, is.Tell()); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); + } + } + } + + return parseResult_; + } + + //! Parse JSON text (with \ref kParseDefaultFlags) + /*! \tparam InputStream Type of input stream, implementing Stream concept + \tparam Handler Type of handler, implementing Handler concept. + \param is Input stream to be parsed. + \param handler The handler to receive events. + \return Whether the parsing is successful. + */ + template + ParseResult Parse(InputStream& is, Handler& handler) { + return Parse(is, handler); + } + + //! Initialize JSON text token-by-token parsing + /*! + */ + void IterativeParseInit() { + parseResult_.Clear(); + state_ = IterativeParsingStartState; + } + + //! Parse one token from JSON text + /*! \tparam InputStream Type of input stream, implementing Stream concept + \tparam Handler Type of handler, implementing Handler concept. + \param is Input stream to be parsed. + \param handler The handler to receive events. + \return Whether the parsing is successful. + */ + template + bool IterativeParseNext(InputStream& is, Handler& handler) { + while (RAPIDJSON_LIKELY(is.Peek() != '\0')) { + SkipWhitespaceAndComments(is); + + Token t = Tokenize(is.Peek()); + IterativeParsingState n = Predict(state_, t); + IterativeParsingState d = Transit(state_, t, n, is, handler); + + // If we've finished or hit an error... + if (RAPIDJSON_UNLIKELY(IsIterativeParsingCompleteState(d))) { + // Report errors. + if (d == IterativeParsingErrorState) { + HandleError(state_, is); + return false; + } + + // Transition to the finish state. + RAPIDJSON_ASSERT(d == IterativeParsingFinishState); + state_ = d; + + // If StopWhenDone is not set... 
+ if (!(parseFlags & kParseStopWhenDoneFlag)) { + // ... and extra non-whitespace data is found... + SkipWhitespaceAndComments(is); + if (is.Peek() != '\0') { + // ... this is considered an error. + HandleError(state_, is); + return false; + } + } + + // Success! We are done! + return true; + } + + // Transition to the new state. + state_ = d; + + // If we parsed anything other than a delimiter, we invoked the handler, so we can return true now. + if (!IsIterativeParsingDelimiterState(n)) + return true; + } + + // We reached the end of file. + stack_.Clear(); + + if (state_ != IterativeParsingFinishState) { + HandleError(state_, is); + return false; + } + + return true; + } + + //! Check if token-by-token parsing JSON text is complete + /*! \return Whether the JSON has been fully decoded. + */ + RAPIDJSON_FORCEINLINE bool IterativeParseComplete() const { + return IsIterativeParsingCompleteState(state_); + } + + //! Whether a parse error has occurred in the last parsing. + bool HasParseError() const { return parseResult_.IsError(); } + + //! Get the \ref ParseErrorCode of last parsing. + ParseErrorCode GetParseErrorCode() const { return parseResult_.Code(); } + + //! Get the position of last parsing error in input, 0 otherwise. + size_t GetErrorOffset() const { return parseResult_.Offset(); } + +protected: + void SetParseError(ParseErrorCode code, size_t offset) { parseResult_.Set(code, offset); } + +private: + // Prohibit copy constructor & assignment operator. + GenericReader(const GenericReader&); + GenericReader& operator=(const GenericReader&); + + void ClearStack() { stack_.Clear(); } + + // clear stack on any exit from ParseStream, e.g. due to exception + struct ClearStackOnExit { + explicit ClearStackOnExit(GenericReader& r) : r_(r) {} + ~ClearStackOnExit() { r_.ClearStack(); } + private: + GenericReader& r_; + ClearStackOnExit(const ClearStackOnExit&); + ClearStackOnExit& operator=(const ClearStackOnExit&); + }; + + template + void SkipWhitespaceAndComments(InputStream& is) { + SkipWhitespace(is); + + if (parseFlags & kParseCommentsFlag) { + while (RAPIDJSON_UNLIKELY(Consume(is, '/'))) { + if (Consume(is, '*')) { + while (true) { + if (RAPIDJSON_UNLIKELY(is.Peek() == '\0')) + RAPIDJSON_PARSE_ERROR(kParseErrorUnspecificSyntaxError, is.Tell()); + else if (Consume(is, '*')) { + if (Consume(is, '/')) + break; + } + else + is.Take(); + } + } + else if (RAPIDJSON_LIKELY(Consume(is, '/'))) + while (is.Peek() != '\0' && is.Take() != '\n') {} + else + RAPIDJSON_PARSE_ERROR(kParseErrorUnspecificSyntaxError, is.Tell()); + + SkipWhitespace(is); + } + } + } + + // Parse object: { string : value, ... 
} + template + void ParseObject(InputStream& is, Handler& handler) { + RAPIDJSON_ASSERT(is.Peek() == '{'); + is.Take(); // Skip '{' + + if (RAPIDJSON_UNLIKELY(!handler.StartObject())) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + if (Consume(is, '}')) { + if (RAPIDJSON_UNLIKELY(!handler.EndObject(0))) // empty object + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + return; + } + + for (SizeType memberCount = 0;;) { + if (RAPIDJSON_UNLIKELY(is.Peek() != '"')) + RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissName, is.Tell()); + + ParseString(is, handler, true); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + if (RAPIDJSON_UNLIKELY(!Consume(is, ':'))) + RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissColon, is.Tell()); + + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + ParseValue(is, handler); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + ++memberCount; + + switch (is.Peek()) { + case ',': + is.Take(); + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + break; + case '}': + is.Take(); + if (RAPIDJSON_UNLIKELY(!handler.EndObject(memberCount))) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + return; + default: + RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissCommaOrCurlyBracket, is.Tell()); break; // This useless break is only for making warning and coverage happy + } + + if (parseFlags & kParseTrailingCommasFlag) { + if (is.Peek() == '}') { + if (RAPIDJSON_UNLIKELY(!handler.EndObject(memberCount))) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + is.Take(); + return; + } + } + } + } + + // Parse array: [ value, ... 
] + template + void ParseArray(InputStream& is, Handler& handler) { + RAPIDJSON_ASSERT(is.Peek() == '['); + is.Take(); // Skip '[' + + if (RAPIDJSON_UNLIKELY(!handler.StartArray())) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + if (Consume(is, ']')) { + if (RAPIDJSON_UNLIKELY(!handler.EndArray(0))) // empty array + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + return; + } + + for (SizeType elementCount = 0;;) { + ParseValue(is, handler); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + ++elementCount; + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + if (Consume(is, ',')) { + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + } + else if (Consume(is, ']')) { + if (RAPIDJSON_UNLIKELY(!handler.EndArray(elementCount))) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + return; + } + else + RAPIDJSON_PARSE_ERROR(kParseErrorArrayMissCommaOrSquareBracket, is.Tell()); + + if (parseFlags & kParseTrailingCommasFlag) { + if (is.Peek() == ']') { + if (RAPIDJSON_UNLIKELY(!handler.EndArray(elementCount))) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + is.Take(); + return; + } + } + } + } + + template + void ParseNull(InputStream& is, Handler& handler) { + RAPIDJSON_ASSERT(is.Peek() == 'n'); + is.Take(); + + if (RAPIDJSON_LIKELY(Consume(is, 'u') && Consume(is, 'l') && Consume(is, 'l'))) { + if (RAPIDJSON_UNLIKELY(!handler.Null())) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + } + else + RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, is.Tell()); + } + + template + void ParseTrue(InputStream& is, Handler& handler) { + RAPIDJSON_ASSERT(is.Peek() == 't'); + is.Take(); + + if (RAPIDJSON_LIKELY(Consume(is, 'r') && Consume(is, 'u') && Consume(is, 'e'))) { + if (RAPIDJSON_UNLIKELY(!handler.Bool(true))) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + } + else + RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, is.Tell()); + } + + template + void ParseFalse(InputStream& is, Handler& handler) { + RAPIDJSON_ASSERT(is.Peek() == 'f'); + is.Take(); + + if (RAPIDJSON_LIKELY(Consume(is, 'a') && Consume(is, 'l') && Consume(is, 's') && Consume(is, 'e'))) { + if (RAPIDJSON_UNLIKELY(!handler.Bool(false))) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + } + else + RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, is.Tell()); + } + + template + RAPIDJSON_FORCEINLINE static bool Consume(InputStream& is, typename InputStream::Ch expect) { + if (RAPIDJSON_LIKELY(is.Peek() == expect)) { + is.Take(); + return true; + } + else + return false; + } + + // Helper function to parse four hexadecimal digits in \uXXXX in ParseString(). 
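+    // For example, the escape "\u00E9" yields codepoint 0x00E9, which
+    // ParseStringToStream() then encodes via TEncoding::Encode (two bytes,
+    // 0xC3 0xA9, for UTF-8). An invalid hex digit reports the offset of the
+    // initial backslash, passed in as escapeOffset.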
+    template<typename InputStream>
+    unsigned ParseHex4(InputStream& is, size_t escapeOffset) {
+        unsigned codepoint = 0;
+        for (int i = 0; i < 4; i++) {
+            Ch c = is.Peek();
+            codepoint <<= 4;
+            codepoint += static_cast<unsigned>(c);
+            if (c >= '0' && c <= '9')
+                codepoint -= '0';
+            else if (c >= 'A' && c <= 'F')
+                codepoint -= 'A' - 10;
+            else if (c >= 'a' && c <= 'f')
+                codepoint -= 'a' - 10;
+            else {
+                RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorStringUnicodeEscapeInvalidHex, escapeOffset);
+                RAPIDJSON_PARSE_ERROR_EARLY_RETURN(0);
+            }
+            is.Take();
+        }
+        return codepoint;
+    }
+
+    template <typename CharType>
+    class StackStream {
+    public:
+        typedef CharType Ch;
+
+        StackStream(internal::Stack<StackAllocator>& stack) : stack_(stack), length_(0) {}
+        RAPIDJSON_FORCEINLINE void Put(Ch c) {
+            *stack_.template Push<Ch>() = c;
+            ++length_;
+        }
+
+        RAPIDJSON_FORCEINLINE void* Push(SizeType count) {
+            length_ += count;
+            return stack_.template Push<Ch>(count);
+        }
+
+        size_t Length() const { return length_; }
+
+        Ch* Pop() {
+            return stack_.template Pop<Ch>(length_);
+        }
+
+    private:
+        StackStream(const StackStream&);
+        StackStream& operator=(const StackStream&);
+
+        internal::Stack<StackAllocator>& stack_;
+        SizeType length_;
+    };
+
+    // Parse string and generate String event. Different code paths for kParseInsituFlag.
+    template<unsigned parseFlags, typename InputStream, typename Handler>
+    void ParseString(InputStream& is, Handler& handler, bool isKey = false) {
+        internal::StreamLocalCopy<InputStream> copy(is);
+        InputStream& s(copy.s);
+
+        RAPIDJSON_ASSERT(s.Peek() == '\"');
+        s.Take();  // Skip '\"'
+
+        bool success = false;
+        if (parseFlags & kParseInsituFlag) {
+            typename InputStream::Ch *head = s.PutBegin();
+            ParseStringToStream<parseFlags, SourceEncoding, SourceEncoding>(s, s);
+            RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID;
+            size_t length = s.PutEnd(head) - 1;
+            RAPIDJSON_ASSERT(length <= 0xFFFFFFFF);
+            const typename TargetEncoding::Ch* const str = reinterpret_cast<typename TargetEncoding::Ch*>(head);
+            success = (isKey ? handler.Key(str, SizeType(length), false) : handler.String(str, SizeType(length), false));
+        }
+        else {
+            StackStream<typename TargetEncoding::Ch> stackStream(stack_);
+            ParseStringToStream<parseFlags, SourceEncoding, TargetEncoding>(s, stackStream);
+            RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID;
+            SizeType length = static_cast<SizeType>(stackStream.Length()) - 1;
+            const typename TargetEncoding::Ch* const str = stackStream.Pop();
+            success = (isKey ? handler.Key(str, length, true) : handler.String(str, length, true));
+        }
+        if (RAPIDJSON_UNLIKELY(!success))
+            RAPIDJSON_PARSE_ERROR(kParseErrorTermination, s.Tell());
+    }
+
+    // Parse string to an output stream
+    // This function handles the prefix/suffix double quotes, escaping, and optional encoding validation.
+    template<unsigned parseFlags, typename SEncoding, typename TEncoding, typename InputStream, typename OutputStream>
+    RAPIDJSON_FORCEINLINE void ParseStringToStream(InputStream& is, OutputStream& os) {
+//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
+#define Z16 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+        static const char escape[256] = {
+            Z16, Z16, 0, 0,'\"', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, '/',
+            Z16, Z16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,'\\', 0, 0, 0,
+            0, 0,'\b', 0, 0, 0,'\f', 0, 0, 0, 0, 0, 0, 0,'\n', 0,
+            0, 0,'\r', 0,'\t', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16
+        };
+#undef Z16
+//!@endcond
+
+        for (;;) {
+            // Scan and copy string before "\\\"" or < 0x20. This is an optional optimization.
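+            // (The SIMD overloads of ScanCopyUnescapedString below bulk-copy
+            // plain characters and stop at the first quote, backslash, or
+            // control byte; the generic overload is a no-op, so every
+            // character then falls through to the per-character loop here.)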
+ if (!(parseFlags & kParseValidateEncodingFlag)) + ScanCopyUnescapedString(is, os); + + Ch c = is.Peek(); + if (RAPIDJSON_UNLIKELY(c == '\\')) { // Escape + size_t escapeOffset = is.Tell(); // For invalid escaping, report the initial '\\' as error offset + is.Take(); + Ch e = is.Peek(); + if ((sizeof(Ch) == 1 || unsigned(e) < 256) && RAPIDJSON_LIKELY(escape[static_cast(e)])) { + is.Take(); + os.Put(static_cast(escape[static_cast(e)])); + } + else if ((parseFlags & kParseEscapedApostropheFlag) && RAPIDJSON_LIKELY(e == '\'')) { // Allow escaped apostrophe + is.Take(); + os.Put('\''); + } + else if (RAPIDJSON_LIKELY(e == 'u')) { // Unicode + is.Take(); + unsigned codepoint = ParseHex4(is, escapeOffset); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + if (RAPIDJSON_UNLIKELY(codepoint >= 0xD800 && codepoint <= 0xDFFF)) { + // high surrogate, check if followed by valid low surrogate + if (RAPIDJSON_LIKELY(codepoint <= 0xDBFF)) { + // Handle UTF-16 surrogate pair + if (RAPIDJSON_UNLIKELY(!Consume(is, '\\') || !Consume(is, 'u'))) + RAPIDJSON_PARSE_ERROR(kParseErrorStringUnicodeSurrogateInvalid, escapeOffset); + unsigned codepoint2 = ParseHex4(is, escapeOffset); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + if (RAPIDJSON_UNLIKELY(codepoint2 < 0xDC00 || codepoint2 > 0xDFFF)) + RAPIDJSON_PARSE_ERROR(kParseErrorStringUnicodeSurrogateInvalid, escapeOffset); + codepoint = (((codepoint - 0xD800) << 10) | (codepoint2 - 0xDC00)) + 0x10000; + } + // single low surrogate + else + { + RAPIDJSON_PARSE_ERROR(kParseErrorStringUnicodeSurrogateInvalid, escapeOffset); + } + } + TEncoding::Encode(os, codepoint); + } + else + RAPIDJSON_PARSE_ERROR(kParseErrorStringEscapeInvalid, escapeOffset); + } + else if (RAPIDJSON_UNLIKELY(c == '"')) { // Closing double quote + is.Take(); + os.Put('\0'); // null-terminate the string + return; + } + else if (RAPIDJSON_UNLIKELY(static_cast(c) < 0x20)) { // RFC 4627: unescaped = %x20-21 / %x23-5B / %x5D-10FFFF + if (c == '\0') + RAPIDJSON_PARSE_ERROR(kParseErrorStringMissQuotationMark, is.Tell()); + else + RAPIDJSON_PARSE_ERROR(kParseErrorStringInvalidEncoding, is.Tell()); + } + else { + size_t offset = is.Tell(); + if (RAPIDJSON_UNLIKELY((parseFlags & kParseValidateEncodingFlag ? 
+ !Transcoder::Validate(is, os) : + !Transcoder::Transcode(is, os)))) + RAPIDJSON_PARSE_ERROR(kParseErrorStringInvalidEncoding, offset); + } + } + } + + template + static RAPIDJSON_FORCEINLINE void ScanCopyUnescapedString(InputStream&, OutputStream&) { + // Do nothing for generic version + } + +#if defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42) + // StringStream -> StackStream + static RAPIDJSON_FORCEINLINE void ScanCopyUnescapedString(StringStream& is, StackStream& os) { + const char* p = is.src_; + + // Scan one by one until alignment (unaligned load may cross page boundary and cause crash) + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + while (p != nextAligned) + if (RAPIDJSON_UNLIKELY(*p == '\"') || RAPIDJSON_UNLIKELY(*p == '\\') || RAPIDJSON_UNLIKELY(static_cast(*p) < 0x20)) { + is.src_ = p; + return; + } + else + os.Put(*p++); + + // The rest of string using SIMD + static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' }; + static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' }; + static const char space[16] = { 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F }; + const __m128i dq = _mm_loadu_si128(reinterpret_cast(&dquote[0])); + const __m128i bs = _mm_loadu_si128(reinterpret_cast(&bslash[0])); + const __m128i sp = _mm_loadu_si128(reinterpret_cast(&space[0])); + + for (;; p += 16) { + const __m128i s = _mm_load_si128(reinterpret_cast(p)); + const __m128i t1 = _mm_cmpeq_epi8(s, dq); + const __m128i t2 = _mm_cmpeq_epi8(s, bs); + const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x1F) == 0x1F + const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3); + unsigned short r = static_cast(_mm_movemask_epi8(x)); + if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped + SizeType length; + #ifdef _MSC_VER // Find the index of first escaped + unsigned long offset; + _BitScanForward(&offset, r); + length = offset; + #else + length = static_cast(__builtin_ffs(r) - 1); + #endif + if (length != 0) { + char* q = reinterpret_cast(os.Push(length)); + for (size_t i = 0; i < length; i++) + q[i] = p[i]; + + p += length; + } + break; + } + _mm_storeu_si128(reinterpret_cast<__m128i *>(os.Push(16)), s); + } + + is.src_ = p; + } + + // InsituStringStream -> InsituStringStream + static RAPIDJSON_FORCEINLINE void ScanCopyUnescapedString(InsituStringStream& is, InsituStringStream& os) { + RAPIDJSON_ASSERT(&is == &os); + (void)os; + + if (is.src_ == is.dst_) { + SkipUnescapedString(is); + return; + } + + char* p = is.src_; + char *q = is.dst_; + + // Scan one by one until alignment (unaligned load may cross page boundary and cause crash) + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + while (p != nextAligned) + if (RAPIDJSON_UNLIKELY(*p == '\"') || RAPIDJSON_UNLIKELY(*p == '\\') || RAPIDJSON_UNLIKELY(static_cast(*p) < 0x20)) { + is.src_ = p; + is.dst_ = q; + return; + } + else + *q++ = *p++; + + // The rest of string using SIMD + static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' }; + static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' }; + static const char space[16] = { 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 
0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F }; + const __m128i dq = _mm_loadu_si128(reinterpret_cast(&dquote[0])); + const __m128i bs = _mm_loadu_si128(reinterpret_cast(&bslash[0])); + const __m128i sp = _mm_loadu_si128(reinterpret_cast(&space[0])); + + for (;; p += 16, q += 16) { + const __m128i s = _mm_load_si128(reinterpret_cast(p)); + const __m128i t1 = _mm_cmpeq_epi8(s, dq); + const __m128i t2 = _mm_cmpeq_epi8(s, bs); + const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x1F) == 0x1F + const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3); + unsigned short r = static_cast(_mm_movemask_epi8(x)); + if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped + size_t length; +#ifdef _MSC_VER // Find the index of first escaped + unsigned long offset; + _BitScanForward(&offset, r); + length = offset; +#else + length = static_cast(__builtin_ffs(r) - 1); +#endif + for (const char* pend = p + length; p != pend; ) + *q++ = *p++; + break; + } + _mm_storeu_si128(reinterpret_cast<__m128i *>(q), s); + } + + is.src_ = p; + is.dst_ = q; + } + + // When read/write pointers are the same for insitu stream, just skip unescaped characters + static RAPIDJSON_FORCEINLINE void SkipUnescapedString(InsituStringStream& is) { + RAPIDJSON_ASSERT(is.src_ == is.dst_); + char* p = is.src_; + + // Scan one by one until alignment (unaligned load may cross page boundary and cause crash) + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + for (; p != nextAligned; p++) + if (RAPIDJSON_UNLIKELY(*p == '\"') || RAPIDJSON_UNLIKELY(*p == '\\') || RAPIDJSON_UNLIKELY(static_cast(*p) < 0x20)) { + is.src_ = is.dst_ = p; + return; + } + + // The rest of string using SIMD + static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' }; + static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' }; + static const char space[16] = { 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F }; + const __m128i dq = _mm_loadu_si128(reinterpret_cast(&dquote[0])); + const __m128i bs = _mm_loadu_si128(reinterpret_cast(&bslash[0])); + const __m128i sp = _mm_loadu_si128(reinterpret_cast(&space[0])); + + for (;; p += 16) { + const __m128i s = _mm_load_si128(reinterpret_cast(p)); + const __m128i t1 = _mm_cmpeq_epi8(s, dq); + const __m128i t2 = _mm_cmpeq_epi8(s, bs); + const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x1F) == 0x1F + const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3); + unsigned short r = static_cast(_mm_movemask_epi8(x)); + if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped + size_t length; +#ifdef _MSC_VER // Find the index of first escaped + unsigned long offset; + _BitScanForward(&offset, r); + length = offset; +#else + length = static_cast(__builtin_ffs(r) - 1); +#endif + p += length; + break; + } + } + + is.src_ = is.dst_ = p; + } +#elif defined(RAPIDJSON_NEON) + // StringStream -> StackStream + static RAPIDJSON_FORCEINLINE void ScanCopyUnescapedString(StringStream& is, StackStream& os) { + const char* p = is.src_; + + // Scan one by one until alignment (unaligned load may cross page boundary and cause crash) + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + while (p != nextAligned) + if (RAPIDJSON_UNLIKELY(*p == '\"') || RAPIDJSON_UNLIKELY(*p == '\\') || 
RAPIDJSON_UNLIKELY(static_cast(*p) < 0x20)) { + is.src_ = p; + return; + } + else + os.Put(*p++); + + // The rest of string using SIMD + const uint8x16_t s0 = vmovq_n_u8('"'); + const uint8x16_t s1 = vmovq_n_u8('\\'); + const uint8x16_t s2 = vmovq_n_u8('\b'); + const uint8x16_t s3 = vmovq_n_u8(32); + + for (;; p += 16) { + const uint8x16_t s = vld1q_u8(reinterpret_cast(p)); + uint8x16_t x = vceqq_u8(s, s0); + x = vorrq_u8(x, vceqq_u8(s, s1)); + x = vorrq_u8(x, vceqq_u8(s, s2)); + x = vorrq_u8(x, vcltq_u8(s, s3)); + + x = vrev64q_u8(x); // Rev in 64 + uint64_t low = vgetq_lane_u64(vreinterpretq_u64_u8(x), 0); // extract + uint64_t high = vgetq_lane_u64(vreinterpretq_u64_u8(x), 1); // extract + + SizeType length = 0; + bool escaped = false; + if (low == 0) { + if (high != 0) { + uint32_t lz = internal::clzll(high); + length = 8 + (lz >> 3); + escaped = true; + } + } else { + uint32_t lz = internal::clzll(low); + length = lz >> 3; + escaped = true; + } + if (RAPIDJSON_UNLIKELY(escaped)) { // some of characters is escaped + if (length != 0) { + char* q = reinterpret_cast(os.Push(length)); + for (size_t i = 0; i < length; i++) + q[i] = p[i]; + + p += length; + } + break; + } + vst1q_u8(reinterpret_cast(os.Push(16)), s); + } + + is.src_ = p; + } + + // InsituStringStream -> InsituStringStream + static RAPIDJSON_FORCEINLINE void ScanCopyUnescapedString(InsituStringStream& is, InsituStringStream& os) { + RAPIDJSON_ASSERT(&is == &os); + (void)os; + + if (is.src_ == is.dst_) { + SkipUnescapedString(is); + return; + } + + char* p = is.src_; + char *q = is.dst_; + + // Scan one by one until alignment (unaligned load may cross page boundary and cause crash) + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + while (p != nextAligned) + if (RAPIDJSON_UNLIKELY(*p == '\"') || RAPIDJSON_UNLIKELY(*p == '\\') || RAPIDJSON_UNLIKELY(static_cast(*p) < 0x20)) { + is.src_ = p; + is.dst_ = q; + return; + } + else + *q++ = *p++; + + // The rest of string using SIMD + const uint8x16_t s0 = vmovq_n_u8('"'); + const uint8x16_t s1 = vmovq_n_u8('\\'); + const uint8x16_t s2 = vmovq_n_u8('\b'); + const uint8x16_t s3 = vmovq_n_u8(32); + + for (;; p += 16, q += 16) { + const uint8x16_t s = vld1q_u8(reinterpret_cast(p)); + uint8x16_t x = vceqq_u8(s, s0); + x = vorrq_u8(x, vceqq_u8(s, s1)); + x = vorrq_u8(x, vceqq_u8(s, s2)); + x = vorrq_u8(x, vcltq_u8(s, s3)); + + x = vrev64q_u8(x); // Rev in 64 + uint64_t low = vgetq_lane_u64(vreinterpretq_u64_u8(x), 0); // extract + uint64_t high = vgetq_lane_u64(vreinterpretq_u64_u8(x), 1); // extract + + SizeType length = 0; + bool escaped = false; + if (low == 0) { + if (high != 0) { + uint32_t lz = internal::clzll(high); + length = 8 + (lz >> 3); + escaped = true; + } + } else { + uint32_t lz = internal::clzll(low); + length = lz >> 3; + escaped = true; + } + if (RAPIDJSON_UNLIKELY(escaped)) { // some of characters is escaped + for (const char* pend = p + length; p != pend; ) { + *q++ = *p++; + } + break; + } + vst1q_u8(reinterpret_cast(q), s); + } + + is.src_ = p; + is.dst_ = q; + } + + // When read/write pointers are the same for insitu stream, just skip unescaped characters + static RAPIDJSON_FORCEINLINE void SkipUnescapedString(InsituStringStream& is) { + RAPIDJSON_ASSERT(is.src_ == is.dst_); + char* p = is.src_; + + // Scan one by one until alignment (unaligned load may cross page boundary and cause crash) + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + for (; p != nextAligned; p++) + if 
(RAPIDJSON_UNLIKELY(*p == '\"') || RAPIDJSON_UNLIKELY(*p == '\\') || RAPIDJSON_UNLIKELY(static_cast(*p) < 0x20)) { + is.src_ = is.dst_ = p; + return; + } + + // The rest of string using SIMD + const uint8x16_t s0 = vmovq_n_u8('"'); + const uint8x16_t s1 = vmovq_n_u8('\\'); + const uint8x16_t s2 = vmovq_n_u8('\b'); + const uint8x16_t s3 = vmovq_n_u8(32); + + for (;; p += 16) { + const uint8x16_t s = vld1q_u8(reinterpret_cast(p)); + uint8x16_t x = vceqq_u8(s, s0); + x = vorrq_u8(x, vceqq_u8(s, s1)); + x = vorrq_u8(x, vceqq_u8(s, s2)); + x = vorrq_u8(x, vcltq_u8(s, s3)); + + x = vrev64q_u8(x); // Rev in 64 + uint64_t low = vgetq_lane_u64(vreinterpretq_u64_u8(x), 0); // extract + uint64_t high = vgetq_lane_u64(vreinterpretq_u64_u8(x), 1); // extract + + if (low == 0) { + if (high != 0) { + uint32_t lz = internal::clzll(high); + p += 8 + (lz >> 3); + break; + } + } else { + uint32_t lz = internal::clzll(low); + p += lz >> 3; + break; + } + } + + is.src_ = is.dst_ = p; + } +#endif // RAPIDJSON_NEON + + template + class NumberStream; + + template + class NumberStream { + public: + typedef typename InputStream::Ch Ch; + + NumberStream(GenericReader& reader, InputStream& s) : is(s) { (void)reader; } + + RAPIDJSON_FORCEINLINE Ch Peek() const { return is.Peek(); } + RAPIDJSON_FORCEINLINE Ch TakePush() { return is.Take(); } + RAPIDJSON_FORCEINLINE Ch Take() { return is.Take(); } + RAPIDJSON_FORCEINLINE void Push(char) {} + + size_t Tell() { return is.Tell(); } + size_t Length() { return 0; } + const char* Pop() { return 0; } + + protected: + NumberStream& operator=(const NumberStream&); + + InputStream& is; + }; + + template + class NumberStream : public NumberStream { + typedef NumberStream Base; + public: + NumberStream(GenericReader& reader, InputStream& is) : Base(reader, is), stackStream(reader.stack_) {} + + RAPIDJSON_FORCEINLINE Ch TakePush() { + stackStream.Put(static_cast(Base::is.Peek())); + return Base::is.Take(); + } + + RAPIDJSON_FORCEINLINE void Push(char c) { + stackStream.Put(c); + } + + size_t Length() { return stackStream.Length(); } + + const char* Pop() { + stackStream.Put('\0'); + return stackStream.Pop(); + } + + private: + StackStream stackStream; + }; + + template + class NumberStream : public NumberStream { + typedef NumberStream Base; + public: + NumberStream(GenericReader& reader, InputStream& is) : Base(reader, is) {} + + RAPIDJSON_FORCEINLINE Ch Take() { return Base::TakePush(); } + }; + + template + void ParseNumber(InputStream& is, Handler& handler) { + internal::StreamLocalCopy copy(is); + NumberStream s(*this, copy.s); + + size_t startOffset = s.Tell(); + double d = 0.0; + bool useNanOrInf = false; + + // Parse minus + bool minus = Consume(s, '-'); + + // Parse int: zero / ( digit1-9 *DIGIT ) + unsigned i = 0; + uint64_t i64 = 0; + bool use64bit = false; + int significandDigit = 0; + if (RAPIDJSON_UNLIKELY(s.Peek() == '0')) { + i = 0; + s.TakePush(); + } + else if (RAPIDJSON_LIKELY(s.Peek() >= '1' && s.Peek() <= '9')) { + i = static_cast(s.TakePush() - '0'); + + if (minus) + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + if (RAPIDJSON_UNLIKELY(i >= 214748364)) { // 2^31 = 2147483648 + if (RAPIDJSON_LIKELY(i != 214748364 || s.Peek() > '8')) { + i64 = i; + use64bit = true; + break; + } + } + i = i * 10 + static_cast(s.TakePush() - '0'); + significandDigit++; + } + else + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + if (RAPIDJSON_UNLIKELY(i >= 429496729)) { // 2^32 - 1 = 4294967295 + if (RAPIDJSON_LIKELY(i != 429496729 || s.Peek() > 
'5')) { + i64 = i; + use64bit = true; + break; + } + } + i = i * 10 + static_cast(s.TakePush() - '0'); + significandDigit++; + } + } + // Parse NaN or Infinity here + else if ((parseFlags & kParseNanAndInfFlag) && RAPIDJSON_LIKELY((s.Peek() == 'I' || s.Peek() == 'N'))) { + if (Consume(s, 'N')) { + if (Consume(s, 'a') && Consume(s, 'N')) { + d = std::numeric_limits::quiet_NaN(); + useNanOrInf = true; + } + } + else if (RAPIDJSON_LIKELY(Consume(s, 'I'))) { + if (Consume(s, 'n') && Consume(s, 'f')) { + d = (minus ? -std::numeric_limits::infinity() : std::numeric_limits::infinity()); + useNanOrInf = true; + + if (RAPIDJSON_UNLIKELY(s.Peek() == 'i' && !(Consume(s, 'i') && Consume(s, 'n') + && Consume(s, 'i') && Consume(s, 't') && Consume(s, 'y')))) { + RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, s.Tell()); + } + } + } + + if (RAPIDJSON_UNLIKELY(!useNanOrInf)) { + RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, s.Tell()); + } + } + else + RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, s.Tell()); + + // Parse 64bit int + bool useDouble = false; + if (use64bit) { + if (minus) + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + if (RAPIDJSON_UNLIKELY(i64 >= RAPIDJSON_UINT64_C2(0x0CCCCCCC, 0xCCCCCCCC))) // 2^63 = 9223372036854775808 + if (RAPIDJSON_LIKELY(i64 != RAPIDJSON_UINT64_C2(0x0CCCCCCC, 0xCCCCCCCC) || s.Peek() > '8')) { + d = static_cast(i64); + useDouble = true; + break; + } + i64 = i64 * 10 + static_cast(s.TakePush() - '0'); + significandDigit++; + } + else + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + if (RAPIDJSON_UNLIKELY(i64 >= RAPIDJSON_UINT64_C2(0x19999999, 0x99999999))) // 2^64 - 1 = 18446744073709551615 + if (RAPIDJSON_LIKELY(i64 != RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) || s.Peek() > '5')) { + d = static_cast(i64); + useDouble = true; + break; + } + i64 = i64 * 10 + static_cast(s.TakePush() - '0'); + significandDigit++; + } + } + + // Force double for big integer + if (useDouble) { + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + d = d * 10 + (s.TakePush() - '0'); + } + } + + // Parse frac = decimal-point 1*DIGIT + int expFrac = 0; + size_t decimalPosition; + if (Consume(s, '.')) { + decimalPosition = s.Length(); + + if (RAPIDJSON_UNLIKELY(!(s.Peek() >= '0' && s.Peek() <= '9'))) + RAPIDJSON_PARSE_ERROR(kParseErrorNumberMissFraction, s.Tell()); + + if (!useDouble) { +#if RAPIDJSON_64BIT + // Use i64 to store significand in 64-bit architecture + if (!use64bit) + i64 = i; + + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + if (i64 > RAPIDJSON_UINT64_C2(0x1FFFFF, 0xFFFFFFFF)) // 2^53 - 1 for fast path + break; + else { + i64 = i64 * 10 + static_cast(s.TakePush() - '0'); + --expFrac; + if (i64 != 0) + significandDigit++; + } + } + + d = static_cast(i64); +#else + // Use double to store significand in 32-bit architecture + d = static_cast(use64bit ? i64 : i); +#endif + useDouble = true; + } + + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + if (significandDigit < 17) { + d = d * 10.0 + (s.TakePush() - '0'); + --expFrac; + if (RAPIDJSON_LIKELY(d > 0.0)) + significandDigit++; + } + else + s.TakePush(); + } + } + else + decimalPosition = s.Length(); // decimal position at the end of integer. + + // Parse exp = e [ minus / plus ] 1*DIGIT + int exp = 0; + if (Consume(s, 'e') || Consume(s, 'E')) { + if (!useDouble) { + d = static_cast(use64bit ? 
i64 : i); + useDouble = true; + } + + bool expMinus = false; + if (Consume(s, '+')) + ; + else if (Consume(s, '-')) + expMinus = true; + + if (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + exp = static_cast(s.Take() - '0'); + if (expMinus) { + // (exp + expFrac) must not underflow int => we're detecting when -exp gets + // dangerously close to INT_MIN (a pessimistic next digit 9 would push it into + // underflow territory): + // + // -(exp * 10 + 9) + expFrac >= INT_MIN + // <=> exp <= (expFrac - INT_MIN - 9) / 10 + RAPIDJSON_ASSERT(expFrac <= 0); + int maxExp = (expFrac + 2147483639) / 10; + + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + exp = exp * 10 + static_cast(s.Take() - '0'); + if (RAPIDJSON_UNLIKELY(exp > maxExp)) { + while (RAPIDJSON_UNLIKELY(s.Peek() >= '0' && s.Peek() <= '9')) // Consume the rest of exponent + s.Take(); + } + } + } + else { // positive exp + int maxExp = 308 - expFrac; + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + exp = exp * 10 + static_cast(s.Take() - '0'); + if (RAPIDJSON_UNLIKELY(exp > maxExp)) + RAPIDJSON_PARSE_ERROR(kParseErrorNumberTooBig, startOffset); + } + } + } + else + RAPIDJSON_PARSE_ERROR(kParseErrorNumberMissExponent, s.Tell()); + + if (expMinus) + exp = -exp; + } + + // Finish parsing, call event according to the type of number. + bool cont = true; + + if (parseFlags & kParseNumbersAsStringsFlag) { + if (parseFlags & kParseInsituFlag) { + s.Pop(); // Pop stack no matter if it will be used or not. + typename InputStream::Ch* head = is.PutBegin(); + const size_t length = s.Tell() - startOffset; + RAPIDJSON_ASSERT(length <= 0xFFFFFFFF); + // unable to insert the \0 character here, it will erase the comma after this number + const typename TargetEncoding::Ch* const str = reinterpret_cast(head); + cont = handler.RawNumber(str, SizeType(length), false); + } + else { + SizeType numCharsToCopy = static_cast(s.Length()); + StringStream srcStream(s.Pop()); + StackStream dstStream(stack_); + while (numCharsToCopy--) { + Transcoder, TargetEncoding>::Transcode(srcStream, dstStream); + } + dstStream.Put('\0'); + const typename TargetEncoding::Ch* str = dstStream.Pop(); + const SizeType length = static_cast(dstStream.Length()) - 1; + cont = handler.RawNumber(str, SizeType(length), true); + } + } + else { + size_t length = s.Length(); + const char* decimal = s.Pop(); // Pop stack no matter if it will be used or not. + + if (useDouble) { + int p = exp + expFrac; + if (parseFlags & kParseFullPrecisionFlag) + d = internal::StrtodFullPrecision(d, p, decimal, length, decimalPosition, exp); + else + d = internal::StrtodNormalPrecision(d, p); + + // Use > max, instead of == inf, to fix bogus warning -Wfloat-equal + if (d > (std::numeric_limits::max)()) { + // Overflow + // TODO: internal::StrtodX should report overflow (or underflow) + RAPIDJSON_PARSE_ERROR(kParseErrorNumberTooBig, startOffset); + } + + cont = handler.Double(minus ? 
-d : d); + } + else if (useNanOrInf) { + cont = handler.Double(d); + } + else { + if (use64bit) { + if (minus) + cont = handler.Int64(static_cast(~i64 + 1)); + else + cont = handler.Uint64(i64); + } + else { + if (minus) + cont = handler.Int(static_cast(~i + 1)); + else + cont = handler.Uint(i); + } + } + } + if (RAPIDJSON_UNLIKELY(!cont)) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, startOffset); + } + + // Parse any JSON value + template + void ParseValue(InputStream& is, Handler& handler) { + switch (is.Peek()) { + case 'n': ParseNull (is, handler); break; + case 't': ParseTrue (is, handler); break; + case 'f': ParseFalse (is, handler); break; + case '"': ParseString(is, handler); break; + case '{': ParseObject(is, handler); break; + case '[': ParseArray (is, handler); break; + default : + ParseNumber(is, handler); + break; + + } + } + + // Iterative Parsing + + // States + enum IterativeParsingState { + IterativeParsingFinishState = 0, // sink states at top + IterativeParsingErrorState, // sink states at top + IterativeParsingStartState, + + // Object states + IterativeParsingObjectInitialState, + IterativeParsingMemberKeyState, + IterativeParsingMemberValueState, + IterativeParsingObjectFinishState, + + // Array states + IterativeParsingArrayInitialState, + IterativeParsingElementState, + IterativeParsingArrayFinishState, + + // Single value state + IterativeParsingValueState, + + // Delimiter states (at bottom) + IterativeParsingElementDelimiterState, + IterativeParsingMemberDelimiterState, + IterativeParsingKeyValueDelimiterState, + + cIterativeParsingStateCount + }; + + // Tokens + enum Token { + LeftBracketToken = 0, + RightBracketToken, + + LeftCurlyBracketToken, + RightCurlyBracketToken, + + CommaToken, + ColonToken, + + StringToken, + FalseToken, + TrueToken, + NullToken, + NumberToken, + + kTokenCount + }; + + RAPIDJSON_FORCEINLINE Token Tokenize(Ch c) const { + +//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN +#define N NumberToken +#define N16 N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N + // Maps from ASCII to Token + static const unsigned char tokenMap[256] = { + N16, // 00~0F + N16, // 10~1F + N, N, StringToken, N, N, N, N, N, N, N, N, N, CommaToken, N, N, N, // 20~2F + N, N, N, N, N, N, N, N, N, N, ColonToken, N, N, N, N, N, // 30~3F + N16, // 40~4F + N, N, N, N, N, N, N, N, N, N, N, LeftBracketToken, N, RightBracketToken, N, N, // 50~5F + N, N, N, N, N, N, FalseToken, N, N, N, N, N, N, N, NullToken, N, // 60~6F + N, N, N, N, TrueToken, N, N, N, N, N, N, LeftCurlyBracketToken, N, RightCurlyBracketToken, N, N, // 70~7F + N16, N16, N16, N16, N16, N16, N16, N16 // 80~FF + }; +#undef N +#undef N16 +//!@endcond + + if (sizeof(Ch) == 1 || static_cast(c) < 256) + return static_cast(tokenMap[static_cast(c)]); + else + return NumberToken; + } + + RAPIDJSON_FORCEINLINE IterativeParsingState Predict(IterativeParsingState state, Token token) const { + // current state x one lookahead token -> new state + static const char G[cIterativeParsingStateCount][kTokenCount] = { + // Finish(sink state) + { + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState + }, + // Error(sink state) + { + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + 
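
Tokenize() above classifies the next character in O(1) through a 256-entry lookup table instead of a comparison chain; bytes with no special meaning all collapse to NumberToken and are rejected later by ParseNumber(). The same table trick in isolation (enum values and mapping here are illustrative, not RapidJSON's):

#include <cstdio>

enum Tok { TokOther, TokString, TokComma, TokColon,
           TokLBracket, TokRBracket, TokLBrace, TokRBrace };

static Tok Classify(unsigned char c) {
    // Zero-initialized, so every unlisted byte stays TokOther.
    static unsigned char table[256];
    static bool filled = false;
    if (!filled) {
        table[static_cast<unsigned char>('"')] = TokString;
        table[static_cast<unsigned char>(',')] = TokComma;
        table[static_cast<unsigned char>(':')] = TokColon;
        table[static_cast<unsigned char>('[')] = TokLBracket;
        table[static_cast<unsigned char>(']')] = TokRBracket;
        table[static_cast<unsigned char>('{')] = TokLBrace;
        table[static_cast<unsigned char>('}')] = TokRBrace;
        filled = true;
    }
    return static_cast<Tok>(table[c]);
}

int main() {
    printf("%d %d\n", Classify('{'), Classify('x')); // prints "6 0"
    return 0;
}
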
IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState + }, + // Start + { + IterativeParsingArrayInitialState, // Left bracket + IterativeParsingErrorState, // Right bracket + IterativeParsingObjectInitialState, // Left curly bracket + IterativeParsingErrorState, // Right curly bracket + IterativeParsingErrorState, // Comma + IterativeParsingErrorState, // Colon + IterativeParsingValueState, // String + IterativeParsingValueState, // False + IterativeParsingValueState, // True + IterativeParsingValueState, // Null + IterativeParsingValueState // Number + }, + // ObjectInitial + { + IterativeParsingErrorState, // Left bracket + IterativeParsingErrorState, // Right bracket + IterativeParsingErrorState, // Left curly bracket + IterativeParsingObjectFinishState, // Right curly bracket + IterativeParsingErrorState, // Comma + IterativeParsingErrorState, // Colon + IterativeParsingMemberKeyState, // String + IterativeParsingErrorState, // False + IterativeParsingErrorState, // True + IterativeParsingErrorState, // Null + IterativeParsingErrorState // Number + }, + // MemberKey + { + IterativeParsingErrorState, // Left bracket + IterativeParsingErrorState, // Right bracket + IterativeParsingErrorState, // Left curly bracket + IterativeParsingErrorState, // Right curly bracket + IterativeParsingErrorState, // Comma + IterativeParsingKeyValueDelimiterState, // Colon + IterativeParsingErrorState, // String + IterativeParsingErrorState, // False + IterativeParsingErrorState, // True + IterativeParsingErrorState, // Null + IterativeParsingErrorState // Number + }, + // MemberValue + { + IterativeParsingErrorState, // Left bracket + IterativeParsingErrorState, // Right bracket + IterativeParsingErrorState, // Left curly bracket + IterativeParsingObjectFinishState, // Right curly bracket + IterativeParsingMemberDelimiterState, // Comma + IterativeParsingErrorState, // Colon + IterativeParsingErrorState, // String + IterativeParsingErrorState, // False + IterativeParsingErrorState, // True + IterativeParsingErrorState, // Null + IterativeParsingErrorState // Number + }, + // ObjectFinish(sink state) + { + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState + }, + // ArrayInitial + { + IterativeParsingArrayInitialState, // Left bracket(push Element state) + IterativeParsingArrayFinishState, // Right bracket + IterativeParsingObjectInitialState, // Left curly bracket(push Element state) + IterativeParsingErrorState, // Right curly bracket + IterativeParsingErrorState, // Comma + IterativeParsingErrorState, // Colon + IterativeParsingElementState, // String + IterativeParsingElementState, // False + IterativeParsingElementState, // True + IterativeParsingElementState, // Null + IterativeParsingElementState // Number + }, + // Element + { + IterativeParsingErrorState, // Left bracket + IterativeParsingArrayFinishState, // Right bracket + IterativeParsingErrorState, // Left curly bracket + IterativeParsingErrorState, // Right curly bracket + IterativeParsingElementDelimiterState, // Comma + IterativeParsingErrorState, // Colon + IterativeParsingErrorState, // String + IterativeParsingErrorState, // False + IterativeParsingErrorState, // True 
+ IterativeParsingErrorState, // Null + IterativeParsingErrorState // Number + }, + // ArrayFinish(sink state) + { + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState + }, + // Single Value (sink state) + { + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState + }, + // ElementDelimiter + { + IterativeParsingArrayInitialState, // Left bracket(push Element state) + IterativeParsingArrayFinishState, // Right bracket + IterativeParsingObjectInitialState, // Left curly bracket(push Element state) + IterativeParsingErrorState, // Right curly bracket + IterativeParsingErrorState, // Comma + IterativeParsingErrorState, // Colon + IterativeParsingElementState, // String + IterativeParsingElementState, // False + IterativeParsingElementState, // True + IterativeParsingElementState, // Null + IterativeParsingElementState // Number + }, + // MemberDelimiter + { + IterativeParsingErrorState, // Left bracket + IterativeParsingErrorState, // Right bracket + IterativeParsingErrorState, // Left curly bracket + IterativeParsingObjectFinishState, // Right curly bracket + IterativeParsingErrorState, // Comma + IterativeParsingErrorState, // Colon + IterativeParsingMemberKeyState, // String + IterativeParsingErrorState, // False + IterativeParsingErrorState, // True + IterativeParsingErrorState, // Null + IterativeParsingErrorState // Number + }, + // KeyValueDelimiter + { + IterativeParsingArrayInitialState, // Left bracket(push MemberValue state) + IterativeParsingErrorState, // Right bracket + IterativeParsingObjectInitialState, // Left curly bracket(push MemberValue state) + IterativeParsingErrorState, // Right curly bracket + IterativeParsingErrorState, // Comma + IterativeParsingErrorState, // Colon + IterativeParsingMemberValueState, // String + IterativeParsingMemberValueState, // False + IterativeParsingMemberValueState, // True + IterativeParsingMemberValueState, // Null + IterativeParsingMemberValueState // Number + }, + }; // End of G + + return static_cast(G[state][token]); + } + + // Make an advance in the token stream and state based on the candidate destination state which was returned by Transit(). + // May return a new state on state pop. + template + RAPIDJSON_FORCEINLINE IterativeParsingState Transit(IterativeParsingState src, Token token, IterativeParsingState dst, InputStream& is, Handler& handler) { + (void)token; + + switch (dst) { + case IterativeParsingErrorState: + return dst; + + case IterativeParsingObjectInitialState: + case IterativeParsingArrayInitialState: + { + // Push the state(Element or MemeberValue) if we are nested in another array or value of member. + // In this way we can get the correct state on ObjectFinish or ArrayFinish by frame pop. + IterativeParsingState n = src; + if (src == IterativeParsingArrayInitialState || src == IterativeParsingElementDelimiterState) + n = IterativeParsingElementState; + else if (src == IterativeParsingKeyValueDelimiterState) + n = IterativeParsingMemberValueState; + // Push current state. 
+ *stack_.template Push(1) = n; + // Initialize and push the member/element count. + *stack_.template Push(1) = 0; + // Call handler + bool hr = (dst == IterativeParsingObjectInitialState) ? handler.StartObject() : handler.StartArray(); + // On handler short circuits the parsing. + if (!hr) { + RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorTermination, is.Tell()); + return IterativeParsingErrorState; + } + else { + is.Take(); + return dst; + } + } + + case IterativeParsingMemberKeyState: + ParseString(is, handler, true); + if (HasParseError()) + return IterativeParsingErrorState; + else + return dst; + + case IterativeParsingKeyValueDelimiterState: + RAPIDJSON_ASSERT(token == ColonToken); + is.Take(); + return dst; + + case IterativeParsingMemberValueState: + // Must be non-compound value. Or it would be ObjectInitial or ArrayInitial state. + ParseValue(is, handler); + if (HasParseError()) { + return IterativeParsingErrorState; + } + return dst; + + case IterativeParsingElementState: + // Must be non-compound value. Or it would be ObjectInitial or ArrayInitial state. + ParseValue(is, handler); + if (HasParseError()) { + return IterativeParsingErrorState; + } + return dst; + + case IterativeParsingMemberDelimiterState: + case IterativeParsingElementDelimiterState: + is.Take(); + // Update member/element count. + *stack_.template Top() = *stack_.template Top() + 1; + return dst; + + case IterativeParsingObjectFinishState: + { + // Transit from delimiter is only allowed when trailing commas are enabled + if (!(parseFlags & kParseTrailingCommasFlag) && src == IterativeParsingMemberDelimiterState) { + RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorObjectMissName, is.Tell()); + return IterativeParsingErrorState; + } + // Get member count. + SizeType c = *stack_.template Pop(1); + // If the object is not empty, count the last member. + if (src == IterativeParsingMemberValueState) + ++c; + // Restore the state. + IterativeParsingState n = static_cast(*stack_.template Pop(1)); + // Transit to Finish state if this is the topmost scope. + if (n == IterativeParsingStartState) + n = IterativeParsingFinishState; + // Call handler + bool hr = handler.EndObject(c); + // On handler short circuits the parsing. + if (!hr) { + RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorTermination, is.Tell()); + return IterativeParsingErrorState; + } + else { + is.Take(); + return n; + } + } + + case IterativeParsingArrayFinishState: + { + // Transit from delimiter is only allowed when trailing commas are enabled + if (!(parseFlags & kParseTrailingCommasFlag) && src == IterativeParsingElementDelimiterState) { + RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorValueInvalid, is.Tell()); + return IterativeParsingErrorState; + } + // Get element count. + SizeType c = *stack_.template Pop(1); + // If the array is not empty, count the last element. + if (src == IterativeParsingElementState) + ++c; + // Restore the state. + IterativeParsingState n = static_cast(*stack_.template Pop(1)); + // Transit to Finish state if this is the topmost scope. + if (n == IterativeParsingStartState) + n = IterativeParsingFinishState; + // Call handler + bool hr = handler.EndArray(c); + // On handler short circuits the parsing. + if (!hr) { + RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorTermination, is.Tell()); + return IterativeParsingErrorState; + } + else { + is.Take(); + return n; + } + } + + default: + // This branch is for IterativeParsingValueState actually. + // Use `default:` rather than + // `case IterativeParsingValueState:` is for code coverage. 
+ + // The IterativeParsingStartState is not enumerated in this switch-case. + // It is impossible for that case. And it can be caught by following assertion. + + // The IterativeParsingFinishState is not enumerated in this switch-case either. + // It is a "derivative" state which cannot triggered from Predict() directly. + // Therefore it cannot happen here. And it can be caught by following assertion. + RAPIDJSON_ASSERT(dst == IterativeParsingValueState); + + // Must be non-compound value. Or it would be ObjectInitial or ArrayInitial state. + ParseValue(is, handler); + if (HasParseError()) { + return IterativeParsingErrorState; + } + return IterativeParsingFinishState; + } + } + + template + void HandleError(IterativeParsingState src, InputStream& is) { + if (HasParseError()) { + // Error flag has been set. + return; + } + + switch (src) { + case IterativeParsingStartState: RAPIDJSON_PARSE_ERROR(kParseErrorDocumentEmpty, is.Tell()); return; + case IterativeParsingFinishState: RAPIDJSON_PARSE_ERROR(kParseErrorDocumentRootNotSingular, is.Tell()); return; + case IterativeParsingObjectInitialState: + case IterativeParsingMemberDelimiterState: RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissName, is.Tell()); return; + case IterativeParsingMemberKeyState: RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissColon, is.Tell()); return; + case IterativeParsingMemberValueState: RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissCommaOrCurlyBracket, is.Tell()); return; + case IterativeParsingKeyValueDelimiterState: + case IterativeParsingArrayInitialState: + case IterativeParsingElementDelimiterState: RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, is.Tell()); return; + default: RAPIDJSON_ASSERT(src == IterativeParsingElementState); RAPIDJSON_PARSE_ERROR(kParseErrorArrayMissCommaOrSquareBracket, is.Tell()); return; + } + } + + RAPIDJSON_FORCEINLINE bool IsIterativeParsingDelimiterState(IterativeParsingState s) const { + return s >= IterativeParsingElementDelimiterState; + } + + RAPIDJSON_FORCEINLINE bool IsIterativeParsingCompleteState(IterativeParsingState s) const { + return s <= IterativeParsingErrorState; + } + + template + ParseResult IterativeParse(InputStream& is, Handler& handler) { + parseResult_.Clear(); + ClearStackOnExit scope(*this); + IterativeParsingState state = IterativeParsingStartState; + + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); + while (is.Peek() != '\0') { + Token t = Tokenize(is.Peek()); + IterativeParsingState n = Predict(state, t); + IterativeParsingState d = Transit(state, t, n, is, handler); + + if (d == IterativeParsingErrorState) { + HandleError(state, is); + break; + } + + state = d; + + // Do not further consume streams if a root JSON has been parsed. + if ((parseFlags & kParseStopWhenDoneFlag) && state == IterativeParsingFinishState) + break; + + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); + } + + // Handle the end of file. + if (state != IterativeParsingFinishState) + HandleError(state, is); + + return parseResult_; + } + + static const size_t kDefaultStackCapacity = 256; //!< Default stack capacity in bytes for storing a single decoded string. + internal::Stack stack_; //!< A stack for storing decoded string temporarily during non-destructive parsing. + ParseResult parseResult_; + IterativeParsingState state_; +}; // class GenericReader + +//! Reader with UTF8 encoding and default allocator. 
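
IterativeParse() above drives the Predict/Transit machine with an explicit stack, so nesting depth costs heap instead of call-stack frames; the deliberate enum ordering (sink states first, delimiter states last) is what lets IsIterativeParsingCompleteState() and IsIterativeParsingDelimiterState() reduce to single comparisons. A sketch of the user-visible effect, stock include paths assumed and the depth chosen arbitrarily:

#include <string>
#include "rapidjson/document.h"
using namespace rapidjson;

int main() {
    // 100000 nested arrays: the default recursive parser may exhaust
    // the program stack here; the iterative parser only grows its own
    // internal stack.
    std::string deep(100000, '[');
    deep.append(100000, ']');
    Document d;
    d.Parse<kParseIterativeFlag>(deep.c_str());
    return d.HasParseError() ? 1 : 0;
}
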
+typedef GenericReader<UTF8<>, UTF8<> > Reader;
+
+RAPIDJSON_NAMESPACE_END
+
+#if defined(__clang__) || defined(_MSC_VER)
+RAPIDJSON_DIAG_POP
+#endif
+
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_READER_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_schema.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_schema.h
new file mode 100644
index 00000000..963dff2c
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_schema.h
@@ -0,0 +1,2496 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_SCHEMA_H_
+#define RAPIDJSON_SCHEMA_H_
+
+#include "lottie_rapidjson_document.h"
+#include "lottie_rapidjson_pointer.h"
+#include "lottie_rapidjson_stringbuffer.h"
+#include <cmath> // abs, floor
+
+#if !defined(RAPIDJSON_SCHEMA_USE_INTERNALREGEX)
+#define RAPIDJSON_SCHEMA_USE_INTERNALREGEX 1
+#else
+#define RAPIDJSON_SCHEMA_USE_INTERNALREGEX 0
+#endif
+
+#if !RAPIDJSON_SCHEMA_USE_INTERNALREGEX && defined(RAPIDJSON_SCHEMA_USE_STDREGEX) && (__cplusplus >=201103L || (defined(_MSC_VER) && _MSC_VER >= 1800))
+#define RAPIDJSON_SCHEMA_USE_STDREGEX 1
+#else
+#define RAPIDJSON_SCHEMA_USE_STDREGEX 0
+#endif
+
+#if RAPIDJSON_SCHEMA_USE_INTERNALREGEX
+#include "lottie_rapidjson_internal_regex.h"
+#elif RAPIDJSON_SCHEMA_USE_STDREGEX
+#include <regex>
+#endif
+
+#if RAPIDJSON_SCHEMA_USE_INTERNALREGEX || RAPIDJSON_SCHEMA_USE_STDREGEX
+#define RAPIDJSON_SCHEMA_HAS_REGEX 1
+#else
+#define RAPIDJSON_SCHEMA_HAS_REGEX 0
+#endif
+
+#ifndef RAPIDJSON_SCHEMA_VERBOSE
+#define RAPIDJSON_SCHEMA_VERBOSE 0
+#endif
+
+#if RAPIDJSON_SCHEMA_VERBOSE
+#include "lottie_rapidjson_stringbuffer.h"
+#endif
+
+RAPIDJSON_DIAG_PUSH
+
+#if defined(__GNUC__)
+RAPIDJSON_DIAG_OFF(effc++)
+#endif
+
+#ifdef __clang__
+RAPIDJSON_DIAG_OFF(weak-vtables)
+RAPIDJSON_DIAG_OFF(exit-time-destructors)
+RAPIDJSON_DIAG_OFF(c++98-compat-pedantic)
+RAPIDJSON_DIAG_OFF(variadic-macros)
+#elif defined(_MSC_VER)
+RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+///////////////////////////////////////////////////////////////////////////////
+// Verbose Utilities
+
+#if RAPIDJSON_SCHEMA_VERBOSE
+
+namespace internal {
+
+inline void PrintInvalidKeyword(const char* keyword) {
+    printf("Fail keyword: %s\n", keyword);
+}
+
+inline void PrintInvalidKeyword(const wchar_t* keyword) {
+    wprintf(L"Fail keyword: %ls\n", keyword);
+}
+
+inline void PrintInvalidDocument(const char* document) {
+    printf("Fail document: %s\n\n", document);
+}
+
+inline void PrintInvalidDocument(const wchar_t* document) {
+    wprintf(L"Fail document: %ls\n\n", document);
+}
+
+inline void PrintValidatorPointers(unsigned depth, const char* s, const char* d) {
+    printf("S: %*s%s\nD: %*s%s\n\n", depth * 4, " ", s, depth * 4, " ", d);
+}
+
+inline void PrintValidatorPointers(unsigned depth, const wchar_t* s,
const wchar_t* d) { + wprintf(L"S: %*ls%ls\nD: %*ls%ls\n\n", depth * 4, L" ", s, depth * 4, L" ", d); +} + +} // namespace internal + +#endif // RAPIDJSON_SCHEMA_VERBOSE + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_INVALID_KEYWORD_RETURN + +#if RAPIDJSON_SCHEMA_VERBOSE +#define RAPIDJSON_INVALID_KEYWORD_VERBOSE(keyword) internal::PrintInvalidKeyword(keyword) +#else +#define RAPIDJSON_INVALID_KEYWORD_VERBOSE(keyword) +#endif + +#define RAPIDJSON_INVALID_KEYWORD_RETURN(keyword)\ +RAPIDJSON_MULTILINEMACRO_BEGIN\ + context.invalidKeyword = keyword.GetString();\ + RAPIDJSON_INVALID_KEYWORD_VERBOSE(keyword.GetString());\ + return false;\ +RAPIDJSON_MULTILINEMACRO_END + +/////////////////////////////////////////////////////////////////////////////// +// Forward declarations + +template +class GenericSchemaDocument; + +namespace internal { + +template +class Schema; + +/////////////////////////////////////////////////////////////////////////////// +// ISchemaValidator + +class ISchemaValidator { +public: + virtual ~ISchemaValidator() {} + virtual bool IsValid() const = 0; +}; + +/////////////////////////////////////////////////////////////////////////////// +// ISchemaStateFactory + +template +class ISchemaStateFactory { +public: + virtual ~ISchemaStateFactory() {} + virtual ISchemaValidator* CreateSchemaValidator(const SchemaType&) = 0; + virtual void DestroySchemaValidator(ISchemaValidator* validator) = 0; + virtual void* CreateHasher() = 0; + virtual uint64_t GetHashCode(void* hasher) = 0; + virtual void DestroryHasher(void* hasher) = 0; + virtual void* MallocState(size_t size) = 0; + virtual void FreeState(void* p) = 0; +}; + +/////////////////////////////////////////////////////////////////////////////// +// IValidationErrorHandler + +template +class IValidationErrorHandler { +public: + typedef typename SchemaType::Ch Ch; + typedef typename SchemaType::SValue SValue; + + virtual ~IValidationErrorHandler() {} + + virtual void NotMultipleOf(int64_t actual, const SValue& expected) = 0; + virtual void NotMultipleOf(uint64_t actual, const SValue& expected) = 0; + virtual void NotMultipleOf(double actual, const SValue& expected) = 0; + virtual void AboveMaximum(int64_t actual, const SValue& expected, bool exclusive) = 0; + virtual void AboveMaximum(uint64_t actual, const SValue& expected, bool exclusive) = 0; + virtual void AboveMaximum(double actual, const SValue& expected, bool exclusive) = 0; + virtual void BelowMinimum(int64_t actual, const SValue& expected, bool exclusive) = 0; + virtual void BelowMinimum(uint64_t actual, const SValue& expected, bool exclusive) = 0; + virtual void BelowMinimum(double actual, const SValue& expected, bool exclusive) = 0; + + virtual void TooLong(const Ch* str, SizeType length, SizeType expected) = 0; + virtual void TooShort(const Ch* str, SizeType length, SizeType expected) = 0; + virtual void DoesNotMatch(const Ch* str, SizeType length) = 0; + + virtual void DisallowedItem(SizeType index) = 0; + virtual void TooFewItems(SizeType actualCount, SizeType expectedCount) = 0; + virtual void TooManyItems(SizeType actualCount, SizeType expectedCount) = 0; + virtual void DuplicateItems(SizeType index1, SizeType index2) = 0; + + virtual void TooManyProperties(SizeType actualCount, SizeType expectedCount) = 0; + virtual void TooFewProperties(SizeType actualCount, SizeType expectedCount) = 0; + virtual void StartMissingProperties() = 0; + virtual void AddMissingProperty(const SValue& name) = 0; + virtual bool 
EndMissingProperties() = 0; + virtual void PropertyViolations(ISchemaValidator** subvalidators, SizeType count) = 0; + virtual void DisallowedProperty(const Ch* name, SizeType length) = 0; + + virtual void StartDependencyErrors() = 0; + virtual void StartMissingDependentProperties() = 0; + virtual void AddMissingDependentProperty(const SValue& targetName) = 0; + virtual void EndMissingDependentProperties(const SValue& sourceName) = 0; + virtual void AddDependencySchemaError(const SValue& souceName, ISchemaValidator* subvalidator) = 0; + virtual bool EndDependencyErrors() = 0; + + virtual void DisallowedValue() = 0; + virtual void StartDisallowedType() = 0; + virtual void AddExpectedType(const typename SchemaType::ValueType& expectedType) = 0; + virtual void EndDisallowedType(const typename SchemaType::ValueType& actualType) = 0; + virtual void NotAllOf(ISchemaValidator** subvalidators, SizeType count) = 0; + virtual void NoneOf(ISchemaValidator** subvalidators, SizeType count) = 0; + virtual void NotOneOf(ISchemaValidator** subvalidators, SizeType count) = 0; + virtual void Disallowed() = 0; +}; + + +/////////////////////////////////////////////////////////////////////////////// +// Hasher + +// For comparison of compound value +template +class Hasher { +public: + typedef typename Encoding::Ch Ch; + + Hasher(Allocator* allocator = 0, size_t stackCapacity = kDefaultSize) : stack_(allocator, stackCapacity) {} + + bool Null() { return WriteType(kNullType); } + bool Bool(bool b) { return WriteType(b ? kTrueType : kFalseType); } + bool Int(int i) { Number n; n.u.i = i; n.d = static_cast(i); return WriteNumber(n); } + bool Uint(unsigned u) { Number n; n.u.u = u; n.d = static_cast(u); return WriteNumber(n); } + bool Int64(int64_t i) { Number n; n.u.i = i; n.d = static_cast(i); return WriteNumber(n); } + bool Uint64(uint64_t u) { Number n; n.u.u = u; n.d = static_cast(u); return WriteNumber(n); } + bool Double(double d) { + Number n; + if (d < 0) n.u.i = static_cast(d); + else n.u.u = static_cast(d); + n.d = d; + return WriteNumber(n); + } + + bool RawNumber(const Ch* str, SizeType len, bool) { + WriteBuffer(kNumberType, str, len * sizeof(Ch)); + return true; + } + + bool String(const Ch* str, SizeType len, bool) { + WriteBuffer(kStringType, str, len * sizeof(Ch)); + return true; + } + + bool StartObject() { return true; } + bool Key(const Ch* str, SizeType len, bool copy) { return String(str, len, copy); } + bool EndObject(SizeType memberCount) { + uint64_t h = Hash(0, kObjectType); + uint64_t* kv = stack_.template Pop(memberCount * 2); + for (SizeType i = 0; i < memberCount; i++) + h ^= Hash(kv[i * 2], kv[i * 2 + 1]); // Use xor to achieve member order insensitive + *stack_.template Push() = h; + return true; + } + + bool StartArray() { return true; } + bool EndArray(SizeType elementCount) { + uint64_t h = Hash(0, kArrayType); + uint64_t* e = stack_.template Pop(elementCount); + for (SizeType i = 0; i < elementCount; i++) + h = Hash(h, e[i]); // Use hash to achieve element order sensitive + *stack_.template Push() = h; + return true; + } + + bool IsValid() const { return stack_.GetSize() == sizeof(uint64_t); } + + uint64_t GetHashCode() const { + RAPIDJSON_ASSERT(IsValid()); + return *stack_.template Top(); + } + +private: + static const size_t kDefaultSize = 256; + struct Number { + union U { + uint64_t u; + int64_t i; + }u; + double d; + }; + + bool WriteType(Type type) { return WriteBuffer(type, 0, 0); } + + bool WriteNumber(const Number& n) { return WriteBuffer(kNumberType, &n, sizeof(n)); } 
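
Hasher above reduces an arbitrary value to a single uint64_t: object members are combined with XOR (commutative, so member order is irrelevant) while array elements are chained through the mixing step (order matters). The combination rules in isolation, using the reference FNV-1a constants for illustration; the seed RapidJSON actually uses is the RAPIDJSON_UINT64_C2 constant visible below:

#include <cstdint>
#include <cstdio>

// FNV-1a style mixing step, shaped like Hasher::Hash().
static uint64_t Mix(uint64_t h, uint64_t d) {
    h ^= d;
    h *= 0x100000001b3ULL; // 64-bit FNV prime
    return h;
}

int main() {
    const uint64_t seed = 0xcbf29ce484222325ULL; // FNV offset basis
    uint64_t m1 = Mix(seed, 1); // stand-in for a hashed (key,value) pair
    uint64_t m2 = Mix(seed, 2);
    // Object rule: XOR-combine, so {m1,m2} == {m2,m1}.
    printf("%d\n", (m1 ^ m2) == (m2 ^ m1)); // 1
    // Array rule: chain, so [m1,m2] != [m2,m1] in general.
    printf("%d\n", Mix(Mix(0, m1), m2) == Mix(Mix(0, m2), m1)); // 0
    return 0;
}
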
+ + bool WriteBuffer(Type type, const void* data, size_t len) { + // FNV-1a from http://isthe.com/chongo/tech/comp/fnv/ + uint64_t h = Hash(RAPIDJSON_UINT64_C2(0x84222325, 0xcbf29ce4), type); + const unsigned char* d = static_cast(data); + for (size_t i = 0; i < len; i++) + h = Hash(h, d[i]); + *stack_.template Push() = h; + return true; + } + + static uint64_t Hash(uint64_t h, uint64_t d) { + static const uint64_t kPrime = RAPIDJSON_UINT64_C2(0x00000100, 0x000001b3); + h ^= d; + h *= kPrime; + return h; + } + + Stack stack_; +}; + +/////////////////////////////////////////////////////////////////////////////// +// SchemaValidationContext + +template +struct SchemaValidationContext { + typedef Schema SchemaType; + typedef ISchemaStateFactory SchemaValidatorFactoryType; + typedef IValidationErrorHandler ErrorHandlerType; + typedef typename SchemaType::ValueType ValueType; + typedef typename ValueType::Ch Ch; + + enum PatternValidatorType { + kPatternValidatorOnly, + kPatternValidatorWithProperty, + kPatternValidatorWithAdditionalProperty + }; + + SchemaValidationContext(SchemaValidatorFactoryType& f, ErrorHandlerType& eh, const SchemaType* s) : + factory(f), + error_handler(eh), + schema(s), + valueSchema(), + invalidKeyword(), + hasher(), + arrayElementHashCodes(), + validators(), + validatorCount(), + patternPropertiesValidators(), + patternPropertiesValidatorCount(), + patternPropertiesSchemas(), + patternPropertiesSchemaCount(), + valuePatternValidatorType(kPatternValidatorOnly), + propertyExist(), + inArray(false), + valueUniqueness(false), + arrayUniqueness(false) + { + } + + ~SchemaValidationContext() { + if (hasher) + factory.DestroryHasher(hasher); + if (validators) { + for (SizeType i = 0; i < validatorCount; i++) + factory.DestroySchemaValidator(validators[i]); + factory.FreeState(validators); + } + if (patternPropertiesValidators) { + for (SizeType i = 0; i < patternPropertiesValidatorCount; i++) + factory.DestroySchemaValidator(patternPropertiesValidators[i]); + factory.FreeState(patternPropertiesValidators); + } + if (patternPropertiesSchemas) + factory.FreeState(patternPropertiesSchemas); + if (propertyExist) + factory.FreeState(propertyExist); + } + + SchemaValidatorFactoryType& factory; + ErrorHandlerType& error_handler; + const SchemaType* schema; + const SchemaType* valueSchema; + const Ch* invalidKeyword; + void* hasher; // Only validator access + void* arrayElementHashCodes; // Only validator access this + ISchemaValidator** validators; + SizeType validatorCount; + ISchemaValidator** patternPropertiesValidators; + SizeType patternPropertiesValidatorCount; + const SchemaType** patternPropertiesSchemas; + SizeType patternPropertiesSchemaCount; + PatternValidatorType valuePatternValidatorType; + PatternValidatorType objectPatternValidatorType; + SizeType arrayElementIndex; + bool* propertyExist; + bool inArray; + bool valueUniqueness; + bool arrayUniqueness; +}; + +/////////////////////////////////////////////////////////////////////////////// +// Schema + +template +class Schema { +public: + typedef typename SchemaDocumentType::ValueType ValueType; + typedef typename SchemaDocumentType::AllocatorType AllocatorType; + typedef typename SchemaDocumentType::PointerType PointerType; + typedef typename ValueType::EncodingType EncodingType; + typedef typename EncodingType::Ch Ch; + typedef SchemaValidationContext Context; + typedef Schema SchemaType; + typedef GenericValue SValue; + typedef IValidationErrorHandler ErrorHandler; + friend class GenericSchemaDocument; + + 
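
Each Schema object below is the compiled form of one schema node; GenericSchemaDocument assembles them into a tree and GenericSchemaValidator replays a document's SAX events against that tree, carrying per-level state in SchemaValidationContext. Typical driver code, sketched against the stock rapidjson header names (this vendored copy prefixes them with lottie_):

#include <cstdio>
#include "rapidjson/schema.h"
#include "rapidjson/stringbuffer.h"
using namespace rapidjson;

int main() {
    Document sd;
    sd.Parse("{\"type\":\"object\","
             "\"properties\":{\"n\":{\"type\":\"integer\",\"minimum\":0}},"
             "\"required\":[\"n\"]}");
    SchemaDocument schema(sd); // compile once, validate many documents

    Document doc;
    doc.Parse("{\"n\":-1}");
    SchemaValidator validator(schema);
    if (!doc.Accept(validator)) {
        StringBuffer sb;
        validator.GetInvalidSchemaPointer().StringifyUriFragment(sb);
        printf("violated '%s' at %s\n",
               validator.GetInvalidSchemaKeyword(), sb.GetString());
        // -> violated 'minimum' at #/properties/n
    }
    return 0;
}
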
Schema(SchemaDocumentType* schemaDocument, const PointerType& p, const ValueType& value, const ValueType& document, AllocatorType* allocator) : + allocator_(allocator), + uri_(schemaDocument->GetURI(), *allocator), + pointer_(p, allocator), + typeless_(schemaDocument->GetTypeless()), + enum_(), + enumCount_(), + not_(), + type_((1 << kTotalSchemaType) - 1), // typeless + validatorCount_(), + notValidatorIndex_(), + properties_(), + additionalPropertiesSchema_(), + patternProperties_(), + patternPropertyCount_(), + propertyCount_(), + minProperties_(), + maxProperties_(SizeType(~0)), + additionalProperties_(true), + hasDependencies_(), + hasRequired_(), + hasSchemaDependencies_(), + additionalItemsSchema_(), + itemsList_(), + itemsTuple_(), + itemsTupleCount_(), + minItems_(), + maxItems_(SizeType(~0)), + additionalItems_(true), + uniqueItems_(false), + pattern_(), + minLength_(0), + maxLength_(~SizeType(0)), + exclusiveMinimum_(false), + exclusiveMaximum_(false), + defaultValueLength_(0) + { + typedef typename ValueType::ConstValueIterator ConstValueIterator; + typedef typename ValueType::ConstMemberIterator ConstMemberIterator; + + if (!value.IsObject()) + return; + + if (const ValueType* v = GetMember(value, GetTypeString())) { + type_ = 0; + if (v->IsString()) + AddType(*v); + else if (v->IsArray()) + for (ConstValueIterator itr = v->Begin(); itr != v->End(); ++itr) + AddType(*itr); + } + + if (const ValueType* v = GetMember(value, GetEnumString())) + if (v->IsArray() && v->Size() > 0) { + enum_ = static_cast(allocator_->Malloc(sizeof(uint64_t) * v->Size())); + for (ConstValueIterator itr = v->Begin(); itr != v->End(); ++itr) { + typedef Hasher > EnumHasherType; + char buffer[256u + 24]; + MemoryPoolAllocator<> hasherAllocator(buffer, sizeof(buffer)); + EnumHasherType h(&hasherAllocator, 256); + itr->Accept(h); + enum_[enumCount_++] = h.GetHashCode(); + } + } + + if (schemaDocument) { + AssignIfExist(allOf_, *schemaDocument, p, value, GetAllOfString(), document); + AssignIfExist(anyOf_, *schemaDocument, p, value, GetAnyOfString(), document); + AssignIfExist(oneOf_, *schemaDocument, p, value, GetOneOfString(), document); + } + + if (const ValueType* v = GetMember(value, GetNotString())) { + schemaDocument->CreateSchema(¬_, p.Append(GetNotString(), allocator_), *v, document); + notValidatorIndex_ = validatorCount_; + validatorCount_++; + } + + // Object + + const ValueType* properties = GetMember(value, GetPropertiesString()); + const ValueType* required = GetMember(value, GetRequiredString()); + const ValueType* dependencies = GetMember(value, GetDependenciesString()); + { + // Gather properties from properties/required/dependencies + SValue allProperties(kArrayType); + + if (properties && properties->IsObject()) + for (ConstMemberIterator itr = properties->MemberBegin(); itr != properties->MemberEnd(); ++itr) + AddUniqueElement(allProperties, itr->name); + + if (required && required->IsArray()) + for (ConstValueIterator itr = required->Begin(); itr != required->End(); ++itr) + if (itr->IsString()) + AddUniqueElement(allProperties, *itr); + + if (dependencies && dependencies->IsObject()) + for (ConstMemberIterator itr = dependencies->MemberBegin(); itr != dependencies->MemberEnd(); ++itr) { + AddUniqueElement(allProperties, itr->name); + if (itr->value.IsArray()) + for (ConstValueIterator i = itr->value.Begin(); i != itr->value.End(); ++i) + if (i->IsString()) + AddUniqueElement(allProperties, *i); + } + + if (allProperties.Size() > 0) { + propertyCount_ = allProperties.Size(); + 
properties_ = static_cast(allocator_->Malloc(sizeof(Property) * propertyCount_)); + for (SizeType i = 0; i < propertyCount_; i++) { + new (&properties_[i]) Property(); + properties_[i].name = allProperties[i]; + properties_[i].schema = typeless_; + } + } + } + + if (properties && properties->IsObject()) { + PointerType q = p.Append(GetPropertiesString(), allocator_); + for (ConstMemberIterator itr = properties->MemberBegin(); itr != properties->MemberEnd(); ++itr) { + SizeType index; + if (FindPropertyIndex(itr->name, &index)) + schemaDocument->CreateSchema(&properties_[index].schema, q.Append(itr->name, allocator_), itr->value, document); + } + } + + if (const ValueType* v = GetMember(value, GetPatternPropertiesString())) { + PointerType q = p.Append(GetPatternPropertiesString(), allocator_); + patternProperties_ = static_cast(allocator_->Malloc(sizeof(PatternProperty) * v->MemberCount())); + patternPropertyCount_ = 0; + + for (ConstMemberIterator itr = v->MemberBegin(); itr != v->MemberEnd(); ++itr) { + new (&patternProperties_[patternPropertyCount_]) PatternProperty(); + patternProperties_[patternPropertyCount_].pattern = CreatePattern(itr->name); + schemaDocument->CreateSchema(&patternProperties_[patternPropertyCount_].schema, q.Append(itr->name, allocator_), itr->value, document); + patternPropertyCount_++; + } + } + + if (required && required->IsArray()) + for (ConstValueIterator itr = required->Begin(); itr != required->End(); ++itr) + if (itr->IsString()) { + SizeType index; + if (FindPropertyIndex(*itr, &index)) { + properties_[index].required = true; + hasRequired_ = true; + } + } + + if (dependencies && dependencies->IsObject()) { + PointerType q = p.Append(GetDependenciesString(), allocator_); + hasDependencies_ = true; + for (ConstMemberIterator itr = dependencies->MemberBegin(); itr != dependencies->MemberEnd(); ++itr) { + SizeType sourceIndex; + if (FindPropertyIndex(itr->name, &sourceIndex)) { + if (itr->value.IsArray()) { + properties_[sourceIndex].dependencies = static_cast(allocator_->Malloc(sizeof(bool) * propertyCount_)); + std::memset(properties_[sourceIndex].dependencies, 0, sizeof(bool)* propertyCount_); + for (ConstValueIterator targetItr = itr->value.Begin(); targetItr != itr->value.End(); ++targetItr) { + SizeType targetIndex; + if (FindPropertyIndex(*targetItr, &targetIndex)) + properties_[sourceIndex].dependencies[targetIndex] = true; + } + } + else if (itr->value.IsObject()) { + hasSchemaDependencies_ = true; + schemaDocument->CreateSchema(&properties_[sourceIndex].dependenciesSchema, q.Append(itr->name, allocator_), itr->value, document); + properties_[sourceIndex].dependenciesValidatorIndex = validatorCount_; + validatorCount_++; + } + } + } + } + + if (const ValueType* v = GetMember(value, GetAdditionalPropertiesString())) { + if (v->IsBool()) + additionalProperties_ = v->GetBool(); + else if (v->IsObject()) + schemaDocument->CreateSchema(&additionalPropertiesSchema_, p.Append(GetAdditionalPropertiesString(), allocator_), *v, document); + } + + AssignIfExist(minProperties_, value, GetMinPropertiesString()); + AssignIfExist(maxProperties_, value, GetMaxPropertiesString()); + + // Array + if (const ValueType* v = GetMember(value, GetItemsString())) { + PointerType q = p.Append(GetItemsString(), allocator_); + if (v->IsObject()) // List validation + schemaDocument->CreateSchema(&itemsList_, q, *v, document); + else if (v->IsArray()) { // Tuple validation + itemsTuple_ = static_cast(allocator_->Malloc(sizeof(const Schema*) * v->Size())); + SizeType index = 0; + 
for (ConstValueIterator itr = v->Begin(); itr != v->End(); ++itr, index++) + schemaDocument->CreateSchema(&itemsTuple_[itemsTupleCount_++], q.Append(index, allocator_), *itr, document); + } + } + + AssignIfExist(minItems_, value, GetMinItemsString()); + AssignIfExist(maxItems_, value, GetMaxItemsString()); + + if (const ValueType* v = GetMember(value, GetAdditionalItemsString())) { + if (v->IsBool()) + additionalItems_ = v->GetBool(); + else if (v->IsObject()) + schemaDocument->CreateSchema(&additionalItemsSchema_, p.Append(GetAdditionalItemsString(), allocator_), *v, document); + } + + AssignIfExist(uniqueItems_, value, GetUniqueItemsString()); + + // String + AssignIfExist(minLength_, value, GetMinLengthString()); + AssignIfExist(maxLength_, value, GetMaxLengthString()); + + if (const ValueType* v = GetMember(value, GetPatternString())) + pattern_ = CreatePattern(*v); + + // Number + if (const ValueType* v = GetMember(value, GetMinimumString())) + if (v->IsNumber()) + minimum_.CopyFrom(*v, *allocator_); + + if (const ValueType* v = GetMember(value, GetMaximumString())) + if (v->IsNumber()) + maximum_.CopyFrom(*v, *allocator_); + + AssignIfExist(exclusiveMinimum_, value, GetExclusiveMinimumString()); + AssignIfExist(exclusiveMaximum_, value, GetExclusiveMaximumString()); + + if (const ValueType* v = GetMember(value, GetMultipleOfString())) + if (v->IsNumber() && v->GetDouble() > 0.0) + multipleOf_.CopyFrom(*v, *allocator_); + + // Default + if (const ValueType* v = GetMember(value, GetDefaultValueString())) + if (v->IsString()) + defaultValueLength_ = v->GetStringLength(); + + } + + ~Schema() { + AllocatorType::Free(enum_); + if (properties_) { + for (SizeType i = 0; i < propertyCount_; i++) + properties_[i].~Property(); + AllocatorType::Free(properties_); + } + if (patternProperties_) { + for (SizeType i = 0; i < patternPropertyCount_; i++) + patternProperties_[i].~PatternProperty(); + AllocatorType::Free(patternProperties_); + } + AllocatorType::Free(itemsTuple_); +#if RAPIDJSON_SCHEMA_HAS_REGEX + if (pattern_) { + pattern_->~RegexType(); + AllocatorType::Free(pattern_); + } +#endif + } + + const SValue& GetURI() const { + return uri_; + } + + const PointerType& GetPointer() const { + return pointer_; + } + + bool BeginValue(Context& context) const { + if (context.inArray) { + if (uniqueItems_) + context.valueUniqueness = true; + + if (itemsList_) + context.valueSchema = itemsList_; + else if (itemsTuple_) { + if (context.arrayElementIndex < itemsTupleCount_) + context.valueSchema = itemsTuple_[context.arrayElementIndex]; + else if (additionalItemsSchema_) + context.valueSchema = additionalItemsSchema_; + else if (additionalItems_) + context.valueSchema = typeless_; + else { + context.error_handler.DisallowedItem(context.arrayElementIndex); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetItemsString()); + } + } + else + context.valueSchema = typeless_; + + context.arrayElementIndex++; + } + return true; + } + + RAPIDJSON_FORCEINLINE bool EndValue(Context& context) const { + if (context.patternPropertiesValidatorCount > 0) { + bool otherValid = false; + SizeType count = context.patternPropertiesValidatorCount; + if (context.objectPatternValidatorType != Context::kPatternValidatorOnly) + otherValid = context.patternPropertiesValidators[--count]->IsValid(); + + bool patternValid = true; + for (SizeType i = 0; i < count; i++) + if (!context.patternPropertiesValidators[i]->IsValid()) { + patternValid = false; + break; + } + + if (context.objectPatternValidatorType == Context::kPatternValidatorOnly) { 
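
BeginValue() above selects the sub-schema for each array element: a single items object applies to every element (list validation), an items array is positional (tuple validation), and additionalItems governs elements past the tuple's end. A sketch of the difference, same stock-include assumption:

#include "rapidjson/schema.h"
using namespace rapidjson;

static bool Valid(const char* schemaJson, const char* json) {
    Document sd;
    sd.Parse(schemaJson);
    SchemaDocument schema(sd);
    Document d;
    d.Parse(json);
    SchemaValidator v(schema);
    return d.Accept(v);
}

int main() {
    const char* tuple =
        "{\"type\":\"array\","
        "\"items\":[{\"type\":\"string\"},{\"type\":\"integer\"}],"
        "\"additionalItems\":false}";
    bool a = Valid(tuple, "[\"id\",7]");      // true: positions match
    bool b = Valid(tuple, "[\"id\",7,true]"); // false: third element disallowed
    return (a && !b) ? 0 : 1;
}
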
+ if (!patternValid) { + context.error_handler.PropertyViolations(context.patternPropertiesValidators, count); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternPropertiesString()); + } + } + else if (context.objectPatternValidatorType == Context::kPatternValidatorWithProperty) { + if (!patternValid || !otherValid) { + context.error_handler.PropertyViolations(context.patternPropertiesValidators, count + 1); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternPropertiesString()); + } + } + else if (!patternValid && !otherValid) { // kPatternValidatorWithAdditionalProperty) + context.error_handler.PropertyViolations(context.patternPropertiesValidators, count + 1); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternPropertiesString()); + } + } + + if (enum_) { + const uint64_t h = context.factory.GetHashCode(context.hasher); + for (SizeType i = 0; i < enumCount_; i++) + if (enum_[i] == h) + goto foundEnum; + context.error_handler.DisallowedValue(); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetEnumString()); + foundEnum:; + } + + if (allOf_.schemas) + for (SizeType i = allOf_.begin; i < allOf_.begin + allOf_.count; i++) + if (!context.validators[i]->IsValid()) { + context.error_handler.NotAllOf(&context.validators[allOf_.begin], allOf_.count); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetAllOfString()); + } + + if (anyOf_.schemas) { + for (SizeType i = anyOf_.begin; i < anyOf_.begin + anyOf_.count; i++) + if (context.validators[i]->IsValid()) + goto foundAny; + context.error_handler.NoneOf(&context.validators[anyOf_.begin], anyOf_.count); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetAnyOfString()); + foundAny:; + } + + if (oneOf_.schemas) { + bool oneValid = false; + for (SizeType i = oneOf_.begin; i < oneOf_.begin + oneOf_.count; i++) + if (context.validators[i]->IsValid()) { + if (oneValid) { + context.error_handler.NotOneOf(&context.validators[oneOf_.begin], oneOf_.count); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetOneOfString()); + } else + oneValid = true; + } + if (!oneValid) { + context.error_handler.NotOneOf(&context.validators[oneOf_.begin], oneOf_.count); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetOneOfString()); + } + } + + if (not_ && context.validators[notValidatorIndex_]->IsValid()) { + context.error_handler.Disallowed(); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetNotString()); + } + + return true; + } + + bool Null(Context& context) const { + if (!(type_ & (1 << kNullSchemaType))) { + DisallowedType(context, GetNullString()); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + } + return CreateParallelValidator(context); + } + + bool Bool(Context& context, bool) const { + if (!(type_ & (1 << kBooleanSchemaType))) { + DisallowedType(context, GetBooleanString()); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + } + return CreateParallelValidator(context); + } + + bool Int(Context& context, int i) const { + if (!CheckInt(context, i)) + return false; + return CreateParallelValidator(context); + } + + bool Uint(Context& context, unsigned u) const { + if (!CheckUint(context, u)) + return false; + return CreateParallelValidator(context); + } + + bool Int64(Context& context, int64_t i) const { + if (!CheckInt(context, i)) + return false; + return CreateParallelValidator(context); + } + + bool Uint64(Context& context, uint64_t u) const { + if (!CheckUint(context, u)) + return false; + return CreateParallelValidator(context); + } + + bool Double(Context& context, double d) const { + if (!(type_ & (1 << kNumberSchemaType))) { + DisallowedType(context, GetNumberString()); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + } + + if 
(!minimum_.IsNull() && !CheckDoubleMinimum(context, d)) + return false; + + if (!maximum_.IsNull() && !CheckDoubleMaximum(context, d)) + return false; + + if (!multipleOf_.IsNull() && !CheckDoubleMultipleOf(context, d)) + return false; + + return CreateParallelValidator(context); + } + + bool String(Context& context, const Ch* str, SizeType length, bool) const { + if (!(type_ & (1 << kStringSchemaType))) { + DisallowedType(context, GetStringString()); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + } + + if (minLength_ != 0 || maxLength_ != SizeType(~0)) { + SizeType count; + if (internal::CountStringCodePoint(str, length, &count)) { + if (count < minLength_) { + context.error_handler.TooShort(str, length, minLength_); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinLengthString()); + } + if (count > maxLength_) { + context.error_handler.TooLong(str, length, maxLength_); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaxLengthString()); + } + } + } + + if (pattern_ && !IsPatternMatch(pattern_, str, length)) { + context.error_handler.DoesNotMatch(str, length); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternString()); + } + + return CreateParallelValidator(context); + } + + bool StartObject(Context& context) const { + if (!(type_ & (1 << kObjectSchemaType))) { + DisallowedType(context, GetObjectString()); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + } + + if (hasDependencies_ || hasRequired_) { + context.propertyExist = static_cast(context.factory.MallocState(sizeof(bool) * propertyCount_)); + std::memset(context.propertyExist, 0, sizeof(bool) * propertyCount_); + } + + if (patternProperties_) { // pre-allocate schema array + SizeType count = patternPropertyCount_ + 1; // extra for valuePatternValidatorType + context.patternPropertiesSchemas = static_cast(context.factory.MallocState(sizeof(const SchemaType*) * count)); + context.patternPropertiesSchemaCount = 0; + std::memset(context.patternPropertiesSchemas, 0, sizeof(SchemaType*) * count); + } + + return CreateParallelValidator(context); + } + + bool Key(Context& context, const Ch* str, SizeType len, bool) const { + if (patternProperties_) { + context.patternPropertiesSchemaCount = 0; + for (SizeType i = 0; i < patternPropertyCount_; i++) + if (patternProperties_[i].pattern && IsPatternMatch(patternProperties_[i].pattern, str, len)) { + context.patternPropertiesSchemas[context.patternPropertiesSchemaCount++] = patternProperties_[i].schema; + context.valueSchema = typeless_; + } + } + + SizeType index = 0; + if (FindPropertyIndex(ValueType(str, len).Move(), &index)) { + if (context.patternPropertiesSchemaCount > 0) { + context.patternPropertiesSchemas[context.patternPropertiesSchemaCount++] = properties_[index].schema; + context.valueSchema = typeless_; + context.valuePatternValidatorType = Context::kPatternValidatorWithProperty; + } + else + context.valueSchema = properties_[index].schema; + + if (context.propertyExist) + context.propertyExist[index] = true; + + return true; + } + + if (additionalPropertiesSchema_) { + if (additionalPropertiesSchema_ && context.patternPropertiesSchemaCount > 0) { + context.patternPropertiesSchemas[context.patternPropertiesSchemaCount++] = additionalPropertiesSchema_; + context.valueSchema = typeless_; + context.valuePatternValidatorType = Context::kPatternValidatorWithAdditionalProperty; + } + else + context.valueSchema = additionalPropertiesSchema_; + return true; + } + else if (additionalProperties_) { + context.valueSchema = typeless_; + return true; + } + + if (context.patternPropertiesSchemaCount == 0) 
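
Note that String() above measures minLength/maxLength in code points via CountStringCodePoint(), not in bytes, so a multi-byte UTF-8 character still counts as one. A small sketch:

#include "rapidjson/schema.h"
using namespace rapidjson;

int main() {
    Document sd;
    sd.Parse("{\"type\":\"string\",\"minLength\":2}");
    SchemaDocument schema(sd);
    SchemaValidator v(schema);
    Document d;
    d.Parse("\"\\u00e9\""); // one code point, two UTF-8 bytes
    // Rejected: 1 code point < minLength 2 (byte count is irrelevant).
    return d.Accept(v) ? 1 : 0;
}
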
{ // patternProperties are not additional properties + context.error_handler.DisallowedProperty(str, len); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetAdditionalPropertiesString()); + } + + return true; + } + + bool EndObject(Context& context, SizeType memberCount) const { + if (hasRequired_) { + context.error_handler.StartMissingProperties(); + for (SizeType index = 0; index < propertyCount_; index++) + if (properties_[index].required && !context.propertyExist[index]) + if (properties_[index].schema->defaultValueLength_ == 0 ) + context.error_handler.AddMissingProperty(properties_[index].name); + if (context.error_handler.EndMissingProperties()) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetRequiredString()); + } + + if (memberCount < minProperties_) { + context.error_handler.TooFewProperties(memberCount, minProperties_); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinPropertiesString()); + } + + if (memberCount > maxProperties_) { + context.error_handler.TooManyProperties(memberCount, maxProperties_); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaxPropertiesString()); + } + + if (hasDependencies_) { + context.error_handler.StartDependencyErrors(); + for (SizeType sourceIndex = 0; sourceIndex < propertyCount_; sourceIndex++) { + const Property& source = properties_[sourceIndex]; + if (context.propertyExist[sourceIndex]) { + if (source.dependencies) { + context.error_handler.StartMissingDependentProperties(); + for (SizeType targetIndex = 0; targetIndex < propertyCount_; targetIndex++) + if (source.dependencies[targetIndex] && !context.propertyExist[targetIndex]) + context.error_handler.AddMissingDependentProperty(properties_[targetIndex].name); + context.error_handler.EndMissingDependentProperties(source.name); + } + else if (source.dependenciesSchema) { + ISchemaValidator* dependenciesValidator = context.validators[source.dependenciesValidatorIndex]; + if (!dependenciesValidator->IsValid()) + context.error_handler.AddDependencySchemaError(source.name, dependenciesValidator); + } + } + } + if (context.error_handler.EndDependencyErrors()) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetDependenciesString()); + } + + return true; + } + + bool StartArray(Context& context) const { + if (!(type_ & (1 << kArraySchemaType))) { + DisallowedType(context, GetArrayString()); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + } + + context.arrayElementIndex = 0; + context.inArray = true; + + return CreateParallelValidator(context); + } + + bool EndArray(Context& context, SizeType elementCount) const { + context.inArray = false; + + if (elementCount < minItems_) { + context.error_handler.TooFewItems(elementCount, minItems_); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinItemsString()); + } + + if (elementCount > maxItems_) { + context.error_handler.TooManyItems(elementCount, maxItems_); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaxItemsString()); + } + + return true; + } + + // Generate functions for string literal according to Ch +#define RAPIDJSON_STRING_(name, ...) 
\ + static const ValueType& Get##name##String() {\ + static const Ch s[] = { __VA_ARGS__, '\0' };\ + static const ValueType v(s, static_cast(sizeof(s) / sizeof(Ch) - 1));\ + return v;\ + } + + RAPIDJSON_STRING_(Null, 'n', 'u', 'l', 'l') + RAPIDJSON_STRING_(Boolean, 'b', 'o', 'o', 'l', 'e', 'a', 'n') + RAPIDJSON_STRING_(Object, 'o', 'b', 'j', 'e', 'c', 't') + RAPIDJSON_STRING_(Array, 'a', 'r', 'r', 'a', 'y') + RAPIDJSON_STRING_(String, 's', 't', 'r', 'i', 'n', 'g') + RAPIDJSON_STRING_(Number, 'n', 'u', 'm', 'b', 'e', 'r') + RAPIDJSON_STRING_(Integer, 'i', 'n', 't', 'e', 'g', 'e', 'r') + RAPIDJSON_STRING_(Type, 't', 'y', 'p', 'e') + RAPIDJSON_STRING_(Enum, 'e', 'n', 'u', 'm') + RAPIDJSON_STRING_(AllOf, 'a', 'l', 'l', 'O', 'f') + RAPIDJSON_STRING_(AnyOf, 'a', 'n', 'y', 'O', 'f') + RAPIDJSON_STRING_(OneOf, 'o', 'n', 'e', 'O', 'f') + RAPIDJSON_STRING_(Not, 'n', 'o', 't') + RAPIDJSON_STRING_(Properties, 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's') + RAPIDJSON_STRING_(Required, 'r', 'e', 'q', 'u', 'i', 'r', 'e', 'd') + RAPIDJSON_STRING_(Dependencies, 'd', 'e', 'p', 'e', 'n', 'd', 'e', 'n', 'c', 'i', 'e', 's') + RAPIDJSON_STRING_(PatternProperties, 'p', 'a', 't', 't', 'e', 'r', 'n', 'P', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's') + RAPIDJSON_STRING_(AdditionalProperties, 'a', 'd', 'd', 'i', 't', 'i', 'o', 'n', 'a', 'l', 'P', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's') + RAPIDJSON_STRING_(MinProperties, 'm', 'i', 'n', 'P', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's') + RAPIDJSON_STRING_(MaxProperties, 'm', 'a', 'x', 'P', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's') + RAPIDJSON_STRING_(Items, 'i', 't', 'e', 'm', 's') + RAPIDJSON_STRING_(MinItems, 'm', 'i', 'n', 'I', 't', 'e', 'm', 's') + RAPIDJSON_STRING_(MaxItems, 'm', 'a', 'x', 'I', 't', 'e', 'm', 's') + RAPIDJSON_STRING_(AdditionalItems, 'a', 'd', 'd', 'i', 't', 'i', 'o', 'n', 'a', 'l', 'I', 't', 'e', 'm', 's') + RAPIDJSON_STRING_(UniqueItems, 'u', 'n', 'i', 'q', 'u', 'e', 'I', 't', 'e', 'm', 's') + RAPIDJSON_STRING_(MinLength, 'm', 'i', 'n', 'L', 'e', 'n', 'g', 't', 'h') + RAPIDJSON_STRING_(MaxLength, 'm', 'a', 'x', 'L', 'e', 'n', 'g', 't', 'h') + RAPIDJSON_STRING_(Pattern, 'p', 'a', 't', 't', 'e', 'r', 'n') + RAPIDJSON_STRING_(Minimum, 'm', 'i', 'n', 'i', 'm', 'u', 'm') + RAPIDJSON_STRING_(Maximum, 'm', 'a', 'x', 'i', 'm', 'u', 'm') + RAPIDJSON_STRING_(ExclusiveMinimum, 'e', 'x', 'c', 'l', 'u', 's', 'i', 'v', 'e', 'M', 'i', 'n', 'i', 'm', 'u', 'm') + RAPIDJSON_STRING_(ExclusiveMaximum, 'e', 'x', 'c', 'l', 'u', 's', 'i', 'v', 'e', 'M', 'a', 'x', 'i', 'm', 'u', 'm') + RAPIDJSON_STRING_(MultipleOf, 'm', 'u', 'l', 't', 'i', 'p', 'l', 'e', 'O', 'f') + RAPIDJSON_STRING_(DefaultValue, 'd', 'e', 'f', 'a', 'u', 'l', 't') + +#undef RAPIDJSON_STRING_ + +private: + enum SchemaValueType { + kNullSchemaType, + kBooleanSchemaType, + kObjectSchemaType, + kArraySchemaType, + kStringSchemaType, + kNumberSchemaType, + kIntegerSchemaType, + kTotalSchemaType + }; + +#if RAPIDJSON_SCHEMA_USE_INTERNALREGEX + typedef internal::GenericRegex RegexType; +#elif RAPIDJSON_SCHEMA_USE_STDREGEX + typedef std::basic_regex RegexType; +#else + typedef char RegexType; +#endif + + struct SchemaArray { + SchemaArray() : schemas(), count() {} + ~SchemaArray() { AllocatorType::Free(schemas); } + const SchemaType** schemas; + SizeType begin; // begin index of context.validators + SizeType count; + }; + + template + void AddUniqueElement(V1& a, const V2& v) { + for (typename V1::ConstValueIterator itr = a.Begin(); itr != a.End(); ++itr) + if (*itr == v) + return; + V1 c(v, 
*allocator_); + a.PushBack(c, *allocator_); + } + + static const ValueType* GetMember(const ValueType& value, const ValueType& name) { + typename ValueType::ConstMemberIterator itr = value.FindMember(name); + return itr != value.MemberEnd() ? &(itr->value) : 0; + } + + static void AssignIfExist(bool& out, const ValueType& value, const ValueType& name) { + if (const ValueType* v = GetMember(value, name)) + if (v->IsBool()) + out = v->GetBool(); + } + + static void AssignIfExist(SizeType& out, const ValueType& value, const ValueType& name) { + if (const ValueType* v = GetMember(value, name)) + if (v->IsUint64() && v->GetUint64() <= SizeType(~0)) + out = static_cast(v->GetUint64()); + } + + void AssignIfExist(SchemaArray& out, SchemaDocumentType& schemaDocument, const PointerType& p, const ValueType& value, const ValueType& name, const ValueType& document) { + if (const ValueType* v = GetMember(value, name)) { + if (v->IsArray() && v->Size() > 0) { + PointerType q = p.Append(name, allocator_); + out.count = v->Size(); + out.schemas = static_cast(allocator_->Malloc(out.count * sizeof(const Schema*))); + memset(out.schemas, 0, sizeof(Schema*)* out.count); + for (SizeType i = 0; i < out.count; i++) + schemaDocument.CreateSchema(&out.schemas[i], q.Append(i, allocator_), (*v)[i], document); + out.begin = validatorCount_; + validatorCount_ += out.count; + } + } + } + +#if RAPIDJSON_SCHEMA_USE_INTERNALREGEX + template + RegexType* CreatePattern(const ValueType& value) { + if (value.IsString()) { + RegexType* r = new (allocator_->Malloc(sizeof(RegexType))) RegexType(value.GetString(), allocator_); + if (!r->IsValid()) { + r->~RegexType(); + AllocatorType::Free(r); + r = 0; + } + return r; + } + return 0; + } + + static bool IsPatternMatch(const RegexType* pattern, const Ch *str, SizeType) { + GenericRegexSearch rs(*pattern); + return rs.Search(str); + } +#elif RAPIDJSON_SCHEMA_USE_STDREGEX + template + RegexType* CreatePattern(const ValueType& value) { + if (value.IsString()) { + RegexType *r = static_cast(allocator_->Malloc(sizeof(RegexType))); + try { + return new (r) RegexType(value.GetString(), std::size_t(value.GetStringLength()), std::regex_constants::ECMAScript); + } + catch (const std::regex_error&) { + AllocatorType::Free(r); + } + } + return 0; + } + + static bool IsPatternMatch(const RegexType* pattern, const Ch *str, SizeType length) { + std::match_results r; + return std::regex_search(str, str + length, r, *pattern); + } +#else + template + RegexType* CreatePattern(const ValueType&) { return 0; } + + static bool IsPatternMatch(const RegexType*, const Ch *, SizeType) { return true; } +#endif // RAPIDJSON_SCHEMA_USE_STDREGEX + + void AddType(const ValueType& type) { + if (type == GetNullString() ) type_ |= 1 << kNullSchemaType; + else if (type == GetBooleanString()) type_ |= 1 << kBooleanSchemaType; + else if (type == GetObjectString() ) type_ |= 1 << kObjectSchemaType; + else if (type == GetArrayString() ) type_ |= 1 << kArraySchemaType; + else if (type == GetStringString() ) type_ |= 1 << kStringSchemaType; + else if (type == GetIntegerString()) type_ |= 1 << kIntegerSchemaType; + else if (type == GetNumberString() ) type_ |= (1 << kNumberSchemaType) | (1 << kIntegerSchemaType); + } + + bool CreateParallelValidator(Context& context) const { + if (enum_ || context.arrayUniqueness) + context.hasher = context.factory.CreateHasher(); + + if (validatorCount_) { + RAPIDJSON_ASSERT(context.validators == 0); + context.validators = static_cast(context.factory.MallocState(sizeof(ISchemaValidator*) 
* validatorCount_)); + context.validatorCount = validatorCount_; + + if (allOf_.schemas) + CreateSchemaValidators(context, allOf_); + + if (anyOf_.schemas) + CreateSchemaValidators(context, anyOf_); + + if (oneOf_.schemas) + CreateSchemaValidators(context, oneOf_); + + if (not_) + context.validators[notValidatorIndex_] = context.factory.CreateSchemaValidator(*not_); + + if (hasSchemaDependencies_) { + for (SizeType i = 0; i < propertyCount_; i++) + if (properties_[i].dependenciesSchema) + context.validators[properties_[i].dependenciesValidatorIndex] = context.factory.CreateSchemaValidator(*properties_[i].dependenciesSchema); + } + } + + return true; + } + + void CreateSchemaValidators(Context& context, const SchemaArray& schemas) const { + for (SizeType i = 0; i < schemas.count; i++) + context.validators[schemas.begin + i] = context.factory.CreateSchemaValidator(*schemas.schemas[i]); + } + + // O(n) + bool FindPropertyIndex(const ValueType& name, SizeType* outIndex) const { + SizeType len = name.GetStringLength(); + const Ch* str = name.GetString(); + for (SizeType index = 0; index < propertyCount_; index++) + if (properties_[index].name.GetStringLength() == len && + (std::memcmp(properties_[index].name.GetString(), str, sizeof(Ch) * len) == 0)) + { + *outIndex = index; + return true; + } + return false; + } + + bool CheckInt(Context& context, int64_t i) const { + if (!(type_ & ((1 << kIntegerSchemaType) | (1 << kNumberSchemaType)))) { + DisallowedType(context, GetIntegerString()); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + } + + if (!minimum_.IsNull()) { + if (minimum_.IsInt64()) { + if (exclusiveMinimum_ ? i <= minimum_.GetInt64() : i < minimum_.GetInt64()) { + context.error_handler.BelowMinimum(i, minimum_, exclusiveMinimum_); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString()); + } + } + else if (minimum_.IsUint64()) { + context.error_handler.BelowMinimum(i, minimum_, exclusiveMinimum_); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString()); // i <= max(int64_t) < minimum.GetUint64() + } + else if (!CheckDoubleMinimum(context, static_cast(i))) + return false; + } + + if (!maximum_.IsNull()) { + if (maximum_.IsInt64()) { + if (exclusiveMaximum_ ? i >= maximum_.GetInt64() : i > maximum_.GetInt64()) { + context.error_handler.AboveMaximum(i, maximum_, exclusiveMaximum_); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString()); + } + } + else if (maximum_.IsUint64()) { } + /* do nothing */ // i <= max(int64_t) < maximum_.GetUint64() + else if (!CheckDoubleMaximum(context, static_cast(i))) + return false; + } + + if (!multipleOf_.IsNull()) { + if (multipleOf_.IsUint64()) { + if (static_cast(i >= 0 ? i : -i) % multipleOf_.GetUint64() != 0) { + context.error_handler.NotMultipleOf(i, multipleOf_); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMultipleOfString()); + } + } + else if (!CheckDoubleMultipleOf(context, static_cast(i))) + return false; + } + + return true; + } + + bool CheckUint(Context& context, uint64_t i) const { + if (!(type_ & ((1 << kIntegerSchemaType) | (1 << kNumberSchemaType)))) { + DisallowedType(context, GetIntegerString()); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + } + + if (!minimum_.IsNull()) { + if (minimum_.IsUint64()) { + if (exclusiveMinimum_ ? 
i <= minimum_.GetUint64() : i < minimum_.GetUint64()) { + context.error_handler.BelowMinimum(i, minimum_, exclusiveMinimum_); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString()); + } + } + else if (minimum_.IsInt64()) + /* do nothing */; // i >= 0 > minimum.Getint64() + else if (!CheckDoubleMinimum(context, static_cast(i))) + return false; + } + + if (!maximum_.IsNull()) { + if (maximum_.IsUint64()) { + if (exclusiveMaximum_ ? i >= maximum_.GetUint64() : i > maximum_.GetUint64()) { + context.error_handler.AboveMaximum(i, maximum_, exclusiveMaximum_); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString()); + } + } + else if (maximum_.IsInt64()) { + context.error_handler.AboveMaximum(i, maximum_, exclusiveMaximum_); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString()); // i >= 0 > maximum_ + } + else if (!CheckDoubleMaximum(context, static_cast(i))) + return false; + } + + if (!multipleOf_.IsNull()) { + if (multipleOf_.IsUint64()) { + if (i % multipleOf_.GetUint64() != 0) { + context.error_handler.NotMultipleOf(i, multipleOf_); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMultipleOfString()); + } + } + else if (!CheckDoubleMultipleOf(context, static_cast(i))) + return false; + } + + return true; + } + + bool CheckDoubleMinimum(Context& context, double d) const { + if (exclusiveMinimum_ ? d <= minimum_.GetDouble() : d < minimum_.GetDouble()) { + context.error_handler.BelowMinimum(d, minimum_, exclusiveMinimum_); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString()); + } + return true; + } + + bool CheckDoubleMaximum(Context& context, double d) const { + if (exclusiveMaximum_ ? d >= maximum_.GetDouble() : d > maximum_.GetDouble()) { + context.error_handler.AboveMaximum(d, maximum_, exclusiveMaximum_); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString()); + } + return true; + } + + bool CheckDoubleMultipleOf(Context& context, double d) const { + double a = std::abs(d), b = std::abs(multipleOf_.GetDouble()); + double q = std::floor(a / b); + double r = a - q * b; + if (r > 0.0) { + context.error_handler.NotMultipleOf(d, multipleOf_); + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMultipleOfString()); + } + return true; + } + + void DisallowedType(Context& context, const ValueType& actualType) const { + ErrorHandler& eh = context.error_handler; + eh.StartDisallowedType(); + + if (type_ & (1 << kNullSchemaType)) eh.AddExpectedType(GetNullString()); + if (type_ & (1 << kBooleanSchemaType)) eh.AddExpectedType(GetBooleanString()); + if (type_ & (1 << kObjectSchemaType)) eh.AddExpectedType(GetObjectString()); + if (type_ & (1 << kArraySchemaType)) eh.AddExpectedType(GetArrayString()); + if (type_ & (1 << kStringSchemaType)) eh.AddExpectedType(GetStringString()); + + if (type_ & (1 << kNumberSchemaType)) eh.AddExpectedType(GetNumberString()); + else if (type_ & (1 << kIntegerSchemaType)) eh.AddExpectedType(GetIntegerString()); + + eh.EndDisallowedType(actualType); + } + + struct Property { + Property() : schema(), dependenciesSchema(), dependenciesValidatorIndex(), dependencies(), required(false) {} + ~Property() { AllocatorType::Free(dependencies); } + SValue name; + const SchemaType* schema; + const SchemaType* dependenciesSchema; + SizeType dependenciesValidatorIndex; + bool* dependencies; + bool required; + }; + + struct PatternProperty { + PatternProperty() : schema(), pattern() {} + ~PatternProperty() { + if (pattern) { + pattern->~RegexType(); + AllocatorType::Free(pattern); + } + } + const SchemaType* schema; + RegexType* pattern; + }; + + AllocatorType* allocator_; + SValue uri_; + PointerType 
pointer_; + const SchemaType* typeless_; + uint64_t* enum_; + SizeType enumCount_; + SchemaArray allOf_; + SchemaArray anyOf_; + SchemaArray oneOf_; + const SchemaType* not_; + unsigned type_; // bitmask of kSchemaType + SizeType validatorCount_; + SizeType notValidatorIndex_; + + Property* properties_; + const SchemaType* additionalPropertiesSchema_; + PatternProperty* patternProperties_; + SizeType patternPropertyCount_; + SizeType propertyCount_; + SizeType minProperties_; + SizeType maxProperties_; + bool additionalProperties_; + bool hasDependencies_; + bool hasRequired_; + bool hasSchemaDependencies_; + + const SchemaType* additionalItemsSchema_; + const SchemaType* itemsList_; + const SchemaType** itemsTuple_; + SizeType itemsTupleCount_; + SizeType minItems_; + SizeType maxItems_; + bool additionalItems_; + bool uniqueItems_; + + RegexType* pattern_; + SizeType minLength_; + SizeType maxLength_; + + SValue minimum_; + SValue maximum_; + SValue multipleOf_; + bool exclusiveMinimum_; + bool exclusiveMaximum_; + + SizeType defaultValueLength_; +}; + +template +struct TokenHelper { + RAPIDJSON_FORCEINLINE static void AppendIndexToken(Stack& documentStack, SizeType index) { + *documentStack.template Push() = '/'; + char buffer[21]; + size_t length = static_cast((sizeof(SizeType) == 4 ? u32toa(index, buffer) : u64toa(index, buffer)) - buffer); + for (size_t i = 0; i < length; i++) + *documentStack.template Push() = static_cast(buffer[i]); + } +}; + +// Partial specialized version for char to prevent buffer copying. +template +struct TokenHelper { + RAPIDJSON_FORCEINLINE static void AppendIndexToken(Stack& documentStack, SizeType index) { + if (sizeof(SizeType) == 4) { + char *buffer = documentStack.template Push(1 + 10); // '/' + uint + *buffer++ = '/'; + const char* end = internal::u32toa(index, buffer); + documentStack.template Pop(static_cast(10 - (end - buffer))); + } + else { + char *buffer = documentStack.template Push(1 + 20); // '/' + uint64 + *buffer++ = '/'; + const char* end = internal::u64toa(index, buffer); + documentStack.template Pop(static_cast(20 - (end - buffer))); + } + } +}; + +} // namespace internal + +/////////////////////////////////////////////////////////////////////////////// +// IGenericRemoteSchemaDocumentProvider + +template +class IGenericRemoteSchemaDocumentProvider { +public: + typedef typename SchemaDocumentType::Ch Ch; + + virtual ~IGenericRemoteSchemaDocumentProvider() {} + virtual const SchemaDocumentType* GetRemoteDocument(const Ch* uri, SizeType length) = 0; +}; + +/////////////////////////////////////////////////////////////////////////////// +// GenericSchemaDocument + +//! JSON schema document. +/*! + A JSON schema document is a compiled version of a JSON schema. + It is basically a tree of internal::Schema. + + \note This is an immutable class (i.e. its instance cannot be modified after construction). + \tparam ValueT Type of JSON value (e.g. \c Value ), which also determine the encoding. + \tparam Allocator Allocator type for allocating memory of this document. 
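+
+    A hedged usage sketch (editor's addition, not upstream documentation;
+    \c schemaJson is an assumed variable holding the schema source text):
+\code
+Document sd;                 // rapidjson::Document
+sd.Parse(schemaJson);        // parse the schema JSON itself first
+SchemaDocument schema(sd);   // compile once; immutable and reusable afterwards
+\endcode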
+*/ +template +class GenericSchemaDocument { +public: + typedef ValueT ValueType; + typedef IGenericRemoteSchemaDocumentProvider IRemoteSchemaDocumentProviderType; + typedef Allocator AllocatorType; + typedef typename ValueType::EncodingType EncodingType; + typedef typename EncodingType::Ch Ch; + typedef internal::Schema SchemaType; + typedef GenericPointer PointerType; + typedef GenericValue URIType; + friend class internal::Schema; + template + friend class GenericSchemaValidator; + + //! Constructor. + /*! + Compile a JSON document into schema document. + + \param document A JSON document as source. + \param uri The base URI of this schema document for purposes of violation reporting. + \param uriLength Length of \c name, in code points. + \param remoteProvider An optional remote schema document provider for resolving remote reference. Can be null. + \param allocator An optional allocator instance for allocating memory. Can be null. + */ + explicit GenericSchemaDocument(const ValueType& document, const Ch* uri = 0, SizeType uriLength = 0, + IRemoteSchemaDocumentProviderType* remoteProvider = 0, Allocator* allocator = 0) : + remoteProvider_(remoteProvider), + allocator_(allocator), + ownAllocator_(), + root_(), + typeless_(), + schemaMap_(allocator, kInitialSchemaMapSize), + schemaRef_(allocator, kInitialSchemaRefSize) + { + if (!allocator_) + ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator)(); + + Ch noUri[1] = {0}; + uri_.SetString(uri ? uri : noUri, uriLength, *allocator_); + + typeless_ = static_cast(allocator_->Malloc(sizeof(SchemaType))); + new (typeless_) SchemaType(this, PointerType(), ValueType(kObjectType).Move(), ValueType(kObjectType).Move(), allocator_); + + // Generate root schema, it will call CreateSchema() to create sub-schemas, + // And call AddRefSchema() if there are $ref. + CreateSchemaRecursive(&root_, PointerType(), document, document); + + // Resolve $ref + while (!schemaRef_.Empty()) { + SchemaRefEntry* refEntry = schemaRef_.template Pop(1); + if (const SchemaType* s = GetSchema(refEntry->target)) { + if (refEntry->schema) + *refEntry->schema = s; + + // Create entry in map if not exist + if (!GetSchema(refEntry->source)) { + new (schemaMap_.template Push()) SchemaEntry(refEntry->source, const_cast(s), false, allocator_); + } + } + else if (refEntry->schema) + *refEntry->schema = typeless_; + + refEntry->~SchemaRefEntry(); + } + + RAPIDJSON_ASSERT(root_ != 0); + + schemaRef_.ShrinkToFit(); // Deallocate all memory for ref + } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + //! Move constructor in C++11 + GenericSchemaDocument(GenericSchemaDocument&& rhs) RAPIDJSON_NOEXCEPT : + remoteProvider_(rhs.remoteProvider_), + allocator_(rhs.allocator_), + ownAllocator_(rhs.ownAllocator_), + root_(rhs.root_), + typeless_(rhs.typeless_), + schemaMap_(std::move(rhs.schemaMap_)), + schemaRef_(std::move(rhs.schemaRef_)), + uri_(std::move(rhs.uri_)) + { + rhs.remoteProvider_ = 0; + rhs.allocator_ = 0; + rhs.ownAllocator_ = 0; + rhs.typeless_ = 0; + } +#endif + + //! Destructor + ~GenericSchemaDocument() { + while (!schemaMap_.Empty()) + schemaMap_.template Pop(1)->~SchemaEntry(); + + if (typeless_) { + typeless_->~SchemaType(); + Allocator::Free(typeless_); + } + + RAPIDJSON_DELETE(ownAllocator_); + } + + const URIType& GetURI() const { return uri_; } + + //! Get the root schema. + const SchemaType& GetRoot() const { return *root_; } + +private: + //! Prohibit copying + GenericSchemaDocument(const GenericSchemaDocument&); + //! 
Prohibit assignment + GenericSchemaDocument& operator=(const GenericSchemaDocument&); + + struct SchemaRefEntry { + SchemaRefEntry(const PointerType& s, const PointerType& t, const SchemaType** outSchema, Allocator *allocator) : source(s, allocator), target(t, allocator), schema(outSchema) {} + PointerType source; + PointerType target; + const SchemaType** schema; + }; + + struct SchemaEntry { + SchemaEntry(const PointerType& p, SchemaType* s, bool o, Allocator* allocator) : pointer(p, allocator), schema(s), owned(o) {} + ~SchemaEntry() { + if (owned) { + schema->~SchemaType(); + Allocator::Free(schema); + } + } + PointerType pointer; + SchemaType* schema; + bool owned; + }; + + void CreateSchemaRecursive(const SchemaType** schema, const PointerType& pointer, const ValueType& v, const ValueType& document) { + if (schema) + *schema = typeless_; + + if (v.GetType() == kObjectType) { + const SchemaType* s = GetSchema(pointer); + if (!s) + CreateSchema(schema, pointer, v, document); + + for (typename ValueType::ConstMemberIterator itr = v.MemberBegin(); itr != v.MemberEnd(); ++itr) + CreateSchemaRecursive(0, pointer.Append(itr->name, allocator_), itr->value, document); + } + else if (v.GetType() == kArrayType) + for (SizeType i = 0; i < v.Size(); i++) + CreateSchemaRecursive(0, pointer.Append(i, allocator_), v[i], document); + } + + void CreateSchema(const SchemaType** schema, const PointerType& pointer, const ValueType& v, const ValueType& document) { + RAPIDJSON_ASSERT(pointer.IsValid()); + if (v.IsObject()) { + if (!HandleRefSchema(pointer, schema, v, document)) { + SchemaType* s = new (allocator_->Malloc(sizeof(SchemaType))) SchemaType(this, pointer, v, document, allocator_); + new (schemaMap_.template Push()) SchemaEntry(pointer, s, true, allocator_); + if (schema) + *schema = s; + } + } + } + + bool HandleRefSchema(const PointerType& source, const SchemaType** schema, const ValueType& v, const ValueType& document) { + static const Ch kRefString[] = { '$', 'r', 'e', 'f', '\0' }; + static const ValueType kRefValue(kRefString, 4); + + typename ValueType::ConstMemberIterator itr = v.FindMember(kRefValue); + if (itr == v.MemberEnd()) + return false; + + if (itr->value.IsString()) { + SizeType len = itr->value.GetStringLength(); + if (len > 0) { + const Ch* s = itr->value.GetString(); + SizeType i = 0; + while (i < len && s[i] != '#') // Find the first # + i++; + + if (i > 0) { // Remote reference, resolve immediately + if (remoteProvider_) { + if (const GenericSchemaDocument* remoteDocument = remoteProvider_->GetRemoteDocument(s, i)) { + PointerType pointer(&s[i], len - i, allocator_); + if (pointer.IsValid()) { + if (const SchemaType* sc = remoteDocument->GetSchema(pointer)) { + if (schema) + *schema = sc; + new (schemaMap_.template Push()) SchemaEntry(source, const_cast(sc), false, allocator_); + return true; + } + } + } + } + } + else if (s[i] == '#') { // Local reference, defer resolution + PointerType pointer(&s[i], len - i, allocator_); + if (pointer.IsValid()) { + if (const ValueType* nv = pointer.Get(document)) + if (HandleRefSchema(source, schema, *nv, document)) + return true; + + new (schemaRef_.template Push()) SchemaRefEntry(source, pointer, schema, allocator_); + return true; + } + } + } + } + return false; + } + + const SchemaType* GetSchema(const PointerType& pointer) const { + for (const SchemaEntry* target = schemaMap_.template Bottom(); target != schemaMap_.template End(); ++target) + if (pointer == target->pointer) + return target->schema; + return 0; + } + + PointerType 
GetPointer(const SchemaType* schema) const { + for (const SchemaEntry* target = schemaMap_.template Bottom(); target != schemaMap_.template End(); ++target) + if (schema == target->schema) + return target->pointer; + return PointerType(); + } + + const SchemaType* GetTypeless() const { return typeless_; } + + static const size_t kInitialSchemaMapSize = 64; + static const size_t kInitialSchemaRefSize = 64; + + IRemoteSchemaDocumentProviderType* remoteProvider_; + Allocator *allocator_; + Allocator *ownAllocator_; + const SchemaType* root_; //!< Root schema. + SchemaType* typeless_; + internal::Stack schemaMap_; // Stores created Pointer -> Schemas + internal::Stack schemaRef_; // Stores Pointer from $ref and schema which holds the $ref + URIType uri_; +}; + +//! GenericSchemaDocument using Value type. +typedef GenericSchemaDocument SchemaDocument; +//! IGenericRemoteSchemaDocumentProvider using SchemaDocument. +typedef IGenericRemoteSchemaDocumentProvider IRemoteSchemaDocumentProvider; + +/////////////////////////////////////////////////////////////////////////////// +// GenericSchemaValidator + +//! JSON Schema Validator. +/*! + A SAX style JSON schema validator. + It uses a \c GenericSchemaDocument to validate SAX events. + It delegates the incoming SAX events to an output handler. + The default output handler does nothing. + It can be reused multiple times by calling \c Reset(). + + \tparam SchemaDocumentType Type of schema document. + \tparam OutputHandler Type of output handler. Default handler does nothing. + \tparam StateAllocator Allocator for storing the internal validation states. +*/ +template < + typename SchemaDocumentType, + typename OutputHandler = BaseReaderHandler, + typename StateAllocator = CrtAllocator> +class GenericSchemaValidator : + public internal::ISchemaStateFactory, + public internal::ISchemaValidator, + public internal::IValidationErrorHandler +{ +public: + typedef typename SchemaDocumentType::SchemaType SchemaType; + typedef typename SchemaDocumentType::PointerType PointerType; + typedef typename SchemaType::EncodingType EncodingType; + typedef typename SchemaType::SValue SValue; + typedef typename EncodingType::Ch Ch; + typedef GenericStringRef StringRefType; + typedef GenericValue ValueType; + + //! Constructor without output handler. + /*! + \param schemaDocument The schema document to conform to. + \param allocator Optional allocator for storing internal validation states. + \param schemaStackCapacity Optional initial capacity of schema path stack. + \param documentStackCapacity Optional initial capacity of document path stack. + */ + GenericSchemaValidator( + const SchemaDocumentType& schemaDocument, + StateAllocator* allocator = 0, + size_t schemaStackCapacity = kDefaultSchemaStackCapacity, + size_t documentStackCapacity = kDefaultDocumentStackCapacity) + : + schemaDocument_(&schemaDocument), + root_(schemaDocument.GetRoot()), + stateAllocator_(allocator), + ownStateAllocator_(0), + schemaStack_(allocator, schemaStackCapacity), + documentStack_(allocator, documentStackCapacity), + outputHandler_(0), + error_(kObjectType), + currentError_(), + missingDependents_(), + valid_(true) +#if RAPIDJSON_SCHEMA_VERBOSE + , depth_(0) +#endif + { + } + + //! Constructor with output handler. + /*! + \param schemaDocument The schema document to conform to. + \param allocator Optional allocator for storing internal validation states. + \param schemaStackCapacity Optional initial capacity of schema path stack. 
+ \param documentStackCapacity Optional initial capacity of document path stack. + */ + GenericSchemaValidator( + const SchemaDocumentType& schemaDocument, + OutputHandler& outputHandler, + StateAllocator* allocator = 0, + size_t schemaStackCapacity = kDefaultSchemaStackCapacity, + size_t documentStackCapacity = kDefaultDocumentStackCapacity) + : + schemaDocument_(&schemaDocument), + root_(schemaDocument.GetRoot()), + stateAllocator_(allocator), + ownStateAllocator_(0), + schemaStack_(allocator, schemaStackCapacity), + documentStack_(allocator, documentStackCapacity), + outputHandler_(&outputHandler), + error_(kObjectType), + currentError_(), + missingDependents_(), + valid_(true) +#if RAPIDJSON_SCHEMA_VERBOSE + , depth_(0) +#endif + { + } + + //! Destructor. + ~GenericSchemaValidator() { + Reset(); + RAPIDJSON_DELETE(ownStateAllocator_); + } + + //! Reset the internal states. + void Reset() { + while (!schemaStack_.Empty()) + PopSchema(); + documentStack_.Clear(); + error_.SetObject(); + currentError_.SetNull(); + missingDependents_.SetNull(); + valid_ = true; + } + + //! Checks whether the current state is valid. + // Implementation of ISchemaValidator + virtual bool IsValid() const { return valid_; } + + //! Gets the error object. + ValueType& GetError() { return error_; } + const ValueType& GetError() const { return error_; } + + //! Gets the JSON pointer pointed to the invalid schema. + PointerType GetInvalidSchemaPointer() const { + return schemaStack_.Empty() ? PointerType() : CurrentSchema().GetPointer(); + } + + //! Gets the keyword of invalid schema. + const Ch* GetInvalidSchemaKeyword() const { + return schemaStack_.Empty() ? 0 : CurrentContext().invalidKeyword; + } + + //! Gets the JSON pointer pointed to the invalid value. + PointerType GetInvalidDocumentPointer() const { + if (documentStack_.Empty()) { + return PointerType(); + } + else { + return PointerType(documentStack_.template Bottom(), documentStack_.GetSize() / sizeof(Ch)); + } + } + + void NotMultipleOf(int64_t actual, const SValue& expected) { + AddNumberError(SchemaType::GetMultipleOfString(), ValueType(actual).Move(), expected); + } + void NotMultipleOf(uint64_t actual, const SValue& expected) { + AddNumberError(SchemaType::GetMultipleOfString(), ValueType(actual).Move(), expected); + } + void NotMultipleOf(double actual, const SValue& expected) { + AddNumberError(SchemaType::GetMultipleOfString(), ValueType(actual).Move(), expected); + } + void AboveMaximum(int64_t actual, const SValue& expected, bool exclusive) { + AddNumberError(SchemaType::GetMaximumString(), ValueType(actual).Move(), expected, + exclusive ? &SchemaType::GetExclusiveMaximumString : 0); + } + void AboveMaximum(uint64_t actual, const SValue& expected, bool exclusive) { + AddNumberError(SchemaType::GetMaximumString(), ValueType(actual).Move(), expected, + exclusive ? &SchemaType::GetExclusiveMaximumString : 0); + } + void AboveMaximum(double actual, const SValue& expected, bool exclusive) { + AddNumberError(SchemaType::GetMaximumString(), ValueType(actual).Move(), expected, + exclusive ? &SchemaType::GetExclusiveMaximumString : 0); + } + void BelowMinimum(int64_t actual, const SValue& expected, bool exclusive) { + AddNumberError(SchemaType::GetMinimumString(), ValueType(actual).Move(), expected, + exclusive ? &SchemaType::GetExclusiveMinimumString : 0); + } + void BelowMinimum(uint64_t actual, const SValue& expected, bool exclusive) { + AddNumberError(SchemaType::GetMinimumString(), ValueType(actual).Move(), expected, + exclusive ? 
&SchemaType::GetExclusiveMinimumString : 0); + } + void BelowMinimum(double actual, const SValue& expected, bool exclusive) { + AddNumberError(SchemaType::GetMinimumString(), ValueType(actual).Move(), expected, + exclusive ? &SchemaType::GetExclusiveMinimumString : 0); + } + + void TooLong(const Ch* str, SizeType length, SizeType expected) { + AddNumberError(SchemaType::GetMaxLengthString(), + ValueType(str, length, GetStateAllocator()).Move(), SValue(expected).Move()); + } + void TooShort(const Ch* str, SizeType length, SizeType expected) { + AddNumberError(SchemaType::GetMinLengthString(), + ValueType(str, length, GetStateAllocator()).Move(), SValue(expected).Move()); + } + void DoesNotMatch(const Ch* str, SizeType length) { + currentError_.SetObject(); + currentError_.AddMember(GetActualString(), ValueType(str, length, GetStateAllocator()).Move(), GetStateAllocator()); + AddCurrentError(SchemaType::GetPatternString()); + } + + void DisallowedItem(SizeType index) { + currentError_.SetObject(); + currentError_.AddMember(GetDisallowedString(), ValueType(index).Move(), GetStateAllocator()); + AddCurrentError(SchemaType::GetAdditionalItemsString(), true); + } + void TooFewItems(SizeType actualCount, SizeType expectedCount) { + AddNumberError(SchemaType::GetMinItemsString(), + ValueType(actualCount).Move(), SValue(expectedCount).Move()); + } + void TooManyItems(SizeType actualCount, SizeType expectedCount) { + AddNumberError(SchemaType::GetMaxItemsString(), + ValueType(actualCount).Move(), SValue(expectedCount).Move()); + } + void DuplicateItems(SizeType index1, SizeType index2) { + ValueType duplicates(kArrayType); + duplicates.PushBack(index1, GetStateAllocator()); + duplicates.PushBack(index2, GetStateAllocator()); + currentError_.SetObject(); + currentError_.AddMember(GetDuplicatesString(), duplicates, GetStateAllocator()); + AddCurrentError(SchemaType::GetUniqueItemsString(), true); + } + + void TooManyProperties(SizeType actualCount, SizeType expectedCount) { + AddNumberError(SchemaType::GetMaxPropertiesString(), + ValueType(actualCount).Move(), SValue(expectedCount).Move()); + } + void TooFewProperties(SizeType actualCount, SizeType expectedCount) { + AddNumberError(SchemaType::GetMinPropertiesString(), + ValueType(actualCount).Move(), SValue(expectedCount).Move()); + } + void StartMissingProperties() { + currentError_.SetArray(); + } + void AddMissingProperty(const SValue& name) { + currentError_.PushBack(ValueType(name, GetStateAllocator()).Move(), GetStateAllocator()); + } + bool EndMissingProperties() { + if (currentError_.Empty()) + return false; + ValueType error(kObjectType); + error.AddMember(GetMissingString(), currentError_, GetStateAllocator()); + currentError_ = error; + AddCurrentError(SchemaType::GetRequiredString()); + return true; + } + void PropertyViolations(ISchemaValidator** subvalidators, SizeType count) { + for (SizeType i = 0; i < count; ++i) + MergeError(static_cast(subvalidators[i])->GetError()); + } + void DisallowedProperty(const Ch* name, SizeType length) { + currentError_.SetObject(); + currentError_.AddMember(GetDisallowedString(), ValueType(name, length, GetStateAllocator()).Move(), GetStateAllocator()); + AddCurrentError(SchemaType::GetAdditionalPropertiesString(), true); + } + + void StartDependencyErrors() { + currentError_.SetObject(); + } + void StartMissingDependentProperties() { + missingDependents_.SetArray(); + } + void AddMissingDependentProperty(const SValue& targetName) { + missingDependents_.PushBack(ValueType(targetName, 
GetStateAllocator()).Move(), GetStateAllocator()); + } + void EndMissingDependentProperties(const SValue& sourceName) { + if (!missingDependents_.Empty()) + currentError_.AddMember(ValueType(sourceName, GetStateAllocator()).Move(), + missingDependents_, GetStateAllocator()); + } + void AddDependencySchemaError(const SValue& sourceName, ISchemaValidator* subvalidator) { + currentError_.AddMember(ValueType(sourceName, GetStateAllocator()).Move(), + static_cast(subvalidator)->GetError(), GetStateAllocator()); + } + bool EndDependencyErrors() { + if (currentError_.ObjectEmpty()) + return false; + ValueType error(kObjectType); + error.AddMember(GetErrorsString(), currentError_, GetStateAllocator()); + currentError_ = error; + AddCurrentError(SchemaType::GetDependenciesString()); + return true; + } + + void DisallowedValue() { + currentError_.SetObject(); + AddCurrentError(SchemaType::GetEnumString()); + } + void StartDisallowedType() { + currentError_.SetArray(); + } + void AddExpectedType(const typename SchemaType::ValueType& expectedType) { + currentError_.PushBack(ValueType(expectedType, GetStateAllocator()).Move(), GetStateAllocator()); + } + void EndDisallowedType(const typename SchemaType::ValueType& actualType) { + ValueType error(kObjectType); + error.AddMember(GetExpectedString(), currentError_, GetStateAllocator()); + error.AddMember(GetActualString(), ValueType(actualType, GetStateAllocator()).Move(), GetStateAllocator()); + currentError_ = error; + AddCurrentError(SchemaType::GetTypeString()); + } + void NotAllOf(ISchemaValidator** subvalidators, SizeType count) { + for (SizeType i = 0; i < count; ++i) { + MergeError(static_cast(subvalidators[i])->GetError()); + } + } + void NoneOf(ISchemaValidator** subvalidators, SizeType count) { + AddErrorArray(SchemaType::GetAnyOfString(), subvalidators, count); + } + void NotOneOf(ISchemaValidator** subvalidators, SizeType count) { + AddErrorArray(SchemaType::GetOneOfString(), subvalidators, count); + } + void Disallowed() { + currentError_.SetObject(); + AddCurrentError(SchemaType::GetNotString()); + } + +#define RAPIDJSON_STRING_(name, ...) 
\ + static const StringRefType& Get##name##String() {\ + static const Ch s[] = { __VA_ARGS__, '\0' };\ + static const StringRefType v(s, static_cast(sizeof(s) / sizeof(Ch) - 1)); \ + return v;\ + } + + RAPIDJSON_STRING_(InstanceRef, 'i', 'n', 's', 't', 'a', 'n', 'c', 'e', 'R', 'e', 'f') + RAPIDJSON_STRING_(SchemaRef, 's', 'c', 'h', 'e', 'm', 'a', 'R', 'e', 'f') + RAPIDJSON_STRING_(Expected, 'e', 'x', 'p', 'e', 'c', 't', 'e', 'd') + RAPIDJSON_STRING_(Actual, 'a', 'c', 't', 'u', 'a', 'l') + RAPIDJSON_STRING_(Disallowed, 'd', 'i', 's', 'a', 'l', 'l', 'o', 'w', 'e', 'd') + RAPIDJSON_STRING_(Missing, 'm', 'i', 's', 's', 'i', 'n', 'g') + RAPIDJSON_STRING_(Errors, 'e', 'r', 'r', 'o', 'r', 's') + RAPIDJSON_STRING_(Duplicates, 'd', 'u', 'p', 'l', 'i', 'c', 'a', 't', 'e', 's') + +#undef RAPIDJSON_STRING_ + +#if RAPIDJSON_SCHEMA_VERBOSE +#define RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_() \ +RAPIDJSON_MULTILINEMACRO_BEGIN\ + *documentStack_.template Push() = '\0';\ + documentStack_.template Pop(1);\ + internal::PrintInvalidDocument(documentStack_.template Bottom());\ +RAPIDJSON_MULTILINEMACRO_END +#else +#define RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_() +#endif + +#define RAPIDJSON_SCHEMA_HANDLE_BEGIN_(method, arg1)\ + if (!valid_) return false; \ + if (!BeginValue() || !CurrentSchema().method arg1) {\ + RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_();\ + return valid_ = false;\ + } + +#define RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(method, arg2)\ + for (Context* context = schemaStack_.template Bottom(); context != schemaStack_.template End(); context++) {\ + if (context->hasher)\ + static_cast(context->hasher)->method arg2;\ + if (context->validators)\ + for (SizeType i_ = 0; i_ < context->validatorCount; i_++)\ + static_cast(context->validators[i_])->method arg2;\ + if (context->patternPropertiesValidators)\ + for (SizeType i_ = 0; i_ < context->patternPropertiesValidatorCount; i_++)\ + static_cast(context->patternPropertiesValidators[i_])->method arg2;\ + } + +#define RAPIDJSON_SCHEMA_HANDLE_END_(method, arg2)\ + return valid_ = EndValue() && (!outputHandler_ || outputHandler_->method arg2) + +#define RAPIDJSON_SCHEMA_HANDLE_VALUE_(method, arg1, arg2) \ + RAPIDJSON_SCHEMA_HANDLE_BEGIN_ (method, arg1);\ + RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(method, arg2);\ + RAPIDJSON_SCHEMA_HANDLE_END_ (method, arg2) + + bool Null() { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Null, (CurrentContext()), ( )); } + bool Bool(bool b) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Bool, (CurrentContext(), b), (b)); } + bool Int(int i) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Int, (CurrentContext(), i), (i)); } + bool Uint(unsigned u) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Uint, (CurrentContext(), u), (u)); } + bool Int64(int64_t i) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Int64, (CurrentContext(), i), (i)); } + bool Uint64(uint64_t u) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Uint64, (CurrentContext(), u), (u)); } + bool Double(double d) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Double, (CurrentContext(), d), (d)); } + bool RawNumber(const Ch* str, SizeType length, bool copy) + { RAPIDJSON_SCHEMA_HANDLE_VALUE_(String, (CurrentContext(), str, length, copy), (str, length, copy)); } + bool String(const Ch* str, SizeType length, bool copy) + { RAPIDJSON_SCHEMA_HANDLE_VALUE_(String, (CurrentContext(), str, length, copy), (str, length, copy)); } + + bool StartObject() { + RAPIDJSON_SCHEMA_HANDLE_BEGIN_(StartObject, (CurrentContext())); + RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(StartObject, ()); + return valid_ = !outputHandler_ || outputHandler_->StartObject(); + } + + bool Key(const Ch* str, SizeType len, bool 
copy) { + if (!valid_) return false; + AppendToken(str, len); + if (!CurrentSchema().Key(CurrentContext(), str, len, copy)) return valid_ = false; + RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(Key, (str, len, copy)); + return valid_ = !outputHandler_ || outputHandler_->Key(str, len, copy); + } + + bool EndObject(SizeType memberCount) { + if (!valid_) return false; + RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(EndObject, (memberCount)); + if (!CurrentSchema().EndObject(CurrentContext(), memberCount)) return valid_ = false; + RAPIDJSON_SCHEMA_HANDLE_END_(EndObject, (memberCount)); + } + + bool StartArray() { + RAPIDJSON_SCHEMA_HANDLE_BEGIN_(StartArray, (CurrentContext())); + RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(StartArray, ()); + return valid_ = !outputHandler_ || outputHandler_->StartArray(); + } + + bool EndArray(SizeType elementCount) { + if (!valid_) return false; + RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(EndArray, (elementCount)); + if (!CurrentSchema().EndArray(CurrentContext(), elementCount)) return valid_ = false; + RAPIDJSON_SCHEMA_HANDLE_END_(EndArray, (elementCount)); + } + +#undef RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_ +#undef RAPIDJSON_SCHEMA_HANDLE_BEGIN_ +#undef RAPIDJSON_SCHEMA_HANDLE_PARALLEL_ +#undef RAPIDJSON_SCHEMA_HANDLE_VALUE_ + + // Implementation of ISchemaStateFactory + virtual ISchemaValidator* CreateSchemaValidator(const SchemaType& root) { + return new (GetStateAllocator().Malloc(sizeof(GenericSchemaValidator))) GenericSchemaValidator(*schemaDocument_, root, documentStack_.template Bottom(), documentStack_.GetSize(), +#if RAPIDJSON_SCHEMA_VERBOSE + depth_ + 1, +#endif + &GetStateAllocator()); + } + + virtual void DestroySchemaValidator(ISchemaValidator* validator) { + GenericSchemaValidator* v = static_cast(validator); + v->~GenericSchemaValidator(); + StateAllocator::Free(v); + } + + virtual void* CreateHasher() { + return new (GetStateAllocator().Malloc(sizeof(HasherType))) HasherType(&GetStateAllocator()); + } + + virtual uint64_t GetHashCode(void* hasher) { + return static_cast(hasher)->GetHashCode(); + } + + virtual void DestroryHasher(void* hasher) { + HasherType* h = static_cast(hasher); + h->~HasherType(); + StateAllocator::Free(h); + } + + virtual void* MallocState(size_t size) { + return GetStateAllocator().Malloc(size); + } + + virtual void FreeState(void* p) { + StateAllocator::Free(p); + } + +private: + typedef typename SchemaType::Context Context; + typedef GenericValue, StateAllocator> HashCodeArray; + typedef internal::Hasher HasherType; + + GenericSchemaValidator( + const SchemaDocumentType& schemaDocument, + const SchemaType& root, + const char* basePath, size_t basePathSize, +#if RAPIDJSON_SCHEMA_VERBOSE + unsigned depth, +#endif + StateAllocator* allocator = 0, + size_t schemaStackCapacity = kDefaultSchemaStackCapacity, + size_t documentStackCapacity = kDefaultDocumentStackCapacity) + : + schemaDocument_(&schemaDocument), + root_(root), + stateAllocator_(allocator), + ownStateAllocator_(0), + schemaStack_(allocator, schemaStackCapacity), + documentStack_(allocator, documentStackCapacity), + outputHandler_(0), + error_(kObjectType), + currentError_(), + missingDependents_(), + valid_(true) +#if RAPIDJSON_SCHEMA_VERBOSE + , depth_(depth) +#endif + { + if (basePath && basePathSize) + memcpy(documentStack_.template Push(basePathSize), basePath, basePathSize); + } + + StateAllocator& GetStateAllocator() { + if (!stateAllocator_) + stateAllocator_ = ownStateAllocator_ = RAPIDJSON_NEW(StateAllocator)(); + return *stateAllocator_; + } + + bool BeginValue() { + if 
(schemaStack_.Empty()) + PushSchema(root_); + else { + if (CurrentContext().inArray) + internal::TokenHelper, Ch>::AppendIndexToken(documentStack_, CurrentContext().arrayElementIndex); + + if (!CurrentSchema().BeginValue(CurrentContext())) + return false; + + SizeType count = CurrentContext().patternPropertiesSchemaCount; + const SchemaType** sa = CurrentContext().patternPropertiesSchemas; + typename Context::PatternValidatorType patternValidatorType = CurrentContext().valuePatternValidatorType; + bool valueUniqueness = CurrentContext().valueUniqueness; + RAPIDJSON_ASSERT(CurrentContext().valueSchema); + PushSchema(*CurrentContext().valueSchema); + + if (count > 0) { + CurrentContext().objectPatternValidatorType = patternValidatorType; + ISchemaValidator**& va = CurrentContext().patternPropertiesValidators; + SizeType& validatorCount = CurrentContext().patternPropertiesValidatorCount; + va = static_cast(MallocState(sizeof(ISchemaValidator*) * count)); + for (SizeType i = 0; i < count; i++) + va[validatorCount++] = CreateSchemaValidator(*sa[i]); + } + + CurrentContext().arrayUniqueness = valueUniqueness; + } + return true; + } + + bool EndValue() { + if (!CurrentSchema().EndValue(CurrentContext())) + return false; + +#if RAPIDJSON_SCHEMA_VERBOSE + GenericStringBuffer sb; + schemaDocument_->GetPointer(&CurrentSchema()).Stringify(sb); + + *documentStack_.template Push() = '\0'; + documentStack_.template Pop(1); + internal::PrintValidatorPointers(depth_, sb.GetString(), documentStack_.template Bottom()); +#endif + + uint64_t h = CurrentContext().arrayUniqueness ? static_cast(CurrentContext().hasher)->GetHashCode() : 0; + + PopSchema(); + + if (!schemaStack_.Empty()) { + Context& context = CurrentContext(); + if (context.valueUniqueness) { + HashCodeArray* a = static_cast(context.arrayElementHashCodes); + if (!a) + CurrentContext().arrayElementHashCodes = a = new (GetStateAllocator().Malloc(sizeof(HashCodeArray))) HashCodeArray(kArrayType); + for (typename HashCodeArray::ConstValueIterator itr = a->Begin(); itr != a->End(); ++itr) + if (itr->GetUint64() == h) { + DuplicateItems(static_cast(itr - a->Begin()), a->Size()); + RAPIDJSON_INVALID_KEYWORD_RETURN(SchemaType::GetUniqueItemsString()); + } + a->PushBack(h, GetStateAllocator()); + } + } + + // Remove the last token of document pointer + while (!documentStack_.Empty() && *documentStack_.template Pop(1) != '/') + ; + + return true; + } + + void AppendToken(const Ch* str, SizeType len) { + documentStack_.template Reserve(1 + len * 2); // worst case all characters are escaped as two characters + *documentStack_.template PushUnsafe() = '/'; + for (SizeType i = 0; i < len; i++) { + if (str[i] == '~') { + *documentStack_.template PushUnsafe() = '~'; + *documentStack_.template PushUnsafe() = '0'; + } + else if (str[i] == '/') { + *documentStack_.template PushUnsafe() = '~'; + *documentStack_.template PushUnsafe() = '1'; + } + else + *documentStack_.template PushUnsafe() = str[i]; + } + } + + RAPIDJSON_FORCEINLINE void PushSchema(const SchemaType& schema) { new (schemaStack_.template Push()) Context(*this, *this, &schema); } + + RAPIDJSON_FORCEINLINE void PopSchema() { + Context* c = schemaStack_.template Pop(1); + if (HashCodeArray* a = static_cast(c->arrayElementHashCodes)) { + a->~HashCodeArray(); + StateAllocator::Free(a); + } + c->~Context(); + } + + void AddErrorLocation(ValueType& result, bool parent) { + GenericStringBuffer sb; + PointerType instancePointer = GetInvalidDocumentPointer(); + ((parent && instancePointer.GetTokenCount() > 0) + ? 
PointerType(instancePointer.GetTokens(), instancePointer.GetTokenCount() - 1)
+            : instancePointer).StringifyUriFragment(sb);
+        ValueType instanceRef(sb.GetString(), static_cast<SizeType>(sb.GetSize() / sizeof(Ch)),
+            GetStateAllocator());
+        result.AddMember(GetInstanceRefString(), instanceRef, GetStateAllocator());
+        sb.Clear();
+        memcpy(sb.Push(CurrentSchema().GetURI().GetStringLength()),
+            CurrentSchema().GetURI().GetString(),
+            CurrentSchema().GetURI().GetStringLength() * sizeof(Ch));
+        GetInvalidSchemaPointer().StringifyUriFragment(sb);
+        ValueType schemaRef(sb.GetString(), static_cast<SizeType>(sb.GetSize() / sizeof(Ch)),
+            GetStateAllocator());
+        result.AddMember(GetSchemaRefString(), schemaRef, GetStateAllocator());
+    }
+
+    void AddError(ValueType& keyword, ValueType& error) {
+        typename ValueType::MemberIterator member = error_.FindMember(keyword);
+        if (member == error_.MemberEnd())
+            error_.AddMember(keyword, error, GetStateAllocator());
+        else {
+            if (member->value.IsObject()) {
+                ValueType errors(kArrayType);
+                errors.PushBack(member->value, GetStateAllocator());
+                member->value = errors;
+            }
+            member->value.PushBack(error, GetStateAllocator());
+        }
+    }
+
+    void AddCurrentError(const typename SchemaType::ValueType& keyword, bool parent = false) {
+        AddErrorLocation(currentError_, parent);
+        AddError(ValueType(keyword, GetStateAllocator(), false).Move(), currentError_);
+    }
+
+    void MergeError(ValueType& other) {
+        for (typename ValueType::MemberIterator it = other.MemberBegin(), end = other.MemberEnd(); it != end; ++it) {
+            AddError(it->name, it->value);
+        }
+    }
+
+    void AddNumberError(const typename SchemaType::ValueType& keyword, ValueType& actual, const SValue& expected,
+        const typename SchemaType::ValueType& (*exclusive)() = 0) {
+        currentError_.SetObject();
+        currentError_.AddMember(GetActualString(), actual, GetStateAllocator());
+        currentError_.AddMember(GetExpectedString(), ValueType(expected, GetStateAllocator()).Move(), GetStateAllocator());
+        if (exclusive)
+            currentError_.AddMember(ValueType(exclusive(), GetStateAllocator()).Move(), true, GetStateAllocator());
+        AddCurrentError(keyword);
+    }
+
+    void AddErrorArray(const typename SchemaType::ValueType& keyword,
+        ISchemaValidator** subvalidators, SizeType count) {
+        ValueType errors(kArrayType);
+        for (SizeType i = 0; i < count; ++i)
+            errors.PushBack(static_cast<GenericSchemaValidator*>(subvalidators[i])->GetError(), GetStateAllocator());
+        currentError_.SetObject();
+        currentError_.AddMember(GetErrorsString(), errors, GetStateAllocator());
+        AddCurrentError(keyword);
+    }
+
+    const SchemaType& CurrentSchema() const { return *schemaStack_.template Top<Context>()->schema; }
+    Context& CurrentContext() { return *schemaStack_.template Top<Context>(); }
+    const Context& CurrentContext() const { return *schemaStack_.template Top<Context>(); }
+
+    static const size_t kDefaultSchemaStackCapacity = 1024;
+    static const size_t kDefaultDocumentStackCapacity = 256;
+    const SchemaDocumentType* schemaDocument_;
+    const SchemaType& root_;
+    StateAllocator* stateAllocator_;
+    StateAllocator* ownStateAllocator_;
+    internal::Stack<StateAllocator> schemaStack_;    //!< stack to store the current path of schema (BaseSchemaType *)
+    internal::Stack<StateAllocator> documentStack_;  //!< stack to store the current path of validating document (Ch)
+    OutputHandler* outputHandler_;
+    ValueType error_;
+    ValueType currentError_;
+    ValueType missingDependents_;
+    bool valid_;
+#if RAPIDJSON_SCHEMA_VERBOSE
+    unsigned depth_;
+#endif
+};
+
+typedef GenericSchemaValidator<SchemaDocument> SchemaValidator;
+
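+// Editor's usage sketch (a comment only, not part of the vendored header):
+// validating a parsed Document `d` against a compiled SchemaDocument `schema`
+// via the typedef above.
+//
+//   SchemaValidator validator(schema);
+//   if (!d.Accept(validator)) {
+//       // validator.GetInvalidSchemaPointer(), GetInvalidDocumentPointer() and
+//       // GetError() describe the first violation; call Reset() to validate again.
+//   }
+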
+/////////////////////////////////////////////////////////////////////////////// +// SchemaValidatingReader + +//! A helper class for parsing with validation. +/*! + This helper class is a functor, designed as a parameter of \ref GenericDocument::Populate(). + + \tparam parseFlags Combination of \ref ParseFlag. + \tparam InputStream Type of input stream, implementing Stream concept. + \tparam SourceEncoding Encoding of the input stream. + \tparam SchemaDocumentType Type of schema document. + \tparam StackAllocator Allocator type for stack. +*/ +template < + unsigned parseFlags, + typename InputStream, + typename SourceEncoding, + typename SchemaDocumentType = SchemaDocument, + typename StackAllocator = CrtAllocator> +class SchemaValidatingReader { +public: + typedef typename SchemaDocumentType::PointerType PointerType; + typedef typename InputStream::Ch Ch; + typedef GenericValue ValueType; + + //! Constructor + /*! + \param is Input stream. + \param sd Schema document. + */ + SchemaValidatingReader(InputStream& is, const SchemaDocumentType& sd) : is_(is), sd_(sd), invalidSchemaKeyword_(), error_(kObjectType), isValid_(true) {} + + template + bool operator()(Handler& handler) { + GenericReader reader; + GenericSchemaValidator validator(sd_, handler); + parseResult_ = reader.template Parse(is_, validator); + + isValid_ = validator.IsValid(); + if (isValid_) { + invalidSchemaPointer_ = PointerType(); + invalidSchemaKeyword_ = 0; + invalidDocumentPointer_ = PointerType(); + error_.SetObject(); + } + else { + invalidSchemaPointer_ = validator.GetInvalidSchemaPointer(); + invalidSchemaKeyword_ = validator.GetInvalidSchemaKeyword(); + invalidDocumentPointer_ = validator.GetInvalidDocumentPointer(); + error_.CopyFrom(validator.GetError(), allocator_); + } + + return parseResult_; + } + + const ParseResult& GetParseResult() const { return parseResult_; } + bool IsValid() const { return isValid_; } + const PointerType& GetInvalidSchemaPointer() const { return invalidSchemaPointer_; } + const Ch* GetInvalidSchemaKeyword() const { return invalidSchemaKeyword_; } + const PointerType& GetInvalidDocumentPointer() const { return invalidDocumentPointer_; } + const ValueType& GetError() const { return error_; } + +private: + InputStream& is_; + const SchemaDocumentType& sd_; + + ParseResult parseResult_; + PointerType invalidSchemaPointer_; + const Ch* invalidSchemaKeyword_; + PointerType invalidDocumentPointer_; + StackAllocator allocator_; + ValueType error_; + bool isValid_; +}; + +RAPIDJSON_NAMESPACE_END +RAPIDJSON_DIAG_POP + +#endif // RAPIDJSON_SCHEMA_H_ diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_stream.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_stream.h new file mode 100644 index 00000000..da28a998 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_stream.h @@ -0,0 +1,223 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations under the License. + +#include "lottie_rapidjson_rapidjson.h" + +#ifndef RAPIDJSON_STREAM_H_ +#define RAPIDJSON_STREAM_H_ + +#include "lottie_rapidjson_encodings.h" + +RAPIDJSON_NAMESPACE_BEGIN + +/////////////////////////////////////////////////////////////////////////////// +// Stream + +/*! \class rapidjson::Stream + \brief Concept for reading and writing characters. + + For read-only stream, no need to implement PutBegin(), Put(), Flush() and PutEnd(). + + For write-only stream, only need to implement Put() and Flush(). + +\code +concept Stream { + typename Ch; //!< Character type of the stream. + + //! Read the current character from stream without moving the read cursor. + Ch Peek() const; + + //! Read the current character from stream and moving the read cursor to next character. + Ch Take(); + + //! Get the current read cursor. + //! \return Number of characters read from start. + size_t Tell(); + + //! Begin writing operation at the current read pointer. + //! \return The begin writer pointer. + Ch* PutBegin(); + + //! Write a character. + void Put(Ch c); + + //! Flush the buffer. + void Flush(); + + //! End the writing operation. + //! \param begin The begin write pointer returned by PutBegin(). + //! \return Number of characters written. + size_t PutEnd(Ch* begin); +} +\endcode +*/ + +//! Provides additional information for stream. +/*! + By using traits pattern, this type provides a default configuration for stream. + For custom stream, this type can be specialized for other configuration. + See TEST(Reader, CustomStringStream) in readertest.cpp for example. +*/ +template +struct StreamTraits { + //! Whether to make local copy of stream for optimization during parsing. + /*! + By default, for safety, streams do not use local copy optimization. + Stream that can be copied fast should specialize this, like StreamTraits. + */ + enum { copyOptimization = 0 }; +}; + +//! Reserve n characters for writing to a stream. +template +inline void PutReserve(Stream& stream, size_t count) { + (void)stream; + (void)count; +} + +//! Write character to a stream, presuming buffer is reserved. +template +inline void PutUnsafe(Stream& stream, typename Stream::Ch c) { + stream.Put(c); +} + +//! Put N copies of a character to a stream. +template +inline void PutN(Stream& stream, Ch c, size_t n) { + PutReserve(stream, n); + for (size_t i = 0; i < n; i++) + PutUnsafe(stream, c); +} + +/////////////////////////////////////////////////////////////////////////////// +// GenericStreamWrapper + +//! A Stream Wrapper +/*! \tThis string stream is a wrapper for any stream by just forwarding any + \treceived message to the origin stream. 
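+
+    A minimal sketch (editor's addition; \c CountingStream is a hypothetical
+    name) of deriving a wrapper that forwards reads while counting them:
+\code
+struct CountingStream : GenericStreamWrapper<StringStream> {
+    CountingStream(StringStream& ss) : GenericStreamWrapper<StringStream>(ss), count_(0) {}
+    Ch Take() { ++count_; return is_.Take(); } // forward, but count consumed characters
+    size_t count_;
+};
+\endcode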
+ \note implements Stream concept
+*/
+
+#if defined(_MSC_VER) && _MSC_VER <= 1800
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(4702) // unreachable code
+RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated
+#endif
+
+template <typename InputStream, typename Encoding = UTF8<> >
+class GenericStreamWrapper {
+public:
+    typedef typename Encoding::Ch Ch;
+    GenericStreamWrapper(InputStream& is): is_(is) {}
+
+    Ch Peek() const { return is_.Peek(); }
+    Ch Take() { return is_.Take(); }
+    size_t Tell() { return is_.Tell(); }
+    Ch* PutBegin() { return is_.PutBegin(); }
+    void Put(Ch ch) { is_.Put(ch); }
+    void Flush() { is_.Flush(); }
+    size_t PutEnd(Ch* ch) { return is_.PutEnd(ch); }
+
+    // wrapper for MemoryStream
+    const Ch* Peek4() const { return is_.Peek4(); }
+
+    // wrapper for AutoUTFInputStream
+    UTFType GetType() const { return is_.GetType(); }
+    bool HasBOM() const { return is_.HasBOM(); }
+
+protected:
+    InputStream& is_;
+};
+
+#if defined(_MSC_VER) && _MSC_VER <= 1800
+RAPIDJSON_DIAG_POP
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// StringStream
+
+//! Read-only string stream.
+/*! \note implements Stream concept
+*/
+template <typename Encoding>
+struct GenericStringStream {
+    typedef typename Encoding::Ch Ch;
+
+    GenericStringStream(const Ch *src) : src_(src), head_(src) {}
+
+    Ch Peek() const { return *src_; }
+    Ch Take() { return *src_++; }
+    size_t Tell() const { return static_cast<size_t>(src_ - head_); }
+
+    Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; }
+    void Put(Ch) { RAPIDJSON_ASSERT(false); }
+    void Flush() { RAPIDJSON_ASSERT(false); }
+    size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; }
+
+    const Ch* src_;     //!< Current read position.
+    const Ch* head_;    //!< Original head of the string.
+};
+
+template <typename Encoding>
+struct StreamTraits<GenericStringStream<Encoding> > {
+    enum { copyOptimization = 1 };
+};
+
+//! String stream with UTF8 encoding.
+typedef GenericStringStream<UTF8<> > StringStream;
+
+///////////////////////////////////////////////////////////////////////////////
+// InsituStringStream
+
+//! A read-write string stream.
+/*! This string stream is particularly designed for in-situ parsing.
+    \note implements Stream concept
+*/
+template <typename Encoding>
+struct GenericInsituStringStream {
+    typedef typename Encoding::Ch Ch;
+
+    GenericInsituStringStream(Ch *src) : src_(src), dst_(0), head_(src) {}
+
+    // Read
+    Ch Peek() { return *src_; }
+    Ch Take() { return *src_++; }
+    size_t Tell() { return static_cast<size_t>(src_ - head_); }
+
+    // Write
+    void Put(Ch c) { RAPIDJSON_ASSERT(dst_ != 0); *dst_++ = c; }
+
+    Ch* PutBegin() { return dst_ = src_; }
+    size_t PutEnd(Ch* begin) { return static_cast<size_t>(dst_ - begin); }
+    void Flush() {}
+
+    Ch* Push(size_t count) { Ch* begin = dst_; dst_ += count; return begin; }
+    void Pop(size_t count) { dst_ -= count; }
+
+    Ch* src_;
+    Ch* dst_;
+    Ch* head_;
+};
+
+template <typename Encoding>
+struct StreamTraits<GenericInsituStringStream<Encoding> > {
+    enum { copyOptimization = 1 };
+};
+
+//! Insitu string stream with UTF8 encoding.
+typedef GenericInsituStringStream<UTF8<> > InsituStringStream;
+
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_STREAM_H_
diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_stringbuffer.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_stringbuffer.h
new file mode 100644
index 00000000..295d6a2f
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_stringbuffer.h
@@ -0,0 +1,121 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_STRINGBUFFER_H_
+#define RAPIDJSON_STRINGBUFFER_H_
+
+#include "lottie_rapidjson_stream.h"
+#include "lottie_rapidjson_internal_stack.h"
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+#include <utility> // std::move
+#endif
+
+#include "lottie_rapidjson_internal_stack.h"
+
+#if defined(__clang__)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(c++98-compat)
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+//! Represents an in-memory output stream.
+/*!
+    \tparam Encoding Encoding of the stream.
+    \tparam Allocator type for allocating memory buffer.
+    \note implements Stream concept
+*/
+template <typename Encoding, typename Allocator = CrtAllocator>
+class GenericStringBuffer {
+public:
+    typedef typename Encoding::Ch Ch;
+
+    GenericStringBuffer(Allocator* allocator = 0, size_t capacity = kDefaultCapacity) : stack_(allocator, capacity) {}
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+    GenericStringBuffer(GenericStringBuffer&& rhs) : stack_(std::move(rhs.stack_)) {}
+    GenericStringBuffer& operator=(GenericStringBuffer&& rhs) {
+        if (&rhs != this)
+            stack_ = std::move(rhs.stack_);
+        return *this;
+    }
+#endif
+
+    void Put(Ch c) { *stack_.template Push<Ch>() = c; }
+    void PutUnsafe(Ch c) { *stack_.template PushUnsafe<Ch>() = c; }
+    void Flush() {}
+
+    void Clear() { stack_.Clear(); }
+    void ShrinkToFit() {
+        // Push and pop a null terminator. This is safe.
+        *stack_.template Push<Ch>() = '\0';
+        stack_.ShrinkToFit();
+        stack_.template Pop<Ch>(1);
+    }
+
+    void Reserve(size_t count) { stack_.template Reserve<Ch>(count); }
+    Ch* Push(size_t count) { return stack_.template Push<Ch>(count); }
+    Ch* PushUnsafe(size_t count) { return stack_.template PushUnsafe<Ch>(count); }
+    void Pop(size_t count) { stack_.template Pop<Ch>(count); }
+
+    const Ch* GetString() const {
+        // Push and pop a null terminator. This is safe.
+        *stack_.template Push<Ch>() = '\0';
+        stack_.template Pop<Ch>(1);
+
+        return stack_.template Bottom<Ch>();
+    }
+
+    //! Get the size of string in bytes in the string buffer.
+    size_t GetSize() const { return stack_.GetSize(); }
+
+    //! Get the length of string in Ch in the string buffer.
+    size_t GetLength() const { return stack_.GetSize() / sizeof(Ch); }
+
+    static const size_t kDefaultCapacity = 256;
+    mutable internal::Stack<Allocator> stack_;
+
+private:
+    // Prohibit copy constructor & assignment operator.
+    GenericStringBuffer(const GenericStringBuffer&);
+    GenericStringBuffer& operator=(const GenericStringBuffer&);
+};
+
+//! String buffer with UTF8 encoding
+typedef GenericStringBuffer<UTF8<> > StringBuffer;
+
+template<typename Encoding, typename Allocator>
+inline void PutReserve(GenericStringBuffer<Encoding, Allocator>& stream, size_t count) {
+    stream.Reserve(count);
+}
+
+template<typename Encoding, typename Allocator>
+inline void PutUnsafe(GenericStringBuffer<Encoding, Allocator>& stream, typename Encoding::Ch c) {
+    stream.PutUnsafe(c);
+}
+
+//! Implement specialized version of PutN() with memset() for better performance.
+template<> +inline void PutN(GenericStringBuffer >& stream, char c, size_t n) { + std::memset(stream.stack_.Push(n), c, n * sizeof(c)); +} + +RAPIDJSON_NAMESPACE_END + +#if defined(__clang__) +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_STRINGBUFFER_H_ diff --git a/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_writer.h b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_writer.h new file mode 100644 index 00000000..d494cd47 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/lottie_rapidjson_writer.h @@ -0,0 +1,710 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_WRITER_H_ +#define RAPIDJSON_WRITER_H_ + +#include "lottie_rapidjson_stream.h" +#include "lottie_rapidjson_internal_clzll.h" +#include "lottie_rapidjson_internal_meta.h" +#include "lottie_rapidjson_internal_stack.h" +#include "lottie_rapidjson_internal_strfunc.h" +#include "lottie_rapidjson_internal_dtoa.h" +#include "lottie_rapidjson_internal_itoa.h" +#include "lottie_rapidjson_stringbuffer.h" +#include // placement new + +#if defined(RAPIDJSON_SIMD) && defined(_MSC_VER) +#include +#pragma intrinsic(_BitScanForward) +#endif +#ifdef RAPIDJSON_SSE42 +#include +#elif defined(RAPIDJSON_SSE2) +#include +#elif defined(RAPIDJSON_NEON) +#include +#endif + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(padded) +RAPIDJSON_DIAG_OFF(unreachable-code) +RAPIDJSON_DIAG_OFF(c++98-compat) +#elif defined(_MSC_VER) +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +/////////////////////////////////////////////////////////////////////////////// +// WriteFlag + +/*! \def RAPIDJSON_WRITE_DEFAULT_FLAGS + \ingroup RAPIDJSON_CONFIG + \brief User-defined kWriteDefaultFlags definition. + + User can define this as any \c WriteFlag combinations. +*/ +#ifndef RAPIDJSON_WRITE_DEFAULT_FLAGS +#define RAPIDJSON_WRITE_DEFAULT_FLAGS kWriteNoFlags +#endif + +//! Combination of writeFlags +enum WriteFlag { + kWriteNoFlags = 0, //!< No flags are set. + kWriteValidateEncodingFlag = 1, //!< Validate encoding of JSON strings. + kWriteNanAndInfFlag = 2, //!< Allow writing of Infinity, -Infinity and NaN. + kWriteDefaultFlags = RAPIDJSON_WRITE_DEFAULT_FLAGS //!< Default write flags. Can be customized by defining RAPIDJSON_WRITE_DEFAULT_FLAGS +}; + +//! JSON writer +/*! Writer implements the concept Handler. + It generates JSON text by events to an output os. + + User may programmatically calls the functions of a writer to generate JSON text. + + On the other side, a writer can also be passed to objects that generates events, + + for example Reader::Parse() and Document::Accept(). + + \tparam OutputStream Type of output stream. + \tparam SourceEncoding Encoding of source string. + \tparam TargetEncoding Encoding of output stream. 
+    \tparam StackAllocator Type of allocator for allocating memory of stack.
+    \note implements Handler concept
+*/
+template<typename OutputStream, typename SourceEncoding = UTF8<>, typename TargetEncoding = UTF8<>, typename StackAllocator = CrtAllocator, unsigned writeFlags = kWriteDefaultFlags>
+class Writer {
+public:
+    typedef typename SourceEncoding::Ch Ch;
+
+    static const int kDefaultMaxDecimalPlaces = 324;
+
+    //! Constructor
+    /*! \param os Output stream.
+        \param stackAllocator User supplied allocator. If it is null, it will create a private one.
+        \param levelDepth Initial capacity of stack.
+    */
+    explicit
+    Writer(OutputStream& os, StackAllocator* stackAllocator = 0, size_t levelDepth = kDefaultLevelDepth) :
+        os_(&os), level_stack_(stackAllocator, levelDepth * sizeof(Level)), maxDecimalPlaces_(kDefaultMaxDecimalPlaces), hasRoot_(false) {}
+
+    explicit
+    Writer(StackAllocator* allocator = 0, size_t levelDepth = kDefaultLevelDepth) :
+        os_(0), level_stack_(allocator, levelDepth * sizeof(Level)), maxDecimalPlaces_(kDefaultMaxDecimalPlaces), hasRoot_(false) {}
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+    Writer(Writer&& rhs) :
+        os_(rhs.os_), level_stack_(std::move(rhs.level_stack_)), maxDecimalPlaces_(rhs.maxDecimalPlaces_), hasRoot_(rhs.hasRoot_) {
+        rhs.os_ = 0;
+    }
+#endif
+
+    //! Reset the writer with a new stream.
+    /*!
+        This function resets the writer with a new stream and default settings,
+        in order to make a Writer object reusable for outputting multiple JSONs.
+
+        \param os New output stream.
+        \code
+        Writer<OutputStream> writer(os1);
+        writer.StartObject();
+        // ...
+        writer.EndObject();
+
+        writer.Reset(os2);
+        writer.StartObject();
+        // ...
+        writer.EndObject();
+        \endcode
+    */
+    void Reset(OutputStream& os) {
+        os_ = &os;
+        hasRoot_ = false;
+        level_stack_.Clear();
+    }
+
+    //! Checks whether the output is a complete JSON.
+    /*!
+        A complete JSON has a complete root object or array.
+    */
+    bool IsComplete() const {
+        return hasRoot_ && level_stack_.Empty();
+    }
+
+    int GetMaxDecimalPlaces() const {
+        return maxDecimalPlaces_;
+    }
+
+    //! Sets the maximum number of decimal places for double output.
+    /*!
+        This setting truncates the output to the specified number of decimal places.
+
+        For example,
+
+        \code
+        writer.SetMaxDecimalPlaces(3);
+        writer.StartArray();
+        writer.Double(0.12345);              // "0.123"
+        writer.Double(0.0001);               // "0.0"
+        writer.Double(1.234567890123456e30); // "1.234567890123456e30" (do not truncate significand for positive exponent)
+        writer.Double(1.23e-4);              // "0.0" (do truncate significand for negative exponent)
+        writer.EndArray();
+        \endcode
+
+        The default setting does not truncate any decimal places. You can restore to this setting by calling
+        \code
+        writer.SetMaxDecimalPlaces(Writer::kDefaultMaxDecimalPlaces);
+        \endcode
+    */
+    void SetMaxDecimalPlaces(int maxDecimalPlaces) {
+        maxDecimalPlaces_ = maxDecimalPlaces;
+    }
+
+    /*!@name Implementation of Handler
+        \see Handler
+    */
+    //@{
+
+    bool Null()               { Prefix(kNullType);   return EndValue(WriteNull()); }
+    bool Bool(bool b)         { Prefix(b ? kTrueType : kFalseType); return EndValue(WriteBool(b)); }
+    bool Int(int i)           { Prefix(kNumberType); return EndValue(WriteInt(i)); }
+    bool Uint(unsigned u)     { Prefix(kNumberType); return EndValue(WriteUint(u)); }
+    bool Int64(int64_t i64)   { Prefix(kNumberType); return EndValue(WriteInt64(i64)); }
+    bool Uint64(uint64_t u64) { Prefix(kNumberType); return EndValue(WriteUint64(u64)); }
+
+    //! Writes the given \c double value to the stream
+    /*!
+        \param d The value to be written.
+        \return Whether it succeeded.
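+        \note With the default write flags this returns false for NaN and
+              Infinity values; set kWriteNanAndInfFlag to allow writing them
+              (see WriteDouble() below).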
+ */ + bool Double(double d) { Prefix(kNumberType); return EndValue(WriteDouble(d)); } + + bool RawNumber(const Ch* str, SizeType length, bool copy = false) { + RAPIDJSON_ASSERT(str != 0); + (void)copy; + Prefix(kNumberType); + return EndValue(WriteString(str, length)); + } + + bool String(const Ch* str, SizeType length, bool copy = false) { + RAPIDJSON_ASSERT(str != 0); + (void)copy; + Prefix(kStringType); + return EndValue(WriteString(str, length)); + } + +#if RAPIDJSON_HAS_STDSTRING + bool String(const std::basic_string& str) { + return String(str.data(), SizeType(str.size())); + } +#endif + + bool StartObject() { + Prefix(kObjectType); + new (level_stack_.template Push()) Level(false); + return WriteStartObject(); + } + + bool Key(const Ch* str, SizeType length, bool copy = false) { return String(str, length, copy); } + +#if RAPIDJSON_HAS_STDSTRING + bool Key(const std::basic_string& str) + { + return Key(str.data(), SizeType(str.size())); + } +#endif + + bool EndObject(SizeType memberCount = 0) { + (void)memberCount; + RAPIDJSON_ASSERT(level_stack_.GetSize() >= sizeof(Level)); // not inside an Object + RAPIDJSON_ASSERT(!level_stack_.template Top()->inArray); // currently inside an Array, not Object + RAPIDJSON_ASSERT(0 == level_stack_.template Top()->valueCount % 2); // Object has a Key without a Value + level_stack_.template Pop(1); + return EndValue(WriteEndObject()); + } + + bool StartArray() { + Prefix(kArrayType); + new (level_stack_.template Push()) Level(true); + return WriteStartArray(); + } + + bool EndArray(SizeType elementCount = 0) { + (void)elementCount; + RAPIDJSON_ASSERT(level_stack_.GetSize() >= sizeof(Level)); + RAPIDJSON_ASSERT(level_stack_.template Top()->inArray); + level_stack_.template Pop(1); + return EndValue(WriteEndArray()); + } + //@} + + /*! @name Convenience extensions */ + //@{ + + //! Simpler but slower overload. + bool String(const Ch* const& str) { return String(str, internal::StrLen(str)); } + bool Key(const Ch* const& str) { return Key(str, internal::StrLen(str)); } + + //@} + + //! Write a raw JSON value. + /*! + For user to write a stringified JSON as a value. + + \param json A well-formed JSON value. It should not contain null character within [0, length - 1] range. + \param length Length of the json. + \param type Type of the root of json. + */ + bool RawValue(const Ch* json, size_t length, Type type) { + RAPIDJSON_ASSERT(json != 0); + Prefix(type); + return EndValue(WriteRawValue(json, length)); + } + + //! Flush the output stream. + /*! + Allows the user to flush the output stream immediately. + */ + void Flush() { + os_->Flush(); + } + + static const size_t kDefaultLevelDepth = 32; + +protected: + //! 
Information for each nested level + struct Level { + Level(bool inArray_) : valueCount(0), inArray(inArray_) {} + size_t valueCount; //!< number of values in this level + bool inArray; //!< true if in array, otherwise in object + }; + + bool WriteNull() { + PutReserve(*os_, 4); + PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'u'); PutUnsafe(*os_, 'l'); PutUnsafe(*os_, 'l'); return true; + } + + bool WriteBool(bool b) { + if (b) { + PutReserve(*os_, 4); + PutUnsafe(*os_, 't'); PutUnsafe(*os_, 'r'); PutUnsafe(*os_, 'u'); PutUnsafe(*os_, 'e'); + } + else { + PutReserve(*os_, 5); + PutUnsafe(*os_, 'f'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'l'); PutUnsafe(*os_, 's'); PutUnsafe(*os_, 'e'); + } + return true; + } + + bool WriteInt(int i) { + char buffer[11]; + const char* end = internal::i32toa(i, buffer); + PutReserve(*os_, static_cast(end - buffer)); + for (const char* p = buffer; p != end; ++p) + PutUnsafe(*os_, static_cast(*p)); + return true; + } + + bool WriteUint(unsigned u) { + char buffer[10]; + const char* end = internal::u32toa(u, buffer); + PutReserve(*os_, static_cast(end - buffer)); + for (const char* p = buffer; p != end; ++p) + PutUnsafe(*os_, static_cast(*p)); + return true; + } + + bool WriteInt64(int64_t i64) { + char buffer[21]; + const char* end = internal::i64toa(i64, buffer); + PutReserve(*os_, static_cast(end - buffer)); + for (const char* p = buffer; p != end; ++p) + PutUnsafe(*os_, static_cast(*p)); + return true; + } + + bool WriteUint64(uint64_t u64) { + char buffer[20]; + char* end = internal::u64toa(u64, buffer); + PutReserve(*os_, static_cast(end - buffer)); + for (char* p = buffer; p != end; ++p) + PutUnsafe(*os_, static_cast(*p)); + return true; + } + + bool WriteDouble(double d) { + if (internal::Double(d).IsNanOrInf()) { + if (!(writeFlags & kWriteNanAndInfFlag)) + return false; + if (internal::Double(d).IsNan()) { + PutReserve(*os_, 3); + PutUnsafe(*os_, 'N'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'N'); + return true; + } + if (internal::Double(d).Sign()) { + PutReserve(*os_, 9); + PutUnsafe(*os_, '-'); + } + else + PutReserve(*os_, 8); + PutUnsafe(*os_, 'I'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'f'); + PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 't'); PutUnsafe(*os_, 'y'); + return true; + } + + char buffer[25]; + char* end = internal::dtoa(d, buffer, maxDecimalPlaces_); + PutReserve(*os_, static_cast(end - buffer)); + for (char* p = buffer; p != end; ++p) + PutUnsafe(*os_, static_cast(*p)); + return true; + } + + bool WriteString(const Ch* str, SizeType length) { + static const typename OutputStream::Ch hexDigits[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' }; + static const char escape[256] = { +#define Z16 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 + //0 1 2 3 4 5 6 7 8 9 A B C D E F + 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'b', 't', 'n', 'u', 'f', 'r', 'u', 'u', // 00 + 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', // 10 + 0, 0, '"', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20 + Z16, Z16, // 30~4F + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,'\\', 0, 0, 0, // 50 + Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16 // 60~FF +#undef Z16 + }; + + if (TargetEncoding::supportUnicode) + PutReserve(*os_, 2 + length * 6); // "\uxxxx..." + else + PutReserve(*os_, 2 + length * 12); // "\uxxxx\uyyyy..." 
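+        // Reservation math: two surrounding quotes plus, per input Ch, at most
+        // one 6-byte "\uXXXX" escape when the target encoding supports unicode,
+        // or one 12-byte surrogate-pair escape "\uXXXX\uYYYY" when it does not.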
+ + PutUnsafe(*os_, '\"'); + GenericStringStream is(str); + while (ScanWriteUnescapedString(is, length)) { + const Ch c = is.Peek(); + if (!TargetEncoding::supportUnicode && static_cast(c) >= 0x80) { + // Unicode escaping + unsigned codepoint; + if (RAPIDJSON_UNLIKELY(!SourceEncoding::Decode(is, &codepoint))) + return false; + PutUnsafe(*os_, '\\'); + PutUnsafe(*os_, 'u'); + if (codepoint <= 0xD7FF || (codepoint >= 0xE000 && codepoint <= 0xFFFF)) { + PutUnsafe(*os_, hexDigits[(codepoint >> 12) & 15]); + PutUnsafe(*os_, hexDigits[(codepoint >> 8) & 15]); + PutUnsafe(*os_, hexDigits[(codepoint >> 4) & 15]); + PutUnsafe(*os_, hexDigits[(codepoint ) & 15]); + } + else { + RAPIDJSON_ASSERT(codepoint >= 0x010000 && codepoint <= 0x10FFFF); + // Surrogate pair + unsigned s = codepoint - 0x010000; + unsigned lead = (s >> 10) + 0xD800; + unsigned trail = (s & 0x3FF) + 0xDC00; + PutUnsafe(*os_, hexDigits[(lead >> 12) & 15]); + PutUnsafe(*os_, hexDigits[(lead >> 8) & 15]); + PutUnsafe(*os_, hexDigits[(lead >> 4) & 15]); + PutUnsafe(*os_, hexDigits[(lead ) & 15]); + PutUnsafe(*os_, '\\'); + PutUnsafe(*os_, 'u'); + PutUnsafe(*os_, hexDigits[(trail >> 12) & 15]); + PutUnsafe(*os_, hexDigits[(trail >> 8) & 15]); + PutUnsafe(*os_, hexDigits[(trail >> 4) & 15]); + PutUnsafe(*os_, hexDigits[(trail ) & 15]); + } + } + else if ((sizeof(Ch) == 1 || static_cast(c) < 256) && RAPIDJSON_UNLIKELY(escape[static_cast(c)])) { + is.Take(); + PutUnsafe(*os_, '\\'); + PutUnsafe(*os_, static_cast(escape[static_cast(c)])); + if (escape[static_cast(c)] == 'u') { + PutUnsafe(*os_, '0'); + PutUnsafe(*os_, '0'); + PutUnsafe(*os_, hexDigits[static_cast(c) >> 4]); + PutUnsafe(*os_, hexDigits[static_cast(c) & 0xF]); + } + } + else if (RAPIDJSON_UNLIKELY(!(writeFlags & kWriteValidateEncodingFlag ? + Transcoder::Validate(is, *os_) : + Transcoder::TranscodeUnsafe(is, *os_)))) + return false; + } + PutUnsafe(*os_, '\"'); + return true; + } + + bool ScanWriteUnescapedString(GenericStringStream& is, size_t length) { + return RAPIDJSON_LIKELY(is.Tell() < length); + } + + bool WriteStartObject() { os_->Put('{'); return true; } + bool WriteEndObject() { os_->Put('}'); return true; } + bool WriteStartArray() { os_->Put('['); return true; } + bool WriteEndArray() { os_->Put(']'); return true; } + + bool WriteRawValue(const Ch* json, size_t length) { + PutReserve(*os_, length); + GenericStringStream is(json); + while (RAPIDJSON_LIKELY(is.Tell() < length)) { + RAPIDJSON_ASSERT(is.Peek() != '\0'); + if (RAPIDJSON_UNLIKELY(!(writeFlags & kWriteValidateEncodingFlag ? + Transcoder::Validate(is, *os_) : + Transcoder::TranscodeUnsafe(is, *os_)))) + return false; + } + return true; + } + + void Prefix(Type type) { + (void)type; + if (RAPIDJSON_LIKELY(level_stack_.GetSize() != 0)) { // this value is not at root + Level* level = level_stack_.template Top(); + if (level->valueCount > 0) { + if (level->inArray) + os_->Put(','); // add comma if it is not the first element in array + else // in object + os_->Put((level->valueCount % 2 == 0) ? ',' : ':'); + } + if (!level->inArray && level->valueCount % 2 == 0) + RAPIDJSON_ASSERT(type == kStringType); // if it's in object, then even number should be a name + level->valueCount++; + } + else { + RAPIDJSON_ASSERT(!hasRoot_); // Should only has one and only one root. + hasRoot_ = true; + } + } + + // Flush the value if it is the top level one. 
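+    // (Once the root value ends, level_stack_ is empty and no further events
+    // are expected, so it is safe to flush the output stream at this point.)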
+ bool EndValue(bool ret) { + if (RAPIDJSON_UNLIKELY(level_stack_.Empty())) // end of json text + Flush(); + return ret; + } + + OutputStream* os_; + internal::Stack level_stack_; + int maxDecimalPlaces_; + bool hasRoot_; + +private: + // Prohibit copy constructor & assignment operator. + Writer(const Writer&); + Writer& operator=(const Writer&); +}; + +// Full specialization for StringStream to prevent memory copying + +template<> +inline bool Writer::WriteInt(int i) { + char *buffer = os_->Push(11); + const char* end = internal::i32toa(i, buffer); + os_->Pop(static_cast(11 - (end - buffer))); + return true; +} + +template<> +inline bool Writer::WriteUint(unsigned u) { + char *buffer = os_->Push(10); + const char* end = internal::u32toa(u, buffer); + os_->Pop(static_cast(10 - (end - buffer))); + return true; +} + +template<> +inline bool Writer::WriteInt64(int64_t i64) { + char *buffer = os_->Push(21); + const char* end = internal::i64toa(i64, buffer); + os_->Pop(static_cast(21 - (end - buffer))); + return true; +} + +template<> +inline bool Writer::WriteUint64(uint64_t u) { + char *buffer = os_->Push(20); + const char* end = internal::u64toa(u, buffer); + os_->Pop(static_cast(20 - (end - buffer))); + return true; +} + +template<> +inline bool Writer::WriteDouble(double d) { + if (internal::Double(d).IsNanOrInf()) { + // Note: This code path can only be reached if (RAPIDJSON_WRITE_DEFAULT_FLAGS & kWriteNanAndInfFlag). + if (!(kWriteDefaultFlags & kWriteNanAndInfFlag)) + return false; + if (internal::Double(d).IsNan()) { + PutReserve(*os_, 3); + PutUnsafe(*os_, 'N'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'N'); + return true; + } + if (internal::Double(d).Sign()) { + PutReserve(*os_, 9); + PutUnsafe(*os_, '-'); + } + else + PutReserve(*os_, 8); + PutUnsafe(*os_, 'I'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'f'); + PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 't'); PutUnsafe(*os_, 'y'); + return true; + } + + char *buffer = os_->Push(25); + char* end = internal::dtoa(d, buffer, maxDecimalPlaces_); + os_->Pop(static_cast(25 - (end - buffer))); + return true; +} + +#if defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42) +template<> +inline bool Writer::ScanWriteUnescapedString(StringStream& is, size_t length) { + if (length < 16) + return RAPIDJSON_LIKELY(is.Tell() < length); + + if (!RAPIDJSON_LIKELY(is.Tell() < length)) + return false; + + const char* p = is.src_; + const char* end = is.head_ + length; + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + const char* endAligned = reinterpret_cast(reinterpret_cast(end) & static_cast(~15)); + if (nextAligned > end) + return true; + + while (p != nextAligned) + if (*p < 0x20 || *p == '\"' || *p == '\\') { + is.src_ = p; + return RAPIDJSON_LIKELY(is.Tell() < length); + } + else + os_->PutUnsafe(*p++); + + // The rest of string using SIMD + static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' }; + static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' }; + static const char space[16] = { 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F }; + const __m128i dq = _mm_loadu_si128(reinterpret_cast(&dquote[0])); + const __m128i bs = _mm_loadu_si128(reinterpret_cast(&bslash[0])); + const __m128i sp = _mm_loadu_si128(reinterpret_cast(&space[0])); + + for (; p != endAligned; p += 16) { + 
const __m128i s = _mm_load_si128(reinterpret_cast(p)); + const __m128i t1 = _mm_cmpeq_epi8(s, dq); + const __m128i t2 = _mm_cmpeq_epi8(s, bs); + const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x1F) == 0x1F + const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3); + unsigned short r = static_cast(_mm_movemask_epi8(x)); + if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped + SizeType len; +#ifdef _MSC_VER // Find the index of first escaped + unsigned long offset; + _BitScanForward(&offset, r); + len = offset; +#else + len = static_cast(__builtin_ffs(r) - 1); +#endif + char* q = reinterpret_cast(os_->PushUnsafe(len)); + for (size_t i = 0; i < len; i++) + q[i] = p[i]; + + p += len; + break; + } + _mm_storeu_si128(reinterpret_cast<__m128i *>(os_->PushUnsafe(16)), s); + } + + is.src_ = p; + return RAPIDJSON_LIKELY(is.Tell() < length); +} +#elif defined(RAPIDJSON_NEON) +template<> +inline bool Writer::ScanWriteUnescapedString(StringStream& is, size_t length) { + if (length < 16) + return RAPIDJSON_LIKELY(is.Tell() < length); + + if (!RAPIDJSON_LIKELY(is.Tell() < length)) + return false; + + const char* p = is.src_; + const char* end = is.head_ + length; + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + const char* endAligned = reinterpret_cast(reinterpret_cast(end) & static_cast(~15)); + if (nextAligned > end) + return true; + + while (p != nextAligned) + if (*p < 0x20 || *p == '\"' || *p == '\\') { + is.src_ = p; + return RAPIDJSON_LIKELY(is.Tell() < length); + } + else + os_->PutUnsafe(*p++); + + // The rest of string using SIMD + const uint8x16_t s0 = vmovq_n_u8('"'); + const uint8x16_t s1 = vmovq_n_u8('\\'); + const uint8x16_t s2 = vmovq_n_u8('\b'); + const uint8x16_t s3 = vmovq_n_u8(32); + + for (; p != endAligned; p += 16) { + const uint8x16_t s = vld1q_u8(reinterpret_cast(p)); + uint8x16_t x = vceqq_u8(s, s0); + x = vorrq_u8(x, vceqq_u8(s, s1)); + x = vorrq_u8(x, vceqq_u8(s, s2)); + x = vorrq_u8(x, vcltq_u8(s, s3)); + + x = vrev64q_u8(x); // Rev in 64 + uint64_t low = vgetq_lane_u64(vreinterpretq_u64_u8(x), 0); // extract + uint64_t high = vgetq_lane_u64(vreinterpretq_u64_u8(x), 1); // extract + + SizeType len = 0; + bool escaped = false; + if (low == 0) { + if (high != 0) { + uint32_t lz = internal::clzll(high); + len = 8 + (lz >> 3); + escaped = true; + } + } else { + uint32_t lz = internal::clzll(low); + len = lz >> 3; + escaped = true; + } + if (RAPIDJSON_UNLIKELY(escaped)) { // some of characters is escaped + char* q = reinterpret_cast(os_->PushUnsafe(len)); + for (size_t i = 0; i < len; i++) + q[i] = p[i]; + + p += len; + break; + } + vst1q_u8(reinterpret_cast(os_->PushUnsafe(16)), s); + } + + is.src_ = p; + return RAPIDJSON_LIKELY(is.Tell() < length); +} +#endif // RAPIDJSON_NEON + +RAPIDJSON_NAMESPACE_END + +#if defined(_MSC_VER) || defined(__clang__) +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_RAPIDJSON_H_ diff --git a/vendor/github.com/Benau/go_rlottie/rlottie.h b/vendor/github.com/Benau/go_rlottie/rlottie.h new file mode 100644 index 00000000..67565691 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/rlottie.h @@ -0,0 +1,525 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. 
+ + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _RLOTTIE_H_ +#define _RLOTTIE_H_ + +#include +#include +#include + +#if defined _WIN32 || defined __CYGWIN__ + #ifdef RLOTTIE_BUILD + #define RLOTTIE_API __declspec(dllexport) + #else + #define RLOTTIE_API __declspec(dllimport) + #endif +#else + #ifdef RLOTTIE_BUILD + #define RLOTTIE_API __attribute__ ((visibility ("default"))) + #else + #define RLOTTIE_API + #endif +#endif + +class AnimationImpl; +struct LOTNode; +struct LOTLayerNode; + +namespace rlottie { + +/** + * @brief Configures rlottie model cache policy. + * + * Provides Library level control to configure model cache + * policy. Setting it to 0 will disable + * the cache as well as flush all the previously cached content. + * + * @param[in] cacheSize Maximum Model Cache size. + * + * @note to disable Caching configure with 0 size. + * @note to flush the current Cache content configure it with 0 and + * then reconfigure with the new size. + * + * @internal + */ +RLOTTIE_API void configureModelCacheSize(size_t cacheSize); + +struct Color { + Color() = default; + Color(float r, float g , float b):_r(r), _g(g), _b(b){} + float r() const {return _r;} + float g() const {return _g;} + float b() const {return _b;} +private: + float _r{0}; + float _g{0}; + float _b{0}; +}; + +struct Size { + Size() = default; + Size(float w, float h):_w(w), _h(h){} + float w() const {return _w;} + float h() const {return _h;} +private: + float _w{0}; + float _h{0}; +}; + +struct Point { + Point() = default; + Point(float x, float y):_x(x), _y(y){} + float x() const {return _x;} + float y() const {return _y;} +private: + float _x{0}; + float _y{0}; +}; + +struct FrameInfo { + explicit FrameInfo(uint32_t frame): _frameNo(frame){} + uint32_t curFrame() const {return _frameNo;} +private: + uint32_t _frameNo; +}; + +enum class Property { + FillColor, /*!< Color property of Fill object , value type is rlottie::Color */ + FillOpacity, /*!< Opacity property of Fill object , value type is float [ 0 .. 100] */ + StrokeColor, /*!< Color property of Stroke object , value type is rlottie::Color */ + StrokeOpacity, /*!< Opacity property of Stroke object , value type is float [ 0 .. 
100] */ + StrokeWidth, /*!< stroke width property of Stroke object , value type is float */ + TrAnchor, /*!< Transform Anchor property of Layer and Group object , value type is rlottie::Point */ + TrPosition, /*!< Transform Position property of Layer and Group object , value type is rlottie::Point */ + TrScale, /*!< Transform Scale property of Layer and Group object , value type is rlottie::Size. range[0 ..100] */ + TrRotation, /*!< Transform Rotation property of Layer and Group object , value type is float. range[0 .. 360] in degrees*/ + TrOpacity /*!< Transform Opacity property of Layer and Group object , value type is float [ 0 .. 100] */ +}; + +struct Color_Type{}; +struct Point_Type{}; +struct Size_Type{}; +struct Float_Type{}; +template struct MapType; + +class RLOTTIE_API Surface { +public: + /** + * @brief Surface object constructor. + * + * @param[in] buffer surface buffer. + * @param[in] width surface width. + * @param[in] height surface height. + * @param[in] bytesPerLine number of bytes in a surface scanline. + * + * @note Default surface format is ARGB32_Premultiplied. + * + * @internal + */ + Surface(uint32_t *buffer, size_t width, size_t height, size_t bytesPerLine); + + /** + * @brief Sets the Draw Area available on the Surface. + * + * Lottie will use the draw region size to generate frame image + * and will update only the draw rgion of the surface. + * + * @param[in] x region area x position. + * @param[in] y region area y position. + * @param[in] width region area width. + * @param[in] height region area height. + * + * @note Default surface format is ARGB32_Premultiplied. + * @note Default draw region area is [ 0 , 0, surface width , surface height] + * + * @internal + */ + void setDrawRegion(size_t x, size_t y, size_t width, size_t height); + + /** + * @brief Returns width of the surface. + * + * @return surface width + * + * @internal + * + */ + size_t width() const {return mWidth;} + + /** + * @brief Returns height of the surface. + * + * @return surface height + * + * @internal + */ + size_t height() const {return mHeight;} + + /** + * @brief Returns number of bytes in the surface scanline. + * + * @return number of bytes in scanline. + * + * @internal + */ + size_t bytesPerLine() const {return mBytesPerLine;} + + /** + * @brief Returns buffer attached tp the surface. + * + * @return buffer attaced to the Surface. + * + * @internal + */ + uint32_t *buffer() const {return mBuffer;} + + /** + * @brief Returns drawable area width of the surface. + * + * @return drawable area width + * + * @note Default value is width() of the surface + * + * @internal + * + */ + size_t drawRegionWidth() const {return mDrawArea.w;} + + /** + * @brief Returns drawable area height of the surface. + * + * @return drawable area height + * + * @note Default value is height() of the surface + * + * @internal + */ + size_t drawRegionHeight() const {return mDrawArea.h;} + + /** + * @brief Returns drawable area's x position of the surface. + * + * @return drawable area's x potition. + * + * @note Default value is 0 + * + * @internal + */ + size_t drawRegionPosX() const {return mDrawArea.x;} + + /** + * @brief Returns drawable area's y position of the surface. + * + * @return drawable area's y potition. + * + * @note Default value is 0 + * + * @internal + */ + size_t drawRegionPosY() const {return mDrawArea.y;} + + /** + * @brief Default constructor. 
+ */ + Surface() = default; +private: + uint32_t *mBuffer{nullptr}; + size_t mWidth{0}; + size_t mHeight{0}; + size_t mBytesPerLine{0}; + struct { + size_t x{0}; + size_t y{0}; + size_t w{0}; + size_t h{0}; + }mDrawArea; +}; + +using MarkerList = std::vector>; +/** + * @brief https://helpx.adobe.com/after-effects/using/layer-markers-composition-markers.html + * Markers exported form AE are used to describe a segmnet of an animation {comment/tag , startFrame, endFrame} + * Marker can be use to devide a resource in to separate animations by tagging the segment with comment string , + * start frame and duration of that segment. + */ + +using LayerInfoList = std::vector>; + + +using ColorFilter = std::function; + +class RLOTTIE_API Animation { +public: + + /** + * @brief Constructs an animation object from file path. + * + * @param[in] path Lottie resource file path + * @param[in] cachePolicy whether to cache or not the model data. + * use only when need to explicit disabl caching for a + * particular resource. To disable caching at library level + * use @see configureModelCacheSize() instead. + * + * @return Animation object that can render the contents of the + * Lottie resource represented by file path. + * + * @internal + */ + static std::unique_ptr + loadFromFile(const std::string &path, bool cachePolicy=true); + + /** + * @brief Constructs an animation object from JSON string data. + * + * @param[in] jsonData The JSON string data. + * @param[in] key the string that will be used to cache the JSON string data. + * @param[in] resourcePath the path will be used to search for external resource. + * @param[in] cachePolicy whether to cache or not the model data. + * use only when need to explicit disabl caching for a + * particular resource. To disable caching at library level + * use @see configureModelCacheSize() instead. + * + * @return Animation object that can render the contents of the + * Lottie resource represented by JSON string data. + * + * @internal + */ + static std::unique_ptr + loadFromData(std::string jsonData, const std::string &key, + const std::string &resourcePath="", bool cachePolicy=true); + + /** + * @brief Constructs an animation object from JSON string data and update. + * the color properties using ColorFilter. + + * @param[in] jsonData The JSON string data. + * @param[in] resourcePath the path will be used to search for external resource. + * @param[in] filter The color filter that will be applied for each color property + * found during parsing. + + * @return Animation object that can render the contents of the + * Lottie resource represented by JSON string data. + * + * @internal + */ + static std::unique_ptr + loadFromData(std::string jsonData, std::string resourcePath, ColorFilter filter); + + /** + * @brief Returns default framerate of the Lottie resource. + * + * @return framerate of the Lottie resource + * + * @internal + * + */ + double frameRate() const; + + /** + * @brief Returns total number of frames present in the Lottie resource. + * + * @return frame count of the Lottie resource. + * + * @note frame number starts with 0. + * + * @internal + */ + size_t totalFrame() const; + + /** + * @brief Returns default viewport size of the Lottie resource. + * + * @param[out] width default width of the viewport. + * @param[out] height default height of the viewport. + * + * @internal + * + */ + void size(size_t &width, size_t &height) const; + + /** + * @brief Returns total animation duration of Lottie resource in second. 
+ * it uses totalFrame() and frameRate() to calculate the duration. + * duration = totalFrame() / frameRate(). + * + * @return total animation duration in second. + * @retval 0 if the Lottie resource has no animation. + * + * @see totalFrame() + * @see frameRate() + * + * @internal + */ + double duration() const; + + /** + * @brief Returns frame number for a given position. + * this function helps to map the position value retuned + * by the animator to a frame number in side the Lottie resource. + * frame_number = lerp(start_frame, endframe, pos); + * + * @param[in] pos normalized position value [0 ... 1] + * + * @return frame numer maps to the position value [startFrame .... endFrame] + * + * @internal + */ + size_t frameAtPos(double pos); + + /** + * @brief Renders the content to surface Asynchronously. + * it gives a future in return to get the result of the + * rendering at a future point. + * To get best performance user has to start rendering as soon as + * it finds that content at {frameNo} has to be rendered and get the + * result from the future at the last moment when the surface is needed + * to draw into the screen. + * + * + * @param[in] frameNo Content corresponds to the @p frameNo needs to be drawn + * @param[in] surface Surface in which content will be drawn + * @param[in] keepAspectRatio whether to keep the aspect ratio while scaling the content. + * + * @return future that will hold the result when rendering finished. + * + * for Synchronus rendering @see renderSync + * + * @see Surface + * @internal + */ + std::future render(size_t frameNo, Surface surface, bool keepAspectRatio=true); + + /** + * @brief Renders the content to surface synchronously. + * for performance use the async rendering @see render + * + * @param[in] frameNo Content corresponds to the @p frameNo needs to be drawn + * @param[in] surface Surface in which content will be drawn + * @param[in] keepAspectRatio whether to keep the aspect ratio while scaling the content. + * + * @internal + */ + void renderSync(size_t frameNo, Surface surface, bool keepAspectRatio=true); + + /** + * @brief Returns root layer of the composition updated with + * content of the Lottie resource at frame number @p frameNo. + * + * @param[in] frameNo Content corresponds to the @p frameNo needs to be extracted. + * @param[in] width content viewbox width + * @param[in] height content viewbox height + * + * @return Root layer node. + * + * @internal + */ + const LOTLayerNode * renderTree(size_t frameNo, size_t width, size_t height) const; + + /** + * @brief Returns Composition Markers. + * + * + * @return returns MarkerList of the Composition. + * + * @see MarkerList + * @internal + */ + const MarkerList& markers() const; + + /** + * @brief Returns Layer information{name, inFrame, outFrame} of all the child layers of the composition. + * + * + * @return List of Layer Information of the Composition. + * + * @see LayerInfoList + * @internal + */ + const LayerInfoList& layers() const; + + /** + * @brief Sets property value for the specified {@link KeyPath}. This {@link KeyPath} can resolve + * to multiple contents. In that case, the callback's value will apply to all of them. + * + * Keypath should conatin object names separated by (.) and can handle globe(**) or wildchar(*). 
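+     * Here "*" matches a single name in the key path and "**" matches any
+     * number of levels, as the examples below show.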
+ * + * @usage + * To change fillcolor property of fill1 object in the layer1->group1->fill1 hirarchy to RED color + * + * player->setValue("layer1.group1.fill1", rlottie::Color(1, 0, 0); + * + * if all the color property inside group1 needs to be changed to GREEN color + * + * player->setValue("**.group1.**", rlottie::Color(0, 1, 0); + * + * @internal + */ + template + void setValue(const std::string &keypath, AnyValue value) + { + setValue(MapType>{}, prop, keypath, value); + } + + /** + * @brief default destructor + * + * @internal + */ + ~Animation(); + +private: + void setValue(Color_Type, Property, const std::string &, Color); + void setValue(Float_Type, Property, const std::string &, float); + void setValue(Size_Type, Property, const std::string &, Size); + void setValue(Point_Type, Property, const std::string &, Point); + + void setValue(Color_Type, Property, const std::string &, std::function &&); + void setValue(Float_Type, Property, const std::string &, std::function &&); + void setValue(Size_Type, Property, const std::string &, std::function &&); + void setValue(Point_Type, Property, const std::string &, std::function &&); + /** + * @brief default constructor + * + * @internal + */ + Animation(); + + std::unique_ptr d; +}; + +//Map Property to Value type +template<> struct MapType>: Color_Type{}; +template<> struct MapType>: Color_Type{}; +template<> struct MapType>: Float_Type{}; +template<> struct MapType>: Float_Type{}; +template<> struct MapType>: Float_Type{}; +template<> struct MapType>: Float_Type{}; +template<> struct MapType>: Float_Type{}; +template<> struct MapType>: Point_Type{}; +template<> struct MapType>: Point_Type{}; +template<> struct MapType>: Size_Type{}; + + +} // namespace lotplayer + +#endif // _RLOTTIE_H_ diff --git a/vendor/github.com/Benau/go_rlottie/rlottie_capi.h b/vendor/github.com/Benau/go_rlottie/rlottie_capi.h new file mode 100644 index 00000000..7c883fac --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/rlottie_capi.h @@ -0,0 +1,299 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _RLOTTIE_CAPI_H_ +#define _RLOTTIE_CAPI_H_ + +#include +#include +#include "rlottiecommon.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum { + LOTTIE_ANIMATION_PROPERTY_FILLCOLOR, /*!< Color property of Fill object , value type is float [0 ... 
1] */ + LOTTIE_ANIMATION_PROPERTY_FILLOPACITY, /*!< Opacity property of Fill object , value type is float [ 0 .. 100] */ + LOTTIE_ANIMATION_PROPERTY_STROKECOLOR, /*!< Color property of Stroke object , value type is float [0 ... 1] */ + LOTTIE_ANIMATION_PROPERTY_STROKEOPACITY, /*!< Opacity property of Stroke object , value type is float [ 0 .. 100] */ + LOTTIE_ANIMATION_PROPERTY_STROKEWIDTH, /*!< stroke with property of Stroke object , value type is float */ + LOTTIE_ANIMATION_PROPERTY_TR_ANCHOR, /*!< Transform Anchor property of Layer and Group object , value type is int */ + LOTTIE_ANIMATION_PROPERTY_TR_POSITION, /*!< Transform Position property of Layer and Group object , value type is int */ + LOTTIE_ANIMATION_PROPERTY_TR_SCALE, /*!< Transform Scale property of Layer and Group object , value type is float range[0 ..100] */ + LOTTIE_ANIMATION_PROPERTY_TR_ROTATION, /*!< Transform Scale property of Layer and Group object , value type is float. range[0 .. 360] in degrees*/ + LOTTIE_ANIMATION_PROPERTY_TR_OPACITY /*!< Transform Opacity property of Layer and Group object , value type is float [ 0 .. 100] */ +}Lottie_Animation_Property; + +typedef struct Lottie_Animation_S Lottie_Animation; + +/** + * @brief Constructs an animation object from file path. + * + * @param[in] path Lottie resource file path + * + * @return Animation object that can build the contents of the + * Lottie resource represented by file path. + * + * @see lottie_animation_destroy() + * + * @ingroup Lottie_Animation + * @internal + */ +RLOTTIE_API Lottie_Animation *lottie_animation_from_file(const char *path); + +/** + * @brief Constructs an animation object from JSON string data. + * + * @param[in] data The JSON string data. + * @param[in] key the string that will be used to cache the JSON string data. + * @param[in] resource_path the path that will be used to load external resource needed by the JSON data. + * + * @return Animation object that can build the contents of the + * Lottie resource represented by JSON string data. + * + * @ingroup Lottie_Animation + * @internal + */ +RLOTTIE_API Lottie_Animation *lottie_animation_from_data(const char *data, const char *key, const char *resource_path); + +/** + * @brief Free given Animation object resource. + * + * @param[in] animation Animation object to free. + * + * @see lottie_animation_from_file() + * @see lottie_animation_from_data() + * + * @ingroup Lottie_Animation + * @internal + */ +RLOTTIE_API void lottie_animation_destroy(Lottie_Animation *animation); + +/** + * @brief Returns default viewport size of the Lottie resource. + * + * @param[in] animation Animation object. + * @param[out] w default width of the viewport. + * @param[out] h default height of the viewport. + * + * @ingroup Lottie_Animation + * @internal + */ +RLOTTIE_API void lottie_animation_get_size(const Lottie_Animation *animation, size_t *width, size_t *height); + +/** + * @brief Returns total animation duration of Lottie resource in second. + * it uses totalFrame() and frameRate() to calculate the duration. + * duration = totalFrame() / frameRate(). + * + * @param[in] animation Animation object. + * + * @return total animation duration in second. + * @c 0 if the Lottie resource has no animation. + * + * @see lottie_animation_get_totalframe() + * @see lottie_animation_get_framerate() + * + * @ingroup Lottie_Animation + * @internal + */ +RLOTTIE_API double lottie_animation_get_duration(const Lottie_Animation *animation); + +/** + * @brief Returns total number of frames present in the Lottie resource. 
+ * + * @param[in] animation Animation object. + * + * @return frame count of the Lottie resource.* + * + * @note frame number starts with 0. + * + * @see lottie_animation_get_duration() + * @see lottie_animation_get_framerate() + * + * @ingroup Lottie_Animation + * @internal + */ +RLOTTIE_API size_t lottie_animation_get_totalframe(const Lottie_Animation *animation); + +/** + * @brief Returns default framerate of the Lottie resource. + * + * @param[in] animation Animation object. + * + * @return framerate of the Lottie resource + * + * @ingroup Lottie_Animation + * @internal + * + */ +RLOTTIE_API double lottie_animation_get_framerate(const Lottie_Animation *animation); + +/** + * @brief Get the render tree which contains the snapshot of the animation object + * at frame = @c frame_num, the content of the animation in that frame number. + * + * @param[in] animation Animation object. + * @param[in] frame_num Content corresponds to the @p frame_num needs to be drawn + * @param[in] width requested snapshot viewport width. + * @param[in] height requested snapshot viewport height. + * + * @return Animation snapshot tree. + * + * @note: User has to traverse the tree for rendering. + * + * @see LOTLayerNode + * @see LOTNode + * + * @ingroup Lottie_Animation + * @internal + */ +RLOTTIE_API const LOTLayerNode *lottie_animation_render_tree(Lottie_Animation *animation, size_t frame_num, size_t width, size_t height); + +/** + * @brief Maps position to frame number and returns it. + * + * @param[in] animation Animation object. + * @param[in] pos position in the range [ 0.0 .. 1.0 ]. + * + * @return mapped frame numbe in the range [ start_frame .. end_frame ]. + * @c 0 if the Lottie resource has no animation. + * + * + * @ingroup Lottie_Animation + * @internal + */ +RLOTTIE_API size_t lottie_animation_get_frame_at_pos(const Lottie_Animation *animation, float pos); + +/** + * @brief Request to render the content of the frame @p frame_num to buffer @p buffer. + * + * @param[in] animation Animation object. + * @param[in] frame_num the frame number needs to be rendered. + * @param[in] buffer surface buffer use for rendering. + * @param[in] width width of the surface + * @param[in] height height of the surface + * @param[in] bytes_per_line stride of the surface in bytes. + * + * + * @ingroup Lottie_Animation + * @internal + */ +RLOTTIE_API void lottie_animation_render(Lottie_Animation *animation, size_t frame_num, uint32_t *buffer, size_t width, size_t height, size_t bytes_per_line); + +/** + * @brief Request to render the content of the frame @p frame_num to buffer @p buffer asynchronously. + * + * @param[in] animation Animation object. + * @param[in] frame_num the frame number needs to be rendered. + * @param[in] buffer surface buffer use for rendering. + * @param[in] width width of the surface + * @param[in] height height of the surface + * @param[in] bytes_per_line stride of the surface in bytes. + * + * @note user must call lottie_animation_render_flush() to make sure render is finished. + * + * @ingroup Lottie_Animation + * @internal + */ +RLOTTIE_API void lottie_animation_render_async(Lottie_Animation *animation, size_t frame_num, uint32_t *buffer, size_t width, size_t height, size_t bytes_per_line); + +/** + * @brief Request to finish the current async renderer job for this animation object. + * If render is finished then this call returns immidiately. + * If not, it waits till render job finish and then return. + * + * @param[in] animation Animation object. 
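+ *
+ * A typical pairing, sketched:
+ * @code
+ * lottie_animation_render_async(animation, frame_num, buffer, width, height, bytes_per_line);
+ * // ... do other work while the frame renders ...
+ * uint32_t *pixels = lottie_animation_render_flush(animation);
+ * @endcode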
+ * + * @warning User must call lottie_animation_render_async() and lottie_animation_render_flush() + * in pair to get the benefit of async rendering. + * + * @return the pixel buffer it finished rendering. + * + * @ingroup Lottie_Animation + * @internal + */ +RLOTTIE_API uint32_t *lottie_animation_render_flush(Lottie_Animation *animation); + + +/** + * @brief Request to change the properties of this animation object. + * Keypath should conatin object names separated by (.) and can handle globe(**) or wildchar(*) + * + * @usage + * To change fillcolor property of fill1 object in the layer1->group1->fill1 hirarchy to RED color + * + * lottie_animation_property_override(animation, LOTTIE_ANIMATION_PROPERTY_FILLCOLOR, "layer1.group1.fill1", 1.0, 0.0, 0.0); + * + * if all the color property inside group1 needs to be changed to GREEN color + * + * lottie_animation_property_override(animation, LOTTIE_ANIMATION_PROPERTY_FILLCOLOR, "**.group1.**", 1.0, 0.0, 0.0); + * + * @param[in] animation Animation object. + * @param[in] type Property type. (@p Lottie_Animation_Property) + * @param[in] keypath Specific content of target. + * @param[in] ... Property values. + * + * @ingroup Lottie_Animation + * @internal + * */ +RLOTTIE_API void lottie_animation_property_override(Lottie_Animation *animation, const Lottie_Animation_Property type, const char *keypath, ...); + + +/** + * @brief Returns list of markers in the Lottie resource + * @p LOTMarkerList has a @p LOTMarker list and size of list + * @p LOTMarker has the marker's name, start frame, and end frame. + * + * @param[in] animation Animation object. + * + * @return The list of marker. If there is no marker, return null. + * + * @ingroup Lottie_Animation + * @internal + * */ +RLOTTIE_API const LOTMarkerList* lottie_animation_get_markerlist(Lottie_Animation *animation); + +/** + * @brief Configures rlottie model cache policy. + * + * Provides Library level control to configure model cache + * policy. Setting it to 0 will disable + * the cache as well as flush all the previously cached content. + * + * @param[in] cacheSize Maximum Model Cache size. + * + * @note to disable Caching configure with 0 size. + * @note to flush the current Cache content configure it with 0 and + * then reconfigure with the new size. + * + * @internal + */ +RLOTTIE_API void lottie_configure_model_cache_size(size_t cacheSize); + +#ifdef __cplusplus +} +#endif + +#endif //_RLOTTIE_CAPI_H_ + diff --git a/vendor/github.com/Benau/go_rlottie/rlottiecommon.h b/vendor/github.com/Benau/go_rlottie/rlottiecommon.h new file mode 100644 index 00000000..784fbe28 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/rlottiecommon.h @@ -0,0 +1,231 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _RLOTTIE_COMMON_H_ +#define _RLOTTIE_COMMON_H_ + +#if defined _WIN32 || defined __CYGWIN__ + #ifdef RLOTTIE_BUILD + #define RLOTTIE_API __declspec(dllexport) + #else + #define RLOTTIE_API __declspec(dllimport) + #endif +#else + #ifdef RLOTTIE_BUILD + #define RLOTTIE_API __attribute__ ((visibility ("default"))) + #else + #define RLOTTIE_API + #endif +#endif + + +/** + * @defgroup Lottie_Animation Lottie_Animation + * + * Lottie Animation is a modern style vector based animation design. Its animation + * resource(within json format) could be generated by Adobe After Effect using + * bodymovin plugin. You can find a good examples in Lottie Community which + * shares many free resources(see: www.lottiefiles.com). + * + * This Lottie_Animation is a common engine to manipulate, control Lottie + * Animation from the Lottie resource - json file. It provides a scene-graph + * node tree per frames by user demand as well as rasterized frame images. + * + */ + +/** + * @ingroup Lottie_Animation + */ + +typedef enum +{ + BrushSolid = 0, + BrushGradient +} LOTBrushType; + +typedef enum +{ + FillEvenOdd = 0, + FillWinding +} LOTFillRule; + +typedef enum +{ + JoinMiter = 0, + JoinBevel, + JoinRound +} LOTJoinStyle; + +typedef enum +{ + CapFlat = 0, + CapSquare, + CapRound +} LOTCapStyle; + +typedef enum +{ + GradientLinear = 0, + GradientRadial +} LOTGradientType; + +typedef struct LOTGradientStop +{ + float pos; + unsigned char r, g, b, a; +} LOTGradientStop; + +typedef enum +{ + MaskAdd = 0, + MaskSubstract, + MaskIntersect, + MaskDifference +} LOTMaskType; + +typedef struct LOTMask { + struct { + const float *ptPtr; + size_t ptCount; + const char* elmPtr; + size_t elmCount; + } mPath; + LOTMaskType mMode; + unsigned char mAlpha; +}LOTMask; + +typedef enum +{ + MatteNone = 0, + MatteAlpha, + MatteAlphaInv, + MatteLuma, + MatteLumaInv +} LOTMatteType; + +typedef struct LOTMarker { + char *name; + size_t startframe; + size_t endframe; +} LOTMarker; + +typedef struct LOTMarkerList { + LOTMarker *ptr; + size_t size; +} LOTMarkerList; + +typedef struct LOTNode { + +#define ChangeFlagNone 0x0000 +#define ChangeFlagPath 0x0001 +#define ChangeFlagPaint 0x0010 +#define ChangeFlagAll (ChangeFlagPath & ChangeFlagPaint) + + struct { + const float *ptPtr; + size_t ptCount; + const char *elmPtr; + size_t elmCount; + } mPath; + + struct { + unsigned char r, g, b, a; + } mColor; + + struct { + unsigned char enable; + float width; + LOTCapStyle cap; + LOTJoinStyle join; + float miterLimit; + float *dashArray; + int dashArraySize; + } mStroke; + + struct { + LOTGradientType type; + LOTGradientStop *stopPtr; + size_t stopCount; + struct { + float x, y; + } start, end, center, focal; + float cradius; + float fradius; + } mGradient; + + struct { + unsigned char *data; + size_t width; + size_t height; + unsigned char mAlpha; + struct { + float m11; float m12; float m13; + float m21; float m22; float m23; + float m31; float m32; float m33; + } mMatrix; + } mImageInfo; + + int mFlag; + LOTBrushType mBrushType; + LOTFillRule 
mFillRule;
+
+    const char  *keypath;
+} LOTNode;
+
+
+
+typedef struct LOTLayerNode {
+
+    struct {
+        LOTMask *ptr;
+        size_t   size;
+    } mMaskList;
+
+    struct {
+        const float *ptPtr;
+        size_t       ptCount;
+        const char  *elmPtr;
+        size_t       elmCount;
+    } mClipPath;
+
+    struct {
+        struct LOTLayerNode **ptr;
+        size_t                size;
+    } mLayerList;
+
+    struct {
+        LOTNode **ptr;
+        size_t    size;
+    } mNodeList;
+
+    LOTMatteType  mMatte;
+    int           mVisible;
+    unsigned char mAlpha;
+    const char   *keypath;
+
+} LOTLayerNode;
+
+/**
+ * @}
+ */
+
+#endif // _RLOTTIE_COMMON_H_
diff --git a/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_math.cpp b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_math.cpp
new file mode 100644
index 00000000..932f7ed4
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_math.cpp
@@ -0,0 +1,461 @@
+/***************************************************************************/
+/*                                                                         */
+/*  fttrigon.c                                                             */
+/*                                                                         */
+/*    FreeType trigonometric functions (body).                             */
+/*                                                                         */
+/*  Copyright 2001-2005, 2012-2013 by                                      */
+/*  David Turner, Robert Wilhelm, and Werner Lemberg.                      */
+/*                                                                         */
+/*  This file is part of the FreeType project, and may only be used,      */
+/*  modified, and distributed under the terms of the FreeType project     */
+/*  license, LICENSE.TXT. By continuing to use, modify, or distribute     */
+/*  this file you indicate that you have read the license and             */
+/*  understand and accept it fully.                                       */
+/*                                                                         */
+/***************************************************************************/
+
+#include "vector_freetype_v_ft_math.h"
+#include <math.h>
+
+// from https://github.com/chromium/chromium/blob/59afd8336009c9d97c22854c52e0382b62b3aa5e/third_party/abseil-cpp/absl/base/internal/bits.h
+
+#if defined(_MSC_VER)
+#include <intrin.h>
+static unsigned int __inline clz(unsigned int x) {
+    unsigned long r = 0;
+    if (x != 0)
+    {
+        _BitScanReverse(&r, x);
+    }
+    return r;
+}
+#define SW_FT_MSB(x) (clz(x))
+#elif defined(__GNUC__)
+#define SW_FT_MSB(x) (31 - __builtin_clz(x))
+#else
+/* Portable fallback: returns the index of the most significant set bit,
+   matching the MSVC and GCC branches above. */
+static unsigned int __inline clz(unsigned int x) {
+    unsigned int c = 0;
+    if (x & 0xFFFF0000) { x >>= 16; c += 16; }
+    if (x & 0x0000FF00) { x >>= 8;  c += 8;  }
+    if (x & 0x000000F0) { x >>= 4;  c += 4;  }
+    if (x & 0x0000000C) { x >>= 2;  c += 2;  }
+    if (x & 0x00000002) { c += 1; }
+    return c;
+}
+#define SW_FT_MSB(x) (clz(x))
+#endif
+
+
+
+
+
+#define SW_FT_PAD_FLOOR(x, n) ((x) & ~((n)-1))
+#define SW_FT_PAD_ROUND(x, n) SW_FT_PAD_FLOOR((x) + ((n) / 2), n)
+#define SW_FT_PAD_CEIL(x, n) SW_FT_PAD_FLOOR((x) + ((n)-1), n)
+
+#define SW_FT_BEGIN_STMNT do {
+#define SW_FT_END_STMNT \
+    }                   \
+    while (0)
+/* transfer sign leaving a positive number */
+#define SW_FT_MOVE_SIGN(x, s) \
+    SW_FT_BEGIN_STMNT         \
+    if (x < 0) {              \
+        x = -x;               \
+        s = -s;               \
+    }                         \
+    SW_FT_END_STMNT
+
+SW_FT_Long SW_FT_MulFix(SW_FT_Long a, SW_FT_Long b)
+{
+    SW_FT_Int  s = 1;
+    SW_FT_Long c;
+
+    SW_FT_MOVE_SIGN(a, s);
+    SW_FT_MOVE_SIGN(b, s);
+
+    c = (SW_FT_Long)(((SW_FT_Int64)a * b + 0x8000L) >> 16);
+
+    return (s > 0) ? c : -c;
+}
+
+SW_FT_Long SW_FT_MulDiv(SW_FT_Long a, SW_FT_Long b, SW_FT_Long c)
+{
+    SW_FT_Int  s = 1;
+    SW_FT_Long d;
+
+    SW_FT_MOVE_SIGN(a, s);
+    SW_FT_MOVE_SIGN(b, s);
+    SW_FT_MOVE_SIGN(c, s);
+
+    d = (SW_FT_Long)(c > 0 ? ((SW_FT_Int64)a * b + (c >> 1)) / c : 0x7FFFFFFFL);
+
+    return (s > 0) ? d : -d;
+}
+
+SW_FT_Long SW_FT_DivFix(SW_FT_Long a, SW_FT_Long b)
+{
+    SW_FT_Int  s = 1;
+    SW_FT_Long q;
+
+    SW_FT_MOVE_SIGN(a, s);
+    SW_FT_MOVE_SIGN(b, s);
+
+    q = (SW_FT_Long)(b > 0 ? (((SW_FT_UInt64)a << 16) + (b >> 1)) / b
+                           : 0x7FFFFFFFL);
+
+    return (s < 0 ?
-q : q); +} + +/*************************************************************************/ +/* */ +/* This is a fixed-point CORDIC implementation of trigonometric */ +/* functions as well as transformations between Cartesian and polar */ +/* coordinates. The angles are represented as 16.16 fixed-point values */ +/* in degrees, i.e., the angular resolution is 2^-16 degrees. Note that */ +/* only vectors longer than 2^16*180/pi (or at least 22 bits) on a */ +/* discrete Cartesian grid can have the same or better angular */ +/* resolution. Therefore, to maintain this precision, some functions */ +/* require an interim upscaling of the vectors, whereas others operate */ +/* with 24-bit long vectors directly. */ +/* */ +/*************************************************************************/ + +/* the Cordic shrink factor 0.858785336480436 * 2^32 */ +#define SW_FT_TRIG_SCALE 0xDBD95B16UL + +/* the highest bit in overflow-safe vector components, */ +/* MSB of 0.858785336480436 * sqrt(0.5) * 2^30 */ +#define SW_FT_TRIG_SAFE_MSB 29 + +/* this table was generated for SW_FT_PI = 180L << 16, i.e. degrees */ +#define SW_FT_TRIG_MAX_ITERS 23 + +static const SW_FT_Fixed ft_trig_arctan_table[] = { + 1740967L, 919879L, 466945L, 234379L, 117304L, 58666L, 29335L, 14668L, + 7334L, 3667L, 1833L, 917L, 458L, 229L, 115L, 57L, + 29L, 14L, 7L, 4L, 2L, 1L}; + +/* multiply a given value by the CORDIC shrink factor */ +static SW_FT_Fixed ft_trig_downscale(SW_FT_Fixed val) +{ + SW_FT_Fixed s; + SW_FT_Int64 v; + + s = val; + val = SW_FT_ABS(val); + + v = (val * (SW_FT_Int64)SW_FT_TRIG_SCALE) + 0x100000000UL; + val = (SW_FT_Fixed)(v >> 32); + + return (s >= 0) ? val : -val; +} + +/* undefined and never called for zero vector */ +static SW_FT_Int ft_trig_prenorm(SW_FT_Vector* vec) +{ + SW_FT_Pos x, y; + SW_FT_Int shift; + + x = vec->x; + y = vec->y; + + shift = SW_FT_MSB(SW_FT_ABS(x) | SW_FT_ABS(y)); + + if (shift <= SW_FT_TRIG_SAFE_MSB) { + shift = SW_FT_TRIG_SAFE_MSB - shift; + vec->x = (SW_FT_Pos)((SW_FT_ULong)x << shift); + vec->y = (SW_FT_Pos)((SW_FT_ULong)y << shift); + } else { + shift -= SW_FT_TRIG_SAFE_MSB; + vec->x = x >> shift; + vec->y = y >> shift; + shift = -shift; + } + + return shift; +} + +static void ft_trig_pseudo_rotate(SW_FT_Vector* vec, SW_FT_Angle theta) +{ + SW_FT_Int i; + SW_FT_Fixed x, y, xtemp, b; + const SW_FT_Fixed* arctanptr; + + x = vec->x; + y = vec->y; + + /* Rotate inside [-PI/4,PI/4] sector */ + while (theta < -SW_FT_ANGLE_PI4) { + xtemp = y; + y = -x; + x = xtemp; + theta += SW_FT_ANGLE_PI2; + } + + while (theta > SW_FT_ANGLE_PI4) { + xtemp = -y; + y = x; + x = xtemp; + theta -= SW_FT_ANGLE_PI2; + } + + arctanptr = ft_trig_arctan_table; + + /* Pseudorotations, with right shifts */ + for (i = 1, b = 1; i < SW_FT_TRIG_MAX_ITERS; b <<= 1, i++) { + SW_FT_Fixed v1 = ((y + b) >> i); + SW_FT_Fixed v2 = ((x + b) >> i); + if (theta < 0) { + xtemp = x + v1; + y = y - v2; + x = xtemp; + theta += *arctanptr++; + } else { + xtemp = x - v1; + y = y + v2; + x = xtemp; + theta -= *arctanptr++; + } + } + + vec->x = x; + vec->y = y; +} + +static void ft_trig_pseudo_polarize(SW_FT_Vector* vec) +{ + SW_FT_Angle theta; + SW_FT_Int i; + SW_FT_Fixed x, y, xtemp, b; + const SW_FT_Fixed* arctanptr; + + x = vec->x; + y = vec->y; + + /* Get the vector into [-PI/4,PI/4] sector */ + if (y > x) { + if (y > -x) { + theta = SW_FT_ANGLE_PI2; + xtemp = y; + y = -x; + x = xtemp; + } else { + theta = y > 0 ? 
SW_FT_ANGLE_PI : -SW_FT_ANGLE_PI; + x = -x; + y = -y; + } + } else { + if (y < -x) { + theta = -SW_FT_ANGLE_PI2; + xtemp = -y; + y = x; + x = xtemp; + } else { + theta = 0; + } + } + + arctanptr = ft_trig_arctan_table; + + /* Pseudorotations, with right shifts */ + for (i = 1, b = 1; i < SW_FT_TRIG_MAX_ITERS; b <<= 1, i++) { + SW_FT_Fixed v1 = ((y + b) >> i); + SW_FT_Fixed v2 = ((x + b) >> i); + if (y > 0) { + xtemp = x + v1; + y = y - v2; + x = xtemp; + theta += *arctanptr++; + } else { + xtemp = x - v1; + y = y + v2; + x = xtemp; + theta -= *arctanptr++; + } + } + + /* round theta */ + if (theta >= 0) + theta = SW_FT_PAD_ROUND(theta, 32); + else + theta = -SW_FT_PAD_ROUND(-theta, 32); + + vec->x = x; + vec->y = theta; +} + +/* documentation is in fttrigon.h */ + +SW_FT_Fixed SW_FT_Cos(SW_FT_Angle angle) +{ + SW_FT_Vector v; + + v.x = SW_FT_TRIG_SCALE >> 8; + v.y = 0; + ft_trig_pseudo_rotate(&v, angle); + + return (v.x + 0x80L) >> 8; +} + +/* documentation is in fttrigon.h */ + +SW_FT_Fixed SW_FT_Sin(SW_FT_Angle angle) +{ + return SW_FT_Cos(SW_FT_ANGLE_PI2 - angle); +} + +/* documentation is in fttrigon.h */ + +SW_FT_Fixed SW_FT_Tan(SW_FT_Angle angle) +{ + SW_FT_Vector v; + + v.x = SW_FT_TRIG_SCALE >> 8; + v.y = 0; + ft_trig_pseudo_rotate(&v, angle); + + return SW_FT_DivFix(v.y, v.x); +} + +/* documentation is in fttrigon.h */ + +SW_FT_Angle SW_FT_Atan2(SW_FT_Fixed dx, SW_FT_Fixed dy) +{ + SW_FT_Vector v; + + if (dx == 0 && dy == 0) return 0; + + v.x = dx; + v.y = dy; + ft_trig_prenorm(&v); + ft_trig_pseudo_polarize(&v); + + return v.y; +} + +/* documentation is in fttrigon.h */ + +void SW_FT_Vector_Unit(SW_FT_Vector* vec, SW_FT_Angle angle) +{ + vec->x = SW_FT_TRIG_SCALE >> 8; + vec->y = 0; + ft_trig_pseudo_rotate(vec, angle); + vec->x = (vec->x + 0x80L) >> 8; + vec->y = (vec->y + 0x80L) >> 8; +} + +/* these macros return 0 for positive numbers, + and -1 for negative ones */ +#define SW_FT_SIGN_LONG(x) ((x) >> (SW_FT_SIZEOF_LONG * 8 - 1)) +#define SW_FT_SIGN_INT(x) ((x) >> (SW_FT_SIZEOF_INT * 8 - 1)) +#define SW_FT_SIGN_INT32(x) ((x) >> 31) +#define SW_FT_SIGN_INT16(x) ((x) >> 15) + +/* documentation is in fttrigon.h */ + +void SW_FT_Vector_Rotate(SW_FT_Vector* vec, SW_FT_Angle angle) +{ + SW_FT_Int shift; + SW_FT_Vector v; + + v.x = vec->x; + v.y = vec->y; + + if (angle && (v.x != 0 || v.y != 0)) { + shift = ft_trig_prenorm(&v); + ft_trig_pseudo_rotate(&v, angle); + v.x = ft_trig_downscale(v.x); + v.y = ft_trig_downscale(v.y); + + if (shift > 0) { + SW_FT_Int32 half = (SW_FT_Int32)1L << (shift - 1); + + vec->x = (v.x + half + SW_FT_SIGN_LONG(v.x)) >> shift; + vec->y = (v.y + half + SW_FT_SIGN_LONG(v.y)) >> shift; + } else { + shift = -shift; + vec->x = (SW_FT_Pos)((SW_FT_ULong)v.x << shift); + vec->y = (SW_FT_Pos)((SW_FT_ULong)v.y << shift); + } + } +} + +/* documentation is in fttrigon.h */ + +SW_FT_Fixed SW_FT_Vector_Length(SW_FT_Vector* vec) +{ + SW_FT_Int shift; + SW_FT_Vector v; + + v = *vec; + + /* handle trivial cases */ + if (v.x == 0) { + return SW_FT_ABS(v.y); + } else if (v.y == 0) { + return SW_FT_ABS(v.x); + } + + /* general case */ + shift = ft_trig_prenorm(&v); + ft_trig_pseudo_polarize(&v); + + v.x = ft_trig_downscale(v.x); + + if (shift > 0) return (v.x + (1 << (shift - 1))) >> shift; + + return (SW_FT_Fixed)((SW_FT_UInt32)v.x << -shift); +} + +/* documentation is in fttrigon.h */ + +void SW_FT_Vector_Polarize(SW_FT_Vector* vec, SW_FT_Fixed* length, + SW_FT_Angle* angle) +{ + SW_FT_Int shift; + SW_FT_Vector v; + + v = *vec; + + if (v.x == 0 && v.y == 0) return; + + 
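+    /* ft_trig_prenorm scales the vector so its larger component sits  */
+    /* near bit 29, preserving precision through the CORDIC iterations */
+    /* below; ft_trig_pseudo_polarize then rotates it onto the x axis, */
+    /* leaving the gain-scaled length in v.x and the angle in v.y, and */
+    /* ft_trig_downscale multiplies the length by 0.858785.. (1/gain), */
+    /* after which the final shift undoes the prenorm scaling.         */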
shift = ft_trig_prenorm(&v); + ft_trig_pseudo_polarize(&v); + + v.x = ft_trig_downscale(v.x); + + *length = (shift >= 0) ? (v.x >> shift) + : (SW_FT_Fixed)((SW_FT_UInt32)v.x << -shift); + *angle = v.y; +} + +/* documentation is in fttrigon.h */ + +void SW_FT_Vector_From_Polar(SW_FT_Vector* vec, SW_FT_Fixed length, + SW_FT_Angle angle) +{ + vec->x = length; + vec->y = 0; + + SW_FT_Vector_Rotate(vec, angle); +} + +/* documentation is in fttrigon.h */ + +SW_FT_Angle SW_FT_Angle_Diff( SW_FT_Angle angle1, SW_FT_Angle angle2 ) +{ + SW_FT_Angle delta = angle2 - angle1; + + while ( delta <= -SW_FT_ANGLE_PI ) + delta += SW_FT_ANGLE_2PI; + + while ( delta > SW_FT_ANGLE_PI ) + delta -= SW_FT_ANGLE_2PI; + + return delta; +} + +/* END */ diff --git a/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_math.h b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_math.h new file mode 100644 index 00000000..0405c05b --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_math.h @@ -0,0 +1,438 @@ +#ifndef V_FT_MATH_H +#define V_FT_MATH_H + +/***************************************************************************/ +/* */ +/* fttrigon.h */ +/* */ +/* FreeType trigonometric functions (specification). */ +/* */ +/* Copyright 2001, 2003, 2005, 2007, 2013 by */ +/* David Turner, Robert Wilhelm, and Werner Lemberg. */ +/* */ +/* This file is part of the FreeType project, and may only be used, */ +/* modified, and distributed under the terms of the FreeType project */ +/* license, LICENSE.TXT. By continuing to use, modify, or distribute */ +/* this file you indicate that you have read the license and */ +/* understand and accept it fully. */ +/* */ +/***************************************************************************/ + +#include "vector_freetype_v_ft_types.h" + + +/*************************************************************************/ +/* */ +/* The min and max functions missing in C. As usual, be careful not to */ +/* write things like SW_FT_MIN( a++, b++ ) to avoid side effects. */ +/* */ +#define SW_FT_MIN( a, b ) ( (a) < (b) ? (a) : (b) ) +#define SW_FT_MAX( a, b ) ( (a) > (b) ? (a) : (b) ) + +#define SW_FT_ABS( a ) ( (a) < 0 ? -(a) : (a) ) + +/* + * Approximate sqrt(x*x+y*y) using the `alpha max plus beta min' + * algorithm. We use alpha = 1, beta = 3/8, giving us results with a + * largest error less than 7% compared to the exact value. + */ +#define SW_FT_HYPOT( x, y ) \ + ( x = SW_FT_ABS( x ), \ + y = SW_FT_ABS( y ), \ + x > y ? x + ( 3 * y >> 3 ) \ + : y + ( 3 * x >> 3 ) ) + +/*************************************************************************/ +/* */ +/* */ +/* SW_FT_MulFix */ +/* */ +/* */ +/* A very simple function used to perform the computation */ +/* `(a*b)/0x10000' with maximum accuracy. Most of the time this is */ +/* used to multiply a given value by a 16.16 fixed-point factor. */ +/* */ +/* */ +/* a :: The first multiplier. */ +/* b :: The second multiplier. Use a 16.16 factor here whenever */ +/* possible (see note below). */ +/* */ +/* */ +/* The result of `(a*b)/0x10000'. */ +/* */ +/* */ +/* This function has been optimized for the case where the absolute */ +/* value of `a' is less than 2048, and `b' is a 16.16 scaling factor. */ +/* As this happens mainly when scaling from notional units to */ +/* fractional pixels in FreeType, it resulted in noticeable speed */ +/* improvements between versions 2.x and 1.x. 
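A quick standalone illustration of the 16.16 arithmetic documented here (not part of the vendored file; for positive operands, SW_FT_MulFix and SW_FT_DivFix reduce to exactly these expressions):

    #include <stdio.h>

    int main(void)
    {
        /* 16.16 fixed point: an integer x represents x / 65536.0 */
        long a = 3L << 16;              /* 3.0  */
        long b = (1L << 16) / 4;        /* 0.25 */

        /* SW_FT_MulFix core: (a*b)/0x10000, rounded */
        long p = (long)(((long long)a * b + 0x8000L) >> 16);
        /* SW_FT_DivFix core: (a*0x10000)/b, rounded */
        long q = (long)((((long long)a << 16) + (b >> 1)) / b);

        printf("%.4f\n", p / 65536.0);  /* 0.7500  = 3.0 * 0.25 */
        printf("%.4f\n", q / 65536.0);  /* 12.0000 = 3.0 / 0.25 */
        return 0;
    }

The 64-bit intermediate matters: 3.0 * 0.25 already needs 33 bits before the final shift, which is why the vendored code widens to SW_FT_Int64 first.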
*/ +/* */ +/* As a conclusion, always try to place a 16.16 factor as the */ +/* _second_ argument of this function; this can make a great */ +/* difference. */ +/* */ +SW_FT_Long +SW_FT_MulFix( SW_FT_Long a, + SW_FT_Long b ); + +/*************************************************************************/ +/* */ +/* */ +/* SW_FT_MulDiv */ +/* */ +/* */ +/* A very simple function used to perform the computation `(a*b)/c' */ +/* with maximum accuracy (it uses a 64-bit intermediate integer */ +/* whenever necessary). */ +/* */ +/* This function isn't necessarily as fast as some processor specific */ +/* operations, but is at least completely portable. */ +/* */ +/* */ +/* a :: The first multiplier. */ +/* b :: The second multiplier. */ +/* c :: The divisor. */ +/* */ +/* */ +/* The result of `(a*b)/c'. This function never traps when trying to */ +/* divide by zero; it simply returns `MaxInt' or `MinInt' depending */ +/* on the signs of `a' and `b'. */ +/* */ +SW_FT_Long +SW_FT_MulDiv( SW_FT_Long a, + SW_FT_Long b, + SW_FT_Long c ); + +/*************************************************************************/ +/* */ +/* */ +/* SW_FT_DivFix */ +/* */ +/* */ +/* A very simple function used to perform the computation */ +/* `(a*0x10000)/b' with maximum accuracy. Most of the time, this is */ +/* used to divide a given value by a 16.16 fixed-point factor. */ +/* */ +/* */ +/* a :: The numerator. */ +/* b :: The denominator. Use a 16.16 factor here. */ +/* */ +/* */ +/* The result of `(a*0x10000)/b'. */ +/* */ +SW_FT_Long +SW_FT_DivFix( SW_FT_Long a, + SW_FT_Long b ); + + + + /*************************************************************************/ + /* */ + /*
*/ + /* computations */ + /* */ + /*************************************************************************/ + + + /************************************************************************* + * + * @type: + * SW_FT_Angle + * + * @description: + * This type is used to model angle values in FreeType. Note that the + * angle is a 16.16 fixed-point value expressed in degrees. + * + */ + typedef SW_FT_Fixed SW_FT_Angle; + + + /************************************************************************* + * + * @macro: + * SW_FT_ANGLE_PI + * + * @description: + * The angle pi expressed in @SW_FT_Angle units. + * + */ +#define SW_FT_ANGLE_PI ( 180L << 16 ) + + + /************************************************************************* + * + * @macro: + * SW_FT_ANGLE_2PI + * + * @description: + * The angle 2*pi expressed in @SW_FT_Angle units. + * + */ +#define SW_FT_ANGLE_2PI ( SW_FT_ANGLE_PI * 2 ) + + + /************************************************************************* + * + * @macro: + * SW_FT_ANGLE_PI2 + * + * @description: + * The angle pi/2 expressed in @SW_FT_Angle units. + * + */ +#define SW_FT_ANGLE_PI2 ( SW_FT_ANGLE_PI / 2 ) + + + /************************************************************************* + * + * @macro: + * SW_FT_ANGLE_PI4 + * + * @description: + * The angle pi/4 expressed in @SW_FT_Angle units. + * + */ +#define SW_FT_ANGLE_PI4 ( SW_FT_ANGLE_PI / 4 ) + + + /************************************************************************* + * + * @function: + * SW_FT_Sin + * + * @description: + * Return the sinus of a given angle in fixed-point format. + * + * @input: + * angle :: + * The input angle. + * + * @return: + * The sinus value. + * + * @note: + * If you need both the sinus and cosinus for a given angle, use the + * function @SW_FT_Vector_Unit. + * + */ + SW_FT_Fixed + SW_FT_Sin( SW_FT_Angle angle ); + + + /************************************************************************* + * + * @function: + * SW_FT_Cos + * + * @description: + * Return the cosinus of a given angle in fixed-point format. + * + * @input: + * angle :: + * The input angle. + * + * @return: + * The cosinus value. + * + * @note: + * If you need both the sinus and cosinus for a given angle, use the + * function @SW_FT_Vector_Unit. + * + */ + SW_FT_Fixed + SW_FT_Cos( SW_FT_Angle angle ); + + + /************************************************************************* + * + * @function: + * SW_FT_Tan + * + * @description: + * Return the tangent of a given angle in fixed-point format. + * + * @input: + * angle :: + * The input angle. + * + * @return: + * The tangent value. + * + */ + SW_FT_Fixed + SW_FT_Tan( SW_FT_Angle angle ); + + + /************************************************************************* + * + * @function: + * SW_FT_Atan2 + * + * @description: + * Return the arc-tangent corresponding to a given vector (x,y) in + * the 2d plane. + * + * @input: + * x :: + * The horizontal vector coordinate. + * + * y :: + * The vertical vector coordinate. + * + * @return: + * The arc-tangent value (i.e. angle). + * + */ + SW_FT_Angle + SW_FT_Atan2( SW_FT_Fixed x, + SW_FT_Fixed y ); + + + /************************************************************************* + * + * @function: + * SW_FT_Angle_Diff + * + * @description: + * Return the difference between two angles. The result is always + * constrained to the ]-PI..PI] interval. + * + * @input: + * angle1 :: + * First angle. + * + * angle2 :: + * Second angle. + * + * @return: + * Constrained value of `value2-value1'. 
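To make the degree convention concrete before the declaration that follows, here is a standalone sketch of exactly the wrap SW_FT_Angle_Diff performs (the local pi mirrors SW_FT_ANGLE_PI above; not part of the vendored file):

    #include <stdio.h>

    int main(void)
    {
        long pi    = 180L << 16;   /* SW_FT_ANGLE_PI: degrees in 16.16 */
        long twopi = pi * 2;

        /* Angle_Diff(350 deg, 10 deg): wrap raw -340 deg into ]-180, 180] */
        long delta = (10L << 16) - (350L << 16);
        while (delta <= -pi) delta += twopi;
        while (delta >   pi) delta -= twopi;

        printf("%.2f\n", delta / 65536.0);  /* prints 20.00 */
        return 0;
    }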
+   *
+   */
+  SW_FT_Angle
+  SW_FT_Angle_Diff( SW_FT_Angle  angle1,
+                    SW_FT_Angle  angle2 );
+
+
+  /*************************************************************************
+   *
+   * @function:
+   *   SW_FT_Vector_Unit
+   *
+   * @description:
+   *   Return the unit vector corresponding to a given angle.  After the
+   *   call, the value of `vec.x' will be `cos(angle)', and the value of
+   *   `vec.y' will be `sin(angle)'.
+   *
+   *   This function is useful to retrieve both the sinus and cosinus of a
+   *   given angle quickly.
+   *
+   * @output:
+   *   vec ::
+   *     The address of target vector.
+   *
+   * @input:
+   *   angle ::
+   *     The input angle.
+   *
+   */
+  void
+  SW_FT_Vector_Unit( SW_FT_Vector*  vec,
+                     SW_FT_Angle    angle );
+
+
+  /*************************************************************************
+   *
+   * @function:
+   *   SW_FT_Vector_Rotate
+   *
+   * @description:
+   *   Rotate a vector by a given angle.
+   *
+   * @inout:
+   *   vec ::
+   *     The address of target vector.
+   *
+   * @input:
+   *   angle ::
+   *     The input angle.
+   *
+   */
+  void
+  SW_FT_Vector_Rotate( SW_FT_Vector*  vec,
+                       SW_FT_Angle    angle );
+
+
+  /*************************************************************************
+   *
+   * @function:
+   *   SW_FT_Vector_Length
+   *
+   * @description:
+   *   Return the length of a given vector.
+   *
+   * @input:
+   *   vec ::
+   *     The address of target vector.
+   *
+   * @return:
+   *   The vector length, expressed in the same units that the original
+   *   vector coordinates.
+   *
+   */
+  SW_FT_Fixed
+  SW_FT_Vector_Length( SW_FT_Vector*  vec );
+
+
+  /*************************************************************************
+   *
+   * @function:
+   *   SW_FT_Vector_Polarize
+   *
+   * @description:
+   *   Compute both the length and angle of a given vector.
+   *
+   * @input:
+   *   vec ::
+   *     The address of source vector.
+   *
+   * @output:
+   *   length ::
+   *     The vector length.
+   *
+   *   angle ::
+   *     The vector angle.
+   *
+   */
+  void
+  SW_FT_Vector_Polarize( SW_FT_Vector*  vec,
+                         SW_FT_Fixed   *length,
+                         SW_FT_Angle   *angle );
+
+
+  /*************************************************************************
+   *
+   * @function:
+   *   SW_FT_Vector_From_Polar
+   *
+   * @description:
+   *   Compute vector coordinates from a length and angle.
+   *
+   * @output:
+   *   vec ::
+   *     The address of source vector.
+   *
+   * @input:
+   *   length ::
+   *     The vector length.
+   *
+   *   angle ::
+   *     The vector angle.
+   *
+   */
+  void
+  SW_FT_Vector_From_Polar( SW_FT_Vector*  vec,
+                           SW_FT_Fixed    length,
+                           SW_FT_Angle    angle );
+
+
+#endif // V_FT_MATH_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_raster.cpp b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_raster.cpp
new file mode 100644
index 00000000..01f2f23b
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_raster.cpp
@@ -0,0 +1,1423 @@
+/***************************************************************************/
+/*                                                                         */
+/*  ftgrays.c                                                              */
+/*                                                                         */
+/*    A new `perfect' anti-aliasing renderer (body).                       */
+/*                                                                         */
+/*  Copyright 2000-2003, 2005-2014 by                                      */
+/*  David Turner, Robert Wilhelm, and Werner Lemberg.                      */
+/*                                                                         */
+/*  This file is part of the FreeType project, and may only be used,       */
+/*  modified, and distributed under the terms of the FreeType project      */
+/*  license, LICENSE.TXT.  By continuing to use, modify, or distribute     */
+/*  this file you indicate that you have read the license and              */
+/*  understand and accept it fully.                                        */
+/*                                                                         */
+/***************************************************************************/
+
+/*************************************************************************/
+/*                                                                       */
+/* This is a new anti-aliasing scan-converter for FreeType 2.  The      */
+/* algorithm used here is _very_ different from the one in the standard */
+/* `ftraster' module.  Actually, `ftgrays' computes the _exact_         */
+/* coverage of the outline on each pixel cell.                          */
+/*                                                                       */
+/* It is based on ideas that I initially found in Raph Levien's         */
+/* excellent LibArt graphics library (see http://www.levien.com/libart  */
+/* for more information, though the web pages do not tell anything      */
+/* about the renderer; you'll have to dive into the source code to      */
+/* understand how it works).                                            */
+/*                                                                       */
+/* Note, however, that this is a _very_ different implementation        */
+/* compared to Raph's.  Coverage information is stored in a very        */
+/* different way, and I don't use sorted vector paths.  Also, it        */
+/* doesn't use floating point values.                                   */
+/*                                                                       */
+/* This renderer has the following advantages:                          */
+/*                                                                       */
+/* - It doesn't need an intermediate bitmap.  Instead, one can supply   */
+/*   a callback function that will be called by the renderer to draw    */
+/*   gray spans on any target surface.  You can thus do direct          */
+/*   composition on any kind of bitmap, provided that you give the      */
+/*   renderer the right callback.                                       */
+/*                                                                       */
+/* - A perfect anti-aliaser, i.e., it computes the _exact_ coverage on  */
+/*   each pixel cell.                                                   */
+/*                                                                       */
+/* - It performs a single pass on the outline (the `standard' FT2       */
+/*   renderer makes two passes).                                        */
+/*                                                                       */
+/* - It can easily be modified to render to _any_ number of gray levels */
+/*   cheaply.                                                           */
+/*                                                                       */
+/* - For small (< 20) pixel sizes, it is faster than the standard       */
+/*   renderer.                                                          */
+/*                                                                       */
+/*************************************************************************/
+
+#include "vector_freetype_v_ft_raster.h"
+#include "vector_freetype_v_ft_math.h"
+
+/* Auxiliary macros for token concatenation. */
+#define SW_FT_ERR_XCAT(x, y) x##y
+#define SW_FT_ERR_CAT(x, y) SW_FT_ERR_XCAT(x, y)
+
+#define SW_FT_BEGIN_STMNT do {
+#define SW_FT_END_STMNT \
+    }                   \
+    while (0)
+
+#include <limits.h>
+#include <setjmp.h>
+#include <stddef.h>
+#include <string.h>
+#define SW_FT_UINT_MAX UINT_MAX
+#define SW_FT_INT_MAX INT_MAX
+#define SW_FT_ULONG_MAX ULONG_MAX
+#define SW_FT_CHAR_BIT CHAR_BIT
+
+#define ft_memset memset
+
+#define ft_setjmp setjmp
+#define ft_longjmp longjmp
+#define ft_jmp_buf jmp_buf
+
+typedef ptrdiff_t SW_FT_PtrDist;
+
+#define ErrRaster_Invalid_Mode -2
+#define ErrRaster_Invalid_Outline -1
+#define ErrRaster_Invalid_Argument -3
+#define ErrRaster_Memory_Overflow -4
+
+#define SW_FT_BEGIN_HEADER
+#define SW_FT_END_HEADER
+
+/* This macro is used to indicate that a function parameter is unused. */
+/* Its purpose is simply to reduce compiler warnings.  Note also that  */
+/* simply defining it as `(void)x' doesn't avoid warnings with certain */
+/* ANSI compilers (e.g. LCC).                                          */
+#define SW_FT_UNUSED(x) (x) = (x)
+
+#define SW_FT_THROW(e) SW_FT_ERR_CAT(ErrRaster_, e)
+
+/* The size in bytes of the render pool used by the scan-line converter */
+/* to do all of its work.
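Note the setjmp/longjmp plumbing fixed just above: the raster renders a whole band optimistically and bails out non-locally when its cell pool is exhausted. A minimal standalone sketch of that pattern (illustrative names only; in this file the jump is taken in gray_find_cell and caught in gray_convert_glyph_inner):

    #include <setjmp.h>
    #include <stdio.h>

    static jmp_buf pool_jump;
    static int cells_used, cells_max = 4;

    /* grab one pool slot, or abandon the band */
    static void alloc_cell(void)
    {
        if (cells_used >= cells_max) longjmp(pool_jump, 1);
        cells_used++;
    }

    int main(void)
    {
        if (setjmp(pool_jump) == 0) {
            for (int i = 0; i < 10; i++) alloc_cell();
            puts("band rendered");
        } else {
            puts("pool overflow: reduce band and retry");  /* taken here */
        }
        return 0;
    }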
*/ +#define SW_FT_RENDER_POOL_SIZE 16384L + +typedef int (*SW_FT_Outline_MoveToFunc)(const SW_FT_Vector* to, void* user); + +#define SW_FT_Outline_MoveTo_Func SW_FT_Outline_MoveToFunc + +typedef int (*SW_FT_Outline_LineToFunc)(const SW_FT_Vector* to, void* user); + +#define SW_FT_Outline_LineTo_Func SW_FT_Outline_LineToFunc + +typedef int (*SW_FT_Outline_ConicToFunc)(const SW_FT_Vector* control, + const SW_FT_Vector* to, void* user); + +#define SW_FT_Outline_ConicTo_Func SW_FT_Outline_ConicToFunc + +typedef int (*SW_FT_Outline_CubicToFunc)(const SW_FT_Vector* control1, + const SW_FT_Vector* control2, + const SW_FT_Vector* to, void* user); + +#define SW_FT_Outline_CubicTo_Func SW_FT_Outline_CubicToFunc + +typedef struct SW_FT_Outline_Funcs_ { + SW_FT_Outline_MoveToFunc move_to; + SW_FT_Outline_LineToFunc line_to; + SW_FT_Outline_ConicToFunc conic_to; + SW_FT_Outline_CubicToFunc cubic_to; + + int shift; + SW_FT_Pos delta; + +} SW_FT_Outline_Funcs; + +#define SW_FT_DEFINE_OUTLINE_FUNCS(class_, move_to_, line_to_, conic_to_, \ + cubic_to_, shift_, delta_) \ + static const SW_FT_Outline_Funcs class_ = {move_to_, line_to_, conic_to_, \ + cubic_to_, shift_, delta_}; + +#define SW_FT_DEFINE_RASTER_FUNCS(class_, raster_new_, raster_reset_, \ + raster_render_, raster_done_) \ + const SW_FT_Raster_Funcs class_ = {raster_new_, raster_reset_, \ + raster_render_, raster_done_}; + +#ifndef SW_FT_MEM_SET +#define SW_FT_MEM_SET(d, s, c) ft_memset(d, s, c) +#endif + +#ifndef SW_FT_MEM_ZERO +#define SW_FT_MEM_ZERO(dest, count) SW_FT_MEM_SET(dest, 0, count) +#endif + +/* as usual, for the speed hungry :-) */ + +#undef RAS_ARG +#undef RAS_ARG_ +#undef RAS_VAR +#undef RAS_VAR_ + +#ifndef SW_FT_STATIC_RASTER + +#define RAS_ARG gray_PWorker worker +#define RAS_ARG_ gray_PWorker worker, + +#define RAS_VAR worker +#define RAS_VAR_ worker, + +#else /* SW_FT_STATIC_RASTER */ + +#define RAS_ARG /* empty */ +#define RAS_ARG_ /* empty */ +#define RAS_VAR /* empty */ +#define RAS_VAR_ /* empty */ + +#endif /* SW_FT_STATIC_RASTER */ + +/* must be at least 6 bits! */ +#define PIXEL_BITS 8 + +#undef FLOOR +#undef CEILING +#undef TRUNC +#undef SCALED + +#define ONE_PIXEL (1L << PIXEL_BITS) +#define PIXEL_MASK (-1L << PIXEL_BITS) +#define TRUNC(x) ((TCoord)((x) >> PIXEL_BITS)) +#define SUBPIXELS(x) ((TPos)(x) << PIXEL_BITS) +#define FLOOR(x) ((x) & -ONE_PIXEL) +#define CEILING(x) (((x) + ONE_PIXEL - 1) & -ONE_PIXEL) +#define ROUND(x) (((x) + ONE_PIXEL / 2) & -ONE_PIXEL) + +#if PIXEL_BITS >= 6 +#define UPSCALE(x) ((x) << (PIXEL_BITS - 6)) +#define DOWNSCALE(x) ((x) >> (PIXEL_BITS - 6)) +#else +#define UPSCALE(x) ((x) >> (6 - PIXEL_BITS)) +#define DOWNSCALE(x) ((x) << (6 - PIXEL_BITS)) +#endif + +/* Compute `dividend / divisor' and return both its quotient and */ +/* remainder, cast to a specific type. This macro also ensures that */ +/* the remainder is always positive. */ +#define SW_FT_DIV_MOD(type, dividend, divisor, quotient, remainder) \ + SW_FT_BEGIN_STMNT(quotient) = (type)((dividend) / (divisor)); \ + (remainder) = (type)((dividend) % (divisor)); \ + if ((remainder) < 0) { \ + (quotient)--; \ + (remainder) += (type)(divisor); \ + } \ + SW_FT_END_STMNT + +#ifdef __arm__ +/* Work around a bug specific to GCC which make the compiler fail to */ +/* optimize a division and modulo operation on the same parameters */ +/* into a single call to `__aeabi_idivmod'. 
See */ +/* */ +/* http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43721 */ +#undef SW_FT_DIV_MOD +#define SW_FT_DIV_MOD(type, dividend, divisor, quotient, remainder) \ + SW_FT_BEGIN_STMNT(quotient) = (type)((dividend) / (divisor)); \ + (remainder) = (type)((dividend) - (quotient) * (divisor)); \ + if ((remainder) < 0) { \ + (quotient)--; \ + (remainder) += (type)(divisor); \ + } \ + SW_FT_END_STMNT +#endif /* __arm__ */ + +/* These macros speed up repetitive divisions by replacing them */ +/* with multiplications and right shifts. */ +#define SW_FT_UDIVPREP(b) \ + long b##_r = (long)(SW_FT_ULONG_MAX >> PIXEL_BITS) / (b) +#define SW_FT_UDIV(a, b) \ + (((unsigned long)(a) * (unsigned long)(b##_r)) >> \ + (sizeof(long) * SW_FT_CHAR_BIT - PIXEL_BITS)) + +/*************************************************************************/ +/* */ +/* TYPE DEFINITIONS */ +/* */ + +/* don't change the following types to SW_FT_Int or SW_FT_Pos, since we might */ +/* need to define them to "float" or "double" when experimenting with */ +/* new algorithms */ + +typedef long TCoord; /* integer scanline/pixel coordinate */ +typedef long TPos; /* sub-pixel coordinate */ + +/* determine the type used to store cell areas. This normally takes at */ +/* least PIXEL_BITS*2 + 1 bits. On 16-bit systems, we need to use */ +/* `long' instead of `int', otherwise bad things happen */ + +#if PIXEL_BITS <= 7 + +typedef int TArea; + +#else /* PIXEL_BITS >= 8 */ + +/* approximately determine the size of integers using an ANSI-C header */ +#if SW_FT_UINT_MAX == 0xFFFFU +typedef long TArea; +#else +typedef int TArea; +#endif + +#endif /* PIXEL_BITS >= 8 */ + +/* maximum number of gray spans in a call to the span callback */ +#define SW_FT_MAX_GRAY_SPANS 256 + +typedef struct TCell_* PCell; + +typedef struct TCell_ { + TPos x; /* same with gray_TWorker.ex */ + TCoord cover; /* same with gray_TWorker.cover */ + TArea area; + PCell next; + +} TCell; + +#if defined(_MSC_VER) /* Visual C++ (and Intel C++) */ +/* We disable the warning `structure was padded due to */ +/* __declspec(align())' in order to compile cleanly with */ +/* the maximum level of warnings. */ +#pragma warning(push) +#pragma warning(disable : 4324) +#endif /* _MSC_VER */ + +typedef struct gray_TWorker_ { + TCoord ex, ey; + TPos min_ex, max_ex; + TPos min_ey, max_ey; + TPos count_ex, count_ey; + + TArea area; + TCoord cover; + int invalid; + + PCell cells; + SW_FT_PtrDist max_cells; + SW_FT_PtrDist num_cells; + + TPos x, y; + + SW_FT_Vector bez_stack[32 * 3 + 1]; + int lev_stack[32]; + + SW_FT_Outline outline; + SW_FT_BBox clip_box; + + int bound_left; + int bound_top; + int bound_right; + int bound_bottom; + + SW_FT_Span gray_spans[SW_FT_MAX_GRAY_SPANS]; + int num_gray_spans; + + SW_FT_Raster_Span_Func render_span; + void* render_span_data; + + int band_size; + int band_shoot; + + ft_jmp_buf jump_buffer; + + void* buffer; + long buffer_size; + + PCell* ycells; + TPos ycount; + +} gray_TWorker, *gray_PWorker; + +#if defined(_MSC_VER) +#pragma warning(pop) +#endif + +#ifndef SW_FT_STATIC_RASTER +#define ras (*worker) +#else +static gray_TWorker ras; +#endif + +typedef struct gray_TRaster_ { + void* memory; + +} gray_TRaster, *gray_PRaster; + +/*************************************************************************/ +/* */ +/* Initialize the cells table. 
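Before the cell routines, a worked example of the subpixel conventions fixed by the macros above (standalone sketch; the macro bodies are copied from this file with PIXEL_BITS = 8):

    #include <stdio.h>

    #define PIXEL_BITS 8
    #define ONE_PIXEL  (1L << PIXEL_BITS)
    #define TRUNC(x)   ((long)((x) >> PIXEL_BITS))
    #define UPSCALE(x) ((x) << (PIXEL_BITS - 6))   /* 26.6 -> 24.8 */

    int main(void)
    {
        long x26_6 = 96;              /* 1.5 pixels in FreeType 26.6 units */
        long x     = UPSCALE(x26_6);  /* 384 = 1.5 * ONE_PIXEL             */
        printf("cell = %ld, frac = %ld/%ld\n",
               TRUNC(x), x - (TRUNC(x) << PIXEL_BITS), ONE_PIXEL);
        /* prints: cell = 1, frac = 128/256  (pixel 1, offset 0.5) */
        return 0;
    }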
*/ +/* */ +static void gray_init_cells(RAS_ARG_ void* buffer, long byte_size) +{ + ras.buffer = buffer; + ras.buffer_size = byte_size; + + ras.ycells = (PCell*)buffer; + ras.cells = NULL; + ras.max_cells = 0; + ras.num_cells = 0; + ras.area = 0; + ras.cover = 0; + ras.invalid = 1; + + ras.bound_left = INT_MAX; + ras.bound_top = INT_MAX; + ras.bound_right = INT_MIN; + ras.bound_bottom = INT_MIN; +} + +/*************************************************************************/ +/* */ +/* Compute the outline bounding box. */ +/* */ +static void gray_compute_cbox(RAS_ARG) +{ + SW_FT_Outline* outline = &ras.outline; + SW_FT_Vector* vec = outline->points; + SW_FT_Vector* limit = vec + outline->n_points; + + if (outline->n_points <= 0) { + ras.min_ex = ras.max_ex = 0; + ras.min_ey = ras.max_ey = 0; + return; + } + + ras.min_ex = ras.max_ex = vec->x; + ras.min_ey = ras.max_ey = vec->y; + + vec++; + + for (; vec < limit; vec++) { + TPos x = vec->x; + TPos y = vec->y; + + if (x < ras.min_ex) ras.min_ex = x; + if (x > ras.max_ex) ras.max_ex = x; + if (y < ras.min_ey) ras.min_ey = y; + if (y > ras.max_ey) ras.max_ey = y; + } + + /* truncate the bounding box to integer pixels */ + ras.min_ex = ras.min_ex >> 6; + ras.min_ey = ras.min_ey >> 6; + ras.max_ex = (ras.max_ex + 63) >> 6; + ras.max_ey = (ras.max_ey + 63) >> 6; +} + +/*************************************************************************/ +/* */ +/* Record the current cell in the table. */ +/* */ +static PCell gray_find_cell(RAS_ARG) +{ + PCell *pcell, cell; + TPos x = ras.ex; + + if (x > ras.count_ex) x = ras.count_ex; + + pcell = &ras.ycells[ras.ey]; + for (;;) { + cell = *pcell; + if (cell == NULL || cell->x > x) break; + + if (cell->x == x) goto Exit; + + pcell = &cell->next; + } + + if (ras.num_cells >= ras.max_cells) ft_longjmp(ras.jump_buffer, 1); + + cell = ras.cells + ras.num_cells++; + cell->x = x; + cell->area = 0; + cell->cover = 0; + + cell->next = *pcell; + *pcell = cell; + +Exit: + return cell; +} + +static void gray_record_cell(RAS_ARG) +{ + if (ras.area | ras.cover) { + PCell cell = gray_find_cell(RAS_VAR); + + cell->area += ras.area; + cell->cover += ras.cover; + } +} + +/*************************************************************************/ +/* */ +/* Set the current cell to a new position. */ +/* */ +static void gray_set_cell(RAS_ARG_ TCoord ex, TCoord ey) +{ + /* Move the cell pointer to a new position. We set the `invalid' */ + /* flag to indicate that the cell isn't part of those we're interested */ + /* in during the render phase. This means that: */ + /* */ + /* . the new vertical position must be within min_ey..max_ey-1. */ + /* . the new horizontal position must be strictly less than max_ex */ + /* */ + /* Note that if a cell is to the left of the clipping region, it is */ + /* actually set to the (min_ex-1) horizontal position. */ + + /* All cells that are on the left of the clipping region go to the */ + /* min_ex - 1 horizontal position. */ + ey -= ras.min_ey; + + if (ex > ras.max_ex) ex = ras.max_ex; + + ex -= ras.min_ex; + if (ex < 0) ex = -1; + + /* are we moving to a different cell ? */ + if (ex != ras.ex || ey != ras.ey) { + /* record the current one if it is valid */ + if (!ras.invalid) gray_record_cell(RAS_VAR); + + ras.area = 0; + ras.cover = 0; + ras.ex = ex; + ras.ey = ey; + } + + ras.invalid = + ((unsigned)ey >= (unsigned)ras.count_ey || ex >= ras.count_ex); +} + +/*************************************************************************/ +/* */ +/* Start a new contour at a given cell. 
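The cell store that gray_find_cell implements above is one sorted, singly linked list per scanline, searched and extended in a single walk. A simplified standalone sketch of the same find-or-insert (heap allocation stands in for the render-pool slots; all names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Cell { long x, cover, area; struct Cell *next; } Cell;

    static Cell *find_cell(Cell **head, long x)
    {
        Cell **pcell = head, *cell;
        for (;;) {
            cell = *pcell;
            if (cell == NULL || cell->x > x) break;  /* insertion point */
            if (cell->x == x) return cell;           /* already present */
            pcell = &cell->next;
        }
        cell = calloc(1, sizeof *cell);              /* pool slot in the real code */
        if (!cell) exit(1);
        cell->x = x;
        cell->next = *pcell;
        *pcell = cell;
        return cell;
    }

    int main(void)
    {
        Cell *row = NULL;
        find_cell(&row, 7)->cover += 3;
        find_cell(&row, 2)->cover += 1;
        find_cell(&row, 7)->cover += 2;              /* merges into cell x=7 */
        for (Cell *c = row; c; c = c->next)
            printf("x=%ld cover=%ld\n", c->x, c->cover);  /* x=2:1, x=7:5 */
        return 0;
    }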
*/ +/* */ +static void gray_start_cell(RAS_ARG_ TCoord ex, TCoord ey) +{ + if (ex > ras.max_ex) ex = (TCoord)(ras.max_ex); + + if (ex < ras.min_ex) ex = (TCoord)(ras.min_ex - 1); + + ras.area = 0; + ras.cover = 0; + ras.ex = ex - ras.min_ex; + ras.ey = ey - ras.min_ey; + ras.invalid = 0; + + gray_set_cell(RAS_VAR_ ex, ey); +} + +/*************************************************************************/ +/* */ +/* Render a straight line across multiple cells in any direction. */ +/* */ +static void gray_render_line(RAS_ARG_ TPos to_x, TPos to_y) +{ + TPos dx, dy, fx1, fy1, fx2, fy2; + TCoord ex1, ex2, ey1, ey2; + + ex1 = TRUNC(ras.x); + ex2 = TRUNC(to_x); + ey1 = TRUNC(ras.y); + ey2 = TRUNC(to_y); + + /* perform vertical clipping */ + if ((ey1 >= ras.max_ey && ey2 >= ras.max_ey) || + (ey1 < ras.min_ey && ey2 < ras.min_ey)) + goto End; + + dx = to_x - ras.x; + dy = to_y - ras.y; + + fx1 = ras.x - SUBPIXELS(ex1); + fy1 = ras.y - SUBPIXELS(ey1); + + if (ex1 == ex2 && ey1 == ey2) /* inside one cell */ + ; + else if (dy == 0) /* ex1 != ex2 */ /* any horizontal line */ + { + ex1 = ex2; + gray_set_cell(RAS_VAR_ ex1, ey1); + } else if (dx == 0) { + if (dy > 0) /* vertical line up */ + do { + fy2 = ONE_PIXEL; + ras.cover += (fy2 - fy1); + ras.area += (fy2 - fy1) * fx1 * 2; + fy1 = 0; + ey1++; + gray_set_cell(RAS_VAR_ ex1, ey1); + } while (ey1 != ey2); + else /* vertical line down */ + do { + fy2 = 0; + ras.cover += (fy2 - fy1); + ras.area += (fy2 - fy1) * fx1 * 2; + fy1 = ONE_PIXEL; + ey1--; + gray_set_cell(RAS_VAR_ ex1, ey1); + } while (ey1 != ey2); + } else /* any other line */ + { + TArea prod = dx * fy1 - dy * fx1; + SW_FT_UDIVPREP(dx); + SW_FT_UDIVPREP(dy); + + /* The fundamental value `prod' determines which side and the */ + /* exact coordinate where the line exits current cell. It is */ + /* also easily updated when moving from one cell to the next. 
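+          Worked example (PIXEL_BITS = 8): starting at (fx1, fy1) =
+          (128, 0) with (dx, dy) = (512, 256), prod = dx*fy1 - dy*fx1
+          = -32768.  The `left' and `up' tests below fail, but
+          prod - dx*ONE_PIXEL + dy*ONE_PIXEL = -98304 <= 0 and
+          prod + dy*ONE_PIXEL = 32768 >= 0, so the segment leaves
+          through the right edge at fy2 = 32768/512 = 64, halfway up,
+          matching the slope dy/dx = 1/2.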
*/ + do { + if (prod <= 0 && prod - dx * ONE_PIXEL > 0) /* left */ + { + fx2 = 0; + fy2 = (TPos)SW_FT_UDIV(-prod, -dx); + prod -= dy * ONE_PIXEL; + ras.cover += (fy2 - fy1); + ras.area += (fy2 - fy1) * (fx1 + fx2); + fx1 = ONE_PIXEL; + fy1 = fy2; + ex1--; + } else if (prod - dx * ONE_PIXEL <= 0 && + prod - dx * ONE_PIXEL + dy * ONE_PIXEL > 0) /* up */ + { + prod -= dx * ONE_PIXEL; + fx2 = (TPos)SW_FT_UDIV(-prod, dy); + fy2 = ONE_PIXEL; + ras.cover += (fy2 - fy1); + ras.area += (fy2 - fy1) * (fx1 + fx2); + fx1 = fx2; + fy1 = 0; + ey1++; + } else if (prod - dx * ONE_PIXEL + dy * ONE_PIXEL <= 0 && + prod + dy * ONE_PIXEL >= 0) /* right */ + { + prod += dy * ONE_PIXEL; + fx2 = ONE_PIXEL; + fy2 = (TPos)SW_FT_UDIV(prod, dx); + ras.cover += (fy2 - fy1); + ras.area += (fy2 - fy1) * (fx1 + fx2); + fx1 = 0; + fy1 = fy2; + ex1++; + } else /* ( prod + dy * ONE_PIXEL < 0 && + prod > 0 ) down */ + { + fx2 = (TPos)SW_FT_UDIV(prod, -dy); + fy2 = 0; + prod += dx * ONE_PIXEL; + ras.cover += (fy2 - fy1); + ras.area += (fy2 - fy1) * (fx1 + fx2); + fx1 = fx2; + fy1 = ONE_PIXEL; + ey1--; + } + + gray_set_cell(RAS_VAR_ ex1, ey1); + } while (ex1 != ex2 || ey1 != ey2); + } + + fx2 = to_x - SUBPIXELS(ex2); + fy2 = to_y - SUBPIXELS(ey2); + + ras.cover += (fy2 - fy1); + ras.area += (fy2 - fy1) * (fx1 + fx2); + +End: + ras.x = to_x; + ras.y = to_y; +} + +static void gray_split_conic(SW_FT_Vector* base) +{ + TPos a, b; + + base[4].x = base[2].x; + a = base[0].x + base[1].x; + b = base[1].x + base[2].x; + base[3].x = b >> 1; + base[2].x = ( a + b ) >> 2; + base[1].x = a >> 1; + + base[4].y = base[2].y; + a = base[0].y + base[1].y; + b = base[1].y + base[2].y; + base[3].y = b >> 1; + base[2].y = ( a + b ) >> 2; + base[1].y = a >> 1; +} + +static void gray_render_conic(RAS_ARG_ const SW_FT_Vector* control, + const SW_FT_Vector* to) +{ + TPos dx, dy; + TPos min, max, y; + int top, level; + int* levels; + SW_FT_Vector* arc; + + levels = ras.lev_stack; + + arc = ras.bez_stack; + arc[0].x = UPSCALE(to->x); + arc[0].y = UPSCALE(to->y); + arc[1].x = UPSCALE(control->x); + arc[1].y = UPSCALE(control->y); + arc[2].x = ras.x; + arc[2].y = ras.y; + top = 0; + + dx = SW_FT_ABS(arc[2].x + arc[0].x - 2 * arc[1].x); + dy = SW_FT_ABS(arc[2].y + arc[0].y - 2 * arc[1].y); + if (dx < dy) dx = dy; + + if (dx < ONE_PIXEL / 4) goto Draw; + + /* short-cut the arc that crosses the current band */ + min = max = arc[0].y; + + y = arc[1].y; + if (y < min) min = y; + if (y > max) max = y; + + y = arc[2].y; + if (y < min) min = y; + if (y > max) max = y; + + if (TRUNC(min) >= ras.max_ey || TRUNC(max) < ras.min_ey) goto Draw; + + level = 0; + do { + dx >>= 2; + level++; + } while (dx > ONE_PIXEL / 4); + + levels[0] = level; + + do { + level = levels[top]; + if (level > 0) { + gray_split_conic(arc); + arc += 2; + top++; + levels[top] = levels[top - 1] = level - 1; + continue; + } + + Draw: + gray_render_line(RAS_VAR_ arc[0].x, arc[0].y); + top--; + arc -= 2; + + } while (top >= 0); +} + +static void gray_split_cubic(SW_FT_Vector* base) +{ + TPos a, b, c; + + + base[6].x = base[3].x; + a = base[0].x + base[1].x; + b = base[1].x + base[2].x; + c = base[2].x + base[3].x; + base[5].x = c >> 1; + c += b; + base[4].x = c >> 2; + base[1].x = a >> 1; + a += b; + base[2].x = a >> 2; + base[3].x = ( a + c ) >> 3; + + base[6].y = base[3].y; + a = base[0].y + base[1].y; + b = base[1].y + base[2].y; + c = base[2].y + base[3].y; + base[5].y = c >> 1; + c += b; + base[4].y = c >> 2; + base[1].y = a >> 1; + a += b; + base[2].y = a >> 2; + base[3].y = ( a + c ) >> 3; 
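+    /* base[0..3] and base[3..6] now hold the two halves of the cubic:  */
+    /* de Casteljau subdivision at t = 1/2 done with adds and shifts    */
+    /* only.  In particular base[3] = (p0 + 3*p1 + 3*p2 + p3) / 8 is    */
+    /* the on-curve midpoint shared by both halves.                     */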
+} + + +static void +gray_render_cubic(RAS_ARG_ const SW_FT_Vector* control1, + const SW_FT_Vector* control2, + const SW_FT_Vector* to) +{ + SW_FT_Vector* arc = ras.bez_stack; + + arc[0].x = UPSCALE( to->x ); + arc[0].y = UPSCALE( to->y ); + arc[1].x = UPSCALE( control2->x ); + arc[1].y = UPSCALE( control2->y ); + arc[2].x = UPSCALE( control1->x ); + arc[2].y = UPSCALE( control1->y ); + arc[3].x = ras.x; + arc[3].y = ras.y; + + /* short-cut the arc that crosses the current band */ + if ( ( TRUNC( arc[0].y ) >= ras.max_ey && + TRUNC( arc[1].y ) >= ras.max_ey && + TRUNC( arc[2].y ) >= ras.max_ey && + TRUNC( arc[3].y ) >= ras.max_ey ) || + ( TRUNC( arc[0].y ) < ras.min_ey && + TRUNC( arc[1].y ) < ras.min_ey && + TRUNC( arc[2].y ) < ras.min_ey && + TRUNC( arc[3].y ) < ras.min_ey ) ) + { + ras.x = arc[0].x; + ras.y = arc[0].y; + return; + } + + for (;;) + { + /* with each split, control points quickly converge towards */ + /* chord trisection points and the vanishing distances below */ + /* indicate when the segment is flat enough to draw */ + if ( SW_FT_ABS( 2 * arc[0].x - 3 * arc[1].x + arc[3].x ) > ONE_PIXEL / 2 || + SW_FT_ABS( 2 * arc[0].y - 3 * arc[1].y + arc[3].y ) > ONE_PIXEL / 2 || + SW_FT_ABS( arc[0].x - 3 * arc[2].x + 2 * arc[3].x ) > ONE_PIXEL / 2 || + SW_FT_ABS( arc[0].y - 3 * arc[2].y + 2 * arc[3].y ) > ONE_PIXEL / 2 ) + goto Split; + + gray_render_line( RAS_VAR_ arc[0].x, arc[0].y ); + + if ( arc == ras.bez_stack ) + return; + + arc -= 3; + continue; + + Split: + gray_split_cubic( arc ); + arc += 3; + } +} + +static int gray_move_to(const SW_FT_Vector* to, gray_PWorker worker) +{ + TPos x, y; + + /* record current cell, if any */ + if (!ras.invalid) gray_record_cell(RAS_VAR); + + /* start to a new position */ + x = UPSCALE(to->x); + y = UPSCALE(to->y); + + gray_start_cell(RAS_VAR_ TRUNC(x), TRUNC(y)); + + worker->x = x; + worker->y = y; + return 0; +} + +static int gray_line_to(const SW_FT_Vector* to, gray_PWorker worker) +{ + gray_render_line(RAS_VAR_ UPSCALE(to->x), UPSCALE(to->y)); + return 0; +} + +static int gray_conic_to(const SW_FT_Vector* control, const SW_FT_Vector* to, + gray_PWorker worker) +{ + gray_render_conic(RAS_VAR_ control, to); + return 0; +} + +static int gray_cubic_to(const SW_FT_Vector* control1, + const SW_FT_Vector* control2, const SW_FT_Vector* to, + gray_PWorker worker) +{ + gray_render_cubic(RAS_VAR_ control1, control2, to); + return 0; +} + +static void gray_hline(RAS_ARG_ TCoord x, TCoord y, TPos area, TCoord acount) +{ + int coverage; + + /* compute the coverage line's coverage, depending on the */ + /* outline fill rule */ + /* */ + /* the coverage percentage is area/(PIXEL_BITS*PIXEL_BITS*2) */ + /* */ + coverage = (int)(area >> (PIXEL_BITS * 2 + 1 - 8)); + /* use range 0..256 */ + if (coverage < 0) coverage = -coverage; + + if (ras.outline.flags & SW_FT_OUTLINE_EVEN_ODD_FILL) { + coverage &= 511; + + if (coverage > 256) + coverage = 512 - coverage; + else if (coverage == 256) + coverage = 255; + } else { + /* normal non-zero winding rule */ + if (coverage >= 256) coverage = 255; + } + + y += (TCoord)ras.min_ey; + x += (TCoord)ras.min_ex; + + /* SW_FT_Span.x is a 16-bit short, so limit our coordinates appropriately */ + if (x >= 32767) x = 32767; + + /* SW_FT_Span.y is an integer, so limit our coordinates appropriately */ + if (y >= SW_FT_INT_MAX) y = SW_FT_INT_MAX; + + if (coverage) { + SW_FT_Span* span; + int count; + + // update bounding box. 
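+        /* What follows: grow the dirty rectangle that is later        */
+        /* reported through params->bbox_cb, then coalesce: if the     */
+        /* previous span has the same y and coverage and is contiguous */
+        /* in x, extend it; otherwise append a new span, flushing the  */
+        /* 256-entry buffer to render_span whenever it fills up.       */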
+ if (x < ras.bound_left) ras.bound_left = x; + if (y < ras.bound_top) ras.bound_top = y; + if (y > ras.bound_bottom) ras.bound_bottom = y; + if (x + acount > ras.bound_right) ras.bound_right = x + acount; + + /* see whether we can add this span to the current list */ + count = ras.num_gray_spans; + span = ras.gray_spans + count - 1; + if (count > 0 && span->y == y && (int)span->x + span->len == (int)x && + span->coverage == coverage) { + span->len = (unsigned short)(span->len + acount); + return; + } + + if (count >= SW_FT_MAX_GRAY_SPANS) { + if (ras.render_span && count > 0) + ras.render_span(count, ras.gray_spans, ras.render_span_data); + +#ifdef DEBUG_GRAYS + + if (1) { + int n; + + fprintf(stderr, "count = %3d ", count); + span = ras.gray_spans; + for (n = 0; n < count; n++, span++) + fprintf(stderr, "[%d , %d..%d] : %d ", span->y, span->x, + span->x + span->len - 1, span->coverage); + fprintf(stderr, "\n"); + } + +#endif /* DEBUG_GRAYS */ + + ras.num_gray_spans = 0; + + span = ras.gray_spans; + } else + span++; + + /* add a gray span to the current list */ + span->x = (short)x; + span->y = (short)y; + span->len = (unsigned short)acount; + span->coverage = (unsigned char)coverage; + + ras.num_gray_spans++; + } +} + +static void gray_sweep(RAS_ARG) +{ + int yindex; + + if (ras.num_cells == 0) return; + + ras.num_gray_spans = 0; + + for (yindex = 0; yindex < ras.ycount; yindex++) { + PCell cell = ras.ycells[yindex]; + TCoord cover = 0; + TCoord x = 0; + + for (; cell != NULL; cell = cell->next) { + TPos area; + + if (cell->x > x && cover != 0) + gray_hline(RAS_VAR_ x, yindex, cover * (ONE_PIXEL * 2), + cell->x - x); + + cover += cell->cover; + area = cover * (ONE_PIXEL * 2) - cell->area; + + if (area != 0 && cell->x >= 0) + gray_hline(RAS_VAR_ cell->x, yindex, area, 1); + + x = cell->x + 1; + } + + if (cover != 0) + gray_hline(RAS_VAR_ x, yindex, cover * (ONE_PIXEL * 2), + ras.count_ex - x); + } + + if (ras.render_span && ras.num_gray_spans > 0) + ras.render_span(ras.num_gray_spans, ras.gray_spans, + ras.render_span_data); +} + +/*************************************************************************/ +/* */ +/* The following function should only compile in stand-alone mode, */ +/* i.e., when building this component without the rest of FreeType. */ +/* */ +/*************************************************************************/ + +/*************************************************************************/ +/* */ +/* */ +/* SW_FT_Outline_Decompose */ +/* */ +/* */ +/* Walk over an outline's structure to decompose it into individual */ +/* segments and Bézier arcs. This function is also able to emit */ +/* `move to' and `close to' operations to indicate the start and end */ +/* of new contours in the outline. */ +/* */ +/* */ +/* outline :: A pointer to the source target. */ +/* */ +/* func_interface :: A table of `emitters', i.e., function pointers */ +/* called during decomposition to indicate path */ +/* operations. */ +/* */ +/* */ +/* user :: A typeless pointer which is passed to each */ +/* emitter during the decomposition. It can be */ +/* used to store the state during the */ +/* decomposition. */ +/* */ +/* */ +/* Error code. 0 means success. 
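A sketch of how a client inside this translation unit could drive the decomposer (the my_* emitters and dump_funcs are illustrative; the real table here is func_interface further down, wired to gray_move_to and friends, and SW_FT_Pos is assumed to be a long):

    #include <stdio.h>

    static int my_move_to(const SW_FT_Vector *to, void *user)
    { (void)user; printf("M %ld %ld\n", (long)to->x, (long)to->y); return 0; }

    static int my_line_to(const SW_FT_Vector *to, void *user)
    { (void)user; printf("L %ld %ld\n", (long)to->x, (long)to->y); return 0; }

    static int my_conic_to(const SW_FT_Vector *c, const SW_FT_Vector *to, void *user)
    { (void)user; printf("Q %ld,%ld %ld,%ld\n", (long)c->x, (long)c->y,
                         (long)to->x, (long)to->y); return 0; }

    static int my_cubic_to(const SW_FT_Vector *c1, const SW_FT_Vector *c2,
                           const SW_FT_Vector *to, void *user)
    { (void)c1; (void)c2; (void)user;
      printf("C .. %ld %ld\n", (long)to->x, (long)to->y); return 0; }

    static const SW_FT_Outline_Funcs dump_funcs = {
        my_move_to, my_line_to, my_conic_to, my_cubic_to,
        0, 0  /* shift, delta: pass coordinates through unchanged */
    };

    /* usage: SW_FT_Outline_Decompose(&outline, &dump_funcs, NULL); */

A non-zero return from any emitter aborts the walk and is propagated as the decomposer's error code.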
*/ +/* */ +static int SW_FT_Outline_Decompose(const SW_FT_Outline* outline, + const SW_FT_Outline_Funcs* func_interface, + void* user) +{ +#undef SCALED +#define SCALED(x) (((x) << shift) - delta) + + SW_FT_Vector v_last; + SW_FT_Vector v_control; + SW_FT_Vector v_start; + + SW_FT_Vector* point; + SW_FT_Vector* limit; + char* tags; + + int error; + + int n; /* index of contour in outline */ + int first; /* index of first point in contour */ + char tag; /* current point's state */ + + int shift; + TPos delta; + + if (!outline || !func_interface) return SW_FT_THROW(Invalid_Argument); + + shift = func_interface->shift; + delta = func_interface->delta; + first = 0; + + for (n = 0; n < outline->n_contours; n++) { + int last; /* index of last point in contour */ + + last = outline->contours[n]; + if (last < 0) goto Invalid_Outline; + limit = outline->points + last; + + v_start = outline->points[first]; + v_start.x = SCALED(v_start.x); + v_start.y = SCALED(v_start.y); + + v_last = outline->points[last]; + v_last.x = SCALED(v_last.x); + v_last.y = SCALED(v_last.y); + + v_control = v_start; + + point = outline->points + first; + tags = outline->tags + first; + tag = SW_FT_CURVE_TAG(tags[0]); + + /* A contour cannot start with a cubic control point! */ + if (tag == SW_FT_CURVE_TAG_CUBIC) goto Invalid_Outline; + + /* check first point to determine origin */ + if (tag == SW_FT_CURVE_TAG_CONIC) { + /* first point is conic control. Yes, this happens. */ + if (SW_FT_CURVE_TAG(outline->tags[last]) == SW_FT_CURVE_TAG_ON) { + /* start at last point if it is on the curve */ + v_start = v_last; + limit--; + } else { + /* if both first and last points are conic, */ + /* start at their middle and record its position */ + /* for closure */ + v_start.x = (v_start.x + v_last.x) / 2; + v_start.y = (v_start.y + v_last.y) / 2; + } + point--; + tags--; + } + + error = func_interface->move_to(&v_start, user); + if (error) goto Exit; + + while (point < limit) { + point++; + tags++; + + tag = SW_FT_CURVE_TAG(tags[0]); + switch (tag) { + case SW_FT_CURVE_TAG_ON: /* emit a single line_to */ + { + SW_FT_Vector vec; + + vec.x = SCALED(point->x); + vec.y = SCALED(point->y); + + error = func_interface->line_to(&vec, user); + if (error) goto Exit; + continue; + } + + case SW_FT_CURVE_TAG_CONIC: /* consume conic arcs */ + v_control.x = SCALED(point->x); + v_control.y = SCALED(point->y); + + Do_Conic: + if (point < limit) { + SW_FT_Vector vec; + SW_FT_Vector v_middle; + + point++; + tags++; + tag = SW_FT_CURVE_TAG(tags[0]); + + vec.x = SCALED(point->x); + vec.y = SCALED(point->y); + + if (tag == SW_FT_CURVE_TAG_ON) { + error = + func_interface->conic_to(&v_control, &vec, user); + if (error) goto Exit; + continue; + } + + if (tag != SW_FT_CURVE_TAG_CONIC) goto Invalid_Outline; + + v_middle.x = (v_control.x + vec.x) / 2; + v_middle.y = (v_control.y + vec.y) / 2; + + error = + func_interface->conic_to(&v_control, &v_middle, user); + if (error) goto Exit; + + v_control = vec; + goto Do_Conic; + } + + error = func_interface->conic_to(&v_control, &v_start, user); + goto Close; + + default: /* SW_FT_CURVE_TAG_CUBIC */ + { + SW_FT_Vector vec1, vec2; + + if (point + 1 > limit || + SW_FT_CURVE_TAG(tags[1]) != SW_FT_CURVE_TAG_CUBIC) + goto Invalid_Outline; + + point += 2; + tags += 2; + + vec1.x = SCALED(point[-2].x); + vec1.y = SCALED(point[-2].y); + + vec2.x = SCALED(point[-1].x); + vec2.y = SCALED(point[-1].y); + + if (point <= limit) { + SW_FT_Vector vec; + + vec.x = SCALED(point->x); + vec.y = SCALED(point->y); + + error = 
func_interface->cubic_to(&vec1, &vec2, &vec, user); + if (error) goto Exit; + continue; + } + + error = func_interface->cubic_to(&vec1, &vec2, &v_start, user); + goto Close; + } + } + } + + /* close the contour with a line segment */ + error = func_interface->line_to(&v_start, user); + + Close: + if (error) goto Exit; + + first = last + 1; + } + + return 0; + +Exit: + return error; + +Invalid_Outline: + return SW_FT_THROW(Invalid_Outline); +} + +typedef struct gray_TBand_ { + TPos min, max; + +} gray_TBand; + +SW_FT_DEFINE_OUTLINE_FUNCS(func_interface, + (SW_FT_Outline_MoveTo_Func)gray_move_to, + (SW_FT_Outline_LineTo_Func)gray_line_to, + (SW_FT_Outline_ConicTo_Func)gray_conic_to, + (SW_FT_Outline_CubicTo_Func)gray_cubic_to, 0, 0) + +static int gray_convert_glyph_inner(RAS_ARG) +{ + volatile int error = 0; + + if (ft_setjmp(ras.jump_buffer) == 0) { + error = SW_FT_Outline_Decompose(&ras.outline, &func_interface, &ras); + if (!ras.invalid) gray_record_cell(RAS_VAR); + } else + error = SW_FT_THROW(Memory_Overflow); + + return error; +} + +static int gray_convert_glyph(RAS_ARG) +{ + gray_TBand bands[40]; + gray_TBand* volatile band; + int volatile n, num_bands; + TPos volatile min, max, max_y; + SW_FT_BBox* clip; + + /* Set up state in the raster object */ + gray_compute_cbox(RAS_VAR); + + /* clip to target bitmap, exit if nothing to do */ + clip = &ras.clip_box; + + if (ras.max_ex <= clip->xMin || ras.min_ex >= clip->xMax || + ras.max_ey <= clip->yMin || ras.min_ey >= clip->yMax) + return 0; + + if (ras.min_ex < clip->xMin) ras.min_ex = clip->xMin; + if (ras.min_ey < clip->yMin) ras.min_ey = clip->yMin; + + if (ras.max_ex > clip->xMax) ras.max_ex = clip->xMax; + if (ras.max_ey > clip->yMax) ras.max_ey = clip->yMax; + + ras.count_ex = ras.max_ex - ras.min_ex; + ras.count_ey = ras.max_ey - ras.min_ey; + + /* set up vertical bands */ + num_bands = (int)((ras.max_ey - ras.min_ey) / ras.band_size); + if (num_bands == 0) num_bands = 1; + if (num_bands >= 39) num_bands = 39; + + ras.band_shoot = 0; + + min = ras.min_ey; + max_y = ras.max_ey; + + for (n = 0; n < num_bands; n++, min = max) { + max = min + ras.band_size; + if (n == num_bands - 1 || max > max_y) max = max_y; + + bands[0].min = min; + bands[0].max = max; + band = bands; + + while (band >= bands) { + TPos bottom, top, middle; + int error; + + { + PCell cells_max; + int yindex; + long cell_start, cell_end, cell_mod; + + ras.ycells = (PCell*)ras.buffer; + ras.ycount = band->max - band->min; + + cell_start = sizeof(PCell) * ras.ycount; + cell_mod = cell_start % sizeof(TCell); + if (cell_mod > 0) cell_start += sizeof(TCell) - cell_mod; + + cell_end = ras.buffer_size; + cell_end -= cell_end % sizeof(TCell); + + cells_max = (PCell)((char*)ras.buffer + cell_end); + ras.cells = (PCell)((char*)ras.buffer + cell_start); + if (ras.cells >= cells_max) goto ReduceBands; + + ras.max_cells = cells_max - ras.cells; + if (ras.max_cells < 2) goto ReduceBands; + + for (yindex = 0; yindex < ras.ycount; yindex++) + ras.ycells[yindex] = NULL; + } + + ras.num_cells = 0; + ras.invalid = 1; + ras.min_ey = band->min; + ras.max_ey = band->max; + ras.count_ey = band->max - band->min; + + error = gray_convert_glyph_inner(RAS_VAR); + + if (!error) { + gray_sweep(RAS_VAR); + band--; + continue; + } else if (error != ErrRaster_Memory_Overflow) + return 1; + + ReduceBands: + /* render pool overflow; we will reduce the render band by half */ + bottom = band->min; + top = band->max; + middle = bottom + ((top - bottom) >> 1); + + /* This is too complex for a single 
scanline; there must */ + /* be some problems. */ + if (middle == bottom) { + return 1; + } + + if (bottom - top >= ras.band_size) ras.band_shoot++; + + band[1].min = bottom; + band[1].max = middle; + band[0].min = middle; + band[0].max = top; + band++; + } + } + + if (ras.band_shoot > 8 && ras.band_size > 16) + ras.band_size = ras.band_size / 2; + + return 0; +} + +static int gray_raster_render(gray_PRaster raster, + const SW_FT_Raster_Params* params) +{ + SW_FT_UNUSED(raster); + const SW_FT_Outline* outline = (const SW_FT_Outline*)params->source; + + gray_TWorker worker[1]; + + TCell buffer[SW_FT_RENDER_POOL_SIZE / sizeof(TCell)]; + long buffer_size = sizeof(buffer); + int band_size = (int)(buffer_size / (long)(sizeof(TCell) * 8)); + + if (!outline) return SW_FT_THROW(Invalid_Outline); + + /* return immediately if the outline is empty */ + if (outline->n_points == 0 || outline->n_contours <= 0) return 0; + + if (!outline->contours || !outline->points) + return SW_FT_THROW(Invalid_Outline); + + if (outline->n_points != outline->contours[outline->n_contours - 1] + 1) + return SW_FT_THROW(Invalid_Outline); + + /* this version does not support monochrome rendering */ + if (!(params->flags & SW_FT_RASTER_FLAG_AA)) + return SW_FT_THROW(Invalid_Mode); + + if (params->flags & SW_FT_RASTER_FLAG_CLIP) + ras.clip_box = params->clip_box; + else { + ras.clip_box.xMin = -32768L; + ras.clip_box.yMin = -32768L; + ras.clip_box.xMax = 32767L; + ras.clip_box.yMax = 32767L; + } + + gray_init_cells(RAS_VAR_ buffer, buffer_size); + + ras.outline = *outline; + ras.num_cells = 0; + ras.invalid = 1; + ras.band_size = band_size; + ras.num_gray_spans = 0; + + ras.render_span = (SW_FT_Raster_Span_Func)params->gray_spans; + ras.render_span_data = params->user; + + gray_convert_glyph(RAS_VAR); + params->bbox_cb(ras.bound_left, ras.bound_top, + ras.bound_right - ras.bound_left, + ras.bound_bottom - ras.bound_top + 1, params->user); + return 1; +} + +/**** RASTER OBJECT CREATION: In stand-alone mode, we simply use *****/ +/**** a static object. *****/ + +static int gray_raster_new(SW_FT_Raster* araster) +{ + static gray_TRaster the_raster; + + *araster = (SW_FT_Raster)&the_raster; + SW_FT_MEM_ZERO(&the_raster, sizeof(the_raster)); + + return 0; +} + +static void gray_raster_done(SW_FT_Raster raster) +{ + /* nothing */ + SW_FT_UNUSED(raster); +} + +static void gray_raster_reset(SW_FT_Raster raster, char* pool_base, + long pool_size) +{ + SW_FT_UNUSED(raster); + SW_FT_UNUSED(pool_base); + SW_FT_UNUSED(pool_size); +} + +SW_FT_DEFINE_RASTER_FUNCS(sw_ft_grays_raster, + + (SW_FT_Raster_New_Func)gray_raster_new, + (SW_FT_Raster_Reset_Func)gray_raster_reset, + (SW_FT_Raster_Render_Func)gray_raster_render, + (SW_FT_Raster_Done_Func)gray_raster_done) + +/* END */ diff --git a/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_raster.h b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_raster.h new file mode 100644 index 00000000..da585775 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_raster.h @@ -0,0 +1,607 @@ +#ifndef V_FT_IMG_H +#define V_FT_IMG_H +/***************************************************************************/ +/* */ +/* ftimage.h */ +/* */ +/* FreeType glyph image formats and default raster interface */ +/* (specification). */ +/* */ +/* Copyright 1996-2010, 2013 by */ +/* David Turner, Robert Wilhelm, and Werner Lemberg. 
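Two numbers are worth pulling out of gray_raster_render and gray_convert_glyph above (a sketch assuming an LP64 build, where sizeof(TCell) comes to 32 bytes; other ABIs will differ):

    #include <stdio.h>

    int main(void)
    {
        long pool_bytes = 16384;  /* SW_FT_RENDER_POOL_SIZE, on the stack */
        long cell_bytes = 32;     /* sizeof(TCell) on LP64, an assumption */

        /* band_size as computed in gray_raster_render */
        printf("band_size = %ld scanlines\n", pool_bytes / (cell_bytes * 8));
        return 0;
    }

That gives 64 scanlines per band here; a band whose cells overflow the pool is split in half on the small bands[40] stack, and if that keeps happening (band_shoot > 8) the band size itself is halved, down to a floor of 16.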
*/ +/* */ +/* This file is part of the FreeType project, and may only be used, */ +/* modified, and distributed under the terms of the FreeType project */ +/* license, LICENSE.TXT. By continuing to use, modify, or distribute */ +/* this file you indicate that you have read the license and */ +/* understand and accept it fully. */ +/* */ +/***************************************************************************/ + + /*************************************************************************/ + /* */ + /* Note: A `raster' is simply a scan-line converter, used to render */ + /* SW_FT_Outlines into SW_FT_Bitmaps. */ + /* */ + /*************************************************************************/ + +#include "vector_freetype_v_ft_types.h" + + /*************************************************************************/ + /* */ + /* */ + /* FT_BBox */ + /* */ + /* */ + /* A structure used to hold an outline's bounding box, i.e., the */ + /* coordinates of its extrema in the horizontal and vertical */ + /* directions. */ + /* */ + /* */ + /* xMin :: The horizontal minimum (left-most). */ + /* */ + /* yMin :: The vertical minimum (bottom-most). */ + /* */ + /* xMax :: The horizontal maximum (right-most). */ + /* */ + /* yMax :: The vertical maximum (top-most). */ + /* */ + /* */ + /* The bounding box is specified with the coordinates of the lower */ + /* left and the upper right corner. In PostScript, those values are */ + /* often called (llx,lly) and (urx,ury), respectively. */ + /* */ + /* If `yMin' is negative, this value gives the glyph's descender. */ + /* Otherwise, the glyph doesn't descend below the baseline. */ + /* Similarly, if `ymax' is positive, this value gives the glyph's */ + /* ascender. */ + /* */ + /* `xMin' gives the horizontal distance from the glyph's origin to */ + /* the left edge of the glyph's bounding box. If `xMin' is negative, */ + /* the glyph extends to the left of the origin. */ + /* */ + typedef struct SW_FT_BBox_ + { + SW_FT_Pos xMin, yMin; + SW_FT_Pos xMax, yMax; + + } SW_FT_BBox; + +/*************************************************************************/ +/* */ +/* */ +/* SW_FT_Outline */ +/* */ +/* */ +/* This structure is used to describe an outline to the scan-line */ +/* converter. */ +/* */ +/* */ +/* n_contours :: The number of contours in the outline. */ +/* */ +/* n_points :: The number of points in the outline. */ +/* */ +/* points :: A pointer to an array of `n_points' @SW_FT_Vector */ +/* elements, giving the outline's point coordinates. */ +/* */ +/* tags :: A pointer to an array of `n_points' chars, giving */ +/* each outline point's type. */ +/* */ +/* If bit~0 is unset, the point is `off' the curve, */ +/* i.e., a Bézier control point, while it is `on' if */ +/* set. */ +/* */ +/* Bit~1 is meaningful for `off' points only. If set, */ +/* it indicates a third-order Bézier arc control point; */ +/* and a second-order control point if unset. */ +/* */ +/* If bit~2 is set, bits 5-7 contain the drop-out mode */ +/* (as defined in the OpenType specification; the value */ +/* is the same as the argument to the SCANMODE */ +/* instruction). */ +/* */ +/* Bits 3 and~4 are reserved for internal purposes. */ +/* */ +/* contours :: An array of `n_contours' shorts, giving the end */ +/* point of each contour within the outline. For */ +/* example, the first contour is defined by the points */ +/* `0' to `contours[0]', the second one is defined by */ +/* the points `contours[0]+1' to `contours[1]', etc. 
*/ +/* */ +/* flags :: A set of bit flags used to characterize the outline */ +/* and give hints to the scan-converter and hinter on */ +/* how to convert/grid-fit it. See @SW_FT_OUTLINE_FLAGS.*/ +/* */ +typedef struct SW_FT_Outline_ +{ + short n_contours; /* number of contours in glyph */ + short n_points; /* number of points in the glyph */ + + SW_FT_Vector* points; /* the outline's points */ + char* tags; /* the points flags */ + short* contours; /* the contour end points */ + char* contours_flag; /* the contour open flags */ + + int flags; /* outline masks */ + +} SW_FT_Outline; + + + /*************************************************************************/ + /* */ + /* */ + /* SW_FT_OUTLINE_FLAGS */ + /* */ + /* */ + /* A list of bit-field constants use for the flags in an outline's */ + /* `flags' field. */ + /* */ + /* */ + /* SW_FT_OUTLINE_NONE :: */ + /* Value~0 is reserved. */ + /* */ + /* SW_FT_OUTLINE_OWNER :: */ + /* If set, this flag indicates that the outline's field arrays */ + /* (i.e., `points', `flags', and `contours') are `owned' by the */ + /* outline object, and should thus be freed when it is destroyed. */ + /* */ + /* SW_FT_OUTLINE_EVEN_ODD_FILL :: */ + /* By default, outlines are filled using the non-zero winding rule. */ + /* If set to 1, the outline will be filled using the even-odd fill */ + /* rule (only works with the smooth rasterizer). */ + /* */ + /* SW_FT_OUTLINE_REVERSE_FILL :: */ + /* By default, outside contours of an outline are oriented in */ + /* clock-wise direction, as defined in the TrueType specification. */ + /* This flag is set if the outline uses the opposite direction */ + /* (typically for Type~1 fonts). This flag is ignored by the scan */ + /* converter. */ + /* */ + /* */ + /* */ + /* There exists a second mechanism to pass the drop-out mode to the */ + /* B/W rasterizer; see the `tags' field in @SW_FT_Outline. */ + /* */ + /* Please refer to the description of the `SCANTYPE' instruction in */ + /* the OpenType specification (in file `ttinst1.doc') how simple */ + /* drop-outs, smart drop-outs, and stubs are defined. */ + /* */ +#define SW_FT_OUTLINE_NONE 0x0 +#define SW_FT_OUTLINE_OWNER 0x1 +#define SW_FT_OUTLINE_EVEN_ODD_FILL 0x2 +#define SW_FT_OUTLINE_REVERSE_FILL 0x4 + + /* */ + +#define SW_FT_CURVE_TAG( flag ) ( flag & 3 ) + +#define SW_FT_CURVE_TAG_ON 1 +#define SW_FT_CURVE_TAG_CONIC 0 +#define SW_FT_CURVE_TAG_CUBIC 2 + + +#define SW_FT_Curve_Tag_On SW_FT_CURVE_TAG_ON +#define SW_FT_Curve_Tag_Conic SW_FT_CURVE_TAG_CONIC +#define SW_FT_Curve_Tag_Cubic SW_FT_CURVE_TAG_CUBIC + + /*************************************************************************/ + /* */ + /* A raster is a scan converter, in charge of rendering an outline into */ + /* a a bitmap. This section contains the public API for rasters. */ + /* */ + /* Note that in FreeType 2, all rasters are now encapsulated within */ + /* specific modules called `renderers'. See `ftrender.h' for more */ + /* details on renderers. */ + /* */ + /*************************************************************************/ + + + /*************************************************************************/ + /* */ + /* */ + /* SW_FT_Raster */ + /* */ + /* */ + /* A handle (pointer) to a raster object. Each object can be used */ + /* independently to convert an outline into a bitmap or pixmap. 
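Stepping back to the outline type documented above, a minimal sketch of a closed 10x10 pixel square expressed as an SW_FT_Outline (640 = 10.0 in 26.6 units; the contours_flag open/closed convention is an rlottie addition to stock FreeType, and 0 is assumed to mean closed here):

    /* Four on-curve points forming one closed contour. */
    static SW_FT_Vector sq_pts[4]  = { {0,0}, {640,0}, {640,640}, {0,640} };
    static char         sq_tags[4] = { SW_FT_CURVE_TAG_ON, SW_FT_CURVE_TAG_ON,
                                       SW_FT_CURVE_TAG_ON, SW_FT_CURVE_TAG_ON };
    static short        sq_ends[1] = { 3 };  /* contour covers points 0..3  */
    static char         sq_open[1] = { 0 };  /* assumed: 0 = closed contour */

    static SW_FT_Outline square = {
        1,                    /* n_contours */
        4,                    /* n_points   */
        sq_pts, sq_tags, sq_ends,
        sq_open,              /* contours_flag, rlottie extension */
        SW_FT_OUTLINE_NONE    /* non-zero winding fill by default */
    };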
*/ + /* */ + typedef struct SW_FT_RasterRec_* SW_FT_Raster; + + + /*************************************************************************/ + /* */ + /* */ + /* SW_FT_Span */ + /* */ + /* */ + /* A structure used to model a single span of gray (or black) pixels */ + /* when rendering a monochrome or anti-aliased bitmap. */ + /* */ + /* */ + /* x :: The span's horizontal start position. */ + /* */ + /* len :: The span's length in pixels. */ + /* */ + /* coverage :: The span color/coverage, ranging from 0 (background) */ + /* to 255 (foreground). Only used for anti-aliased */ + /* rendering. */ + /* */ + /* */ + /* This structure is used by the span drawing callback type named */ + /* @SW_FT_SpanFunc that takes the y~coordinate of the span as a */ + /* parameter. */ + /* */ + /* The coverage value is always between 0 and 255. If you want less */ + /* gray values, the callback function has to reduce them. */ + /* */ + typedef struct SW_FT_Span_ + { + short x; + short y; + unsigned short len; + unsigned char coverage; + + } SW_FT_Span; + + + /*************************************************************************/ + /* */ + /* */ + /* SW_FT_SpanFunc */ + /* */ + /* */ + /* A function used as a call-back by the anti-aliased renderer in */ + /* order to let client applications draw themselves the gray pixel */ + /* spans on each scan line. */ + /* */ + /* */ + /* y :: The scanline's y~coordinate. */ + /* */ + /* count :: The number of spans to draw on this scanline. */ + /* */ + /* spans :: A table of `count' spans to draw on the scanline. */ + /* */ + /* user :: User-supplied data that is passed to the callback. */ + /* */ + /* */ + /* This callback allows client applications to directly render the */ + /* gray spans of the anti-aliased bitmap to any kind of surfaces. */ + /* */ + /* This can be used to write anti-aliased outlines directly to a */ + /* given background bitmap, and even perform translucency. */ + /* */ + /* Note that the `count' field cannot be greater than a fixed value */ + /* defined by the `SW_FT_MAX_GRAY_SPANS' configuration macro in */ + /* `ftoption.h'. By default, this value is set to~32, which means */ + /* that if there are more than 32~spans on a given scanline, the */ + /* callback is called several times with the same `y' parameter in */ + /* order to draw all callbacks. */ + /* */ + /* Otherwise, the callback is only called once per scan-line, and */ + /* only for those scanlines that do have `gray' pixels on them. */ + /* */ + typedef void + (*SW_FT_SpanFunc)( int count, + const SW_FT_Span* spans, + void* user ); + + typedef void + (*SW_FT_BboxFunc)( int x, int y, int w, int h, + void* user); + +#define SW_FT_Raster_Span_Func SW_FT_SpanFunc + + + + /*************************************************************************/ + /* */ + /* */ + /* SW_FT_RASTER_FLAG_XXX */ + /* */ + /* */ + /* A list of bit flag constants as used in the `flags' field of a */ + /* @SW_FT_Raster_Params structure. */ + /* */ + /* */ + /* SW_FT_RASTER_FLAG_DEFAULT :: This value is 0. */ + /* */ + /* SW_FT_RASTER_FLAG_AA :: This flag is set to indicate that an */ + /* anti-aliased glyph image should be */ + /* generated. Otherwise, it will be */ + /* monochrome (1-bit). */ + /* */ + /* SW_FT_RASTER_FLAG_DIRECT :: This flag is set to indicate direct */ + /* rendering. In this mode, client */ + /* applications must provide their own span */ + /* callback. This lets them directly */ + /* draw or compose over an existing bitmap. 
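*/
+ /*                                                                      */
+ /*    Illustrative sketch, not part of the vendored source: a span     */
+ /*    callback matching the SW_FT_SpanFunc signature above; `Target'   */
+ /*    and its 8-bit coverage buffer are assumptions for illustration.  */
+#if 0
+typedef struct { unsigned char* buf; int stride; } Target;
+
+static void draw_spans(int count, const SW_FT_Span* spans, void* user)
+{
+    Target* t = (Target*)user;
+    int     i, j;
+
+    for (i = 0; i < count; i++) {
+        unsigned char* row = t->buf + spans[i].y * t->stride + spans[i].x;
+        for (j = 0; j < spans[i].len; j++)
+            row[j] = spans[i].coverage;  /* overwrite; real code may blend */
+    }
+}
+#endif
+/*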
*/ + /* If this bit is not set, the target */ + /* pixmap's buffer _must_ be zeroed before */ + /* rendering. */ + /* */ + /* Note that for now, direct rendering is */ + /* only possible with anti-aliased glyphs. */ + /* */ + /* SW_FT_RASTER_FLAG_CLIP :: This flag is only used in direct */ + /* rendering mode. If set, the output will */ + /* be clipped to a box specified in the */ + /* `clip_box' field of the */ + /* @SW_FT_Raster_Params structure. */ + /* */ + /* Note that by default, the glyph bitmap */ + /* is clipped to the target pixmap, except */ + /* in direct rendering mode where all spans */ + /* are generated if no clipping box is set. */ + /* */ +#define SW_FT_RASTER_FLAG_DEFAULT 0x0 +#define SW_FT_RASTER_FLAG_AA 0x1 +#define SW_FT_RASTER_FLAG_DIRECT 0x2 +#define SW_FT_RASTER_FLAG_CLIP 0x4 + + + /*************************************************************************/ + /* */ + /* */ + /* SW_FT_Raster_Params */ + /* */ + /* */ + /* A structure to hold the arguments used by a raster's render */ + /* function. */ + /* */ + /* */ + /* target :: The target bitmap. */ + /* */ + /* source :: A pointer to the source glyph image (e.g., an */ + /* @SW_FT_Outline). */ + /* */ + /* flags :: The rendering flags. */ + /* */ + /* gray_spans :: The gray span drawing callback. */ + /* */ + /* black_spans :: The black span drawing callback. UNIMPLEMENTED! */ + /* */ + /* bit_test :: The bit test callback. UNIMPLEMENTED! */ + /* */ + /* bit_set :: The bit set callback. UNIMPLEMENTED! */ + /* */ + /* user :: User-supplied data that is passed to each drawing */ + /* callback. */ + /* */ + /* clip_box :: An optional clipping box. It is only used in */ + /* direct rendering mode. Note that coordinates here */ + /* should be expressed in _integer_ pixels (and not in */ + /* 26.6 fixed-point units). */ + /* */ + /* */ + /* An anti-aliased glyph bitmap is drawn if the @SW_FT_RASTER_FLAG_AA */ + /* bit flag is set in the `flags' field, otherwise a monochrome */ + /* bitmap is generated. */ + /* */ + /* If the @SW_FT_RASTER_FLAG_DIRECT bit flag is set in `flags', the */ + /* raster will call the `gray_spans' callback to draw gray pixel */ + /* spans, in the case of an aa glyph bitmap, it will call */ + /* `black_spans', and `bit_test' and `bit_set' in the case of a */ + /* monochrome bitmap. This allows direct composition over a */ + /* pre-existing bitmap through user-provided callbacks to perform the */ + /* span drawing/composition. */ + /* */ + /* Note that the `bit_test' and `bit_set' callbacks are required when */ + /* rendering a monochrome bitmap, as they are crucial to implement */ + /* correct drop-out control as defined in the TrueType specification. */ + /* */ + typedef struct SW_FT_Raster_Params_ + { + const void* source; + int flags; + SW_FT_SpanFunc gray_spans; + SW_FT_BboxFunc bbox_cb; + void* user; + SW_FT_BBox clip_box; + + } SW_FT_Raster_Params; + + +/*************************************************************************/ +/* */ +/* */ +/* SW_FT_Outline_Check */ +/* */ +/* */ +/* Check the contents of an outline descriptor. */ +/* */ +/* */ +/* outline :: A handle to a source outline. */ +/* */ +/* */ +/* FreeType error code. 0~means success. */ +/* */ +SW_FT_Error +SW_FT_Outline_Check( SW_FT_Outline* outline ); + + +/*************************************************************************/ +/* */ +/* */ +/* SW_FT_Outline_Get_CBox */ +/* */ +/* */ +/* Return an outline's `control box'. The control box encloses all */ +/* the outline's points, including Bézier control points. 
Though it */ +/* coincides with the exact bounding box for most glyphs, it can be */ +/* slightly larger in some situations (like when rotating an outline */ +/* that contains Bézier outside arcs). */ +/* */ +/* Computing the control box is very fast, while getting the bounding */ +/* box can take much more time as it needs to walk over all segments */ +/* and arcs in the outline. To get the latter, you can use the */ +/* `ftbbox' component, which is dedicated to this single task. */ +/* */ +/* */ +/* outline :: A pointer to the source outline descriptor. */ +/* */ +/* */ +/* acbox :: The outline's control box. */ +/* */ +/* */ +/* See @SW_FT_Glyph_Get_CBox for a discussion of tricky fonts. */ +/* */ +void +SW_FT_Outline_Get_CBox( const SW_FT_Outline* outline, + SW_FT_BBox *acbox ); + + + /*************************************************************************/ + /* */ + /* */ + /* SW_FT_Raster_NewFunc */ + /* */ + /* */ + /* A function used to create a new raster object. */ + /* */ + /* */ + /* memory :: A handle to the memory allocator. */ + /* */ + /* */ + /* raster :: A handle to the new raster object. */ + /* */ + /* */ + /* Error code. 0~means success. */ + /* */ + /* */ + /* The `memory' parameter is a typeless pointer in order to avoid */ + /* un-wanted dependencies on the rest of the FreeType code. In */ + /* practice, it is an @SW_FT_Memory object, i.e., a handle to the */ + /* standard FreeType memory allocator. However, this field can be */ + /* completely ignored by a given raster implementation. */ + /* */ + typedef int + (*SW_FT_Raster_NewFunc)( SW_FT_Raster* raster ); + +#define SW_FT_Raster_New_Func SW_FT_Raster_NewFunc + + + /*************************************************************************/ + /* */ + /* */ + /* SW_FT_Raster_DoneFunc */ + /* */ + /* */ + /* A function used to destroy a given raster object. */ + /* */ + /* */ + /* raster :: A handle to the raster object. */ + /* */ + typedef void + (*SW_FT_Raster_DoneFunc)( SW_FT_Raster raster ); + +#define SW_FT_Raster_Done_Func SW_FT_Raster_DoneFunc + + + /*************************************************************************/ + /* */ + /* */ + /* SW_FT_Raster_ResetFunc */ + /* */ + /* */ + /* FreeType provides an area of memory called the `render pool', */ + /* available to all registered rasters. This pool can be freely used */ + /* during a given scan-conversion but is shared by all rasters. Its */ + /* content is thus transient. */ + /* */ + /* This function is called each time the render pool changes, or just */ + /* after a new raster object is created. */ + /* */ + /* */ + /* raster :: A handle to the new raster object. */ + /* */ + /* pool_base :: The address in memory of the render pool. */ + /* */ + /* pool_size :: The size in bytes of the render pool. */ + /* */ + /* */ + /* Rasters can ignore the render pool and rely on dynamic memory */ + /* allocation if they want to (a handle to the memory allocator is */ + /* passed to the raster constructor). However, this is not */ + /* recommended for efficiency purposes. */ + /* */ + typedef void + (*SW_FT_Raster_ResetFunc)( SW_FT_Raster raster, + unsigned char* pool_base, + unsigned long pool_size ); + +#define SW_FT_Raster_Reset_Func SW_FT_Raster_ResetFunc + + + /*************************************************************************/ + /* */ + /* */ + /* SW_FT_Raster_RenderFunc */ + /* */ + /* */ + /* Invoke a given raster to scan-convert a given glyph image into a */ + /* target bitmap. 
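*/
+ /*                                                                      */
+ /*    Illustrative sketch, not part of the vendored source: driving a  */
+ /*    raster through its function table in direct anti-aliased mode,   */
+ /*    reusing the `draw_spans'/`Target' sketch above. The function     */
+ /*    table used is sw_ft_grays_raster, declared at the end of this    */
+ /*    header; supplying a no-op bbox callback is an assumption.        */
+#if 0
+static void on_bbox(int x, int y, int w, int h, void* user)
+{
+    (void)x; (void)y; (void)w; (void)h; (void)user;  /* ignore bbox info */
+}
+
+static void render_direct(const SW_FT_Outline* outline, Target* target)
+{
+    SW_FT_Raster        raster;
+    SW_FT_Raster_Params params = { 0 };
+
+    sw_ft_grays_raster.raster_new(&raster);
+
+    params.source     = outline;
+    params.flags      = SW_FT_RASTER_FLAG_AA | SW_FT_RASTER_FLAG_DIRECT;
+    params.gray_spans = draw_spans;
+    params.bbox_cb    = on_bbox;
+    params.user       = target;
+
+    sw_ft_grays_raster.raster_render(raster, &params);
+    sw_ft_grays_raster.raster_done(raster);
+}
+#endif
+/*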
*/ + /* */ + /* */ + /* raster :: A handle to the raster object. */ + /* */ + /* params :: A pointer to an @SW_FT_Raster_Params structure used to */ + /* store the rendering parameters. */ + /* */ + /* */ + /* Error code. 0~means success. */ + /* */ + /* */ + /* The exact format of the source image depends on the raster's glyph */ + /* format defined in its @SW_FT_Raster_Funcs structure. It can be an */ + /* @SW_FT_Outline or anything else in order to support a large array of */ + /* glyph formats. */ + /* */ + /* Note also that the render function can fail and return a */ + /* `SW_FT_Err_Unimplemented_Feature' error code if the raster used does */ + /* not support direct composition. */ + /* */ + /* XXX: For now, the standard raster doesn't support direct */ + /* composition but this should change for the final release (see */ + /* the files `demos/src/ftgrays.c' and `demos/src/ftgrays2.c' */ + /* for examples of distinct implementations that support direct */ + /* composition). */ + /* */ + typedef int + (*SW_FT_Raster_RenderFunc)( SW_FT_Raster raster, + const SW_FT_Raster_Params* params ); + +#define SW_FT_Raster_Render_Func SW_FT_Raster_RenderFunc + + + /*************************************************************************/ + /* */ + /* */ + /* SW_FT_Raster_Funcs */ + /* */ + /* */ + /* A structure used to describe a given raster class to the library. */ + /* */ + /* */ + /* glyph_format :: The supported glyph format for this raster. */ + /* */ + /* raster_new :: The raster constructor. */ + /* */ + /* raster_reset :: Used to reset the render pool within the raster. */ + /* */ + /* raster_render :: A function to render a glyph into a given bitmap. */ + /* */ + /* raster_done :: The raster destructor. */ + /* */ + typedef struct SW_FT_Raster_Funcs_ + { + SW_FT_Raster_NewFunc raster_new; + SW_FT_Raster_ResetFunc raster_reset; + SW_FT_Raster_RenderFunc raster_render; + SW_FT_Raster_DoneFunc raster_done; + + } SW_FT_Raster_Funcs; + + +extern const SW_FT_Raster_Funcs sw_ft_grays_raster; + +#endif // V_FT_IMG_H diff --git a/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_stroker.cpp b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_stroker.cpp new file mode 100644 index 00000000..aed62c92 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_stroker.cpp @@ -0,0 +1,1936 @@ + +/***************************************************************************/ +/* */ +/* ftstroke.c */ +/* */ +/* FreeType path stroker (body). */ +/* */ +/* Copyright 2002-2006, 2008-2011, 2013 by */ +/* David Turner, Robert Wilhelm, and Werner Lemberg. */ +/* */ +/* This file is part of the FreeType project, and may only be used, */ +/* modified, and distributed under the terms of the FreeType project */ +/* license, LICENSE.TXT. By continuing to use, modify, or distribute */ +/* this file you indicate that you have read the license and */ +/* understand and accept it fully. 
*/
+/*                                                                         */
+/***************************************************************************/
+
+#include "vector_freetype_v_ft_stroker.h"
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include "vector_freetype_v_ft_math.h"
+
+/*************************************************************************/
+/*************************************************************************/
+/*****                                                               *****/
+/*****                     BEZIER COMPUTATIONS                       *****/
+/*****                                                               *****/
+/*************************************************************************/
+/*************************************************************************/
+
+#define SW_FT_SMALL_CONIC_THRESHOLD (SW_FT_ANGLE_PI / 6)
+#define SW_FT_SMALL_CUBIC_THRESHOLD (SW_FT_ANGLE_PI / 8)
+
+#define SW_FT_EPSILON 2
+
+#define SW_FT_IS_SMALL(x) ((x) > -SW_FT_EPSILON && (x) < SW_FT_EPSILON)
+
+static SW_FT_Pos ft_pos_abs(SW_FT_Pos x)
+{
+    return x >= 0 ? x : -x;
+}
+
+static void ft_conic_split(SW_FT_Vector* base)
+{
+    SW_FT_Pos a, b;
+
+    base[4].x = base[2].x;
+    a = base[0].x + base[1].x;
+    b = base[1].x + base[2].x;
+    base[3].x = b >> 1;
+    base[2].x = ( a + b ) >> 2;
+    base[1].x = a >> 1;
+
+    base[4].y = base[2].y;
+    a = base[0].y + base[1].y;
+    b = base[1].y + base[2].y;
+    base[3].y = b >> 1;
+    base[2].y = ( a + b ) >> 2;
+    base[1].y = a >> 1;
+}
+
+static SW_FT_Bool ft_conic_is_small_enough(SW_FT_Vector* base,
+                                           SW_FT_Angle* angle_in,
+                                           SW_FT_Angle* angle_out)
+{
+    SW_FT_Vector d1, d2;
+    SW_FT_Angle  theta;
+    SW_FT_Int    close1, close2;
+
+    d1.x = base[1].x - base[2].x;
+    d1.y = base[1].y - base[2].y;
+    d2.x = base[0].x - base[1].x;
+    d2.y = base[0].y - base[1].y;
+
+    close1 = SW_FT_IS_SMALL(d1.x) && SW_FT_IS_SMALL(d1.y);
+    close2 = SW_FT_IS_SMALL(d2.x) && SW_FT_IS_SMALL(d2.y);
+
+    if (close1) {
+        if (close2) {
+            /* basically a point; */
+            /* do nothing to retain original direction */
+        } else {
+            *angle_in = *angle_out = SW_FT_Atan2(d2.x, d2.y);
+        }
+    } else /* !close1 */
+    {
+        if (close2) {
+            *angle_in = *angle_out = SW_FT_Atan2(d1.x, d1.y);
+        } else {
+            *angle_in = SW_FT_Atan2(d1.x, d1.y);
+            *angle_out = SW_FT_Atan2(d2.x, d2.y);
+        }
+    }
+
+    theta = ft_pos_abs(SW_FT_Angle_Diff(*angle_in, *angle_out));
+
+    return SW_FT_BOOL(theta < SW_FT_SMALL_CONIC_THRESHOLD);
+}
+
+static void ft_cubic_split(SW_FT_Vector* base)
+{
+    SW_FT_Pos a, b, c;
+
+    base[6].x = base[3].x;
+    a = base[0].x + base[1].x;
+    b = base[1].x + base[2].x;
+    c = base[2].x + base[3].x;
+    base[5].x = c >> 1;
+    c += b;
+    base[4].x = c >> 2;
+    base[1].x = a >> 1;
+    a += b;
+    base[2].x = a >> 2;
+    base[3].x = ( a + c ) >> 3;
+
+    base[6].y = base[3].y;
+    a = base[0].y + base[1].y;
+    b = base[1].y + base[2].y;
+    c = base[2].y + base[3].y;
+    base[5].y = c >> 1;
+    c += b;
+    base[4].y = c >> 2;
+    base[1].y = a >> 1;
+    a += b;
+    base[2].y = a >> 2;
+    base[3].y = ( a + c ) >> 3;
+}
+
+/* Return the average of `angle1' and `angle2'.            */
+/* This gives correct result even if `angle1' and `angle2' */
+/* have opposite signs.
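+   For example, naive averaging of angle1 = +170 deg and
+   angle2 = -170 deg gives 0 deg, while SW_FT_Angle_Diff wraps the
+   difference into (-PI, PI] (here +20 deg), so the mean computed
+   below is 170 + 10 = 180 deg, the true mean on the circle.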
*/ +static SW_FT_Angle ft_angle_mean(SW_FT_Angle angle1, SW_FT_Angle angle2) +{ + return angle1 + SW_FT_Angle_Diff(angle1, angle2) / 2; +} + +static SW_FT_Bool ft_cubic_is_small_enough(SW_FT_Vector* base, + SW_FT_Angle* angle_in, + SW_FT_Angle* angle_mid, + SW_FT_Angle* angle_out) +{ + SW_FT_Vector d1, d2, d3; + SW_FT_Angle theta1, theta2; + SW_FT_Int close1, close2, close3; + + d1.x = base[2].x - base[3].x; + d1.y = base[2].y - base[3].y; + d2.x = base[1].x - base[2].x; + d2.y = base[1].y - base[2].y; + d3.x = base[0].x - base[1].x; + d3.y = base[0].y - base[1].y; + + close1 = SW_FT_IS_SMALL(d1.x) && SW_FT_IS_SMALL(d1.y); + close2 = SW_FT_IS_SMALL(d2.x) && SW_FT_IS_SMALL(d2.y); + close3 = SW_FT_IS_SMALL(d3.x) && SW_FT_IS_SMALL(d3.y); + + if (close1) { + if (close2) { + if (close3) { + /* basically a point; */ + /* do nothing to retain original direction */ + } else /* !close3 */ + { + *angle_in = *angle_mid = *angle_out = SW_FT_Atan2(d3.x, d3.y); + } + } else /* !close2 */ + { + if (close3) { + *angle_in = *angle_mid = *angle_out = SW_FT_Atan2(d2.x, d2.y); + } else /* !close3 */ + { + *angle_in = *angle_mid = SW_FT_Atan2(d2.x, d2.y); + *angle_out = SW_FT_Atan2(d3.x, d3.y); + } + } + } else /* !close1 */ + { + if (close2) { + if (close3) { + *angle_in = *angle_mid = *angle_out = SW_FT_Atan2(d1.x, d1.y); + } else /* !close3 */ + { + *angle_in = SW_FT_Atan2(d1.x, d1.y); + *angle_out = SW_FT_Atan2(d3.x, d3.y); + *angle_mid = ft_angle_mean(*angle_in, *angle_out); + } + } else /* !close2 */ + { + if (close3) { + *angle_in = SW_FT_Atan2(d1.x, d1.y); + *angle_mid = *angle_out = SW_FT_Atan2(d2.x, d2.y); + } else /* !close3 */ + { + *angle_in = SW_FT_Atan2(d1.x, d1.y); + *angle_mid = SW_FT_Atan2(d2.x, d2.y); + *angle_out = SW_FT_Atan2(d3.x, d3.y); + } + } + } + + theta1 = ft_pos_abs(SW_FT_Angle_Diff(*angle_in, *angle_mid)); + theta2 = ft_pos_abs(SW_FT_Angle_Diff(*angle_mid, *angle_out)); + + return SW_FT_BOOL(theta1 < SW_FT_SMALL_CUBIC_THRESHOLD && + theta2 < SW_FT_SMALL_CUBIC_THRESHOLD); +} + +/*************************************************************************/ +/*************************************************************************/ +/***** *****/ +/***** STROKE BORDERS *****/ +/***** *****/ +/*************************************************************************/ +/*************************************************************************/ + +typedef enum SW_FT_StrokeTags_ { + SW_FT_STROKE_TAG_ON = 1, /* on-curve point */ + SW_FT_STROKE_TAG_CUBIC = 2, /* cubic off-point */ + SW_FT_STROKE_TAG_BEGIN = 4, /* sub-path start */ + SW_FT_STROKE_TAG_END = 8 /* sub-path end */ + +} SW_FT_StrokeTags; + +#define SW_FT_STROKE_TAG_BEGIN_END \ + (SW_FT_STROKE_TAG_BEGIN | SW_FT_STROKE_TAG_END) + +typedef struct SW_FT_StrokeBorderRec_ { + SW_FT_UInt num_points; + SW_FT_UInt max_points; + SW_FT_Vector* points; + SW_FT_Byte* tags; + SW_FT_Bool movable; /* TRUE for ends of lineto borders */ + SW_FT_Int start; /* index of current sub-path start point */ + SW_FT_Bool valid; + +} SW_FT_StrokeBorderRec, *SW_FT_StrokeBorder; + +SW_FT_Error SW_FT_Outline_Check(SW_FT_Outline* outline) +{ + if (outline) { + SW_FT_Int n_points = outline->n_points; + SW_FT_Int n_contours = outline->n_contours; + SW_FT_Int end0, end; + SW_FT_Int n; + + /* empty glyph? 
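+           For example, n_points = 7 with contours = { 2, 6 } is the
+           accepted layout: two contours covering points 0..2 and 3..6.
+           The checks below enforce exactly this shape, with strictly
+           increasing contour ends and the last end at n_points - 1.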
*/ + if (n_points == 0 && n_contours == 0) return 0; + + /* check point and contour counts */ + if (n_points <= 0 || n_contours <= 0) goto Bad; + + end0 = end = -1; + for (n = 0; n < n_contours; n++) { + end = outline->contours[n]; + + /* note that we don't accept empty contours */ + if (end <= end0 || end >= n_points) goto Bad; + + end0 = end; + } + + if (end != n_points - 1) goto Bad; + + /* XXX: check the tags array */ + return 0; + } + +Bad: + return -1; // SW_FT_THROW( Invalid_Argument ); +} + +void SW_FT_Outline_Get_CBox(const SW_FT_Outline* outline, SW_FT_BBox* acbox) +{ + SW_FT_Pos xMin, yMin, xMax, yMax; + + if (outline && acbox) { + if (outline->n_points == 0) { + xMin = 0; + yMin = 0; + xMax = 0; + yMax = 0; + } else { + SW_FT_Vector* vec = outline->points; + SW_FT_Vector* limit = vec + outline->n_points; + + xMin = xMax = vec->x; + yMin = yMax = vec->y; + vec++; + + for (; vec < limit; vec++) { + SW_FT_Pos x, y; + + x = vec->x; + if (x < xMin) xMin = x; + if (x > xMax) xMax = x; + + y = vec->y; + if (y < yMin) yMin = y; + if (y > yMax) yMax = y; + } + } + acbox->xMin = xMin; + acbox->xMax = xMax; + acbox->yMin = yMin; + acbox->yMax = yMax; + } +} + +static SW_FT_Error ft_stroke_border_grow(SW_FT_StrokeBorder border, + SW_FT_UInt new_points) +{ + SW_FT_UInt old_max = border->max_points; + SW_FT_UInt new_max = border->num_points + new_points; + SW_FT_Error error = 0; + + if (new_max > old_max) { + SW_FT_UInt cur_max = old_max; + + while (cur_max < new_max) cur_max += (cur_max >> 1) + 16; + + border->points = (SW_FT_Vector*)realloc(border->points, + cur_max * sizeof(SW_FT_Vector)); + border->tags = + (SW_FT_Byte*)realloc(border->tags, cur_max * sizeof(SW_FT_Byte)); + + if (!border->points || !border->tags) goto Exit; + + border->max_points = cur_max; + } + +Exit: + return error; +} + +static void ft_stroke_border_close(SW_FT_StrokeBorder border, + SW_FT_Bool reverse) +{ + SW_FT_UInt start = border->start; + SW_FT_UInt count = border->num_points; + + assert(border->start >= 0); + + /* don't record empty paths! 
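+       (note: ft_stroke_border_grow above enlarges capacity by roughly
+       1.5x plus a constant, giving 16, 40, 76, 130, ... when starting
+       from zero, which amortizes reallocations over the many small
+       appends made by the lineto/conicto/cubicto helpers below)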
*/ + if (count <= start + 1U) + border->num_points = start; + else { + /* copy the last point to the start of this sub-path, since */ + /* it contains the `adjusted' starting coordinates */ + border->num_points = --count; + border->points[start] = border->points[count]; + + if (reverse) { + /* reverse the points */ + { + SW_FT_Vector* vec1 = border->points + start + 1; + SW_FT_Vector* vec2 = border->points + count - 1; + + for (; vec1 < vec2; vec1++, vec2--) { + SW_FT_Vector tmp; + + tmp = *vec1; + *vec1 = *vec2; + *vec2 = tmp; + } + } + + /* then the tags */ + { + SW_FT_Byte* tag1 = border->tags + start + 1; + SW_FT_Byte* tag2 = border->tags + count - 1; + + for (; tag1 < tag2; tag1++, tag2--) { + SW_FT_Byte tmp; + + tmp = *tag1; + *tag1 = *tag2; + *tag2 = tmp; + } + } + } + + border->tags[start] |= SW_FT_STROKE_TAG_BEGIN; + border->tags[count - 1] |= SW_FT_STROKE_TAG_END; + } + + border->start = -1; + border->movable = FALSE; +} + +static SW_FT_Error ft_stroke_border_lineto(SW_FT_StrokeBorder border, + SW_FT_Vector* to, SW_FT_Bool movable) +{ + SW_FT_Error error = 0; + + assert(border->start >= 0); + + if (border->movable) { + /* move last point */ + border->points[border->num_points - 1] = *to; + } else { + /* don't add zero-length lineto */ + if (border->num_points > 0 && + SW_FT_IS_SMALL(border->points[border->num_points - 1].x - to->x) && + SW_FT_IS_SMALL(border->points[border->num_points - 1].y - to->y)) + return error; + + /* add one point */ + error = ft_stroke_border_grow(border, 1); + if (!error) { + SW_FT_Vector* vec = border->points + border->num_points; + SW_FT_Byte* tag = border->tags + border->num_points; + + vec[0] = *to; + tag[0] = SW_FT_STROKE_TAG_ON; + + border->num_points += 1; + } + } + border->movable = movable; + return error; +} + +static SW_FT_Error ft_stroke_border_conicto(SW_FT_StrokeBorder border, + SW_FT_Vector* control, + SW_FT_Vector* to) +{ + SW_FT_Error error; + + assert(border->start >= 0); + + error = ft_stroke_border_grow(border, 2); + if (!error) { + SW_FT_Vector* vec = border->points + border->num_points; + SW_FT_Byte* tag = border->tags + border->num_points; + + vec[0] = *control; + vec[1] = *to; + + tag[0] = 0; + tag[1] = SW_FT_STROKE_TAG_ON; + + border->num_points += 2; + } + + border->movable = FALSE; + + return error; +} + +static SW_FT_Error ft_stroke_border_cubicto(SW_FT_StrokeBorder border, + SW_FT_Vector* control1, + SW_FT_Vector* control2, + SW_FT_Vector* to) +{ + SW_FT_Error error; + + assert(border->start >= 0); + + error = ft_stroke_border_grow(border, 3); + if (!error) { + SW_FT_Vector* vec = border->points + border->num_points; + SW_FT_Byte* tag = border->tags + border->num_points; + + vec[0] = *control1; + vec[1] = *control2; + vec[2] = *to; + + tag[0] = SW_FT_STROKE_TAG_CUBIC; + tag[1] = SW_FT_STROKE_TAG_CUBIC; + tag[2] = SW_FT_STROKE_TAG_ON; + + border->num_points += 3; + } + + border->movable = FALSE; + + return error; +} + +#define SW_FT_ARC_CUBIC_ANGLE (SW_FT_ANGLE_PI / 2) + + +static SW_FT_Error +ft_stroke_border_arcto( SW_FT_StrokeBorder border, + SW_FT_Vector* center, + SW_FT_Fixed radius, + SW_FT_Angle angle_start, + SW_FT_Angle angle_diff ) +{ + SW_FT_Fixed coef; + SW_FT_Vector a0, a1, a2, a3; + SW_FT_Int i, arcs = 1; + SW_FT_Error error = 0; + + + /* number of cubic arcs to draw */ + while ( angle_diff > SW_FT_ARC_CUBIC_ANGLE * arcs || + -angle_diff > SW_FT_ARC_CUBIC_ANGLE * arcs ) + arcs++; + + /* control tangents */ + coef = SW_FT_Tan( angle_diff / ( 4 * arcs ) ); + coef += coef / 3; + + /* compute start and first control 
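+     (note: `coef' above is the classic cubic-arc constant
+     k = (4/3) * tan(angle_diff / (4 * arcs)), where the
+     `coef += coef / 3' step supplies the 4/3 factor; control tangents
+     of length k * radius let one cubic Bezier match a circular arc of
+     that sweep at its endpoints and midpoint)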
point */ + SW_FT_Vector_From_Polar( &a0, radius, angle_start ); + a1.x = SW_FT_MulFix( -a0.y, coef ); + a1.y = SW_FT_MulFix( a0.x, coef ); + + a0.x += center->x; + a0.y += center->y; + a1.x += a0.x; + a1.y += a0.y; + + for ( i = 1; i <= arcs; i++ ) + { + /* compute end and second control point */ + SW_FT_Vector_From_Polar( &a3, radius, + angle_start + i * angle_diff / arcs ); + a2.x = SW_FT_MulFix( a3.y, coef ); + a2.y = SW_FT_MulFix( -a3.x, coef ); + + a3.x += center->x; + a3.y += center->y; + a2.x += a3.x; + a2.y += a3.y; + + /* add cubic arc */ + error = ft_stroke_border_cubicto( border, &a1, &a2, &a3 ); + if ( error ) + break; + + /* a0 = a3; */ + a1.x = a3.x - a2.x + a3.x; + a1.y = a3.y - a2.y + a3.y; + } + + return error; +} + +static SW_FT_Error ft_stroke_border_moveto(SW_FT_StrokeBorder border, + SW_FT_Vector* to) +{ + /* close current open path if any ? */ + if (border->start >= 0) ft_stroke_border_close(border, FALSE); + + border->start = border->num_points; + border->movable = FALSE; + + return ft_stroke_border_lineto(border, to, FALSE); +} + +static void ft_stroke_border_init(SW_FT_StrokeBorder border) +{ + border->points = NULL; + border->tags = NULL; + + border->num_points = 0; + border->max_points = 0; + border->start = -1; + border->valid = FALSE; +} + +static void ft_stroke_border_reset(SW_FT_StrokeBorder border) +{ + border->num_points = 0; + border->start = -1; + border->valid = FALSE; +} + +static void ft_stroke_border_done(SW_FT_StrokeBorder border) +{ + free(border->points); + free(border->tags); + + border->num_points = 0; + border->max_points = 0; + border->start = -1; + border->valid = FALSE; +} + +static SW_FT_Error ft_stroke_border_get_counts(SW_FT_StrokeBorder border, + SW_FT_UInt* anum_points, + SW_FT_UInt* anum_contours) +{ + SW_FT_Error error = 0; + SW_FT_UInt num_points = 0; + SW_FT_UInt num_contours = 0; + + SW_FT_UInt count = border->num_points; + SW_FT_Vector* point = border->points; + SW_FT_Byte* tags = border->tags; + SW_FT_Int in_contour = 0; + + for (; count > 0; count--, num_points++, point++, tags++) { + if (tags[0] & SW_FT_STROKE_TAG_BEGIN) { + if (in_contour != 0) goto Fail; + + in_contour = 1; + } else if (in_contour == 0) + goto Fail; + + if (tags[0] & SW_FT_STROKE_TAG_END) { + in_contour = 0; + num_contours++; + } + } + + if (in_contour != 0) goto Fail; + + border->valid = TRUE; + +Exit: + *anum_points = num_points; + *anum_contours = num_contours; + return error; + +Fail: + num_points = 0; + num_contours = 0; + goto Exit; +} + +static void ft_stroke_border_export(SW_FT_StrokeBorder border, + SW_FT_Outline* outline) +{ + /* copy point locations */ + memcpy(outline->points + outline->n_points, border->points, + border->num_points * sizeof(SW_FT_Vector)); + + /* copy tags */ + { + SW_FT_UInt count = border->num_points; + SW_FT_Byte* read = border->tags; + SW_FT_Byte* write = (SW_FT_Byte*)outline->tags + outline->n_points; + + for (; count > 0; count--, read++, write++) { + if (*read & SW_FT_STROKE_TAG_ON) + *write = SW_FT_CURVE_TAG_ON; + else if (*read & SW_FT_STROKE_TAG_CUBIC) + *write = SW_FT_CURVE_TAG_CUBIC; + else + *write = SW_FT_CURVE_TAG_CONIC; + } + } + + /* copy contours */ + { + SW_FT_UInt count = border->num_points; + SW_FT_Byte* tags = border->tags; + SW_FT_Short* write = outline->contours + outline->n_contours; + SW_FT_Short idx = (SW_FT_Short)outline->n_points; + + for (; count > 0; count--, tags++, idx++) { + if (*tags & SW_FT_STROKE_TAG_END) { + *write++ = idx; + outline->n_contours++; + } + } + } + + outline->n_points = 
(short)(outline->n_points + border->num_points); + + assert(SW_FT_Outline_Check(outline) == 0); +} + +/*************************************************************************/ +/*************************************************************************/ +/***** *****/ +/***** STROKER *****/ +/***** *****/ +/*************************************************************************/ +/*************************************************************************/ + +#define SW_FT_SIDE_TO_ROTATE(s) (SW_FT_ANGLE_PI2 - (s)*SW_FT_ANGLE_PI) + +typedef struct SW_FT_StrokerRec_ { + SW_FT_Angle angle_in; /* direction into curr join */ + SW_FT_Angle angle_out; /* direction out of join */ + SW_FT_Vector center; /* current position */ + SW_FT_Fixed line_length; /* length of last lineto */ + SW_FT_Bool first_point; /* is this the start? */ + SW_FT_Bool subpath_open; /* is the subpath open? */ + SW_FT_Angle subpath_angle; /* subpath start direction */ + SW_FT_Vector subpath_start; /* subpath start position */ + SW_FT_Fixed subpath_line_length; /* subpath start lineto len */ + SW_FT_Bool handle_wide_strokes; /* use wide strokes logic? */ + + SW_FT_Stroker_LineCap line_cap; + SW_FT_Stroker_LineJoin line_join; + SW_FT_Stroker_LineJoin line_join_saved; + SW_FT_Fixed miter_limit; + SW_FT_Fixed radius; + + SW_FT_StrokeBorderRec borders[2]; +} SW_FT_StrokerRec; + +/* documentation is in ftstroke.h */ + +SW_FT_Error SW_FT_Stroker_New(SW_FT_Stroker* astroker) +{ + SW_FT_Error error = 0; /* assigned in SW_FT_NEW */ + SW_FT_Stroker stroker = NULL; + + stroker = (SW_FT_StrokerRec*)calloc(1, sizeof(SW_FT_StrokerRec)); + if (stroker) { + ft_stroke_border_init(&stroker->borders[0]); + ft_stroke_border_init(&stroker->borders[1]); + } + + *astroker = stroker; + + return error; +} + +void SW_FT_Stroker_Rewind(SW_FT_Stroker stroker) +{ + if (stroker) { + ft_stroke_border_reset(&stroker->borders[0]); + ft_stroke_border_reset(&stroker->borders[1]); + } +} + +/* documentation is in ftstroke.h */ + +void SW_FT_Stroker_Set(SW_FT_Stroker stroker, SW_FT_Fixed radius, + SW_FT_Stroker_LineCap line_cap, + SW_FT_Stroker_LineJoin line_join, + SW_FT_Fixed miter_limit) +{ + stroker->radius = radius; + stroker->line_cap = line_cap; + stroker->line_join = line_join; + stroker->miter_limit = miter_limit; + + /* ensure miter limit has sensible value */ + if (stroker->miter_limit < 0x10000) stroker->miter_limit = 0x10000; + + /* save line join style: */ + /* line join style can be temporarily changed when stroking curves */ + stroker->line_join_saved = line_join; + + SW_FT_Stroker_Rewind(stroker); +} + +/* documentation is in ftstroke.h */ + +void SW_FT_Stroker_Done(SW_FT_Stroker stroker) +{ + if (stroker) { + ft_stroke_border_done(&stroker->borders[0]); + ft_stroke_border_done(&stroker->borders[1]); + + free(stroker); + } +} + +/* create a circular arc at a corner or cap */ +static SW_FT_Error ft_stroker_arcto(SW_FT_Stroker stroker, SW_FT_Int side) +{ + SW_FT_Angle total, rotate; + SW_FT_Fixed radius = stroker->radius; + SW_FT_Error error = 0; + SW_FT_StrokeBorder border = stroker->borders + side; + + rotate = SW_FT_SIDE_TO_ROTATE(side); + + total = SW_FT_Angle_Diff(stroker->angle_in, stroker->angle_out); + if (total == SW_FT_ANGLE_PI) total = -rotate * 2; + + error = ft_stroke_border_arcto(border, &stroker->center, radius, + stroker->angle_in + rotate, total); + border->movable = FALSE; + return error; +} + +/* add a cap at the end of an opened path */ +static SW_FT_Error +ft_stroker_cap(SW_FT_Stroker stroker, + SW_FT_Angle angle, + SW_FT_Int 
side) +{ + SW_FT_Error error = 0; + + if (stroker->line_cap == SW_FT_STROKER_LINECAP_ROUND) + { + /* add a round cap */ + stroker->angle_in = angle; + stroker->angle_out = angle + SW_FT_ANGLE_PI; + + error = ft_stroker_arcto(stroker, side); + } + else + { + /* add a square or butt cap */ + SW_FT_Vector middle, delta; + SW_FT_Fixed radius = stroker->radius; + SW_FT_StrokeBorder border = stroker->borders + side; + + /* compute middle point and first angle point */ + SW_FT_Vector_From_Polar( &middle, radius, angle ); + delta.x = side ? middle.y : -middle.y; + delta.y = side ? -middle.x : middle.x; + + if ( stroker->line_cap == SW_FT_STROKER_LINECAP_SQUARE ) + { + middle.x += stroker->center.x; + middle.y += stroker->center.y; + } + else /* SW_FT_STROKER_LINECAP_BUTT */ + { + middle.x = stroker->center.x; + middle.y = stroker->center.y; + } + + delta.x += middle.x; + delta.y += middle.y; + + error = ft_stroke_border_lineto( border, &delta, FALSE ); + if ( error ) + goto Exit; + + /* compute second angle point */ + delta.x = middle.x - delta.x + middle.x; + delta.y = middle.y - delta.y + middle.y; + + error = ft_stroke_border_lineto( border, &delta, FALSE ); + } + +Exit: + return error; +} + +/* process an inside corner, i.e. compute intersection */ +static SW_FT_Error ft_stroker_inside(SW_FT_Stroker stroker, SW_FT_Int side, + SW_FT_Fixed line_length) +{ + SW_FT_StrokeBorder border = stroker->borders + side; + SW_FT_Angle phi, theta, rotate; + SW_FT_Fixed length; + SW_FT_Vector sigma, delta; + SW_FT_Error error = 0; + SW_FT_Bool intersect; /* use intersection of lines? */ + + rotate = SW_FT_SIDE_TO_ROTATE(side); + + theta = SW_FT_Angle_Diff(stroker->angle_in, stroker->angle_out) / 2; + + /* Only intersect borders if between two lineto's and both */ + /* lines are long enough (line_length is zero for curves). */ + if (!border->movable || line_length == 0 || + theta > 0x59C000 || theta < -0x59C000 ) + intersect = FALSE; + else { + /* compute minimum required length of lines */ + SW_FT_Fixed min_length; + + + SW_FT_Vector_Unit( &sigma, theta ); + min_length = + ft_pos_abs( SW_FT_MulDiv( stroker->radius, sigma.y, sigma.x ) ); + + intersect = SW_FT_BOOL( min_length && + stroker->line_length >= min_length && + line_length >= min_length ); + } + + if (!intersect) { + SW_FT_Vector_From_Polar(&delta, stroker->radius, + stroker->angle_out + rotate); + delta.x += stroker->center.x; + delta.y += stroker->center.y; + + border->movable = FALSE; + } else { + /* compute median angle */ + phi = stroker->angle_in + theta + rotate; + + length = SW_FT_DivFix( stroker->radius, sigma.x ); + + SW_FT_Vector_From_Polar( &delta, length, phi ); + delta.x += stroker->center.x; + delta.y += stroker->center.y; + } + + error = ft_stroke_border_lineto(border, &delta, FALSE); + + return error; +} + + /* process an outside corner, i.e. 
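+     (note on ft_stroker_inside above: 0x59C000 is about 89.75 degrees
+     if angles use FreeType's 16.16 encoding of 65536 units per degree,
+     an assumption here; the line-intersection shortcut is therefore
+     skipped when the corner approaches a U-turn and the two borders
+     would meet nearly head-on)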
compute bevel/miter/round */ +static SW_FT_Error +ft_stroker_outside( SW_FT_Stroker stroker, + SW_FT_Int side, + SW_FT_Fixed line_length ) +{ + SW_FT_StrokeBorder border = stroker->borders + side; + SW_FT_Error error; + SW_FT_Angle rotate; + + + if ( stroker->line_join == SW_FT_STROKER_LINEJOIN_ROUND ) + error = ft_stroker_arcto( stroker, side ); + else + { + /* this is a mitered (pointed) or beveled (truncated) corner */ + SW_FT_Fixed radius = stroker->radius; + SW_FT_Vector sigma; + SW_FT_Angle theta = 0, phi = 0; + SW_FT_Bool bevel, fixed_bevel; + + + rotate = SW_FT_SIDE_TO_ROTATE( side ); + + bevel = + SW_FT_BOOL( stroker->line_join == SW_FT_STROKER_LINEJOIN_BEVEL ); + + fixed_bevel = + SW_FT_BOOL( stroker->line_join != SW_FT_STROKER_LINEJOIN_MITER_VARIABLE ); + + /* check miter limit first */ + if ( !bevel ) + { + theta = SW_FT_Angle_Diff( stroker->angle_in, stroker->angle_out ) / 2; + + if ( theta == SW_FT_ANGLE_PI2 ) + theta = -rotate; + + phi = stroker->angle_in + theta + rotate; + + SW_FT_Vector_From_Polar( &sigma, stroker->miter_limit, theta ); + + /* is miter limit exceeded? */ + if ( sigma.x < 0x10000L ) + { + /* don't create variable bevels for very small deviations; */ + /* FT_Sin(x) = 0 for x <= 57 */ + if ( fixed_bevel || ft_pos_abs( theta ) > 57 ) + bevel = TRUE; + } + } + + if ( bevel ) /* this is a bevel (broken angle) */ + { + if ( fixed_bevel ) + { + /* the outer corners are simply joined together */ + SW_FT_Vector delta; + + + /* add bevel */ + SW_FT_Vector_From_Polar( &delta, + radius, + stroker->angle_out + rotate ); + delta.x += stroker->center.x; + delta.y += stroker->center.y; + + border->movable = FALSE; + error = ft_stroke_border_lineto( border, &delta, FALSE ); + } + else /* variable bevel or clipped miter */ + { + /* the miter is truncated */ + SW_FT_Vector middle, delta; + SW_FT_Fixed coef; + + + /* compute middle point and first angle point */ + SW_FT_Vector_From_Polar( &middle, + SW_FT_MulFix( radius, stroker->miter_limit ), + phi ); + + coef = SW_FT_DivFix( 0x10000L - sigma.x, sigma.y ); + delta.x = SW_FT_MulFix( middle.y, coef ); + delta.y = SW_FT_MulFix( -middle.x, coef ); + + middle.x += stroker->center.x; + middle.y += stroker->center.y; + delta.x += middle.x; + delta.y += middle.y; + + error = ft_stroke_border_lineto( border, &delta, FALSE ); + if ( error ) + goto Exit; + + /* compute second angle point */ + delta.x = middle.x - delta.x + middle.x; + delta.y = middle.y - delta.y + middle.y; + + error = ft_stroke_border_lineto( border, &delta, FALSE ); + if ( error ) + goto Exit; + + /* finally, add an end point; only needed if not lineto */ + /* (line_length is zero for curves) */ + if ( line_length == 0 ) + { + SW_FT_Vector_From_Polar( &delta, + radius, + stroker->angle_out + rotate ); + + delta.x += stroker->center.x; + delta.y += stroker->center.y; + + error = ft_stroke_border_lineto( border, &delta, FALSE ); + } + } + } + else /* this is a miter (intersection) */ + { + SW_FT_Fixed length; + SW_FT_Vector delta; + + + length = SW_FT_MulDiv( stroker->radius, stroker->miter_limit, sigma.x ); + + SW_FT_Vector_From_Polar( &delta, length, phi ); + delta.x += stroker->center.x; + delta.y += stroker->center.y; + + error = ft_stroke_border_lineto( border, &delta, FALSE ); + if ( error ) + goto Exit; + + /* now add an end point; only needed if not lineto */ + /* (line_length is zero for curves) */ + if ( line_length == 0 ) + { + SW_FT_Vector_From_Polar( &delta, + stroker->radius, + stroker->angle_out + rotate ); + delta.x += stroker->center.x; + delta.y += 
stroker->center.y; + + error = ft_stroke_border_lineto( border, &delta, FALSE ); + } + } + } + + Exit: + return error; +} + +static SW_FT_Error ft_stroker_process_corner(SW_FT_Stroker stroker, + SW_FT_Fixed line_length) +{ + SW_FT_Error error = 0; + SW_FT_Angle turn; + SW_FT_Int inside_side; + + turn = SW_FT_Angle_Diff(stroker->angle_in, stroker->angle_out); + + /* no specific corner processing is required if the turn is 0 */ + if (turn == 0) goto Exit; + + /* when we turn to the right, the inside side is 0 */ + inside_side = 0; + + /* otherwise, the inside side is 1 */ + if (turn < 0) inside_side = 1; + + /* process the inside side */ + error = ft_stroker_inside(stroker, inside_side, line_length); + if (error) goto Exit; + + /* process the outside side */ + error = ft_stroker_outside(stroker, 1 - inside_side, line_length); + +Exit: + return error; +} + +/* add two points to the left and right borders corresponding to the */ +/* start of the subpath */ +static SW_FT_Error ft_stroker_subpath_start(SW_FT_Stroker stroker, + SW_FT_Angle start_angle, + SW_FT_Fixed line_length) +{ + SW_FT_Vector delta; + SW_FT_Vector point; + SW_FT_Error error; + SW_FT_StrokeBorder border; + + SW_FT_Vector_From_Polar(&delta, stroker->radius, + start_angle + SW_FT_ANGLE_PI2); + + point.x = stroker->center.x + delta.x; + point.y = stroker->center.y + delta.y; + + border = stroker->borders; + error = ft_stroke_border_moveto(border, &point); + if (error) goto Exit; + + point.x = stroker->center.x - delta.x; + point.y = stroker->center.y - delta.y; + + border++; + error = ft_stroke_border_moveto(border, &point); + + /* save angle, position, and line length for last join */ + /* (line_length is zero for curves) */ + stroker->subpath_angle = start_angle; + stroker->first_point = FALSE; + stroker->subpath_line_length = line_length; + +Exit: + return error; +} + +/* documentation is in ftstroke.h */ + +SW_FT_Error SW_FT_Stroker_LineTo(SW_FT_Stroker stroker, SW_FT_Vector* to) +{ + SW_FT_Error error = 0; + SW_FT_StrokeBorder border; + SW_FT_Vector delta; + SW_FT_Angle angle; + SW_FT_Int side; + SW_FT_Fixed line_length; + + delta.x = to->x - stroker->center.x; + delta.y = to->y - stroker->center.y; + + /* a zero-length lineto is a no-op; avoid creating a spurious corner */ + if (delta.x == 0 && delta.y == 0) goto Exit; + + /* compute length of line */ + line_length = SW_FT_Vector_Length(&delta); + + angle = SW_FT_Atan2(delta.x, delta.y); + SW_FT_Vector_From_Polar(&delta, stroker->radius, angle + SW_FT_ANGLE_PI2); + + /* process corner if necessary */ + if (stroker->first_point) { + /* This is the first segment of a subpath. We need to */ + /* add a point to each border at their respective starting */ + /* point locations. 
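+       Both borders run parallel to the stroked segment, offset by
+       plus/minus radius along its normal (`delta' above is radius at
+       angle + 90 degrees); the loop below adds the offset endpoint to
+       one border and its negation to the other.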
*/ + error = ft_stroker_subpath_start(stroker, angle, line_length); + if (error) goto Exit; + } else { + /* process the current corner */ + stroker->angle_out = angle; + error = ft_stroker_process_corner(stroker, line_length); + if (error) goto Exit; + } + + /* now add a line segment to both the `inside' and `outside' paths */ + for (border = stroker->borders, side = 1; side >= 0; side--, border++) { + SW_FT_Vector point; + + point.x = to->x + delta.x; + point.y = to->y + delta.y; + + /* the ends of lineto borders are movable */ + error = ft_stroke_border_lineto(border, &point, TRUE); + if (error) goto Exit; + + delta.x = -delta.x; + delta.y = -delta.y; + } + + stroker->angle_in = angle; + stroker->center = *to; + stroker->line_length = line_length; + +Exit: + return error; +} + +/* documentation is in ftstroke.h */ + +SW_FT_Error SW_FT_Stroker_ConicTo(SW_FT_Stroker stroker, SW_FT_Vector* control, + SW_FT_Vector* to) +{ + SW_FT_Error error = 0; + SW_FT_Vector bez_stack[34]; + SW_FT_Vector* arc; + SW_FT_Vector* limit = bez_stack + 30; + SW_FT_Bool first_arc = TRUE; + + /* if all control points are coincident, this is a no-op; */ + /* avoid creating a spurious corner */ + if (SW_FT_IS_SMALL(stroker->center.x - control->x) && + SW_FT_IS_SMALL(stroker->center.y - control->y) && + SW_FT_IS_SMALL(control->x - to->x) && + SW_FT_IS_SMALL(control->y - to->y)) { + stroker->center = *to; + goto Exit; + } + + arc = bez_stack; + arc[0] = *to; + arc[1] = *control; + arc[2] = stroker->center; + + while (arc >= bez_stack) { + SW_FT_Angle angle_in, angle_out; + + /* initialize with current direction */ + angle_in = angle_out = stroker->angle_in; + + if (arc < limit && + !ft_conic_is_small_enough(arc, &angle_in, &angle_out)) { + if (stroker->first_point) stroker->angle_in = angle_in; + + ft_conic_split(arc); + arc += 2; + continue; + } + + if (first_arc) { + first_arc = FALSE; + + /* process corner if necessary */ + if (stroker->first_point) + error = ft_stroker_subpath_start(stroker, angle_in, 0); + else { + stroker->angle_out = angle_in; + error = ft_stroker_process_corner(stroker, 0); + } + } else if (ft_pos_abs(SW_FT_Angle_Diff(stroker->angle_in, angle_in)) > + SW_FT_SMALL_CONIC_THRESHOLD / 4) { + /* if the deviation from one arc to the next is too great, */ + /* add a round corner */ + stroker->center = arc[2]; + stroker->angle_out = angle_in; + stroker->line_join = SW_FT_STROKER_LINEJOIN_ROUND; + + error = ft_stroker_process_corner(stroker, 0); + + /* reinstate line join style */ + stroker->line_join = stroker->line_join_saved; + } + + if (error) goto Exit; + + /* the arc's angle is small enough; we can add it directly to each */ + /* border */ + { + SW_FT_Vector ctrl, end; + SW_FT_Angle theta, phi, rotate, alpha0 = 0; + SW_FT_Fixed length; + SW_FT_StrokeBorder border; + SW_FT_Int side; + + theta = SW_FT_Angle_Diff(angle_in, angle_out) / 2; + phi = angle_in + theta; + length = SW_FT_DivFix(stroker->radius, SW_FT_Cos(theta)); + + /* compute direction of original arc */ + if (stroker->handle_wide_strokes) + alpha0 = SW_FT_Atan2(arc[0].x - arc[2].x, arc[0].y - arc[2].y); + + for (border = stroker->borders, side = 0; side <= 1; + side++, border++) { + rotate = SW_FT_SIDE_TO_ROTATE(side); + + /* compute control point */ + SW_FT_Vector_From_Polar(&ctrl, length, phi + rotate); + ctrl.x += arc[1].x; + ctrl.y += arc[1].y; + + /* compute end point */ + SW_FT_Vector_From_Polar(&end, stroker->radius, + angle_out + rotate); + end.x += arc[0].x; + end.y += arc[0].y; + + if (stroker->handle_wide_strokes) { + 
SW_FT_Vector start; + SW_FT_Angle alpha1; + + /* determine whether the border radius is greater than the + */ + /* radius of curvature of the original arc */ + start = border->points[border->num_points - 1]; + + alpha1 = SW_FT_Atan2(end.x - start.x, end.y - start.y); + + /* is the direction of the border arc opposite to */ + /* that of the original arc? */ + if (ft_pos_abs(SW_FT_Angle_Diff(alpha0, alpha1)) > + SW_FT_ANGLE_PI / 2) { + SW_FT_Angle beta, gamma; + SW_FT_Vector bvec, delta; + SW_FT_Fixed blen, sinA, sinB, alen; + + /* use the sine rule to find the intersection point */ + beta = + SW_FT_Atan2(arc[2].x - start.x, arc[2].y - start.y); + gamma = SW_FT_Atan2(arc[0].x - end.x, arc[0].y - end.y); + + bvec.x = end.x - start.x; + bvec.y = end.y - start.y; + + blen = SW_FT_Vector_Length(&bvec); + + sinA = ft_pos_abs(SW_FT_Sin(alpha1 - gamma)); + sinB = ft_pos_abs(SW_FT_Sin(beta - gamma)); + + alen = SW_FT_MulDiv(blen, sinA, sinB); + + SW_FT_Vector_From_Polar(&delta, alen, beta); + delta.x += start.x; + delta.y += start.y; + + /* circumnavigate the negative sector backwards */ + border->movable = FALSE; + error = ft_stroke_border_lineto(border, &delta, FALSE); + if (error) goto Exit; + error = ft_stroke_border_lineto(border, &end, FALSE); + if (error) goto Exit; + error = ft_stroke_border_conicto(border, &ctrl, &start); + if (error) goto Exit; + /* and then move to the endpoint */ + error = ft_stroke_border_lineto(border, &end, FALSE); + if (error) goto Exit; + + continue; + } + + /* else fall through */ + } + + /* simply add an arc */ + error = ft_stroke_border_conicto(border, &ctrl, &end); + if (error) goto Exit; + } + } + + arc -= 2; + + stroker->angle_in = angle_out; + } + + stroker->center = *to; + +Exit: + return error; +} + +/* documentation is in ftstroke.h */ + +SW_FT_Error SW_FT_Stroker_CubicTo(SW_FT_Stroker stroker, SW_FT_Vector* control1, + SW_FT_Vector* control2, SW_FT_Vector* to) +{ + SW_FT_Error error = 0; + SW_FT_Vector bez_stack[37]; + SW_FT_Vector* arc; + SW_FT_Vector* limit = bez_stack + 32; + SW_FT_Bool first_arc = TRUE; + + /* if all control points are coincident, this is a no-op; */ + /* avoid creating a spurious corner */ + if (SW_FT_IS_SMALL(stroker->center.x - control1->x) && + SW_FT_IS_SMALL(stroker->center.y - control1->y) && + SW_FT_IS_SMALL(control1->x - control2->x) && + SW_FT_IS_SMALL(control1->y - control2->y) && + SW_FT_IS_SMALL(control2->x - to->x) && + SW_FT_IS_SMALL(control2->y - to->y)) { + stroker->center = *to; + goto Exit; + } + + arc = bez_stack; + arc[0] = *to; + arc[1] = *control2; + arc[2] = *control1; + arc[3] = stroker->center; + + while (arc >= bez_stack) { + SW_FT_Angle angle_in, angle_mid, angle_out; + + /* initialize with current direction */ + angle_in = angle_out = angle_mid = stroker->angle_in; + + if (arc < limit && + !ft_cubic_is_small_enough(arc, &angle_in, &angle_mid, &angle_out)) { + if (stroker->first_point) stroker->angle_in = angle_in; + + ft_cubic_split(arc); + arc += 3; + continue; + } + + if (first_arc) { + first_arc = FALSE; + + /* process corner if necessary */ + if (stroker->first_point) + error = ft_stroker_subpath_start(stroker, angle_in, 0); + else { + stroker->angle_out = angle_in; + error = ft_stroker_process_corner(stroker, 0); + } + } else if (ft_pos_abs(SW_FT_Angle_Diff(stroker->angle_in, angle_in)) > + SW_FT_SMALL_CUBIC_THRESHOLD / 4) { + /* if the deviation from one arc to the next is too great, */ + /* add a round corner */ + stroker->center = arc[3]; + stroker->angle_out = angle_in; + stroker->line_join = 
SW_FT_STROKER_LINEJOIN_ROUND; + + error = ft_stroker_process_corner(stroker, 0); + + /* reinstate line join style */ + stroker->line_join = stroker->line_join_saved; + } + + if (error) goto Exit; + + /* the arc's angle is small enough; we can add it directly to each */ + /* border */ + { + SW_FT_Vector ctrl1, ctrl2, end; + SW_FT_Angle theta1, phi1, theta2, phi2, rotate, alpha0 = 0; + SW_FT_Fixed length1, length2; + SW_FT_StrokeBorder border; + SW_FT_Int side; + + theta1 = SW_FT_Angle_Diff(angle_in, angle_mid) / 2; + theta2 = SW_FT_Angle_Diff(angle_mid, angle_out) / 2; + phi1 = ft_angle_mean(angle_in, angle_mid); + phi2 = ft_angle_mean(angle_mid, angle_out); + length1 = SW_FT_DivFix(stroker->radius, SW_FT_Cos(theta1)); + length2 = SW_FT_DivFix(stroker->radius, SW_FT_Cos(theta2)); + + /* compute direction of original arc */ + if (stroker->handle_wide_strokes) + alpha0 = SW_FT_Atan2(arc[0].x - arc[3].x, arc[0].y - arc[3].y); + + for (border = stroker->borders, side = 0; side <= 1; + side++, border++) { + rotate = SW_FT_SIDE_TO_ROTATE(side); + + /* compute control points */ + SW_FT_Vector_From_Polar(&ctrl1, length1, phi1 + rotate); + ctrl1.x += arc[2].x; + ctrl1.y += arc[2].y; + + SW_FT_Vector_From_Polar(&ctrl2, length2, phi2 + rotate); + ctrl2.x += arc[1].x; + ctrl2.y += arc[1].y; + + /* compute end point */ + SW_FT_Vector_From_Polar(&end, stroker->radius, + angle_out + rotate); + end.x += arc[0].x; + end.y += arc[0].y; + + if (stroker->handle_wide_strokes) { + SW_FT_Vector start; + SW_FT_Angle alpha1; + + /* determine whether the border radius is greater than the + */ + /* radius of curvature of the original arc */ + start = border->points[border->num_points - 1]; + + alpha1 = SW_FT_Atan2(end.x - start.x, end.y - start.y); + + /* is the direction of the border arc opposite to */ + /* that of the original arc? */ + if (ft_pos_abs(SW_FT_Angle_Diff(alpha0, alpha1)) > + SW_FT_ANGLE_PI / 2) { + SW_FT_Angle beta, gamma; + SW_FT_Vector bvec, delta; + SW_FT_Fixed blen, sinA, sinB, alen; + + /* use the sine rule to find the intersection point */ + beta = + SW_FT_Atan2(arc[3].x - start.x, arc[3].y - start.y); + gamma = SW_FT_Atan2(arc[0].x - end.x, arc[0].y - end.y); + + bvec.x = end.x - start.x; + bvec.y = end.y - start.y; + + blen = SW_FT_Vector_Length(&bvec); + + sinA = ft_pos_abs(SW_FT_Sin(alpha1 - gamma)); + sinB = ft_pos_abs(SW_FT_Sin(beta - gamma)); + + alen = SW_FT_MulDiv(blen, sinA, sinB); + + SW_FT_Vector_From_Polar(&delta, alen, beta); + delta.x += start.x; + delta.y += start.y; + + /* circumnavigate the negative sector backwards */ + border->movable = FALSE; + error = ft_stroke_border_lineto(border, &delta, FALSE); + if (error) goto Exit; + error = ft_stroke_border_lineto(border, &end, FALSE); + if (error) goto Exit; + error = ft_stroke_border_cubicto(border, &ctrl2, &ctrl1, + &start); + if (error) goto Exit; + /* and then move to the endpoint */ + error = ft_stroke_border_lineto(border, &end, FALSE); + if (error) goto Exit; + + continue; + } + + /* else fall through */ + } + + /* simply add an arc */ + error = ft_stroke_border_cubicto(border, &ctrl1, &ctrl2, &end); + if (error) goto Exit; + } + } + + arc -= 3; + + stroker->angle_in = angle_out; + } + + stroker->center = *to; + +Exit: + return error; +} + +/* documentation is in ftstroke.h */ + +SW_FT_Error SW_FT_Stroker_BeginSubPath(SW_FT_Stroker stroker, SW_FT_Vector* to, + SW_FT_Bool open) +{ + /* We cannot process the first point, because there is not enough */ + /* information regarding its corner/cap. 
The latter will be processed */ + /* in the `SW_FT_Stroker_EndSubPath' routine. */ + /* */ + stroker->first_point = TRUE; + stroker->center = *to; + stroker->subpath_open = open; + + /* Determine if we need to check whether the border radius is greater */ + /* than the radius of curvature of a curve, to handle this case */ + /* specially. This is only required if bevel joins or butt caps may */ + /* be created, because round & miter joins and round & square caps */ + /* cover the negative sector created with wide strokes. */ + stroker->handle_wide_strokes = + SW_FT_BOOL(stroker->line_join != SW_FT_STROKER_LINEJOIN_ROUND || + (stroker->subpath_open && + stroker->line_cap == SW_FT_STROKER_LINECAP_BUTT)); + + /* record the subpath start point for each border */ + stroker->subpath_start = *to; + + stroker->angle_in = 0; + + return 0; +} + +static SW_FT_Error ft_stroker_add_reverse_left(SW_FT_Stroker stroker, + SW_FT_Bool open) +{ + SW_FT_StrokeBorder right = stroker->borders + 0; + SW_FT_StrokeBorder left = stroker->borders + 1; + SW_FT_Int new_points; + SW_FT_Error error = 0; + + assert(left->start >= 0); + + new_points = left->num_points - left->start; + if (new_points > 0) { + error = ft_stroke_border_grow(right, (SW_FT_UInt)new_points); + if (error) goto Exit; + + { + SW_FT_Vector* dst_point = right->points + right->num_points; + SW_FT_Byte* dst_tag = right->tags + right->num_points; + SW_FT_Vector* src_point = left->points + left->num_points - 1; + SW_FT_Byte* src_tag = left->tags + left->num_points - 1; + + while (src_point >= left->points + left->start) { + *dst_point = *src_point; + *dst_tag = *src_tag; + + if (open) + dst_tag[0] &= ~SW_FT_STROKE_TAG_BEGIN_END; + else { + SW_FT_Byte ttag = + (SW_FT_Byte)(dst_tag[0] & SW_FT_STROKE_TAG_BEGIN_END); + + /* switch begin/end tags if necessary */ + if (ttag == SW_FT_STROKE_TAG_BEGIN || + ttag == SW_FT_STROKE_TAG_END) + dst_tag[0] ^= SW_FT_STROKE_TAG_BEGIN_END; + } + + src_point--; + src_tag--; + dst_point++; + dst_tag++; + } + } + + left->num_points = left->start; + right->num_points += new_points; + + right->movable = FALSE; + left->movable = FALSE; + } + +Exit: + return error; +} + +/* documentation is in ftstroke.h */ + +/* there's a lot of magic in this function! */ +SW_FT_Error SW_FT_Stroker_EndSubPath(SW_FT_Stroker stroker) +{ + SW_FT_Error error = 0; + + if (stroker->subpath_open) { + SW_FT_StrokeBorder right = stroker->borders; + + /* All right, this is an opened path, we need to add a cap between */ + /* right & left, add the reverse of left, then add a final cap */ + /* between left & right. */ + error = ft_stroker_cap(stroker, stroker->angle_in, 0); + if (error) goto Exit; + + /* add reversed points from `left' to `right' */ + error = ft_stroker_add_reverse_left(stroker, TRUE); + if (error) goto Exit; + + /* now add the final cap */ + stroker->center = stroker->subpath_start; + error = + ft_stroker_cap(stroker, stroker->subpath_angle + SW_FT_ANGLE_PI, 0); + if (error) goto Exit; + + /* Now end the right subpath accordingly. The left one is */ + /* rewind and doesn't need further processing. 
*/ + ft_stroke_border_close(right, FALSE); + } else { + SW_FT_Angle turn; + SW_FT_Int inside_side; + + /* close the path if needed */ + if (stroker->center.x != stroker->subpath_start.x || + stroker->center.y != stroker->subpath_start.y) { + error = SW_FT_Stroker_LineTo(stroker, &stroker->subpath_start); + if (error) goto Exit; + } + + /* process the corner */ + stroker->angle_out = stroker->subpath_angle; + turn = SW_FT_Angle_Diff(stroker->angle_in, stroker->angle_out); + + /* no specific corner processing is required if the turn is 0 */ + if (turn != 0) { + /* when we turn to the right, the inside side is 0 */ + inside_side = 0; + + /* otherwise, the inside side is 1 */ + if (turn < 0) inside_side = 1; + + error = ft_stroker_inside(stroker, inside_side, + stroker->subpath_line_length); + if (error) goto Exit; + + /* process the outside side */ + error = ft_stroker_outside(stroker, 1 - inside_side, + stroker->subpath_line_length); + if (error) goto Exit; + } + + /* then end our two subpaths */ + ft_stroke_border_close(stroker->borders + 0, FALSE); + ft_stroke_border_close(stroker->borders + 1, TRUE); + } + +Exit: + return error; +} + +/* documentation is in ftstroke.h */ + +SW_FT_Error SW_FT_Stroker_GetBorderCounts(SW_FT_Stroker stroker, + SW_FT_StrokerBorder border, + SW_FT_UInt* anum_points, + SW_FT_UInt* anum_contours) +{ + SW_FT_UInt num_points = 0, num_contours = 0; + SW_FT_Error error; + + if (!stroker || border > 1) { + error = -1; // SW_FT_THROW( Invalid_Argument ); + goto Exit; + } + + error = ft_stroke_border_get_counts(stroker->borders + border, &num_points, + &num_contours); +Exit: + if (anum_points) *anum_points = num_points; + + if (anum_contours) *anum_contours = num_contours; + + return error; +} + +/* documentation is in ftstroke.h */ + +SW_FT_Error SW_FT_Stroker_GetCounts(SW_FT_Stroker stroker, + SW_FT_UInt* anum_points, + SW_FT_UInt* anum_contours) +{ + SW_FT_UInt count1, count2, num_points = 0; + SW_FT_UInt count3, count4, num_contours = 0; + SW_FT_Error error; + + error = ft_stroke_border_get_counts(stroker->borders + 0, &count1, &count2); + if (error) goto Exit; + + error = ft_stroke_border_get_counts(stroker->borders + 1, &count3, &count4); + if (error) goto Exit; + + num_points = count1 + count3; + num_contours = count2 + count4; + +Exit: + *anum_points = num_points; + *anum_contours = num_contours; + return error; +} + +/* documentation is in ftstroke.h */ + +void SW_FT_Stroker_ExportBorder(SW_FT_Stroker stroker, + SW_FT_StrokerBorder border, + SW_FT_Outline* outline) +{ + if (border == SW_FT_STROKER_BORDER_LEFT || + border == SW_FT_STROKER_BORDER_RIGHT) { + SW_FT_StrokeBorder sborder = &stroker->borders[border]; + + if (sborder->valid) ft_stroke_border_export(sborder, outline); + } +} + +/* documentation is in ftstroke.h */ + +void SW_FT_Stroker_Export(SW_FT_Stroker stroker, SW_FT_Outline* outline) +{ + SW_FT_Stroker_ExportBorder(stroker, SW_FT_STROKER_BORDER_LEFT, outline); + SW_FT_Stroker_ExportBorder(stroker, SW_FT_STROKER_BORDER_RIGHT, outline); +} + +/* documentation is in ftstroke.h */ + +/* + * The following is very similar to SW_FT_Outline_Decompose, except + * that we do support opened paths, and do not scale the outline. 
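+ */
+
+/* Illustrative sketch, not part of the vendored source: typical       */
+/* end-to-end use of this stroker. The malloc-based output allocation, */
+/* the 2-pixel (26.6) radius, and the 4.0 (16.16) miter limit are      */
+/* assumptions for illustration.                                       */
+#if 0
+static void stroke_outline(const SW_FT_Outline* src)
+{
+    SW_FT_Stroker stroker;
+    SW_FT_UInt    pts = 0, cts = 0;
+    SW_FT_Outline out = { 0 };
+
+    SW_FT_Stroker_New(&stroker);
+    SW_FT_Stroker_Set(stroker, 2 * 64,              /* radius, 26.6 fixed */
+                      SW_FT_STROKER_LINECAP_ROUND,
+                      SW_FT_STROKER_LINEJOIN_ROUND,
+                      4 << 16);                     /* miter limit, 16.16 */
+
+    if (SW_FT_Stroker_ParseOutline(stroker, src) == 0) {
+        SW_FT_Stroker_GetCounts(stroker, &pts, &cts);
+
+        out.points   = (SW_FT_Vector*)malloc(pts * sizeof(SW_FT_Vector));
+        out.tags     = (char*)malloc(pts);
+        out.contours = (short*)malloc(cts * sizeof(short));
+
+        SW_FT_Stroker_Export(stroker, &out); /* fills n_points/n_contours */
+
+        /* ... rasterize `out' ... */
+
+        free(out.points);
+        free(out.tags);
+        free(out.contours);
+    }
+    SW_FT_Stroker_Done(stroker);
+}
+#endif
+
+/*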
+ */ +SW_FT_Error SW_FT_Stroker_ParseOutline(SW_FT_Stroker stroker, + const SW_FT_Outline* outline) +{ + SW_FT_Vector v_last; + SW_FT_Vector v_control; + SW_FT_Vector v_start; + + SW_FT_Vector* point; + SW_FT_Vector* limit; + char* tags; + + SW_FT_Error error; + + SW_FT_Int n; /* index of contour in outline */ + SW_FT_UInt first; /* index of first point in contour */ + SW_FT_Int tag; /* current point's state */ + + if (!outline || !stroker) return -1; // SW_FT_THROW( Invalid_Argument ); + + SW_FT_Stroker_Rewind(stroker); + + first = 0; + + for (n = 0; n < outline->n_contours; n++) { + SW_FT_UInt last; /* index of last point in contour */ + + last = outline->contours[n]; + limit = outline->points + last; + + /* skip empty points; we don't stroke these */ + if (last <= first) { + first = last + 1; + continue; + } + + v_start = outline->points[first]; + v_last = outline->points[last]; + + v_control = v_start; + + point = outline->points + first; + tags = outline->tags + first; + tag = SW_FT_CURVE_TAG(tags[0]); + + /* A contour cannot start with a cubic control point! */ + if (tag == SW_FT_CURVE_TAG_CUBIC) goto Invalid_Outline; + + /* check first point to determine origin */ + if (tag == SW_FT_CURVE_TAG_CONIC) { + /* First point is conic control. Yes, this happens. */ + if (SW_FT_CURVE_TAG(outline->tags[last]) == SW_FT_CURVE_TAG_ON) { + /* start at last point if it is on the curve */ + v_start = v_last; + limit--; + } else { + /* if both first and last points are conic, */ + /* start at their middle */ + v_start.x = (v_start.x + v_last.x) / 2; + v_start.y = (v_start.y + v_last.y) / 2; + } + point--; + tags--; + } + + error = SW_FT_Stroker_BeginSubPath(stroker, &v_start, outline->contours_flag[n]); + if (error) goto Exit; + + while (point < limit) { + point++; + tags++; + + tag = SW_FT_CURVE_TAG(tags[0]); + switch (tag) { + case SW_FT_CURVE_TAG_ON: /* emit a single line_to */ + { + SW_FT_Vector vec; + + vec.x = point->x; + vec.y = point->y; + + error = SW_FT_Stroker_LineTo(stroker, &vec); + if (error) goto Exit; + continue; + } + + case SW_FT_CURVE_TAG_CONIC: /* consume conic arcs */ + v_control.x = point->x; + v_control.y = point->y; + + Do_Conic: + if (point < limit) { + SW_FT_Vector vec; + SW_FT_Vector v_middle; + + point++; + tags++; + tag = SW_FT_CURVE_TAG(tags[0]); + + vec = point[0]; + + if (tag == SW_FT_CURVE_TAG_ON) { + error = + SW_FT_Stroker_ConicTo(stroker, &v_control, &vec); + if (error) goto Exit; + continue; + } + + if (tag != SW_FT_CURVE_TAG_CONIC) goto Invalid_Outline; + + v_middle.x = (v_control.x + vec.x) / 2; + v_middle.y = (v_control.y + vec.y) / 2; + + error = + SW_FT_Stroker_ConicTo(stroker, &v_control, &v_middle); + if (error) goto Exit; + + v_control = vec; + goto Do_Conic; + } + + error = SW_FT_Stroker_ConicTo(stroker, &v_control, &v_start); + goto Close; + + default: /* SW_FT_CURVE_TAG_CUBIC */ + { + SW_FT_Vector vec1, vec2; + + if (point + 1 > limit || + SW_FT_CURVE_TAG(tags[1]) != SW_FT_CURVE_TAG_CUBIC) + goto Invalid_Outline; + + point += 2; + tags += 2; + + vec1 = point[-2]; + vec2 = point[-1]; + + if (point <= limit) { + SW_FT_Vector vec; + + vec = point[0]; + + error = SW_FT_Stroker_CubicTo(stroker, &vec1, &vec2, &vec); + if (error) goto Exit; + continue; + } + + error = SW_FT_Stroker_CubicTo(stroker, &vec1, &vec2, &v_start); + goto Close; + } + } + } + + Close: + if (error) goto Exit; + + /* don't try to end the path if no segments have been generated */ + if (!stroker->first_point) { + error = SW_FT_Stroker_EndSubPath(stroker); + if (error) goto Exit; + } + + 
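+        /* step `first' past this contour to the start of the next one */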
first = last + 1; + } + + return 0; + +Exit: + return error; + +Invalid_Outline: + return -2; // SW_FT_THROW( Invalid_Outline ); +} + +/* END */ diff --git a/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_stroker.h b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_stroker.h new file mode 100644 index 00000000..0fdeb276 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_stroker.h @@ -0,0 +1,319 @@ +#ifndef V_FT_STROKER_H +#define V_FT_STROKER_H +/***************************************************************************/ +/* */ +/* ftstroke.h */ +/* */ +/* FreeType path stroker (specification). */ +/* */ +/* Copyright 2002-2006, 2008, 2009, 2011-2012 by */ +/* David Turner, Robert Wilhelm, and Werner Lemberg. */ +/* */ +/* This file is part of the FreeType project, and may only be used, */ +/* modified, and distributed under the terms of the FreeType project */ +/* license, LICENSE.TXT. By continuing to use, modify, or distribute */ +/* this file you indicate that you have read the license and */ +/* understand and accept it fully. */ +/* */ +/***************************************************************************/ + +#include "vector_freetype_v_ft_raster.h" + + /************************************************************** + * + * @type: + * SW_FT_Stroker + * + * @description: + * Opaque handler to a path stroker object. + */ + typedef struct SW_FT_StrokerRec_* SW_FT_Stroker; + + + /************************************************************** + * + * @enum: + * SW_FT_Stroker_LineJoin + * + * @description: + * These values determine how two joining lines are rendered + * in a stroker. + * + * @values: + * SW_FT_STROKER_LINEJOIN_ROUND :: + * Used to render rounded line joins. Circular arcs are used + * to join two lines smoothly. + * + * SW_FT_STROKER_LINEJOIN_BEVEL :: + * Used to render beveled line joins. The outer corner of + * the joined lines is filled by enclosing the triangular + * region of the corner with a straight line between the + * outer corners of each stroke. + * + * SW_FT_STROKER_LINEJOIN_MITER_FIXED :: + * Used to render mitered line joins, with fixed bevels if the + * miter limit is exceeded. The outer edges of the strokes + * for the two segments are extended until they meet at an + * angle. If the segments meet at too sharp an angle (such + * that the miter would extend from the intersection of the + * segments a distance greater than the product of the miter + * limit value and the border radius), then a bevel join (see + * above) is used instead. This prevents long spikes being + * created. SW_FT_STROKER_LINEJOIN_MITER_FIXED generates a miter + * line join as used in PostScript and PDF. + * + * SW_FT_STROKER_LINEJOIN_MITER_VARIABLE :: + * SW_FT_STROKER_LINEJOIN_MITER :: + * Used to render mitered line joins, with variable bevels if + * the miter limit is exceeded. The intersection of the + * strokes is clipped at a line perpendicular to the bisector + * of the angle between the strokes, at the distance from the + * intersection of the segments equal to the product of the + * miter limit value and the border radius. This prevents + * long spikes being created. + * SW_FT_STROKER_LINEJOIN_MITER_VARIABLE generates a mitered line + * join as used in XPS. SW_FT_STROKER_LINEJOIN_MITER is an alias + * for SW_FT_STROKER_LINEJOIN_MITER_VARIABLE, retained for + * backwards compatibility. 
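+ *
+ *    (As a rough rule for both miter modes: two segments meeting at
+ *    angle `theta' produce a miter point about radius / sin(theta / 2)
+ *    away from the join, so mitering is abandoned as soon as
+ *    1 / sin(theta / 2) exceeds the miter limit.)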
+ */ + typedef enum SW_FT_Stroker_LineJoin_ + { + SW_FT_STROKER_LINEJOIN_ROUND = 0, + SW_FT_STROKER_LINEJOIN_BEVEL = 1, + SW_FT_STROKER_LINEJOIN_MITER_VARIABLE = 2, + SW_FT_STROKER_LINEJOIN_MITER = SW_FT_STROKER_LINEJOIN_MITER_VARIABLE, + SW_FT_STROKER_LINEJOIN_MITER_FIXED = 3 + + } SW_FT_Stroker_LineJoin; + + + /************************************************************** + * + * @enum: + * SW_FT_Stroker_LineCap + * + * @description: + * These values determine how the end of opened sub-paths are + * rendered in a stroke. + * + * @values: + * SW_FT_STROKER_LINECAP_BUTT :: + * The end of lines is rendered as a full stop on the last + * point itself. + * + * SW_FT_STROKER_LINECAP_ROUND :: + * The end of lines is rendered as a half-circle around the + * last point. + * + * SW_FT_STROKER_LINECAP_SQUARE :: + * The end of lines is rendered as a square around the + * last point. + */ + typedef enum SW_FT_Stroker_LineCap_ + { + SW_FT_STROKER_LINECAP_BUTT = 0, + SW_FT_STROKER_LINECAP_ROUND, + SW_FT_STROKER_LINECAP_SQUARE + + } SW_FT_Stroker_LineCap; + + + /************************************************************** + * + * @enum: + * SW_FT_StrokerBorder + * + * @description: + * These values are used to select a given stroke border + * in @SW_FT_Stroker_GetBorderCounts and @SW_FT_Stroker_ExportBorder. + * + * @values: + * SW_FT_STROKER_BORDER_LEFT :: + * Select the left border, relative to the drawing direction. + * + * SW_FT_STROKER_BORDER_RIGHT :: + * Select the right border, relative to the drawing direction. + * + * @note: + * Applications are generally interested in the `inside' and `outside' + * borders. However, there is no direct mapping between these and the + * `left' and `right' ones, since this really depends on the glyph's + * drawing orientation, which varies between font formats. + * + * You can however use @SW_FT_Outline_GetInsideBorder and + * @SW_FT_Outline_GetOutsideBorder to get these. + */ + typedef enum SW_FT_StrokerBorder_ + { + SW_FT_STROKER_BORDER_LEFT = 0, + SW_FT_STROKER_BORDER_RIGHT + + } SW_FT_StrokerBorder; + + + /************************************************************** + * + * @function: + * SW_FT_Stroker_New + * + * @description: + * Create a new stroker object. + * + * @input: + * library :: + * FreeType library handle. + * + * @output: + * astroker :: + * A new stroker object handle. NULL in case of error. + * + * @return: + * FreeType error code. 0~means success. + */ + SW_FT_Error + SW_FT_Stroker_New( SW_FT_Stroker *astroker ); + + + /************************************************************** + * + * @function: + * SW_FT_Stroker_Set + * + * @description: + * Reset a stroker object's attributes. + * + * @input: + * stroker :: + * The target stroker handle. + * + * radius :: + * The border radius. + * + * line_cap :: + * The line cap style. + * + * line_join :: + * The line join style. + * + * miter_limit :: + * The miter limit for the SW_FT_STROKER_LINEJOIN_MITER_FIXED and + * SW_FT_STROKER_LINEJOIN_MITER_VARIABLE line join styles, + * expressed as 16.16 fixed-point value. + * + * @note: + * The radius is expressed in the same units as the outline + * coordinates. 
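+ *
+ *    For example, with the usual 26.6 pixel coordinates a radius of
+ *    32 (i.e. 0.5) strokes half a pixel on each side of the path,
+ *    producing a stroke one pixel wide overall.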
+ */ + void + SW_FT_Stroker_Set( SW_FT_Stroker stroker, + SW_FT_Fixed radius, + SW_FT_Stroker_LineCap line_cap, + SW_FT_Stroker_LineJoin line_join, + SW_FT_Fixed miter_limit ); + + /************************************************************** + * + * @function: + * SW_FT_Stroker_ParseOutline + * + * @description: + * A convenience function used to parse a whole outline with + * the stroker. The resulting outline(s) can be retrieved + * later by functions like @SW_FT_Stroker_GetCounts and @SW_FT_Stroker_Export. + * + * @input: + * stroker :: + * The target stroker handle. + * + * outline :: + * The source outline. + * + * + * @return: + * FreeType error code. 0~means success. + * + * @note: + * If `opened' is~0 (the default), the outline is treated as a closed + * path, and the stroker generates two distinct `border' outlines. + * + * + * This function calls @SW_FT_Stroker_Rewind automatically. + */ + SW_FT_Error + SW_FT_Stroker_ParseOutline( SW_FT_Stroker stroker, + const SW_FT_Outline* outline); + + + /************************************************************** + * + * @function: + * SW_FT_Stroker_GetCounts + * + * @description: + * Call this function once you have finished parsing your paths + * with the stroker. It returns the number of points and + * contours necessary to export all points/borders from the stroked + * outline/path. + * + * @input: + * stroker :: + * The target stroker handle. + * + * @output: + * anum_points :: + * The number of points. + * + * anum_contours :: + * The number of contours. + * + * @return: + * FreeType error code. 0~means success. + */ + SW_FT_Error + SW_FT_Stroker_GetCounts( SW_FT_Stroker stroker, + SW_FT_UInt *anum_points, + SW_FT_UInt *anum_contours ); + + + /************************************************************** + * + * @function: + * SW_FT_Stroker_Export + * + * @description: + * Call this function after @SW_FT_Stroker_GetBorderCounts to + * export all borders to your own @SW_FT_Outline structure. + * + * Note that this function appends the border points and + * contours to your outline, but does not try to resize its + * arrays. + * + * @input: + * stroker :: + * The target stroker handle. + * + * outline :: + * The target outline handle. + */ + void + SW_FT_Stroker_Export( SW_FT_Stroker stroker, + SW_FT_Outline* outline ); + + + /************************************************************** + * + * @function: + * SW_FT_Stroker_Done + * + * @description: + * Destroy a stroker object. + * + * @input: + * stroker :: + * A stroker handle. Can be NULL. + */ + void + SW_FT_Stroker_Done( SW_FT_Stroker stroker ); + + +#endif // V_FT_STROKER_H diff --git a/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_types.h b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_types.h new file mode 100644 index 00000000..a01c4f28 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_freetype_v_ft_types.h @@ -0,0 +1,160 @@ +#ifndef V_FT_TYPES_H +#define V_FT_TYPES_H + +/*************************************************************************/ +/* */ +/* */ +/* SW_FT_Fixed */ +/* */ +/* */ +/* This type is used to store 16.16 fixed-point values, like scaling */ +/* values or matrix coefficients. */ +/* */ +typedef signed long SW_FT_Fixed; + + +/*************************************************************************/ +/* */ +/* */ +/* SW_FT_Int */ +/* */ +/* */ +/* A typedef for the int type. 
*/ +/* */ +typedef signed int SW_FT_Int; + + +/*************************************************************************/ +/* */ +/* */ +/* SW_FT_UInt */ +/* */ +/* */ +/* A typedef for the unsigned int type. */ +/* */ +typedef unsigned int SW_FT_UInt; + + +/*************************************************************************/ +/* */ +/* */ +/* SW_FT_Long */ +/* */ +/* */ +/* A typedef for signed long. */ +/* */ +typedef signed long SW_FT_Long; + + +/*************************************************************************/ +/* */ +/* */ +/* SW_FT_ULong */ +/* */ +/* */ +/* A typedef for unsigned long. */ +/* */ +typedef unsigned long SW_FT_ULong; + +/*************************************************************************/ +/* */ +/* */ +/* SW_FT_Short */ +/* */ +/* */ +/* A typedef for signed short. */ +/* */ +typedef signed short SW_FT_Short; + + +/*************************************************************************/ +/* */ +/* */ +/* SW_FT_Byte */ +/* */ +/* */ +/* A simple typedef for the _unsigned_ char type. */ +/* */ +typedef unsigned char SW_FT_Byte; + + +/*************************************************************************/ +/* */ +/* */ +/* SW_FT_Bool */ +/* */ +/* */ +/* A typedef of unsigned char, used for simple booleans. As usual, */ +/* values 1 and~0 represent true and false, respectively. */ +/* */ +typedef unsigned char SW_FT_Bool; + + + +/*************************************************************************/ +/* */ +/* */ +/* SW_FT_Error */ +/* */ +/* */ +/* The FreeType error code type. A value of~0 is always interpreted */ +/* as a successful operation. */ +/* */ +typedef int SW_FT_Error; + + +/*************************************************************************/ +/* */ +/* */ +/* SW_FT_Pos */ +/* */ +/* */ +/* The type SW_FT_Pos is used to store vectorial coordinates. Depending */ +/* on the context, these can represent distances in integer font */ +/* units, or 16.16, or 26.6 fixed-point pixel coordinates. */ +/* */ +typedef signed long SW_FT_Pos; + + +/*************************************************************************/ +/* */ +/* */ +/* SW_FT_Vector */ +/* */ +/* */ +/* A simple structure used to store a 2D vector; coordinates are of */ +/* the SW_FT_Pos type. */ +/* */ +/* */ +/* x :: The horizontal coordinate. */ +/* y :: The vertical coordinate. 
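+                 (e.g. when 26.6 pixel coordinates are in use, the      */
+/*               point one pixel right and two pixels down from the     */
+/*               origin is stored as x = 64, y = 128)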
*/ +/* */ +typedef struct SW_FT_Vector_ +{ + SW_FT_Pos x; + SW_FT_Pos y; + +} SW_FT_Vector; + + +typedef long long int SW_FT_Int64; +typedef unsigned long long int SW_FT_UInt64; + +typedef signed int SW_FT_Int32; +typedef unsigned int SW_FT_UInt32; + + +#define SW_FT_BOOL( x ) ( (SW_FT_Bool)( x ) ) + +#define SW_FT_SIZEOF_LONG 4 + +#ifndef TRUE +#define TRUE 1 +#endif + +#ifndef FALSE +#define FALSE 0 +#endif + + +#endif // V_FT_TYPES_H diff --git a/vendor/github.com/Benau/go_rlottie/vector_pixman_pixman-arm-neon-asm.S b/vendor/github.com/Benau/go_rlottie/vector_pixman_pixman-arm-neon-asm.S new file mode 100644 index 00000000..71554ab5 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_pixman_pixman-arm-neon-asm.S @@ -0,0 +1,500 @@ +#include "config.h" +#ifdef USE_ARM_NEON +/* + * Copyright © 2009 Nokia Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Author: Siarhei Siamashka (siarhei.siamashka@nokia.com) + */ + +/* + * This file contains implementations of NEON optimized pixel processing + * functions. There is no full and detailed tutorial, but some functions + * (those which are exposing some new or interesting features) are + * extensively commented and can be used as examples. + * + * You may want to have a look at the comments for following functions: + * - pixman_composite_over_8888_0565_asm_neon + * - pixman_composite_over_n_8_0565_asm_neon + */ + +/* Prevent the stack from becoming executable for no reason... 
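+   (without this empty .note.GNU-stack section some ELF linkers would   */
+/* conservatively assume the assembly object requires an executable     */
+/* stack and mark the whole process stack executable)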
*/ +#if defined(__linux__) && defined(__ELF__) +.section .note.GNU-stack,"",%progbits +#endif + + .text + .fpu neon + .arch armv7a + .object_arch armv4 + .eabi_attribute 10, 0 /* suppress Tag_FP_arch */ + .eabi_attribute 12, 0 /* suppress Tag_Advanced_SIMD_arch */ + .arm + .altmacro + .p2align 2 + + +//#include "pixman-arm-asm.h" +/* Supplementary macro for setting function attributes */ +.macro pixman_asm_function fname + .func fname + .global fname +#ifdef __ELF__ + .hidden fname + .type fname, %function +#endif +fname: +.endm + +//#include "pixman-private.h" +/* + * The defines which are shared between C and assembly code + */ + +/* bilinear interpolation precision (must be < 8) */ +#define BILINEAR_INTERPOLATION_BITS 7 +#define BILINEAR_INTERPOLATION_RANGE (1 << BILINEAR_INTERPOLATION_BITS) + +#include "vector_pixman_pixman-arm-neon-asm.h" + +/* Global configuration options and preferences */ + +/* + * The code can optionally make use of unaligned memory accesses to improve + * performance of handling leading/trailing pixels for each scanline. + * Configuration variable RESPECT_STRICT_ALIGNMENT can be set to 0 for + * example in linux if unaligned memory accesses are not configured to + * generate.exceptions. + */ +.set RESPECT_STRICT_ALIGNMENT, 1 + +/* + * Set default prefetch type. There is a choice between the following options: + * + * PREFETCH_TYPE_NONE (may be useful for the ARM cores where PLD is set to work + * as NOP to workaround some HW bugs or for whatever other reason) + * + * PREFETCH_TYPE_SIMPLE (may be useful for simple single-issue ARM cores where + * advanced prefetch intruduces heavy overhead) + * + * PREFETCH_TYPE_ADVANCED (useful for superscalar cores such as ARM Cortex-A8 + * which can run ARM and NEON instructions simultaneously so that extra ARM + * instructions do not add (many) extra cycles, but improve prefetch efficiency) + * + * Note: some types of function can't support advanced prefetch and fallback + * to simple one (those which handle 24bpp pixels) + */ +.set PREFETCH_TYPE_DEFAULT, PREFETCH_TYPE_ADVANCED + +/* Prefetch distance in pixels for simple prefetch */ +.set PREFETCH_DISTANCE_SIMPLE, 64 + +/* + * Implementation of pixman_composite_over_8888_0565_asm_neon + * + * This function takes a8r8g8b8 source buffer, r5g6b5 destination buffer and + * performs OVER compositing operation. Function fast_composite_over_8888_0565 + * from pixman-fast-path.c does the same in C and can be used as a reference. + * + * First we need to have some NEON assembly code which can do the actual + * operation on the pixels and provide it to the template macro. + * + * Template macro quite conveniently takes care of emitting all the necessary + * code for memory reading and writing (including quite tricky cases of + * handling unaligned leading/trailing pixels), so we only need to deal with + * the data in NEON registers. + * + * NEON registers allocation in general is recommented to be the following: + * d0, d1, d2, d3 - contain loaded source pixel data + * d4, d5, d6, d7 - contain loaded destination pixels (if they are needed) + * d24, d25, d26, d27 - contain loading mask pixel data (if mask is used) + * d28, d29, d30, d31 - place for storing the result (destination pixels) + * + * As can be seen above, four 64-bit NEON registers are used for keeping + * intermediate pixel data and up to 8 pixels can be processed in one step + * for 32bpp formats (16 pixels for 16bpp, 32 pixels for 8bpp). 
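+ * (Arithmetic check: 8 pixels x 4 bytes is 32 bytes, exactly the four
+ * 64-bit registers named above; the same 32 bytes hold 16 pixels at
+ * 2 bytes each or 32 pixels at 1 byte each.)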
+ * + * This particular function uses the following registers allocation: + * d0, d1, d2, d3 - contain loaded source pixel data + * d4, d5 - contain loaded destination pixels (they are needed) + * d28, d29 - place for storing the result (destination pixels) + */ + +/* + * Step one. We need to have some code to do some arithmetics on pixel data. + * This is implemented as a pair of macros: '*_head' and '*_tail'. When used + * back-to-back, they take pixel data from {d0, d1, d2, d3} and {d4, d5}, + * perform all the needed calculations and write the result to {d28, d29}. + * The rationale for having two macros and not just one will be explained + * later. In practice, any single monolitic function which does the work can + * be split into two parts in any arbitrary way without affecting correctness. + * + * There is one special trick here too. Common template macro can optionally + * make our life a bit easier by doing R, G, B, A color components + * deinterleaving for 32bpp pixel formats (and this feature is used in + * 'pixman_composite_over_8888_0565_asm_neon' function). So it means that + * instead of having 8 packed pixels in {d0, d1, d2, d3} registers, we + * actually use d0 register for blue channel (a vector of eight 8-bit + * values), d1 register for green, d2 for red and d3 for alpha. This + * simple conversion can be also done with a few NEON instructions: + * + * Packed to planar conversion: + * vuzp.8 d0, d1 + * vuzp.8 d2, d3 + * vuzp.8 d1, d3 + * vuzp.8 d0, d2 + * + * Planar to packed conversion: + * vzip.8 d0, d2 + * vzip.8 d1, d3 + * vzip.8 d2, d3 + * vzip.8 d0, d1 + * + * But pixel can be loaded directly in planar format using VLD4.8 NEON + * instruction. It is 1 cycle slower than VLD1.32, so this is not always + * desirable, that's why deinterleaving is optional. + * + * But anyway, here is the code: + */ + +/* + * OK, now we got almost everything that we need. Using the above two + * macros, the work can be done right. But now we want to optimize + * it a bit. ARM Cortex-A8 is an in-order core, and benefits really + * a lot from good code scheduling and software pipelining. + * + * Let's construct some code, which will run in the core main loop. + * Some pseudo-code of the main loop will look like this: + * head + * while (...) { + * tail + * head + * } + * tail + * + * It may look a bit weird, but this setup allows to hide instruction + * latencies better and also utilize dual-issue capability more + * efficiently (make pairs of load-store and ALU instructions). + * + * So what we need now is a '*_tail_head' macro, which will be used + * in the core main loop. A trivial straightforward implementation + * of this macro would look like this: + * + * pixman_composite_over_8888_0565_process_pixblock_tail + * vst1.16 {d28, d29}, [DST_W, :128]! + * vld1.16 {d4, d5}, [DST_R, :128]! + * vld4.32 {d0, d1, d2, d3}, [SRC]! + * pixman_composite_over_8888_0565_process_pixblock_head + * cache_preload 8, 8 + * + * Now it also got some VLD/VST instructions. We simply can't move from + * processing one block of pixels to the other one with just arithmetics. + * The previously processed data needs to be written to memory and new + * data needs to be fetched. Fortunately, this main loop does not deal + * with partial leading/trailing pixels and can load/store a full block + * of pixels in a bulk. Additionally, destination buffer is already + * 16 bytes aligned here (which is good for performance). + * + * New things here are DST_R, DST_W, SRC and MASK identifiers. 
These + * are the aliases for ARM registers which are used as pointers for + * accessing data. We maintain separate pointers for reading and writing + * destination buffer (DST_R and DST_W). + * + * Another new thing is 'cache_preload' macro. It is used for prefetching + * data into CPU L2 cache and improve performance when dealing with large + * images which are far larger than cache size. It uses one argument + * (actually two, but they need to be the same here) - number of pixels + * in a block. Looking into 'pixman-arm-neon-asm.h' can provide some + * details about this macro. Moreover, if good performance is needed + * the code from this macro needs to be copied into '*_tail_head' macro + * and mixed with the rest of code for optimal instructions scheduling. + * We are actually doing it below. + * + * Now after all the explanations, here is the optimized code. + * Different instruction streams (originaling from '*_head', '*_tail' + * and 'cache_preload' macro) use different indentation levels for + * better readability. Actually taking the code from one of these + * indentation levels and ignoring a few VLD/VST instructions would + * result in exactly the code from '*_head', '*_tail' or 'cache_preload' + * macro! + */ + +/* + * And now the final part. We are using 'generate_composite_function' macro + * to put all the stuff together. We are specifying the name of the function + * which we want to get, number of bits per pixel for the source, mask and + * destination (0 if unused, like mask in this case). Next come some bit + * flags: + * FLAG_DST_READWRITE - tells that the destination buffer is both read + * and written, for write-only buffer we would use + * FLAG_DST_WRITEONLY flag instead + * FLAG_DEINTERLEAVE_32BPP - tells that we prefer to work with planar data + * and separate color channels for 32bpp format. + * The next things are: + * - the number of pixels processed per iteration (8 in this case, because + * that's the maximum what can fit into four 64-bit NEON registers). + * - prefetch distance, measured in pixel blocks. In this case it is 5 times + * by 8 pixels. That would be 40 pixels, or up to 160 bytes. Optimal + * prefetch distance can be selected by running some benchmarks. + * + * After that we specify some macros, these are 'default_init', + * 'default_cleanup' here which are empty (but it is possible to have custom + * init/cleanup macros to be able to save/restore some extra NEON registers + * like d8-d15 or do anything else) followed by + * 'pixman_composite_over_8888_0565_process_pixblock_head', + * 'pixman_composite_over_8888_0565_process_pixblock_tail' and + * 'pixman_composite_over_8888_0565_process_pixblock_tail_head' + * which we got implemented above. + * + * The last part is the NEON registers allocation scheme. 
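+ *
+ * One recurring detail in the head/tail code below is the pairing of
+ * vrshr.u16 #8 with vraddhn.u16 on each 16-bit product x of two 8-bit
+ * values. Together they compute
+ *
+ *     tmp = (x + 128) >> 8               (rounding shift)
+ *     out = (x + tmp + 128) >> 8         (rounding add, high byte kept)
+ *
+ * which is the exact, correctly rounded x / 255 for any product of two
+ * bytes, i.e. the per-channel normalization of the OVER operator
+ * dst = src + (255 - src_alpha) * dst / 255, with no division needed.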
+ */ + +/******************************************************************************/ + +/******************************************************************************/ + .macro pixman_composite_out_reverse_8888_8888_process_pixblock_head + vmvn.8 d24, d3 /* get inverted alpha */ + /* do alpha blending */ + vmull.u8 q8, d24, d4 + vmull.u8 q9, d24, d5 + vmull.u8 q10, d24, d6 + vmull.u8 q11, d24, d7 + .endm + + .macro pixman_composite_out_reverse_8888_8888_process_pixblock_tail + vrshr.u16 q14, q8, #8 + vrshr.u16 q15, q9, #8 + vrshr.u16 q12, q10, #8 + vrshr.u16 q13, q11, #8 + vraddhn.u16 d28, q14, q8 + vraddhn.u16 d29, q15, q9 + vraddhn.u16 d30, q12, q10 + vraddhn.u16 d31, q13, q11 + .endm + +/******************************************************************************/ + +.macro pixman_composite_over_8888_8888_process_pixblock_head + pixman_composite_out_reverse_8888_8888_process_pixblock_head +.endm + +.macro pixman_composite_over_8888_8888_process_pixblock_tail + pixman_composite_out_reverse_8888_8888_process_pixblock_tail + vqadd.u8 q14, q0, q14 + vqadd.u8 q15, q1, q15 +.endm + +.macro pixman_composite_over_8888_8888_process_pixblock_tail_head + vld4.8 {d4, d5, d6, d7}, [DST_R, :128]! + vrshr.u16 q14, q8, #8 + PF add PF_X, PF_X, #8 + PF tst PF_CTL, #0xF + vrshr.u16 q15, q9, #8 + vrshr.u16 q12, q10, #8 + vrshr.u16 q13, q11, #8 + PF addne PF_X, PF_X, #8 + PF subne PF_CTL, PF_CTL, #1 + vraddhn.u16 d28, q14, q8 + vraddhn.u16 d29, q15, q9 + PF cmp PF_X, ORIG_W + vraddhn.u16 d30, q12, q10 + vraddhn.u16 d31, q13, q11 + vqadd.u8 q14, q0, q14 + vqadd.u8 q15, q1, q15 + fetch_src_pixblock + PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift] + vmvn.8 d22, d3 + PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift] + vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! + PF subge PF_X, PF_X, ORIG_W + vmull.u8 q8, d22, d4 + PF subges PF_CTL, PF_CTL, #0x10 + vmull.u8 q9, d22, d5 + PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]! + vmull.u8 q10, d22, d6 + PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]! 
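+    /* the two conditional loads above step PF_SRC and PF_DST to the  */
+    /* next scanline once the prefetch index wraps; an LDR is used    */
+    /* instead of PLD so that a TLB miss on the new line is resolved  */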
+ vmull.u8 q11, d22, d7 +.endm + +generate_composite_function \ + pixman_composite_over_8888_8888_asm_neon, 32, 0, 32, \ + FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ + 8, /* number of pixels, processed in a single block */ \ + 5, /* prefetch distance */ \ + default_init, \ + default_cleanup, \ + pixman_composite_over_8888_8888_process_pixblock_head, \ + pixman_composite_over_8888_8888_process_pixblock_tail, \ + pixman_composite_over_8888_8888_process_pixblock_tail_head + +generate_composite_function_single_scanline \ + pixman_composite_scanline_over_asm_neon, 32, 0, 32, \ + FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ + 8, /* number of pixels, processed in a single block */ \ + default_init, \ + default_cleanup, \ + pixman_composite_over_8888_8888_process_pixblock_head, \ + pixman_composite_over_8888_8888_process_pixblock_tail, \ + pixman_composite_over_8888_8888_process_pixblock_tail_head + +/******************************************************************************/ + +.macro pixman_composite_over_n_8888_process_pixblock_head + /* deinterleaved source pixels in {d0, d1, d2, d3} */ + /* inverted alpha in {d24} */ + /* destination pixels in {d4, d5, d6, d7} */ + vmull.u8 q8, d24, d4 + vmull.u8 q9, d24, d5 + vmull.u8 q10, d24, d6 + vmull.u8 q11, d24, d7 +.endm + +.macro pixman_composite_over_n_8888_process_pixblock_tail + vrshr.u16 q14, q8, #8 + vrshr.u16 q15, q9, #8 + vrshr.u16 q2, q10, #8 + vrshr.u16 q3, q11, #8 + vraddhn.u16 d28, q14, q8 + vraddhn.u16 d29, q15, q9 + vraddhn.u16 d30, q2, q10 + vraddhn.u16 d31, q3, q11 + vqadd.u8 q14, q0, q14 + vqadd.u8 q15, q1, q15 +.endm + +.macro pixman_composite_over_n_8888_process_pixblock_tail_head + vrshr.u16 q14, q8, #8 + vrshr.u16 q15, q9, #8 + vrshr.u16 q2, q10, #8 + vrshr.u16 q3, q11, #8 + vraddhn.u16 d28, q14, q8 + vraddhn.u16 d29, q15, q9 + vraddhn.u16 d30, q2, q10 + vraddhn.u16 d31, q3, q11 + vld4.8 {d4, d5, d6, d7}, [DST_R, :128]! + vqadd.u8 q14, q0, q14 + PF add PF_X, PF_X, #8 + PF tst PF_CTL, #0x0F + PF addne PF_X, PF_X, #8 + PF subne PF_CTL, PF_CTL, #1 + vqadd.u8 q15, q1, q15 + PF cmp PF_X, ORIG_W + vmull.u8 q8, d24, d4 + PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift] + vmull.u8 q9, d24, d5 + PF subge PF_X, PF_X, ORIG_W + vmull.u8 q10, d24, d6 + PF subges PF_CTL, PF_CTL, #0x10 + vmull.u8 q11, d24, d7 + PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]! + vst4.8 {d28, d29, d30, d31}, [DST_W, :128]! +.endm + +.macro pixman_composite_over_n_8888_init + add DUMMY, sp, #ARGS_STACK_OFFSET + vld1.32 {d3[0]}, [DUMMY] + vdup.8 d0, d3[0] + vdup.8 d1, d3[1] + vdup.8 d2, d3[2] + vdup.8 d3, d3[3] + vmvn.8 d24, d3 /* get inverted alpha */ +.endm + +generate_composite_function \ + pixman_composite_over_n_8888_asm_neon, 0, 0, 32, \ + FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \ + 8, /* number of pixels, processed in a single block */ \ + 5, /* prefetch distance */ \ + pixman_composite_over_n_8888_init, \ + default_cleanup, \ + pixman_composite_over_8888_8888_process_pixblock_head, \ + pixman_composite_over_8888_8888_process_pixblock_tail, \ + pixman_composite_over_n_8888_process_pixblock_tail_head + +/******************************************************************************/ + +.macro pixman_composite_src_n_8888_process_pixblock_head +.endm + +.macro pixman_composite_src_n_8888_process_pixblock_tail +.endm + +.macro pixman_composite_src_n_8888_process_pixblock_tail_head + vst1.32 {d0, d1, d2, d3}, [DST_W, :128]! 
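+    /* solid fill: the color was broadcast into d0-d3 once by the init */
+    /* macro, so every iteration is just this single 32-byte store     */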
+.endm + +.macro pixman_composite_src_n_8888_init + add DUMMY, sp, #ARGS_STACK_OFFSET + vld1.32 {d0[0]}, [DUMMY] + vsli.u64 d0, d0, #32 + vorr d1, d0, d0 + vorr q1, q0, q0 +.endm + +.macro pixman_composite_src_n_8888_cleanup +.endm + +generate_composite_function \ + pixman_composite_src_n_8888_asm_neon, 0, 0, 32, \ + FLAG_DST_WRITEONLY, \ + 8, /* number of pixels, processed in a single block */ \ + 0, /* prefetch distance */ \ + pixman_composite_src_n_8888_init, \ + pixman_composite_src_n_8888_cleanup, \ + pixman_composite_src_n_8888_process_pixblock_head, \ + pixman_composite_src_n_8888_process_pixblock_tail, \ + pixman_composite_src_n_8888_process_pixblock_tail_head, \ + 0, /* dst_w_basereg */ \ + 0, /* dst_r_basereg */ \ + 0, /* src_basereg */ \ + 0 /* mask_basereg */ + +/******************************************************************************/ + +.macro pixman_composite_src_8888_8888_process_pixblock_head +.endm + +.macro pixman_composite_src_8888_8888_process_pixblock_tail +.endm + +.macro pixman_composite_src_8888_8888_process_pixblock_tail_head + vst1.32 {d0, d1, d2, d3}, [DST_W, :128]! + fetch_src_pixblock + cache_preload 8, 8 +.endm + +generate_composite_function \ + pixman_composite_src_8888_8888_asm_neon, 32, 0, 32, \ + FLAG_DST_WRITEONLY, \ + 8, /* number of pixels, processed in a single block */ \ + 10, /* prefetch distance */ \ + default_init, \ + default_cleanup, \ + pixman_composite_src_8888_8888_process_pixblock_head, \ + pixman_composite_src_8888_8888_process_pixblock_tail, \ + pixman_composite_src_8888_8888_process_pixblock_tail_head, \ + 0, /* dst_w_basereg */ \ + 0, /* dst_r_basereg */ \ + 0, /* src_basereg */ \ + 0 /* mask_basereg */ + +/******************************************************************************/ +#endif diff --git a/vendor/github.com/Benau/go_rlottie/vector_pixman_pixman-arm-neon-asm.h b/vendor/github.com/Benau/go_rlottie/vector_pixman_pixman-arm-neon-asm.h new file mode 100644 index 00000000..6add220a --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_pixman_pixman-arm-neon-asm.h @@ -0,0 +1,1126 @@ +/* + * Copyright © 2009 Nokia Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Author: Siarhei Siamashka (siarhei.siamashka@nokia.com) + */ + +/* + * This file contains a macro ('generate_composite_function') which can + * construct 2D image processing functions, based on a common template. 
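+ * (pixman-arm-neon-asm.S above, for instance, instantiates it as
+ * pixman_composite_over_8888_8888_asm_neon, a 32bpp-over-32bpp fast
+ * path with no mask.)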
+ * Any combinations of source, destination and mask images with 8bpp, + * 16bpp, 24bpp, 32bpp color formats are supported. + * + * This macro takes care of: + * - handling of leading and trailing unaligned pixels + * - doing most of the work related to L2 cache preload + * - encourages the use of software pipelining for better instructions + * scheduling + * + * The user of this macro has to provide some configuration parameters + * (bit depths for the images, prefetch distance, etc.) and a set of + * macros, which should implement basic code chunks responsible for + * pixels processing. See 'pixman-arm-neon-asm.S' file for the usage + * examples. + * + * TODO: + * - try overlapped pixel method (from Ian Rickards) when processing + * exactly two blocks of pixels + * - maybe add an option to do reverse scanline processing + */ + +/* + * Bit flags for 'generate_composite_function' macro which are used + * to tune generated functions behavior. + */ +.set FLAG_DST_WRITEONLY, 0 +.set FLAG_DST_READWRITE, 1 +.set FLAG_DEINTERLEAVE_32BPP, 2 + +/* + * Offset in stack where mask and source pointer/stride can be accessed + * from 'init' macro. This is useful for doing special handling for solid mask. + */ +.set ARGS_STACK_OFFSET, 40 + +/* + * Constants for selecting preferable prefetch type. + */ +.set PREFETCH_TYPE_NONE, 0 /* No prefetch at all */ +.set PREFETCH_TYPE_SIMPLE, 1 /* A simple, fixed-distance-ahead prefetch */ +.set PREFETCH_TYPE_ADVANCED, 2 /* Advanced fine-grained prefetch */ + +/* + * Definitions of supplementary pixld/pixst macros (for partial load/store of + * pixel data). + */ + +.macro pixldst1 op, elem_size, reg1, mem_operand, abits +.if abits > 0 + op&.&elem_size {d®1}, [&mem_operand&, :&abits&]! +.else + op&.&elem_size {d®1}, [&mem_operand&]! +.endif +.endm + +.macro pixldst2 op, elem_size, reg1, reg2, mem_operand, abits +.if abits > 0 + op&.&elem_size {d®1, d®2}, [&mem_operand&, :&abits&]! +.else + op&.&elem_size {d®1, d®2}, [&mem_operand&]! +.endif +.endm + +.macro pixldst4 op, elem_size, reg1, reg2, reg3, reg4, mem_operand, abits +.if abits > 0 + op&.&elem_size {d®1, d®2, d®3, d®4}, [&mem_operand&, :&abits&]! +.else + op&.&elem_size {d®1, d®2, d®3, d®4}, [&mem_operand&]! +.endif +.endm + +.macro pixldst0 op, elem_size, reg1, idx, mem_operand, abits + op&.&elem_size {d®1[idx]}, [&mem_operand&]! +.endm + +.macro pixldst3 op, elem_size, reg1, reg2, reg3, mem_operand + op&.&elem_size {d®1, d®2, d®3}, [&mem_operand&]! +.endm + +.macro pixldst30 op, elem_size, reg1, reg2, reg3, idx, mem_operand + op&.&elem_size {d®1[idx], d®2[idx], d®3[idx]}, [&mem_operand&]! 
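+/* (with .altmacro in effect, `&' splices a macro argument into the */
+/* surrounding token, so d&reg1&[idx] expands to e.g. d7[1])        */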
+.endm + +.macro pixldst numbytes, op, elem_size, basereg, mem_operand, abits +.if numbytes == 32 + pixldst4 op, elem_size, %(basereg+4), %(basereg+5), \ + %(basereg+6), %(basereg+7), mem_operand, abits +.elseif numbytes == 16 + pixldst2 op, elem_size, %(basereg+2), %(basereg+3), mem_operand, abits +.elseif numbytes == 8 + pixldst1 op, elem_size, %(basereg+1), mem_operand, abits +.elseif numbytes == 4 + .if !RESPECT_STRICT_ALIGNMENT || (elem_size == 32) + pixldst0 op, 32, %(basereg+0), 1, mem_operand, abits + .elseif elem_size == 16 + pixldst0 op, 16, %(basereg+0), 2, mem_operand, abits + pixldst0 op, 16, %(basereg+0), 3, mem_operand, abits + .else + pixldst0 op, 8, %(basereg+0), 4, mem_operand, abits + pixldst0 op, 8, %(basereg+0), 5, mem_operand, abits + pixldst0 op, 8, %(basereg+0), 6, mem_operand, abits + pixldst0 op, 8, %(basereg+0), 7, mem_operand, abits + .endif +.elseif numbytes == 2 + .if !RESPECT_STRICT_ALIGNMENT || (elem_size == 16) + pixldst0 op, 16, %(basereg+0), 1, mem_operand, abits + .else + pixldst0 op, 8, %(basereg+0), 2, mem_operand, abits + pixldst0 op, 8, %(basereg+0), 3, mem_operand, abits + .endif +.elseif numbytes == 1 + pixldst0 op, 8, %(basereg+0), 1, mem_operand, abits +.else + .error "unsupported size: numbytes" +.endif +.endm + +.macro pixld numpix, bpp, basereg, mem_operand, abits=0 +.if bpp > 0 +.if (bpp == 32) && (numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0) + pixldst4 vld4, 8, %(basereg+4), %(basereg+5), \ + %(basereg+6), %(basereg+7), mem_operand, abits +.elseif (bpp == 24) && (numpix == 8) + pixldst3 vld3, 8, %(basereg+3), %(basereg+4), %(basereg+5), mem_operand +.elseif (bpp == 24) && (numpix == 4) + pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 4, mem_operand + pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 5, mem_operand + pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 6, mem_operand + pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 7, mem_operand +.elseif (bpp == 24) && (numpix == 2) + pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 2, mem_operand + pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 3, mem_operand +.elseif (bpp == 24) && (numpix == 1) + pixldst30 vld3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 1, mem_operand +.else + pixldst %(numpix * bpp / 8), vld1, %(bpp), basereg, mem_operand, abits +.endif +.endif +.endm + +.macro pixst numpix, bpp, basereg, mem_operand, abits=0 +.if bpp > 0 +.if (bpp == 32) && (numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0) + pixldst4 vst4, 8, %(basereg+4), %(basereg+5), \ + %(basereg+6), %(basereg+7), mem_operand, abits +.elseif (bpp == 24) && (numpix == 8) + pixldst3 vst3, 8, %(basereg+3), %(basereg+4), %(basereg+5), mem_operand +.elseif (bpp == 24) && (numpix == 4) + pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 4, mem_operand + pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 5, mem_operand + pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 6, mem_operand + pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 7, mem_operand +.elseif (bpp == 24) && (numpix == 2) + pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 2, mem_operand + pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 3, mem_operand +.elseif (bpp == 24) && (numpix == 1) + pixldst30 vst3, 8, %(basereg+0), %(basereg+1), %(basereg+2), 1, mem_operand +.else + pixldst %(numpix * bpp / 8), vst1, %(bpp), basereg, mem_operand, abits +.endif +.endif +.endm + +.macro pixld_a numpix, bpp, basereg, 
mem_operand +.if (bpp * numpix) <= 128 + pixld numpix, bpp, basereg, mem_operand, %(bpp * numpix) +.else + pixld numpix, bpp, basereg, mem_operand, 128 +.endif +.endm + +.macro pixst_a numpix, bpp, basereg, mem_operand +.if (bpp * numpix) <= 128 + pixst numpix, bpp, basereg, mem_operand, %(bpp * numpix) +.else + pixst numpix, bpp, basereg, mem_operand, 128 +.endif +.endm + +/* + * Pixel fetcher for nearest scaling (needs TMP1, TMP2, VX, UNIT_X register + * aliases to be defined) + */ +.macro pixld1_s elem_size, reg1, mem_operand +.if elem_size == 16 + mov TMP1, VX, asr #16 + adds VX, VX, UNIT_X +5: subpls VX, VX, SRC_WIDTH_FIXED + bpl 5b + add TMP1, mem_operand, TMP1, asl #1 + mov TMP2, VX, asr #16 + adds VX, VX, UNIT_X +5: subpls VX, VX, SRC_WIDTH_FIXED + bpl 5b + add TMP2, mem_operand, TMP2, asl #1 + vld1.16 {d®1&[0]}, [TMP1, :16] + mov TMP1, VX, asr #16 + adds VX, VX, UNIT_X +5: subpls VX, VX, SRC_WIDTH_FIXED + bpl 5b + add TMP1, mem_operand, TMP1, asl #1 + vld1.16 {d®1&[1]}, [TMP2, :16] + mov TMP2, VX, asr #16 + adds VX, VX, UNIT_X +5: subpls VX, VX, SRC_WIDTH_FIXED + bpl 5b + add TMP2, mem_operand, TMP2, asl #1 + vld1.16 {d®1&[2]}, [TMP1, :16] + vld1.16 {d®1&[3]}, [TMP2, :16] +.elseif elem_size == 32 + mov TMP1, VX, asr #16 + adds VX, VX, UNIT_X +5: subpls VX, VX, SRC_WIDTH_FIXED + bpl 5b + add TMP1, mem_operand, TMP1, asl #2 + mov TMP2, VX, asr #16 + adds VX, VX, UNIT_X +5: subpls VX, VX, SRC_WIDTH_FIXED + bpl 5b + add TMP2, mem_operand, TMP2, asl #2 + vld1.32 {d®1&[0]}, [TMP1, :32] + vld1.32 {d®1&[1]}, [TMP2, :32] +.else + .error "unsupported" +.endif +.endm + +.macro pixld2_s elem_size, reg1, reg2, mem_operand +.if 0 /* elem_size == 32 */ + mov TMP1, VX, asr #16 + add VX, VX, UNIT_X, asl #1 + add TMP1, mem_operand, TMP1, asl #2 + mov TMP2, VX, asr #16 + sub VX, VX, UNIT_X + add TMP2, mem_operand, TMP2, asl #2 + vld1.32 {d®1&[0]}, [TMP1, :32] + mov TMP1, VX, asr #16 + add VX, VX, UNIT_X, asl #1 + add TMP1, mem_operand, TMP1, asl #2 + vld1.32 {d®2&[0]}, [TMP2, :32] + mov TMP2, VX, asr #16 + add VX, VX, UNIT_X + add TMP2, mem_operand, TMP2, asl #2 + vld1.32 {d®1&[1]}, [TMP1, :32] + vld1.32 {d®2&[1]}, [TMP2, :32] +.else + pixld1_s elem_size, reg1, mem_operand + pixld1_s elem_size, reg2, mem_operand +.endif +.endm + +.macro pixld0_s elem_size, reg1, idx, mem_operand +.if elem_size == 16 + mov TMP1, VX, asr #16 + adds VX, VX, UNIT_X +5: subpls VX, VX, SRC_WIDTH_FIXED + bpl 5b + add TMP1, mem_operand, TMP1, asl #1 + vld1.16 {d®1&[idx]}, [TMP1, :16] +.elseif elem_size == 32 + mov TMP1, VX, asr #16 + adds VX, VX, UNIT_X +5: subpls VX, VX, SRC_WIDTH_FIXED + bpl 5b + add TMP1, mem_operand, TMP1, asl #2 + vld1.32 {d®1&[idx]}, [TMP1, :32] +.endif +.endm + +.macro pixld_s_internal numbytes, elem_size, basereg, mem_operand +.if numbytes == 32 + pixld2_s elem_size, %(basereg+4), %(basereg+5), mem_operand + pixld2_s elem_size, %(basereg+6), %(basereg+7), mem_operand + pixdeinterleave elem_size, %(basereg+4) +.elseif numbytes == 16 + pixld2_s elem_size, %(basereg+2), %(basereg+3), mem_operand +.elseif numbytes == 8 + pixld1_s elem_size, %(basereg+1), mem_operand +.elseif numbytes == 4 + .if elem_size == 32 + pixld0_s elem_size, %(basereg+0), 1, mem_operand + .elseif elem_size == 16 + pixld0_s elem_size, %(basereg+0), 2, mem_operand + pixld0_s elem_size, %(basereg+0), 3, mem_operand + .else + pixld0_s elem_size, %(basereg+0), 4, mem_operand + pixld0_s elem_size, %(basereg+0), 5, mem_operand + pixld0_s elem_size, %(basereg+0), 6, mem_operand + pixld0_s elem_size, %(basereg+0), 7, mem_operand + .endif 
+.elseif numbytes == 2 + .if elem_size == 16 + pixld0_s elem_size, %(basereg+0), 1, mem_operand + .else + pixld0_s elem_size, %(basereg+0), 2, mem_operand + pixld0_s elem_size, %(basereg+0), 3, mem_operand + .endif +.elseif numbytes == 1 + pixld0_s elem_size, %(basereg+0), 1, mem_operand +.else + .error "unsupported size: numbytes" +.endif +.endm + +.macro pixld_s numpix, bpp, basereg, mem_operand +.if bpp > 0 + pixld_s_internal %(numpix * bpp / 8), %(bpp), basereg, mem_operand +.endif +.endm + +.macro vuzp8 reg1, reg2 + vuzp.8 d®1, d®2 +.endm + +.macro vzip8 reg1, reg2 + vzip.8 d®1, d®2 +.endm + +/* deinterleave B, G, R, A channels for eight 32bpp pixels in 4 registers */ +.macro pixdeinterleave bpp, basereg +.if (bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0) + vuzp8 %(basereg+0), %(basereg+1) + vuzp8 %(basereg+2), %(basereg+3) + vuzp8 %(basereg+1), %(basereg+3) + vuzp8 %(basereg+0), %(basereg+2) +.endif +.endm + +/* interleave B, G, R, A channels for eight 32bpp pixels in 4 registers */ +.macro pixinterleave bpp, basereg +.if (bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0) + vzip8 %(basereg+0), %(basereg+2) + vzip8 %(basereg+1), %(basereg+3) + vzip8 %(basereg+2), %(basereg+3) + vzip8 %(basereg+0), %(basereg+1) +.endif +.endm + +/* + * This is a macro for implementing cache preload. The main idea is that + * cache preload logic is mostly independent from the rest of pixels + * processing code. It starts at the top left pixel and moves forward + * across pixels and can jump across scanlines. Prefetch distance is + * handled in an 'incremental' way: it starts from 0 and advances to the + * optimal distance over time. After reaching optimal prefetch distance, + * it is kept constant. There are some checks which prevent prefetching + * unneeded pixel lines below the image (but it still can prefetch a bit + * more data on the right side of the image - not a big issue and may + * be actually helpful when rendering text glyphs). Additional trick is + * the use of LDR instruction for prefetch instead of PLD when moving to + * the next line, the point is that we have a high chance of getting TLB + * miss in this case, and PLD would be useless. + * + * This sounds like it may introduce a noticeable overhead (when working with + * fully cached data). But in reality, due to having a separate pipeline and + * instruction queue for NEON unit in ARM Cortex-A8, normal ARM code can + * execute simultaneously with NEON and be completely shadowed by it. Thus + * we get no performance overhead at all (*). This looks like a very nice + * feature of Cortex-A8, if used wisely. We don't have a hardware prefetcher, + * but still can implement some rather advanced prefetch logic in software + * for almost zero cost! + * + * (*) The overhead of the prefetcher is visible when running some trivial + * pixels processing like simple copy. Anyway, having prefetch is a must + * when working with the graphics data. 
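+ *
+ * (Every prefetch-related instruction below is wrapped in the PF macro,
+ * which emits its argument only when PREFETCH_TYPE_CURRENT is
+ * PREFETCH_TYPE_ADVANCED, so the whole scheme assembles away for the
+ * other prefetch types.)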
+ */ +.macro PF a, x:vararg +.if (PREFETCH_TYPE_CURRENT == PREFETCH_TYPE_ADVANCED) + a x +.endif +.endm + +.macro cache_preload std_increment, boost_increment +.if (src_bpp_shift >= 0) || (dst_r_bpp != 0) || (mask_bpp_shift >= 0) +.if regs_shortage + PF ldr ORIG_W, [sp] /* If we are short on regs, ORIG_W is kept on stack */ +.endif +.if std_increment != 0 + PF add PF_X, PF_X, #std_increment +.endif + PF tst PF_CTL, #0xF + PF addne PF_X, PF_X, #boost_increment + PF subne PF_CTL, PF_CTL, #1 + PF cmp PF_X, ORIG_W +.if src_bpp_shift >= 0 + PF pld, [PF_SRC, PF_X, lsl #src_bpp_shift] +.endif +.if dst_r_bpp != 0 + PF pld, [PF_DST, PF_X, lsl #dst_bpp_shift] +.endif +.if mask_bpp_shift >= 0 + PF pld, [PF_MASK, PF_X, lsl #mask_bpp_shift] +.endif + PF subge PF_X, PF_X, ORIG_W + PF subges PF_CTL, PF_CTL, #0x10 +.if src_bpp_shift >= 0 + PF ldrgeb DUMMY, [PF_SRC, SRC_STRIDE, lsl #src_bpp_shift]! +.endif +.if dst_r_bpp != 0 + PF ldrgeb DUMMY, [PF_DST, DST_STRIDE, lsl #dst_bpp_shift]! +.endif +.if mask_bpp_shift >= 0 + PF ldrgeb DUMMY, [PF_MASK, MASK_STRIDE, lsl #mask_bpp_shift]! +.endif +.endif +.endm + +.macro cache_preload_simple +.if (PREFETCH_TYPE_CURRENT == PREFETCH_TYPE_SIMPLE) +.if src_bpp > 0 + pld [SRC, #(PREFETCH_DISTANCE_SIMPLE * src_bpp / 8)] +.endif +.if dst_r_bpp > 0 + pld [DST_R, #(PREFETCH_DISTANCE_SIMPLE * dst_r_bpp / 8)] +.endif +.if mask_bpp > 0 + pld [MASK, #(PREFETCH_DISTANCE_SIMPLE * mask_bpp / 8)] +.endif +.endif +.endm + +.macro fetch_mask_pixblock + pixld pixblock_size, mask_bpp, \ + (mask_basereg - pixblock_size * mask_bpp / 64), MASK +.endm + +/* + * Macro which is used to process leading pixels until destination + * pointer is properly aligned (at 16 bytes boundary). When destination + * buffer uses 16bpp format, this is unnecessary, or even pointless. + */ +.macro ensure_destination_ptr_alignment process_pixblock_head, \ + process_pixblock_tail, \ + process_pixblock_tail_head +.if dst_w_bpp != 24 + tst DST_R, #0xF + beq 2f + +.irp lowbit, 1, 2, 4, 8, 16 +local skip1 +.if (dst_w_bpp <= (lowbit * 8)) && ((lowbit * 8) < (pixblock_size * dst_w_bpp)) +.if lowbit < 16 /* we don't need more than 16-byte alignment */ + tst DST_R, #lowbit + beq 1f +.endif + pixld_src (lowbit * 8 / dst_w_bpp), src_bpp, src_basereg, SRC + pixld (lowbit * 8 / dst_w_bpp), mask_bpp, mask_basereg, MASK +.if dst_r_bpp > 0 + pixld_a (lowbit * 8 / dst_r_bpp), dst_r_bpp, dst_r_basereg, DST_R +.else + add DST_R, DST_R, #lowbit +.endif + PF add PF_X, PF_X, #(lowbit * 8 / dst_w_bpp) + sub W, W, #(lowbit * 8 / dst_w_bpp) +1: +.endif +.endr + pixdeinterleave src_bpp, src_basereg + pixdeinterleave mask_bpp, mask_basereg + pixdeinterleave dst_r_bpp, dst_r_basereg + + process_pixblock_head + cache_preload 0, pixblock_size + cache_preload_simple + process_pixblock_tail + + pixinterleave dst_w_bpp, dst_w_basereg +.irp lowbit, 1, 2, 4, 8, 16 +.if (dst_w_bpp <= (lowbit * 8)) && ((lowbit * 8) < (pixblock_size * dst_w_bpp)) +.if lowbit < 16 /* we don't need more than 16-byte alignment */ + tst DST_W, #lowbit + beq 1f +.endif + pixst_a (lowbit * 8 / dst_w_bpp), dst_w_bpp, dst_w_basereg, DST_W +1: +.endif +.endr +.endif +2: +.endm + +/* + * Special code for processing up to (pixblock_size - 1) remaining + * trailing pixels. As SIMD processing performs operation on + * pixblock_size pixels, anything smaller than this has to be loaded + * and stored in a special way. 
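+ * For example, with pixblock_size 8 a remainder of 5 pixels is handled
+ * as a 4-pixel chunk followed by a 1-pixel chunk, one chunk for every
+ * bit set in the remainder.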
Loading and storing of pixel data is + * performed in such a way that we fill some 'slots' in the NEON + * registers (some slots naturally are unused), then perform compositing + * operation as usual. In the end, the data is taken from these 'slots' + * and saved to memory. + * + * cache_preload_flag - allows to suppress prefetch if + * set to 0 + * dst_aligned_flag - selects whether destination buffer + * is aligned + */ +.macro process_trailing_pixels cache_preload_flag, \ + dst_aligned_flag, \ + process_pixblock_head, \ + process_pixblock_tail, \ + process_pixblock_tail_head + tst W, #(pixblock_size - 1) + beq 2f +.irp chunk_size, 16, 8, 4, 2, 1 +.if pixblock_size > chunk_size + tst W, #chunk_size + beq 1f + pixld_src chunk_size, src_bpp, src_basereg, SRC + pixld chunk_size, mask_bpp, mask_basereg, MASK +.if dst_aligned_flag != 0 + pixld_a chunk_size, dst_r_bpp, dst_r_basereg, DST_R +.else + pixld chunk_size, dst_r_bpp, dst_r_basereg, DST_R +.endif +.if cache_preload_flag != 0 + PF add PF_X, PF_X, #chunk_size +.endif +1: +.endif +.endr + pixdeinterleave src_bpp, src_basereg + pixdeinterleave mask_bpp, mask_basereg + pixdeinterleave dst_r_bpp, dst_r_basereg + + process_pixblock_head +.if cache_preload_flag != 0 + cache_preload 0, pixblock_size + cache_preload_simple +.endif + process_pixblock_tail + pixinterleave dst_w_bpp, dst_w_basereg +.irp chunk_size, 16, 8, 4, 2, 1 +.if pixblock_size > chunk_size + tst W, #chunk_size + beq 1f +.if dst_aligned_flag != 0 + pixst_a chunk_size, dst_w_bpp, dst_w_basereg, DST_W +.else + pixst chunk_size, dst_w_bpp, dst_w_basereg, DST_W +.endif +1: +.endif +.endr +2: +.endm + +/* + * Macro, which performs all the needed operations to switch to the next + * scanline and start the next loop iteration unless all the scanlines + * are already processed. + */ +.macro advance_to_next_scanline start_of_loop_label +.if regs_shortage + ldrd W, [sp] /* load W and H (width and height) from stack */ +.else + mov W, ORIG_W +.endif + add DST_W, DST_W, DST_STRIDE, lsl #dst_bpp_shift +.if src_bpp != 0 + add SRC, SRC, SRC_STRIDE, lsl #src_bpp_shift +.endif +.if mask_bpp != 0 + add MASK, MASK, MASK_STRIDE, lsl #mask_bpp_shift +.endif +.if (dst_w_bpp != 24) + sub DST_W, DST_W, W, lsl #dst_bpp_shift +.endif +.if (src_bpp != 24) && (src_bpp != 0) + sub SRC, SRC, W, lsl #src_bpp_shift +.endif +.if (mask_bpp != 24) && (mask_bpp != 0) + sub MASK, MASK, W, lsl #mask_bpp_shift +.endif + subs H, H, #1 + mov DST_R, DST_W +.if regs_shortage + str H, [sp, #4] /* save updated height to stack */ +.endif + bge start_of_loop_label +.endm + +/* + * Registers are allocated in the following way by default: + * d0, d1, d2, d3 - reserved for loading source pixel data + * d4, d5, d6, d7 - reserved for loading destination pixel data + * d24, d25, d26, d27 - reserved for loading mask pixel data + * d28, d29, d30, d31 - final destination pixel data for writeback to memory + */ +.macro generate_composite_function fname, \ + src_bpp_, \ + mask_bpp_, \ + dst_w_bpp_, \ + flags, \ + pixblock_size_, \ + prefetch_distance, \ + init, \ + cleanup, \ + process_pixblock_head, \ + process_pixblock_tail, \ + process_pixblock_tail_head, \ + dst_w_basereg_ = 28, \ + dst_r_basereg_ = 4, \ + src_basereg_ = 0, \ + mask_basereg_ = 24 + + pixman_asm_function fname + + push {r4-r12, lr} /* save all registers */ + +/* + * Select prefetch type for this function. If prefetch distance is + * set to 0 or one of the color formats is 24bpp, SIMPLE prefetch + * has to be used instead of ADVANCED. 
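+ * (24bpp pixels are 3 bytes each, not a power of two, so the
+ * shift-based address arithmetic the advanced prefetcher relies on
+ * cannot be applied to them.)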
+ */ + .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_DEFAULT +.if prefetch_distance == 0 + .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_NONE +.elseif (PREFETCH_TYPE_CURRENT > PREFETCH_TYPE_SIMPLE) && \ + ((src_bpp_ == 24) || (mask_bpp_ == 24) || (dst_w_bpp_ == 24)) + .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_SIMPLE +.endif + +/* + * Make some macro arguments globally visible and accessible + * from other macros + */ + .set src_bpp, src_bpp_ + .set mask_bpp, mask_bpp_ + .set dst_w_bpp, dst_w_bpp_ + .set pixblock_size, pixblock_size_ + .set dst_w_basereg, dst_w_basereg_ + .set dst_r_basereg, dst_r_basereg_ + .set src_basereg, src_basereg_ + .set mask_basereg, mask_basereg_ + + .macro pixld_src x:vararg + pixld x + .endm + .macro fetch_src_pixblock + pixld_src pixblock_size, src_bpp, \ + (src_basereg - pixblock_size * src_bpp / 64), SRC + .endm +/* + * Assign symbolic names to registers + */ + W .req r0 /* width (is updated during processing) */ + H .req r1 /* height (is updated during processing) */ + DST_W .req r2 /* destination buffer pointer for writes */ + DST_STRIDE .req r3 /* destination image stride */ + SRC .req r4 /* source buffer pointer */ + SRC_STRIDE .req r5 /* source image stride */ + DST_R .req r6 /* destination buffer pointer for reads */ + + MASK .req r7 /* mask pointer */ + MASK_STRIDE .req r8 /* mask stride */ + + PF_CTL .req r9 /* combined lines counter and prefetch */ + /* distance increment counter */ + PF_X .req r10 /* pixel index in a scanline for current */ + /* pretetch position */ + PF_SRC .req r11 /* pointer to source scanline start */ + /* for prefetch purposes */ + PF_DST .req r12 /* pointer to destination scanline start */ + /* for prefetch purposes */ + PF_MASK .req r14 /* pointer to mask scanline start */ + /* for prefetch purposes */ +/* + * Check whether we have enough registers for all the local variables. + * If we don't have enough registers, original width and height are + * kept on top of stack (and 'regs_shortage' variable is set to indicate + * this for the rest of code). Even if there are enough registers, the + * allocation scheme may be a bit different depending on whether source + * or mask is not used. 
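+ * (In the worst case, with source, mask and the advanced prefetcher
+ * all active, r0-r12 and r14 are all spoken for, so ORIG_W and DUMMY
+ * are forced to share r1 and `regs_shortage' is set.)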
+ */ +.if (PREFETCH_TYPE_CURRENT < PREFETCH_TYPE_ADVANCED) + ORIG_W .req r10 /* saved original width */ + DUMMY .req r12 /* temporary register */ + .set regs_shortage, 0 +.elseif mask_bpp == 0 + ORIG_W .req r7 /* saved original width */ + DUMMY .req r8 /* temporary register */ + .set regs_shortage, 0 +.elseif src_bpp == 0 + ORIG_W .req r4 /* saved original width */ + DUMMY .req r5 /* temporary register */ + .set regs_shortage, 0 +.else + ORIG_W .req r1 /* saved original width */ + DUMMY .req r1 /* temporary register */ + .set regs_shortage, 1 +.endif + + .set mask_bpp_shift, -1 +.if src_bpp == 32 + .set src_bpp_shift, 2 +.elseif src_bpp == 24 + .set src_bpp_shift, 0 +.elseif src_bpp == 16 + .set src_bpp_shift, 1 +.elseif src_bpp == 8 + .set src_bpp_shift, 0 +.elseif src_bpp == 0 + .set src_bpp_shift, -1 +.else + .error "requested src bpp (src_bpp) is not supported" +.endif +.if mask_bpp == 32 + .set mask_bpp_shift, 2 +.elseif mask_bpp == 24 + .set mask_bpp_shift, 0 +.elseif mask_bpp == 8 + .set mask_bpp_shift, 0 +.elseif mask_bpp == 0 + .set mask_bpp_shift, -1 +.else + .error "requested mask bpp (mask_bpp) is not supported" +.endif +.if dst_w_bpp == 32 + .set dst_bpp_shift, 2 +.elseif dst_w_bpp == 24 + .set dst_bpp_shift, 0 +.elseif dst_w_bpp == 16 + .set dst_bpp_shift, 1 +.elseif dst_w_bpp == 8 + .set dst_bpp_shift, 0 +.else + .error "requested dst bpp (dst_w_bpp) is not supported" +.endif + +.if (((flags) & FLAG_DST_READWRITE) != 0) + .set dst_r_bpp, dst_w_bpp +.else + .set dst_r_bpp, 0 +.endif +.if (((flags) & FLAG_DEINTERLEAVE_32BPP) != 0) + .set DEINTERLEAVE_32BPP_ENABLED, 1 +.else + .set DEINTERLEAVE_32BPP_ENABLED, 0 +.endif + +.if prefetch_distance < 0 || prefetch_distance > 15 + .error "invalid prefetch distance (prefetch_distance)" +.endif + +.if src_bpp > 0 + ldr SRC, [sp, #40] +.endif +.if mask_bpp > 0 + ldr MASK, [sp, #48] +.endif + PF mov PF_X, #0 +.if src_bpp > 0 + ldr SRC_STRIDE, [sp, #44] +.endif +.if mask_bpp > 0 + ldr MASK_STRIDE, [sp, #52] +.endif + mov DST_R, DST_W + +.if src_bpp == 24 + sub SRC_STRIDE, SRC_STRIDE, W + sub SRC_STRIDE, SRC_STRIDE, W, lsl #1 +.endif +.if mask_bpp == 24 + sub MASK_STRIDE, MASK_STRIDE, W + sub MASK_STRIDE, MASK_STRIDE, W, lsl #1 +.endif +.if dst_w_bpp == 24 + sub DST_STRIDE, DST_STRIDE, W + sub DST_STRIDE, DST_STRIDE, W, lsl #1 +.endif + +/* + * Setup advanced prefetcher initial state + */ + PF mov PF_SRC, SRC + PF mov PF_DST, DST_R + PF mov PF_MASK, MASK + /* PF_CTL = prefetch_distance | ((h - 1) << 4) */ + PF mov PF_CTL, H, lsl #4 + PF add PF_CTL, #(prefetch_distance - 0x10) + + init +.if regs_shortage + push {r0, r1} +.endif + subs H, H, #1 +.if regs_shortage + str H, [sp, #4] /* save updated height to stack */ +.else + mov ORIG_W, W +.endif + blt 9f + cmp W, #(pixblock_size * 2) + blt 8f +/* + * This is the start of the pipelined loop, which if optimized for + * long scanlines + */ +0: + ensure_destination_ptr_alignment process_pixblock_head, \ + process_pixblock_tail, \ + process_pixblock_tail_head + + /* Implement "head (tail_head) ... 
(tail_head) tail" loop pattern */ + pixld_a pixblock_size, dst_r_bpp, \ + (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R + fetch_src_pixblock + pixld pixblock_size, mask_bpp, \ + (mask_basereg - pixblock_size * mask_bpp / 64), MASK + PF add PF_X, PF_X, #pixblock_size + process_pixblock_head + cache_preload 0, pixblock_size + cache_preload_simple + subs W, W, #(pixblock_size * 2) + blt 2f +1: + process_pixblock_tail_head + cache_preload_simple + subs W, W, #pixblock_size + bge 1b +2: + process_pixblock_tail + pixst_a pixblock_size, dst_w_bpp, \ + (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W + + /* Process the remaining trailing pixels in the scanline */ + process_trailing_pixels 1, 1, \ + process_pixblock_head, \ + process_pixblock_tail, \ + process_pixblock_tail_head + advance_to_next_scanline 0b + +.if regs_shortage + pop {r0, r1} +.endif + cleanup + pop {r4-r12, pc} /* exit */ +/* + * This is the start of the loop, designed to process images with small width + * (less than pixblock_size * 2 pixels). In this case neither pipelining + * nor prefetch are used. + */ +8: + /* Process exactly pixblock_size pixels if needed */ + tst W, #pixblock_size + beq 1f + pixld pixblock_size, dst_r_bpp, \ + (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R + fetch_src_pixblock + pixld pixblock_size, mask_bpp, \ + (mask_basereg - pixblock_size * mask_bpp / 64), MASK + process_pixblock_head + process_pixblock_tail + pixst pixblock_size, dst_w_bpp, \ + (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W +1: + /* Process the remaining trailing pixels in the scanline */ + process_trailing_pixels 0, 0, \ + process_pixblock_head, \ + process_pixblock_tail, \ + process_pixblock_tail_head + advance_to_next_scanline 8b +9: +.if regs_shortage + pop {r0, r1} +.endif + cleanup + pop {r4-r12, pc} /* exit */ + + .purgem fetch_src_pixblock + .purgem pixld_src + + .unreq SRC + .unreq MASK + .unreq DST_R + .unreq DST_W + .unreq ORIG_W + .unreq W + .unreq H + .unreq SRC_STRIDE + .unreq DST_STRIDE + .unreq MASK_STRIDE + .unreq PF_CTL + .unreq PF_X + .unreq PF_SRC + .unreq PF_DST + .unreq PF_MASK + .unreq DUMMY + .endfunc +.endm + +/* + * A simplified variant of function generation template for a single + * scanline processing (for implementing pixman combine functions) + */ +.macro generate_composite_function_scanline use_nearest_scaling, \ + fname, \ + src_bpp_, \ + mask_bpp_, \ + dst_w_bpp_, \ + flags, \ + pixblock_size_, \ + init, \ + cleanup, \ + process_pixblock_head, \ + process_pixblock_tail, \ + process_pixblock_tail_head, \ + dst_w_basereg_ = 28, \ + dst_r_basereg_ = 4, \ + src_basereg_ = 0, \ + mask_basereg_ = 24 + + pixman_asm_function fname + + .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_NONE +/* + * Make some macro arguments globally visible and accessible + * from other macros + */ + .set src_bpp, src_bpp_ + .set mask_bpp, mask_bpp_ + .set dst_w_bpp, dst_w_bpp_ + .set pixblock_size, pixblock_size_ + .set dst_w_basereg, dst_w_basereg_ + .set dst_r_basereg, dst_r_basereg_ + .set src_basereg, src_basereg_ + .set mask_basereg, mask_basereg_ + +.if use_nearest_scaling != 0 + /* + * Assign symbolic names to registers for nearest scaling + */ + W .req r0 + DST_W .req r1 + SRC .req r2 + VX .req r3 + UNIT_X .req ip + MASK .req lr + TMP1 .req r4 + TMP2 .req r5 + DST_R .req r6 + SRC_WIDTH_FIXED .req r7 + + .macro pixld_src x:vararg + pixld_s x + .endm + + ldr UNIT_X, [sp] + push {r4-r8, lr} + ldr SRC_WIDTH_FIXED, [sp, #(24 + 4)] + .if mask_bpp != 0 + ldr MASK, [sp, #(24 + 8)] + .endif +.else + 
/* + * Assign symbolic names to registers + */ + W .req r0 /* width (is updated during processing) */ + DST_W .req r1 /* destination buffer pointer for writes */ + SRC .req r2 /* source buffer pointer */ + DST_R .req ip /* destination buffer pointer for reads */ + MASK .req r3 /* mask pointer */ + + .macro pixld_src x:vararg + pixld x + .endm +.endif + +.if (((flags) & FLAG_DST_READWRITE) != 0) + .set dst_r_bpp, dst_w_bpp +.else + .set dst_r_bpp, 0 +.endif +.if (((flags) & FLAG_DEINTERLEAVE_32BPP) != 0) + .set DEINTERLEAVE_32BPP_ENABLED, 1 +.else + .set DEINTERLEAVE_32BPP_ENABLED, 0 +.endif + + .macro fetch_src_pixblock + pixld_src pixblock_size, src_bpp, \ + (src_basereg - pixblock_size * src_bpp / 64), SRC + .endm + + init + mov DST_R, DST_W + + cmp W, #pixblock_size + blt 8f + + ensure_destination_ptr_alignment process_pixblock_head, \ + process_pixblock_tail, \ + process_pixblock_tail_head + + subs W, W, #pixblock_size + blt 7f + + /* Implement "head (tail_head) ... (tail_head) tail" loop pattern */ + pixld_a pixblock_size, dst_r_bpp, \ + (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R + fetch_src_pixblock + pixld pixblock_size, mask_bpp, \ + (mask_basereg - pixblock_size * mask_bpp / 64), MASK + process_pixblock_head + subs W, W, #pixblock_size + blt 2f +1: + process_pixblock_tail_head + subs W, W, #pixblock_size + bge 1b +2: + process_pixblock_tail + pixst_a pixblock_size, dst_w_bpp, \ + (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W +7: + /* Process the remaining trailing pixels in the scanline (dst aligned) */ + process_trailing_pixels 0, 1, \ + process_pixblock_head, \ + process_pixblock_tail, \ + process_pixblock_tail_head + + cleanup +.if use_nearest_scaling != 0 + pop {r4-r8, pc} /* exit */ +.else + bx lr /* exit */ +.endif +8: + /* Process the remaining trailing pixels in the scanline (dst unaligned) */ + process_trailing_pixels 0, 0, \ + process_pixblock_head, \ + process_pixblock_tail, \ + process_pixblock_tail_head + + cleanup + +.if use_nearest_scaling != 0 + pop {r4-r8, pc} /* exit */ + + .unreq DST_R + .unreq SRC + .unreq W + .unreq VX + .unreq UNIT_X + .unreq TMP1 + .unreq TMP2 + .unreq DST_W + .unreq MASK + .unreq SRC_WIDTH_FIXED + +.else + bx lr /* exit */ + + .unreq SRC + .unreq MASK + .unreq DST_R + .unreq DST_W + .unreq W +.endif + + .purgem fetch_src_pixblock + .purgem pixld_src + + .endfunc +.endm + +.macro generate_composite_function_single_scanline x:vararg + generate_composite_function_scanline 0, x +.endm + +.macro generate_composite_function_nearest_scanline x:vararg + generate_composite_function_scanline 1, x +.endm + +/* Default prologue/epilogue, nothing special needs to be done */ + +.macro default_init +.endm + +.macro default_cleanup +.endm + +/* + * Prologue/epilogue variant which additionally saves/restores d8-d15 + * registers (they need to be saved/restored by callee according to ABI). + * This is required if the code needs to use all the NEON registers. 
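+ * (d8-d15 are the callee-saved VFP/NEON registers in the AAPCS, so any
+ * process_pixblock_* implementation that clobbers q4-q7 must opt into
+ * these variants instead of the empty defaults above.)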
+ */ + +.macro default_init_need_all_regs + vpush {d8-d15} +.endm + +.macro default_cleanup_need_all_regs + vpop {d8-d15} +.endm + +/******************************************************************************/ diff --git a/vendor/github.com/Benau/go_rlottie/vector_stb_stb_image.cpp b/vendor/github.com/Benau/go_rlottie/vector_stb_stb_image.cpp new file mode 100644 index 00000000..6fd89c88 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_stb_stb_image.cpp @@ -0,0 +1,59 @@ +/* + * configure stb_image about + * the image we will support + */ +#define STB_IMAGE_IMPLEMENTATION + +#define STBI_ONLY_JPEG +#define STBI_ONLY_PNG +#define STBI_NO_HDR +#define STBI_NO_LINEAR +#define STBI_NO_GIF +#define STBI_NO_PIC + +#include "vector_stb_stb_image.h" + +#if defined _WIN32 || defined __CYGWIN__ + #ifdef RLOTTIE_BUILD + #define RLOTTIE_API __declspec(dllexport) + #else + #define RLOTTIE_API __declspec(dllimport) + #endif +#else + #ifdef RLOTTIE_BUILD + #define RLOTTIE_API __attribute__ ((visibility ("default"))) + #else + #define RLOTTIE_API + #endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * exported function wrapper from the library + */ + +RLOTTIE_API unsigned char *lottie_image_load(char const *filename, int *x, + int *y, int *comp, int req_comp) +{ + return stbi_load(filename, x, y, comp, req_comp); +} + +RLOTTIE_API unsigned char *lottie_image_load_from_data(const char *imageData, + int len, int *x, int *y, + int *comp, int req_comp) +{ + unsigned char *data = (unsigned char *)imageData; + return stbi_load_from_memory(data, len, x, y, comp, req_comp); +} + +RLOTTIE_API void lottie_image_free(unsigned char *data) +{ + stbi_image_free(data); +} + +#ifdef __cplusplus +} +#endif diff --git a/vendor/github.com/Benau/go_rlottie/vector_stb_stb_image.h b/vendor/github.com/Benau/go_rlottie/vector_stb_stb_image.h new file mode 100644 index 00000000..56f81830 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_stb_stb_image.h @@ -0,0 +1,7509 @@ +/* stb_image - v2.19 - public domain image loader - http://nothings.org/stb + no warranty implied; use at your own risk + + Do this: + #define STB_IMAGE_IMPLEMENTATION + before you include this file in *one* C or C++ file to create the implementation. + + // i.e. it should look like this: + #include ... + #include ... + #include ... + #define STB_IMAGE_IMPLEMENTATION + #include "stb_image.h" + + You can #define STBI_ASSERT(x) before the #include to avoid using assert.h. + And #define STBI_MALLOC, STBI_REALLOC, and STBI_FREE to avoid using malloc,realloc,free + + + QUICK NOTES: + Primarily of interest to game developers and other people who can + avoid problematic images and only need the trivial interface + + JPEG baseline & progressive (12 bpc/arithmetic not supported, same as stock IJG lib) + PNG 1/2/4/8/16-bit-per-channel + + TGA (not sure what subset, if a subset) + BMP non-1bpp, non-RLE + PSD (composited view only, no extra channels, 8/16 bit-per-channel) + + GIF (*comp always reports as 4-channel) + HDR (radiance rgbE format) + PIC (Softimage PIC) + PNM (PPM and PGM binary only) + + Animated GIF still needs a proper API, but here's one way to do it: + http://gist.github.com/urraka/685d9a6340b26b830d49 + + - decode from memory or through FILE (define STBI_NO_STDIO to remove code) + - decode from arbitrary I/O callbacks + - SIMD acceleration on x86/x64 (SSE2) and ARM (NEON) + + Full documentation under "DOCUMENTATION" below. + + +LICENSE + + See end of file for license information. 
+ +RECENT REVISION HISTORY: + + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) bugfix, 1-bit BMP, 16-bitness query, fix warnings + 2.16 (2017-07-23) all functions have 16-bit variants; optimizations; bugfixes + 2.15 (2017-03-18) fix png-1,2,4; all Imagenet JPGs; no runtime SSE detection on GCC + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-12-04) experimental 16-bit API, only for PNG so far; fixes + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) 16-bit PNGS; enable SSE2 in non-gcc x64 + RGB-format JPEG; remove white matting in PSD; + allocate large structures on the stack; + correct channel count for PNG & BMP + 2.10 (2016-01-22) avoid warning introduced in 2.09 + 2.09 (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED + + See end of file for full revision history. + + + ============================ Contributors ========================= + + Image formats Extensions, features + Sean Barrett (jpeg, png, bmp) Jetro Lauha (stbi_info) + Nicolas Schulz (hdr, psd) Martin "SpartanJ" Golini (stbi_info) + Jonathan Dummer (tga) James "moose2000" Brown (iPhone PNG) + Jean-Marc Lienher (gif) Ben "Disch" Wenger (io callbacks) + Tom Seddon (pic) Omar Cornut (1/2/4-bit PNG) + Thatcher Ulrich (psd) Nicolas Guillemot (vertical flip) + Ken Miller (pgm, ppm) Richard Mitton (16-bit PSD) + github:urraka (animated gif) Junggon Kim (PNM comments) + Christopher Forseth (animated gif) Daniel Gibson (16-bit TGA) + socks-the-fox (16-bit PNG) + Jeremy Sawicki (handle all ImageNet JPGs) + Optimizations & bugfixes Mikhail Morozov (1-bit BMP) + Fabian "ryg" Giesen Anael Seghezzi (is-16-bit query) + Arseny Kapoulkine + John-Mark Allen + + Bug & warning fixes + Marc LeBlanc David Woo Guillaume George Martins Mozeiko + Christpher Lloyd Jerry Jansson Joseph Thomson Phil Jordan + Dave Moore Roy Eltham Hayaki Saito Nathan Reed + Won Chun Luke Graham Johan Duparc Nick Verigakis + the Horde3D community Thomas Ruf Ronny Chevalier github:rlyeh + Janez Zemva John Bartholomew Michal Cichon github:romigrou + Jonathan Blow Ken Hamada Tero Hanninen github:svdijk + Laurent Gomila Cort Stratton Sergio Gonzalez github:snagar + Aruelien Pocheville Thibault Reuille Cass Everitt github:Zelex + Ryamond Barbiero Paul Du Bois Engin Manap github:grim210 + Aldo Culquicondor Philipp Wiesemann Dale Weiler github:sammyhw + Oriol Ferrer Mesia Josh Tobin Matthew Gregan github:phprus + Julian Raschke Gregory Mullen Baldur Karlsson github:poppolopoppo + Christian Floisand Kevin Schmidt github:darealshinji + Blazej Dariusz Roszkowski github:Michaelangel007 +*/ + +#ifndef STBI_INCLUDE_STB_IMAGE_H +#define STBI_INCLUDE_STB_IMAGE_H + +// DOCUMENTATION +// +// Limitations: +// - no 12-bit-per-channel JPEG +// - no JPEGs with arithmetic coding +// - GIF always returns *comp=4 +// +// Basic usage (see HDR discussion below for HDR usage): +// int x,y,n; +// unsigned char *data = stbi_load(filename, &x, &y, &n, 0); +// // ... process data if not NULL ... +// // ... x = width, y = height, n = # 8-bit components per pixel ... +// // ... replace '0' with '1'..'4' to force that many components per pixel +// // ... 
but 'n' will always be the number that it would have been if you said 0 +// stbi_image_free(data) +// +// Standard parameters: +// int *x -- outputs image width in pixels +// int *y -- outputs image height in pixels +// int *channels_in_file -- outputs # of image components in image file +// int desired_channels -- if non-zero, # of image components requested in result +// +// The return value from an image loader is an 'unsigned char *' which points +// to the pixel data, or NULL on an allocation failure or if the image is +// corrupt or invalid. The pixel data consists of *y scanlines of *x pixels, +// with each pixel consisting of N interleaved 8-bit components; the first +// pixel pointed to is top-left-most in the image. There is no padding between +// image scanlines or between pixels, regardless of format. The number of +// components N is 'desired_channels' if desired_channels is non-zero, or +// *channels_in_file otherwise. If desired_channels is non-zero, +// *channels_in_file has the number of components that _would_ have been +// output otherwise. E.g. if you set desired_channels to 4, you will always +// get RGBA output, but you can check *channels_in_file to see if it's trivially +// opaque because e.g. there were only 3 channels in the source image. +// +// An output image with N components has the following components interleaved +// in this order in each pixel: +// +// N=#comp components +// 1 grey +// 2 grey, alpha +// 3 red, green, blue +// 4 red, green, blue, alpha +// +// If image loading fails for any reason, the return value will be NULL, +// and *x, *y, *channels_in_file will be unchanged. The function +// stbi_failure_reason() can be queried for an extremely brief, end-user +// unfriendly explanation of why the load failed. Define STBI_NO_FAILURE_STRINGS +// to avoid compiling these strings at all, and STBI_FAILURE_USERMSG to get slightly +// more user-friendly ones. +// +// Paletted PNG, BMP, GIF, and PIC images are automatically depalettized. +// +// =========================================================================== +// +// Philosophy +// +// stb libraries are designed with the following priorities: +// +// 1. easy to use +// 2. easy to maintain +// 3. good performance +// +// Sometimes I let "good performance" creep up in priority over "easy to maintain", +// and for best performance I may provide less-easy-to-use APIs that give higher +// performance, in addition to the easy to use ones. Nevertheless, it's important +// to keep in mind that from the standpoint of you, a client of this library, +// all you care about is #1 and #3, and stb libraries DO NOT emphasize #3 above all. +// +// Some secondary priorities arise directly from the first two, some of which +// make more explicit reasons why performance can't be emphasized. +// +// - Portable ("ease of use") +// - Small source code footprint ("easy to maintain") +// - No dependencies ("ease of use") +// +// =========================================================================== +// +// I/O callbacks +// +// I/O callbacks allow you to read from arbitrary sources, like packaged +// files or some other source. Data read from callbacks are processed +// through a small internal buffer (currently 128 bytes) to try to reduce +// overhead. +// +// The three functions you must define are "read" (reads some bytes of data), +// "skip" (skips some bytes of data), "eof" (reports if the stream is at the end). 
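+//
+// As a minimal sketch (illustrative only -- mem_ctx and the mem_* helpers
+// are hypothetical names, not part of this library), callbacks that read
+// from a plain memory buffer could look like:
+//
+//    typedef struct { const unsigned char *data; int len, pos; } mem_ctx;
+//
+//    static int mem_read(void *user, char *out, int size)
+//    {
+//       mem_ctx *m = (mem_ctx *) user;
+//       int n = m->len - m->pos;           // bytes still available
+//       if (n > size) n = size;
+//       memcpy(out, m->data + m->pos, n);
+//       m->pos += n;
+//       return n;                          // bytes actually read
+//    }
+//
+//    static void mem_skip(void *user, int n) { ((mem_ctx *) user)->pos += n; }
+//
+//    static int mem_eof(void *user)
+//    {
+//       mem_ctx *m = (mem_ctx *) user;
+//       return m->pos >= m->len;
+//    }
+//
+//    mem_ctx ctx = { buffer, buffer_len, 0 };
+//    stbi_io_callbacks cb = { mem_read, mem_skip, mem_eof };
+//    unsigned char *data = stbi_load_from_callbacks(&cb, &ctx, &x, &y, &n, 0);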
+// +// =========================================================================== +// +// SIMD support +// +// The JPEG decoder will try to automatically use SIMD kernels on x86 when +// supported by the compiler. For ARM Neon support, you must explicitly +// request it. +// +// (The old do-it-yourself SIMD API is no longer supported in the current +// code.) +// +// On x86, SSE2 will automatically be used when available based on a run-time +// test; if not, the generic C versions are used as a fall-back. On ARM targets, +// the typical path is to have separate builds for NEON and non-NEON devices +// (at least this is true for iOS and Android). Therefore, the NEON support is +// toggled by a build flag: define STBI_NEON to get NEON loops. +// +// If for some reason you do not want to use any of SIMD code, or if +// you have issues compiling it, you can disable it entirely by +// defining STBI_NO_SIMD. +// +// =========================================================================== +// +// HDR image support (disable by defining STBI_NO_HDR) +// +// stb_image now supports loading HDR images in general, and currently +// the Radiance .HDR file format, although the support is provided +// generically. You can still load any file through the existing interface; +// if you attempt to load an HDR file, it will be automatically remapped to +// LDR, assuming gamma 2.2 and an arbitrary scale factor defaulting to 1; +// both of these constants can be reconfigured through this interface: +// +// stbi_hdr_to_ldr_gamma(2.2f); +// stbi_hdr_to_ldr_scale(1.0f); +// +// (note, do not use _inverse_ constants; stbi_image will invert them +// appropriately). +// +// Additionally, there is a new, parallel interface for loading files as +// (linear) floats to preserve the full dynamic range: +// +// float *data = stbi_loadf(filename, &x, &y, &n, 0); +// +// If you load LDR images through this interface, those images will +// be promoted to floating point values, run through the inverse of +// constants corresponding to the above: +// +// stbi_ldr_to_hdr_scale(1.0f); +// stbi_ldr_to_hdr_gamma(2.2f); +// +// Finally, given a filename (or an open file or memory block--see header +// file for details) containing image data, you can query for the "most +// appropriate" interface to use (that is, whether the image is HDR or +// not), using: +// +// stbi_is_hdr(char *filename); +// +// =========================================================================== +// +// iPhone PNG support: +// +// By default we convert iphone-formatted PNGs back to RGB, even though +// they are internally encoded differently. You can disable this conversion +// by by calling stbi_convert_iphone_png_to_rgb(0), in which case +// you will always just get the native iphone "format" through (which +// is BGR stored in RGB). +// +// Call stbi_set_unpremultiply_on_load(1) as well to force a divide per +// pixel to remove any premultiplied alpha *only* if the image file explicitly +// says there's premultiplied data (currently only happens in iPhone images, +// and only if iPhone convert-to-rgb processing is on). +// +// =========================================================================== +// +// ADDITIONAL CONFIGURATION +// +// - You can suppress implementation of any of the decoders to reduce +// your code footprint by #defining one or more of the following +// symbols before creating the implementation. 
+//
+// STBI_NO_JPEG
+// STBI_NO_PNG
+// STBI_NO_BMP
+// STBI_NO_PSD
+// STBI_NO_TGA
+// STBI_NO_GIF
+// STBI_NO_HDR
+// STBI_NO_PIC
+// STBI_NO_PNM (.ppm and .pgm)
+//
+// - You can request *only* certain decoders and suppress all other ones
+// (this will be more forward-compatible, as addition of new decoders
+// doesn't require you to disable them explicitly):
+//
+// STBI_ONLY_JPEG
+// STBI_ONLY_PNG
+// STBI_ONLY_BMP
+// STBI_ONLY_PSD
+// STBI_ONLY_TGA
+// STBI_ONLY_GIF
+// STBI_ONLY_HDR
+// STBI_ONLY_PIC
+// STBI_ONLY_PNM (.ppm and .pgm)
+//
+// - If you use STBI_NO_PNG (or _ONLY_ without PNG), and you still
+// want the zlib decoder to be available, #define STBI_SUPPORT_ZLIB
+//
+
+
+#ifndef STBI_NO_STDIO
+#include <stdio.h>
+#endif // STBI_NO_STDIO
+
+#if defined _WIN32 || defined __CYGWIN__
+#include <windows.h>
+#endif // defined _WIN32 || defined __CYGWIN__
+
+#define STBI_VERSION 1
+
+enum
+{
+ STBI_default = 0, // only used for desired_channels
+
+ STBI_grey = 1,
+ STBI_grey_alpha = 2,
+ STBI_rgb = 3,
+ STBI_rgb_alpha = 4
+};
+
+typedef unsigned char stbi_uc;
+typedef unsigned short stbi_us;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef STB_IMAGE_STATIC
+#define STBIDEF static
+#else
+#define STBIDEF extern
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// PRIMARY API - works on images of any type
+//
+
+//
+// load image by filename, open file, or memory buffer
+//
+
+typedef struct
+{
+ int (*read) (void *user,char *data,int size); // fill 'data' with 'size' bytes. return number of bytes actually read
+ void (*skip) (void *user,int n); // skip the next 'n' bytes, or 'unget' the last -n bytes if negative
+ int (*eof) (void *user); // returns nonzero if we are at end of file/data
+} stbi_io_callbacks;
+
+////////////////////////////////////
+//
+// 8-bits-per-channel interface
+//
+
+STBIDEF stbi_uc *stbi_load_from_memory (stbi_uc const *buffer, int len , int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk , void *user, int *x, int *y, int *channels_in_file, int desired_channels);
+#ifndef STBI_NO_GIF
+STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp);
+#endif
+
+
+#ifndef STBI_NO_STDIO
+STBIDEF stbi_uc *stbi_load (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_uc *stbi_load_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels);
+// for stbi_load_from_file, file pointer is left pointing immediately after image
+#endif
+
+////////////////////////////////////
+//
+// 16-bits-per-channel interface
+//
+
+STBIDEF stbi_us *stbi_load_16_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels);
+
+#ifndef STBI_NO_STDIO
+STBIDEF stbi_us *stbi_load_16 (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_us *stbi_load_from_file_16(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels);
+#endif
+
+////////////////////////////////////
+//
+// float-per-channel interface
+//
+#ifndef STBI_NO_LINEAR
+ STBIDEF float *stbi_loadf_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels);
+ STBIDEF float 
*stbi_loadf_from_callbacks (stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels);
+
+ #ifndef STBI_NO_STDIO
+ STBIDEF float *stbi_loadf (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels);
+ STBIDEF float *stbi_loadf_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels);
+ #endif
+#endif
+
+#ifndef STBI_NO_HDR
+ STBIDEF void stbi_hdr_to_ldr_gamma(float gamma);
+ STBIDEF void stbi_hdr_to_ldr_scale(float scale);
+#endif // STBI_NO_HDR
+
+#ifndef STBI_NO_LINEAR
+ STBIDEF void stbi_ldr_to_hdr_gamma(float gamma);
+ STBIDEF void stbi_ldr_to_hdr_scale(float scale);
+#endif // STBI_NO_LINEAR
+
+// stbi_is_hdr is always defined, but always returns false if STBI_NO_HDR
+STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user);
+STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len);
+#ifndef STBI_NO_STDIO
+STBIDEF int stbi_is_hdr (char const *filename);
+STBIDEF int stbi_is_hdr_from_file(FILE *f);
+#endif // STBI_NO_STDIO
+
+
+// get a VERY brief reason for failure
+// NOT THREADSAFE
+STBIDEF const char *stbi_failure_reason (void);
+
+// free the loaded image -- this is just free()
+STBIDEF void stbi_image_free (void *retval_from_stbi_load);
+
+// get image dimensions & components without fully decoding
+STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp);
+STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp);
+STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len);
+STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *clbk, void *user);
+
+#ifndef STBI_NO_STDIO
+STBIDEF int stbi_info (char const *filename, int *x, int *y, int *comp);
+STBIDEF int stbi_info_from_file (FILE *f, int *x, int *y, int *comp);
+STBIDEF int stbi_is_16_bit (char const *filename);
+STBIDEF int stbi_is_16_bit_from_file(FILE *f);
+#endif
+
+
+
+// for image formats that explicitly notate that they have premultiplied alpha,
+// we just return the colors as stored in the file. set this flag to force
+// unpremultiplication. results are undefined if the unpremultiply overflows. 
+STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply);
+
+// indicate whether we should process iphone images back to canonical format,
+// or just pass them through "as-is"
+STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert);
+
+// flip the image vertically, so the first pixel in the output array is the bottom left
+STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip);
+
+// ZLIB client - used by PNG, available for other purposes
+
+STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen);
+STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header);
+STBIDEF char *stbi_zlib_decode_malloc(const char *buffer, int len, int *outlen);
+STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuffer, int ilen);
+
+STBIDEF char *stbi_zlib_decode_noheader_malloc(const char *buffer, int len, int *outlen);
+STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+//
+//
+//// end header file /////////////////////////////////////////////////////
+#endif // STBI_INCLUDE_STB_IMAGE_H
+
+#ifdef STB_IMAGE_IMPLEMENTATION
+
+#if defined(STBI_ONLY_JPEG) || defined(STBI_ONLY_PNG) || defined(STBI_ONLY_BMP) \
+ || defined(STBI_ONLY_TGA) || defined(STBI_ONLY_GIF) || defined(STBI_ONLY_PSD) \
+ || defined(STBI_ONLY_HDR) || defined(STBI_ONLY_PIC) || defined(STBI_ONLY_PNM) \
+ || defined(STBI_ONLY_ZLIB)
+ #ifndef STBI_ONLY_JPEG
+ #define STBI_NO_JPEG
+ #endif
+ #ifndef STBI_ONLY_PNG
+ #define STBI_NO_PNG
+ #endif
+ #ifndef STBI_ONLY_BMP
+ #define STBI_NO_BMP
+ #endif
+ #ifndef STBI_ONLY_PSD
+ #define STBI_NO_PSD
+ #endif
+ #ifndef STBI_ONLY_TGA
+ #define STBI_NO_TGA
+ #endif
+ #ifndef STBI_ONLY_GIF
+ #define STBI_NO_GIF
+ #endif
+ #ifndef STBI_ONLY_HDR
+ #define STBI_NO_HDR
+ #endif
+ #ifndef STBI_ONLY_PIC
+ #define STBI_NO_PIC
+ #endif
+ #ifndef STBI_ONLY_PNM
+ #define STBI_NO_PNM
+ #endif
+#endif
+
+#if defined(STBI_NO_PNG) && !defined(STBI_SUPPORT_ZLIB) && !defined(STBI_NO_ZLIB)
+#define STBI_NO_ZLIB
+#endif
+
+
+#include <stdarg.h>
+#include <stddef.h> // ptrdiff_t on osx
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+
+#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR)
+#include <math.h> // ldexp, pow
+#endif
+
+#ifndef STBI_NO_STDIO
+#include <stdio.h>
+#endif
+
+#ifndef STBI_ASSERT
+#include <assert.h>
+#define STBI_ASSERT(x) assert(x)
+#endif
+
+
+#ifndef _MSC_VER
+ #ifdef __cplusplus
+ #define stbi_inline inline
+ #else
+ #define stbi_inline
+ #endif
+#else
+ #define stbi_inline __forceinline
+#endif
+
+
+#ifdef _MSC_VER
+typedef unsigned short stbi__uint16;
+typedef signed short stbi__int16;
+typedef unsigned int stbi__uint32;
+typedef signed int stbi__int32;
+#else
+#include <stdint.h>
+typedef uint16_t stbi__uint16;
+typedef int16_t stbi__int16;
+typedef uint32_t stbi__uint32;
+typedef int32_t stbi__int32;
+#endif
+
+// should produce compiler error if size is wrong
+typedef unsigned char validate_uint32[sizeof(stbi__uint32)==4 ? 
1 : -1]; + +#ifdef _MSC_VER +#define STBI_NOTUSED(v) (void)(v) +#else +#define STBI_NOTUSED(v) (void)sizeof(v) +#endif + +#ifdef _MSC_VER +#define STBI_HAS_LROTL +#endif + +#ifdef STBI_HAS_LROTL + #define stbi_lrot(x,y) _lrotl(x,y) +#else + #define stbi_lrot(x,y) (((x) << (y)) | ((x) >> (32 - (y)))) +#endif + +#if defined(STBI_MALLOC) && defined(STBI_FREE) && (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED)) +// ok +#elif !defined(STBI_MALLOC) && !defined(STBI_FREE) && !defined(STBI_REALLOC) && !defined(STBI_REALLOC_SIZED) +// ok +#else +#error "Must define all or none of STBI_MALLOC, STBI_FREE, and STBI_REALLOC (or STBI_REALLOC_SIZED)." +#endif + +#ifndef STBI_MALLOC +#define STBI_MALLOC(sz) malloc(sz) +#define STBI_REALLOC(p,newsz) realloc(p,newsz) +#define STBI_FREE(p) free(p) +#endif + +#ifndef STBI_REALLOC_SIZED +#define STBI_REALLOC_SIZED(p,oldsz,newsz) STBI_REALLOC(p,newsz) +#endif + +// x86/x64 detection +#if defined(__x86_64__) || defined(_M_X64) +#define STBI__X64_TARGET +#elif defined(__i386) || defined(_M_IX86) +#define STBI__X86_TARGET +#endif + +#if defined(__GNUC__) && defined(STBI__X86_TARGET) && !defined(__SSE2__) && !defined(STBI_NO_SIMD) +// gcc doesn't support sse2 intrinsics unless you compile with -msse2, +// which in turn means it gets to use SSE2 everywhere. This is unfortunate, +// but previous attempts to provide the SSE2 functions with runtime +// detection caused numerous issues. The way architecture extensions are +// exposed in GCC/Clang is, sadly, not really suited for one-file libs. +// New behavior: if compiled with -msse2, we use SSE2 without any +// detection; if not, we don't use it at all. +#define STBI_NO_SIMD +#endif + +#if defined(__MINGW32__) && defined(STBI__X86_TARGET) && !defined(STBI_MINGW_ENABLE_SSE2) && !defined(STBI_NO_SIMD) +// Note that __MINGW32__ doesn't actually mean 32-bit, so we have to avoid STBI__X64_TARGET +// +// 32-bit MinGW wants ESP to be 16-byte aligned, but this is not in the +// Windows ABI and VC++ as well as Windows DLLs don't maintain that invariant. +// As a result, enabling SSE2 on 32-bit MinGW is dangerous when not +// simultaneously enabling "-mstackrealign". +// +// See https://github.com/nothings/stb/issues/81 for more information. +// +// So default to no SSE2 on 32-bit MinGW. If you've read this far and added +// -mstackrealign to your build settings, feel free to #define STBI_MINGW_ENABLE_SSE2. +#define STBI_NO_SIMD +#endif + +#if !defined(STBI_NO_SIMD) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET)) +#define STBI_SSE2 +#include + +#ifdef _MSC_VER + +#if _MSC_VER >= 1400 // not VC6 +#include // __cpuid +static int stbi__cpuid3(void) +{ + int info[4]; + __cpuid(info,1); + return info[3]; +} +#else +static int stbi__cpuid3(void) +{ + int res; + __asm { + mov eax,1 + cpuid + mov res,edx + } + return res; +} +#endif + +#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name + +static int stbi__sse2_available(void) +{ + int info3 = stbi__cpuid3(); + return ((info3 >> 26) & 1) != 0; +} +#else // assume GCC-style if not VC++ +#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) + +static int stbi__sse2_available(void) +{ + // If we're even attempting to compile this on GCC/Clang, that means + // -msse2 is on, which means the compiler is allowed to use SSE2 + // instructions at will, and so are we. 
+ return 1; +} +#endif +#endif + +// ARM NEON +#if defined(STBI_NO_SIMD) && defined(STBI_NEON) +#undef STBI_NEON +#endif + +#ifdef STBI_NEON +#include +// assume GCC or Clang on ARM targets +#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) +#endif + +#ifndef STBI_SIMD_ALIGN +#define STBI_SIMD_ALIGN(type, name) type name +#endif + +/////////////////////////////////////////////// +// +// stbi__context struct and start_xxx functions + +// stbi__context structure is our basic context used by all images, so it +// contains all the IO context, plus some basic image information +typedef struct +{ + stbi__uint32 img_x, img_y; + int img_n, img_out_n; + + stbi_io_callbacks io; + void *io_user_data; + + int read_from_callbacks; + int buflen; + stbi_uc buffer_start[128]; + + stbi_uc *img_buffer, *img_buffer_end; + stbi_uc *img_buffer_original, *img_buffer_original_end; +} stbi__context; + + +static void stbi__refill_buffer(stbi__context *s); + +// initialize a memory-decode context +static void stbi__start_mem(stbi__context *s, stbi_uc const *buffer, int len) +{ + s->io.read = NULL; + s->read_from_callbacks = 0; + s->img_buffer = s->img_buffer_original = (stbi_uc *) buffer; + s->img_buffer_end = s->img_buffer_original_end = (stbi_uc *) buffer+len; +} + +// initialize a callback-based context +static void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void *user) +{ + s->io = *c; + s->io_user_data = user; + s->buflen = sizeof(s->buffer_start); + s->read_from_callbacks = 1; + s->img_buffer_original = s->buffer_start; + stbi__refill_buffer(s); + s->img_buffer_original_end = s->img_buffer_end; +} + +// this is not threadsafe +static const char *stbi__g_failure_reason; + +STBIDEF const char *stbi_failure_reason(void) +{ + return stbi__g_failure_reason; +} + +static int stbi__err(const char *str) +{ + stbi__g_failure_reason = str; + return 0; +} + +static void *stbi__malloc(size_t size) +{ + return STBI_MALLOC(size); +} + + +#ifndef STBI_NO_STDIO + +static int stbi__stdio_read(void *user, char *data, int size) +{ + return (int) fread(data,1,size,(FILE*) user); +} + +static void stbi__stdio_skip(void *user, int n) +{ + if (fseek((FILE*) user, n, SEEK_CUR) == -1) + stbi__err("fseek() error"); +} + +static int stbi__stdio_eof(void *user) +{ + return feof((FILE*) user); +} + +static stbi_io_callbacks stbi__stdio_callbacks = +{ + stbi__stdio_read, + stbi__stdio_skip, + stbi__stdio_eof, +}; + +static void stbi__start_file(stbi__context *s, FILE *f) +{ + stbi__start_callbacks(s, &stbi__stdio_callbacks, (void *) f); +} + +//static void stop_file(stbi__context *s) { } + +#endif // !STBI_NO_STDIO + +static void stbi__rewind(stbi__context *s) +{ + // conceptually rewind SHOULD rewind to the beginning of the stream, + // but we just rewind to the beginning of the initial buffer, because + // we only use it after doing 'test', which only ever looks at at most 92 bytes + s->img_buffer = s->img_buffer_original; + s->img_buffer_end = s->img_buffer_original_end; +} + +enum +{ + STBI_ORDER_RGB, + STBI_ORDER_BGR +}; + +typedef struct +{ + int bits_per_channel; + int num_channels; + int channel_order; +} stbi__result_info; + +#ifndef STBI_NO_JPEG +static int stbi__jpeg_test(stbi__context *s); +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNG +static int stbi__png_test(stbi__context *s); +static void 
*stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__png_is16(stbi__context *s); +#endif + +#ifndef STBI_NO_BMP +static int stbi__bmp_test(stbi__context *s); +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_TGA +static int stbi__tga_test(stbi__context *s); +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s); +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc); +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp); +static int stbi__psd_is16(stbi__context *s); +#endif + +#ifndef STBI_NO_HDR +static int stbi__hdr_test(stbi__context *s); +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_test(stbi__context *s); +static void *stbi__pic_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_GIF +static int stbi__gif_test(stbi__context *s); +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp); +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNM +static int stbi__pnm_test(stbi__context *s); +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri); +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +// stb_image uses ints pervasively, including for offset calculations. +// therefore the largest decoded image size we can support with the +// current code, even on 64-bit targets, is INT_MAX. this is not a +// significant limitation for the intended use case. +// +// we do, however, need to make sure our size calculations don't +// overflow. hence a few helper functions for size calculations that +// multiply integers together, making sure that they're non-negative +// and no overflow occurs. + +// return 1 if the sum is valid, 0 on overflow. +// negative terms are considered invalid. +static int stbi__addsizes_valid(int a, int b) +{ + if (b < 0) return 0; + // now 0 <= b <= INT_MAX, hence also + // 0 <= INT_MAX - b <= INTMAX. + // And "a + b <= INT_MAX" (which might overflow) is the + // same as a <= INT_MAX - b (no overflow) + return a <= INT_MAX - b; +} + +// returns 1 if the product is valid, 0 on overflow. +// negative factors are considered invalid. 
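+// (worked example: for a 25000x25000 4-channel image, w*h = 625000000
+// still fits in an int, but w*h*4 = 2500000000 exceeds INT_MAX, so
+// stbi__mad3sizes_valid(w, h, 4, 0) below returns 0 and the decoder
+// fails cleanly instead of allocating a wrapped-around size)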
+static int stbi__mul2sizes_valid(int a, int b) +{ + if (a < 0 || b < 0) return 0; + if (b == 0) return 1; // mul-by-0 is always safe + // portable way to check for no overflows in a*b + return a <= INT_MAX/b; +} + +// returns 1 if "a*b + add" has no negative terms/factors and doesn't overflow +static int stbi__mad2sizes_valid(int a, int b, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__addsizes_valid(a*b, add); +} + +// returns 1 if "a*b*c + add" has no negative terms/factors and doesn't overflow +static int stbi__mad3sizes_valid(int a, int b, int c, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__addsizes_valid(a*b*c, add); +} + +// returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) +static int stbi__mad4sizes_valid(int a, int b, int c, int d, int add) +{ + return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) && + stbi__mul2sizes_valid(a*b*c, d) && stbi__addsizes_valid(a*b*c*d, add); +} +#endif + +// mallocs with size overflow checking +static void *stbi__malloc_mad2(int a, int b, int add) +{ + if (!stbi__mad2sizes_valid(a, b, add)) return NULL; + return stbi__malloc(a*b + add); +} + +static void *stbi__malloc_mad3(int a, int b, int c, int add) +{ + if (!stbi__mad3sizes_valid(a, b, c, add)) return NULL; + return stbi__malloc(a*b*c + add); +} + +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) +static void *stbi__malloc_mad4(int a, int b, int c, int d, int add) +{ + if (!stbi__mad4sizes_valid(a, b, c, d, add)) return NULL; + return stbi__malloc(a*b*c*d + add); +} +#endif + +// stbi__err - error +// stbi__errpf - error returning pointer to float +// stbi__errpuc - error returning pointer to unsigned char + +#ifdef STBI_NO_FAILURE_STRINGS + #define stbi__err(x,y) 0 +#elif defined(STBI_FAILURE_USERMSG) + #define stbi__err(x,y) stbi__err(y) +#else + #define stbi__err(x,y) stbi__err(x) +#endif + +#define stbi__errpf(x,y) ((float *)(size_t) (stbi__err(x,y)?NULL:NULL)) +#define stbi__errpuc(x,y) ((unsigned char *)(size_t) (stbi__err(x,y)?NULL:NULL)) + +STBIDEF void stbi_image_free(void *retval_from_stbi_load) +{ + STBI_FREE(retval_from_stbi_load); +} + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp); +#endif + +#ifndef STBI_NO_HDR +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp); +#endif + +static int stbi__vertically_flip_on_load = 0; + +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip) +{ + stbi__vertically_flip_on_load = flag_true_if_should_flip; +} + +static void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int /*bpc*/) +{ + memset(ri, 0, sizeof(*ri)); // make sure it's initialized if we add new fields + ri->bits_per_channel = 8; // default is 8 so most paths don't have to be changed + ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order + ri->num_channels = 0; + + #ifndef STBI_NO_JPEG + if (stbi__jpeg_test(s)) return stbi__jpeg_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PNG + if (stbi__png_test(s)) return stbi__png_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_BMP + if (stbi__bmp_test(s)) return stbi__bmp_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_GIF + if (stbi__gif_test(s)) return stbi__gif_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PSD + if (stbi__psd_test(s)) 
return stbi__psd_load(s,x,y,comp,req_comp, ri, bpc); + #endif + #ifndef STBI_NO_PIC + if (stbi__pic_test(s)) return stbi__pic_load(s,x,y,comp,req_comp, ri); + #endif + #ifndef STBI_NO_PNM + if (stbi__pnm_test(s)) return stbi__pnm_load(s,x,y,comp,req_comp, ri); + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + float *hdr = stbi__hdr_load(s, x,y,comp,req_comp, ri); + return stbi__hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp); + } + #endif + + #ifndef STBI_NO_TGA + // test tga last because it's a crappy test! + if (stbi__tga_test(s)) + return stbi__tga_load(s,x,y,comp,req_comp, ri); + #endif + + return stbi__errpuc("unknown image type", "Image not of any known type, or corrupt"); +} + +static stbi_uc *stbi__convert_16_to_8(stbi__uint16 *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi_uc *reduced; + + reduced = (stbi_uc *) stbi__malloc(img_len); + if (reduced == NULL) return stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + reduced[i] = (stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is sufficient approx of 16->8 bit scaling + + STBI_FREE(orig); + return reduced; +} + +static stbi__uint16 *stbi__convert_8_to_16(stbi_uc *orig, int w, int h, int channels) +{ + int i; + int img_len = w * h * channels; + stbi__uint16 *enlarged; + + enlarged = (stbi__uint16 *) stbi__malloc(img_len*2); + if (enlarged == NULL) return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + + for (i = 0; i < img_len; ++i) + enlarged[i] = (stbi__uint16)((orig[i] << 8) + orig[i]); // replicate to high and low byte, maps 0->0, 255->0xffff + + STBI_FREE(orig); + return enlarged; +} + +static void stbi__vertical_flip(void *image, int w, int h, int bytes_per_pixel) +{ + int row; + size_t bytes_per_row = (size_t)w * bytes_per_pixel; + stbi_uc temp[2048]; + stbi_uc *bytes = (stbi_uc *)image; + + for (row = 0; row < (h>>1); row++) { + stbi_uc *row0 = bytes + row*bytes_per_row; + stbi_uc *row1 = bytes + (h - row - 1)*bytes_per_row; + // swap row0 with row1 + size_t bytes_left = bytes_per_row; + while (bytes_left) { + size_t bytes_copy = (bytes_left < sizeof(temp)) ? bytes_left : sizeof(temp); + memcpy(temp, row0, bytes_copy); + memcpy(row0, row1, bytes_copy); + memcpy(row1, temp, bytes_copy); + row0 += bytes_copy; + row1 += bytes_copy; + bytes_left -= bytes_copy; + } + } +} + +#ifndef STBI_NO_GIF +static void stbi__vertical_flip_slices(void *image, int w, int h, int z, int bytes_per_pixel) +{ + int slice; + int slice_size = w * h * bytes_per_pixel; + + stbi_uc *bytes = (stbi_uc *)image; + for (slice = 0; slice < z; ++slice) { + stbi__vertical_flip(bytes, w, h, bytes_per_pixel); + bytes += slice_size; + } +} +#endif + +static unsigned char *stbi__load_and_postprocess_8bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8); + + if (result == NULL) + return NULL; + + if (ri.bits_per_channel != 8) { + STBI_ASSERT(ri.bits_per_channel == 16); + result = stbi__convert_16_to_8((stbi__uint16 *) result, *x, *y, req_comp == 0 ? *comp : req_comp); + ri.bits_per_channel = 8; + } + + // @TODO: move stbi__convert_format to here + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? 
req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi_uc)); + } + + return (unsigned char *) result; +} + +static stbi__uint16 *stbi__load_and_postprocess_16bit(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__result_info ri; + void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16); + + if (result == NULL) + return NULL; + + if (ri.bits_per_channel != 16) { + STBI_ASSERT(ri.bits_per_channel == 8); + result = stbi__convert_8_to_16((stbi_uc *) result, *x, *y, req_comp == 0 ? *comp : req_comp); + ri.bits_per_channel = 16; + } + + // @TODO: move stbi__convert_format16 to here + // @TODO: special case RGB-to-Y (and RGBA-to-YA) for 8-bit-to-16-bit case to keep more precision + + if (stbi__vertically_flip_on_load) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi__uint16)); + } + + return (stbi__uint16 *) result; +} + +#if !defined(STBI_NO_HDR) || !defined(STBI_NO_LINEAR) +static void stbi__float_postprocess(float *result, int *x, int *y, int *comp, int req_comp) +{ + if (stbi__vertically_flip_on_load && result != NULL) { + int channels = req_comp ? req_comp : *comp; + stbi__vertical_flip(result, *x, *y, channels * sizeof(float)); + } +} +#endif + +#ifndef STBI_NO_STDIO + +static FILE *stbi__fopen(char const *filename, char const *mode) +{ + FILE *f; +#if defined(_MSC_VER) + DWORD cch = + MultiByteToWideChar(CP_UTF8, 0, filename, -1, nullptr, 0); + wchar_t *filenameU = new wchar_t[cch]; + if(!filenameU) + { + return 0; + } + MultiByteToWideChar(CP_UTF8, 0, filename, -1, filenameU, cch); + cch = MultiByteToWideChar(CP_UTF8, 0, mode, -1, nullptr, 0); + wchar_t *modeU = new wchar_t[cch]; + if(!modeU) + { + delete[] filenameU; + return 0; + } + MultiByteToWideChar(CP_UTF8, 0, mode, -1, modeU, cch); +#if _MSC_VER >= 1400 + if(0 != _wfopen_s(&f, filenameU, modeU)) + f = 0; + delete[] filenameU; + delete[] modeU; +#else // _MSC_VER >= 1400 + f = _wfopen(filenameU, modeU); + delete[] filenameU; + delete[] modeU; +#endif // _MSC_VER >= 1400 +#else // _MSC_VER + f = fopen(filename, mode); +#endif //_MSC_VER + return f; +} + + +STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + unsigned char *result; + if (!f) return stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file(f,x,y,comp,req_comp); + fclose(f); + return result; +} + +STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + if (fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR) == -1) { + stbi_image_free(result); + return stbi__errpuc("fseek() error", "File Seek Fail"); + } + } + return result; +} + +STBIDEF stbi__uint16 *stbi_load_from_file_16(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__uint16 *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_and_postprocess_16bit(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + if (fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR) == -1) { + stbi_image_free(result); + return (stbi__uint16 *) stbi__errpuc("fseek() error", "File Seek Fail"); + } + } + return result; +} + +STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, 
int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + stbi__uint16 *result; + if (!f) return (stbi_us *) stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file_16(f,x,y,comp,req_comp); + fclose(f); + return result; +} + + +#endif //!STBI_NO_STDIO + +STBIDEF stbi_us *stbi_load_16_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); +} + +STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user); + return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels); +} + +STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); +} + +STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp); +} + +#ifndef STBI_NO_GIF +STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_mem(&s,buffer,len); + + result = (unsigned char*) stbi__load_gif_main(&s, delays, x, y, z, comp, req_comp); + if (stbi__vertically_flip_on_load) { + stbi__vertical_flip_slices( result, *x, *y, *z, *comp ); + } + + return result; +} +#endif + +#ifndef STBI_NO_LINEAR +static float *stbi__loadf_main(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *data; + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + stbi__result_info ri; + float *hdr_data = stbi__hdr_load(s,x,y,comp,req_comp, &ri); + if (hdr_data) + stbi__float_postprocess(hdr_data,x,y,comp,req_comp); + return hdr_data; + } + #endif + data = stbi__load_and_postprocess_8bit(s, x, y, comp, req_comp); + if (data) + return stbi__ldr_to_hdr(data, *x, *y, req_comp ? 
req_comp : *comp); + return stbi__errpf("unknown image type", "Image not of any known type, or corrupt"); +} + +STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} + +STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} + +#ifndef STBI_NO_STDIO +STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + float *result; + FILE *f = stbi__fopen(filename, "rb"); + if (!f) return stbi__errpf("can't fopen", "Unable to open file"); + result = stbi_loadf_from_file(f,x,y,comp,req_comp); + fclose(f); + return result; +} + +STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_file(&s,f); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} +#endif // !STBI_NO_STDIO + +#endif // !STBI_NO_LINEAR + +// these is-hdr-or-not is defined independent of whether STBI_NO_LINEAR is +// defined, for API simplicity; if STBI_NO_LINEAR is defined, it always +// reports false! + +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(buffer); + STBI_NOTUSED(len); + return 0; + #endif +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result=0; + if (f) { + result = stbi_is_hdr_from_file(f); + fclose(f); + } + return result; +} + +STBIDEF int stbi_is_hdr_from_file(FILE *f) +{ + #ifndef STBI_NO_HDR + long pos = ftell(f); + int res; + stbi__context s; + stbi__start_file(&s,f); + res = stbi__hdr_test(&s); + if (fseek(f, pos, SEEK_SET) == -1) return stbi__err("fseek() error", "File Seek Fail"); + return res; + #else + STBI_NOTUSED(f); + return 0; + #endif +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(clbk); + STBI_NOTUSED(user); + return 0; + #endif +} + +#ifndef STBI_NO_LINEAR +static float stbi__l2h_gamma=2.2f, stbi__l2h_scale=1.0f; + +STBIDEF void stbi_ldr_to_hdr_gamma(float gamma) { stbi__l2h_gamma = gamma; } +STBIDEF void stbi_ldr_to_hdr_scale(float scale) { stbi__l2h_scale = scale; } +#endif + +static float stbi__h2l_gamma_i=1.0f/2.2f, stbi__h2l_scale_i=1.0f; + +STBIDEF void stbi_hdr_to_ldr_gamma(float gamma) { stbi__h2l_gamma_i = 1/gamma; } +STBIDEF void stbi_hdr_to_ldr_scale(float scale) { stbi__h2l_scale_i = 1/scale; } + + +////////////////////////////////////////////////////////////////////////////// +// +// Common code used by all image loaders +// + +enum +{ + STBI__SCAN_load=0, + STBI__SCAN_type, + STBI__SCAN_header +}; + +static void stbi__refill_buffer(stbi__context *s) +{ + int n = (s->io.read)(s->io_user_data,(char*)s->buffer_start,s->buflen); + if (n == 0) { + // at end of file, treat same as if from memory, but need to handle case + // where s->img_buffer isn't pointing to safe memory, e.g. 
0-byte file + s->read_from_callbacks = 0; + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start+1; + *s->img_buffer = 0; + } else { + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start + n; + } +} + +stbi_inline static stbi_uc stbi__get8(stbi__context *s) +{ + if (s->img_buffer < s->img_buffer_end) + return *s->img_buffer++; + if (s->read_from_callbacks) { + stbi__refill_buffer(s); + return *s->img_buffer++; + } + return 0; +} + +stbi_inline static int stbi__at_eof(stbi__context *s) +{ + if (s->io.read) { + if (!(s->io.eof)(s->io_user_data)) return 0; + // if feof() is true, check if buffer = end + // special case: we've only got the special 0 character at the end + if (s->read_from_callbacks == 0) return 1; + } + + return s->img_buffer >= s->img_buffer_end; +} + +static void stbi__skip(stbi__context *s, int n) +{ + if (n < 0) { + s->img_buffer = s->img_buffer_end; + return; + } + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + s->img_buffer = s->img_buffer_end; + (s->io.skip)(s->io_user_data, n - blen); + return; + } + } + s->img_buffer += n; +} + +static int stbi__getn(stbi__context *s, stbi_uc *buffer, int n) +{ + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + int res, count; + + memcpy(buffer, s->img_buffer, blen); + + count = (s->io.read)(s->io_user_data, (char*) buffer + blen, n - blen); + res = (count == (n-blen)); + s->img_buffer = s->img_buffer_end; + return res; + } + } + + if (s->img_buffer+n <= s->img_buffer_end) { + memcpy(buffer, s->img_buffer, n); + s->img_buffer += n; + return 1; + } else + return 0; +} + +static int stbi__get16be(stbi__context *s) +{ + int z = stbi__get8(s); + return (z << 8) + stbi__get8(s); +} + +static stbi__uint32 stbi__get32be(stbi__context *s) +{ + stbi__uint32 z = stbi__get16be(s); + return (z << 16) + stbi__get16be(s); +} + +#if defined(STBI_NO_BMP) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) +// nothing +#else +static int stbi__get16le(stbi__context *s) +{ + int z = stbi__get8(s); + return z + (stbi__get8(s) << 8); +} +#endif + +#ifndef STBI_NO_BMP +static stbi__uint32 stbi__get32le(stbi__context *s) +{ + stbi__uint32 z = stbi__get16le(s); + return z + (stbi__get16le(s) << 16); +} +#endif + +#define STBI__BYTECAST(x) ((stbi_uc) ((x) & 255)) // truncate int to byte without warnings + + +////////////////////////////////////////////////////////////////////////////// +// +// generic converter from built-in img_n to req_comp +// individual types do this automatically as much as possible (e.g. jpeg +// does all cases internally since it needs to colorspace convert anyway, +// and it never has alpha, so very few cases ). 
png can automatically +// interleave an alpha=255 channel, but falls back to this for other cases +// +// assume data buffer is malloced, so malloc a new one and free that one +// only failure mode is malloc failing + +static stbi_uc stbi__compute_y(int r, int g, int b) +{ + return (stbi_uc) (((r*77) + (g*150) + (29*b)) >> 8); +} + +static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + unsigned char *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (unsigned char *) stbi__malloc_mad3(req_comp, x, y, 0); + if (good == NULL) { + STBI_FREE(data); + return stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + unsigned char *src = data + j * x * img_n ; + unsigned char *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0], dest[1]=255; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0], dest[3]=255; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0], dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0],dest[1]=src[1],dest[2]=src[2],dest[3]=255; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]), dest[1] = 255; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]), dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0],dest[1]=src[1],dest[2]=src[2]; } break; + default: STBI_ASSERT(0); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} + +static stbi__uint16 stbi__compute_y_16(int r, int g, int b) +{ + return (stbi__uint16) (((r*77) + (g*150) + (29*b)) >> 8); +} + +static stbi__uint16 *stbi__convert_format16(stbi__uint16 *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + stbi__uint16 *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (stbi__uint16 *) stbi__malloc(req_comp * x * y * 2); + if (good == NULL) { + STBI_FREE(data); + return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + stbi__uint16 *src = data + j * x * img_n ; + stbi__uint16 *dest = good + j * x * req_comp; + + #define STBI__COMBO(a,b) ((a)*8+(b)) + #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (STBI__COMBO(img_n, req_comp)) { + STBI__CASE(1,2) { dest[0]=src[0], dest[1]=0xffff; } break; + STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0], dest[3]=0xffff; } break; + STBI__CASE(2,1) { dest[0]=src[0]; } break; + STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break; + STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0], 
dest[3]=src[1]; } break; + STBI__CASE(3,4) { dest[0]=src[0],dest[1]=src[1],dest[2]=src[2],dest[3]=0xffff; } break; + STBI__CASE(3,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(3,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]), dest[1] = 0xffff; } break; + STBI__CASE(4,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break; + STBI__CASE(4,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]), dest[1] = src[3]; } break; + STBI__CASE(4,3) { dest[0]=src[0],dest[1]=src[1],dest[2]=src[2]; } break; + default: STBI_ASSERT(0); + } + #undef STBI__CASE + } + + STBI_FREE(data); + return good; +} + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp) +{ + int i,k,n; + float *output; + if (!data) return NULL; + output = (float *) stbi__malloc_mad4(x, y, comp, sizeof(float), 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpf("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + output[i*comp + k] = (float) (pow(data[i*comp+k]/255.0f, stbi__l2h_gamma) * stbi__l2h_scale); + } + if (k < comp) output[i*comp + k] = data[i*comp+k]/255.0f; + } + STBI_FREE(data); + return output; +} +#endif + +#ifndef STBI_NO_HDR +#define stbi__float2int(x) ((int) (x)) +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp) +{ + int i,k,n; + stbi_uc *output; + if (!data) return NULL; + output = (stbi_uc *) stbi__malloc_mad3(x, y, comp, 0); + if (output == NULL) { STBI_FREE(data); return stbi__errpuc("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + float z = (float) pow(data[i*comp+k]*stbi__h2l_scale_i, stbi__h2l_gamma_i) * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + if (k < comp) { + float z = data[i*comp+k] * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + } + STBI_FREE(data); + return output; +} +#endif + +////////////////////////////////////////////////////////////////////////////// +// +// "baseline" JPEG/JFIF decoder +// +// simple implementation +// - doesn't support delayed output of y-dimension +// - simple interface (only one output format: 8-bit interleaved RGB) +// - doesn't try to recover corrupt jpegs +// - doesn't allow partial loading, loading multiple at once +// - still fast on x86 (copying globals into locals doesn't help x86) +// - allocates lots of intermediate memory (full size of all components) +// - non-interleaved case requires this anyway +// - allows good upsampling (see next) +// high-quality +// - upsampled channels are bilinearly interpolated, even across blocks +// - quality integer IDCT derived from IJG's 'slow' +// performance +// - fast huffman; reasonable integer IDCT +// - some SIMD kernels for common paths on targets with SSE2/NEON +// - uses a lot of intermediate memory, could cache poorly + +#ifndef STBI_NO_JPEG + +// huffman decoding acceleration +#define FAST_BITS 9 // larger handles more cases; smaller stomps less cache + +typedef struct +{ + stbi_uc fast[1 << FAST_BITS]; + // weirdly, repacking this into AoS is a 10% speed loss, instead of a win + stbi__uint16 code[256]; + stbi_uc values[256]; + stbi_uc size[257]; + unsigned int maxcode[18]; + int delta[17]; // old 'firstsymbol' - old 'firstcode' 
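+   // [editorial note, not part of upstream stb_image] how these tables work
+   // together: for every code of length s <= FAST_BITS, stbi__build_huffman
+   // fills all 2^(FAST_BITS-s) fast[] slots whose top s bits match that code
+   // with the symbol index, so one 9-bit peek decodes a short code in a single
+   // lookup; the value 255 means "not accelerated", and the decoder falls back
+   // to the canonical maxcode[]/delta[] walk (see stbi__jpeg_huff_decode below).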
+} stbi__huffman; + +typedef struct +{ + stbi__context *s; + stbi__huffman huff_dc[4]; + stbi__huffman huff_ac[4]; + stbi__uint16 dequant[4][64]; + stbi__int16 fast_ac[4][1 << FAST_BITS]; + +// sizes for components, interleaved MCUs + int img_h_max, img_v_max; + int img_mcu_x, img_mcu_y; + int img_mcu_w, img_mcu_h; + +// definition of jpeg image component + struct + { + int id; + int h,v; + int tq; + int hd,ha; + int dc_pred; + + int x,y,w2,h2; + stbi_uc *data; + void *raw_data, *raw_coeff; + stbi_uc *linebuf; + short *coeff; // progressive only + int coeff_w, coeff_h; // number of 8x8 coefficient blocks + } img_comp[4]; + + stbi__uint32 code_buffer; // jpeg entropy-coded buffer + int code_bits; // number of valid bits + unsigned char marker; // marker seen while filling entropy buffer + int nomore; // flag if we saw a marker so must stop + + int progressive; + int spec_start; + int spec_end; + int succ_high; + int succ_low; + int eob_run; + int jfif; + int app14_color_transform; // Adobe APP14 tag + int rgb; + + int scan_n, order[4]; + int restart_interval, todo; + +// kernels + void (*idct_block_kernel)(stbi_uc *out, int out_stride, short data[64]); + void (*YCbCr_to_RGB_kernel)(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step); + stbi_uc *(*resample_row_hv_2_kernel)(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs); +} stbi__jpeg; + +static int stbi__build_huffman(stbi__huffman *h, int *count) +{ + int i,j,k=0; + unsigned int code; + // build size list for each symbol (from JPEG spec) + for (i=0; i < 16; ++i) + for (j=0; j < count[i]; ++j) + h->size[k++] = (stbi_uc) (i+1); + h->size[k] = 0; + + // compute actual symbols (from jpeg spec) + code = 0; + k = 0; + for(j=1; j <= 16; ++j) { + // compute delta to add to code to compute symbol id + h->delta[j] = k - code; + if (h->size[k] == j) { + while (h->size[k] == j) + h->code[k++] = (stbi__uint16) (code++); + if (code-1 >= (1u << j)) return stbi__err("bad code lengths","Corrupt JPEG"); + } + // compute largest code + 1 for this size, preshifted as needed later + h->maxcode[j] = code << (16-j); + code <<= 1; + } + h->maxcode[j] = 0xffffffff; + + // build non-spec acceleration table; 255 is flag for not-accelerated + memset(h->fast, 255, 1 << FAST_BITS); + for (i=0; i < k; ++i) { + int s = h->size[i]; + if (s <= FAST_BITS) { + int c = h->code[i] << (FAST_BITS-s); + int m = 1 << (FAST_BITS-s); + for (j=0; j < m; ++j) { + h->fast[c+j] = (stbi_uc) i; + } + } + } + return 1; +} + +// build a table that decodes both magnitude and value of small ACs in +// one go. +static void stbi__build_fast_ac(stbi__int16 *fast_ac, stbi__huffman *h) +{ + int i; + for (i=0; i < (1 << FAST_BITS); ++i) { + stbi_uc fast = h->fast[i]; + fast_ac[i] = 0; + if (fast < 255) { + int rs = h->values[fast]; + int run = (rs >> 4) & 15; + int magbits = rs & 15; + int len = h->size[fast]; + + if (magbits && len + magbits <= FAST_BITS) { + // magnitude code followed by receive_extend code + int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits); + int m = 1 << (magbits - 1); + if (k < m) k += (~0U << magbits) + 1; + // if the result is small enough, we can fit it in fast_ac table + if (k >= -128 && k <= 127) + fast_ac[i] = (stbi__int16) ((k * 256) + (run * 16) + (len + magbits)); + } + } + } +} + +static void stbi__grow_buffer_unsafe(stbi__jpeg *j) +{ + do { + unsigned int b = j->nomore ? 
0 : stbi__get8(j->s); + if (b == 0xff) { + int c = stbi__get8(j->s); + while (c == 0xff) c = stbi__get8(j->s); // consume fill bytes + if (c != 0) { + j->marker = (unsigned char) c; + j->nomore = 1; + return; + } + } + j->code_buffer |= b << (24 - j->code_bits); + j->code_bits += 8; + } while (j->code_bits <= 24); +} + +// (1 << n) - 1 +static const stbi__uint32 stbi__bmask[17]={0,1,3,7,15,31,63,127,255,511,1023,2047,4095,8191,16383,32767,65535}; + +// decode a jpeg huffman value from the bitstream +stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h) +{ + unsigned int temp; + int c,k; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + // look at the top FAST_BITS and determine what symbol ID it is, + // if the code is <= FAST_BITS + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + k = h->fast[c]; + if (k < 255) { + int s = h->size[k]; + if (s > j->code_bits) + return -1; + j->code_buffer <<= s; + j->code_bits -= s; + return h->values[k]; + } + + // naive test is to shift the code_buffer down so k bits are + // valid, then test against maxcode. To speed this up, we've + // preshifted maxcode left so that it has (16-k) 0s at the + // end; in other words, regardless of the number of bits, it + // wants to be compared against something shifted to have 16; + // that way we don't need to shift inside the loop. + temp = j->code_buffer >> 16; + for (k=FAST_BITS+1 ; ; ++k) + if (temp < h->maxcode[k]) + break; + if (k == 17) { + // error! code not found + j->code_bits -= 16; + return -1; + } + + if (k > j->code_bits) + return -1; + + // convert the huffman code to the symbol id + c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k]; + STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]); + + // convert the id to a symbol + j->code_bits -= k; + j->code_buffer <<= k; + return h->values[c]; +} + +// bias[n] = (-1<<n) + 1 +static const int stbi__jbias[16] = {0,-1,-3,-7,-15,-31,-63,-127,-255,-511,-1023,-2047,-4095,-8191,-16383,-32767}; + +// combined JPEG 'receive' and JPEG 'extend', since baseline +// always extends everything it receives. +stbi_inline static int stbi__extend_receive(stbi__jpeg *j, int n) +{ + unsigned int k; + int sgn; + if (j->code_bits < n) stbi__grow_buffer_unsafe(j); + + sgn = (stbi__int32)j->code_buffer >> 31; // sign bit is always in MSB + k = stbi_lrot(j->code_buffer, n); + STBI_ASSERT(n >= 0 && n < (int) (sizeof(stbi__bmask)/sizeof(*stbi__bmask))); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k + (stbi__jbias[n] & ~sgn); +} + +// get some unsigned bits +stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg *j, int n) +{ + unsigned int k; + if (j->code_bits < n) stbi__grow_buffer_unsafe(j); + k = stbi_lrot(j->code_buffer, n); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k; +} + +stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j) +{ + unsigned int k; + if (j->code_bits < 1) stbi__grow_buffer_unsafe(j); + k = j->code_buffer; + j->code_buffer <<= 1; + --j->code_bits; + return k & 0x80000000; +} + +// given a value that's at position X in the zigzag stream, +// where does it appear in the 8x8 matrix coded as row-major?
+static const stbi_uc stbi__jpeg_dezigzag[64+15] = +{ + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63, + // let corrupt input sample past end + 63, 63, 63, 63, 63, 63, 63, 63, + 63, 63, 63, 63, 63, 63, 63 +}; + +// decode one 64-entry block-- +static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman *hdc, stbi__huffman *hac, stbi__int16 *fac, int b, stbi__uint16 *dequant) +{ + int diff,dc,k; + int t; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + t = stbi__jpeg_huff_decode(j, hdc); + if (t < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + + // 0 all the ac values now so we can do it 32-bits at a time + memset(data,0,64*sizeof(data[0])); + + diff = t ? stbi__extend_receive(j, t) : 0; + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + data[0] = (short) (dc * dequant[0]); + + // decode AC components, see JPEG spec + k = 1; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + j->code_buffer <<= s; + j->code_bits -= s; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) * dequant[zig]); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (rs != 0xf0) break; // end block + k += 16; + } else { + k += r; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) * dequant[zig]); + } + } + } while (k < 64); + return 1; +} + +static int stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__huffman *hdc, int b) +{ + int diff,dc; + int t; + if (j->spec_end != 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + if (j->succ_high == 0) { + // first scan for DC coefficient, must be first + memset(data,0,64*sizeof(data[0])); // 0 all the ac values now + t = stbi__jpeg_huff_decode(j, hdc); + diff = t ? 
stbi__extend_receive(j, t) : 0; + + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + data[0] = (short) (dc << j->succ_low); + } else { + // refinement scan for DC coefficient + if (stbi__jpeg_get_bit(j)) + data[0] += (short) (1 << j->succ_low); + } + return 1; +} + +// @OPTIMIZE: store non-zigzagged during the decode passes, +// and only de-zigzag when dequantizing +static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__huffman *hac, stbi__int16 *fac) +{ + int k; + if (j->spec_start == 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->succ_high == 0) { + int shift = j->succ_low; + + if (j->eob_run) { + --j->eob_run; + return 1; + } + + k = j->spec_start; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + j->code_buffer <<= s; + j->code_bits -= s; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) << shift); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r); + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + --j->eob_run; + break; + } + k += 16; + } else { + k += r; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) << shift); + } + } + } while (k <= j->spec_end); + } else { + // refinement scan for these AC coefficients + + short bit = (short) (1 << j->succ_low); + + if (j->eob_run) { + --j->eob_run; + for (k = j->spec_start; k <= j->spec_end; ++k) { + short *p = &data[stbi__jpeg_dezigzag[k]]; + if (*p != 0) + if (stbi__jpeg_get_bit(j)) + if ((*p & bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } + } else { + k = j->spec_start; + do { + int r,s; + int rs = stbi__jpeg_huff_decode(j, hac); // @OPTIMIZE see if we can use the fast path here, advance-by-r is so slow, eh + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r) - 1; + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + r = 64; // force end of block + } else { + // r=15 s=0 should write 16 0s, so we just do + // a run of 15 0s and then write s (which is 0), + // so we don't have to do anything special here + } + } else { + if (s != 1) return stbi__err("bad huffman code", "Corrupt JPEG"); + // sign bit + if (stbi__jpeg_get_bit(j)) + s = bit; + else + s = -bit; + } + + // advance by r + while (k <= j->spec_end) { + short *p = &data[stbi__jpeg_dezigzag[k++]]; + if (*p != 0) { + if (stbi__jpeg_get_bit(j)) + if ((*p & bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } else { + if (r == 0) { + *p = (short) s; + break; + } + --r; + } + } + } while (k <= j->spec_end); + } + } + return 1; +} + +// take a -128..127 value and stbi__clamp it and convert to 0..255 +stbi_inline static stbi_uc stbi__clamp(int x) +{ + // trick to use a single test to catch both cases + if ((unsigned int) x > 255) { + if (x < 0) return 0; + if (x > 255) return 255; + } + return (stbi_uc) x; +} + +#define stbi__f2f(x) ((int) (((x) * 4096 + 0.5))) +#define stbi__fsh(x) ((x) * 4096) + +// derived from jidctint -- DCT_ISLOW +#define STBI__IDCT_1D(s0,s1,s2,s3,s4,s5,s6,s7) \ + int t0,t1,t2,t3,p1,p2,p3,p4,p5,x0,x1,x2,x3; \ + p2 = s2; \ + p3 = s6; \ + p1 = (p2+p3) * 
stbi__f2f(0.5411961f); \ + t2 = p1 + p3*stbi__f2f(-1.847759065f); \ + t3 = p1 + p2*stbi__f2f( 0.765366865f); \ + p2 = s0; \ + p3 = s4; \ + t0 = stbi__fsh(p2+p3); \ + t1 = stbi__fsh(p2-p3); \ + x0 = t0+t3; \ + x3 = t0-t3; \ + x1 = t1+t2; \ + x2 = t1-t2; \ + t0 = s7; \ + t1 = s5; \ + t2 = s3; \ + t3 = s1; \ + p3 = t0+t2; \ + p4 = t1+t3; \ + p1 = t0+t3; \ + p2 = t1+t2; \ + p5 = (p3+p4)*stbi__f2f( 1.175875602f); \ + t0 = t0*stbi__f2f( 0.298631336f); \ + t1 = t1*stbi__f2f( 2.053119869f); \ + t2 = t2*stbi__f2f( 3.072711026f); \ + t3 = t3*stbi__f2f( 1.501321110f); \ + p1 = p5 + p1*stbi__f2f(-0.899976223f); \ + p2 = p5 + p2*stbi__f2f(-2.562915447f); \ + p3 = p3*stbi__f2f(-1.961570560f); \ + p4 = p4*stbi__f2f(-0.390180644f); \ + t3 += p1+p4; \ + t2 += p2+p3; \ + t1 += p2+p4; \ + t0 += p1+p3; + +static void stbi__idct_block(stbi_uc *out, int out_stride, short data[64]) +{ + int i,val[64],*v=val; + stbi_uc *o; + short *d = data; + + // columns + for (i=0; i < 8; ++i,++d, ++v) { + // if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing + if (d[ 8]==0 && d[16]==0 && d[24]==0 && d[32]==0 + && d[40]==0 && d[48]==0 && d[56]==0) { + // no shortcut 0 seconds + // (1|2|3|4|5|6|7)==0 0 seconds + // all separate -0.047 seconds + // 1 && 2|3 && 4|5 && 6|7: -0.047 seconds + int dcterm = d[0]*4; + v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm; + } else { + STBI__IDCT_1D(d[ 0],d[ 8],d[16],d[24],d[32],d[40],d[48],d[56]) + // constants scaled things up by 1<<12; let's bring them back + // down, but keep 2 extra bits of precision + x0 += 512; x1 += 512; x2 += 512; x3 += 512; + v[ 0] = (x0+t3) >> 10; + v[56] = (x0-t3) >> 10; + v[ 8] = (x1+t2) >> 10; + v[48] = (x1-t2) >> 10; + v[16] = (x2+t1) >> 10; + v[40] = (x2-t1) >> 10; + v[24] = (x3+t0) >> 10; + v[32] = (x3-t0) >> 10; + } + } + + for (i=0, v=val, o=out; i < 8; ++i,v+=8,o+=out_stride) { + // no fast case since the first 1D IDCT spread components out + STBI__IDCT_1D(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7]) + // constants scaled things up by 1<<12, plus we had 1<<2 from first + // loop, plus horizontal and vertical each scale by sqrt(8) so together + // we've got an extra 1<<3, so 1<<17 total we need to remove. + // so we want to round that, which means adding 0.5 * 1<<17, + // aka 65536. Also, we'll end up with -128 to 127 that we want + // to encode as 0..255 by adding 128, so we'll add that before the shift + x0 += 65536 + (128<<17); + x1 += 65536 + (128<<17); + x2 += 65536 + (128<<17); + x3 += 65536 + (128<<17); + // tried computing the shifts into temps, or'ing the temps to see + // if any were out of range, but that was slower + o[0] = stbi__clamp((x0+t3) >> 17); + o[7] = stbi__clamp((x0-t3) >> 17); + o[1] = stbi__clamp((x1+t2) >> 17); + o[6] = stbi__clamp((x1-t2) >> 17); + o[2] = stbi__clamp((x2+t1) >> 17); + o[5] = stbi__clamp((x2-t1) >> 17); + o[3] = stbi__clamp((x3+t0) >> 17); + o[4] = stbi__clamp((x3-t0) >> 17); + } +} + +#ifdef STBI_SSE2 +// sse2 integer IDCT. not the fastest possible implementation but it +// produces bit-identical results to the generic C version so it's +// fully "transparent". +static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + // This is constructed to match our regular (generic) integer IDCT exactly. 
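+   // [editorial note, not part of upstream stb_image] concretely, the match
+   // comes from reusing the scalar version's exact rounding terms: the column
+   // pass adds 512 before shifting right by 10 (dct_pass(bias_0, 10)) and the
+   // row pass adds 65536 + (128<<17) before shifting right by 17
+   // (dct_pass(bias_1, 17)), the same constants stbi__idct_block uses above,
+   // so the two paths produce bit-identical pixels.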
+ __m128i row0, row1, row2, row3, row4, row5, row6, row7; + __m128i tmp; + + // dot product constant: even elems=x, odd elems=y + #define dct_const(x,y) _mm_setr_epi16((x),(y),(x),(y),(x),(y),(x),(y)) + + // out(0) = c0[even]*x + c0[odd]*y (c0, x, y 16-bit, out 32-bit) + // out(1) = c1[even]*x + c1[odd]*y + #define dct_rot(out0,out1, x,y,c0,c1) \ + __m128i c0##lo = _mm_unpacklo_epi16((x),(y)); \ + __m128i c0##hi = _mm_unpackhi_epi16((x),(y)); \ + __m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \ + __m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \ + __m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \ + __m128i out1##_h = _mm_madd_epi16(c0##hi, c1) + + // out = in << 12 (in 16-bit, out 32-bit) + #define dct_widen(out, in) \ + __m128i out##_l = _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \ + __m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4) + + // wide add + #define dct_wadd(out, a, b) \ + __m128i out##_l = _mm_add_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_add_epi32(a##_h, b##_h) + + // wide sub + #define dct_wsub(out, a, b) \ + __m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_sub_epi32(a##_h, b##_h) + + // butterfly a/b, add bias, then shift by "s" and pack + #define dct_bfly32o(out0, out1, a,b,bias,s) \ + { \ + __m128i abiased_l = _mm_add_epi32(a##_l, bias); \ + __m128i abiased_h = _mm_add_epi32(a##_h, bias); \ + dct_wadd(sum, abiased, b); \ + dct_wsub(dif, abiased, b); \ + out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \ + out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \ + } + + // 8-bit interleave step (for transposes) + #define dct_interleave8(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi8(a, b); \ + b = _mm_unpackhi_epi8(tmp, b) + + // 16-bit interleave step (for transposes) + #define dct_interleave16(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi16(a, b); \ + b = _mm_unpackhi_epi16(tmp, b) + + #define dct_pass(bias,shift) \ + { \ + /* even part */ \ + dct_rot(t2e,t3e, row2,row6, rot0_0,rot0_1); \ + __m128i sum04 = _mm_add_epi16(row0, row4); \ + __m128i dif04 = _mm_sub_epi16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + dct_rot(y0o,y2o, row7,row3, rot2_0,rot2_1); \ + dct_rot(y1o,y3o, row5,row1, rot3_0,rot3_1); \ + __m128i sum17 = _mm_add_epi16(row1, row7); \ + __m128i sum35 = _mm_add_epi16(row3, row5); \ + dct_rot(y4o,y5o, sum17,sum35, rot1_0,rot1_1); \ + dct_wadd(x4, y0o, y4o); \ + dct_wadd(x5, y1o, y5o); \ + dct_wadd(x6, y2o, y5o); \ + dct_wadd(x7, y3o, y4o); \ + dct_bfly32o(row0,row7, x0,x7,bias,shift); \ + dct_bfly32o(row1,row6, x1,x6,bias,shift); \ + dct_bfly32o(row2,row5, x2,x5,bias,shift); \ + dct_bfly32o(row3,row4, x3,x4,bias,shift); \ + } + + __m128i rot0_0 = dct_const(stbi__f2f(0.5411961f), stbi__f2f(0.5411961f) + stbi__f2f(-1.847759065f)); + __m128i rot0_1 = dct_const(stbi__f2f(0.5411961f) + stbi__f2f( 0.765366865f), stbi__f2f(0.5411961f)); + __m128i rot1_0 = dct_const(stbi__f2f(1.175875602f) + stbi__f2f(-0.899976223f), stbi__f2f(1.175875602f)); + __m128i rot1_1 = dct_const(stbi__f2f(1.175875602f), stbi__f2f(1.175875602f) + stbi__f2f(-2.562915447f)); + __m128i rot2_0 = dct_const(stbi__f2f(-1.961570560f) + stbi__f2f( 0.298631336f), stbi__f2f(-1.961570560f)); + __m128i rot2_1 = dct_const(stbi__f2f(-1.961570560f), stbi__f2f(-1.961570560f) + stbi__f2f( 3.072711026f)); + __m128i rot3_0 = 
dct_const(stbi__f2f(-0.390180644f) + stbi__f2f( 2.053119869f), stbi__f2f(-0.390180644f)); + __m128i rot3_1 = dct_const(stbi__f2f(-0.390180644f), stbi__f2f(-0.390180644f) + stbi__f2f( 1.501321110f)); + + // rounding biases in column/row passes, see stbi__idct_block for explanation. + __m128i bias_0 = _mm_set1_epi32(512); + __m128i bias_1 = _mm_set1_epi32(65536 + (128<<17)); + + // load + row0 = _mm_load_si128((const __m128i *) (data + 0*8)); + row1 = _mm_load_si128((const __m128i *) (data + 1*8)); + row2 = _mm_load_si128((const __m128i *) (data + 2*8)); + row3 = _mm_load_si128((const __m128i *) (data + 3*8)); + row4 = _mm_load_si128((const __m128i *) (data + 4*8)); + row5 = _mm_load_si128((const __m128i *) (data + 5*8)); + row6 = _mm_load_si128((const __m128i *) (data + 6*8)); + row7 = _mm_load_si128((const __m128i *) (data + 7*8)); + + // column pass + dct_pass(bias_0, 10); + + { + // 16bit 8x8 transpose pass 1 + dct_interleave16(row0, row4); + dct_interleave16(row1, row5); + dct_interleave16(row2, row6); + dct_interleave16(row3, row7); + + // transpose pass 2 + dct_interleave16(row0, row2); + dct_interleave16(row1, row3); + dct_interleave16(row4, row6); + dct_interleave16(row5, row7); + + // transpose pass 3 + dct_interleave16(row0, row1); + dct_interleave16(row2, row3); + dct_interleave16(row4, row5); + dct_interleave16(row6, row7); + } + + // row pass + dct_pass(bias_1, 17); + + { + // pack + __m128i p0 = _mm_packus_epi16(row0, row1); // a0a1a2a3...a7b0b1b2b3...b7 + __m128i p1 = _mm_packus_epi16(row2, row3); + __m128i p2 = _mm_packus_epi16(row4, row5); + __m128i p3 = _mm_packus_epi16(row6, row7); + + // 8bit 8x8 transpose pass 1 + dct_interleave8(p0, p2); // a0e0a1e1... + dct_interleave8(p1, p3); // c0g0c1g1... + + // transpose pass 2 + dct_interleave8(p0, p1); // a0c0e0g0... + dct_interleave8(p2, p3); // b0d0f0h0... + + // transpose pass 3 + dct_interleave8(p0, p2); // a0b0c0d0... + dct_interleave8(p1, p3); // a4b4c4d4... + + // store + _mm_storel_epi64((__m128i *) out, p0); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p0, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p2); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p2, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p1); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p1, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p3); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p3, 0x4e)); + } + +#undef dct_const +#undef dct_rot +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_interleave8 +#undef dct_interleave16 +#undef dct_pass +} + +#endif // STBI_SSE2 + +#ifdef STBI_NEON + +// NEON integer IDCT. should produce bit-identical +// results to the generic C version. 
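+// [editorial note, not part of upstream stb_image] one difference from the
+// SSE2 path: instead of adding a 128<<17 bias in the row pass, this version
+// folds the +128 output offset into the DC coefficient ("add DC bias" below,
+// vsetq_lane_s16(1024, ...)). The 2D IDCT spreads the DC term across all 64
+// outputs at 1/8 scale, so adding 128*8 = 1024 up front lifts every pixel by
+// +128; rounding comes from the rounding shifts themselves (vrshrn_n_s32 in
+// the column pass, vqrshrun_n_s16 in the final pack) rather than added biases.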
+static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + int16x8_t row0, row1, row2, row3, row4, row5, row6, row7; + + int16x4_t rot0_0 = vdup_n_s16(stbi__f2f(0.5411961f)); + int16x4_t rot0_1 = vdup_n_s16(stbi__f2f(-1.847759065f)); + int16x4_t rot0_2 = vdup_n_s16(stbi__f2f( 0.765366865f)); + int16x4_t rot1_0 = vdup_n_s16(stbi__f2f( 1.175875602f)); + int16x4_t rot1_1 = vdup_n_s16(stbi__f2f(-0.899976223f)); + int16x4_t rot1_2 = vdup_n_s16(stbi__f2f(-2.562915447f)); + int16x4_t rot2_0 = vdup_n_s16(stbi__f2f(-1.961570560f)); + int16x4_t rot2_1 = vdup_n_s16(stbi__f2f(-0.390180644f)); + int16x4_t rot3_0 = vdup_n_s16(stbi__f2f( 0.298631336f)); + int16x4_t rot3_1 = vdup_n_s16(stbi__f2f( 2.053119869f)); + int16x4_t rot3_2 = vdup_n_s16(stbi__f2f( 3.072711026f)); + int16x4_t rot3_3 = vdup_n_s16(stbi__f2f( 1.501321110f)); + +#define dct_long_mul(out, inq, coeff) \ + int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff) + +#define dct_long_mac(out, acc, inq, coeff) \ + int32x4_t out##_l = vmlal_s16(acc##_l, vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff) + +#define dct_widen(out, inq) \ + int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \ + int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12) + +// wide add +#define dct_wadd(out, a, b) \ + int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vaddq_s32(a##_h, b##_h) + +// wide sub +#define dct_wsub(out, a, b) \ + int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vsubq_s32(a##_h, b##_h) + +// butterfly a/b, then shift using "shiftop" by "s" and pack +#define dct_bfly32o(out0,out1, a,b,shiftop,s) \ + { \ + dct_wadd(sum, a, b); \ + dct_wsub(dif, a, b); \ + out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \ + out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \ + } + +#define dct_pass(shiftop, shift) \ + { \ + /* even part */ \ + int16x8_t sum26 = vaddq_s16(row2, row6); \ + dct_long_mul(p1e, sum26, rot0_0); \ + dct_long_mac(t2e, p1e, row6, rot0_1); \ + dct_long_mac(t3e, p1e, row2, rot0_2); \ + int16x8_t sum04 = vaddq_s16(row0, row4); \ + int16x8_t dif04 = vsubq_s16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + int16x8_t sum15 = vaddq_s16(row1, row5); \ + int16x8_t sum17 = vaddq_s16(row1, row7); \ + int16x8_t sum35 = vaddq_s16(row3, row5); \ + int16x8_t sum37 = vaddq_s16(row3, row7); \ + int16x8_t sumodd = vaddq_s16(sum17, sum35); \ + dct_long_mul(p5o, sumodd, rot1_0); \ + dct_long_mac(p1o, p5o, sum17, rot1_1); \ + dct_long_mac(p2o, p5o, sum35, rot1_2); \ + dct_long_mul(p3o, sum37, rot2_0); \ + dct_long_mul(p4o, sum15, rot2_1); \ + dct_wadd(sump13o, p1o, p3o); \ + dct_wadd(sump24o, p2o, p4o); \ + dct_wadd(sump23o, p2o, p3o); \ + dct_wadd(sump14o, p1o, p4o); \ + dct_long_mac(x4, sump13o, row7, rot3_0); \ + dct_long_mac(x5, sump24o, row5, rot3_1); \ + dct_long_mac(x6, sump23o, row3, rot3_2); \ + dct_long_mac(x7, sump14o, row1, rot3_3); \ + dct_bfly32o(row0,row7, x0,x7,shiftop,shift); \ + dct_bfly32o(row1,row6, x1,x6,shiftop,shift); \ + dct_bfly32o(row2,row5, x2,x5,shiftop,shift); \ + dct_bfly32o(row3,row4, x3,x4,shiftop,shift); \ + } + + // load + row0 = vld1q_s16(data + 0*8); + row1 = vld1q_s16(data + 1*8); + row2 = vld1q_s16(data + 2*8); + row3 = vld1q_s16(data + 3*8); + row4 = vld1q_s16(data + 4*8); + row5 = 
vld1q_s16(data + 5*8); + row6 = vld1q_s16(data + 6*8); + row7 = vld1q_s16(data + 7*8); + + // add DC bias + row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0)); + + // column pass + dct_pass(vrshrn_n_s32, 10); + + // 16bit 8x8 transpose + { +// these three map to a single VTRN.16, VTRN.32, and VSWP, respectively. +// whether compilers actually get this is another story, sadly. +#define dct_trn16(x, y) { int16x8x2_t t = vtrnq_s16(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn32(x, y) { int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); x = vreinterpretq_s16_s32(t.val[0]); y = vreinterpretq_s16_s32(t.val[1]); } +#define dct_trn64(x, y) { int16x8_t x0 = x; int16x8_t y0 = y; x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0)); y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); } + + // pass 1 + dct_trn16(row0, row1); // a0b0a2b2a4b4a6b6 + dct_trn16(row2, row3); + dct_trn16(row4, row5); + dct_trn16(row6, row7); + + // pass 2 + dct_trn32(row0, row2); // a0b0c0d0a4b4c4d4 + dct_trn32(row1, row3); + dct_trn32(row4, row6); + dct_trn32(row5, row7); + + // pass 3 + dct_trn64(row0, row4); // a0b0c0d0e0f0g0h0 + dct_trn64(row1, row5); + dct_trn64(row2, row6); + dct_trn64(row3, row7); + +#undef dct_trn16 +#undef dct_trn32 +#undef dct_trn64 + } + + // row pass + // vrshrn_n_s32 only supports shifts up to 16, we need + // 17. so do a non-rounding shift of 16 first then follow + // up with a rounding shift by 1. + dct_pass(vshrn_n_s32, 16); + + { + // pack and round + uint8x8_t p0 = vqrshrun_n_s16(row0, 1); + uint8x8_t p1 = vqrshrun_n_s16(row1, 1); + uint8x8_t p2 = vqrshrun_n_s16(row2, 1); + uint8x8_t p3 = vqrshrun_n_s16(row3, 1); + uint8x8_t p4 = vqrshrun_n_s16(row4, 1); + uint8x8_t p5 = vqrshrun_n_s16(row5, 1); + uint8x8_t p6 = vqrshrun_n_s16(row6, 1); + uint8x8_t p7 = vqrshrun_n_s16(row7, 1); + + // again, these can translate into one instruction, but often don't. +#define dct_trn8_8(x, y) { uint8x8x2_t t = vtrn_u8(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn8_16(x, y) { uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); x = vreinterpret_u8_u16(t.val[0]); y = vreinterpret_u8_u16(t.val[1]); } +#define dct_trn8_32(x, y) { uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); x = vreinterpret_u8_u32(t.val[0]); y = vreinterpret_u8_u32(t.val[1]); } + + // sadly can't use interleaved stores here since we only write + // 8 bytes to each scan line! + + // 8x8 8-bit transpose pass 1 + dct_trn8_8(p0, p1); + dct_trn8_8(p2, p3); + dct_trn8_8(p4, p5); + dct_trn8_8(p6, p7); + + // pass 2 + dct_trn8_16(p0, p2); + dct_trn8_16(p1, p3); + dct_trn8_16(p4, p6); + dct_trn8_16(p5, p7); + + // pass 3 + dct_trn8_32(p0, p4); + dct_trn8_32(p1, p5); + dct_trn8_32(p2, p6); + dct_trn8_32(p3, p7); + + // store + vst1_u8(out, p0); out += out_stride; + vst1_u8(out, p1); out += out_stride; + vst1_u8(out, p2); out += out_stride; + vst1_u8(out, p3); out += out_stride; + vst1_u8(out, p4); out += out_stride; + vst1_u8(out, p5); out += out_stride; + vst1_u8(out, p6); out += out_stride; + vst1_u8(out, p7); + +#undef dct_trn8_8 +#undef dct_trn8_16 +#undef dct_trn8_32 + } + +#undef dct_long_mul +#undef dct_long_mac +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_pass +} + +#endif // STBI_NEON + +#define STBI__MARKER_none 0xff +// if there's a pending marker from the entropy stream, return that +// otherwise, fetch from the stream and get a marker. 
if there's no +// marker, return 0xff, which is never a valid marker value +static stbi_uc stbi__get_marker(stbi__jpeg *j) +{ + stbi_uc x; + if (j->marker != STBI__MARKER_none) { x = j->marker; j->marker = STBI__MARKER_none; return x; } + x = stbi__get8(j->s); + if (x != 0xff) return STBI__MARKER_none; + while (x == 0xff) + x = stbi__get8(j->s); // consume repeated 0xff fill bytes + return x; +} + +// in each scan, we'll have scan_n components, and the order +// of the components is specified by order[] +#define STBI__RESTART(x) ((x) >= 0xd0 && (x) <= 0xd7) + +// after a restart interval, stbi__jpeg_reset the entropy decoder and +// the dc prediction +static void stbi__jpeg_reset(stbi__jpeg *j) +{ + j->code_bits = 0; + j->code_buffer = 0; + j->nomore = 0; + j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred = j->img_comp[3].dc_pred = 0; + j->marker = STBI__MARKER_none; + j->todo = j->restart_interval ? j->restart_interval : 0x7fffffff; + j->eob_run = 0; + // no more than 1<<31 MCUs if no restart_interal? that's plenty safe, + // since we don't even allow 1<<30 pixels +} + +static int stbi__parse_entropy_coded_data(stbi__jpeg *z) +{ + stbi__jpeg_reset(z); + if (!z->progressive) { + if (z->scan_n == 1) { + int i,j; + STBI_SIMD_ALIGN(short, data[64]); + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + // if it's NOT a restart, then just bail, so we get corrupt data + // rather than no data + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int i,j,k,x,y; + STBI_SIMD_ALIGN(short, data[64]); + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... 
process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x)*8; + int y2 = (j*z->img_comp[n].v + y)*8; + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*y2+x2, z->img_comp[n].w2, data); + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } else { + if (z->scan_n == 1) { + int i,j; + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + if (z->spec_start == 0) { + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } else { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block_prog_ac(z, data, &z->huff_ac[ha], z->fast_ac[ha])) + return 0; + } + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int i,j,k,x,y; + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... 
process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x); + int y2 = (j*z->img_comp[n].v + y); + short *data = z->img_comp[n].coeff + 64 * (x2 + y2 * z->img_comp[n].coeff_w); + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } +} + +static void stbi__jpeg_dequantize(short *data, stbi__uint16 *dequant) +{ + int i; + for (i=0; i < 64; ++i) + data[i] *= dequant[i]; +} + +static void stbi__jpeg_finish(stbi__jpeg *z) +{ + if (z->progressive) { + // dequantize and idct the data + int i,j,n; + for (n=0; n < z->s->img_n; ++n) { + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + stbi__jpeg_dequantize(data, z->dequant[z->img_comp[n].tq]); + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + } + } + } + } +} + +static int stbi__process_marker(stbi__jpeg *z, int m) +{ + int L; + switch (m) { + case STBI__MARKER_none: // no marker found + return stbi__err("expected marker","Corrupt JPEG"); + + case 0xDD: // DRI - specify restart interval + if (stbi__get16be(z->s) != 4) return stbi__err("bad DRI len","Corrupt JPEG"); + z->restart_interval = stbi__get16be(z->s); + return 1; + + case 0xDB: // DQT - define quantization table + L = stbi__get16be(z->s)-2; + while (L > 0) { + int q = stbi__get8(z->s); + int p = q >> 4, sixteen = (p != 0); + int t = q & 15,i; + if (p != 0 && p != 1) return stbi__err("bad DQT type","Corrupt JPEG"); + if (t > 3) return stbi__err("bad DQT table","Corrupt JPEG"); + + for (i=0; i < 64; ++i) + z->dequant[t][stbi__jpeg_dezigzag[i]] = (stbi__uint16)(sixteen ? stbi__get16be(z->s) : stbi__get8(z->s)); + L -= (sixteen ? 
129 : 65); + } + return L==0; + + case 0xC4: // DHT - define huffman table + L = stbi__get16be(z->s)-2; + while (L > 0) { + stbi_uc *v; + int sizes[16],i,n=0; + int q = stbi__get8(z->s); + int tc = q >> 4; + int th = q & 15; + if (tc > 1 || th > 3) return stbi__err("bad DHT header","Corrupt JPEG"); + for (i=0; i < 16; ++i) { + sizes[i] = stbi__get8(z->s); + n += sizes[i]; + } + L -= 17; + if (tc == 0) { + if (!stbi__build_huffman(z->huff_dc+th, sizes)) return 0; + v = z->huff_dc[th].values; + } else { + if (!stbi__build_huffman(z->huff_ac+th, sizes)) return 0; + v = z->huff_ac[th].values; + } + for (i=0; i < n; ++i) + v[i] = stbi__get8(z->s); + if (tc != 0) + stbi__build_fast_ac(z->fast_ac[th], z->huff_ac + th); + L -= n; + } + return L==0; + } + + // check for comment block or APP blocks + if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) { + L = stbi__get16be(z->s); + if (L < 2) { + if (m == 0xFE) + return stbi__err("bad COM len","Corrupt JPEG"); + else + return stbi__err("bad APP len","Corrupt JPEG"); + } + L -= 2; + + if (m == 0xE0 && L >= 5) { // JFIF APP0 segment + static const unsigned char tag[5] = {'J','F','I','F','\0'}; + int ok = 1; + int i; + for (i=0; i < 5; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 5; + if (ok) + z->jfif = 1; + } else if (m == 0xEE && L >= 12) { // Adobe APP14 segment + static const unsigned char tag[6] = {'A','d','o','b','e','\0'}; + int ok = 1; + int i; + for (i=0; i < 6; ++i) + if (stbi__get8(z->s) != tag[i]) + ok = 0; + L -= 6; + if (ok) { + stbi__get8(z->s); // version + stbi__get16be(z->s); // flags0 + stbi__get16be(z->s); // flags1 + z->app14_color_transform = stbi__get8(z->s); // color transform + L -= 6; + } + } + + stbi__skip(z->s, L); + return 1; + } + + return stbi__err("unknown marker","Corrupt JPEG"); +} + +// after we see SOS +static int stbi__process_scan_header(stbi__jpeg *z) +{ + int i; + int Ls = stbi__get16be(z->s); + z->scan_n = stbi__get8(z->s); + if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int) z->s->img_n) return stbi__err("bad SOS component count","Corrupt JPEG"); + if (Ls != 6+2*z->scan_n) return stbi__err("bad SOS len","Corrupt JPEG"); + for (i=0; i < z->scan_n; ++i) { + int id = stbi__get8(z->s), which; + int q = stbi__get8(z->s); + for (which = 0; which < z->s->img_n; ++which) + if (z->img_comp[which].id == id) + break; + if (which == z->s->img_n) return 0; // no match + z->img_comp[which].hd = q >> 4; if (z->img_comp[which].hd > 3) return stbi__err("bad DC huff","Corrupt JPEG"); + z->img_comp[which].ha = q & 15; if (z->img_comp[which].ha > 3) return stbi__err("bad AC huff","Corrupt JPEG"); + z->order[i] = which; + } + + { + int aa; + z->spec_start = stbi__get8(z->s); + z->spec_end = stbi__get8(z->s); // should be 63, but might be 0 + aa = stbi__get8(z->s); + z->succ_high = (aa >> 4); + z->succ_low = (aa & 15); + if (z->progressive) { + if (z->spec_start > 63 || z->spec_end > 63 || z->spec_start > z->spec_end || z->succ_high > 13 || z->succ_low > 13) + return stbi__err("bad SOS", "Corrupt JPEG"); + } else { + if (z->spec_start != 0) return stbi__err("bad SOS","Corrupt JPEG"); + if (z->succ_high != 0 || z->succ_low != 0) return stbi__err("bad SOS","Corrupt JPEG"); + z->spec_end = 63; + } + } + + return 1; +} + +static int stbi__free_jpeg_components(stbi__jpeg *z, int ncomp, int why) +{ + int i; + for (i=0; i < ncomp; ++i) { + if (z->img_comp[i].raw_data) { + STBI_FREE(z->img_comp[i].raw_data); + z->img_comp[i].raw_data = NULL; + z->img_comp[i].data = NULL; + } + if (z->img_comp[i].raw_coeff) { + 
STBI_FREE(z->img_comp[i].raw_coeff); + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].coeff = 0; + } + if (z->img_comp[i].linebuf) { + STBI_FREE(z->img_comp[i].linebuf); + z->img_comp[i].linebuf = NULL; + } + } + return why; +} + +static int stbi__process_frame_header(stbi__jpeg *z, int scan) +{ + stbi__context *s = z->s; + int Lf,p,i,q, h_max=1,v_max=1,c; + Lf = stbi__get16be(s); if (Lf < 11) return stbi__err("bad SOF len","Corrupt JPEG"); // JPEG + p = stbi__get8(s); if (p != 8) return stbi__err("only 8-bit","JPEG format not supported: 8-bit only"); // JPEG baseline + s->img_y = stbi__get16be(s); if (s->img_y == 0) return stbi__err("no header height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG + s->img_x = stbi__get16be(s); if (s->img_x == 0) return stbi__err("0 width","Corrupt JPEG"); // JPEG requires + c = stbi__get8(s); + if (c != 3 && c != 1 && c != 4) return stbi__err("bad component count","Corrupt JPEG"); + s->img_n = c; + for (i=0; i < c; ++i) { + z->img_comp[i].data = NULL; + z->img_comp[i].linebuf = NULL; + } + + if (Lf != 8+3*s->img_n) return stbi__err("bad SOF len","Corrupt JPEG"); + + z->rgb = 0; + for (i=0; i < s->img_n; ++i) { + static const unsigned char rgb[3] = { 'R', 'G', 'B' }; + z->img_comp[i].id = stbi__get8(s); + if (s->img_n == 3 && z->img_comp[i].id == rgb[i]) + ++z->rgb; + q = stbi__get8(s); + z->img_comp[i].h = (q >> 4); if (!z->img_comp[i].h || z->img_comp[i].h > 4) return stbi__err("bad H","Corrupt JPEG"); + z->img_comp[i].v = q & 15; if (!z->img_comp[i].v || z->img_comp[i].v > 4) return stbi__err("bad V","Corrupt JPEG"); + z->img_comp[i].tq = stbi__get8(s); if (z->img_comp[i].tq > 3) return stbi__err("bad TQ","Corrupt JPEG"); + } + + if (scan != STBI__SCAN_load) return 1; + + if (!stbi__mad3sizes_valid(s->img_x, s->img_y, s->img_n, 0)) return stbi__err("too large", "Image too large to decode"); + + for (i=0; i < s->img_n; ++i) { + if (z->img_comp[i].h > h_max) h_max = z->img_comp[i].h; + if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v; + } + + // compute interleaved mcu info + z->img_h_max = h_max; + z->img_v_max = v_max; + z->img_mcu_w = h_max * 8; + z->img_mcu_h = v_max * 8; + // these sizes can't be more than 17 bits + z->img_mcu_x = (s->img_x + z->img_mcu_w-1) / z->img_mcu_w; + z->img_mcu_y = (s->img_y + z->img_mcu_h-1) / z->img_mcu_h; + + for (i=0; i < s->img_n; ++i) { + // number of effective pixels (e.g. for non-interleaved MCU) + z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max-1) / h_max; + z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max-1) / v_max; + // to simplify generation, we'll allocate enough memory to decode + // the bogus oversized data from using interleaved MCUs and their + // big blocks (e.g. 
a 16x16 iMCU on an image of width 33); we won't + // discard the extra data until colorspace conversion + // + // img_mcu_x, img_mcu_y: <=17 bits; comp[i].h and .v are <=4 (checked earlier) + // so these muls can't overflow with 32-bit ints (which we require) + z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8; + z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8; + z->img_comp[i].coeff = 0; + z->img_comp[i].raw_coeff = 0; + z->img_comp[i].linebuf = NULL; + z->img_comp[i].raw_data = stbi__malloc_mad2(z->img_comp[i].w2, z->img_comp[i].h2, 15); + if (z->img_comp[i].raw_data == NULL) + return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); + // align blocks for idct using mmx/sse + z->img_comp[i].data = (stbi_uc*) (((size_t) z->img_comp[i].raw_data + 15) & ~15); + if (z->progressive) { + // w2, h2 are multiples of 8 (see above) + z->img_comp[i].coeff_w = z->img_comp[i].w2 / 8; + z->img_comp[i].coeff_h = z->img_comp[i].h2 / 8; + z->img_comp[i].raw_coeff = stbi__malloc_mad3(z->img_comp[i].w2, z->img_comp[i].h2, sizeof(short), 15); + if (z->img_comp[i].raw_coeff == NULL) + return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory")); + z->img_comp[i].coeff = (short*) (((size_t) z->img_comp[i].raw_coeff + 15) & ~15); + } + } + + return 1; +} + +// use comparisons since in some cases we handle more than one case (e.g. SOF) +#define stbi__DNL(x) ((x) == 0xdc) +#define stbi__SOI(x) ((x) == 0xd8) +#define stbi__EOI(x) ((x) == 0xd9) +#define stbi__SOF(x) ((x) == 0xc0 || (x) == 0xc1 || (x) == 0xc2) +#define stbi__SOS(x) ((x) == 0xda) + +#define stbi__SOF_progressive(x) ((x) == 0xc2) + +static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan) +{ + int m; + z->jfif = 0; + z->app14_color_transform = -1; // valid values are 0,1,2 + z->marker = STBI__MARKER_none; // initialize cached marker to empty + m = stbi__get_marker(z); + if (!stbi__SOI(m)) return stbi__err("no SOI","Corrupt JPEG"); + if (scan == STBI__SCAN_type) return 1; + m = stbi__get_marker(z); + while (!stbi__SOF(m)) { + if (!stbi__process_marker(z,m)) return 0; + m = stbi__get_marker(z); + while (m == STBI__MARKER_none) { + // some files have extra padding after their blocks, so ok, we'll scan + if (stbi__at_eof(z->s)) return stbi__err("no SOF", "Corrupt JPEG"); + m = stbi__get_marker(z); + } + } + z->progressive = stbi__SOF_progressive(m); + if (!stbi__process_frame_header(z, scan)) return 0; + return 1; +} + +// decode image to YCbCr format +static int stbi__decode_jpeg_image(stbi__jpeg *j) +{ + int m; + for (m = 0; m < 4; m++) { + j->img_comp[m].raw_data = NULL; + j->img_comp[m].raw_coeff = NULL; + } + j->restart_interval = 0; + if (!stbi__decode_jpeg_header(j, STBI__SCAN_load)) return 0; + m = stbi__get_marker(j); + while (!stbi__EOI(m)) { + if (stbi__SOS(m)) { + if (!stbi__process_scan_header(j)) return 0; + if (!stbi__parse_entropy_coded_data(j)) return 0; + if (j->marker == STBI__MARKER_none ) { + // handle 0s at the end of image data from IP Kamera 9060 + while (!stbi__at_eof(j->s)) { + int x = stbi__get8(j->s); + if (x == 255) { + j->marker = stbi__get8(j->s); + break; + } + } + // if we reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0 + } + } else if (stbi__DNL(m)) { + int Ld = stbi__get16be(j->s); + stbi__uint32 NL = stbi__get16be(j->s); + if (Ld != 4) return stbi__err("bad DNL len", "Corrupt JPEG"); + if (NL != j->s->img_y) return stbi__err("bad DNL height", "Corrupt JPEG"); + } else { + if (!stbi__process_marker(j, 
m)) return 0; + } + m = stbi__get_marker(j); + } + if (j->progressive) + stbi__jpeg_finish(j); + return 1; +} + +// static jfif-centered resampling (across block boundaries) + +typedef stbi_uc *(*resample_row_func)(stbi_uc *out, stbi_uc *in0, stbi_uc *in1, + int w, int hs); + +#define stbi__div4(x) ((stbi_uc) ((x) >> 2)) + +static stbi_uc *resample_row_1(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + STBI_NOTUSED(out); + STBI_NOTUSED(in_far); + STBI_NOTUSED(w); + STBI_NOTUSED(hs); + return in_near; +} + +static stbi_uc* stbi__resample_row_v_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples vertically for every one in input + int i; + STBI_NOTUSED(hs); + for (i=0; i < w; ++i) + out[i] = stbi__div4(3*in_near[i] + in_far[i] + 2); + return out; +} + +static stbi_uc* stbi__resample_row_h_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples horizontally for every one in input + int i; + stbi_uc *input = in_near; + + if (w == 1) { + // if only one sample, can't do any interpolation + out[0] = out[1] = input[0]; + return out; + } + + out[0] = input[0]; + out[1] = stbi__div4(input[0]*3 + input[1] + 2); + for (i=1; i < w-1; ++i) { + int n = 3*input[i]+2; + out[i*2+0] = stbi__div4(n+input[i-1]); + out[i*2+1] = stbi__div4(n+input[i+1]); + } + out[i*2+0] = stbi__div4(input[w-2]*3 + input[w-1] + 2); + out[i*2+1] = input[w-1]; + + STBI_NOTUSED(in_far); + STBI_NOTUSED(hs); + + return out; +} + +#define stbi__div16(x) ((stbi_uc) ((x) >> 4)) + +static stbi_uc *stbi__resample_row_hv_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i,t0,t1; + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + out[0] = stbi__div4(t1+2); + for (i=1; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static stbi_uc *stbi__resample_row_hv_2_simd(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i=0,t0,t1; + + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + // process groups of 8 pixels for as long as we can. + // note we can't handle the last pixel in a row in this loop + // because we need to handle the filter boundary conditions. + for (; i < ((w-1) & ~7); i += 8) { +#if defined(STBI_SSE2) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + __m128i zero = _mm_setzero_si128(); + __m128i farb = _mm_loadl_epi64((__m128i *) (in_far + i)); + __m128i nearb = _mm_loadl_epi64((__m128i *) (in_near + i)); + __m128i farw = _mm_unpacklo_epi8(farb, zero); + __m128i nearw = _mm_unpacklo_epi8(nearb, zero); + __m128i diff = _mm_sub_epi16(farw, nearw); + __m128i nears = _mm_slli_epi16(nearw, 2); + __m128i curr = _mm_add_epi16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. 
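+      // [editorial note, not part of upstream stb_image] worked weights:
+      // t = 3*near + far is the vertical filter, and the horizontal pass
+      // computes (3*t_cur + t_adj + 8) >> 4, so each output pixel is the
+      // bilinear blend 9/16, 3/16, 3/16, 1/16 of the four surrounding input
+      // samples -- the same weights as the scalar stbi__resample_row_hv_2.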
+ __m128i prv0 = _mm_slli_si128(curr, 2); + __m128i nxt0 = _mm_srli_si128(curr, 2); + __m128i prev = _mm_insert_epi16(prv0, t1, 0); + __m128i next = _mm_insert_epi16(nxt0, 3*in_near[i+8] + in_far[i+8], 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. + __m128i bias = _mm_set1_epi16(8); + __m128i curs = _mm_slli_epi16(curr, 2); + __m128i prvd = _mm_sub_epi16(prev, curr); + __m128i nxtd = _mm_sub_epi16(next, curr); + __m128i curb = _mm_add_epi16(curs, bias); + __m128i even = _mm_add_epi16(prvd, curb); + __m128i odd = _mm_add_epi16(nxtd, curb); + + // interleave even and odd pixels, then undo scaling. + __m128i int0 = _mm_unpacklo_epi16(even, odd); + __m128i int1 = _mm_unpackhi_epi16(even, odd); + __m128i de0 = _mm_srli_epi16(int0, 4); + __m128i de1 = _mm_srli_epi16(int1, 4); + + // pack and write output + __m128i outv = _mm_packus_epi16(de0, de1); + _mm_storeu_si128((__m128i *) (out + i*2), outv); +#elif defined(STBI_NEON) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + uint8x8_t farb = vld1_u8(in_far + i); + uint8x8_t nearb = vld1_u8(in_near + i); + int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb)); + int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2)); + int16x8_t curr = vaddq_s16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. + int16x8_t prv0 = vextq_s16(curr, curr, 7); + int16x8_t nxt0 = vextq_s16(curr, curr, 1); + int16x8_t prev = vsetq_lane_s16(t1, prv0, 0); + int16x8_t next = vsetq_lane_s16(3*in_near[i+8] + in_far[i+8], nxt0, 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. 
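+      // [editorial note, not in upstream stb_image]: concretely, each even output
+      // is (3*curr + prev + 8) >> 4 and each odd output is (3*curr + next + 8) >> 4;
+      // curr/prev/next already carry the 4x scale of the vertical pass, so the
+      // shared +8 rounds and the >>4 removes the combined 16x weight.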
+ int16x8_t curs = vshlq_n_s16(curr, 2); + int16x8_t prvd = vsubq_s16(prev, curr); + int16x8_t nxtd = vsubq_s16(next, curr); + int16x8_t even = vaddq_s16(curs, prvd); + int16x8_t odd = vaddq_s16(curs, nxtd); + + // undo scaling and round, then store with even/odd phases interleaved + uint8x8x2_t o; + o.val[0] = vqrshrun_n_s16(even, 4); + o.val[1] = vqrshrun_n_s16(odd, 4); + vst2_u8(out + i*2, o); +#endif + + // "previous" value for next iter + t1 = 3*in_near[i+7] + in_far[i+7]; + } + + t0 = t1; + t1 = 3*in_near[i] + in_far[i]; + out[i*2] = stbi__div16(3*t1 + t0 + 8); + + for (++i; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} +#endif + +static stbi_uc *stbi__resample_row_generic(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // resample with nearest-neighbor + int i,j; + STBI_NOTUSED(in_far); + for (i=0; i < w; ++i) + for (j=0; j < hs; ++j) + out[i*hs+j] = in_near[i]; + return out; +} + +// this is a reduced-precision calculation of YCbCr-to-RGB introduced +// to make sure the code produces the same results in both SIMD and scalar +#define stbi__float2fixed(x) (((int) ((x) * 4096.0f + 0.5f)) << 8) +static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step) +{ + int i; + for (i=0; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + (cr*-stbi__float2fixed(0.71414f)) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static void stbi__YCbCr_to_RGB_simd(stbi_uc *out, stbi_uc const *y, stbi_uc const *pcb, stbi_uc const *pcr, int count, int step) +{ + int i = 0; + +#ifdef STBI_SSE2 + // step == 3 is pretty ugly on the final interleave, and i'm not convinced + // it's useful in practice (you wouldn't use it for textures, for example). + // so just accelerate step == 4 case. + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. 
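+      // [editorial note, not in upstream stb_image]: the constants below are BT.601
+      // coefficients in 4.12 fixed point, (short)(coef*4096 + 0.5). cr/cb sit in
+      // the high byte of each 16-bit lane (value*256), so _mm_mulhi_epi16 yields
+      // (value*256 * coef*4096) >> 16 = value*coef*16; y is unpacked to y*256+128
+      // and shifted to y*16+8, making the +128 double as the 0.5 rounding term
+      // before the final >>4 descale back to 8 bits.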
+ __m128i signflip = _mm_set1_epi8(-0x80); + __m128i cr_const0 = _mm_set1_epi16( (short) ( 1.40200f*4096.0f+0.5f)); + __m128i cr_const1 = _mm_set1_epi16( - (short) ( 0.71414f*4096.0f+0.5f)); + __m128i cb_const0 = _mm_set1_epi16( - (short) ( 0.34414f*4096.0f+0.5f)); + __m128i cb_const1 = _mm_set1_epi16( (short) ( 1.77200f*4096.0f+0.5f)); + __m128i y_bias = _mm_set1_epi8((char) (unsigned char) 128); + __m128i xw = _mm_set1_epi16(255); // alpha channel + + for (; i+7 < count; i += 8) { + // load + __m128i y_bytes = _mm_loadl_epi64((__m128i *) (y+i)); + __m128i cr_bytes = _mm_loadl_epi64((__m128i *) (pcr+i)); + __m128i cb_bytes = _mm_loadl_epi64((__m128i *) (pcb+i)); + __m128i cr_biased = _mm_xor_si128(cr_bytes, signflip); // -128 + __m128i cb_biased = _mm_xor_si128(cb_bytes, signflip); // -128 + + // unpack to short (and left-shift cr, cb by 8) + __m128i yw = _mm_unpacklo_epi8(y_bias, y_bytes); + __m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased); + __m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased); + + // color transform + __m128i yws = _mm_srli_epi16(yw, 4); + __m128i cr0 = _mm_mulhi_epi16(cr_const0, crw); + __m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw); + __m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1); + __m128i cr1 = _mm_mulhi_epi16(crw, cr_const1); + __m128i rws = _mm_add_epi16(cr0, yws); + __m128i gwt = _mm_add_epi16(cb0, yws); + __m128i bws = _mm_add_epi16(yws, cb1); + __m128i gws = _mm_add_epi16(gwt, cr1); + + // descale + __m128i rw = _mm_srai_epi16(rws, 4); + __m128i bw = _mm_srai_epi16(bws, 4); + __m128i gw = _mm_srai_epi16(gws, 4); + + // back to byte, set up for transpose + __m128i brb = _mm_packus_epi16(rw, bw); + __m128i gxb = _mm_packus_epi16(gw, xw); + + // transpose to interleave channels + __m128i t0 = _mm_unpacklo_epi8(brb, gxb); + __m128i t1 = _mm_unpackhi_epi8(brb, gxb); + __m128i o0 = _mm_unpacklo_epi16(t0, t1); + __m128i o1 = _mm_unpackhi_epi16(t0, t1); + + // store + _mm_storeu_si128((__m128i *) (out + 0), o0); + _mm_storeu_si128((__m128i *) (out + 16), o1); + out += 32; + } + } +#endif + +#ifdef STBI_NEON + // in this version, step=3 support would be easy to add. but is there demand? + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. 
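+      // [editorial note, not in upstream stb_image]: same 4.12 fixed-point math as
+      // the SSE2 path. vqdmulhq_s16 computes (2*a*b) >> 16, so with cr/cb widened
+      // by <<7 (value*128) the products are again value*coef*16; y is widened by
+      // <<4, and vqrshrun_n_s16(..., 4) later performs the rounded >>4 and the
+      // saturating narrow back to bytes in a single step.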
+ uint8x8_t signflip = vdup_n_u8(0x80); + int16x8_t cr_const0 = vdupq_n_s16( (short) ( 1.40200f*4096.0f+0.5f)); + int16x8_t cr_const1 = vdupq_n_s16( - (short) ( 0.71414f*4096.0f+0.5f)); + int16x8_t cb_const0 = vdupq_n_s16( - (short) ( 0.34414f*4096.0f+0.5f)); + int16x8_t cb_const1 = vdupq_n_s16( (short) ( 1.77200f*4096.0f+0.5f)); + + for (; i+7 < count; i += 8) { + // load + uint8x8_t y_bytes = vld1_u8(y + i); + uint8x8_t cr_bytes = vld1_u8(pcr + i); + uint8x8_t cb_bytes = vld1_u8(pcb + i); + int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip)); + int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip)); + + // expand to s16 + int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4)); + int16x8_t crw = vshll_n_s8(cr_biased, 7); + int16x8_t cbw = vshll_n_s8(cb_biased, 7); + + // color transform + int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0); + int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0); + int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1); + int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1); + int16x8_t rws = vaddq_s16(yws, cr0); + int16x8_t gws = vaddq_s16(vaddq_s16(yws, cb0), cr1); + int16x8_t bws = vaddq_s16(yws, cb1); + + // undo scaling, round, convert to byte + uint8x8x4_t o; + o.val[0] = vqrshrun_n_s16(rws, 4); + o.val[1] = vqrshrun_n_s16(gws, 4); + o.val[2] = vqrshrun_n_s16(bws, 4); + o.val[3] = vdup_n_u8(255); + + // store, interleaving r/g/b/a + vst4_u8(out, o); + out += 8*4; + } + } +#endif + + for (; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* stbi__float2fixed(1.40200f); + g = y_fixed + cr*-stbi__float2fixed(0.71414f) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* stbi__float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} +#endif + +// set up the kernels +static void stbi__setup_jpeg(stbi__jpeg *j) +{ + j->idct_block_kernel = stbi__idct_block; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_row; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2; + +#ifdef STBI_SSE2 + if (stbi__sse2_available()) { + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; + } +#endif + +#ifdef STBI_NEON + j->idct_block_kernel = stbi__idct_simd; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; +#endif +} + +// clean up the temporary component buffers +static void stbi__cleanup_jpeg(stbi__jpeg *j) +{ + stbi__free_jpeg_components(j, j->s->img_n, 0); +} + +typedef struct +{ + resample_row_func resample; + stbi_uc *line0,*line1; + int hs,vs; // expansion factor in each axis + int w_lores; // horizontal pixels pre-expansion + int ystep; // how far through vertical expansion we are + int ypos; // which pre-expansion row we're on +} stbi__resample; + +// fast 0..255 * 0..255 => 0..255 rounded multiplication +static stbi_uc stbi__blinn_8x8(stbi_uc x, stbi_uc y) +{ + unsigned int t = x*y + 128; + return (stbi_uc) ((t + (t >>8)) >> 8); +} + +static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp, int req_comp) +{ + int n, decode_n, is_rgb; + z->s->img_n = 0; // make 
stbi__cleanup_jpeg safe + + // validate req_comp + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + + // load a jpeg image from whichever source, but leave in YCbCr format + if (!stbi__decode_jpeg_image(z)) { stbi__cleanup_jpeg(z); return NULL; } + + // determine actual number of components to generate + n = req_comp ? req_comp : z->s->img_n >= 3 ? 3 : 1; + + is_rgb = z->s->img_n == 3 && (z->rgb == 3 || (z->app14_color_transform == 0 && !z->jfif)); + + if (z->s->img_n == 3 && n < 3 && !is_rgb) + decode_n = 1; + else + decode_n = z->s->img_n; + + // resample and color-convert + { + int k; + unsigned int i,j; + stbi_uc *output; + stbi_uc *coutput[4]; + + stbi__resample res_comp[4]; + + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + + // allocate line buffer big enough for upsampling off the edges + // with upsample factor of 4 + z->img_comp[k].linebuf = (stbi_uc *) stbi__malloc(z->s->img_x + 3); + if (!z->img_comp[k].linebuf) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + r->hs = z->img_h_max / z->img_comp[k].h; + r->vs = z->img_v_max / z->img_comp[k].v; + r->ystep = r->vs >> 1; + r->w_lores = (z->s->img_x + r->hs-1) / r->hs; + r->ypos = 0; + r->line0 = r->line1 = z->img_comp[k].data; + + if (r->hs == 1 && r->vs == 1) r->resample = resample_row_1; + else if (r->hs == 1 && r->vs == 2) r->resample = stbi__resample_row_v_2; + else if (r->hs == 2 && r->vs == 1) r->resample = stbi__resample_row_h_2; + else if (r->hs == 2 && r->vs == 2) r->resample = z->resample_row_hv_2_kernel; + else r->resample = stbi__resample_row_generic; + } + + // can't error after this so, this is safe + output = (stbi_uc *) stbi__malloc_mad3(n, z->s->img_x, z->s->img_y, 1); + if (!output) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + // now go ahead and resample + for (j=0; j < z->s->img_y; ++j) { + stbi_uc *out = output + n * z->s->img_x * j; + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + int y_bot = r->ystep >= (r->vs >> 1); + coutput[k] = r->resample(z->img_comp[k].linebuf, + y_bot ? r->line1 : r->line0, + y_bot ? r->line0 : r->line1, + r->w_lores, r->hs); + if (++r->ystep >= r->vs) { + r->ystep = 0; + r->line0 = r->line1; + if (++r->ypos < z->img_comp[k].y) + r->line1 += z->img_comp[k].w2; + } + } + if (n >= 3) { + stbi_uc *y = coutput[0]; + if (z->s->img_n == 3) { + if (is_rgb) { + for (i=0; i < z->s->img_x; ++i) { + out[0] = y[i]; + out[1] = coutput[1][i]; + out[2] = coutput[2][i]; + out[3] = 255; + out += n; + } + } else { + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } else if (z->s->img_n == 4) { + if (z->app14_color_transform == 0) { // CMYK + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(coutput[0][i], m); + out[1] = stbi__blinn_8x8(coutput[1][i], m); + out[2] = stbi__blinn_8x8(coutput[2][i], m); + out[3] = 255; + out += n; + } + } else if (z->app14_color_transform == 2) { // YCCK + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + out[0] = stbi__blinn_8x8(255 - out[0], m); + out[1] = stbi__blinn_8x8(255 - out[1], m); + out[2] = stbi__blinn_8x8(255 - out[2], m); + out += n; + } + } else { // YCbCr + alpha? 
Ignore the fourth channel for now + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } + } else + for (i=0; i < z->s->img_x; ++i) { + out[0] = out[1] = out[2] = y[i]; + out[3] = 255; // not used if n==3 + out += n; + } + } else { + if (is_rgb) { + if (n == 1) + for (i=0; i < z->s->img_x; ++i) + *out++ = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + else { + for (i=0; i < z->s->img_x; ++i, out += 2) { + out[0] = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]); + out[1] = 255; + } + } + } else if (z->s->img_n == 4 && z->app14_color_transform == 0) { + for (i=0; i < z->s->img_x; ++i) { + stbi_uc m = coutput[3][i]; + stbi_uc r = stbi__blinn_8x8(coutput[0][i], m); + stbi_uc g = stbi__blinn_8x8(coutput[1][i], m); + stbi_uc b = stbi__blinn_8x8(coutput[2][i], m); + out[0] = stbi__compute_y(r, g, b); + out[1] = 255; + out += n; + } + } else if (z->s->img_n == 4 && z->app14_color_transform == 2) { + for (i=0; i < z->s->img_x; ++i) { + out[0] = stbi__blinn_8x8(255 - coutput[0][i], coutput[3][i]); + out[1] = 255; + out += n; + } + } else { + stbi_uc *y = coutput[0]; + if (n == 1) + for (i=0; i < z->s->img_x; ++i) out[i] = y[i]; + else + for (i=0; i < z->s->img_x; ++i) *out++ = y[i], *out++ = 255; + } + } + } + stbi__cleanup_jpeg(z); + *out_x = z->s->img_x; + *out_y = z->s->img_y; + if (comp) *comp = z->s->img_n >= 3 ? 3 : 1; // report original components, not output + return output; + } +} + +static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + unsigned char* result; + stbi__jpeg* j = (stbi__jpeg*) stbi__malloc(sizeof(stbi__jpeg)); + STBI_NOTUSED(ri); + j->s = s; + stbi__setup_jpeg(j); + result = load_jpeg_image(j, x,y,comp,req_comp); + STBI_FREE(j); + return result; +} + +static int stbi__jpeg_test(stbi__context *s) +{ + int r; + stbi__jpeg* j = (stbi__jpeg*)stbi__malloc(sizeof(stbi__jpeg)); + j->s = s; + stbi__setup_jpeg(j); + r = stbi__decode_jpeg_header(j, STBI__SCAN_type); + stbi__rewind(s); + STBI_FREE(j); + return r; +} + +static int stbi__jpeg_info_raw(stbi__jpeg *j, int *x, int *y, int *comp) +{ + if (!stbi__decode_jpeg_header(j, STBI__SCAN_header)) { + stbi__rewind( j->s ); + return 0; + } + if (x) *x = j->s->img_x; + if (y) *y = j->s->img_y; + if (comp) *comp = j->s->img_n >= 3 ? 
3 : 1; + return 1; +} + +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp) +{ + int result; + stbi__jpeg* j = (stbi__jpeg*) (stbi__malloc(sizeof(stbi__jpeg))); + if (!j) { + stbi__errpuc("outofmem", "Out of memory"); + return 0; + } + j->s = s; + result = stbi__jpeg_info_raw(j, x, y, comp); + STBI_FREE(j); + return result; +} +#endif + +// public domain zlib decode v0.2 Sean Barrett 2006-11-18 +// simple implementation +// - all input must be provided in an upfront buffer +// - all output is written to a single output buffer (can malloc/realloc) +// performance +// - fast huffman + +#ifndef STBI_NO_ZLIB + +// fast-way is faster to check than jpeg huffman, but slow way is slower +#define STBI__ZFAST_BITS 9 // accelerate all cases in default tables +#define STBI__ZFAST_MASK ((1 << STBI__ZFAST_BITS) - 1) + +// zlib-style huffman encoding +// (jpegs packs from left, zlib from right, so can't share code) +typedef struct +{ + stbi__uint16 fast[1 << STBI__ZFAST_BITS]; + stbi__uint16 firstcode[16]; + int maxcode[17]; + stbi__uint16 firstsymbol[16]; + stbi_uc size[288]; + stbi__uint16 value[288]; +} stbi__zhuffman; + +stbi_inline static int stbi__bitreverse16(int n) +{ + n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1); + n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2); + n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4); + n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8); + return n; +} + +stbi_inline static int stbi__bit_reverse(int v, int bits) +{ + STBI_ASSERT(bits <= 16); + // to bit reverse n bits, reverse 16 and shift + // e.g. 11 bits, bit reverse and shift away 5 + return stbi__bitreverse16(v) >> (16-bits); +} + +static int stbi__zbuild_huffman(stbi__zhuffman *z, const stbi_uc *sizelist, int num) +{ + int i,k=0; + int code, next_code[16], sizes[17]; + + // DEFLATE spec for generating codes + memset(sizes, 0, sizeof(sizes)); + memset(z->fast, 0, sizeof(z->fast)); + for (i=0; i < num; ++i) + ++sizes[sizelist[i]]; + sizes[0] = 0; + for (i=1; i < 16; ++i) + if (sizes[i] > (1 << i)) + return stbi__err("bad sizes", "Corrupt PNG"); + code = 0; + for (i=1; i < 16; ++i) { + next_code[i] = code; + z->firstcode[i] = (stbi__uint16) code; + z->firstsymbol[i] = (stbi__uint16) k; + code = (code + sizes[i]); + if (sizes[i]) + if (code-1 >= (1 << i)) return stbi__err("bad codelengths","Corrupt PNG"); + z->maxcode[i] = code << (16-i); // preshift for inner loop + code <<= 1; + k += sizes[i]; + } + z->maxcode[16] = 0x10000; // sentinel + for (i=0; i < num; ++i) { + int s = sizelist[i]; + if (s) { + int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s]; + stbi__uint16 fastv = (stbi__uint16) ((s << 9) | i); + z->size [c] = (stbi_uc ) s; + z->value[c] = (stbi__uint16) i; + if (s <= STBI__ZFAST_BITS) { + int j = stbi__bit_reverse(next_code[s],s); + while (j < (1 << STBI__ZFAST_BITS)) { + z->fast[j] = fastv; + j += (1 << s); + } + } + ++next_code[s]; + } + } + return 1; +} + +// zlib-from-memory implementation for PNG reading +// because PNG allows splitting the zlib stream arbitrarily, +// and it's annoying structurally to have PNG call ZLIB call PNG, +// we require PNG read all the IDATs and combine them into a single +// memory buffer + +typedef struct +{ + stbi_uc *zbuffer, *zbuffer_end; + int num_bits; + stbi__uint32 code_buffer; + + char *zout; + char *zout_start; + char *zout_end; + int z_expandable; + + stbi__zhuffman z_length, z_distance; +} stbi__zbuf; + +stbi_inline static stbi_uc stbi__zget8(stbi__zbuf *z) +{ + if (z->zbuffer >= z->zbuffer_end) return 0; + return *z->zbuffer++; +} + 
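+// [editorial note, not in upstream stb_image]: DEFLATE packs bits LSB-first.
+// stbi__fill_bits below stacks each new byte above the bits already held in
+// code_buffer, and stbi__zreceive peels n bits off the bottom. Huffman codes,
+// however, are defined MSB-first, which is why stbi__zbuild_huffman stores
+// fast-table entries at bit-reversed indices and the slow path bit-reverses
+// the buffer before comparing against maxcode.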
+static void stbi__fill_bits(stbi__zbuf *z) +{ + do { + STBI_ASSERT(z->code_buffer < (1U << z->num_bits)); + z->code_buffer |= (unsigned int) stbi__zget8(z) << z->num_bits; + z->num_bits += 8; + } while (z->num_bits <= 24); +} + +stbi_inline static unsigned int stbi__zreceive(stbi__zbuf *z, int n) +{ + unsigned int k; + if (z->num_bits < n) stbi__fill_bits(z); + k = z->code_buffer & ((1 << n) - 1); + z->code_buffer >>= n; + z->num_bits -= n; + return k; +} + +static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s,k; + // not resolved by fast table, so compute it the slow way + // use jpeg approach, which requires MSbits at top + k = stbi__bit_reverse(a->code_buffer, 16); + for (s=STBI__ZFAST_BITS+1; ; ++s) + if (k < z->maxcode[s]) + break; + if (s == 16) return -1; // invalid code! + // code size is s, so: + b = (k >> (16-s)) - z->firstcode[s] + z->firstsymbol[s]; + STBI_ASSERT(z->size[b] == s); + a->code_buffer >>= s; + a->num_bits -= s; + return z->value[b]; +} + +stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s; + if (a->num_bits < 16) stbi__fill_bits(a); + b = z->fast[a->code_buffer & STBI__ZFAST_MASK]; + if (b) { + s = b >> 9; + a->code_buffer >>= s; + a->num_bits -= s; + return b & 511; + } + return stbi__zhuffman_decode_slowpath(a, z); +} + +static int stbi__zexpand(stbi__zbuf *z, char *zout, int n) // need to make room for n bytes +{ + char *q; + int cur, limit, old_limit; + z->zout = zout; + if (!z->z_expandable) return stbi__err("output buffer limit","Corrupt PNG"); + cur = (int) (z->zout - z->zout_start); + limit = old_limit = (int) (z->zout_end - z->zout_start); + while (cur + n > limit) + limit *= 2; + q = (char *) STBI_REALLOC_SIZED(z->zout_start, old_limit, limit); + STBI_NOTUSED(old_limit); + if (q == NULL) return stbi__err("outofmem", "Out of memory"); + z->zout_start = q; + z->zout = q + cur; + z->zout_end = q + limit; + return 1; +} + +static const int stbi__zlength_base[31] = { + 3,4,5,6,7,8,9,10,11,13, + 15,17,19,23,27,31,35,43,51,59, + 67,83,99,115,131,163,195,227,258,0,0 }; + +static const int stbi__zlength_extra[31]= +{ 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 }; + +static const int stbi__zdist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193, +257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0}; + +static const int stbi__zdist_extra[32] = +{ 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; + +static int stbi__parse_huffman_block(stbi__zbuf *a) +{ + char *zout = a->zout; + for(;;) { + int z = stbi__zhuffman_decode(a, &a->z_length); + if (z < 256) { + if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); // error in huffman codes + if (zout >= a->zout_end) { + if (!stbi__zexpand(a, zout, 1)) return 0; + zout = a->zout; + } + *zout++ = (char) z; + } else { + stbi_uc *p; + int len,dist; + if (z == 256) { + a->zout = zout; + return 1; + } + z -= 257; + len = stbi__zlength_base[z]; + if (stbi__zlength_extra[z]) len += stbi__zreceive(a, stbi__zlength_extra[z]); + z = stbi__zhuffman_decode(a, &a->z_distance); + if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); + dist = stbi__zdist_base[z]; + if (stbi__zdist_extra[z]) dist += stbi__zreceive(a, stbi__zdist_extra[z]); + if (zout - a->zout_start < dist) return stbi__err("bad dist","Corrupt PNG"); + if (zout + len > a->zout_end) { + if (!stbi__zexpand(a, zout, len)) return 0; + zout = a->zout; + } + p = (stbi_uc *) (zout - dist); + if (dist == 1) { // run of 
one byte; common in images. + stbi_uc v = *p; + if (len) { do *zout++ = v; while (--len); } + } else { + if (len) { do *zout++ = *p++; while (--len); } + } + } + } +} + +static int stbi__compute_huffman_codes(stbi__zbuf *a) +{ + static const stbi_uc length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 }; + stbi__zhuffman z_codelength; + stbi_uc lencodes[286+32+137];//padding for maximum single op + stbi_uc codelength_sizes[19]; + int i,n; + + int hlit = stbi__zreceive(a,5) + 257; + int hdist = stbi__zreceive(a,5) + 1; + int hclen = stbi__zreceive(a,4) + 4; + int ntot = hlit + hdist; + + memset(codelength_sizes, 0, sizeof(codelength_sizes)); + for (i=0; i < hclen; ++i) { + int s = stbi__zreceive(a,3); + codelength_sizes[length_dezigzag[i]] = (stbi_uc) s; + } + if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19)) return 0; + + n = 0; + while (n < ntot) { + int c = stbi__zhuffman_decode(a, &z_codelength); + if (c < 0 || c >= 19) return stbi__err("bad codelengths", "Corrupt PNG"); + if (c < 16) + lencodes[n++] = (stbi_uc) c; + else { + stbi_uc fill = 0; + if (c == 16) { + c = stbi__zreceive(a,2)+3; + if (n == 0) return stbi__err("bad codelengths", "Corrupt PNG"); + fill = lencodes[n-1]; + } else if (c == 17) + c = stbi__zreceive(a,3)+3; + else { + STBI_ASSERT(c == 18); + c = stbi__zreceive(a,7)+11; + } + if (ntot - n < c) return stbi__err("bad codelengths", "Corrupt PNG"); + memset(lencodes+n, fill, c); + n += c; + } + } + if (n != ntot) return stbi__err("bad codelengths","Corrupt PNG"); + if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, lencodes+hlit, hdist)) return 0; + return 1; +} + +static int stbi__parse_uncompressed_block(stbi__zbuf *a) +{ + stbi_uc header[4]; + int len,nlen,k; + if (a->num_bits & 7) + stbi__zreceive(a, a->num_bits & 7); // discard + // drain the bit-packed data into header + k = 0; + while (a->num_bits > 0) { + header[k++] = (stbi_uc) (a->code_buffer & 255); // suppress MSVC run-time check + a->code_buffer >>= 8; + a->num_bits -= 8; + } + STBI_ASSERT(a->num_bits == 0); + // now fill header the normal way + while (k < 4) + header[k++] = stbi__zget8(a); + len = header[1] * 256 + header[0]; + nlen = header[3] * 256 + header[2]; + if (nlen != (len ^ 0xffff)) return stbi__err("zlib corrupt","Corrupt PNG"); + if (a->zbuffer + len > a->zbuffer_end) return stbi__err("read past buffer","Corrupt PNG"); + if (a->zout + len > a->zout_end) + if (!stbi__zexpand(a, a->zout, len)) return 0; + memcpy(a->zout, a->zbuffer, len); + a->zbuffer += len; + a->zout += len; + return 1; +} + +static int stbi__parse_zlib_header(stbi__zbuf *a) +{ + int cmf = stbi__zget8(a); + int cm = cmf & 15; + /* int cinfo = cmf >> 4; */ + int flg = stbi__zget8(a); + if ((cmf*256+flg) % 31 != 0) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec + if (flg & 32) return stbi__err("no preset dict","Corrupt PNG"); // preset dictionary not allowed in png + if (cm != 8) return stbi__err("bad compression","Corrupt PNG"); // DEFLATE required for png + // window = 1 << (8 + cinfo)... 
but who cares, we fully buffer output + return 1; +} + +static const stbi_uc stbi__zdefault_length[288] = +{ + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8 +}; +static const stbi_uc stbi__zdefault_distance[32] = +{ + 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5 +}; +/* +Init algorithm: +{ + int i; // use <= to match clearly with spec + for (i=0; i <= 143; ++i) stbi__zdefault_length[i] = 8; + for ( ; i <= 255; ++i) stbi__zdefault_length[i] = 9; + for ( ; i <= 279; ++i) stbi__zdefault_length[i] = 7; + for ( ; i <= 287; ++i) stbi__zdefault_length[i] = 8; + + for (i=0; i <= 31; ++i) stbi__zdefault_distance[i] = 5; +} +*/ + +static int stbi__parse_zlib(stbi__zbuf *a, int parse_header) +{ + int final, type; + if (parse_header) + if (!stbi__parse_zlib_header(a)) return 0; + a->num_bits = 0; + a->code_buffer = 0; + do { + final = stbi__zreceive(a,1); + type = stbi__zreceive(a,2); + if (type == 0) { + if (!stbi__parse_uncompressed_block(a)) return 0; + } else if (type == 3) { + return 0; + } else { + if (type == 1) { + // use fixed code lengths + if (!stbi__zbuild_huffman(&a->z_length , stbi__zdefault_length , 288)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32)) return 0; + } else { + if (!stbi__compute_huffman_codes(a)) return 0; + } + if (!stbi__parse_huffman_block(a)) return 0; + } + } while (!final); + return 1; +} + +static int stbi__do_zlib(stbi__zbuf *a, char *obuf, int olen, int exp, int parse_header) +{ + a->zout_start = obuf; + a->zout = obuf; + a->zout_end = obuf + olen; + a->z_expandable = exp; + + return stbi__parse_zlib(a, parse_header); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, 1)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF char *stbi_zlib_decode_malloc(char const *buffer, int len, int *outlen) +{ + return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, parse_header)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, char const *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if 
(stbi__do_zlib(&a, obuffer, olen, 0, 1)) + return (int) (a.zout - a.zout_start); + else + return -1; +} + +STBIDEF char *stbi_zlib_decode_noheader_malloc(char const *buffer, int len, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(16384); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer+len; + if (stbi__do_zlib(&a, p, 16384, 1, 0)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 0)) + return (int) (a.zout - a.zout_start); + else + return -1; +} +#endif + +// public domain "baseline" PNG decoder v0.10 Sean Barrett 2006-11-18 +// simple implementation +// - only 8-bit samples +// - no CRC checking +// - allocates lots of intermediate memory +// - avoids problem of streaming data between subsystems +// - avoids explicit window management +// performance +// - uses stb_zlib, a PD zlib implementation with fast huffman decoding + +#ifndef STBI_NO_PNG +typedef struct +{ + stbi__uint32 length; + stbi__uint32 type; +} stbi__pngchunk; + +static stbi__pngchunk stbi__get_chunk_header(stbi__context *s) +{ + stbi__pngchunk c; + c.length = stbi__get32be(s); + c.type = stbi__get32be(s); + return c; +} + +static int stbi__check_png_header(stbi__context *s) +{ + static const stbi_uc png_sig[8] = { 137,80,78,71,13,10,26,10 }; + int i; + for (i=0; i < 8; ++i) + if (stbi__get8(s) != png_sig[i]) return stbi__err("bad png sig","Not a PNG"); + return 1; +} + +typedef struct +{ + stbi__context *s; + stbi_uc *idata, *expanded, *out; + int depth; +} stbi__png; + + +enum { + STBI__F_none=0, + STBI__F_sub=1, + STBI__F_up=2, + STBI__F_avg=3, + STBI__F_paeth=4, + // synthetic filters used for first scanline to avoid needing a dummy row of 0s + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static stbi_uc first_row_filter[5] = +{ + STBI__F_none, + STBI__F_sub, + STBI__F_none, + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static int stbi__paeth(int a, int b, int c) +{ + int p = a + b - c; + int pa = abs(p-a); + int pb = abs(p-b); + int pc = abs(p-c); + if (pa <= pb && pa <= pc) return a; + if (pb <= pc) return b; + return c; +} + +static const stbi_uc stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 }; + +// create the png data from post-deflated data +static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color) +{ + int bytes = (depth == 16? 
2 : 1); + stbi__context *s = a->s; + stbi__uint32 i,j,stride = x*out_n*bytes; + stbi__uint32 img_len, img_width_bytes; + int k; + int img_n = s->img_n; // copy it into a local for later + + int output_bytes = out_n*bytes; + int filter_bytes = img_n*bytes; + int width = x; + + STBI_ASSERT(out_n == s->img_n || out_n == s->img_n+1); + a->out = (stbi_uc *) stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into + if (!a->out) return stbi__err("outofmem", "Out of memory"); + + if (!stbi__mad3sizes_valid(img_n, x, depth, 7)) return stbi__err("too large", "Corrupt PNG"); + img_width_bytes = (((img_n * x * depth) + 7) >> 3); + img_len = (img_width_bytes + 1) * y; + + // we used to check for exact match between raw_len and img_len on non-interlaced PNGs, + // but issue #276 reported a PNG in the wild that had extra data at the end (all zeros), + // so just check for raw_len < img_len always. + if (raw_len < img_len) return stbi__err("not enough pixels","Corrupt PNG"); + + for (j=0; j < y; ++j) { + stbi_uc *cur = a->out + stride*j; + stbi_uc *prior; + int filter = *raw++; + + if (filter > 4) + return stbi__err("invalid filter","Corrupt PNG"); + + if (depth < 8) { + STBI_ASSERT(img_width_bytes <= x); + cur += x*out_n - img_width_bytes; // store output to the rightmost img_len bytes, so we can decode in place + filter_bytes = 1; + width = img_width_bytes; + } + prior = cur - stride; // bugfix: need to compute this after 'cur +=' computation above + + // if first row, use special filter that doesn't sample previous row + if (j == 0) filter = first_row_filter[filter]; + + // handle first byte explicitly + for (k=0; k < filter_bytes; ++k) { + switch (filter) { + case STBI__F_none : cur[k] = raw[k]; break; + case STBI__F_sub : cur[k] = raw[k]; break; + case STBI__F_up : cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break; + case STBI__F_avg : cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); break; + case STBI__F_paeth : cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(0,prior[k],0)); break; + case STBI__F_avg_first : cur[k] = raw[k]; break; + case STBI__F_paeth_first: cur[k] = raw[k]; break; + } + } + + if (depth == 8) { + if (img_n != out_n) + cur[img_n] = 255; // first pixel + raw += img_n; + cur += out_n; + prior += out_n; + } else if (depth == 16) { + if (img_n != out_n) { + cur[filter_bytes] = 255; // first pixel top byte + cur[filter_bytes+1] = 255; // first pixel bottom byte + } + raw += filter_bytes; + cur += output_bytes; + prior += output_bytes; + } else { + raw += 1; + cur += 1; + prior += 1; + } + + // this is a little gross, so that we don't switch per-pixel or per-component + if (depth < 8 || img_n == out_n) { + int nk = (width - 1)*filter_bytes; + #define STBI__CASE(f) \ + case f: \ + for (k=0; k < nk; ++k) + switch (filter) { + // "none" filter turns into a memcpy here; make that explicit. 
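+         // [editorial note, not in upstream stb_image]: STBI__CASE expands each
+         // line below to "case f: for (k=0; k < nk; ++k) { ... } break;", hoisting
+         // the filter switch out of the per-byte loop so it is taken once per row.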
+ case STBI__F_none: memcpy(cur, raw, nk); break;
+ STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); } break;
+ STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break;
+ STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-filter_bytes])>>1)); } break;
+ STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],prior[k],prior[k-filter_bytes])); } break;
+ STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); } break;
+ STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],0,0)); } break;
+ }
+ #undef STBI__CASE
+ raw += nk;
+ } else {
+ STBI_ASSERT(img_n+1 == out_n);
+ #define STBI__CASE(f) \
+ case f: \
+ for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) \
+ for (k=0; k < filter_bytes; ++k)
+ switch (filter) {
+ STBI__CASE(STBI__F_none) { cur[k] = raw[k]; } break;
+ STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k- output_bytes]); } break;
+ STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break;
+ STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k- output_bytes])>>1)); } break;
+ STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],prior[k],prior[k- output_bytes])); } break;
+ STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k- output_bytes] >> 1)); } break;
+ STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],0,0)); } break;
+ }
+ #undef STBI__CASE
+
+ // the loop above sets the high byte of the pixels' alpha, but for
+ // 16 bit png files we also need the low byte set. we'll do that here.
+ if (depth == 16) {
+ cur = a->out + stride*j; // start at the beginning of the row again
+ for (i=0; i < x; ++i,cur+=output_bytes) {
+ cur[filter_bytes+1] = 255;
+ }
+ }
+ }
+ }
+
+ // we make a separate pass to expand bits to pixels; for performance,
+ // this could run two scanlines behind the above code, so it won't
+ // interfere with filtering but will still be in the cache.
+ if (depth < 8) {
+ for (j=0; j < y; ++j) {
+ stbi_uc *cur = a->out + stride*j;
+ stbi_uc *in = a->out + stride*j + x*out_n - img_width_bytes;
+ // unpack 1/2/4-bit into an 8-bit buffer. allows us to keep the common 8-bit path optimal at minimal cost for 1/2/4-bit
+ // png guarantees byte alignment; if width is not a multiple of 8/4/2 we'll decode dummy trailing data that will be skipped in the later loop
+ stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range
+
+ // note that the final byte might overshoot and write more data than desired.
+ // we can allocate enough data that this never writes out of memory, but it
+ // could also overwrite the next scanline. can it overwrite non-empty data
+ // on the next scanline? yes, consider 1-pixel-wide scanlines with 1-bit-per-pixel.
+ // so we need to explicitly clamp the final ones + + if (depth == 4) { + for (k=x*img_n; k >= 2; k-=2, ++in) { + *cur++ = scale * ((*in >> 4) ); + *cur++ = scale * ((*in ) & 0x0f); + } + if (k > 0) *cur++ = scale * ((*in >> 4) ); + } else if (depth == 2) { + for (k=x*img_n; k >= 4; k-=4, ++in) { + *cur++ = scale * ((*in >> 6) ); + *cur++ = scale * ((*in >> 4) & 0x03); + *cur++ = scale * ((*in >> 2) & 0x03); + *cur++ = scale * ((*in ) & 0x03); + } + if (k > 0) *cur++ = scale * ((*in >> 6) ); + if (k > 1) *cur++ = scale * ((*in >> 4) & 0x03); + if (k > 2) *cur++ = scale * ((*in >> 2) & 0x03); + } else if (depth == 1) { + for (k=x*img_n; k >= 8; k-=8, ++in) { + *cur++ = scale * ((*in >> 7) ); + *cur++ = scale * ((*in >> 6) & 0x01); + *cur++ = scale * ((*in >> 5) & 0x01); + *cur++ = scale * ((*in >> 4) & 0x01); + *cur++ = scale * ((*in >> 3) & 0x01); + *cur++ = scale * ((*in >> 2) & 0x01); + *cur++ = scale * ((*in >> 1) & 0x01); + *cur++ = scale * ((*in ) & 0x01); + } + if (k > 0) *cur++ = scale * ((*in >> 7) ); + if (k > 1) *cur++ = scale * ((*in >> 6) & 0x01); + if (k > 2) *cur++ = scale * ((*in >> 5) & 0x01); + if (k > 3) *cur++ = scale * ((*in >> 4) & 0x01); + if (k > 4) *cur++ = scale * ((*in >> 3) & 0x01); + if (k > 5) *cur++ = scale * ((*in >> 2) & 0x01); + if (k > 6) *cur++ = scale * ((*in >> 1) & 0x01); + } + if (img_n != out_n) { + int q; + // insert alpha = 255 + cur = a->out + stride*j; + if (img_n == 1) { + for (q=x-1; q >= 0; --q) { + cur[q*2+1] = 255; + cur[q*2+0] = cur[q]; + } + } else { + STBI_ASSERT(img_n == 3); + for (q=x-1; q >= 0; --q) { + cur[q*4+3] = 255; + cur[q*4+2] = cur[q*3+2]; + cur[q*4+1] = cur[q*3+1]; + cur[q*4+0] = cur[q*3+0]; + } + } + } + } + } else if (depth == 16) { + // force the image data from big-endian to platform-native. + // this is done in a separate pass due to the decoding relying + // on the data being untouched, but could probably be done + // per-line during decode if care is taken. + stbi_uc *cur = a->out; + stbi__uint16 *cur16 = (stbi__uint16*)cur; + + for(i=0; i < x*y*out_n; ++i,cur16++,cur+=2) { + *cur16 = (cur[0] << 8) | cur[1]; + } + } + + return 1; +} + +static int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint32 image_data_len, int out_n, int depth, int color, int interlaced) +{ + int bytes = (depth == 16 ? 
2 : 1); + int out_bytes = out_n * bytes; + stbi_uc *final; + int p; + if (!interlaced) + return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color); + + // de-interlacing + final = (stbi_uc *) stbi__malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0); + for (p=0; p < 7; ++p) { + int xorig[] = { 0,4,0,2,0,1,0 }; + int yorig[] = { 0,0,4,0,2,0,1 }; + int xspc[] = { 8,8,4,4,2,2,1 }; + int yspc[] = { 8,8,8,4,4,2,2 }; + int i,j,x,y; + // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1 + x = (a->s->img_x - xorig[p] + xspc[p]-1) / xspc[p]; + y = (a->s->img_y - yorig[p] + yspc[p]-1) / yspc[p]; + if (x && y) { + stbi__uint32 img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y; + if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) { + STBI_FREE(final); + return 0; + } + for (j=0; j < y; ++j) { + for (i=0; i < x; ++i) { + int out_y = j*yspc[p]+yorig[p]; + int out_x = i*xspc[p]+xorig[p]; + memcpy(final + out_y*a->s->img_x*out_bytes + out_x*out_bytes, + a->out + (j*x+i)*out_bytes, out_bytes); + } + } + STBI_FREE(a->out); + a->out = NULL; + image_data += img_len; + image_data_len -= img_len; + } + } + a->out = final; + + return 1; +} + +static int stbi__compute_transparency(stbi__png *z, stbi_uc tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + // compute color-based transparency, assuming we've + // already got 255 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i=0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 0 : 255); + p += 2; + } + } else { + for (i=0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__compute_transparency16(stbi__png *z, stbi__uint16 tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi__uint16 *p = (stbi__uint16*) z->out; + + // compute color-based transparency, assuming we've + // already got 65535 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i = 0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 
0 : 65535); + p += 2; + } + } else { + for (i = 0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int pal_img_n) +{ + stbi__uint32 i, pixel_count = a->s->img_x * a->s->img_y; + stbi_uc *p, *temp_out, *orig = a->out; + + p = (stbi_uc *) stbi__malloc_mad2(pixel_count, pal_img_n, 0); + if (p == NULL) return stbi__err("outofmem", "Out of memory"); + + // between here and free(out) below, exitting would leak + temp_out = p; + + if (pal_img_n == 3) { + for (i=0; i < pixel_count; ++i) { + int n = orig[i]*4; + p[0] = palette[n ]; + p[1] = palette[n+1]; + p[2] = palette[n+2]; + p += 3; + } + } else { + for (i=0; i < pixel_count; ++i) { + int n = orig[i]*4; + p[0] = palette[n ]; + p[1] = palette[n+1]; + p[2] = palette[n+2]; + p[3] = palette[n+3]; + p += 4; + } + } + STBI_FREE(a->out); + a->out = temp_out; + + STBI_NOTUSED(len); + + return 1; +} + +static int stbi__unpremultiply_on_load = 0; +static int stbi__de_iphone_flag = 0; + +STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply) +{ + stbi__unpremultiply_on_load = flag_true_if_should_unpremultiply; +} + +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert) +{ + stbi__de_iphone_flag = flag_true_if_should_convert; +} + +static void stbi__de_iphone(stbi__png *z) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + if (s->img_out_n == 3) { // convert bgr to rgb + for (i=0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 3; + } + } else { + STBI_ASSERT(s->img_out_n == 4); + if (stbi__unpremultiply_on_load) { + // convert bgr to rgb and unpremultiply + for (i=0; i < pixel_count; ++i) { + stbi_uc a = p[3]; + stbi_uc t = p[0]; + if (a) { + stbi_uc half = a / 2; + p[0] = (p[2] * 255 + half) / a; + p[1] = (p[1] * 255 + half) / a; + p[2] = ( t * 255 + half) / a; + } else { + p[0] = p[2]; + p[2] = t; + } + p += 4; + } + } else { + // convert bgr to rgb + for (i=0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 4; + } + } + } +} + +#define STBI__PNG_TYPE(a,b,c,d) (((unsigned) (a) << 24) + ((unsigned) (b) << 16) + ((unsigned) (c) << 8) + (unsigned) (d)) + +static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) +{ + stbi_uc palette[1024], pal_img_n=0; + stbi_uc has_trans=0, tc[3]; + stbi__uint16 tc16[3]; + stbi__uint32 ioff=0, idata_limit=0, i, pal_len=0; + int first=1,k,interlace=0, color=0, is_iphone=0; + stbi__context *s = z->s; + + z->expanded = NULL; + z->idata = NULL; + z->out = NULL; + + if (!stbi__check_png_header(s)) return 0; + + if (scan == STBI__SCAN_type) return 1; + + for (;;) { + stbi__pngchunk c = stbi__get_chunk_header(s); + switch (c.type) { + case STBI__PNG_TYPE('C','g','B','I'): + is_iphone = 1; + stbi__skip(s, c.length); + break; + case STBI__PNG_TYPE('I','H','D','R'): { + int comp,filter; + if (!first) return stbi__err("multiple IHDR","Corrupt PNG"); + first = 0; + if (c.length != 13) return stbi__err("bad IHDR len","Corrupt PNG"); + s->img_x = stbi__get32be(s); if (s->img_x > (1 << 24)) return stbi__err("too large","Very large image (corrupt?)"); + s->img_y = stbi__get32be(s); if (s->img_y > (1 << 24)) return stbi__err("too large","Very large image (corrupt?)"); + z->depth = stbi__get8(s); if (z->depth != 1 && z->depth != 2 && z->depth != 4 && z->depth != 8 && z->depth != 16) return 
stbi__err("1/2/4/8/16-bit only","PNG not supported: 1/2/4/8/16-bit only"); + color = stbi__get8(s); if (color > 6) return stbi__err("bad ctype","Corrupt PNG"); + if (color == 3 && z->depth == 16) return stbi__err("bad ctype","Corrupt PNG"); + if (color == 3) pal_img_n = 3; else if (color & 1) return stbi__err("bad ctype","Corrupt PNG"); + comp = stbi__get8(s); if (comp) return stbi__err("bad comp method","Corrupt PNG"); + filter= stbi__get8(s); if (filter) return stbi__err("bad filter method","Corrupt PNG"); + interlace = stbi__get8(s); if (interlace>1) return stbi__err("bad interlace method","Corrupt PNG"); + if (!s->img_x || !s->img_y) return stbi__err("0-pixel image","Corrupt PNG"); + if (!pal_img_n) { + s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0); + if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode"); + if (scan == STBI__SCAN_header) return 1; + } else { + // if paletted, then pal_n is our final components, and + // img_n is # components to decompress/filter. + s->img_n = 1; + if ((1 << 30) / s->img_x / 4 < s->img_y) return stbi__err("too large","Corrupt PNG"); + // if SCAN_header, have to scan to see if we have a tRNS + } + break; + } + + case STBI__PNG_TYPE('P','L','T','E'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (c.length > 256*3) return stbi__err("invalid PLTE","Corrupt PNG"); + pal_len = c.length / 3; + if (pal_len * 3 != c.length) return stbi__err("invalid PLTE","Corrupt PNG"); + for (i=0; i < pal_len; ++i) { + palette[i*4+0] = stbi__get8(s); + palette[i*4+1] = stbi__get8(s); + palette[i*4+2] = stbi__get8(s); + palette[i*4+3] = 255; + } + break; + } + + case STBI__PNG_TYPE('t','R','N','S'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (z->idata) return stbi__err("tRNS after IDAT","Corrupt PNG"); + if (pal_img_n) { + if (scan == STBI__SCAN_header) { s->img_n = 4; return 1; } + if (pal_len == 0) return stbi__err("tRNS before PLTE","Corrupt PNG"); + if (c.length > pal_len) return stbi__err("bad tRNS len","Corrupt PNG"); + pal_img_n = 4; + for (i=0; i < c.length; ++i) + palette[i*4+3] = stbi__get8(s); + } else { + if (!(s->img_n & 1)) return stbi__err("tRNS with alpha","Corrupt PNG"); + if (c.length != (stbi__uint32) s->img_n*2) return stbi__err("bad tRNS len","Corrupt PNG"); + has_trans = 1; + if (z->depth == 16) { + for (k = 0; k < s->img_n; ++k) tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is + } else { + for (k = 0; k < s->img_n; ++k) tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger + } + } + break; + } + + case STBI__PNG_TYPE('I','D','A','T'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (pal_img_n && !pal_len) return stbi__err("no PLTE","Corrupt PNG"); + if (scan == STBI__SCAN_header) { s->img_n = pal_img_n; return 1; } + if ((int)(ioff + c.length) < (int)ioff) return 0; + if (ioff + c.length > idata_limit) { + stbi__uint32 idata_limit_old = idata_limit; + stbi_uc *p; + if (idata_limit == 0) idata_limit = c.length > 4096 ? 
c.length : 4096; + while (ioff + c.length > idata_limit) + idata_limit *= 2; + STBI_NOTUSED(idata_limit_old); + p = (stbi_uc *) STBI_REALLOC_SIZED(z->idata, idata_limit_old, idata_limit); if (p == NULL) return stbi__err("outofmem", "Out of memory"); + z->idata = p; + } + if (!stbi__getn(s, z->idata+ioff,c.length)) return stbi__err("outofdata","Corrupt PNG"); + ioff += c.length; + break; + } + + case STBI__PNG_TYPE('I','E','N','D'): { + stbi__uint32 raw_len, bpl; + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (scan != STBI__SCAN_load) return 1; + if (z->idata == NULL) return stbi__err("no IDAT","Corrupt PNG"); + // initial guess for decoded data size to avoid unnecessary reallocs + bpl = (s->img_x * z->depth + 7) / 8; // bytes per line, per component + raw_len = bpl * s->img_y * s->img_n /* pixels */ + s->img_y /* filter mode per row */; + z->expanded = (stbi_uc *) stbi_zlib_decode_malloc_guesssize_headerflag((char *) z->idata, ioff, raw_len, (int *) &raw_len, !is_iphone); + if (z->expanded == NULL) return 0; // zlib should set error + STBI_FREE(z->idata); z->idata = NULL; + if ((req_comp == s->img_n+1 && req_comp != 3 && !pal_img_n) || has_trans) + s->img_out_n = s->img_n+1; + else + s->img_out_n = s->img_n; + if (!stbi__create_png_image(z, z->expanded, raw_len, s->img_out_n, z->depth, color, interlace)) return 0; + if (has_trans) { + if (z->depth == 16) { + if (!stbi__compute_transparency16(z, tc16, s->img_out_n)) return 0; + } else { + if (!stbi__compute_transparency(z, tc, s->img_out_n)) return 0; + } + } + if (is_iphone && stbi__de_iphone_flag && s->img_out_n > 2) + stbi__de_iphone(z); + if (pal_img_n) { + // pal_img_n == 3 or 4 + s->img_n = pal_img_n; // record the actual colors we had + s->img_out_n = pal_img_n; + if (req_comp >= 3) s->img_out_n = req_comp; + if (!stbi__expand_png_palette(z, palette, pal_len, s->img_out_n)) + return 0; + } else if (has_trans) { + // non-paletted image with tRNS -> source image has (constant) alpha + ++s->img_n; + } + STBI_FREE(z->expanded); z->expanded = NULL; + return 1; + } + + default: + // if critical, fail + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if ((c.type & (1 << 29)) == 0) { + #ifndef STBI_NO_FAILURE_STRINGS + // not threadsafe + static char invalid_chunk[] = "XXXX PNG chunk not known"; + invalid_chunk[0] = STBI__BYTECAST(c.type >> 24); + invalid_chunk[1] = STBI__BYTECAST(c.type >> 16); + invalid_chunk[2] = STBI__BYTECAST(c.type >> 8); + invalid_chunk[3] = STBI__BYTECAST(c.type >> 0); + #endif + return stbi__err(invalid_chunk, "PNG not supported: unknown PNG chunk type"); + } + stbi__skip(s, c.length); + break; + } + // end of PNG chunk, read and skip CRC + stbi__get32be(s); + } +} + +static void *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp, stbi__result_info *ri) +{ + void *result=NULL; + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) { + if (p->depth < 8) + ri->bits_per_channel = 8; + else + ri->bits_per_channel = p->depth; + result = p->out; + p->out = NULL; + if (req_comp && req_comp != p->s->img_out_n) { + if (ri->bits_per_channel == 8) + result = stbi__convert_format((unsigned char *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + else + result = stbi__convert_format16((stbi__uint16 *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + p->s->img_out_n = req_comp; + if (result == NULL) return result; + } + *x = p->s->img_x; + *y = p->s->img_y; 
+ if (n) *n = p->s->img_n; + } + STBI_FREE(p->out); p->out = NULL; + STBI_FREE(p->expanded); p->expanded = NULL; + STBI_FREE(p->idata); p->idata = NULL; + + return result; +} + +static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi__png p; + p.s = s; + return stbi__do_png(&p, x,y,comp,req_comp, ri); +} + +static int stbi__png_test(stbi__context *s) +{ + int r; + r = stbi__check_png_header(s); + stbi__rewind(s); + return r; +} + +static int stbi__png_info_raw(stbi__png *p, int *x, int *y, int *comp) +{ + if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) { + stbi__rewind( p->s ); + return 0; + } + if (x) *x = p->s->img_x; + if (y) *y = p->s->img_y; + if (comp) *comp = p->s->img_n; + return 1; +} + +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__png p; + p.s = s; + return stbi__png_info_raw(&p, x, y, comp); +} + +static int stbi__png_is16(stbi__context *s) +{ + stbi__png p; + p.s = s; + if (!stbi__png_info_raw(&p, NULL, NULL, NULL)) + return 0; + if (p.depth != 16) { + stbi__rewind(p.s); + return 0; + } + return 1; +} +#endif + +// Microsoft/Windows BMP image + +#ifndef STBI_NO_BMP +static int stbi__bmp_test_raw(stbi__context *s) +{ + int r; + int sz; + if (stbi__get8(s) != 'B') return 0; + if (stbi__get8(s) != 'M') return 0; + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + stbi__get32le(s); // discard data offset + sz = stbi__get32le(s); + r = (sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124); + return r; +} + +static int stbi__bmp_test(stbi__context *s) +{ + int r = stbi__bmp_test_raw(s); + stbi__rewind(s); + return r; +} + + +// returns 0..31 for the highest set bit +static int stbi__high_bit(unsigned int z) +{ + int n=0; + if (z == 0) return -1; + if (z >= 0x10000) n += 16, z >>= 16; + if (z >= 0x00100) n += 8, z >>= 8; + if (z >= 0x00010) n += 4, z >>= 4; + if (z >= 0x00004) n += 2, z >>= 2; + if (z >= 0x00002) n += 1, z >>= 1; + return n; +} + +static int stbi__bitcount(unsigned int a) +{ + a = (a & 0x55555555) + ((a >> 1) & 0x55555555); // max 2 + a = (a & 0x33333333) + ((a >> 2) & 0x33333333); // max 4 + a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits + a = (a + (a >> 8)); // max 16 per 8 bits + a = (a + (a >> 16)); // max 32 per 8 bits + return a & 0xff; +} + +// extract an arbitrarily-aligned N-bit value (N=bits) +// from v, and then make it 8-bits long and fractionally +// extend it to full full range. 
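+// [editorial note, not in upstream stb_image]: the mul/shift tables below
+// replicate the n extracted bits across 8 bits. For a 5-bit value v this is
+// (v * 0x21) >> 2, which equals (v << 3) | (v >> 2): e.g. v = 0b10101 (21)
+// gives 21*33 = 693 = 0b1010110101, and 693 >> 2 = 173 = 0b10101101, so 0
+// maps to 0 and the maximum (31) maps to 255.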
+static int stbi__shiftsigned(int v, int shift, int bits) +{ + static unsigned int mul_table[9] = { + 0, + 0xff/*0b11111111*/, 0x55/*0b01010101*/, 0x49/*0b01001001*/, 0x11/*0b00010001*/, + 0x21/*0b00100001*/, 0x41/*0b01000001*/, 0x81/*0b10000001*/, 0x01/*0b00000001*/, + }; + static unsigned int shift_table[9] = { + 0, 0,0,1,0,2,4,6,0, + }; + if (shift < 0) + v <<= -shift; + else + v >>= shift; + STBI_ASSERT(v >= 0 && v < 256); + v >>= (8-bits); + STBI_ASSERT(bits >= 0 && bits <= 8); + return (int) ((unsigned) v * mul_table[bits]) >> shift_table[bits]; +} + +typedef struct +{ + int bpp, offset, hsz; + unsigned int mr,mg,mb,ma, all_a; +} stbi__bmp_data; + +static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) +{ + int hsz; + if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M') return stbi__errpuc("not BMP", "Corrupt BMP"); + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + info->offset = stbi__get32le(s); + info->hsz = hsz = stbi__get32le(s); + info->mr = info->mg = info->mb = info->ma = 0; + + if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) return stbi__errpuc("unknown BMP", "BMP type not supported: unknown"); + if (hsz == 12) { + s->img_x = stbi__get16le(s); + s->img_y = stbi__get16le(s); + } else { + s->img_x = stbi__get32le(s); + s->img_y = stbi__get32le(s); + } + if (stbi__get16le(s) != 1) return stbi__errpuc("bad BMP", "bad BMP"); + info->bpp = stbi__get16le(s); + if (hsz != 12) { + int compress = stbi__get32le(s); + if (compress == 1 || compress == 2) return stbi__errpuc("BMP RLE", "BMP type not supported: RLE"); + stbi__get32le(s); // discard sizeof + stbi__get32le(s); // discard hres + stbi__get32le(s); // discard vres + stbi__get32le(s); // discard colorsused + stbi__get32le(s); // discard max important + if (hsz == 40 || hsz == 56) { + if (hsz == 56) { + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + } + if (info->bpp == 16 || info->bpp == 32) { + if (compress == 0) { + if (info->bpp == 32) { + info->mr = 0xffu << 16; + info->mg = 0xffu << 8; + info->mb = 0xffu << 0; + info->ma = 0xffu << 24; + info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0 + } else { + info->mr = 31u << 10; + info->mg = 31u << 5; + info->mb = 31u << 0; + } + } else if (compress == 3) { + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + // not documented, but generated by photoshop and handled by mspaint + if (info->mr == info->mg && info->mg == info->mb) { + // ?!?!? 
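+ // [editorial note, not in upstream stb_image]: three identical masks cannot
+ // select distinct channels, so the file is rejected rather than guessed at.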
+ return stbi__errpuc("bad BMP", "bad BMP"); + } + } else + return stbi__errpuc("bad BMP", "bad BMP"); + } + } else { + int i; + if (hsz != 108 && hsz != 124) + return stbi__errpuc("bad BMP", "bad BMP"); + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + info->ma = stbi__get32le(s); + stbi__get32le(s); // discard color space + for (i=0; i < 12; ++i) + stbi__get32le(s); // discard color space parameters + if (hsz == 124) { + stbi__get32le(s); // discard rendering intent + stbi__get32le(s); // discard offset of profile data + stbi__get32le(s); // discard size of profile data + stbi__get32le(s); // discard reserved + } + } + } + return (void *) 1; +} + + +static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + unsigned int mr=0,mg=0,mb=0,ma=0, all_a; + stbi_uc pal[256][4]; + int psize=0,i,j,width; + int flip_vertically, pad, target; + stbi__bmp_data info; + STBI_NOTUSED(ri); + + info.all_a = 255; + if (stbi__bmp_parse_header(s, &info) == NULL) + return NULL; // error code already set + + flip_vertically = ((int) s->img_y) > 0; + s->img_y = abs((int) s->img_y); + + mr = info.mr; + mg = info.mg; + mb = info.mb; + ma = info.ma; + all_a = info.all_a; + + if (info.hsz == 12) { + if (info.bpp < 24) + psize = (info.offset - 14 - 24) / 3; + } else { + if (info.bpp < 16) + psize = (info.offset - 14 - info.hsz) >> 2; + } + + s->img_n = ma ? 4 : 3; + if (req_comp && req_comp >= 3) // we can directly decode 3 or 4 + target = req_comp; + else + target = s->img_n; // if they want monochrome, we'll post-convert + + // sanity-check size + if (!stbi__mad3sizes_valid(target, s->img_x, s->img_y, 0)) + return stbi__errpuc("too large", "Corrupt BMP"); + + out = (stbi_uc *) stbi__malloc_mad3(target, s->img_x, s->img_y, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + if (info.bpp < 16) { + int z=0; + if (psize == 0 || psize > 256) { STBI_FREE(out); return stbi__errpuc("invalid", "Corrupt BMP"); } + for (i=0; i < psize; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + if (info.hsz != 12) stbi__get8(s); + pal[i][3] = 255; + } + stbi__skip(s, info.offset - 14 - info.hsz - psize * (info.hsz == 12 ? 3 : 4)); + if (info.bpp == 1) width = (s->img_x + 7) >> 3; + else if (info.bpp == 4) width = (s->img_x + 1) >> 1; + else if (info.bpp == 8) width = s->img_x; + else { STBI_FREE(out); return stbi__errpuc("bad bpp", "Corrupt BMP"); } + pad = (-width)&3; + if (info.bpp == 1) { + for (j=0; j < (int) s->img_y; ++j) { + int bit_offset = 7, v = stbi__get8(s); + for (i=0; i < (int) s->img_x; ++i) { + int color = (v>>bit_offset)&0x1; + out[z++] = pal[color][0]; + out[z++] = pal[color][1]; + out[z++] = pal[color][2]; + if((--bit_offset) < 0) { + bit_offset = 7; + v = stbi__get8(s); + } + } + stbi__skip(s, pad); + } + } else { + for (j=0; j < (int) s->img_y; ++j) { + for (i=0; i < (int) s->img_x; i += 2) { + int v=stbi__get8(s),v2=0; + if (info.bpp == 4) { + v2 = v & 15; + v >>= 4; + } + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + if (i+1 == (int) s->img_x) break; + v = (info.bpp == 8) ? 
stbi__get8(s) : v2; + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + } + stbi__skip(s, pad); + } + } + } else { + int rshift=0,gshift=0,bshift=0,ashift=0,rcount=0,gcount=0,bcount=0,acount=0; + int z = 0; + int easy=0; + stbi__skip(s, info.offset - 14 - info.hsz); + if (info.bpp == 24) width = 3 * s->img_x; + else if (info.bpp == 16) width = 2*s->img_x; + else /* bpp = 32 and pad = 0 */ width=0; + pad = (-width) & 3; + if (info.bpp == 24) { + easy = 1; + } else if (info.bpp == 32) { + if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000) + easy = 2; + } + if (!easy) { + if (!mr || !mg || !mb) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } + // right shift amt to put high bit in position #7 + rshift = stbi__high_bit(mr)-7; rcount = stbi__bitcount(mr); + gshift = stbi__high_bit(mg)-7; gcount = stbi__bitcount(mg); + bshift = stbi__high_bit(mb)-7; bcount = stbi__bitcount(mb); + ashift = stbi__high_bit(ma)-7; acount = stbi__bitcount(ma); + } + for (j=0; j < (int) s->img_y; ++j) { + if (easy) { + for (i=0; i < (int) s->img_x; ++i) { + unsigned char a; + out[z+2] = stbi__get8(s); + out[z+1] = stbi__get8(s); + out[z+0] = stbi__get8(s); + z += 3; + a = (easy == 2 ? stbi__get8(s) : 255); + all_a |= a; + if (target == 4) out[z++] = a; + } + } else { + int bpp = info.bpp; + for (i=0; i < (int) s->img_x; ++i) { + stbi__uint32 v = (bpp == 16 ? (stbi__uint32) stbi__get16le(s) : stbi__get32le(s)); + unsigned int a; + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount)); + a = (ma ? stbi__shiftsigned(v & ma, ashift, acount) : 255); + all_a |= a; + if (target == 4) out[z++] = STBI__BYTECAST(a); + } + } + stbi__skip(s, pad); + } + } + + // if alpha channel is all 0s, replace with all 255s + if (target == 4 && all_a == 0) + for (i=4*s->img_x*s->img_y-1; i >= 0; i -= 4) + out[i] = 255; + + if (flip_vertically) { + stbi_uc t; + for (j=0; j < (int) s->img_y>>1; ++j) { + stbi_uc *p1 = out + j *s->img_x*target; + stbi_uc *p2 = out + (s->img_y-1-j)*s->img_x*target; + for (i=0; i < (int) s->img_x*target; ++i) { + t = p1[i], p1[i] = p2[i], p2[i] = t; + } + } + } + + if (req_comp && req_comp != target) { + out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + *x = s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + return out; +} +#endif + +// Targa Truevision - TGA +// by Jonathan Dummer +#ifndef STBI_NO_TGA +// returns STBI_rgb or whatever, 0 on error +static int stbi__tga_get_comp(int bits_per_pixel, int is_grey, int* is_rgb16) +{ + // only RGB or RGBA (incl. 
16bit) or grey allowed + if (is_rgb16) *is_rgb16 = 0; + switch(bits_per_pixel) { + case 8: return STBI_grey; + case 16: if(is_grey) return STBI_grey_alpha; + // fallthrough + case 15: if(is_rgb16) *is_rgb16 = 1; + return STBI_rgb; + case 24: // fallthrough + case 32: return bits_per_pixel/8; + default: return 0; + } +} + +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp) +{ + int tga_w, tga_h, tga_comp, tga_image_type, tga_bits_per_pixel, tga_colormap_bpp; + int sz, tga_colormap_type; + stbi__get8(s); // discard Offset + tga_colormap_type = stbi__get8(s); // colormap type + if( tga_colormap_type > 1 ) { + stbi__rewind(s); + return 0; // only RGB or indexed allowed + } + tga_image_type = stbi__get8(s); // image type + if ( tga_colormap_type == 1 ) { // colormapped (paletted) image + if (tga_image_type != 1 && tga_image_type != 9) { + stbi__rewind(s); + return 0; + } + stbi__skip(s,4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) { + stbi__rewind(s); + return 0; + } + stbi__skip(s,4); // skip image x and y origin + tga_colormap_bpp = sz; + } else { // "normal" image w/o colormap - only RGB or grey allowed, +/- RLE + if ( (tga_image_type != 2) && (tga_image_type != 3) && (tga_image_type != 10) && (tga_image_type != 11) ) { + stbi__rewind(s); + return 0; // only RGB or grey allowed, +/- RLE + } + stbi__skip(s,9); // skip colormap specification and image x/y origin + tga_colormap_bpp = 0; + } + tga_w = stbi__get16le(s); + if( tga_w < 1 ) { + stbi__rewind(s); + return 0; // test width + } + tga_h = stbi__get16le(s); + if( tga_h < 1 ) { + stbi__rewind(s); + return 0; // test height + } + tga_bits_per_pixel = stbi__get8(s); // bits per pixel + stbi__get8(s); // ignore alpha bits + if (tga_colormap_bpp != 0) { + if((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16)) { + // when using a colormap, tga_bits_per_pixel is the size of the indexes + // I don't think anything but 8 or 16bit indexes makes sense + stbi__rewind(s); + return 0; + } + tga_comp = stbi__tga_get_comp(tga_colormap_bpp, 0, NULL); + } else { + tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3) || (tga_image_type == 11), NULL); + } + if(!tga_comp) { + stbi__rewind(s); + return 0; + } + if (x) *x = tga_w; + if (y) *y = tga_h; + if (comp) *comp = tga_comp; + return 1; // seems to have passed everything +} + +static int stbi__tga_test(stbi__context *s) +{ + int res = 0; + int sz, tga_color_type; + stbi__get8(s); // discard Offset + tga_color_type = stbi__get8(s); // color type + if ( tga_color_type > 1 ) goto errorEnd; // only RGB or indexed allowed + sz = stbi__get8(s); // image type + if ( tga_color_type == 1 ) { // colormapped (paletted) image + if (sz != 1 && sz != 9) goto errorEnd; // colortype 1 demands image type 1 or 9 + stbi__skip(s,4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; + stbi__skip(s,4); // skip image x and y origin + } else { // "normal" image w/o colormap + if ( (sz != 2) && (sz != 3) && (sz != 10) && (sz != 11) ) goto errorEnd; // only RGB or grey allowed, +/- RLE + stbi__skip(s,9); // skip colormap specification and image x/y origin + } + if ( stbi__get16le(s) < 1 ) goto errorEnd; // test width + if ( stbi__get16le(s) < 1 ) goto errorEnd; // test height + sz 
= stbi__get8(s); // bits per pixel + if ( (tga_color_type == 1) && (sz != 8) && (sz != 16) ) goto errorEnd; // for colormapped images, bpp is size of an index + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; + + res = 1; // if we got this far, everything's good and we can return 1 instead of 0 + +errorEnd: + stbi__rewind(s); + return res; +} + +// read 16bit value and convert to 24bit RGB +static void stbi__tga_read_rgb16(stbi__context *s, stbi_uc* out) +{ + stbi__uint16 px = (stbi__uint16)stbi__get16le(s); + stbi__uint16 fiveBitMask = 31; + // we have 3 channels with 5bits each + int r = (px >> 10) & fiveBitMask; + int g = (px >> 5) & fiveBitMask; + int b = px & fiveBitMask; + // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped later + out[0] = (stbi_uc)((r * 255)/31); + out[1] = (stbi_uc)((g * 255)/31); + out[2] = (stbi_uc)((b * 255)/31); + + // some people claim that the most significant bit might be used for alpha + // (possibly if an alpha-bit is set in the "image descriptor byte") + // but that only made 16bit test images completely translucent... + // so let's treat all 15 and 16bit TGAs as RGB with no alpha. +} + +static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + // read in the TGA header stuff + int tga_offset = stbi__get8(s); + int tga_indexed = stbi__get8(s); + int tga_image_type = stbi__get8(s); + int tga_is_RLE = 0; + int tga_palette_start = stbi__get16le(s); + int tga_palette_len = stbi__get16le(s); + int tga_palette_bits = stbi__get8(s); + int tga_x_origin = stbi__get16le(s); + int tga_y_origin = stbi__get16le(s); + int tga_width = stbi__get16le(s); + int tga_height = stbi__get16le(s); + int tga_bits_per_pixel = stbi__get8(s); + int tga_comp, tga_rgb16=0; + int tga_inverted = stbi__get8(s); + // int tga_alpha_bits = tga_inverted & 15; // the 4 lowest bits - unused (useless?) + // image data + unsigned char *tga_data; + unsigned char *tga_palette = NULL; + int i, j; + unsigned char raw_data[4] = {0}; + int RLE_count = 0; + int RLE_repeating = 0; + int read_next_pixel = 1; + STBI_NOTUSED(ri); + + // do a tiny bit of processing + if ( tga_image_type >= 8 ) + { + tga_image_type -= 8; + tga_is_RLE = 1; + } + tga_inverted = 1 - ((tga_inverted >> 5) & 1); + + // If I'm paletted, then I'll use the number of bits from the palette + if ( tga_indexed ) tga_comp = stbi__tga_get_comp(tga_palette_bits, 0, &tga_rgb16); + else tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3), &tga_rgb16); + + if(!tga_comp) // shouldn't really happen, stbi__tga_test() should have ensured basic consistency + return stbi__errpuc("bad format", "Can't find out TGA pixelformat"); + + // tga info + *x = tga_width; + *y = tga_height; + if (comp) *comp = tga_comp; + + if (!stbi__mad3sizes_valid(tga_width, tga_height, tga_comp, 0)) + return stbi__errpuc("too large", "Corrupt TGA"); + + tga_data = (unsigned char*)stbi__malloc_mad3(tga_width, tga_height, tga_comp, 0); + if (!tga_data) return stbi__errpuc("outofmem", "Out of memory"); + + // skip to the data's starting position (offset usually = 0) + stbi__skip(s, tga_offset ); + + if ( !tga_indexed && !tga_is_RLE && !tga_rgb16 ) { + for (i=0; i < tga_height; ++i) { + int row = tga_inverted ? tga_height -i - 1 : i; + stbi_uc *tga_row = tga_data + row*tga_width*tga_comp; + stbi__getn(s, tga_row, tga_width * tga_comp); + } + } else { + // do I need to load a palette? 
(offset usually = 0) + stbi__skip(s, tga_palette_start ); + // load the palette + tga_palette = (unsigned char*)stbi__malloc_mad2(tga_palette_len, tga_comp, 0); + if (!tga_palette) { + STBI_FREE(tga_data); + return stbi__errpuc("outofmem", "Out of memory"); + } + if (tga_rgb16) { + stbi_uc *pal_entry = tga_palette; + STBI_ASSERT(tga_comp == STBI_rgb); + for (i=0; i < tga_palette_len; ++i) { + stbi__tga_read_rgb16(s, pal_entry); + pal_entry += tga_comp; + } + } else if (!stbi__getn(s, tga_palette, tga_palette_len * tga_comp)) { + STBI_FREE(tga_data); + STBI_FREE(tga_palette); + return stbi__errpuc("bad palette", "Corrupt TGA"); + } + } + // load the data + for (i=0; i < tga_width * tga_height; ++i) + { + // if I'm in RLE mode, do I need to get a RLE stbi__pngchunk? + if ( tga_is_RLE ) + { + if ( RLE_count == 0 ) + { + // yep, get the next byte as a RLE command + int RLE_cmd = stbi__get8(s); + RLE_count = 1 + (RLE_cmd & 127); + RLE_repeating = RLE_cmd >> 7; + read_next_pixel = 1; + } else if ( !RLE_repeating ) + { + read_next_pixel = 1; + } + } else + { + read_next_pixel = 1; + } + // OK, if I need to read a pixel, do it now + if ( read_next_pixel ) + { + // load however much data we did have + if ( tga_indexed ) + { + // read in index, then perform the lookup + int pal_idx = (tga_bits_per_pixel == 8) ? stbi__get8(s) : stbi__get16le(s); + if ( pal_idx >= tga_palette_len ) { + // invalid index + pal_idx = 0; + } + pal_idx *= tga_comp; + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = tga_palette[pal_idx+j]; + } + } else if(tga_rgb16) { + STBI_ASSERT(tga_comp == STBI_rgb); + stbi__tga_read_rgb16(s, raw_data); + } else { + // read in the data raw + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = stbi__get8(s); + } + } + // clear the reading flag for the next pixel + read_next_pixel = 0; + } // end of reading a pixel + + // copy data + for (j = 0; j < tga_comp; ++j) + tga_data[i*tga_comp+j] = raw_data[j]; + + // in case we're in RLE mode, keep counting down + --RLE_count; + } + // do I need to invert the image? + if ( tga_inverted ) + { + for (j = 0; j*2 < tga_height; ++j) + { + int index1 = j * tga_width * tga_comp; + int index2 = (tga_height - 1 - j) * tga_width * tga_comp; + for (i = tga_width * tga_comp; i > 0; --i) + { + unsigned char temp = tga_data[index1]; + tga_data[index1] = tga_data[index2]; + tga_data[index2] = temp; + ++index1; + ++index2; + } + } + } + // clear my palette, if I had one + if ( tga_palette != NULL ) + { + STBI_FREE( tga_palette ); + } + } + + // swap RGB - if the source data was RGB16, it already is in the right order + if (tga_comp >= 3 && !tga_rgb16) + { + unsigned char* tga_pixel = tga_data; + for (i=0; i < tga_width * tga_height; ++i) + { + unsigned char temp = tga_pixel[0]; + tga_pixel[0] = tga_pixel[2]; + tga_pixel[2] = temp; + tga_pixel += tga_comp; + } + } + + // convert to target component count + if (req_comp && req_comp != tga_comp) + tga_data = stbi__convert_format(tga_data, tga_comp, req_comp, tga_width, tga_height); + + // the things I do to get rid of an error message, and yet keep + // Microsoft's C compilers happy... 
[8^( + tga_palette_start = tga_palette_len = tga_palette_bits = + tga_x_origin = tga_y_origin = 0; + // OK, done + return tga_data; +} +#endif + +// ************************************************************************************************* +// Photoshop PSD loader -- PD by Thatcher Ulrich, integration by Nicolas Schulz, tweaked by STB + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s) +{ + int r = (stbi__get32be(s) == 0x38425053); + stbi__rewind(s); + return r; +} + +static int stbi__psd_decode_rle(stbi__context *s, stbi_uc *p, int pixelCount) +{ + int count, nleft, len; + + count = 0; + while ((nleft = pixelCount - count) > 0) { + len = stbi__get8(s); + if (len == 128) { + // No-op. + } else if (len < 128) { + // Copy next len+1 bytes literally. + len++; + if (len > nleft) return 0; // corrupt data + count += len; + while (len) { + *p = stbi__get8(s); + p += 4; + len--; + } + } else if (len > 128) { + stbi_uc val; + // Next -len+1 bytes in the dest are replicated from next source byte. + // (Interpret len as a negative 8-bit int.) + len = 257 - len; + if (len > nleft) return 0; // corrupt data + val = stbi__get8(s); + count += len; + while (len) { + *p = val; + p += 4; + len--; + } + } + } + + return 1; +} + +static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc) +{ + int pixelCount; + int channelCount, compression; + int channel, i; + int bitdepth; + int w,h; + stbi_uc *out; + STBI_NOTUSED(ri); + + // Check identifier + if (stbi__get32be(s) != 0x38425053) // "8BPS" + return stbi__errpuc("not PSD", "Corrupt PSD image"); + + // Check file type version. + if (stbi__get16be(s) != 1) + return stbi__errpuc("wrong version", "Unsupported version of PSD image"); + + // Skip 6 reserved bytes. + stbi__skip(s, 6 ); + + // Read the number of channels (R, G, B, A, etc). + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) + return stbi__errpuc("wrong channel count", "Unsupported number of channels in PSD image"); + + // Read the rows and columns of the image. + h = stbi__get32be(s); + w = stbi__get32be(s); + + // Make sure the depth is 8 bits. + bitdepth = stbi__get16be(s); + if (bitdepth != 8 && bitdepth != 16) + return stbi__errpuc("unsupported bit depth", "PSD bit depth is not 8 or 16 bit"); + + // Make sure the color mode is RGB. + // Valid options are: + // 0: Bitmap + // 1: Grayscale + // 2: Indexed color + // 3: RGB color + // 4: CMYK color + // 7: Multichannel + // 8: Duotone + // 9: Lab color + if (stbi__get16be(s) != 3) + return stbi__errpuc("wrong color format", "PSD is not in RGB color format"); + + // Skip the Mode Data. (It's the palette for indexed color; other info for other modes.) + stbi__skip(s,stbi__get32be(s) ); + + // Skip the image resources. (resolution, pen tool paths, etc) + stbi__skip(s, stbi__get32be(s) ); + + // Skip the reserved data. + stbi__skip(s, stbi__get32be(s) ); + + // Find out if the data is compressed. + // Known values: + // 0: no compression + // 1: RLE compressed + compression = stbi__get16be(s); + if (compression > 1) + return stbi__errpuc("bad compression", "PSD has an unknown compression format"); + + // Check size + if (!stbi__mad3sizes_valid(4, w, h, 0)) + return stbi__errpuc("too large", "Corrupt PSD"); + + // Create the destination image. 
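+ // Editor's note (not upstream stb_image): an illustrative PackBits sample for + // stbi__psd_decode_rle above: the byte stream 0x01 'A' 'B' 0xFD 'C' decodes to + // "AB" (len=1 -> copy len+1=2 bytes literally) followed by "CCCC" (len=0xFD=253 + // -> 257-253=4 copies of the next byte).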
+ + if (!compression && bitdepth == 16 && bpc == 16) { + out = (stbi_uc *) stbi__malloc_mad3(8, w, h, 0); + ri->bits_per_channel = 16; + } else + out = (stbi_uc *) stbi__malloc(4 * w*h); + + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + pixelCount = w*h; + + // Initialize the data to zero. + //memset( out, 0, pixelCount * 4 ); + + // Finally, the image data. + if (compression) { + // RLE as used by .PSD and .TIFF + // Loop until you get the number of unpacked bytes you are expecting: + // Read the next source byte into n. + // If n is between 0 and 127 inclusive, copy the next n+1 bytes literally. + // Else if n is between -127 and -1 inclusive, copy the next byte -n+1 times. + // Else if n is 128, noop. + // Endloop + + // The RLE-compressed data is preceded by a 2-byte data count for each row in the data, + // which we're going to just skip. + stbi__skip(s, h * channelCount * 2 ); + + // Read the RLE data by channel. + for (channel = 0; channel < 4; channel++) { + stbi_uc *p; + + p = out+channel; + if (channel >= channelCount) { + // Fill this channel with default data. + for (i = 0; i < pixelCount; i++, p += 4) + *p = (channel == 3 ? 255 : 0); + } else { + // Read the RLE data. + if (!stbi__psd_decode_rle(s, p, pixelCount)) { + STBI_FREE(out); + return stbi__errpuc("corrupt", "bad RLE data"); + } + } + } + + } else { + // We're at the raw image data. It's each channel in order (Red, Green, Blue, Alpha, ...) + // where each channel consists of an 8-bit (or 16-bit) value for each pixel in the image. + + // Read the data by channel. + for (channel = 0; channel < 4; channel++) { + if (channel >= channelCount) { + // Fill this channel with default data. + if (bitdepth == 16 && bpc == 16) { + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + stbi__uint16 val = channel == 3 ? 65535 : 0; + for (i = 0; i < pixelCount; i++, q += 4) + *q = val; + } else { + stbi_uc *p = out+channel; + stbi_uc val = channel == 3 ?
255 : 0; + for (i = 0; i < pixelCount; i++, p += 4) + *p = val; + } + } else { + if (ri->bits_per_channel == 16) { // output bpc + stbi__uint16 *q = ((stbi__uint16 *) out) + channel; + for (i = 0; i < pixelCount; i++, q += 4) + *q = (stbi__uint16) stbi__get16be(s); + } else { + stbi_uc *p = out+channel; + if (bitdepth == 16) { // input bpc + for (i = 0; i < pixelCount; i++, p += 4) + *p = (stbi_uc) (stbi__get16be(s) >> 8); + } else { + for (i = 0; i < pixelCount; i++, p += 4) + *p = stbi__get8(s); + } + } + } + } + } + + // remove weird white matte from PSD + if (channelCount >= 4) { + if (ri->bits_per_channel == 16) { + for (i=0; i < w*h; ++i) { + stbi__uint16 *pixel = (stbi__uint16 *) out + 4*i; + if (pixel[3] != 0 && pixel[3] != 65535) { + float a = pixel[3] / 65535.0f; + float ra = 1.0f / a; + float inv_a = 65535.0f * (1 - ra); + pixel[0] = (stbi__uint16) (pixel[0]*ra + inv_a); + pixel[1] = (stbi__uint16) (pixel[1]*ra + inv_a); + pixel[2] = (stbi__uint16) (pixel[2]*ra + inv_a); + } + } + } else { + for (i=0; i < w*h; ++i) { + unsigned char *pixel = out + 4*i; + if (pixel[3] != 0 && pixel[3] != 255) { + float a = pixel[3] / 255.0f; + float ra = 1.0f / a; + float inv_a = 255.0f * (1 - ra); + pixel[0] = (unsigned char) (pixel[0]*ra + inv_a); + pixel[1] = (unsigned char) (pixel[1]*ra + inv_a); + pixel[2] = (unsigned char) (pixel[2]*ra + inv_a); + } + } + } + } + + // convert to desired output format + if (req_comp && req_comp != 4) { + if (ri->bits_per_channel == 16) + out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, 4, req_comp, w, h); + else + out = stbi__convert_format(out, 4, req_comp, w, h); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + if (comp) *comp = 4; + *y = h; + *x = w; + + return out; +} +#endif + +// ************************************************************************************************* +// Softimage PIC loader +// by Tom Seddon +// +// See http://softimage.wiki.softimage.com/index.php/INFO:_PIC_file_format +// See http://ozviz.wasp.uwa.edu.au/~pbourke/dataformats/softimagepic/ + +#ifndef STBI_NO_PIC +static int stbi__pic_is4(stbi__context *s,const char *str) +{ + int i; + for (i=0; i<4; ++i) + if (stbi__get8(s) != (stbi_uc)str[i]) + return 0; + + return 1; +} + +static int stbi__pic_test_core(stbi__context *s) +{ + int i; + + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) + return 0; + + for(i=0;i<84;++i) + stbi__get8(s); + + if (!stbi__pic_is4(s,"PICT")) + return 0; + + return 1; +} + +typedef struct +{ + stbi_uc size,type,channel; +} stbi__pic_packet; + +static stbi_uc *stbi__readval(stbi__context *s, int channel, stbi_uc *dest) +{ + int mask=0x80, i; + + for (i=0; i<4; ++i, mask>>=1) { + if (channel & mask) { + if (stbi__at_eof(s)) return stbi__errpuc("bad file","PIC file too short"); + dest[i]=stbi__get8(s); + } + } + + return dest; +} + +static void stbi__copyval(int channel,stbi_uc *dest,const stbi_uc *src) +{ + int mask=0x80,i; + + for (i=0;i<4; ++i, mask>>=1) + if (channel&mask) + dest[i]=src[i]; +} + +static stbi_uc *stbi__pic_load_core(stbi__context *s,int width,int height,int *comp, stbi_uc *result) +{ + int act_comp=0,num_packets=0,y,chained; + stbi__pic_packet packets[10]; + + // this will (should...) cater for even some bizarre stuff like having data + // for the same channel in multiple packets. 
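+ // Editor's note (not upstream stb_image): each 4-byte packet header read below is + // {chained, size, type, channel}; channel is a bitmask consumed MSB-first by + // stbi__readval (0x80=R, 0x40=G, 0x20=B, 0x10=A), which is why act_comp & 0x10 + // decides between 3 and 4 components. E.g. a lone packet {0, 8, 2, 0xE0} declares + // one mixed-RLE packet covering R, G and B only.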
+ do { + stbi__pic_packet *packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return stbi__errpuc("bad format","too many packets"); + + packet = &packets[num_packets++]; + + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + + act_comp |= packet->channel; + + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (reading packets)"); + if (packet->size != 8) return stbi__errpuc("bad format","packet isn't 8bpp"); + } while (chained); + + *comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel? + + for(y=0; y<height; ++y) { + int packet_idx; + + for(packet_idx=0; packet_idx < num_packets; ++packet_idx) { + stbi__pic_packet *packet = &packets[packet_idx]; + stbi_uc *dest = result+y*width*4; + + switch (packet->type) { + default: + return stbi__errpuc("bad format","packet has bad compression type"); + + case 0: {//uncompressed + int x; + + for(x=0;x<width;++x, dest+=4) + if (!stbi__readval(s,packet->channel,dest)) + return 0; + break; + } + + case 1://Pure RLE + { + int left=width, i; + + while (left>0) { + stbi_uc count,value[4]; + + count=stbi__get8(s); + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pure read count)"); + + if (count > left) + count = (stbi_uc) left; + + if (!stbi__readval(s,packet->channel,value)) return 0; + + for(i=0; i<count; ++i, dest+=4) + stbi__copyval(packet->channel,dest,value); + left -= count; + } + } + break; + + case 2: {//Mixed RLE + int left=width; + while (left>0) { + int count = stbi__get8(s), i; + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (mixed read count)"); + + if (count >= 128) { // Repeated + stbi_uc value[4]; + + if (count==128) + count = stbi__get16be(s); + else + count -= 127; + if (count > left) + return stbi__errpuc("bad file","scanline overrun"); + + if (!stbi__readval(s,packet->channel,value)) + return 0; + + for(i=0;i<count;++i, dest += 4) + stbi__copyval(packet->channel,dest,value); + } else { // Raw + ++count; + if (count>left) return stbi__errpuc("bad file","scanline overrun"); + + for(i=0;i<count;++i, dest+=4) + if (!stbi__readval(s,packet->channel,dest)) + return 0; + } + left-=count; + } + break; + } + } + } + } + + return result; +} + +static void *stbi__pic_load(stbi__context *s,int *px,int *py,int *comp,int req_comp, stbi__result_info *ri) +{ + stbi_uc *result; + int i, x,y, internal_comp; + STBI_NOTUSED(ri); + + if (!comp) comp = &internal_comp; + + for (i=0; i<92; ++i) + stbi__get8(s); + + x = stbi__get16be(s); + y = stbi__get16be(s); + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pic header)"); + if (!stbi__mad3sizes_valid(x, y, 4, 0)) return stbi__errpuc("too large", "PIC image too large to decode"); + + stbi__get32be(s); //skip `ratio' + stbi__get16be(s); //skip `fields' + stbi__get16be(s); //skip `pad' + + // intermediate buffer is RGBA + result = (stbi_uc *) stbi__malloc_mad3(x, y, 4, 0); + memset(result, 0xff, x*y*4); + + if (!stbi__pic_load_core(s,x,y,comp, result)) { + STBI_FREE(result); + result=0; + } + *px = x; + *py = y; + if (req_comp == 0) req_comp = *comp; + result=stbi__convert_format(result,4,req_comp,x,y); + + return result; +} + +static int stbi__pic_test(stbi__context *s) +{ + int r = stbi__pic_test_core(s); + stbi__rewind(s); + return r; +} +#endif + +// ************************************************************************************************* +// GIF loader -- public domain by Jean-Marc Lienher -- simplified/shrunk by stb + +#ifndef STBI_NO_GIF +typedef struct +{ + stbi__int16 prefix; + stbi_uc first; + stbi_uc suffix; +} stbi__gif_lzw; + +typedef struct +{ + int w,h; + stbi_uc *out; // output buffer (always 4 components) + stbi_uc *background; // The current "background" as far as a gif is concerned + stbi_uc *history; + int flags, bgindex, ratio, transparent, eflags; + stbi_uc pal[256][4]; + stbi_uc lpal[256][4]; + 
stbi__gif_lzw codes[8192]; + stbi_uc *color_table; + int parse, step; + int lflags; + int start_x, start_y; + int max_x, max_y; + int cur_x, cur_y; + int line_size; + int delay; +} stbi__gif; + +static int stbi__gif_test_raw(stbi__context *s) +{ + int sz; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') return 0; + sz = stbi__get8(s); + if (sz != '9' && sz != '7') return 0; + if (stbi__get8(s) != 'a') return 0; + return 1; +} + +static int stbi__gif_test(stbi__context *s) +{ + int r = stbi__gif_test_raw(s); + stbi__rewind(s); + return r; +} + +static void stbi__gif_parse_colortable(stbi__context *s, stbi_uc pal[256][4], int num_entries, int transp) +{ + int i; + for (i=0; i < num_entries; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + pal[i][3] = transp == i ? 0 : 255; + } +} + +static int stbi__gif_header(stbi__context *s, stbi__gif *g, int *comp, int is_info) +{ + stbi_uc version; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') + return stbi__err("not GIF", "Corrupt GIF"); + + version = stbi__get8(s); + if (version != '7' && version != '9') return stbi__err("not GIF", "Corrupt GIF"); + if (stbi__get8(s) != 'a') return stbi__err("not GIF", "Corrupt GIF"); + + stbi__g_failure_reason = ""; + g->w = stbi__get16le(s); + g->h = stbi__get16le(s); + g->flags = stbi__get8(s); + g->bgindex = stbi__get8(s); + g->ratio = stbi__get8(s); + g->transparent = -1; + + if (comp != 0) *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments + + if (is_info) return 1; + + if (g->flags & 0x80) + stbi__gif_parse_colortable(s,g->pal, 2 << (g->flags & 7), -1); + + return 1; +} + +static int stbi__gif_info_raw(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__gif* g = (stbi__gif*) stbi__malloc(sizeof(stbi__gif)); + if (!stbi__gif_header(s, g, comp, 1)) { + STBI_FREE(g); + stbi__rewind( s ); + return 0; + } + if (x) *x = g->w; + if (y) *y = g->h; + STBI_FREE(g); + return 1; +} + +static void stbi__out_gif_code(stbi__gif *g, stbi__uint16 code) +{ + stbi_uc *p, *c; + int idx; + + // recurse to decode the prefixes, since the linked-list is backwards, + // and working backwards through an interleaved image would be nasty + if (g->codes[code].prefix >= 0) + stbi__out_gif_code(g, g->codes[code].prefix); + + if (g->cur_y >= g->max_y) return; + + idx = g->cur_x + g->cur_y; + p = &g->out[idx]; + g->history[idx / 4] = 1; + + c = &g->color_table[g->codes[code].suffix * 4]; + if (c[3] > 128) { // don't render transparent pixels; + p[0] = c[2]; + p[1] = c[1]; + p[2] = c[0]; + p[3] = c[3]; + } + g->cur_x += 4; + + if (g->cur_x >= g->max_x) { + g->cur_x = g->start_x; + g->cur_y += g->step; + + while (g->cur_y >= g->max_y && g->parse > 0) { + g->step = (1 << g->parse) * g->line_size; + g->cur_y = g->start_y + (g->step >> 1); + --g->parse; + } + } +} + +static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g) +{ + stbi_uc lzw_cs; + stbi__int32 len, init_code; + stbi__uint32 first; + stbi__int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear; + stbi__gif_lzw *p; + + lzw_cs = stbi__get8(s); + if (lzw_cs > 12) return NULL; + clear = 1 << lzw_cs; + first = 1; + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + bits = 0; + valid_bits = 0; + for (init_code = 0; init_code < clear; init_code++) { + g->codes[init_code].prefix = -1; + g->codes[init_code].first = (stbi_uc) init_code; + g->codes[init_code].suffix = (stbi_uc) 
init_code; + } + + // support no starting clear code + avail = clear+2; + oldcode = -1; + + len = 0; + for(;;) { + if (valid_bits < codesize) { + if (len == 0) { + len = stbi__get8(s); // start new block + if (len == 0) + return g->out; + } + --len; + bits |= (stbi__int32) stbi__get8(s) << valid_bits; + valid_bits += 8; + } else { + stbi__int32 code = bits & codemask; + bits >>= codesize; + valid_bits -= codesize; + // @OPTIMIZE: is there some way we can accelerate the non-clear path? + if (code == clear) { // clear code + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + avail = clear + 2; + oldcode = -1; + first = 0; + } else if (code == clear + 1) { // end of stream code + stbi__skip(s, len); + while ((len = stbi__get8(s)) > 0) + stbi__skip(s,len); + return g->out; + } else if (code <= avail) { + if (first) { + return stbi__errpuc("no clear code", "Corrupt GIF"); + } + + if (oldcode >= 0) { + p = &g->codes[avail++]; + if (avail > 8192) { + return stbi__errpuc("too many codes", "Corrupt GIF"); + } + + p->prefix = (stbi__int16) oldcode; + p->first = g->codes[oldcode].first; + p->suffix = (code == avail) ? p->first : g->codes[code].first; + } else if (code == avail) + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + + stbi__out_gif_code(g, (stbi__uint16) code); + + if ((avail & codemask) == 0 && avail <= 0x0FFF) { + codesize++; + codemask = (1 << codesize) - 1; + } + + oldcode = code; + } else { + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + } + } + } +} + +// this function is designed to support animated gifs, although stb_image doesn't support it +// two back is the image from two frames ago, used for a very specific disposal format +static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, int req_comp, stbi_uc *two_back) +{ + int dispose; + int first_frame; + int pi; + int pcount; + + // on first frame, any non-written pixels get the background colour (non-transparent) + first_frame = 0; + if (g->out == 0) { + if (!stbi__gif_header(s, g, comp,0)) return 0; // stbi__g_failure_reason set by stbi__gif_header + g->out = (stbi_uc *) stbi__malloc(4 * g->w * g->h); + g->background = (stbi_uc *) stbi__malloc(4 * g->w * g->h); + g->history = (stbi_uc *) stbi__malloc(g->w * g->h); + if (g->out == 0) return stbi__errpuc("outofmem", "Out of memory"); + + // image is treated as "transparent" at the start - i.e., nothing overwrites the current background; + // background colour is only used for pixels that are not rendered first frame, after that "background" + // color refers to the color that was there the previous frame. + memset( g->out, 0x00, 4 * g->w * g->h ); + memset( g->background, 0x00, 4 * g->w * g->h ); // state of the background (starts transparent) + memset( g->history, 0x00, g->w * g->h ); // pixels that were affected previous frame + first_frame = 1; + } else { + // second frame - how do we dispose of the previous one? 
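+ // Editor's note (not upstream stb_image): the disposal method extracted below sits + // in bits 2..4 of the Graphic Control eflags: 0 = not specified, 1 = leave in place, + // 2 = restore background, 3 = restore previous frame; hence (eflags & 0x1C) >> 2.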
+ dispose = (g->eflags & 0x1C) >> 2; + pcount = g->w * g->h; + + if ((dispose == 3) && (two_back == 0)) { + dispose = 2; // if I don't have an image to revert back to, default to the old background + } + + if (dispose == 3) { // use previous graphic + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi]) { + memcpy( &g->out[pi * 4], &two_back[pi * 4], 4 ); + } + } + } else if (dispose == 2) { + // restore what was changed last frame to background before that frame; + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi]) { + memcpy( &g->out[pi * 4], &g->background[pi * 4], 4 ); + } + } + } else { + // This is a non-disposal case either way, so just + // leave the pixels as is, and they will become the new background + // 1: do not dispose + // 0: not specified. + } + + // background is what out is after the undoing of the previous frame; + memcpy( g->background, g->out, 4 * g->w * g->h ); + } + + // clear my history; + memset( g->history, 0x00, g->w * g->h ); // pixels that were affected previous frame + + for (;;) { + int tag = stbi__get8(s); + switch (tag) { + case 0x2C: /* Image Descriptor */ + { + stbi__int32 x, y, w, h; + stbi_uc *o; + + x = stbi__get16le(s); + y = stbi__get16le(s); + w = stbi__get16le(s); + h = stbi__get16le(s); + if (((x + w) > (g->w)) || ((y + h) > (g->h))) + return stbi__errpuc("bad Image Descriptor", "Corrupt GIF"); + + g->line_size = g->w * 4; + g->start_x = x * 4; + g->start_y = y * g->line_size; + g->max_x = g->start_x + w * 4; + g->max_y = g->start_y + h * g->line_size; + g->cur_x = g->start_x; + g->cur_y = g->start_y; + + g->lflags = stbi__get8(s); + + if (g->lflags & 0x40) { + g->step = 8 * g->line_size; // first interlaced spacing + g->parse = 3; + } else { + g->step = g->line_size; + g->parse = 0; + } + + if (g->lflags & 0x80) { + stbi__gif_parse_colortable(s,g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? g->transparent : -1); + g->color_table = (stbi_uc *) g->lpal; + } else if (g->flags & 0x80) { + g->color_table = (stbi_uc *) g->pal; + } else + return stbi__errpuc("missing color table", "Corrupt GIF"); + + o = stbi__process_gif_raster(s, g); + if (o == NULL) return NULL; + + // if this was the first frame, + pcount = g->w * g->h; + if (first_frame && (g->bgindex > 0)) { + // if first frame, any pixel not drawn to gets the background color + for (pi = 0; pi < pcount; ++pi) { + if (g->history[pi] == 0) { + g->pal[g->bgindex][3] = 255; // just in case it was made transparent, undo that; it will be reset next frame if need be; + memcpy( &g->out[pi * 4], &g->pal[g->bgindex], 4 ); + } + } + } + + return o; + } + + case 0x21: // Comment Extension. + { + int len; + int ext = stbi__get8(s); + if (ext == 0xF9) { // Graphic Control Extension. + len = stbi__get8(s); + if (len == 4) { + g->eflags = stbi__get8(s); + g->delay = 10 * stbi__get16le(s); // delay - 1/100th of a second, saving as 1/1000ths. 
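+ // Editor's note (not upstream stb_image): a sample Graphic Control Extension as + // parsed here: 21 F9 04 05 0A 00 02 00 carries eflags=0x05 (dispose=1, transparent- + // color flag set), a little-endian delay of 0x000A (10/100 s, stored as 100 ms), + // transparent index 2, then the block terminator.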
+ + // unset old transparent + if (g->transparent >= 0) { + g->pal[g->transparent][3] = 255; + } + if (g->eflags & 0x01) { + g->transparent = stbi__get8(s); + if (g->transparent >= 0) { + g->pal[g->transparent][3] = 0; + } + } else { + // don't need transparent + stbi__skip(s, 1); + g->transparent = -1; + } + } else { + stbi__skip(s, len); + break; + } + } + while ((len = stbi__get8(s)) != 0) { + stbi__skip(s, len); + } + break; + } + + case 0x3B: // gif stream termination code + return (stbi_uc *) s; // using '1' causes warning on some compilers + + default: + return stbi__errpuc("unknown code", "Corrupt GIF"); + } + } +} + +static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp) +{ + if (stbi__gif_test(s)) { + int layers = 0; + stbi_uc *u = 0; + stbi_uc *out = 0; + stbi_uc *two_back = 0; + stbi__gif g; + int stride; + memset(&g, 0, sizeof(g)); + if (delays) { + *delays = 0; + } + + do { + u = stbi__gif_load_next(s, &g, comp, req_comp, two_back); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + + if (u) { + *x = g.w; + *y = g.h; + ++layers; + stride = g.w * g.h * 4; + + if (out) { + out = (stbi_uc*) STBI_REALLOC( out, layers * stride ); + if (delays) { + *delays = (int*) STBI_REALLOC( *delays, sizeof(int) * layers ); + } + } else { + out = (stbi_uc*)stbi__malloc( layers * stride ); + if (delays) { + *delays = (int*) stbi__malloc( layers * sizeof(int) ); + } + } + memcpy( out + ((layers - 1) * stride), u, stride ); + if (layers >= 2) { + two_back = out - 2 * stride; + } + + if (delays) { + (*delays)[layers - 1U] = g.delay; + } + } + } while (u != 0); + + // free temp buffer; + STBI_FREE(g.out); + STBI_FREE(g.history); + STBI_FREE(g.background); + + // do the final conversion after loading everything; + if (req_comp && req_comp != 4) + out = stbi__convert_format(out, 4, req_comp, layers * g.w, g.h); + + *z = layers; + return out; + } else { + return stbi__errpuc("not GIF", "Image was not as a gif type."); + } +} + +static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *u = 0; + stbi__gif g; + memset(&g, 0, sizeof(g)); + + u = stbi__gif_load_next(s, &g, comp, req_comp, 0); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + if (u) { + *x = g.w; + *y = g.h; + + // moved conversion to after successful load so that the same + // can be done for multiple frames. 
+ if (req_comp && req_comp != 4) + u = stbi__convert_format(u, 4, req_comp, g.w, g.h); + } + + // free buffers needed for multiple frame loading; + STBI_FREE(g.history); + STBI_FREE(g.background); + + return u; +} + +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp) +{ + return stbi__gif_info_raw(s,x,y,comp); +} +#endif + +// ************************************************************************************************* +// Radiance RGBE HDR loader +// originally by Nicolas Schulz +#ifndef STBI_NO_HDR +static int stbi__hdr_test_core(stbi__context *s, const char *signature) +{ + int i; + for (i=0; signature[i]; ++i) + if (stbi__get8(s) != signature[i]) + return 0; + stbi__rewind(s); + return 1; +} + +static int stbi__hdr_test(stbi__context* s) +{ + int r = stbi__hdr_test_core(s, "#?RADIANCE\n"); + stbi__rewind(s); + if(!r) { + r = stbi__hdr_test_core(s, "#?RGBE\n"); + stbi__rewind(s); + } + return r; +} + +#define STBI__HDR_BUFLEN 1024 +static char *stbi__hdr_gettoken(stbi__context *z, char *buffer) +{ + int len=0; + char c = '\0'; + + c = (char) stbi__get8(z); + + while (!stbi__at_eof(z) && c != '\n') { + buffer[len++] = c; + if (len == STBI__HDR_BUFLEN-1) { + // flush to end of line + while (!stbi__at_eof(z) && stbi__get8(z) != '\n') + ; + break; + } + c = (char) stbi__get8(z); + } + + buffer[len] = 0; + return buffer; +} + +static void stbi__hdr_convert(float *output, stbi_uc *input, int req_comp) +{ + if ( input[3] != 0 ) { + float f1; + // Exponent + f1 = (float) ldexp(1.0f, input[3] - (int)(128 + 8)); + if (req_comp <= 2) + output[0] = (input[0] + input[1] + input[2]) * f1 / 3; + else { + output[0] = input[0] * f1; + output[1] = input[1] * f1; + output[2] = input[2] * f1; + } + if (req_comp == 2) output[1] = 1; + if (req_comp == 4) output[3] = 1; + } else { + switch (req_comp) { + case 4: output[3] = 1; /* fallthrough */ + case 3: output[0] = output[1] = output[2] = 0; + break; + case 2: output[1] = 1; /* fallthrough */ + case 1: output[0] = 0; + break; + } + } +} + +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int width, height; + stbi_uc *scanline; + float *hdr_data; + int len; + unsigned char count, value; + int i, j, k, c1,c2, z; + const char *headerToken; + STBI_NOTUSED(ri); + + // Check identifier + headerToken = stbi__hdr_gettoken(s,buffer); + if (strcmp(headerToken, "#?RADIANCE") != 0 && strcmp(headerToken, "#?RGBE") != 0) + return stbi__errpf("not HDR", "Corrupt HDR image"); + + // Parse header + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) return stbi__errpf("unsupported format", "Unsupported HDR format"); + + // Parse width and height + // can't use sscanf() if we're not using stdio! 
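+ // Editor's note (not upstream stb_image): the resolution line parsed next typically + // reads "-Y 480 +X 640", i.e. 480 scanlines of 640 pixels stored top-to-bottom; any + // other axis order is rejected as unsupported.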
+ token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + height = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + width = (int) strtol(token, NULL, 10); + + *x = width; + *y = height; + + if (comp) *comp = 3; + if (req_comp == 0) req_comp = 3; + + if (!stbi__mad4sizes_valid(width, height, req_comp, sizeof(float), 0)) + return stbi__errpf("too large", "HDR image is too large"); + + // Read data + hdr_data = (float *) stbi__malloc_mad4(width, height, req_comp, sizeof(float), 0); + if (!hdr_data) + return stbi__errpf("outofmem", "Out of memory"); + + // Load image data + // image data is stored as some number of scan lines + if ( width < 8 || width >= 32768) { + // Read flat data + for (j=0; j < height; ++j) { + for (i=0; i < width; ++i) { + stbi_uc rgbe[4]; + main_decode_loop: + stbi__getn(s, rgbe, 4); + stbi__hdr_convert(hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp); + } + } + } else { + // Read RLE-encoded data + scanline = NULL; + + for (j = 0; j < height; ++j) { + c1 = stbi__get8(s); + c2 = stbi__get8(s); + len = stbi__get8(s); + if (c1 != 2 || c2 != 2 || (len & 0x80)) { + // not run-length encoded, so we have to actually use THIS data as a decoded + // pixel (note this can't be a valid pixel--one of RGB must be >= 128) + stbi_uc rgbe[4]; + rgbe[0] = (stbi_uc) c1; + rgbe[1] = (stbi_uc) c2; + rgbe[2] = (stbi_uc) len; + rgbe[3] = (stbi_uc) stbi__get8(s); + stbi__hdr_convert(hdr_data, rgbe, req_comp); + i = 1; + j = 0; + STBI_FREE(scanline); + goto main_decode_loop; // yes, this makes no sense + } + len <<= 8; + len |= stbi__get8(s); + if (len != width) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("invalid decoded scanline length", "corrupt HDR"); } + if (scanline == NULL) { + scanline = (stbi_uc *) stbi__malloc_mad2(width, 4, 0); + if (!scanline) { + STBI_FREE(hdr_data); + return stbi__errpf("outofmem", "Out of memory"); + } + } + + for (k = 0; k < 4; ++k) { + int nleft; + i = 0; + while ((nleft = width - i) > 0) { + count = stbi__get8(s); + if (count > 128) { + // Run + value = stbi__get8(s); + count -= 128; + if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = value; + } else { + // Dump + if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = stbi__get8(s); + } + } + } + for (i=0; i < width; ++i) + stbi__hdr_convert(hdr_data+(j*width + i)*req_comp, scanline + i*4, req_comp); + } + if (scanline) + STBI_FREE(scanline); + } + + return hdr_data; +} + +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int dummy; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + if (stbi__hdr_test(s) == 0) { + stbi__rewind( s ); + return 0; + } + + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) { + stbi__rewind( s ); + return 0; + } + token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *y = (int) 
strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *x = (int) strtol(token, NULL, 10); + *comp = 3; + return 1; +} +#endif // STBI_NO_HDR + +#ifndef STBI_NO_BMP +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp) +{ + void *p; + stbi__bmp_data info; + + info.all_a = 255; + p = stbi__bmp_parse_header(s, &info); + stbi__rewind( s ); + if (p == NULL) + return 0; + if (x) *x = s->img_x; + if (y) *y = s->img_y; + if (comp) *comp = info.ma ? 4 : 3; + return 1; +} +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp) +{ + int channelCount, dummy, depth; + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + *y = stbi__get32be(s); + *x = stbi__get32be(s); + depth = stbi__get16be(s); + if (depth != 8 && depth != 16) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 3) { + stbi__rewind( s ); + return 0; + } + *comp = 4; + return 1; +} + +static int stbi__psd_is16(stbi__context *s) +{ + int channelCount, depth; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + (void) stbi__get32be(s); + (void) stbi__get32be(s); + depth = stbi__get16be(s); + if (depth != 16) { + stbi__rewind( s ); + return 0; + } + return 1; +} +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp) +{ + int act_comp=0,num_packets=0,chained,dummy; + stbi__pic_packet packets[10]; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) { + stbi__rewind(s); + return 0; + } + + stbi__skip(s, 88); + + *x = stbi__get16be(s); + *y = stbi__get16be(s); + if (stbi__at_eof(s)) { + stbi__rewind( s); + return 0; + } + if ( (*x) != 0 && (1 << 28) / (*x) < (*y)) { + stbi__rewind( s ); + return 0; + } + + stbi__skip(s, 8); + + do { + stbi__pic_packet *packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return 0; + + packet = &packets[num_packets++]; + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + act_comp |= packet->channel; + + if (stbi__at_eof(s)) { + stbi__rewind( s ); + return 0; + } + if (packet->size != 8) { + stbi__rewind( s ); + return 0; + } + } while (chained); + + *comp = (act_comp & 0x10 ? 
4 : 3); + + return 1; +} +#endif + +// ************************************************************************************************* +// Portable Gray Map and Portable Pixel Map loader +// by Ken Miller +// +// PGM: http://netpbm.sourceforge.net/doc/pgm.html +// PPM: http://netpbm.sourceforge.net/doc/ppm.html +// +// Known limitations: +// Does not support ASCII image data (formats P2 and P3) +// Does not support 16-bit-per-channel +// (comments in the header section, introduced with '#', are skipped) + +#ifndef STBI_NO_PNM + +static int stbi__pnm_test(stbi__context *s) +{ + char p, t; + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind( s ); + return 0; + } + return 1; +} + +static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri) +{ + stbi_uc *out; + STBI_NOTUSED(ri); + + if (!stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n)) + return 0; + + *x = s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + + if (!stbi__mad3sizes_valid(s->img_n, s->img_x, s->img_y, 0)) + return stbi__errpuc("too large", "PNM too large"); + + out = (stbi_uc *) stbi__malloc_mad3(s->img_n, s->img_x, s->img_y, 0); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + stbi__getn(s, out, s->img_n * s->img_x * s->img_y); + + if (req_comp && req_comp != s->img_n) { + out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + return out; +} + +static int stbi__pnm_isspace(char c) +{ + return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r'; +} + +static void stbi__pnm_skip_whitespace(stbi__context *s, char *c) +{ + for (;;) { + while (!stbi__at_eof(s) && stbi__pnm_isspace(*c)) + *c = (char) stbi__get8(s); + + if (stbi__at_eof(s) || *c != '#') + break; + + while (!stbi__at_eof(s) && *c != '\n' && *c != '\r' ) + *c = (char) stbi__get8(s); + } +} + +static int stbi__pnm_isdigit(char c) +{ + return c >= '0' && c <= '9'; +} + +static int stbi__pnm_getinteger(stbi__context *s, char *c) +{ + int value = 0; + + while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) { + value = value*10 + (*c - '0'); + *c = (char) stbi__get8(s); + } + + return value; +} + +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp) +{ + int maxv, dummy; + char c, p, t; + + if (!x) x = &dummy; + if (!y) y = &dummy; + if (!comp) comp = &dummy; + + stbi__rewind(s); + + // Get identifier + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind(s); + return 0; + } + + *comp = (t == '6') ? 
3 : 1; // '5' is 1-component .pgm; '6' is 3-component .ppm + + c = (char) stbi__get8(s); + stbi__pnm_skip_whitespace(s, &c); + + *x = stbi__pnm_getinteger(s, &c); // read width + stbi__pnm_skip_whitespace(s, &c); + + *y = stbi__pnm_getinteger(s, &c); // read height + stbi__pnm_skip_whitespace(s, &c); + + maxv = stbi__pnm_getinteger(s, &c); // read max value + + if (maxv > 255) + return stbi__err("max value > 255", "PPM image not 8-bit"); + else + return 1; +} +#endif + +static int stbi__info_main(stbi__context *s, int *x, int *y, int *comp) +{ + #ifndef STBI_NO_JPEG + if (stbi__jpeg_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNG + if (stbi__png_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_GIF + if (stbi__gif_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_BMP + if (stbi__bmp_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PIC + if (stbi__pic_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNM + if (stbi__pnm_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_info(s, x, y, comp)) return 1; + #endif + + // test tga last because it's a crappy test! + #ifndef STBI_NO_TGA + if (stbi__tga_info(s, x, y, comp)) + return 1; + #endif + return stbi__err("unknown image type", "Image not of any known type, or corrupt"); +} + +static int stbi__is_16_main(stbi__context *s) +{ + #ifndef STBI_NO_PNG + if (stbi__png_is16(s)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_is16(s)) return 1; + #endif + + return 0; +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_info_from_file(f, x, y, comp); + fclose(f); + return result; +} + +STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__info_main(&s,x,y,comp); + if (pos >= 0) { + if (fseek(f,pos,SEEK_SET) == -1) return stbi__err("fseek() error", "File Seek Fail"); + } + return r; +} + +STBIDEF int stbi_is_16_bit(char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_is_16_bit_from_file(f); + fclose(f); + return result; +} + +STBIDEF int stbi_is_16_bit_from_file(FILE *f) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__is_16_main(&s); + if (pos >= 0) { + if (fseek(f,pos,SEEK_SET) == -1) return stbi__err("fseek() error", "File Seek Fail"); + } + return r; +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *c, void *user, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__is_16_main(&s); +} + +STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *c, void *user) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return 
stbi__is_16_main(&s); +} + +#endif // STB_IMAGE_IMPLEMENTATION + +/* + revision history: + 2.19 (2018-02-11) fix warning + 2.18 (2018-01-30) fix warnings + 2.17 (2018-01-29) change sbti__shiftsigned to avoid clang -O2 bug + 1-bit BMP + *_is_16_bit api + avoid warnings + 2.16 (2017-07-23) all functions have 16-bit variants; + STBI_NO_STDIO works again; + compilation fixes; + fix rounding in unpremultiply; + optimize vertical flip; + disable raw_len validation; + documentation fixes + 2.15 (2017-03-18) fix png-1,2,4 bug; now all Imagenet JPGs decode; + warning fixes; disable run-time SSE detection on gcc; + uniform handling of optional "return" values; + thread-safe initialization of zlib tables + 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs + 2.13 (2016-11-29) add 16-bit API, only supported for PNG right now + 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes + 2.11 (2016-04-02) allocate large structures on the stack + remove white matting for transparent PSD + fix reported channel count for PNG & BMP + re-enable SSE2 in non-gcc 64-bit + support RGB-formatted JPEG + read 16-bit PNGs (only as 8-bit) + 2.10 (2016-01-22) avoid warning introduced in 2.09 by STBI_REALLOC_SIZED + 2.09 (2016-01-16) allow comments in PNM files + 16-bit-per-pixel TGA (not bit-per-component) + info() for TGA could break due to .hdr handling + info() for BMP to shares code instead of sloppy parse + can use STBI_REALLOC_SIZED if allocator doesn't support realloc + code cleanup + 2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA + 2.07 (2015-09-13) fix compiler warnings + partial animated GIF support + limited 16-bpc PSD support + #ifdef unused functions + bug with < 92 byte PIC,PNM,HDR,TGA + 2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value + 2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning + 2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit + 2.03 (2015-04-12) extra corruption checking (mmozeiko) + stbi_set_flip_vertically_on_load (nguillemot) + fix NEON support; fix mingw support + 2.02 (2015-01-19) fix incorrect assert, fix warning + 2.01 (2015-01-17) fix various warnings; suppress SIMD on gcc 32-bit without -msse2 + 2.00b (2014-12-25) fix STBI_MALLOC in progressive JPEG + 2.00 (2014-12-25) optimize JPG, including x86 SSE2 & NEON SIMD (ryg) + progressive JPEG (stb) + PGM/PPM support (Ken Miller) + STBI_MALLOC,STBI_REALLOC,STBI_FREE + GIF bugfix -- seemingly never worked + STBI_NO_*, STBI_ONLY_* + 1.48 (2014-12-14) fix incorrectly-named assert() + 1.47 (2014-12-14) 1/2/4-bit PNG support, both direct and paletted (Omar Cornut & stb) + optimize PNG (ryg) + fix bug in interlaced PNG with user-specified channel count (stb) + 1.46 (2014-08-26) + fix broken tRNS chunk (colorkey-style transparency) in non-paletted PNG + 1.45 (2014-08-16) + fix MSVC-ARM internal compiler error by wrapping malloc + 1.44 (2014-08-07) + various warning fixes from Ronny Chevalier + 1.43 (2014-07-15) + fix MSVC-only compiler problem in code changed in 1.42 + 1.42 (2014-07-09) + don't define _CRT_SECURE_NO_WARNINGS (affects user code) + fixes to stbi__cleanup_jpeg path + added STBI_ASSERT to avoid requiring assert.h + 1.41 (2014-06-25) + fix search&replace from 1.36 that messed up comments/error messages + 1.40 (2014-06-22) + fix gcc struct-initialization warning + 1.39 (2014-06-15) + fix to TGA optimization when req_comp != number of components in TGA; + fix to GIF loading because BMP wasn't rewinding (whoops, no GIFs in my test suite) + add support for 
BMP version 5 (more ignored fields) + 1.38 (2014-06-06) + suppress MSVC warnings on integer casts truncating values + fix accidental rename of 'skip' field of I/O + 1.37 (2014-06-04) + remove duplicate typedef + 1.36 (2014-06-03) + convert to header file single-file library + if de-iphone isn't set, load iphone images color-swapped instead of returning NULL + 1.35 (2014-05-27) + various warnings + fix broken STBI_SIMD path + fix bug where stbi_load_from_file no longer left file pointer in correct place + fix broken non-easy path for 32-bit BMP (possibly never used) + TGA optimization by Arseny Kapoulkine + 1.34 (unknown) + use STBI_NOTUSED in stbi__resample_row_generic(), fix one more leak in tga failure case + 1.33 (2011-07-14) + make stbi_is_hdr work in STBI_NO_HDR (as specified), minor compiler-friendly improvements + 1.32 (2011-07-13) + support for "info" function for all supported filetypes (SpartanJ) + 1.31 (2011-06-20) + a few more leak fixes, bug in PNG handling (SpartanJ) + 1.30 (2011-06-11) + added ability to load files via callbacks to accomidate custom input streams (Ben Wenger) + removed deprecated format-specific test/load functions + removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks anyway + error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha) + fix inefficiency in decoding 32-bit BMP (David Woo) + 1.29 (2010-08-16) + various warning fixes from Aurelien Pocheville + 1.28 (2010-08-01) + fix bug in GIF palette transparency (SpartanJ) + 1.27 (2010-08-01) + cast-to-stbi_uc to fix warnings + 1.26 (2010-07-24) + fix bug in file buffering for PNG reported by SpartanJ + 1.25 (2010-07-17) + refix trans_data warning (Won Chun) + 1.24 (2010-07-12) + perf improvements reading from files on platforms with lock-heavy fgetc() + minor perf improvements for jpeg + deprecated type-specific functions so we'll get feedback if they're needed + attempt to fix trans_data warning (Won Chun) + 1.23 fixed bug in iPhone support + 1.22 (2010-07-10) + removed image *writing* support + stbi_info support from Jetro Lauha + GIF support from Jean-Marc Lienher + iPhone PNG-extensions from James Brown + warning-fixes from Nicolas Schulz and Janez Zemva (i.stbi__err. Janez (U+017D)emva) + 1.21 fix use of 'stbi_uc' in header (reported by jon blow) + 1.20 added support for Softimage PIC, by Tom Seddon + 1.19 bug in interlaced PNG corruption check (found by ryg) + 1.18 (2008-08-02) + fix a threading bug (local mutable static) + 1.17 support interlaced PNG + 1.16 major bugfix - stbi__convert_format converted one too many pixels + 1.15 initialize some fields for thread safety + 1.14 fix threadsafe conversion bug + header-file-only version (#define STBI_HEADER_FILE_ONLY before including) + 1.13 threadsafe + 1.12 const qualifiers in the API + 1.11 Support installable IDCT, colorspace conversion routines + 1.10 Fixes for 64-bit (don't use "unsigned long") + optimized upsampling by Fabian "ryg" Giesen + 1.09 Fix format-conversion for PSD code (bad global variables!) 
+ 1.08 Thatcher Ulrich's PSD code integrated by Nicolas Schulz + 1.07 attempt to fix C++ warning/errors again + 1.06 attempt to fix C++ warning/errors again + 1.05 fix TGA loading to return correct *comp and use good luminance calc + 1.04 default float alpha is 1, not 255; use 'void *' for stbi_image_free + 1.03 bugfixes to STBI_NO_STDIO, STBI_NO_HDR + 1.02 support for (subset of) HDR files, float interface for preferred access to them + 1.01 fix bug: possible bug in handling right-side up bmps... not sure + fix bug: the stbi__bmp_load() and stbi__tga_load() functions didn't work at all + 1.00 interface to zlib that skips zlib header + 0.99 correct handling of alpha in palette + 0.98 TGA loader by lonesock; dynamically add loaders (untested) + 0.97 jpeg errors on too large a file; also catch another malloc failure + 0.96 fix detection of invalid v value - particleman@mollyrocket forum + 0.95 during header scan, seek to markers in case of padding + 0.94 STBI_NO_STDIO to disable stdio usage; rename all #defines the same + 0.93 handle jpegtran output; verbose errors + 0.92 read 4,8,16,24,32-bit BMP files of several formats + 0.91 output 24-bit Windows 3.0 BMP files + 0.90 fix a few more warnings; bump version number to approach 1.0 + 0.61 bugfixes due to Marc LeBlanc, Christopher Lloyd + 0.60 fix compiling as c++ + 0.59 fix warnings: merge Dave Moore's -Wall fixes + 0.58 fix bug: zlib uncompressed mode len/nlen was wrong endian + 0.57 fix bug: jpg last huffman symbol before marker was >9 bits but less than 16 available + 0.56 fix bug: zlib uncompressed mode len vs. nlen + 0.55 fix bug: restart_interval not initialized to 0 + 0.54 allow NULL for 'int *comp' + 0.53 fix bug in png 3->4; speedup png decoding + 0.52 png handles req_comp=3,4 directly; minor cleanup; jpeg comments + 0.51 obey req_comp requests, 1-component jpegs return as 1-component, + on 'test' only check type, not whether we support this variant + 0.50 (2006-11-19) + first released version +*/ + + +/* +------------------------------------------------------------------------------ +This software is available under 2 licenses -- choose whichever you prefer. +------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2017 Sean Barrett +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. 
+Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
+software, either in source code form or as a compiled binary, for any purpose,
+commercial or non-commercial, and by any means.
+In jurisdictions that recognize copyright laws, the author or authors of this
+software dedicate any and all copyright interest in the software to the public
+domain. We make this dedication for the benefit of the public at large and to
+the detriment of our heirs and successors. We intend this dedication to be an
+overt act of relinquishment in perpetuity of all present and future rights to
+this software under copyright law.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+------------------------------------------------------------------------------
+*/
diff --git a/vendor/github.com/Benau/go_rlottie/vector_varenaalloc.cpp b/vendor/github.com/Benau/go_rlottie/vector_varenaalloc.cpp
new file mode 100644
index 00000000..55bd3793
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_varenaalloc.cpp
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "vector_varenaalloc.h"
+#include <algorithm>
+#include <new>
+
+static char* end_chain(char*) { return nullptr; }
+
+static uint32_t first_allocated_block(uint32_t blockSize, uint32_t firstHeapAllocation) {
+    return firstHeapAllocation > 0 ? firstHeapAllocation :
+           blockSize > 0 ? blockSize : 1024;
+}
+
+VArenaAlloc::VArenaAlloc(char* block, size_t size, size_t firstHeapAllocation)
+    : fDtorCursor {block}
+    , fCursor {block}
+    , fEnd {block + ToU32(size)}
+    , fFirstBlock {block}
+    , fFirstSize {ToU32(size)}
+    , fFirstHeapAllocationSize {first_allocated_block(ToU32(size), ToU32(firstHeapAllocation))}
+{
+    if (size < sizeof(Footer)) {
+        fEnd = fCursor = fDtorCursor = nullptr;
+    }
+
+    if (fCursor != nullptr) {
+        this->installFooter(end_chain, 0);
+    }
+}
+
+VArenaAlloc::~VArenaAlloc() {
+    RunDtorsOnBlock(fDtorCursor);
+}
+
+void VArenaAlloc::reset() {
+    this->~VArenaAlloc();
+    new (this) VArenaAlloc{fFirstBlock, fFirstSize, fFirstHeapAllocationSize};
+}
+
+void VArenaAlloc::installFooter(FooterAction* action, uint32_t padding) {
+    assert(padding < 64);
+    int64_t actionInt = (int64_t)(intptr_t)action;
+
+    // The top 14 bits should be either all 0s or all 1s. Check this.
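+    // (Shifting left by 6 and arithmetic-shifting back must round-trip the
+    // pointer value; the low 6 bits freed up this way hold the padding count,
+    // which RunDtorsOnBlock() recovers with `footer & 63`.)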
+    assert((actionInt << 6) >> 6 == actionInt);
+    Footer encodedFooter = (actionInt << 6) | padding;
+    memmove(fCursor, &encodedFooter, sizeof(Footer));
+    fCursor += sizeof(Footer);
+    fDtorCursor = fCursor;
+}
+
+void VArenaAlloc::installPtrFooter(FooterAction* action, char* ptr, uint32_t padding) {
+    memmove(fCursor, &ptr, sizeof(char*));
+    fCursor += sizeof(char*);
+    this->installFooter(action, padding);
+}
+
+char* VArenaAlloc::SkipPod(char* footerEnd) {
+    char* objEnd = footerEnd - (sizeof(Footer) + sizeof(int32_t));
+    int32_t skip;
+    memmove(&skip, objEnd, sizeof(int32_t));
+    return objEnd - skip;
+}
+
+void VArenaAlloc::RunDtorsOnBlock(char* footerEnd) {
+    while (footerEnd != nullptr) {
+        Footer footer;
+        memcpy(&footer, footerEnd - sizeof(Footer), sizeof(Footer));
+
+        FooterAction* action = (FooterAction*)(footer >> 6);
+        ptrdiff_t padding = footer & 63;
+
+        footerEnd = action(footerEnd) - padding;
+    }
+}
+
+char* VArenaAlloc::NextBlock(char* footerEnd) {
+    char* objEnd = footerEnd - (sizeof(Footer) + sizeof(char*));
+    char* next;
+    memmove(&next, objEnd, sizeof(char*));
+    RunDtorsOnBlock(next);
+    delete [] objEnd;
+    return nullptr;
+}
+
+void VArenaAlloc::installUint32Footer(FooterAction* action, uint32_t value, uint32_t padding) {
+    memmove(fCursor, &value, sizeof(uint32_t));
+    fCursor += sizeof(uint32_t);
+    this->installFooter(action, padding);
+}
+
+void VArenaAlloc::ensureSpace(uint32_t size, uint32_t alignment) {
+    constexpr uint32_t headerSize = sizeof(Footer) + sizeof(ptrdiff_t);
+    // The chrome c++ library we use does not define std::max_align_t.
+    // This must be conservative to add the right amount of extra memory to handle the alignment
+    // padding.
+    constexpr uint32_t alignof_max_align_t = 8;
+    constexpr uint32_t maxSize = std::numeric_limits<uint32_t>::max();
+    constexpr uint32_t overhead = headerSize + sizeof(Footer);
+    AssertRelease(size <= maxSize - overhead);
+    uint32_t objSizeAndOverhead = size + overhead;
+    if (alignment > alignof_max_align_t) {
+        uint32_t alignmentOverhead = alignment - 1;
+        AssertRelease(objSizeAndOverhead <= maxSize - alignmentOverhead);
+        objSizeAndOverhead += alignmentOverhead;
+    }
+
+    uint32_t minAllocationSize;
+    if (fFirstHeapAllocationSize <= maxSize / fFib0) {
+        minAllocationSize = fFirstHeapAllocationSize * fFib0;
+        fFib0 += fFib1;
+        std::swap(fFib0, fFib1);
+    } else {
+        minAllocationSize = maxSize;
+    }
+    uint32_t allocationSize = std::max(objSizeAndOverhead, minAllocationSize);
+
+    // Round up to a nice size. If > 32K align to 4K boundary else up to max_align_t. The > 32K
+    // heuristic is from the JEMalloc behavior.
+    {
+        uint32_t mask = allocationSize > (1 << 15) ? (1 << 12) - 1 : 16 - 1;
+        AssertRelease(allocationSize <= maxSize - mask);
+        allocationSize = (allocationSize + mask) & ~mask;
+    }
+
+    char* newBlock = new char[allocationSize];
+
+    auto previousDtor = fDtorCursor;
+    fCursor = newBlock;
+    fDtorCursor = newBlock;
+    fEnd = fCursor + allocationSize;
+    this->installPtrFooter(NextBlock, previousDtor, 0);
+}
+
+char* VArenaAlloc::allocObjectWithFooter(uint32_t sizeIncludingFooter, uint32_t alignment) {
+    uintptr_t mask = alignment - 1;
+
+restart:
+    uint32_t skipOverhead = 0;
+    bool needsSkipFooter = fCursor != fDtorCursor;
+    if (needsSkipFooter) {
+        skipOverhead = sizeof(Footer) + sizeof(uint32_t);
+    }
+    char* objStart = (char*)((uintptr_t)(fCursor + skipOverhead + mask) & ~mask);
+    uint32_t totalSize = sizeIncludingFooter + skipOverhead;
+    //std::cout<<"non POD object size = "<<totalSize<<std::endl;
+    if ((ptrdiff_t)totalSize > fEnd - objStart) {
+        this->ensureSpace(totalSize, alignment);
+        goto restart;
+    }
+
+    AssertRelease((ptrdiff_t)totalSize <= fEnd - objStart);
+
+    // Install a skip footer if needed, thus terminating a run of POD data. The calling code is
+    // responsible for installing the footer after the object.
+    if (needsSkipFooter) {
+        this->installUint32Footer(SkipPod, ToU32(fCursor - fDtorCursor), 0);
+    }
+
+    return objStart;
+}
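A minimal usage sketch of the arena implemented above (illustrative only: `Foo` stands in for any caller-defined type with a non-trivial destructor; the public API is declared in vector_varenaalloc.h, which follows):

    char block[1024];
    VArenaAlloc arena(block, sizeof(block), 2048); // stack block first, heap after
    Foo* f = arena.make<Foo>();          // footer records how to run ~Foo
    int* v = arena.makeArray<int>(16);   // POD array: zero per-item overhead
    arena.reset();                       // runs ~Foo, releases any heap blocks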
diff --git a/vendor/github.com/Benau/go_rlottie/vector_varenaalloc.h b/vendor/github.com/Benau/go_rlottie/vector_varenaalloc.h
new file mode 100644
index 00000000..ed03b53f
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_varenaalloc.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2016 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef VARENAALLOC_H
+#define VARENAALLOC_H
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <limits>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+// SkArenaAlloc allocates objects and destroys the allocated objects when destroyed. It's designed
+// to minimize the number of underlying block allocations. SkArenaAlloc allocates first out of an
+// (optional) user-provided block of memory, and when that's exhausted it allocates on the heap,
+// starting with an allocation of firstHeapAllocation bytes. If your data (plus a small overhead)
+// fits in the user-provided block, SkArenaAlloc never uses the heap, and if it fits in
+// firstHeapAllocation bytes, it'll use the heap only once. If 0 is specified for
+// firstHeapAllocation, then blockSize is used unless that too is 0, then 1024 is used.
+//
+// Examples:
+//
+//   char block[mostCasesSize];
+//   SkArenaAlloc arena(block, mostCasesSize);
+//
+// If mostCasesSize is too large for the stack, you can use the following pattern.
+//
+//   std::unique_ptr<char[]> block{new char[mostCasesSize]};
+//   SkArenaAlloc arena(block.get(), mostCasesSize, almostAllCasesSize);
+//
+// If the program only sometimes allocates memory, use the following pattern.
+//
+//   SkArenaAlloc arena(nullptr, 0, almostAllCasesSize);
+//
+// The storage does not necessarily need to be on the stack. Embedding the storage in a class also
+// works.
+//
+// class Foo {
+//     char storage[mostCasesSize];
+//     SkArenaAlloc arena (storage, mostCasesSize);
+// };
+//
+// In addition, the system is optimized to handle POD data including arrays of PODs (where
+// POD is really data with no destructors). For POD data it has zero overhead per item, and a
+// typical per block overhead of 8 bytes. For non-POD objects there is a per item overhead of 4
+// bytes. For arrays of non-POD objects there is a per array overhead of typically 8 bytes. There
+// is an additional overhead when switching from POD data to non-POD data of typically 8 bytes.
+//
+// If additional blocks are needed they are increased exponentially. This strategy bounds the
+// recursion of RunDtorsOnBlock to O(log size-of-memory). Block sizes grow using
+// the Fibonacci sequence, which means that for 2^32 memory there are 48 allocations, and for 2^48
+// there are 71 allocations.
+class VArenaAlloc {
+public:
+    VArenaAlloc(char* block, size_t blockSize, size_t firstHeapAllocation);
+
+    explicit VArenaAlloc(size_t firstHeapAllocation)
+        : VArenaAlloc(nullptr, 0, firstHeapAllocation)
+    {}
+
+    ~VArenaAlloc();
+
+    template <typename T, typename... Args>
+    T* make(Args&&... args) {
+        uint32_t size = ToU32(sizeof(T));
+        uint32_t alignment = ToU32(alignof(T));
+        char* objStart;
+        if (std::is_trivially_destructible<T>::value) {
+            objStart = this->allocObject(size, alignment);
+            fCursor = objStart + size;
+        } else {
+            objStart = this->allocObjectWithFooter(size + sizeof(Footer), alignment);
+            // Can never be UB because max value is alignof(T).
+            uint32_t padding = ToU32(objStart - fCursor);
+
+            // Advance to end of object to install footer.
+            fCursor = objStart + size;
+            FooterAction* releaser = [](char* objEnd) {
+                char* objStart = objEnd - (sizeof(T) + sizeof(Footer));
+                ((T*)objStart)->~T();
+                return objStart;
+            };
+            this->installFooter(releaser, padding);
+        }
+
+        // This must be last to make objects with nested use of this allocator work.
+        return new(objStart) T(std::forward<Args>(args)...);
+    }
+
+    template <typename T>
+    T* makeArrayDefault(size_t count) {
+        uint32_t safeCount = ToU32(count);
+        T* array = (T*)this->commonArrayAlloc<T>(safeCount);
+
+        // If T is primitive then no initialization takes place.
+        for (size_t i = 0; i < safeCount; i++) {
+            new (&array[i]) T;
+        }
+        return array;
+    }
+
+    template <typename T>
+    T* makeArray(size_t count) {
+        uint32_t safeCount = ToU32(count);
+        T* array = (T*)this->commonArrayAlloc<T>(safeCount);
+
+        // If T is primitive then the memory is initialized. For example, an array of chars will
+        // be zeroed.
+        for (size_t i = 0; i < safeCount; i++) {
+            new (&array[i]) T();
+        }
+        return array;
+    }
+
+    // Only use makeBytesAlignedTo if none of the typed variants are practical to use.
+    void* makeBytesAlignedTo(size_t size, size_t align) {
+        auto objStart = this->allocObject(ToU32(size), ToU32(align));
+        fCursor = objStart + size;
+        return objStart;
+    }
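+    // Note: non-trivially-destructible types allocated through the methods
+    // above get a type-erased destructor thunk (the `releaser` lambda in
+    // make(), or the array thunk in commonArrayAlloc()) recorded in their
+    // footer, so the arena can run ~T() at teardown without knowing T.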
+
+    // Destroy all allocated objects, free any heap allocations.
+    void reset();
+
+private:
+    static void AssertRelease(bool cond) { if (!cond) { ::abort(); } }
+    static uint32_t ToU32(size_t v) {
+        return (uint32_t)v;
+    }
+
+    using Footer = int64_t;
+    using FooterAction = char* (char*);
+
+    static char* SkipPod(char* footerEnd);
+    static void RunDtorsOnBlock(char* footerEnd);
+    static char* NextBlock(char* footerEnd);
+
+    void installFooter(FooterAction* releaser, uint32_t padding);
+    void installUint32Footer(FooterAction* action, uint32_t value, uint32_t padding);
+    void installPtrFooter(FooterAction* action, char* ptr, uint32_t padding);
+
+    void ensureSpace(uint32_t size, uint32_t alignment);
+
+    char* allocObject(uint32_t size, uint32_t alignment) {
+        uintptr_t mask = alignment - 1;
+        uintptr_t alignedOffset = (~reinterpret_cast<uintptr_t>(fCursor) + 1) & mask;
+        uintptr_t totalSize = size + alignedOffset;
+        AssertRelease(totalSize >= size);
+
+        if (totalSize > static_cast<uintptr_t>(fEnd - fCursor)) {
+            this->ensureSpace(size, alignment);
+            alignedOffset = (~reinterpret_cast<uintptr_t>(fCursor) + 1) & mask;
+        }
+        return fCursor + alignedOffset;
+    }
+
+    char* allocObjectWithFooter(uint32_t sizeIncludingFooter, uint32_t alignment);
+
+    template <typename T>
+    char* commonArrayAlloc(uint32_t count) {
+        char* objStart;
+        AssertRelease(count <= std::numeric_limits<uint32_t>::max() / sizeof(T));
+        uint32_t arraySize = ToU32(count * sizeof(T));
+        uint32_t alignment = ToU32(alignof(T));
+
+        if (std::is_trivially_destructible<T>::value) {
+            objStart = this->allocObject(arraySize, alignment);
+            fCursor = objStart + arraySize;
+        } else {
+            constexpr uint32_t overhead = sizeof(Footer) + sizeof(uint32_t);
+            AssertRelease(arraySize <= std::numeric_limits<uint32_t>::max() - overhead);
+            uint32_t totalSize = arraySize + overhead;
+            objStart = this->allocObjectWithFooter(totalSize, alignment);
+
+            // Can never be UB because max value is alignof(T).
+            uint32_t padding = ToU32(objStart - fCursor);
+
+            // Advance to end of array to install footer.
+            fCursor = objStart + arraySize;
+            this->installUint32Footer(
+                [](char* footerEnd) {
+                    char* objEnd = footerEnd - (sizeof(Footer) + sizeof(uint32_t));
+                    uint32_t count;
+                    memmove(&count, objEnd, sizeof(uint32_t));
+                    char* objStart = objEnd - count * sizeof(T);
+                    T* array = (T*) objStart;
+                    for (uint32_t i = 0; i < count; i++) {
+                        array[i].~T();
+                    }
+                    return objStart;
+                },
+                ToU32(count),
+                padding);
+        }
+
+        return objStart;
+    }
+
+    char* fDtorCursor;
+    char* fCursor;
+    char* fEnd;
+    char* const fFirstBlock;
+    const uint32_t fFirstSize;
+    const uint32_t fFirstHeapAllocationSize;
+
+    // Use the Fibonacci sequence as the growth factor for block size. The size of the block
+    // allocated is fFib0 * fFirstHeapAllocationSize. Using 2 ^ n * fFirstHeapAllocationSize
+    // had too much slop for Android.
+    uint32_t fFib0 {1}, fFib1 {1};
+};
+
+// Helper for defining allocators with inline/reserved storage.
+// For argument declarations, stick to the base type (SkArenaAlloc).
+template <size_t InlineStorageSize>
+class VSTArenaAlloc : public VArenaAlloc {
+public:
+    explicit VSTArenaAlloc(size_t firstHeapAllocation = InlineStorageSize)
+        : VArenaAlloc(fInlineStorage, InlineStorageSize, firstHeapAllocation) {}
+
+private:
+    char fInlineStorage[InlineStorageSize];
+};
+
+#endif // VARENAALLOC_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vbezier.cpp b/vendor/github.com/Benau/go_rlottie/vector_vbezier.cpp
new file mode 100644
index 00000000..3aa324e7
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vbezier.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "vector_vbezier.h"
+#include <cmath>
+#include "vector_vline.h"
+
+V_BEGIN_NAMESPACE
+
+VBezier VBezier::fromPoints(const VPointF &p1, const VPointF &p2,
+                            const VPointF &p3, const VPointF &p4)
+{
+    VBezier b;
+    b.x1 = p1.x();
+    b.y1 = p1.y();
+    b.x2 = p2.x();
+    b.y2 = p2.y();
+    b.x3 = p3.x();
+    b.y3 = p3.y();
+    b.x4 = p4.x();
+    b.y4 = p4.y();
+    return b;
+}
+
+float VBezier::length() const
+{
+    const auto len = VLine::length(x1, y1, x2, y2) +
+                     VLine::length(x2, y2, x3, y3) +
+                     VLine::length(x3, y3, x4, y4);
+
+    const auto chord = VLine::length(x1, y1, x4, y4);
+
+    if ((len - chord) > 0.01) {
+        VBezier left, right;
+        split(&left, &right);
+        return left.length() + right.length();
+    }
+
+    return len;
+}
+
+VBezier VBezier::onInterval(float t0, float t1) const
+{
+    if (t0 == 0 && t1 == 1) return *this;
+
+    VBezier bezier = *this;
+
+    VBezier result;
+    bezier.parameterSplitLeft(t0, &result);
+    float trueT = (t1 - t0) / (1 - t0);
+    bezier.parameterSplitLeft(trueT, &result);
+
+    return result;
+}
+
+float VBezier::tAtLength(float l, float totalLength) const
+{
+    float t = 1.0;
+    const float error = 0.01f;
+    if (l > totalLength || vCompare(l, totalLength)) return t;
+
+    t *= 0.5;
+
+    float lastBigger = 1.0;
+    for (int num = 0; num < 100500; num++) {
+        VBezier right = *this;
+        VBezier left;
+        right.parameterSplitLeft(t, &left);
+        float lLen = left.length();
+        if (fabs(lLen - l) < error) return t;
+
+        if (lLen < l) {
+            t += (lastBigger - t) * 0.5f;
+        } else {
+            lastBigger = t;
+            t -= t * 0.5f;
+        }
+    }
+    vWarning << "no convergence";
+    return t;
+}
+
+void VBezier::splitAtLength(float len, VBezier *left, VBezier *right)
+{
+    float t;
+
+    *right = *this;
+    t = right->tAtLength(len);
+    right->parameterSplitLeft(t, left);
+}
+
+VPointF VBezier::derivative(float t) const
+{
+    // p'(t) = 3 * (-(1-2t+t^2) * p0 + (1 - 4 * t + 3 * t^2) * p1 + (2 * t - 3 *
+    // t^2) * p2 + t^2 * p3)
+
+    float m_t = 1.0f - t;
+
+    float d = t * t;
+    float a = -m_t * m_t;
+    float b = 1 - 4 * t + 3 * d;
+    float c = 2 * t - 3 * d;
+
+    return 3 * VPointF(a * x1 + b * x2 + c * x3 + d * x4,
+                       a * y1 + b * y2 + c * y3 + d * y4);
+}
+
+float VBezier::angleAt(float t) const
+{
+    if (t < 0 || t > 1) {
+        return 0;
+    }
+    return VLine({}, derivative(t)).angle();
+}
+
+V_END_NAMESPACE
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vbezier.h b/vendor/github.com/Benau/go_rlottie/vector_vbezier.h
new file mode 100644
index 00000000..1210d27a
--- /dev/null
+++
b/vendor/github.com/Benau/go_rlottie/vector_vbezier.h @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef VBEZIER_H +#define VBEZIER_H + +#include "vector_vpoint.h" + +V_BEGIN_NAMESPACE + +class VBezier { +public: + VBezier() = default; + VPointF pointAt(float t) const; + float angleAt(float t) const; + VBezier onInterval(float t0, float t1) const; + float length() const; + static void coefficients(float t, float &a, float &b, float &c, float &d); + static VBezier fromPoints(const VPointF &start, const VPointF &cp1, + const VPointF &cp2, const VPointF &end); + inline void parameterSplitLeft(float t, VBezier *left); + inline void split(VBezier *firstHalf, VBezier *secondHalf) const; + float tAtLength(float len) const { return tAtLength(len , length());} + float tAtLength(float len, float totalLength) const; + void splitAtLength(float len, VBezier *left, VBezier *right); + VPointF pt1() const { return {x1, y1}; } + VPointF pt2() const { return {x2, y2}; } + VPointF pt3() const { return {x3, y3}; } + VPointF pt4() const { return {x4, y4}; } + +private: + VPointF derivative(float t) const; + float x1, y1, x2, y2, x3, y3, x4, y4; +}; + +inline void VBezier::coefficients(float t, float &a, float &b, float &c, + float &d) +{ + float m_t = 1.0f - t; + b = m_t * m_t; + c = t * t; + d = c * t; + a = b * m_t; + b *= 3.0f * t; + c *= 3.0f * m_t; +} + +inline VPointF VBezier::pointAt(float t) const +{ + // numerically more stable: + float x, y; + + float m_t = 1.0f - t; + { + float a = x1 * m_t + x2 * t; + float b = x2 * m_t + x3 * t; + float c = x3 * m_t + x4 * t; + a = a * m_t + b * t; + b = b * m_t + c * t; + x = a * m_t + b * t; + } + { + float a = y1 * m_t + y2 * t; + float b = y2 * m_t + y3 * t; + float c = y3 * m_t + y4 * t; + a = a * m_t + b * t; + b = b * m_t + c * t; + y = a * m_t + b * t; + } + return {x, y}; +} + +inline void VBezier::parameterSplitLeft(float t, VBezier *left) +{ + left->x1 = x1; + left->y1 = y1; + + left->x2 = x1 + t * (x2 - x1); + left->y2 = y1 + t * (y2 - y1); + + left->x3 = x2 + t * (x3 - x2); // temporary holding spot + left->y3 = y2 + t * (y3 - y2); // temporary holding spot + + x3 = x3 + t * (x4 - x3); + y3 = y3 + t * (y4 - y3); + + x2 = left->x3 + t * (x3 - left->x3); + y2 = left->y3 + t * (y3 - left->y3); + + left->x3 = left->x2 + t * (left->x3 - left->x2); + left->y3 = left->y2 + t * (left->y3 - left->y2); + + left->x4 = x1 = left->x3 + t 
* (x2 - left->x3); + left->y4 = y1 = left->y3 + t * (y2 - left->y3); +} + +inline void VBezier::split(VBezier *firstHalf, VBezier *secondHalf) const +{ + float c = (x2 + x3) * 0.5f; + firstHalf->x2 = (x1 + x2) * 0.5f; + secondHalf->x3 = (x3 + x4) * 0.5f; + firstHalf->x1 = x1; + secondHalf->x4 = x4; + firstHalf->x3 = (firstHalf->x2 + c) * 0.5f; + secondHalf->x2 = (secondHalf->x3 + c) * 0.5f; + firstHalf->x4 = secondHalf->x1 = (firstHalf->x3 + secondHalf->x2) * 0.5f; + + c = (y2 + y3) / 2; + firstHalf->y2 = (y1 + y2) * 0.5f; + secondHalf->y3 = (y3 + y4) * 0.5f; + firstHalf->y1 = y1; + secondHalf->y4 = y4; + firstHalf->y3 = (firstHalf->y2 + c) * 0.5f; + secondHalf->y2 = (secondHalf->y3 + c) * 0.5f; + firstHalf->y4 = secondHalf->y1 = (firstHalf->y3 + secondHalf->y2) * 0.5f; +} + +V_END_NAMESPACE + +#endif // VBEZIER_H diff --git a/vendor/github.com/Benau/go_rlottie/vector_vbitmap.cpp b/vendor/github.com/Benau/go_rlottie/vector_vbitmap.cpp new file mode 100644 index 00000000..cadae578 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_vbitmap.cpp @@ -0,0 +1,219 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+#include "vector_vbitmap.h"
+#include <string>
+#include <memory>
+#include "vector_vdrawhelper.h"
+#include "vector_vglobal.h"
+
+V_BEGIN_NAMESPACE
+
+void VBitmap::Impl::reset(size_t width, size_t height, VBitmap::Format format)
+{
+    mRoData = nullptr;
+    mWidth = uint(width);
+    mHeight = uint(height);
+    mFormat = format;
+
+    mDepth = depth(format);
+    mStride = ((mWidth * mDepth + 31) >> 5)
+              << 2;  // bytes per scanline (must be multiple of 4)
+    mOwnData = std::make_unique<uchar[]>(mStride * mHeight);
+}
+
+void VBitmap::Impl::reset(uchar *data, size_t width, size_t height, size_t bytesPerLine,
+                          VBitmap::Format format)
+{
+    mRoData = data;
+    mWidth = uint(width);
+    mHeight = uint(height);
+    mStride = uint(bytesPerLine);
+    mFormat = format;
+    mDepth = depth(format);
+    mOwnData = nullptr;
+}
+
+uchar VBitmap::Impl::depth(VBitmap::Format format)
+{
+    uchar depth = 1;
+    switch (format) {
+    case VBitmap::Format::Alpha8:
+        depth = 8;
+        break;
+    case VBitmap::Format::ARGB32:
+    case VBitmap::Format::ARGB32_Premultiplied:
+        depth = 32;
+        break;
+    default:
+        break;
+    }
+    return depth;
+}
+
+void VBitmap::Impl::fill(uint /*pixel*/)
+{
+    //@TODO
+}
+
+void VBitmap::Impl::updateLuma()
+{
+    if (mFormat != VBitmap::Format::ARGB32_Premultiplied) return;
+    auto dataPtr = data();
+    for (uint col = 0; col < mHeight; col++) {
+        uint *pixel = (uint *)(dataPtr + mStride * col);
+        for (uint row = 0; row < mWidth; row++) {
+            int alpha = vAlpha(*pixel);
+            if (alpha == 0) {
+                pixel++;
+                continue;
+            }
+
+            int red = vRed(*pixel);
+            int green = vGreen(*pixel);
+            int blue = vBlue(*pixel);
+
+            if (alpha != 255) {
+                // un-premultiply
+                red = (red * 255) / alpha;
+                green = (green * 255) / alpha;
+                blue = (blue * 255) / alpha;
+            }
+            int luminosity = int(0.299f * red + 0.587f * green + 0.114f * blue);
+            *pixel = luminosity << 24;
+            pixel++;
+        }
+    }
+}
+
+VBitmap::VBitmap(size_t width, size_t height, VBitmap::Format format)
+{
+    if (width <= 0 || height <= 0 || format == Format::Invalid) return;
+
+    mImpl = rc_ptr<VBitmap::Impl>(width, height, format);
+}
+
+VBitmap::VBitmap(uchar *data, size_t width, size_t height, size_t bytesPerLine,
+                 VBitmap::Format format)
+{
+    if (!data || width <= 0 || height <= 0 || bytesPerLine <= 0 ||
+        format == Format::Invalid)
+        return;
+
+    mImpl = rc_ptr<VBitmap::Impl>(data, width, height, bytesPerLine, format);
+}
+
+void VBitmap::reset(uchar *data, size_t w, size_t h, size_t bytesPerLine,
+                    VBitmap::Format format)
+{
+    if (mImpl) {
+        mImpl->reset(data, w, h, bytesPerLine, format);
+    } else {
+        mImpl = rc_ptr<VBitmap::Impl>(data, w, h, bytesPerLine, format);
+    }
+}
+
+void VBitmap::reset(size_t w, size_t h, VBitmap::Format format)
+{
+    if (mImpl) {
+        if (w == mImpl->width() && h == mImpl->height() &&
+            format == mImpl->format()) {
+            return;
+        }
+        mImpl->reset(w, h, format);
+    } else {
+        mImpl = rc_ptr<VBitmap::Impl>(w, h, format);
+    }
+}
+
+size_t VBitmap::stride() const
+{
+    return mImpl ? mImpl->stride() : 0;
+}
+
+size_t VBitmap::width() const
+{
+    return mImpl ? mImpl->width() : 0;
+}
+
+size_t VBitmap::height() const
+{
+    return mImpl ? mImpl->height() : 0;
+}
+
+size_t VBitmap::depth() const
+{
+    return mImpl ? mImpl->mDepth : 0;
+}
+
+uchar *VBitmap::data()
+{
+    return mImpl ? mImpl->data() : nullptr;
+}
+
+uchar *VBitmap::data() const
+{
+    return mImpl ? mImpl->data() : nullptr;
+}
+
+VRect VBitmap::rect() const
+{
+    return mImpl ? mImpl->rect() : VRect();
+}
+
+VSize VBitmap::size() const
+{
+    return mImpl ? mImpl->size() : VSize();
+}
+
+bool VBitmap::valid() const
+{
+    return mImpl;
+}
+
+VBitmap::Format VBitmap::format() const
+{
+    return mImpl ?
mImpl->format() : VBitmap::Format::Invalid; +} + +void VBitmap::fill(uint pixel) +{ + if (mImpl) mImpl->fill(pixel); +} + +/* + * This is special function which converts + * RGB value to Luminosity and stores it in + * the Alpha component of the pixel. + * After this conversion the bitmap data is no more + * in RGB space. but the Alpha component contains the + * Luminosity value of the pixel in HSL color space. + * NOTE: this api has its own special usecase + * make sure you know what you are doing before using + * this api. + */ +void VBitmap::updateLuma() +{ + if (mImpl) mImpl->updateLuma(); +} + +V_END_NAMESPACE diff --git a/vendor/github.com/Benau/go_rlottie/vector_vbitmap.h b/vendor/github.com/Benau/go_rlottie/vector_vbitmap.h new file mode 100644 index 00000000..0236e34d --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_vbitmap.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+#ifndef VBITMAP_H
+#define VBITMAP_H
+
+#include "vector_vrect.h"
+#include "vector_vsharedptr.h"
+
+V_BEGIN_NAMESPACE
+
+class VBitmap {
+public:
+    enum class Format: uchar {
+        Invalid,
+        Alpha8,
+        ARGB32,
+        ARGB32_Premultiplied
+    };
+
+    VBitmap() = default;
+    VBitmap(size_t w, size_t h, VBitmap::Format format);
+    VBitmap(uchar *data, size_t w, size_t h, size_t bytesPerLine, VBitmap::Format format);
+    void reset(uchar *data, size_t w, size_t h, size_t stride, VBitmap::Format format);
+    void reset(size_t w, size_t h, VBitmap::Format format=Format::ARGB32_Premultiplied);
+    size_t stride() const;
+    size_t width() const;
+    size_t height() const;
+    size_t depth() const;
+    VBitmap::Format format() const;
+    bool valid() const;
+    uchar *data();
+    uchar *data() const;
+    VRect rect() const;
+    VSize size() const;
+    void fill(uint pixel);
+    void updateLuma();
+private:
+    struct Impl {
+        std::unique_ptr<uchar[]> mOwnData{nullptr};
+        uchar *         mRoData{nullptr};
+        uint            mWidth{0};
+        uint            mHeight{0};
+        uint            mStride{0};
+        uchar           mDepth{0};
+        VBitmap::Format mFormat{VBitmap::Format::Invalid};
+
+        explicit Impl(size_t width, size_t height, VBitmap::Format format)
+        {
+            reset(width, height, format);
+        }
+        explicit Impl(uchar *data, size_t w, size_t h, size_t bytesPerLine, VBitmap::Format format)
+        {
+            reset(data, w, h, bytesPerLine, format);
+        }
+        VRect   rect() const { return VRect(0, 0, mWidth, mHeight);}
+        VSize   size() const { return VSize(mWidth, mHeight); }
+        size_t  stride() const { return mStride; }
+        size_t  width() const { return mWidth; }
+        size_t  height() const { return mHeight; }
+        uchar * data() { return mRoData ? mRoData : mOwnData.get(); }
+        VBitmap::Format format() const { return mFormat; }
+        void reset(uchar *, size_t, size_t, size_t, VBitmap::Format);
+        void reset(size_t, size_t, VBitmap::Format);
+        static uchar depth(VBitmap::Format format);
+        void fill(uint);
+        void updateLuma();
+    };
+
+    rc_ptr<Impl> mImpl;
+};
+
+V_END_NAMESPACE
+
+#endif // VBITMAP_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vbrush.cpp b/vendor/github.com/Benau/go_rlottie/vector_vbrush.cpp
new file mode 100644
index 00000000..6f1e5394
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vbrush.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */ + +#include "vector_vbrush.h" + +V_BEGIN_NAMESPACE + +VGradient::VGradient(VGradient::Type type) + : mType(type) +{ + if (mType == Type::Linear) + linear.x1 = linear.y1 = linear.x2 = linear.y2 = 0.0f; + else + radial.cx = radial.cy = radial.fx = + radial.fy = radial.cradius = radial.fradius = 0.0f; +} + +void VGradient::setStops(const VGradientStops &stops) +{ + mStops = stops; +} + +VBrush::VBrush(const VColor &color) : mType(VBrush::Type::Solid), mColor(color) +{ +} + +VBrush::VBrush(uchar r, uchar g, uchar b, uchar a) + : mType(VBrush::Type::Solid), mColor(r, g, b, a) + +{ +} + +VBrush::VBrush(const VGradient *gradient) +{ + if (!gradient) return; + + mGradient = gradient; + + if (gradient->mType == VGradient::Type::Linear) { + mType = VBrush::Type::LinearGradient; + } else if (gradient->mType == VGradient::Type::Radial) { + mType = VBrush::Type::RadialGradient; + } +} + +VBrush::VBrush(const VTexture *texture):mType(VBrush::Type::Texture), mTexture(texture) +{ +} + +V_END_NAMESPACE diff --git a/vendor/github.com/Benau/go_rlottie/vector_vbrush.h b/vendor/github.com/Benau/go_rlottie/vector_vbrush.h new file mode 100644 index 00000000..7cede06a --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_vbrush.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+#ifndef VBRUSH_H
+#define VBRUSH_H
+
+#include <vector>
+#include "vector_vglobal.h"
+#include "vector_vmatrix.h"
+#include "vector_vpoint.h"
+#include "vector_vbitmap.h"
+
+V_BEGIN_NAMESPACE
+
+using VGradientStop = std::pair<float, VColor>;
+using VGradientStops = std::vector<VGradientStop>;
+class VGradient {
+public:
+    enum class Mode { Absolute, Relative };
+    enum class Spread { Pad, Repeat, Reflect };
+    enum class Type { Linear, Radial };
+    explicit VGradient(VGradient::Type type);
+    void setStops(const VGradientStops &stops);
+    void setAlpha(float alpha) {mAlpha = alpha;}
+    float alpha() const {return mAlpha;}
+
+public:
+    static constexpr int colorTableSize = 1024;
+    VGradient::Type   mType{Type::Linear};
+    VGradient::Spread mSpread{Spread::Pad};
+    VGradient::Mode   mMode{Mode::Absolute};
+    VGradientStops    mStops;
+    float             mAlpha{1.0};
+    struct Linear{
+        float x1{0}, y1{0}, x2{0}, y2{0};
+    };
+    struct Radial{
+        float cx{0}, cy{0}, fx{0}, fy{0}, cradius{0}, fradius{0};
+    };
+    union {
+        Linear linear;
+        Radial radial;
+    };
+    VMatrix mMatrix;
+};
+
+struct VTexture {
+    VBitmap mBitmap;
+    VMatrix mMatrix;
+    int     mAlpha{255};
+};
+
+class VBrush {
+public:
+    enum class Type { NoBrush, Solid, LinearGradient, RadialGradient, Texture };
+    VBrush():mType(Type::NoBrush),mColor(){};
+    explicit VBrush(const VColor &color);
+    explicit VBrush(const VGradient *gradient);
+    explicit VBrush(uchar r, uchar g, uchar b, uchar a);
+    explicit VBrush(const VTexture *texture);
+    inline VBrush::Type type() const { return mType; }
+public:
+    VBrush::Type mType{Type::NoBrush};
+    union {
+        VColor           mColor{};
+        const VGradient *mGradient;
+        const VTexture  *mTexture;
+    };
+};
+
+V_END_NAMESPACE
+
+#endif // VBRUSH_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vcowptr.h b/vendor/github.com/Benau/go_rlottie/vector_vcowptr.h
new file mode 100644
index 00000000..af2c7f24
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vcowptr.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VCOWPTR_H
+#define VCOWPTR_H
+
+#include <cassert>
+#include <atomic>
+
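+// vcow_ptr is a small copy-on-write smart pointer: copies alias one shared
+// model and bump its atomic refcount, read() returns the shared value without
+// copying, and write() clones the model first whenever it is not uniquely
+// owned (see unique()), so mutation never disturbs other readers.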
+template <typename T>
+class vcow_ptr {
+    struct model {
+        std::atomic<std::size_t> mRef{1};
+
+        model() = default;
+
+        template <class... Args>
+        explicit model(Args&&... args) : mValue(std::forward<Args>(args)...){}
+        explicit model(const T& other) : mValue(other){}
+
+        T mValue;
+    };
+    model* mModel;
+
+public:
+    using element_type = T;
+
+    vcow_ptr()
+    {
+        static model default_s;
+        mModel = &default_s;
+        ++mModel->mRef;
+    }
+
+    ~vcow_ptr()
+    {
+        if (mModel && (--mModel->mRef == 0)) delete mModel;
+    }
+
+    template <class... Args>
+    explicit vcow_ptr(Args&&... args) : mModel(new model(std::forward<Args>(args)...))
+    {
+    }
+
+    vcow_ptr(const vcow_ptr& x) noexcept : mModel(x.mModel)
+    {
+        assert(mModel);
+        ++mModel->mRef;
+    }
+    vcow_ptr(vcow_ptr&& x) noexcept : mModel(x.mModel)
+    {
+        assert(mModel);
+        x.mModel = nullptr;
+    }
+
+    auto operator=(const vcow_ptr& x) noexcept -> vcow_ptr&
+    {
+        *this = vcow_ptr(x);
+        return *this;
+    }
+
+    auto operator=(vcow_ptr&& x) noexcept -> vcow_ptr&
+    {
+        auto tmp = std::move(x);
+        swap(*this, tmp);
+        return *this;
+    }
+
+    auto operator*() const noexcept -> const element_type& { return read(); }
+
+    auto operator-> () const noexcept -> const element_type* { return &read(); }
+
+    std::size_t refCount() const noexcept
+    {
+        assert(mModel);
+
+        return mModel->mRef;
+    }
+
+    bool unique() const noexcept
+    {
+        assert(mModel);
+
+        return mModel->mRef == 1;
+    }
+
+    auto write() -> element_type&
+    {
+        if (!unique()) *this = vcow_ptr(read());
+
+        return mModel->mValue;
+    }
+
+    auto read() const noexcept -> const element_type&
+    {
+        assert(mModel);
+
+        return mModel->mValue;
+    }
+
+    friend inline void swap(vcow_ptr& x, vcow_ptr& y) noexcept
+    {
+        std::swap(x.mModel, y.mModel);
+    }
+};
+
+#endif // VCOWPTR_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vdasher.cpp b/vendor/github.com/Benau/go_rlottie/vector_vdasher.cpp
new file mode 100644
index 00000000..b2bc9d1d
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vdasher.cpp
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "vector_vbezier.h"
+
+#include <cmath>
+
+#include "vector_vdasher.h"
+#include "vector_vline.h"
+
+V_BEGIN_NAMESPACE
+
+static constexpr float tolerance = 0.1f;
+VDasher::VDasher(const float *dashArray, size_t size)
+{
+    mDashArray = reinterpret_cast<const VDasher::Dash *>(dashArray);
+    mArraySize = size / 2;
+    if (size % 2) mDashOffset = dashArray[size - 1];
+    mIndex = 0;
+    mCurrentLength = 0;
+    mDiscard = false;
+    // if the dash array contains ZERO length
+    // segments or ZERO length gaps we could
+    // optimize those use cases.
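+    // The incoming array is interpreted as (length, gap) pairs; an odd
+    // trailing value becomes the dash offset (see above). The loop below
+    // records whether every length or every gap is zero so dashed() can
+    // short-circuit those degenerate patterns.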
+    for (size_t i = 0; i < mArraySize; i++) {
+        if (!vCompare(mDashArray[i].length, 0.0f))
+            mNoLength = false;
+        if (!vCompare(mDashArray[i].gap, 0.0f))
+            mNoGap = false;
+    }
+}
+
+void VDasher::moveTo(const VPointF &p)
+{
+    mDiscard = false;
+    mStartNewSegment = true;
+    mCurPt = p;
+    mIndex = 0;
+
+    if (!vCompare(mDashOffset, 0.0f)) {
+        float totalLength = 0.0;
+        for (size_t i = 0; i < mArraySize; i++) {
+            totalLength = mDashArray[i].length + mDashArray[i].gap;
+        }
+        float normalizeLen = std::fmod(mDashOffset, totalLength);
+        if (normalizeLen < 0.0f) {
+            normalizeLen = totalLength + normalizeLen;
+        }
+        // now the length is less than total length and +ve
+        // find out the current dash index, dash length and gap.
+        for (size_t i = 0; i < mArraySize; i++) {
+            if (normalizeLen < mDashArray[i].length) {
+                mIndex = i;
+                mCurrentLength = mDashArray[i].length - normalizeLen;
+                mDiscard = false;
+                break;
+            }
+            normalizeLen -= mDashArray[i].length;
+            if (normalizeLen < mDashArray[i].gap) {
+                mIndex = i;
+                mCurrentLength = mDashArray[i].gap - normalizeLen;
+                mDiscard = true;
+                break;
+            }
+            normalizeLen -= mDashArray[i].gap;
+        }
+    } else {
+        mCurrentLength = mDashArray[mIndex].length;
+    }
+    if (vIsZero(mCurrentLength)) updateActiveSegment();
+}
+
+void VDasher::addLine(const VPointF &p)
+{
+    if (mDiscard) return;
+
+    if (mStartNewSegment) {
+        mResult->moveTo(mCurPt);
+        mStartNewSegment = false;
+    }
+    mResult->lineTo(p);
+}
+
+void VDasher::updateActiveSegment()
+{
+    mStartNewSegment = true;
+
+    if (mDiscard) {
+        mDiscard = false;
+        mIndex = (mIndex + 1) % mArraySize;
+        mCurrentLength = mDashArray[mIndex].length;
+    } else {
+        mDiscard = true;
+        mCurrentLength = mDashArray[mIndex].gap;
+    }
+    if (vIsZero(mCurrentLength)) updateActiveSegment();
+}
+
+void VDasher::lineTo(const VPointF &p)
+{
+    VLine left, right;
+    VLine line(mCurPt, p);
+    float length = line.length();
+
+    if (length <= mCurrentLength) {
+        mCurrentLength -= length;
+        addLine(p);
+    } else {
+        while (length > mCurrentLength) {
+            length -= mCurrentLength;
+            line.splitAtLength(mCurrentLength, left, right);
+
+            addLine(left.p2());
+            updateActiveSegment();
+
+            line = right;
+            mCurPt = line.p1();
+        }
+        // handle remainder
+        if (length > tolerance) {
+            mCurrentLength -= length;
+            addLine(line.p2());
+        }
+    }
+
+    if (mCurrentLength < tolerance) updateActiveSegment();
+
+    mCurPt = p;
+}
+
+void VDasher::addCubic(const VPointF &cp1, const VPointF &cp2, const VPointF &e)
+{
+    if (mDiscard) return;
+
+    if (mStartNewSegment) {
+        mResult->moveTo(mCurPt);
+        mStartNewSegment = false;
+    }
+    mResult->cubicTo(cp1, cp2, e);
+}
+
+void VDasher::cubicTo(const VPointF &cp1, const VPointF &cp2, const VPointF &e)
+{
+    VBezier left, right;
+    VBezier b = VBezier::fromPoints(mCurPt, cp1, cp2, e);
+    float bezLen = b.length();
+
+    if (bezLen <= mCurrentLength) {
+        mCurrentLength -= bezLen;
+        addCubic(cp1, cp2, e);
+    } else {
+        while (bezLen > mCurrentLength) {
+            bezLen -= mCurrentLength;
+            b.splitAtLength(mCurrentLength, &left, &right);
+
+            addCubic(left.pt2(), left.pt3(), left.pt4());
+            updateActiveSegment();
+
+            b = right;
+            mCurPt = b.pt1();
+        }
+        // handle remainder
+        if (bezLen > tolerance) {
+            mCurrentLength -= bezLen;
+            addCubic(b.pt2(), b.pt3(), b.pt4());
+        }
+    }
+
+    if (mCurrentLength < tolerance) updateActiveSegment();
+
+    mCurPt = e;
+}
+
+void VDasher::dashHelper(const VPath &path, VPath &result)
+{
+    mResult = &result;
+    mResult->reserve(path.points().size(), path.elements().size());
+    mIndex = 0;
+    const std::vector<VPath::Element> &elms = path.elements();
+    const std::vector<VPointF> &pts = path.points();
+    const VPointF *ptPtr = pts.data();
+
+    for (auto &i : elms) {
+        switch (i) {
+        case VPath::Element::MoveTo: {
+            moveTo(*ptPtr++);
+            break;
+        }
+        case VPath::Element::LineTo: {
+            lineTo(*ptPtr++);
+            break;
+        }
+        case VPath::Element::CubicTo: {
+            cubicTo(*ptPtr, *(ptPtr + 1), *(ptPtr + 2));
+            ptPtr += 3;
+            break;
+        }
+        case VPath::Element::Close: {
+            // The point is already joined to start point in VPath
+            // no need to do anything here.
+            break;
+        }
+        }
+    }
+    mResult = nullptr;
+}
+
+void VDasher::dashed(const VPath &path, VPath &result)
+{
+    if (mNoLength && mNoGap) return result.reset();
+
+    if (path.empty() || mNoLength) return result.reset();
+
+    if (mNoGap) return result.clone(path);
+
+    result.reset();
+
+    dashHelper(path, result);
+}
+
+VPath VDasher::dashed(const VPath &path)
+{
+    if (mNoLength && mNoGap) return path;
+
+    if (path.empty() || mNoLength) return VPath();
+
+    if (mNoGap) return path;
+
+    VPath result;
+
+    dashHelper(path, result);
+
+    return result;
+}
+
+V_END_NAMESPACE
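A small usage sketch of the dasher above (illustrative only: `path` stands for a VPath assembled elsewhere; the class declaration follows in vector_vdasher.h):

    float pattern[] = {10.0f, 5.0f, 2.0f};  // 10 on, 5 off; odd trailing 2 = offset
    VDasher dasher(pattern, 3);
    VPath out = dasher.dashed(path);        // dashed copy; the input is untouched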
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vdasher.h b/vendor/github.com/Benau/go_rlottie/vector_vdasher.h
new file mode 100644
index 00000000..aa4bcb5e
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vdasher.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VDASHER_H
+#define VDASHER_H
+#include "vector_vpath.h"
+
+V_BEGIN_NAMESPACE
+
+class VDasher {
+public:
+    VDasher(const float *dashArray, size_t size);
+    VPath dashed(const VPath &path);
+    void dashed(const VPath &path, VPath &result);
+
+private:
+    void moveTo(const VPointF &p);
+    void lineTo(const VPointF &p);
+    void cubicTo(const VPointF &cp1, const VPointF &cp2, const VPointF &e);
+    void close();
+    void addLine(const VPointF &p);
+    void addCubic(const VPointF &cp1, const VPointF &cp2, const VPointF &e);
+    void updateActiveSegment();
+
+private:
+    void dashHelper(const VPath &path, VPath &result);
+    struct Dash {
+        float length;
+        float gap;
+    };
+    const VDasher::Dash *mDashArray;
+    size_t  mArraySize{0};
+    VPointF mCurPt;
+    size_t  mIndex{0}; /* index to the dash Array */
+    float   mCurrentLength;
+    float   mDashOffset{0};
+    VPath  *mResult{nullptr};
+    bool    mDiscard{false};
+    bool    mStartNewSegment{true};
+    bool    mNoLength{true};
+    bool    mNoGap{true};
+};
+
+V_END_NAMESPACE
+
+#endif // VDASHER_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vdebug.cpp b/vendor/github.com/Benau/go_rlottie/vector_vdebug.cpp
new file mode 100644
index 00000000..dc11f233
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vdebug.cpp
@@ -0,0 +1,758 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "vector_vdebug.h"
+
+#ifdef LOTTIE_LOGGING_SUPPORT
+
+#include <atomic>
+#include <chrono>
+#include <cstring>
+#include <ctime>
+#include <fstream>
+#include <iostream>
+#include <memory>
+#include <queue>
+#include <sstream>
+#include <thread>
+
+namespace {
+
+/* Returns microseconds since epoch */
+uint64_t timestamp_now()
+{
+    return std::chrono::duration_cast<std::chrono::microseconds>(
+               std::chrono::high_resolution_clock::now().time_since_epoch())
+        .count();
+}
+
+/* I want [2016-10-13 00:01:23.528514] */
+void format_timestamp(std::ostream& os, uint64_t timestamp)
+{
+    // The next 3 lines do not work on MSVC!
+ // auto duration = std::chrono::microseconds(timestamp); + // std::chrono::high_resolution_clock::time_point time_point(duration); + // std::time_t time_t = + // std::chrono::high_resolution_clock::to_time_t(time_point); + std::time_t time_t = timestamp / 1000000; + auto gmtime = std::gmtime(&time_t); + char buffer[32]; + strftime(buffer, 32, "%Y-%m-%d %T.", gmtime); + char microseconds[7]; + snprintf(microseconds, 7, "%06llu", + (long long unsigned int)timestamp % 1000000); + os << '[' << buffer << microseconds << ']'; +} + +std::thread::id this_thread_id() +{ + static thread_local const std::thread::id id = std::this_thread::get_id(); + return id; +} + +template +struct TupleIndex; + +template +struct TupleIndex > { + static constexpr const std::size_t value = 0; +}; + +template +struct TupleIndex > { + static constexpr const std::size_t value = + 1 + TupleIndex >::value; +}; + +} // anonymous namespace + +typedef std::tuple + SupportedTypes; + +char const* to_string(LogLevel loglevel) +{ + switch (loglevel) { + case LogLevel::OFF: + return "OFF"; + case LogLevel::INFO: + return "INFO"; + case LogLevel::WARN: + return "WARN"; + case LogLevel::CRIT: + return "CRIT"; + } + return "XXXX"; +} + +template +void VDebug::encode(Arg arg) +{ + *reinterpret_cast(buffer()) = arg; + m_bytes_used += sizeof(Arg); +} + +template +void VDebug::encode(Arg arg, uint8_t type_id) +{ + resize_buffer_if_needed(sizeof(Arg) + sizeof(uint8_t)); + encode(type_id); + encode(arg); +} + +VDebug::VDebug(LogLevel level, char const* file, char const* function, + uint32_t line) + : m_bytes_used(0), m_buffer_size(sizeof(m_stack_buffer)) +{ + encode(timestamp_now()); + encode(this_thread_id()); + encode(string_literal_t(file)); + encode(string_literal_t(function)); + encode(line); + encode(level); + if (level == LogLevel::INFO) { + m_logAll = false; + } else { + m_logAll = true; + } +} + +VDebug::~VDebug() = default; + +void VDebug::stringify(std::ostream& os) +{ + char* b = !m_heap_buffer ? 
m_stack_buffer : m_heap_buffer.get(); + char const* const end = b + m_bytes_used; + uint64_t timestamp = *reinterpret_cast(b); + b += sizeof(uint64_t); + std::thread::id threadid = *reinterpret_cast(b); + b += sizeof(std::thread::id); + string_literal_t file = *reinterpret_cast(b); + b += sizeof(string_literal_t); + string_literal_t function = *reinterpret_cast(b); + b += sizeof(string_literal_t); + uint32_t line = *reinterpret_cast(b); + b += sizeof(uint32_t); + LogLevel loglevel = *reinterpret_cast(b); + b += sizeof(LogLevel); + if (m_logAll) { + format_timestamp(os, timestamp); + + os << '[' << to_string(loglevel) << ']' << '[' << threadid << ']' << '[' + << file.m_s << ':' << function.m_s << ':' << line << "] "; + } + + stringify(os, b, end); + os << std::endl; + + if (loglevel >= LogLevel::CRIT) os.flush(); +} + +template +char* decode(std::ostream& os, char* b, Arg* /*dummy*/) +{ + Arg arg = *reinterpret_cast(b); + os << arg; + return b + sizeof(Arg); +} + +template <> +char* decode(std::ostream& os, char* b, VDebug::string_literal_t* /*dummy*/) +{ + VDebug::string_literal_t s = + *reinterpret_cast(b); + os << s.m_s; + return b + sizeof(VDebug::string_literal_t); +} + +template <> +char* decode(std::ostream& os, char* b, char** /*dummy*/) +{ + while (*b != '\0') { + os << *b; + ++b; + } + return ++b; +} + +void VDebug::stringify(std::ostream& os, char* start, char const* const end) +{ + if (start == end) return; + + int type_id = static_cast(*start); + start++; + + switch (type_id) { + case 0: + stringify( + os, + decode(os, start, + static_cast::type*>( + nullptr)), + end); + return; + case 1: + stringify( + os, + decode(os, start, + static_cast::type*>( + nullptr)), + end); + return; + case 2: + stringify( + os, + decode(os, start, + static_cast::type*>( + nullptr)), + end); + return; + case 3: + stringify( + os, + decode(os, start, + static_cast::type*>( + nullptr)), + end); + return; + case 4: + stringify( + os, + decode(os, start, + static_cast::type*>( + nullptr)), + end); + return; + case 5: + stringify( + os, + decode(os, start, + static_cast::type*>( + nullptr)), + end); + return; + case 6: + stringify( + os, + decode(os, start, + static_cast::type*>( + nullptr)), + end); + return; + case 7: + stringify( + os, + decode(os, start, + static_cast::type*>( + nullptr)), + end); + return; + } +} + +char* VDebug::buffer() +{ + return !m_heap_buffer ? 
&m_stack_buffer[m_bytes_used] + : &(m_heap_buffer.get())[m_bytes_used]; +} + +void VDebug::resize_buffer_if_needed(size_t additional_bytes) +{ + size_t const required_size = m_bytes_used + additional_bytes; + + if (required_size <= m_buffer_size) return; + + if (!m_heap_buffer) { + m_buffer_size = std::max(static_cast(512), required_size); + m_heap_buffer = std::make_unique(m_buffer_size); + memcpy(m_heap_buffer.get(), m_stack_buffer, m_bytes_used); + return; + } else { + m_buffer_size = + std::max(static_cast(2 * m_buffer_size), required_size); + std::unique_ptr new_heap_buffer(new char[m_buffer_size]); + memcpy(new_heap_buffer.get(), m_heap_buffer.get(), m_bytes_used); + m_heap_buffer.swap(new_heap_buffer); + } +} + +void VDebug::encode(char const* arg) +{ + if (arg != nullptr) encode_c_string(arg, strlen(arg)); +} + +void VDebug::encode(char* arg) +{ + if (arg != nullptr) encode_c_string(arg, strlen(arg)); +} + +void VDebug::encode_c_string(char const* arg, size_t length) +{ + if (length == 0) return; + + resize_buffer_if_needed(1 + length + 1); + char* b = buffer(); + auto type_id = TupleIndex::value; + *reinterpret_cast(b++) = static_cast(type_id); + memcpy(b, arg, length + 1); + m_bytes_used += 1 + length + 1; +} + +void VDebug::encode(string_literal_t arg) +{ + encode( + arg, TupleIndex::value); +} + +VDebug& VDebug::operator<<(std::string const& arg) +{ + encode_c_string(arg.c_str(), arg.length()); + return *this; +} + +VDebug& VDebug::operator<<(int32_t arg) +{ + encode(arg, TupleIndex::value); + return *this; +} + +VDebug& VDebug::operator<<(uint32_t arg) +{ + encode(arg, TupleIndex::value); + return *this; +} + +// VDebug& VDebug::operator<<(int64_t arg) +// { +// encode < int64_t >(arg, TupleIndex < int64_t, SupportedTypes >::value); +// return *this; +// } + +// VDebug& VDebug::operator<<(uint64_t arg) +// { +// encode < uint64_t >(arg, TupleIndex < uint64_t, SupportedTypes >::value); +// return *this; +// } +VDebug& VDebug::operator<<(unsigned long arg) +{ + encode(arg, TupleIndex::value); + return *this; +} + +VDebug& VDebug::operator<<(long arg) +{ + encode(arg, TupleIndex::value); + return *this; +} + +VDebug& VDebug::operator<<(double arg) +{ + encode(arg, TupleIndex::value); + return *this; +} + +VDebug& VDebug::operator<<(char arg) +{ + encode(arg, TupleIndex::value); + return *this; +} + +struct BufferBase { + virtual ~BufferBase() = default; + virtual void push(VDebug&& logline) = 0; + virtual bool try_pop(VDebug& logline) = 0; +}; + +struct SpinLock { + SpinLock(std::atomic_flag& flag) : m_flag(flag) + { + while (m_flag.test_and_set(std::memory_order_acquire)) + ; + } + + ~SpinLock() { m_flag.clear(std::memory_order_release); } + +private: + std::atomic_flag& m_flag; +}; + +/* Multi Producer Single Consumer Ring Buffer */ +class RingBuffer : public BufferBase { +public: + struct alignas(64) Item { + Item() + : flag(), written(0), logline(LogLevel::INFO, nullptr, nullptr, 0) + { + } + + std::atomic_flag flag; + char written; + char padding[256 - sizeof(std::atomic_flag) - sizeof(char) - + sizeof(VDebug)]; + VDebug logline; + }; + + RingBuffer(size_t const size) + : m_size(size), + m_ring(static_cast(std::malloc(size * sizeof(Item)))), + m_write_index(0), + m_read_index(0) + { + for (size_t i = 0; i < m_size; ++i) { + new (&m_ring[i]) Item(); + } + static_assert(sizeof(Item) == 256, "Unexpected size != 256"); + } + + ~RingBuffer() override + { + for (size_t i = 0; i < m_size; ++i) { + m_ring[i].~Item(); + } + std::free(m_ring); + } + + void push(VDebug&& logline) 
override + { + unsigned int write_index = + m_write_index.fetch_add(1, std::memory_order_relaxed) % m_size; + Item& item = m_ring[write_index]; + SpinLock spinlock(item.flag); + item.logline = std::move(logline); + item.written = 1; + } + + bool try_pop(VDebug& logline) override + { + Item& item = m_ring[m_read_index % m_size]; + SpinLock spinlock(item.flag); + if (item.written == 1) { + logline = std::move(item.logline); + item.written = 0; + ++m_read_index; + return true; + } + return false; + } + + RingBuffer(RingBuffer const&) = delete; + RingBuffer& operator=(RingBuffer const&) = delete; + +private: + size_t const m_size; + Item* m_ring; + std::atomic m_write_index; + +public: + char pad[64]; + +private: + unsigned int m_read_index; +}; + +class Buffer { +public: + struct Item { + Item(VDebug&& logline) : logline(std::move(logline)) {} + char padding[256 - sizeof(VDebug)]; + VDebug logline; + }; + + static constexpr const size_t size = + 32768; // 8MB. Helps reduce memory fragmentation + + Buffer() : m_buffer(static_cast(std::malloc(size * sizeof(Item)))) + { + for (size_t i = 0; i <= size; ++i) { + m_write_state[i].store(0, std::memory_order_relaxed); + } + static_assert(sizeof(Item) == 256, "Unexpected size != 256"); + } + + ~Buffer() + { + unsigned int write_count = m_write_state[size].load(); + for (size_t i = 0; i < write_count; ++i) { + m_buffer[i].~Item(); + } + std::free(m_buffer); + } + + // Returns true if we need to switch to next buffer + bool push(VDebug&& logline, unsigned int const write_index) + { + new (&m_buffer[write_index]) Item(std::move(logline)); + m_write_state[write_index].store(1, std::memory_order_release); + return m_write_state[size].fetch_add(1, std::memory_order_acquire) + + 1 == + size; + } + + bool try_pop(VDebug& logline, unsigned int const read_index) + { + if (m_write_state[read_index].load(std::memory_order_acquire)) { + Item& item = m_buffer[read_index]; + logline = std::move(item.logline); + return true; + } + return false; + } + + Buffer(Buffer const&) = delete; + Buffer& operator=(Buffer const&) = delete; + +private: + Item* m_buffer; + std::atomic m_write_state[size + 1]; +}; + +class QueueBuffer : public BufferBase { +public: + QueueBuffer(QueueBuffer const&) = delete; + QueueBuffer& operator=(QueueBuffer const&) = delete; + + QueueBuffer() + : m_current_read_buffer{nullptr}, + m_write_index(0), + m_flag(), + m_read_index(0) + { + setup_next_write_buffer(); + } + + void push(VDebug&& logline) override + { + unsigned int write_index = + m_write_index.fetch_add(1, std::memory_order_relaxed); + if (write_index < Buffer::size) { + if (m_current_write_buffer.load(std::memory_order_acquire) + ->push(std::move(logline), write_index)) { + setup_next_write_buffer(); + } + } else { + while (m_write_index.load(std::memory_order_acquire) >= + Buffer::size) + ; + push(std::move(logline)); + } + } + + bool try_pop(VDebug& logline) override + { + if (m_current_read_buffer == nullptr) + m_current_read_buffer = get_next_read_buffer(); + + Buffer* read_buffer = m_current_read_buffer; + + if (read_buffer == nullptr) return false; + + if (read_buffer->try_pop(logline, m_read_index)) { + m_read_index++; + if (m_read_index == Buffer::size) { + m_read_index = 0; + m_current_read_buffer = nullptr; + SpinLock spinlock(m_flag); + m_buffers.pop(); + } + return true; + } + + return false; + } + +private: + void setup_next_write_buffer() + { + std::unique_ptr next_write_buffer(new Buffer()); + m_current_write_buffer.store(next_write_buffer.get(), + 
std::memory_order_release); + SpinLock spinlock(m_flag); + m_buffers.push(std::move(next_write_buffer)); + m_write_index.store(0, std::memory_order_relaxed); + } + + Buffer* get_next_read_buffer() + { + SpinLock spinlock(m_flag); + return m_buffers.empty() ? nullptr : m_buffers.front().get(); + } + +private: + std::queue > m_buffers; + std::atomic m_current_write_buffer; + Buffer* m_current_read_buffer; + std::atomic m_write_index; + std::atomic_flag m_flag; + unsigned int m_read_index; +}; + +class FileWriter { +public: + FileWriter(std::string const& log_directory, + std::string const& log_file_name, uint32_t log_file_roll_size_mb) + : m_log_file_roll_size_bytes(log_file_roll_size_mb * 1024 * 1024), + m_name(log_directory + log_file_name) + { + roll_file(); + } + + void write(VDebug& logline) + { + auto pos = m_os->tellp(); + logline.stringify(*m_os); + m_bytes_written += m_os->tellp() - pos; + if (m_bytes_written > m_log_file_roll_size_bytes) { + roll_file(); + } + } + +private: + void roll_file() + { + if (m_os) { + m_os->flush(); + m_os->close(); + } + + m_bytes_written = 0; + m_os = std::make_unique(); + // TODO Optimize this part. Does it even matter ? + std::string log_file_name = m_name; + log_file_name.append("."); + log_file_name.append(std::to_string(++m_file_number)); + log_file_name.append(".txt"); + m_os->open(log_file_name, std::ofstream::out | std::ofstream::trunc); + } + +private: + uint32_t m_file_number = 0; + std::streamoff m_bytes_written = 0; + uint32_t const m_log_file_roll_size_bytes; + std::string const m_name; + std::unique_ptr m_os; +}; + +class NanoLogger { +public: + NanoLogger(NonGuaranteedLogger ngl, std::string const& log_directory, + std::string const& log_file_name, uint32_t log_file_roll_size_mb) + : m_state(State::INIT), + m_buffer_base( + new RingBuffer(std::max(1u, ngl.ring_buffer_size_mb) * 1024 * 4)), + m_file_writer(log_directory, log_file_name, + std::max(1u, log_file_roll_size_mb)), + m_thread(&NanoLogger::pop, this) + { + m_state.store(State::READY, std::memory_order_release); + } + + NanoLogger(GuaranteedLogger /*gl*/, std::string const& log_directory, + std::string const& log_file_name, uint32_t log_file_roll_size_mb) + : m_state(State::INIT), + m_buffer_base(new QueueBuffer()), + m_file_writer(log_directory, log_file_name, + std::max(1u, log_file_roll_size_mb)), + m_thread(&NanoLogger::pop, this) + { + m_state.store(State::READY, std::memory_order_release); + } + + ~NanoLogger() + { + m_state.store(State::SHUTDOWN); + m_thread.join(); + } + + void add(VDebug&& logline) { m_buffer_base->push(std::move(logline)); } + + void pop() + { + // Wait for constructor to complete and pull all stores done there to + // this thread / core. 
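+        // (The acquire-load in the loop below pairs with the release-store of
+        // State::READY in the constructors: once this consumer thread observes
+        // READY, every store made while constructing the buffer and the file
+        // writer is guaranteed to be visible here.)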
+ while (m_state.load(std::memory_order_acquire) == State::INIT) + std::this_thread::sleep_for(std::chrono::microseconds(50)); + + VDebug logline(LogLevel::INFO, nullptr, nullptr, 0); + + while (m_state.load() == State::READY) { + if (m_buffer_base->try_pop(logline)) + m_file_writer.write(logline); + else + std::this_thread::sleep_for(std::chrono::microseconds(50)); + } + + // Pop and log all remaining entries + while (m_buffer_base->try_pop(logline)) { + m_file_writer.write(logline); + } + } + +private: + enum class State { INIT, READY, SHUTDOWN }; + + std::atomic m_state; + std::unique_ptr m_buffer_base; + FileWriter m_file_writer; + std::thread m_thread; +}; + +std::unique_ptr nanologger; +std::atomic atomic_nanologger; + +bool VDebugServer::operator==(VDebug& logline) +{ + atomic_nanologger.load(std::memory_order_acquire)->add(std::move(logline)); + return true; +} + +void initialize(NonGuaranteedLogger ngl, std::string const& log_directory, + std::string const& log_file_name, + uint32_t log_file_roll_size_mb) +{ + nanologger = std::make_unique(ngl, log_directory, log_file_name, + log_file_roll_size_mb); + atomic_nanologger.store(nanologger.get(), std::memory_order_seq_cst); +} + +void initialize(GuaranteedLogger gl, std::string const& log_directory, + std::string const& log_file_name, + uint32_t log_file_roll_size_mb) +{ + nanologger = std::make_unique(gl, log_directory, log_file_name, + log_file_roll_size_mb); + atomic_nanologger.store(nanologger.get(), std::memory_order_seq_cst); +} + +std::atomic loglevel = {0}; + +void set_log_level(LogLevel level) +{ + loglevel.store(static_cast(level), std::memory_order_release); +} + +bool is_logged(LogLevel level) +{ + return static_cast(level) >= + loglevel.load(std::memory_order_relaxed); +} + +#endif // LOTTIE_LOGGING_SUPPORT diff --git a/vendor/github.com/Benau/go_rlottie/vector_vdebug.h b/vendor/github.com/Benau/go_rlottie/vector_vdebug.h new file mode 100644 index 00000000..5b6bef5b --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_vdebug.h @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef VDEBUG_H +#define VDEBUG_H + +#include "config.h" + +#ifdef LOTTIE_LOGGING_SUPPORT + +#include +#include +#include +#include +#include + +enum class LogLevel : uint8_t { INFO, WARN, CRIT, OFF }; + +class VDebug { +public: + VDebug(); + VDebug& debug() { return *this; } + VDebug(LogLevel level, char const* file, char const* function, + uint32_t line); + ~VDebug(); + + VDebug(VDebug&&) = default; + VDebug& operator=(VDebug&&) = default; + + void stringify(std::ostream& os); + + VDebug& operator<<(char arg); + VDebug& operator<<(int32_t arg); + VDebug& operator<<(uint32_t arg); + // VDebug& operator<<(int64_t arg); + // VDebug& operator<<(uint64_t arg); + + VDebug& operator<<(long arg); + VDebug& operator<<(unsigned long arg); + VDebug& operator<<(double arg); + VDebug& operator<<(std::string const& arg); + + template + VDebug& operator<<(const char (&arg)[N]) + { + encode(string_literal_t(arg)); + return *this; + } + + template + typename std::enable_if::value, + VDebug&>::type + operator<<(Arg const& arg) + { + encode(arg); + return *this; + } + + template + typename std::enable_if::value, VDebug&>::type + operator<<(Arg const& arg) + { + encode(arg); + return *this; + } + + struct string_literal_t { + explicit string_literal_t(char const* s) : m_s(s) {} + char const* m_s; + }; + +private: + char* buffer(); + + template + void encode(Arg arg); + + template + void encode(Arg arg, uint8_t type_id); + + void encode(char* arg); + void encode(char const* arg); + void encode(string_literal_t arg); + void encode_c_string(char const* arg, size_t length); + void resize_buffer_if_needed(size_t additional_bytes); + void stringify(std::ostream& os, char* start, char const* const end); + +private: + size_t m_bytes_used{0}; + size_t m_buffer_size{0}; + std::unique_ptr m_heap_buffer; + bool m_logAll; + char m_stack_buffer[256 - sizeof(bool) - 2 * sizeof(size_t) - + sizeof(decltype(m_heap_buffer)) - 8 /* Reserved */]; +}; + +struct VDebugServer { + /* + * Ideally this should have been operator+= + * Could not get that to compile, so here we are... + */ + bool operator==(VDebug&); +}; + +void set_log_level(LogLevel level); + +bool is_logged(LogLevel level); + +/* + * Non guaranteed logging. Uses a ring buffer to hold log lines. + * When the ring gets full, the previous log line in the slot will be dropped. + * Does not block producer even if the ring buffer is full. + * ring_buffer_size_mb - LogLines are pushed into a mpsc ring buffer whose size + * is determined by this parameter. Since each LogLine is 256 bytes, + * ring_buffer_size = ring_buffer_size_mb * 1024 * 1024 / 256 + */ +struct NonGuaranteedLogger { + NonGuaranteedLogger(uint32_t ring_buffer_size_mb_) + : ring_buffer_size_mb(ring_buffer_size_mb_) + { + } + uint32_t ring_buffer_size_mb; +}; + +/* + * Provides a guarantee log lines will not be dropped. + */ +struct GuaranteedLogger { +}; + +/* + * Ensure initialize() is called prior to any log statements. + * log_directory - where to create the logs. For example - "/tmp/" + * log_file_name - root of the file name. For example - "nanolog" + * This will create log files of the form - + * /tmp/nanolog.1.txt + * /tmp/nanolog.2.txt + * etc. + * log_file_roll_size_mb - mega bytes after which we roll to next log file. 
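+ * For example (hypothetical values): initialize(GuaranteedLogger(), "/tmp/",
+ * "nanolog", 32) appends to /tmp/nanolog.1.txt and, once roughly
+ * 32 * 1024 * 1024 bytes have been written, rolls over to /tmp/nanolog.2.txt.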
+ */ +void initialize(GuaranteedLogger gl, std::string const& log_directory, + std::string const& log_file_name, + uint32_t log_file_roll_size_mb); +void initialize(NonGuaranteedLogger ngl, std::string const& log_directory, + std::string const& log_file_name, + uint32_t log_file_roll_size_mb); + +#define VDEBUG_LOG(LEVEL) \ + VDebugServer() == VDebug(LEVEL, __FILE__, __func__, __LINE__).debug() +#define vDebug is_logged(LogLevel::INFO) && VDEBUG_LOG(LogLevel::INFO) +#define vWarning is_logged(LogLevel::WARN) && VDEBUG_LOG(LogLevel::WARN) +#define vCritical is_logged(LogLevel::CRIT) && VDEBUG_LOG(LogLevel::CRIT) + +#else + +struct VDebug +{ + template + VDebug& operator<<(const Args &){return *this;} +}; + +#define vDebug VDebug() +#define vWarning VDebug() +#define vCritical VDebug() + +#endif //LOTTIE_LOGGING_SUPPORT + +#endif // VDEBUG_H diff --git a/vendor/github.com/Benau/go_rlottie/vector_vdrawable.cpp b/vendor/github.com/Benau/go_rlottie/vector_vdrawable.cpp new file mode 100644 index 00000000..a7a477f8 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_vdrawable.cpp @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "vector_vdrawable.h" +#include "vector_vdasher.h" +#include "vector_vraster.h" + +VDrawable::VDrawable(VDrawable::Type type) +{ + setType(type); +} + +VDrawable::~VDrawable() noexcept +{ + if (mStrokeInfo) { + if (mType == Type::StrokeWithDash) { + delete static_cast(mStrokeInfo); + } else { + delete mStrokeInfo; + } + } +} + +void VDrawable::setType(VDrawable::Type type) +{ + mType = type; + if (mType == VDrawable::Type::Stroke) { + mStrokeInfo = new StrokeInfo(); + } else if (mType == VDrawable::Type::StrokeWithDash) { + mStrokeInfo = new StrokeWithDashInfo(); + } +} + +void VDrawable::applyDashOp() +{ + if (mStrokeInfo && (mType == Type::StrokeWithDash)) { + auto obj = static_cast(mStrokeInfo); + if (!obj->mDash.empty()) { + VDasher dasher(obj->mDash.data(), obj->mDash.size()); + mPath.clone(dasher.dashed(mPath)); + } + } +} + +void VDrawable::preprocess(const VRect &clip) +{ + if (mFlag & (DirtyState::Path)) { + if (mType == Type::Fill) { + mRasterizer.rasterize(std::move(mPath), mFillRule, clip); + } else { + applyDashOp(); + mRasterizer.rasterize(std::move(mPath), mStrokeInfo->cap, mStrokeInfo->join, + mStrokeInfo->width, mStrokeInfo->miterLimit, clip); + } + mPath = {}; + mFlag &= ~DirtyFlag(DirtyState::Path); + } +} + +VRle VDrawable::rle() +{ + return mRasterizer.rle(); +} + +void VDrawable::setStrokeInfo(CapStyle cap, JoinStyle join, float miterLimit, + float strokeWidth) +{ + assert(mStrokeInfo); + if ((mStrokeInfo->cap == cap) && (mStrokeInfo->join == join) && + vCompare(mStrokeInfo->miterLimit, miterLimit) && + vCompare(mStrokeInfo->width, strokeWidth)) + return; + + mStrokeInfo->cap = cap; + mStrokeInfo->join = join; + mStrokeInfo->miterLimit = miterLimit; + mStrokeInfo->width = strokeWidth; + mFlag |= DirtyState::Path; +} + +void VDrawable::setDashInfo(std::vector &dashInfo) +{ + assert(mStrokeInfo); + assert(mType == VDrawable::Type::StrokeWithDash); + + auto obj = static_cast(mStrokeInfo); + bool hasChanged = false; + + if (obj->mDash.size() == dashInfo.size()) { + for (uint i = 0; i < dashInfo.size(); ++i) { + if (!vCompare(obj->mDash[i], dashInfo[i])) { + hasChanged = true; + break; + } + } + } else { + hasChanged = true; + } + + if (!hasChanged) return; + + obj->mDash = dashInfo; + + mFlag |= DirtyState::Path; +} + +void VDrawable::setPath(const VPath &path) +{ + mPath = path; + mFlag |= DirtyState::Path; +} diff --git a/vendor/github.com/Benau/go_rlottie/vector_vdrawable.h b/vendor/github.com/Benau/go_rlottie/vector_vdrawable.h new file mode 100644 index 00000000..77f889da --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_vdrawable.h @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef VDRAWABLE_H +#define VDRAWABLE_H +#include +#include +#include "vector_vbrush.h" +#include "vector_vpath.h" +#include "vector_vrle.h" +#include "vector_vraster.h" + +class VDrawable { +public: + enum class DirtyState : unsigned char { + None = 1<<0, + Path = 1<<1, + Stroke = 1<<2, + Brush = 1<<3, + All = (Path | Stroke | Brush) + }; + + enum class Type : unsigned char{ + Fill, + Stroke, + StrokeWithDash + }; + + explicit VDrawable(VDrawable::Type type = Type::Fill); + void setType(VDrawable::Type type); + ~VDrawable() noexcept; + + typedef vFlag DirtyFlag; + void setPath(const VPath &path); + void setFillRule(FillRule rule) { mFillRule = rule; } + void setBrush(const VBrush &brush) { mBrush = brush; } + void setStrokeInfo(CapStyle cap, JoinStyle join, float miterLimit, + float strokeWidth); + void setDashInfo(std::vector &dashInfo); + void preprocess(const VRect &clip); + void applyDashOp(); + VRle rle(); + void setName(const char *name) + { + mName = name; + } + const char* name() const { return mName; } + +public: + struct StrokeInfo { + float width{0.0}; + float miterLimit{10}; + CapStyle cap{CapStyle::Flat}; + JoinStyle join{JoinStyle::Bevel}; + }; + + struct StrokeWithDashInfo : public StrokeInfo{ + std::vector mDash; + }; + +public: + VPath mPath; + VBrush mBrush; + VRasterizer mRasterizer; + StrokeInfo *mStrokeInfo{nullptr}; + + DirtyFlag mFlag{DirtyState::All}; + FillRule mFillRule{FillRule::Winding}; + VDrawable::Type mType{Type::Fill}; + + const char *mName{nullptr}; +}; + +#endif // VDRAWABLE_H diff --git a/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper.cpp b/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper.cpp new file mode 100644 index 00000000..e2601362 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper.cpp @@ -0,0 +1,767 @@ +#include "config.h" +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+#include "vector_vdrawhelper.h"
+#include <algorithm>
+#include <climits>
+#include <cstring>
+#include <memory>
+#include <mutex>
+#include <unordered_map>
+
+static RenderFuncTable RenderTable;
+
+void VTextureData::setClip(const VRect &clip)
+{
+    left = clip.left();
+    top = clip.top();
+    right = std::min(clip.right(), int(width())) - 1;
+    bottom = std::min(clip.bottom(), int(height())) - 1;
+}
+
+class VGradientCache {
+public:
+    struct CacheInfo : public VColorTable {
+        inline CacheInfo(VGradientStops s) : stops(std::move(s)) {}
+        VGradientStops stops;
+    };
+    using VCacheData = std::shared_ptr<CacheInfo>;
+    using VCacheKey = int64_t;
+    using VGradientColorTableHash =
+        std::unordered_multimap<VCacheKey, VCacheData>;
+
+    bool generateGradientColorTable(const VGradientStops &stops, float alpha,
+                                    uint32_t *colorTable, int size);
+    VCacheData getBuffer(const VGradient &gradient)
+    {
+        VCacheKey             hash_val = 0;
+        VCacheData            info;
+        const VGradientStops &stops = gradient.mStops;
+        for (uint i = 0; i < stops.size() && i <= 2; i++)
+            hash_val +=
+                VCacheKey(stops[i].second.premulARGB() * gradient.alpha());
+
+        {
+            std::lock_guard<std::mutex> guard(mMutex);
+
+            size_t count = mCache.count(hash_val);
+            if (!count) {
+                // key is not present in the hash
+                info = addCacheElement(hash_val, gradient);
+            } else if (count == 1) {
+                auto search = mCache.find(hash_val);
+                if (search->second->stops == stops) {
+                    info = search->second;
+                } else {
+                    // didn't find an exact match
+                    info = addCacheElement(hash_val, gradient);
+                }
+            } else {
+                // we have a multiple data with same key
+                auto range = mCache.equal_range(hash_val);
+                for (auto it = range.first; it != range.second; ++it) {
+                    if (it->second->stops == stops) {
+                        info = it->second;
+                        break;
+                    }
+                }
+                if (!info) {
+                    // didn't find an exact match
+                    info = addCacheElement(hash_val, gradient);
+                }
+            }
+        }
+        return info;
+    }
+
+    static VGradientCache &instance()
+    {
+        static VGradientCache CACHE;
+        return CACHE;
+    }
+
+protected:
+    uint       maxCacheSize() const { return 60; }
+    VCacheData addCacheElement(VCacheKey hash_val, const VGradient &gradient)
+    {
+        if (mCache.size() == maxCacheSize()) {
+            uint count = maxCacheSize() / 10;
+            while (count--) {
+                mCache.erase(mCache.begin());
+            }
+        }
+        auto cache_entry = std::make_shared<CacheInfo>(gradient.mStops);
+        cache_entry->alpha = generateGradientColorTable(
+            gradient.mStops, gradient.alpha(), cache_entry->buffer32,
+            VGradient::colorTableSize);
+        mCache.insert(std::make_pair(hash_val, cache_entry));
+        return cache_entry;
+    }
+
+private:
+    VGradientCache() = default;
+
+    VGradientColorTableHash mCache;
+    std::mutex              mMutex;
+};
+
+bool VGradientCache::generateGradientColorTable(const VGradientStops &stops,
+                                                float opacity,
+                                                uint32_t *colorTable, int size)
+{
+    int                  dist, idist, pos = 0;
+    size_t               i;
+    bool                 alpha = false;
+    size_t               stopCount = stops.size();
+    const VGradientStop *curr, *next, *start;
+    uint32_t             curColor, nextColor;
+    float                delta, t, incr, fpos;
+
+    if (!vCompare(opacity, 1.0f)) alpha = true;
+
+    start = stops.data();
+    curr = start;
+    if (!curr->second.isOpaque()) alpha = true;
+    curColor = curr->second.premulARGB(opacity);
+    incr = 1.0f / (float)size;
+    fpos = 1.5f * incr;
+
+    colorTable[pos++] = curColor;
+
+    while (fpos <= curr->first) {
+        colorTable[pos] = colorTable[pos - 1];
+        pos++;
+        fpos += incr;
+    }
+
+    for (i = 0; i < stopCount - 1; ++i) {
+        curr = (start + i);
+        next = (start + i + 1);
+        delta = 1 / (next->first - curr->first);
+        if (!next->second.isOpaque()) alpha = true;
+        nextColor = next->second.premulARGB(opacity);
+        while (fpos < next->first && pos < size) {
+            t = (fpos - curr->first) * delta;
+
dist = (int)(255 * t); + idist = 255 - dist; + colorTable[pos] = + interpolate_pixel(curColor, idist, nextColor, dist); + ++pos; + fpos += incr; + } + curColor = nextColor; + } + + for (; pos < size; ++pos) colorTable[pos] = curColor; + + // Make sure the last color stop is represented at the end of the table + colorTable[size - 1] = curColor; + return alpha; +} + +void VRasterBuffer::clear() +{ + memset(mBuffer, 0, mHeight * mBytesPerLine); +} + +VBitmap::Format VRasterBuffer::prepare(const VBitmap *image) +{ + mBuffer = image->data(); + mWidth = image->width(); + mHeight = image->height(); + mBytesPerPixel = 4; + mBytesPerLine = image->stride(); + + mFormat = image->format(); + return mFormat; +} + +void VSpanData::init(VRasterBuffer *image) +{ + mRasterBuffer = image; + setDrawRegion(VRect(0, 0, int(image->width()), int(image->height()))); + mType = VSpanData::Type::None; + mBlendFunc = nullptr; + mUnclippedBlendFunc = nullptr; +} + +/* + * Gradient Draw routines + * + */ + +#define FIXPT_BITS 8 +#define FIXPT_SIZE (1 << FIXPT_BITS) +static inline void getLinearGradientValues(LinearGradientValues *v, + const VSpanData * data) +{ + const VGradientData *grad = &data->mGradient; + v->dx = grad->linear.x2 - grad->linear.x1; + v->dy = grad->linear.y2 - grad->linear.y1; + v->l = v->dx * v->dx + v->dy * v->dy; + v->off = 0; + if (v->l != 0) { + v->dx /= v->l; + v->dy /= v->l; + v->off = -v->dx * grad->linear.x1 - v->dy * grad->linear.y1; + } +} + +static inline void getRadialGradientValues(RadialGradientValues *v, + const VSpanData * data) +{ + const VGradientData &gradient = data->mGradient; + v->dx = gradient.radial.cx - gradient.radial.fx; + v->dy = gradient.radial.cy - gradient.radial.fy; + + v->dr = gradient.radial.cradius - gradient.radial.fradius; + v->sqrfr = gradient.radial.fradius * gradient.radial.fradius; + + v->a = v->dr * v->dr - v->dx * v->dx - v->dy * v->dy; + v->inv2a = 1 / (2 * v->a); + + v->extended = !vIsZero(gradient.radial.fradius) || v->a <= 0; +} + +static inline int gradientClamp(const VGradientData *grad, int ipos) +{ + int limit; + + if (grad->mSpread == VGradient::Spread::Repeat) { + ipos = ipos % VGradient::colorTableSize; + ipos = ipos < 0 ? VGradient::colorTableSize + ipos : ipos; + } else if (grad->mSpread == VGradient::Spread::Reflect) { + limit = VGradient::colorTableSize * 2; + ipos = ipos % limit; + ipos = ipos < 0 ? limit + ipos : ipos; + ipos = ipos >= VGradient::colorTableSize ? 
limit - 1 - ipos : ipos; + } else { + if (ipos < 0) + ipos = 0; + else if (ipos >= VGradient::colorTableSize) + ipos = VGradient::colorTableSize - 1; + } + return ipos; +} + +static uint32_t gradientPixelFixed(const VGradientData *grad, int fixed_pos) +{ + int ipos = (fixed_pos + (FIXPT_SIZE / 2)) >> FIXPT_BITS; + + return grad->mColorTable[gradientClamp(grad, ipos)]; +} + +static inline uint32_t gradientPixel(const VGradientData *grad, float pos) +{ + int ipos = (int)(pos * (VGradient::colorTableSize - 1) + (float)(0.5)); + + return grad->mColorTable[gradientClamp(grad, ipos)]; +} + +void fetch_linear_gradient(uint32_t *buffer, const Operator *op, + const VSpanData *data, int y, int x, int length) +{ + float t, inc; + const VGradientData *gradient = &data->mGradient; + + bool affine = true; + float rx = 0, ry = 0; + if (op->linear.l == 0) { + t = inc = 0; + } else { + rx = data->m21 * (y + float(0.5)) + data->m11 * (x + float(0.5)) + + data->dx; + ry = data->m22 * (y + float(0.5)) + data->m12 * (x + float(0.5)) + + data->dy; + t = op->linear.dx * rx + op->linear.dy * ry + op->linear.off; + inc = op->linear.dx * data->m11 + op->linear.dy * data->m12; + affine = !data->m13 && !data->m23; + + if (affine) { + t *= (VGradient::colorTableSize - 1); + inc *= (VGradient::colorTableSize - 1); + } + } + + const uint32_t *end = buffer + length; + if (affine) { + if (inc > float(-1e-5) && inc < float(1e-5)) { + memfill32(buffer, gradientPixelFixed(gradient, int(t * FIXPT_SIZE)), + length); + } else { + if (t + inc * length < float(INT_MAX >> (FIXPT_BITS + 1)) && + t + inc * length > float(INT_MIN >> (FIXPT_BITS + 1))) { + // we can use fixed point math + int t_fixed = int(t * FIXPT_SIZE); + int inc_fixed = int(inc * FIXPT_SIZE); + while (buffer < end) { + *buffer = gradientPixelFixed(gradient, t_fixed); + t_fixed += inc_fixed; + ++buffer; + } + } else { + // we have to fall back to float math + while (buffer < end) { + *buffer = + gradientPixel(gradient, t / VGradient::colorTableSize); + t += inc; + ++buffer; + } + } + } + } else { // fall back to float math here as well + float rw = data->m23 * (y + float(0.5)) + data->m13 * (x + float(0.5)) + + data->m33; + while (buffer < end) { + float xt = rx / rw; + float yt = ry / rw; + t = (op->linear.dx * xt + op->linear.dy * yt) + op->linear.off; + + *buffer = gradientPixel(gradient, t); + rx += data->m11; + ry += data->m12; + rw += data->m13; + if (!rw) { + rw += data->m13; + } + ++buffer; + } + } +} + +static inline float radialDeterminant(float a, float b, float c) +{ + return (b * b) - (4 * a * c); +} + +static void fetch(uint32_t *buffer, uint32_t *end, const Operator *op, + const VSpanData *data, float det, float delta_det, + float delta_delta_det, float b, float delta_b) +{ + if (op->radial.extended) { + while (buffer < end) { + uint32_t result = 0; + if (det >= 0) { + float w = std::sqrt(det) - b; + if (data->mGradient.radial.fradius + op->radial.dr * w >= 0) + result = gradientPixel(&data->mGradient, w); + } + + *buffer = result; + + det += delta_det; + delta_det += delta_delta_det; + b += delta_b; + + ++buffer; + } + } else { + while (buffer < end) { + *buffer++ = gradientPixel(&data->mGradient, std::sqrt(det) - b); + + det += delta_det; + delta_det += delta_delta_det; + b += delta_b; + } + } +} + +void fetch_radial_gradient(uint32_t *buffer, const Operator *op, + const VSpanData *data, int y, int x, int length) +{ + // avoid division by zero + if (vIsZero(op->radial.a)) { + memfill32(buffer, 0, length); + return; + } + + float rx = + data->m21 * 
(y + float(0.5)) + data->dx + data->m11 * (x + float(0.5)); + float ry = + data->m22 * (y + float(0.5)) + data->dy + data->m12 * (x + float(0.5)); + bool affine = !data->m13 && !data->m23; + + uint32_t *end = buffer + length; + if (affine) { + rx -= data->mGradient.radial.fx; + ry -= data->mGradient.radial.fy; + + float inv_a = 1 / float(2 * op->radial.a); + + const float delta_rx = data->m11; + const float delta_ry = data->m12; + + float b = 2 * (op->radial.dr * data->mGradient.radial.fradius + + rx * op->radial.dx + ry * op->radial.dy); + float delta_b = + 2 * (delta_rx * op->radial.dx + delta_ry * op->radial.dy); + const float b_delta_b = 2 * b * delta_b; + const float delta_b_delta_b = 2 * delta_b * delta_b; + + const float bb = b * b; + const float delta_bb = delta_b * delta_b; + + b *= inv_a; + delta_b *= inv_a; + + const float rxrxryry = rx * rx + ry * ry; + const float delta_rxrxryry = delta_rx * delta_rx + delta_ry * delta_ry; + const float rx_plus_ry = 2 * (rx * delta_rx + ry * delta_ry); + const float delta_rx_plus_ry = 2 * delta_rxrxryry; + + inv_a *= inv_a; + + float det = + (bb - 4 * op->radial.a * (op->radial.sqrfr - rxrxryry)) * inv_a; + float delta_det = (b_delta_b + delta_bb + + 4 * op->radial.a * (rx_plus_ry + delta_rxrxryry)) * + inv_a; + const float delta_delta_det = + (delta_b_delta_b + 4 * op->radial.a * delta_rx_plus_ry) * inv_a; + + fetch(buffer, end, op, data, det, delta_det, delta_delta_det, b, + delta_b); + } else { + float rw = data->m23 * (y + float(0.5)) + data->m33 + + data->m13 * (x + float(0.5)); + + while (buffer < end) { + if (rw == 0) { + *buffer = 0; + } else { + float invRw = 1 / rw; + float gx = rx * invRw - data->mGradient.radial.fx; + float gy = ry * invRw - data->mGradient.radial.fy; + float b = 2 * (op->radial.dr * data->mGradient.radial.fradius + + gx * op->radial.dx + gy * op->radial.dy); + float det = radialDeterminant( + op->radial.a, b, op->radial.sqrfr - (gx * gx + gy * gy)); + + uint32_t result = 0; + if (det >= 0) { + float detSqrt = std::sqrt(det); + + float s0 = (-b - detSqrt) * op->radial.inv2a; + float s1 = (-b + detSqrt) * op->radial.inv2a; + + float s = vMax(s0, s1); + + if (data->mGradient.radial.fradius + op->radial.dr * s >= 0) + result = gradientPixel(&data->mGradient, s); + } + + *buffer = result; + } + + rx += data->m11; + ry += data->m12; + rw += data->m13; + + ++buffer; + } + } +} + +static inline Operator getOperator(const VSpanData *data) +{ + Operator op; + bool solidSource = false; + + switch (data->mType) { + case VSpanData::Type::Solid: + solidSource = (vAlpha(data->mSolid) == 255); + op.srcFetch = nullptr; + break; + case VSpanData::Type::LinearGradient: + solidSource = false; + getLinearGradientValues(&op.linear, data); + op.srcFetch = &fetch_linear_gradient; + break; + case VSpanData::Type::RadialGradient: + solidSource = false; + getRadialGradientValues(&op.radial, data); + op.srcFetch = &fetch_radial_gradient; + break; + default: + op.srcFetch = nullptr; + break; + } + + op.mode = data->mBlendMode; + if (op.mode == BlendMode::SrcOver && solidSource) op.mode = BlendMode::Src; + + op.funcSolid = RenderTable.color(op.mode); + op.func = RenderTable.src(op.mode); + + return op; +} + +static void blend_color(size_t size, const VRle::Span *array, void *userData) +{ + VSpanData *data = (VSpanData *)(userData); + Operator op = getOperator(data); + const uint color = data->mSolid; + + for (size_t i = 0 ; i < size; ++i) { + const auto &span = array[i]; + op.funcSolid(data->buffer(span.x, span.y), span.len, color, 
span.coverage);
+    }
+}
+
+// Signature of Process Object
+// void Process(uint* scratchBuffer, size_t x, size_t y, size_t len, uchar cov)
+template <typename Process>
+static inline void process_in_chunk(const VRle::Span *array, size_t size,
+                                    Process process)
+{
+    std::array<uint, 2048> buf;
+    for (size_t i = 0; i < size; i++) {
+        const auto &span = array[i];
+        size_t      len = span.len;
+        auto        x = span.x;
+        while (len) {
+            auto l = std::min(len, buf.size());
+            process(buf.data(), x, span.y, l, span.coverage);
+            x += l;
+            len -= l;
+        }
+    }
+}
+
+static void blend_gradient(size_t size, const VRle::Span *array,
+                           void *userData)
+{
+    VSpanData *data = (VSpanData *)(userData);
+    Operator   op = getOperator(data);
+
+    if (!op.srcFetch) return;
+
+    process_in_chunk(
+        array, size,
+        [&](uint *scratch, size_t x, size_t y, size_t len, uchar cov) {
+            op.srcFetch(scratch, &op, data, (int)y, (int)x, (int)len);
+            op.func(data->buffer((int)x, (int)y), (int)len, scratch, cov);
+        });
+}
+
+template <typename T>
+constexpr const T &clamp(const T &v, const T &lo, const T &hi)
+{
+    return v < lo ? lo : hi < v ? hi : v;
+}
+
+static constexpr inline uchar alpha_mul(uchar a, uchar b)
+{
+    return ((a * b) >> 8);
+}
+
+static void blend_image_xform(size_t size, const VRle::Span *array,
+                              void *userData)
+{
+    const auto  data = reinterpret_cast<const VSpanData *>(userData);
+    const auto &src = data->texture();
+
+    if (src.format() != VBitmap::Format::ARGB32_Premultiplied &&
+        src.format() != VBitmap::Format::ARGB32) {
+        //@TODO other formats not yet handled.
+        return;
+    }
+
+    Operator op = getOperator(data);
+
+    process_in_chunk(
+        array, size,
+        [&](uint *scratch, size_t x, size_t y, size_t len, uchar cov) {
+            const auto  coverage = (cov * src.alpha()) >> 8;
+            const float xfactor = y * data->m21 + data->dx + data->m11;
+            const float yfactor = y * data->m22 + data->dy + data->m12;
+            for (size_t i = 0; i < len; i++) {
+                const float fx = (x + i) * data->m11 + xfactor;
+                const float fy = (x + i) * data->m12 + yfactor;
+                const int   px = clamp(int(fx), src.left, src.right);
+                const int   py = clamp(int(fy), src.top, src.bottom);
+                scratch[i] = src.pixel(px, py);
+            }
+            op.func(data->buffer((int)x, (int)y), (int)len, scratch, coverage);
+        });
+}
+
+static void blend_image(size_t size, const VRle::Span *array, void *userData)
+{
+    const auto  data = reinterpret_cast<const VSpanData *>(userData);
+    const auto &src = data->texture();
+
+    if (src.format() != VBitmap::Format::ARGB32_Premultiplied &&
+        src.format() != VBitmap::Format::ARGB32) {
+        //@TODO other formats not yet handled.
+        return;
+    }
+
+    Operator op = getOperator(data);
+
+    for (size_t i = 0; i < size; i++) {
+        const auto &span = array[i];
+        int         x = span.x;
+        int         length = span.len;
+        int         sx = x + int(data->dx);
+        int         sy = span.y + int(data->dy);
+
+        // nothing to copy.
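+        // A span is skipped when its row lies outside the source image or the
+        // span ends before the image's left edge / starts past its right edge;
+        // spans that merely overhang an edge are clipped just below (e.g. with
+        // data->dx == -10 and span.x == 4, sx == -6, so x shifts right by 6
+        // and the span shortens by 6 before the row is blended).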
+ if (sy < 0 || sy >= int(src.height()) || sx >= int(src.width()) || + (sx + length) <= 0) + continue; + + // intersecting left edge of image + if (sx < 0) { + x -= sx; + length += sx; + sx = 0; + } + // intersecting right edge of image + if (sx + length > int(src.width())) length = (int)src.width() - sx; + + op.func(data->buffer(x, span.y), length, src.pixelRef(sx, sy), + alpha_mul(span.coverage, src.alpha())); + } +} + +void VSpanData::setup(const VBrush &brush, BlendMode /*mode*/, int /*alpha*/) +{ + transformType = VMatrix::MatrixType::None; + + switch (brush.type()) { + case VBrush::Type::NoBrush: + mType = VSpanData::Type::None; + break; + case VBrush::Type::Solid: + mType = VSpanData::Type::Solid; + mSolid = brush.mColor.premulARGB(); + break; + case VBrush::Type::LinearGradient: { + mType = VSpanData::Type::LinearGradient; + mColorTable = VGradientCache::instance().getBuffer(*brush.mGradient); + mGradient.mColorTable = mColorTable->buffer32; + mGradient.mColorTableAlpha = mColorTable->alpha; + mGradient.linear.x1 = brush.mGradient->linear.x1; + mGradient.linear.y1 = brush.mGradient->linear.y1; + mGradient.linear.x2 = brush.mGradient->linear.x2; + mGradient.linear.y2 = brush.mGradient->linear.y2; + mGradient.mSpread = brush.mGradient->mSpread; + setupMatrix(brush.mGradient->mMatrix); + break; + } + case VBrush::Type::RadialGradient: { + mType = VSpanData::Type::RadialGradient; + mColorTable = VGradientCache::instance().getBuffer(*brush.mGradient); + mGradient.mColorTable = mColorTable->buffer32; + mGradient.mColorTableAlpha = mColorTable->alpha; + mGradient.radial.cx = brush.mGradient->radial.cx; + mGradient.radial.cy = brush.mGradient->radial.cy; + mGradient.radial.fx = brush.mGradient->radial.fx; + mGradient.radial.fy = brush.mGradient->radial.fy; + mGradient.radial.cradius = brush.mGradient->radial.cradius; + mGradient.radial.fradius = brush.mGradient->radial.fradius; + mGradient.mSpread = brush.mGradient->mSpread; + setupMatrix(brush.mGradient->mMatrix); + break; + } + case VBrush::Type::Texture: { + mType = VSpanData::Type::Texture; + initTexture(&brush.mTexture->mBitmap, brush.mTexture->mAlpha, + brush.mTexture->mBitmap.rect()); + setupMatrix(brush.mTexture->mMatrix); + break; + } + default: + break; + } + updateSpanFunc(); +} + +void VSpanData::setupMatrix(const VMatrix &matrix) +{ + VMatrix inv = matrix.inverted(); + m11 = inv.m11; + m12 = inv.m12; + m13 = inv.m13; + m21 = inv.m21; + m22 = inv.m22; + m23 = inv.m23; + m33 = inv.m33; + dx = inv.mtx; + dy = inv.mty; + transformType = inv.type(); + + const bool affine = inv.isAffine(); + const float f1 = m11 * m11 + m21 * m21; + const float f2 = m12 * m12 + m22 * m22; + fast_matrix = affine && f1 < 1e4 && f2 < 1e4 && f1 > (1.0 / 65536) && + f2 > (1.0 / 65536) && fabs(dx) < 1e4 && fabs(dy) < 1e4; +} + +void VSpanData::initTexture(const VBitmap *bitmap, int alpha, + const VRect &sourceRect) +{ + mType = VSpanData::Type::Texture; + mTexture.prepare(bitmap); + mTexture.setClip(sourceRect); + mTexture.setAlpha(alpha); + updateSpanFunc(); +} + +void VSpanData::updateSpanFunc() +{ + switch (mType) { + case VSpanData::Type::None: + mUnclippedBlendFunc = nullptr; + break; + case VSpanData::Type::Solid: + mUnclippedBlendFunc = &blend_color; + break; + case VSpanData::Type::LinearGradient: + case VSpanData::Type::RadialGradient: { + mUnclippedBlendFunc = &blend_gradient; + break; + } + case VSpanData::Type::Texture: { + //@TODO update proper image function. 
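+        // A translate-only transform maps destination spans 1:1 onto source
+        // rows, so the straight row-copy path (blend_image) is enough; any
+        // scale, rotation or projection has to inverse-map every pixel, which
+        // is what blend_image_xform's per-pixel fetch above does.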
+ if (transformType <= VMatrix::MatrixType::Translate) { + mUnclippedBlendFunc = &blend_image; + } else { + mUnclippedBlendFunc = &blend_image_xform; + } + break; + } + } +} + +#if !defined(__SSE2__) && !defined(USE_ARM_NEON) +void memfill32(uint32_t *dest, uint32_t value, int length) +{ + // let compiler do the auto vectorization. + for (int i = 0 ; i < length; i++) { + *dest++ = value; + } +} +#endif + diff --git a/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper.h b/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper.h new file mode 100644 index 00000000..590c5950 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper.h @@ -0,0 +1,270 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef VDRAWHELPER_H +#define VDRAWHELPER_H + +#include +#include +#include "assert.h" +#include "vector_vbitmap.h" +#include "vector_vbrush.h" +#include "vector_vrect.h" +#include "vector_vrle.h" + +V_USE_NAMESPACE + +struct VSpanData; +struct Operator; + +struct RenderFunc +{ + using Color = void (*)(uint32_t *dest, int length, uint32_t color, uint32_t alpha); + using Src = void (*)(uint32_t *dest, int length, const uint32_t *src, uint32_t alpha); + enum class Type { + Invalid, + Color, + Src, + }; + RenderFunc() = default; + RenderFunc(Type t, Color f):type_(t), color_(f){assert(t == Type::Color);} + RenderFunc(Type t, Src f):type_(t), src_(f){ assert(t == Type::Src);} + + Type type_{Type::Invalid}; + union { + Color color_; + Src src_; + }; +}; + +class RenderFuncTable +{ +public: + RenderFuncTable(); + RenderFunc::Color color(BlendMode mode) const + { + return colorTable[uint32_t(mode)].color_; + } + RenderFunc::Src src(BlendMode mode) const + { + return srcTable[uint32_t(mode)].src_; + } +private: + void neon(); + void sse(); + void updateColor(BlendMode mode, RenderFunc::Color f) + { + colorTable[uint32_t(mode)] = {RenderFunc::Type::Color, f}; + } + void updateSrc(BlendMode mode, RenderFunc::Src f) + { + srcTable[uint32_t(mode)] = {RenderFunc::Type::Src, f}; + } +private: + std::array colorTable; + std::array srcTable; +}; + +typedef void (*SourceFetchProc)(uint32_t *buffer, const Operator *o, + const VSpanData *data, int y, int x, + int length); +typedef void (*ProcessRleSpan)(size_t count, const VRle::Span *spans, + void *userData); + +extern void memfill32(uint32_t *dest, uint32_t value, int count); + +struct LinearGradientValues { + float dx; + float dy; + float l; + float off; +}; + +struct RadialGradientValues { + float dx; + float dy; + float dr; + float sqrfr; + float a; + float inv2a; + bool extended; +}; + +struct Operator { + BlendMode mode; + SourceFetchProc srcFetch; + RenderFunc::Color funcSolid; + RenderFunc::Src func; + union { + LinearGradientValues linear; + RadialGradientValues radial; + }; +}; + +class VRasterBuffer { +public: + VBitmap::Format prepare(const VBitmap *image); + void clear(); + + void resetBuffer(int val = 0); + + inline uchar *scanLine(int y) + { + assert(y >= 0); + assert(size_t(y) < mHeight); + return mBuffer + y * mBytesPerLine; + } + uint32_t *pixelRef(int x, int y) const + { + return (uint32_t *)(mBuffer + y * mBytesPerLine + x * mBytesPerPixel); + } + + size_t width() const { return mWidth; } + size_t height() const { return mHeight; } + size_t bytesPerLine() const { return mBytesPerLine; } + size_t bytesPerPixel() const { return mBytesPerPixel; } + VBitmap::Format format() const { return mFormat; } + +private: + VBitmap::Format mFormat{VBitmap::Format::ARGB32_Premultiplied}; + size_t mWidth{0}; + size_t mHeight{0}; + size_t mBytesPerLine{0}; + size_t mBytesPerPixel{0}; + mutable uchar * mBuffer{nullptr}; +}; + +struct VGradientData { + VGradient::Spread mSpread; + struct Linear { + float x1, y1, x2, y2; + }; + struct Radial { + float cx, cy, fx, fy, cradius, fradius; + }; + union { + Linear linear; + Radial radial; + }; + const uint32_t *mColorTable; + bool mColorTableAlpha; +}; + +struct VTextureData : public VRasterBuffer { + uint32_t pixel(int x, int y) const { return *pixelRef(x, y); }; + uchar alpha() const { return mAlpha; } + void setAlpha(uchar alpha) { mAlpha = alpha; } + void setClip(const VRect &clip); + // clip rect + int left; + int right; + int top; + int bottom; + bool hasAlpha; + uchar mAlpha; +}; + +struct 
VColorTable { + uint32_t buffer32[VGradient::colorTableSize]; + bool alpha{true}; +}; + +struct VSpanData { + enum class Type { None, Solid, LinearGradient, RadialGradient, Texture }; + + void updateSpanFunc(); + void init(VRasterBuffer *image); + void setup(const VBrush &brush, BlendMode mode = BlendMode::SrcOver, + int alpha = 255); + void setupMatrix(const VMatrix &matrix); + + VRect clipRect() const + { + return VRect(0, 0, mDrawableSize.width(), mDrawableSize.height()); + } + + void setDrawRegion(const VRect ®ion) + { + mOffset = VPoint(region.left(), region.top()); + mDrawableSize = VSize(region.width(), region.height()); + } + + uint *buffer(int x, int y) const + { + return mRasterBuffer->pixelRef(x + mOffset.x(), y + mOffset.y()); + } + void initTexture(const VBitmap *image, int alpha, const VRect &sourceRect); + const VTextureData &texture() const { return mTexture; } + + BlendMode mBlendMode{BlendMode::SrcOver}; + VRasterBuffer * mRasterBuffer; + ProcessRleSpan mBlendFunc; + ProcessRleSpan mUnclippedBlendFunc; + VSpanData::Type mType; + std::shared_ptr mColorTable{nullptr}; + VPoint mOffset; // offset to the subsurface + VSize mDrawableSize; // suburface size + uint32_t mSolid; + VGradientData mGradient; + VTextureData mTexture; + + float m11, m12, m13, m21, m22, m23, m33, dx, dy; // inverse xform matrix + bool fast_matrix{true}; + VMatrix::MatrixType transformType{VMatrix::MatrixType::None}; +}; + +#define BYTE_MUL(c, a) \ + ((((((c) >> 8) & 0x00ff00ff) * (a)) & 0xff00ff00) + \ + (((((c)&0x00ff00ff) * (a)) >> 8) & 0x00ff00ff)) + +inline constexpr int vRed(uint32_t c) +{ + return ((c >> 16) & 0xff); +} + +inline constexpr int vGreen(uint32_t c) +{ + return ((c >> 8) & 0xff); +} + +inline constexpr int vBlue(uint32_t c) +{ + return (c & 0xff); +} + +inline constexpr int vAlpha(uint32_t c) +{ + return c >> 24; +} + +static inline uint32_t interpolate_pixel(uint x, uint a, uint y, uint b) +{ + uint t = (x & 0xff00ff) * a + (y & 0xff00ff) * b; + t >>= 8; + t &= 0xff00ff; + x = ((x >> 8) & 0xff00ff) * a + ((y >> 8) & 0xff00ff) * b; + x &= 0xff00ff00; + x |= t; + return x; +} + +#endif // QDRAWHELPER_P_H diff --git a/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper_common.cpp b/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper_common.cpp new file mode 100644 index 00000000..a4346c96 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper_common.cpp @@ -0,0 +1,190 @@ +#include "config.h" +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include "vector_vdrawhelper.h" + +/* +result = s +dest = s * ca + d * cia +*/ +static void color_Source(uint32_t *dest, int length, uint32_t color, + uint32_t alpha) +{ + int ialpha, i; + + if (alpha == 255) { + memfill32(dest, color, length); + } else { + ialpha = 255 - alpha; + color = BYTE_MUL(color, alpha); + for (i = 0; i < length; ++i) + dest[i] = color + BYTE_MUL(dest[i], ialpha); + } +} + +/* + r = s + d * sia + dest = r * ca + d * cia + = (s + d * sia) * ca + d * cia + = s * ca + d * (sia * ca + cia) + = s * ca + d * (1 - sa*ca) + = s' + d ( 1 - s'a) +*/ +static void color_SourceOver(uint32_t *dest, int length, uint32_t color, + uint32_t alpha) +{ + int ialpha, i; + + if (alpha != 255) color = BYTE_MUL(color, alpha); + ialpha = 255 - vAlpha(color); + for (i = 0; i < length; ++i) dest[i] = color + BYTE_MUL(dest[i], ialpha); +} + +/* + result = d * sa + dest = d * sa * ca + d * cia + = d * (sa * ca + cia) +*/ +static void color_DestinationIn(uint *dest, int length, uint color, + uint alpha) +{ + uint a = vAlpha(color); + if (alpha != 255) { + a = BYTE_MUL(a, alpha) + 255 - alpha; + } + for (int i = 0; i < length; ++i) { + dest[i] = BYTE_MUL(dest[i], a); + } +} + +/* + result = d * sia + dest = d * sia * ca + d * cia + = d * (sia * ca + cia) +*/ +static void color_DestinationOut(uint *dest, int length, uint color, + uint alpha) +{ + uint a = vAlpha(~color); + if (alpha != 255) a = BYTE_MUL(a, alpha) + 255 - alpha; + for (int i = 0; i < length; ++i) { + dest[i] = BYTE_MUL(dest[i], a); + } +} + +static void src_Source(uint32_t *dest, int length, const uint32_t *src, + uint32_t alpha) +{ + if (alpha == 255) { + memcpy(dest, src, size_t(length) * sizeof(uint)); + } else { + uint ialpha = 255 - alpha; + for (int i = 0; i < length; ++i) { + dest[i] = + interpolate_pixel(src[i], alpha, dest[i], ialpha); + } + } +} + +/* s' = s * ca + * d' = s' + d (1 - s'a) + */ +static void src_SourceOver(uint32_t *dest, int length, const uint32_t *src, + uint32_t alpha) +{ + uint s, sia; + + if (alpha == 255) { + for (int i = 0; i < length; ++i) { + s = src[i]; + if (s >= 0xff000000) + dest[i] = s; + else if (s != 0) { + sia = vAlpha(~s); + dest[i] = s + BYTE_MUL(dest[i], sia); + } + } + } else { + /* source' = source * const_alpha + * dest = source' + dest ( 1- source'a) + */ + for (int i = 0; i < length; ++i) { + s = BYTE_MUL(src[i], alpha); + sia = vAlpha(~s); + dest[i] = s + BYTE_MUL(dest[i], sia); + } + } +} + +static void src_DestinationIn(uint *dest, int length, const uint *src, + uint alpha) +{ + if (alpha == 255) { + for (int i = 0; i < length; ++i) { + dest[i] = BYTE_MUL(dest[i], vAlpha(src[i])); + } + } else { + uint cia = 255 - alpha; + for (int i = 0; i < length; ++i) { + uint a = BYTE_MUL(vAlpha(src[i]), alpha) + cia; + dest[i] = BYTE_MUL(dest[i], a); + } + } +} + +static void src_DestinationOut(uint *dest, int length, const uint *src, + uint alpha) +{ + if (alpha == 255) { + for (int i = 0; i < length; ++i) { + dest[i] = BYTE_MUL(dest[i], vAlpha(~src[i])); + } + } else { + uint cia = 255 - alpha; + for (int i = 0; i < length; ++i) { + uint sia = BYTE_MUL(vAlpha(~src[i]), alpha) + cia; + dest[i] = BYTE_MUL(dest[i], sia); + } + } +} + +RenderFuncTable::RenderFuncTable() +{ + updateColor(BlendMode::Src, 
color_Source);
+    updateColor(BlendMode::SrcOver, color_SourceOver);
+    updateColor(BlendMode::DestIn, color_DestinationIn);
+    updateColor(BlendMode::DestOut, color_DestinationOut);
+
+    updateSrc(BlendMode::Src, src_Source);
+    updateSrc(BlendMode::SrcOver, src_SourceOver);
+    updateSrc(BlendMode::DestIn, src_DestinationIn);
+    updateSrc(BlendMode::DestOut, src_DestinationOut);
+
+#if defined(USE_ARM_NEON)
+    neon();
+#endif
+#if defined(__SSE2__)
+    sse();
+#endif
+}
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper_neon.cpp b/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper_neon.cpp
new file mode 100644
index 00000000..24d7f579
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper_neon.cpp
@@ -0,0 +1,34 @@
+#include "config.h"
+#if defined(USE_ARM_NEON)
+
+#include "vector_vdrawhelper.h"
+
+extern "C" void pixman_composite_src_n_8888_asm_neon(int32_t w, int32_t h,
+                                                     uint32_t *dst,
+                                                     int32_t dst_stride,
+                                                     uint32_t src);
+
+extern "C" void pixman_composite_over_n_8888_asm_neon(int32_t w, int32_t h,
+                                                      uint32_t *dst,
+                                                      int32_t dst_stride,
+                                                      uint32_t src);
+
+void memfill32(uint32_t *dest, uint32_t value, int length)
+{
+    pixman_composite_src_n_8888_asm_neon(length, 1, dest, length, value);
+}
+
+static void color_SourceOver(uint32_t *dest, int length,
+                             uint32_t color,
+                             uint32_t const_alpha)
+{
+    if (const_alpha != 255) color = BYTE_MUL(color, const_alpha);
+
+    pixman_composite_over_n_8888_asm_neon(length, 1, dest, length, color);
+}
+
+void RenderFuncTable::neon()
+{
+    updateColor(BlendMode::Src , color_SourceOver);
+}
+#endif
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper_sse2.cpp b/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper_sse2.cpp
new file mode 100644
index 00000000..51a6c3e0
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vdrawhelper_sse2.cpp
@@ -0,0 +1,261 @@
+#if defined(__SSE2__)
+
+#include <cstring>
+#include <emmintrin.h> /* for SSE2 intrinsics */
+#include <xmmintrin.h> /* for _mm_shuffle_pi16 and _MM_SHUFFLE */
+
+#include "vector_vdrawhelper.h"
+// Each 32bits components of alphaChannel must be in the form 0x00AA00AA
+inline static __m128i v4_byte_mul_sse2(__m128i c, __m128i a)
+{
+    const __m128i ag_mask = _mm_set1_epi32(0xFF00FF00);
+    const __m128i rb_mask = _mm_set1_epi32(0x00FF00FF);
+
+    /* for AG */
+    __m128i v_ag = _mm_and_si128(ag_mask, c);
+    v_ag = _mm_srli_epi32(v_ag, 8);
+    v_ag = _mm_mullo_epi16(a, v_ag);
+    v_ag = _mm_and_si128(ag_mask, v_ag);
+
+    /* for RB */
+    __m128i v_rb = _mm_and_si128(rb_mask, c);
+    v_rb = _mm_mullo_epi16(a, v_rb);
+    v_rb = _mm_srli_epi32(v_rb, 8);
+    v_rb = _mm_and_si128(rb_mask, v_rb);
+
+    /* combine */
+    return _mm_add_epi32(v_ag, v_rb);
+}
+
+static inline __m128i v4_interpolate_color_sse2(__m128i a, __m128i c0,
+                                                __m128i c1)
+{
+    const __m128i rb_mask = _mm_set1_epi32(0xFF00FF00);
+    const __m128i zero = _mm_setzero_si128();
+
+    __m128i a_l = a;
+    __m128i a_h = a;
+    a_l = _mm_unpacklo_epi16(a_l, a_l);
+    a_h = _mm_unpackhi_epi16(a_h, a_h);
+
+    __m128i a_t = _mm_slli_epi64(a_l, 32);
+    __m128i a_t0 = _mm_slli_epi64(a_h, 32);
+
+    a_l = _mm_add_epi32(a_l, a_t);
+    a_h = _mm_add_epi32(a_h, a_t0);
+
+    __m128i c0_l = c0;
+    __m128i c0_h = c0;
+
+    c0_l = _mm_unpacklo_epi8(c0_l, zero);
+    c0_h = _mm_unpackhi_epi8(c0_h, zero);
+
+    __m128i c1_l = c1;
+    __m128i c1_h = c1;
+
+    c1_l = _mm_unpacklo_epi8(c1_l, zero);
+    c1_h = _mm_unpackhi_epi8(c1_h, zero);
+
+    __m128i cl_sub = _mm_sub_epi16(c0_l, c1_l);
+    __m128i ch_sub = _mm_sub_epi16(c0_h, c1_h);
+
+    cl_sub = _mm_mullo_epi16(cl_sub, a_l);
+    ch_sub =
_mm_mullo_epi16(ch_sub, a_h); + + __m128i c1ls = _mm_slli_epi16(c1_l, 8); + __m128i c1hs = _mm_slli_epi16(c1_h, 8); + + cl_sub = _mm_add_epi16(cl_sub, c1ls); + ch_sub = _mm_add_epi16(ch_sub, c1hs); + + cl_sub = _mm_and_si128(cl_sub, rb_mask); + ch_sub = _mm_and_si128(ch_sub, rb_mask); + + cl_sub = _mm_srli_epi64(cl_sub, 8); + ch_sub = _mm_srli_epi64(ch_sub, 8); + + cl_sub = _mm_packus_epi16(cl_sub, cl_sub); + ch_sub = _mm_packus_epi16(ch_sub, ch_sub); + + return (__m128i)_mm_shuffle_ps((__m128)cl_sub, (__m128)ch_sub, 0x44); +} + +// Load src and dest vector +#define V4_FETCH_SRC_DEST \ + __m128i v_src = _mm_loadu_si128((__m128i*)src); \ + __m128i v_dest = _mm_load_si128((__m128i*)dest); + +#define V4_FETCH_SRC __m128i v_src = _mm_loadu_si128((__m128i*)src); + +#define V4_STORE_DEST _mm_store_si128((__m128i*)dest, v_src); + +#define V4_SRC_DEST_LEN_INC \ + dest += 4; \ + src += 4; \ + length -= 4; + +// Multiply src color with const_alpha +#define V4_ALPHA_MULTIPLY v_src = v4_byte_mul_sse2(v_src, v_alpha); + + +// dest = src + dest * sia +#define V4_COMP_OP_SRC \ + v_src = v4_interpolate_color_sse2(v_alpha, v_src, v_dest); + +#define LOOP_ALIGNED_U1_A4(DEST, LENGTH, UOP, A4OP) \ + { \ + while ((uintptr_t)DEST & 0xF && LENGTH) \ + UOP \ + \ + while (LENGTH) \ + { \ + switch (LENGTH) { \ + case 3: \ + case 2: \ + case 1: \ + UOP break; \ + default: \ + A4OP break; \ + } \ + } \ + } + +void memfill32(uint32_t* dest, uint32_t value, int length) +{ + __m128i vector_data = _mm_set_epi32(value, value, value, value); + + // run till memory alligned to 16byte memory + while (length && ((uintptr_t)dest & 0xf)) { + *dest++ = value; + length--; + } + + while (length >= 32) { + _mm_store_si128((__m128i*)(dest), vector_data); + _mm_store_si128((__m128i*)(dest + 4), vector_data); + _mm_store_si128((__m128i*)(dest + 8), vector_data); + _mm_store_si128((__m128i*)(dest + 12), vector_data); + _mm_store_si128((__m128i*)(dest + 16), vector_data); + _mm_store_si128((__m128i*)(dest + 20), vector_data); + _mm_store_si128((__m128i*)(dest + 24), vector_data); + _mm_store_si128((__m128i*)(dest + 28), vector_data); + + dest += 32; + length -= 32; + } + + if (length >= 16) { + _mm_store_si128((__m128i*)(dest), vector_data); + _mm_store_si128((__m128i*)(dest + 4), vector_data); + _mm_store_si128((__m128i*)(dest + 8), vector_data); + _mm_store_si128((__m128i*)(dest + 12), vector_data); + + dest += 16; + length -= 16; + } + + if (length >= 8) { + _mm_store_si128((__m128i*)(dest), vector_data); + _mm_store_si128((__m128i*)(dest + 4), vector_data); + + dest += 8; + length -= 8; + } + + if (length >= 4) { + _mm_store_si128((__m128i*)(dest), vector_data); + + dest += 4; + length -= 4; + } + + while (length) { + *dest++ = value; + length--; + } +} + +// dest = color + (dest * alpha) +inline static void copy_helper_sse2(uint32_t* dest, int length, + uint32_t color, uint32_t alpha) +{ + const __m128i v_color = _mm_set1_epi32(color); + const __m128i v_a = _mm_set1_epi16(alpha); + + LOOP_ALIGNED_U1_A4(dest, length, + { /* UOP */ + *dest = color + BYTE_MUL(*dest, alpha); + dest++; + length--; + }, + { /* A4OP */ + __m128i v_dest = _mm_load_si128((__m128i*)dest); + + v_dest = v4_byte_mul_sse2(v_dest, v_a); + v_dest = _mm_add_epi32(v_dest, v_color); + + _mm_store_si128((__m128i*)dest, v_dest); + + dest += 4; + length -= 4; + }) +} + +static void color_Source(uint32_t* dest, int length, uint32_t color, + uint32_t const_alpha) +{ + if (const_alpha == 255) { + memfill32(dest, color, length); + } else { + int ialpha; + + ialpha = 255 - 
const_alpha; + color = BYTE_MUL(color, const_alpha); + copy_helper_sse2(dest, length, color, ialpha); + } +} + +static void color_SourceOver(uint32_t* dest, int length, + uint32_t color, + uint32_t const_alpha) +{ + int ialpha; + + if (const_alpha != 255) color = BYTE_MUL(color, const_alpha); + ialpha = 255 - vAlpha(color); + copy_helper_sse2(dest, length, color, ialpha); +} + +static void src_Source(uint32_t* dest, int length, const uint32_t* src, + uint32_t const_alpha) +{ + int ialpha; + if (const_alpha == 255) { + memcpy(dest, src, length * sizeof(uint32_t)); + } else { + ialpha = 255 - const_alpha; + __m128i v_alpha = _mm_set1_epi32(const_alpha); + + LOOP_ALIGNED_U1_A4(dest, length, + { /* UOP */ + *dest = interpolate_pixel(*src, const_alpha, + *dest, ialpha); + dest++; + src++; + length--; + }, + {/* A4OP */ + V4_FETCH_SRC_DEST V4_COMP_OP_SRC V4_STORE_DEST + V4_SRC_DEST_LEN_INC}) + } +} + +void RenderFuncTable::sse() +{ + updateColor(BlendMode::Src , color_Source); + updateColor(BlendMode::SrcOver , color_SourceOver); + + updateSrc(BlendMode::Src , src_Source); +} + +#endif diff --git a/vendor/github.com/Benau/go_rlottie/vector_velapsedtimer.cpp b/vendor/github.com/Benau/go_rlottie/vector_velapsedtimer.cpp new file mode 100644 index 00000000..be065c27 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_velapsedtimer.cpp @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "vector_velapsedtimer.h" + +void VElapsedTimer::start() +{ + clock = std::chrono::high_resolution_clock::now(); + m_valid = true; +} + +double VElapsedTimer::restart() +{ + double elapsedTime = elapsed(); + start(); + return elapsedTime; +} + +double VElapsedTimer::elapsed() const +{ + if (!isValid()) return 0; + return std::chrono::duration( + std::chrono::high_resolution_clock::now() - clock) + .count(); +} + +bool VElapsedTimer::hasExpired(double time) +{ + double elapsedTime = elapsed(); + if (elapsedTime > time) return true; + return false; +} diff --git a/vendor/github.com/Benau/go_rlottie/vector_velapsedtimer.h b/vendor/github.com/Benau/go_rlottie/vector_velapsedtimer.h new file mode 100644 index 00000000..2cfe9cef --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_velapsedtimer.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. 
+ + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef VELAPSEDTIMER_H +#define VELAPSEDTIMER_H + +#include +#include "vector_vglobal.h" + +class VElapsedTimer { +public: + double elapsed() const; + bool hasExpired(double millsec); + void start(); + double restart(); + inline bool isValid() const { return m_valid; } + +private: + std::chrono::high_resolution_clock::time_point clock; + bool m_valid{false}; +}; +#endif // VELAPSEDTIMER_H diff --git a/vendor/github.com/Benau/go_rlottie/vector_vglobal.h b/vendor/github.com/Benau/go_rlottie/vector_vglobal.h new file mode 100644 index 00000000..678ee199 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_vglobal.h @@ -0,0 +1,302 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+#ifndef VGLOBAL_H
+#define VGLOBAL_H
+
+#include <cstdint>
+#include <iostream>
+#include <type_traits>
+#include <utility>
+#include <cmath>
+
+using uint = uint32_t;
+using ushort = uint16_t;
+using uchar = uint8_t;
+
+#if !defined(V_NAMESPACE)
+
+#define V_USE_NAMESPACE
+#define V_BEGIN_NAMESPACE
+#define V_END_NAMESPACE
+
+#else /* user namespace */
+
+#define V_USE_NAMESPACE using namespace ::V_NAMESPACE;
+#define V_BEGIN_NAMESPACE namespace V_NAMESPACE {
+#define V_END_NAMESPACE }
+
+#endif
+
+#ifndef __has_attribute
+# define __has_attribute(x) 0
+#endif /* !__has_attribute */
+
+#if __has_attribute(unused)
+# define V_UNUSED __attribute__((__unused__))
+#else
+# define V_UNUSED
+#endif /* V_UNUSED */
+
+#if __has_attribute(warn_unused_result)
+# define V_REQUIRED_RESULT __attribute__((__warn_unused_result__))
+#else
+# define V_REQUIRED_RESULT
+#endif /* V_REQUIRED_RESULT */
+
+#define V_CONSTEXPR constexpr
+#define V_NOTHROW noexcept
+
+#include "vector_vdebug.h"
+
+#if __GNUC__ >= 7
+#define VECTOR_FALLTHROUGH __attribute__ ((fallthrough));
+#else
+#define VECTOR_FALLTHROUGH
+#endif
+
+#ifdef LOTTIE_THREAD_SUPPORT
+#define vthread_local thread_local
+#else
+#define vthread_local
+#endif
+
+#if defined(_MSC_VER)
+    #define V_ALWAYS_INLINE __forceinline
+#else
+    #define V_ALWAYS_INLINE __attribute__((always_inline))
+#endif
+
+template <typename T>
+V_CONSTEXPR inline const T &vMin(const T &a, const T &b)
+{
+    return (a < b) ? a : b;
+}
+template <typename T>
+V_CONSTEXPR inline const T &vMax(const T &a, const T &b)
+{
+    return (a < b) ? b : a;
+}
+
+static const double EPSILON_DOUBLE = 0.000000000001f;
+static const float  EPSILON_FLOAT = 0.000001f;
+
+static inline bool vCompare(float p1, float p2)
+{
+    return (std::abs(p1 - p2) < EPSILON_FLOAT);
+}
+
+static inline bool vIsZero(float f)
+{
+    return (std::abs(f) <= EPSILON_FLOAT);
+}
+
+static inline bool vIsZero(double f)
+{
+    return (std::abs(f) <= EPSILON_DOUBLE);
+}
+
+class vFlagHelper {
+    int i;
+
+public:
+    explicit constexpr inline vFlagHelper(int ai) noexcept : i(ai) {}
+    constexpr inline operator int() const noexcept { return i; }
+
+    explicit constexpr inline vFlagHelper(uint ai) noexcept : i(int(ai)) {}
+    explicit constexpr inline vFlagHelper(short ai) noexcept : i(int(ai)) {}
+    explicit constexpr inline vFlagHelper(ushort ai) noexcept : i(int(uint(ai))) {}
+    constexpr inline operator uint() const noexcept { return uint(i); }
+};
+
+template <typename Enum>
+class vFlag {
+public:
+    static_assert(
+        (sizeof(Enum) <= sizeof(int)),
+        "vFlag only supports int as storage so bigger type will overflow");
+    static_assert((std::is_enum<Enum>::value),
+                  "vFlag is only usable on enumeration types.");
+
+    using Int = typename std::conditional<
+        std::is_unsigned<typename std::underlying_type<Enum>::type>::value,
+        unsigned int, signed int>::type;
+
+    using enum_type = Enum;
+    // compiler-generated copy/move ctor/assignment operators are fine!
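+    //
+    // A minimal usage sketch (the enum below is hypothetical, for
+    // illustration only):
+    //
+    //     enum class Flag { None = 0, A = 1, B = 2 };
+    //     vFlag<Flag> f = Flag::A;
+    //     f |= Flag::B;                      // set another bit
+    //     bool hasA = f.testFlag(Flag::A);   // true
+    //     f.setFlag(Flag::A, false);         // clear it again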
+ + vFlag() = default; + constexpr vFlag(Enum f) noexcept : i(Int(f)) {} + explicit constexpr vFlag(vFlagHelper f) noexcept : i(f) {} + + inline vFlag &operator&=(int mask) noexcept + { + i &= mask; + return *this; + } + inline vFlag &operator&=(uint mask) noexcept + { + i &= mask; + return *this; + } + inline vFlag &operator&=(Enum mask) noexcept + { + i &= Int(mask); + return *this; + } + inline vFlag &operator|=(vFlag f) noexcept + { + i |= f.i; + return *this; + } + inline vFlag &operator|=(Enum f) noexcept + { + i |= Int(f); + return *this; + } + inline vFlag &operator^=(vFlag f) noexcept + { + i ^= f.i; + return *this; + } + inline vFlag &operator^=(Enum f) noexcept + { + i ^= Int(f); + return *this; + } + + constexpr inline operator Int() const noexcept { return i; } + + constexpr inline vFlag operator|(vFlag f) const + { + return vFlag(vFlagHelper(i | f.i)); + } + constexpr inline vFlag operator|(Enum f) const noexcept + { + return vFlag(vFlagHelper(i | Int(f))); + } + constexpr inline vFlag operator^(vFlag f) const noexcept + { + return vFlag(vFlagHelper(i ^ f.i)); + } + constexpr inline vFlag operator^(Enum f) const noexcept + { + return vFlag(vFlagHelper(i ^ Int(f))); + } + constexpr inline vFlag operator&(int mask) const noexcept + { + return vFlag(vFlagHelper(i & mask)); + } + constexpr inline vFlag operator&(uint mask) const noexcept + { + return vFlag(vFlagHelper(i & mask)); + } + constexpr inline vFlag operator&(Enum f) const noexcept + { + return vFlag(vFlagHelper(i & Int(f))); + } + constexpr inline vFlag operator~() const noexcept + { + return vFlag(vFlagHelper(~i)); + } + + constexpr inline bool operator!() const noexcept { return !i; } + + constexpr inline bool testFlag(Enum f) const noexcept + { + return (i & Int(f)) == Int(f) && (Int(f) != 0 || i == Int(f)); + } + inline vFlag &setFlag(Enum f, bool on = true) noexcept + { + return on ? 
(*this |= f) : (*this &= ~f);
+    }
+
+    Int i{0};
+};
+
+class VColor {
+public:
+    VColor() = default;
+    explicit VColor(uchar red, uchar green, uchar blue, uchar alpha = 255) noexcept
+        :a(alpha), r(red), g(green), b(blue){}
+    inline uchar red() const noexcept { return r; }
+    inline uchar green() const noexcept { return g; }
+    inline uchar blue() const noexcept { return b; }
+    inline uchar alpha() const noexcept { return a; }
+    inline void setRed(uchar red) noexcept { r = red; }
+    inline void setGreen(uchar green) noexcept { g = green; }
+    inline void setBlue(uchar blue) noexcept { b = blue; }
+    inline void setAlpha(uchar alpha) noexcept { a = alpha; }
+    inline bool isOpaque() const { return a == 255; }
+    inline bool isTransparent() const { return a == 0; }
+    inline bool operator==(const VColor &o) const
+    {
+        return ((a == o.a) && (r == o.r) && (g == o.g) && (b == o.b));
+    }
+    uint premulARGB() const
+    {
+        int pr = (r * a) / 255;
+        int pg = (g * a) / 255;
+        int pb = (b * a) / 255;
+        return uint((a << 24) | (pr << 16) | (pg << 8) | (pb));
+    }
+
+    uint premulARGB(float opacity) const
+    {
+        int alpha = int(a * opacity);
+        int pr = (r * alpha) / 255;
+        int pg = (g * alpha) / 255;
+        int pb = (b * alpha) / 255;
+        return uint((alpha << 24) | (pr << 16) | (pg << 8) | (pb));
+    }
+
+public:
+    uchar a{0};
+    uchar r{0};
+    uchar g{0};
+    uchar b{0};
+};
+
+enum class FillRule: unsigned char { EvenOdd, Winding };
+enum class JoinStyle: unsigned char { Miter, Bevel, Round };
+enum class CapStyle: unsigned char { Flat, Square, Round };
+
+enum class BlendMode {
+    Src,
+    SrcOver,
+    DestIn,
+    DestOut,
+    Last,
+};
+
+#ifndef V_CONSTRUCTOR_FUNCTION
+#define V_CONSTRUCTOR_FUNCTION0(AFUNC) \
+    namespace { \
+    static const struct AFUNC##_ctor_class_ { \
+        inline AFUNC##_ctor_class_() { AFUNC(); } \
+    } AFUNC##_ctor_instance_; \
+    }
+
+#define V_CONSTRUCTOR_FUNCTION(AFUNC) V_CONSTRUCTOR_FUNCTION0(AFUNC)
+#endif
+
+#endif // VGLOBAL_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vimageloader.cpp b/vendor/github.com/Benau/go_rlottie/vector_vimageloader.cpp
new file mode 100644
index 00000000..08f502f8
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vimageloader.cpp
@@ -0,0 +1,220 @@
+#include "vector_vimageloader.h"
+#include "config.h"
+#include "vector_vdebug.h"
+#include <cstring>
+
+#ifdef _WIN32
+# include <windows.h>
+#else
+# include <dlfcn.h>
+#endif // _WIN32
+
+using lottie_image_load_f = unsigned char *(*)(const char *filename, int *x,
+                                               int *y, int *comp, int req_comp);
+using lottie_image_load_data_f = unsigned char *(*)(const char *data, int len,
+                                                    int *x, int *y, int *comp,
+                                                    int req_comp);
+using lottie_image_free_f = void (*)(unsigned char *);
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern unsigned char *lottie_image_load(char const *filename, int *x, int *y,
+                                        int *comp, int req_comp);
+extern unsigned char *lottie_image_load_from_data(const char *imageData,
+                                                  int len, int *x, int *y,
+                                                  int *comp, int req_comp);
+extern void lottie_image_free(unsigned char *data);
+
+#ifdef __cplusplus
+}
+#endif
+
+struct VImageLoader::Impl {
+    lottie_image_load_f      imageLoad{nullptr};
+    lottie_image_free_f      imageFree{nullptr};
+    lottie_image_load_data_f imageFromData{nullptr};
+
+#ifdef LOTTIE_IMAGE_MODULE_SUPPORT
+# ifdef _WIN32
+    HMODULE dl_handle{nullptr};
+    bool moduleLoad()
+    {
+        dl_handle = LoadLibraryA(LOTTIE_IMAGE_MODULE_PLUGIN);
+        return (dl_handle == nullptr);
+    }
+    void moduleFree()
+    {
+        if (dl_handle) FreeLibrary(dl_handle);
+    }
+    void init()
+    {
+        imageLoad = reinterpret_cast<lottie_image_load_f>(
+            GetProcAddress(dl_handle, "lottie_image_load"));
+        imageFree = reinterpret_cast<lottie_image_free_f>(
+            GetProcAddress(dl_handle, "lottie_image_free"));
+        imageFromData = reinterpret_cast<lottie_image_load_data_f>(
+            GetProcAddress(dl_handle, "lottie_image_load_from_data"));
+    }
+# else // _WIN32
+    void *dl_handle{nullptr};
+    void init()
+    {
+        imageLoad = reinterpret_cast<lottie_image_load_f>(
+            dlsym(dl_handle, "lottie_image_load"));
+        imageFree = reinterpret_cast<lottie_image_free_f>(
+            dlsym(dl_handle, "lottie_image_free"));
+        imageFromData = reinterpret_cast<lottie_image_load_data_f>(
+            dlsym(dl_handle, "lottie_image_load_from_data"));
+    }
+
+    void moduleFree()
+    {
+        if (dl_handle) dlclose(dl_handle);
+    }
+    bool moduleLoad()
+    {
+        dl_handle = dlopen(LOTTIE_IMAGE_MODULE_PLUGIN, RTLD_LAZY);
+        return (dl_handle == nullptr);
+    }
+# endif // _WIN32
+#else // LOTTIE_IMAGE_MODULE_SUPPORT
+    void init()
+    {
+        imageLoad = lottie_image_load;
+        imageFree = lottie_image_free;
+        imageFromData = lottie_image_load_from_data;
+    }
+    void moduleFree() {}
+    bool moduleLoad() { return false; }
+#endif // LOTTIE_IMAGE_MODULE_SUPPORT
+
+    Impl()
+    {
+        if (moduleLoad()) {
+            vWarning << "Failed to dlopen librlottie-image-loader library";
+            return;
+        }
+
+        init();
+
+        if (!imageLoad)
+            vWarning << "Failed to find symbol lottie_image_load in "
+                        "librlottie-image-loader library";
+
+        if (!imageFree)
+            vWarning << "Failed to find symbol lottie_image_free in "
+                        "librlottie-image-loader library";
+
+        if (!imageFromData)
+            vWarning << "Failed to find symbol lottie_image_load_from_data in "
+                        "librlottie-image-loader library";
+    }
+
+    ~Impl() { moduleFree(); }
+
+    VBitmap createBitmap(unsigned char *data, int width, int height,
+                         int channel)
+    {
+        // premultiply alpha
+        if (channel == 4)
+            convertToBGRAPremul(data, width, height);
+        else
+            convertToBGRA(data, width, height);
+
+        // create a bitmap of same size.
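+        // For illustration: when channel == 4, an RGBA input pixel
+        // (255, 0, 0, 128) has just been premultiplied to
+        // r' = 255 * 128 / 255 = 128 and is laid out in memory as
+        // b, g, r, a = 0, 0, 128, 128, matching the ARGB32_Premultiplied
+        // layout used below on little-endian targets.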
+ VBitmap result = + VBitmap(width, height, VBitmap::Format::ARGB32_Premultiplied); + + // copy the data to bitmap buffer + memcpy(result.data(), data, width * height * 4); + + // free the image data + imageFree(data); + + return result; + } + + VBitmap load(const char *fileName) + { + if (!imageLoad) return VBitmap(); + + int width, height, n; + unsigned char *data = imageLoad(fileName, &width, &height, &n, 4); + + if (!data) { + return VBitmap(); + } + + return createBitmap(data, width, height, n); + } + + VBitmap load(const char *imageData, size_t len) + { + if (!imageFromData) return VBitmap(); + + int width, height, n; + unsigned char *data = + imageFromData(imageData, static_cast(len), &width, &height, &n, 4); + + if (!data) { + return VBitmap(); + } + + return createBitmap(data, width, height, n); + } + /* + * convert from RGBA to BGRA and premultiply + */ + void convertToBGRAPremul(unsigned char *bits, int width, int height) + { + int pixelCount = width * height; + unsigned char *pix = bits; + for (int i = 0; i < pixelCount; i++) { + unsigned char r = pix[0]; + unsigned char g = pix[1]; + unsigned char b = pix[2]; + unsigned char a = pix[3]; + + r = (r * a) / 255; + g = (g * a) / 255; + b = (b * a) / 255; + + pix[0] = b; + pix[1] = g; + pix[2] = r; + + pix += 4; + } + } + /* + * convert from RGBA to BGRA + */ + void convertToBGRA(unsigned char *bits, int width, int height) + { + int pixelCount = width * height; + unsigned char *pix = bits; + for (int i = 0; i < pixelCount; i++) { + unsigned char r = pix[0]; + unsigned char b = pix[2]; + pix[0] = b; + pix[2] = r; + pix += 4; + } + } +}; + +VImageLoader::VImageLoader() : mImpl(std::make_unique()) {} + +VImageLoader::~VImageLoader() {} + +VBitmap VImageLoader::load(const char *fileName) +{ + return mImpl->load(fileName); +} + +VBitmap VImageLoader::load(const char *data, size_t len) +{ + return mImpl->load(data, int(len)); +} diff --git a/vendor/github.com/Benau/go_rlottie/vector_vimageloader.h b/vendor/github.com/Benau/go_rlottie/vector_vimageloader.h new file mode 100644 index 00000000..fe9a0be4 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_vimageloader.h @@ -0,0 +1,26 @@ +#ifndef VIMAGELOADER_H +#define VIMAGELOADER_H + +#include + +#include "vector_vbitmap.h" + +class VImageLoader +{ +public: + static VImageLoader& instance() + { + static VImageLoader singleton; + return singleton; + } + + VBitmap load(const char *fileName); + VBitmap load(const char *data, size_t len); + ~VImageLoader(); +private: + VImageLoader(); + struct Impl; + std::unique_ptr mImpl; +}; + +#endif // VIMAGELOADER_H diff --git a/vendor/github.com/Benau/go_rlottie/vector_vinterpolator.cpp b/vendor/github.com/Benau/go_rlottie/vector_vinterpolator.cpp new file mode 100644 index 00000000..89e462a1 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_vinterpolator.cpp @@ -0,0 +1,124 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim: set ts=8 sts=2 et sw=2 tw=80: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "vector_vinterpolator.h" +#include + +V_BEGIN_NAMESPACE + +#define NEWTON_ITERATIONS 4 +#define NEWTON_MIN_SLOPE 0.02 +#define SUBDIVISION_PRECISION 0.0000001 +#define SUBDIVISION_MAX_ITERATIONS 10 + +const float VInterpolator::kSampleStepSize = + 1.0f / float(VInterpolator::kSplineTableSize - 1); + +void VInterpolator::init(float aX1, float aY1, float aX2, float aY2) +{ + mX1 = aX1; + mY1 = aY1; + mX2 = aX2; + mY2 = aY2; + + if (mX1 != mY1 || mX2 != mY2) CalcSampleValues(); +} + +/*static*/ float VInterpolator::CalcBezier(float aT, float aA1, float aA2) +{ + // use Horner's scheme to evaluate the Bezier polynomial + return ((A(aA1, aA2) * aT + B(aA1, aA2)) * aT + C(aA1)) * aT; +} + +void VInterpolator::CalcSampleValues() +{ + for (int i = 0; i < kSplineTableSize; ++i) { + mSampleValues[i] = CalcBezier(float(i) * kSampleStepSize, mX1, mX2); + } +} + +float VInterpolator::GetSlope(float aT, float aA1, float aA2) +{ + return 3.0f * A(aA1, aA2) * aT * aT + 2.0f * B(aA1, aA2) * aT + C(aA1); +} + +float VInterpolator::value(float aX) const +{ + if (mX1 == mY1 && mX2 == mY2) return aX; + + return CalcBezier(GetTForX(aX), mY1, mY2); +} + +float VInterpolator::GetTForX(float aX) const +{ + // Find interval where t lies + float intervalStart = 0.0; + const float* currentSample = &mSampleValues[1]; + const float* const lastSample = &mSampleValues[kSplineTableSize - 1]; + for (; currentSample != lastSample && *currentSample <= aX; + ++currentSample) { + intervalStart += kSampleStepSize; + } + --currentSample; // t now lies between *currentSample and *currentSample+1 + + // Interpolate to provide an initial guess for t + float dist = + (aX - *currentSample) / (*(currentSample + 1) - *currentSample); + float guessForT = intervalStart + dist * kSampleStepSize; + + // Check the slope to see what strategy to use. If the slope is too small + // Newton-Raphson iteration won't converge on a root so we use bisection + // instead. + float initialSlope = GetSlope(guessForT, mX1, mX2); + if (initialSlope >= NEWTON_MIN_SLOPE) { + return NewtonRaphsonIterate(aX, guessForT); + } else if (initialSlope == 0.0) { + return guessForT; + } else { + return BinarySubdivide(aX, intervalStart, + intervalStart + kSampleStepSize); + } +} + +float VInterpolator::NewtonRaphsonIterate(float aX, float aGuessT) const +{ + // Refine guess with Newton-Raphson iteration + for (int i = 0; i < NEWTON_ITERATIONS; ++i) { + // We're trying to find where f(t) = aX, + // so we're actually looking for a root for: CalcBezier(t) - aX + float currentX = CalcBezier(aGuessT, mX1, mX2) - aX; + float currentSlope = GetSlope(aGuessT, mX1, mX2); + + if (currentSlope == 0.0) return aGuessT; + + aGuessT -= currentX / currentSlope; + } + + return aGuessT; +} + +float VInterpolator::BinarySubdivide(float aX, float aA, float aB) const +{ + float currentX; + float currentT; + int i = 0; + + do { + currentT = aA + (aB - aA) / 2.0f; + currentX = CalcBezier(currentT, mX1, mX2) - aX; + + if (currentX > 0.0) { + aB = currentT; + } else { + aA = currentT; + } + } while (fabs(currentX) > SUBDIVISION_PRECISION && + ++i < SUBDIVISION_MAX_ITERATIONS); + + return currentT; +} + +V_END_NAMESPACE diff --git a/vendor/github.com/Benau/go_rlottie/vector_vinterpolator.h b/vendor/github.com/Benau/go_rlottie/vector_vinterpolator.h new file mode 100644 index 00000000..fb7fa937 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_vinterpolator.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. 
+ + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef VINTERPOLATOR_H +#define VINTERPOLATOR_H + +#include "vector_vpoint.h" + +V_BEGIN_NAMESPACE + +class VInterpolator { +public: + VInterpolator() + { /* caller must call Init later */ + } + + VInterpolator(float aX1, float aY1, float aX2, float aY2) + { + init(aX1, aY1, aX2, aY2); + } + + VInterpolator(VPointF pt1, VPointF pt2) + { + init(pt1.x(), pt1.y(), pt2.x(), pt2.y()); + } + + void init(float aX1, float aY1, float aX2, float aY2); + + float value(float aX) const; + + void GetSplineDerivativeValues(float aX, float& aDX, float& aDY) const; + +private: + void CalcSampleValues(); + + /** + * Returns x(t) given t, x1, and x2, or y(t) given t, y1, and y2. + */ + static float CalcBezier(float aT, float aA1, float aA2); + + /** + * Returns dx/dt given t, x1, and x2, or dy/dt given t, y1, and y2. + */ + static float GetSlope(float aT, float aA1, float aA2); + + float GetTForX(float aX) const; + + float NewtonRaphsonIterate(float aX, float aGuessT) const; + + float BinarySubdivide(float aX, float aA, float aB) const; + + static float A(float aA1, float aA2) { return 1.0f - 3.0f * aA2 + 3.0f * aA1; } + + static float B(float aA1, float aA2) { return 3.0f * aA2 - 6.0f * aA1; } + + static float C(float aA1) { return 3.0f * aA1; } + + float mX1; + float mY1; + float mX2; + float mY2; + enum { kSplineTableSize = 11 }; + float mSampleValues[kSplineTableSize]; + static const float kSampleStepSize; +}; + +V_END_NAMESPACE + +#endif // VINTERPOLATOR_H diff --git a/vendor/github.com/Benau/go_rlottie/vector_vline.h b/vendor/github.com/Benau/go_rlottie/vector_vline.h new file mode 100644 index 00000000..68edaa59 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_vline.h @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef VLINE_H +#define VLINE_H + +#include "vector_vglobal.h" +#include "vector_vpoint.h" + +V_BEGIN_NAMESPACE + +class VLine { +public: + VLine() = default; + VLine(float x1, float y1, float x2, float y2) + : mX1(x1), mY1(y1), mX2(x2), mY2(y2) + { + } + VLine(const VPointF &p1, const VPointF &p2) + : mX1(p1.x()), mY1(p1.y()), mX2(p2.x()), mY2(p2.y()) + { + } + float length() const { return length(mX1, mY1, mX2, mY2);} + void splitAtLength(float length, VLine &left, VLine &right) const; + VPointF p1() const { return {mX1, mY1}; } + VPointF p2() const { return {mX2, mY2}; } + float angle() const; + static float length(float x1, float y1, float x2, float y2); + +private: + float mX1{0}; + float mY1{0}; + float mX2{0}; + float mY2{0}; +}; + +inline float VLine::angle() const +{ + static constexpr float K_PI = 3.141592f; + const float dx = mX2 - mX1; + const float dy = mY2 - mY1; + + const float theta = std::atan2(dy, dx) * 180.0f / K_PI; + return theta; +} + +// approximate sqrt(x*x + y*y) using alpha max plus beta min algorithm. +// With alpha = 1, beta = 3/8, giving results with the largest error less +// than 7% compared to the exact value. +inline V_ALWAYS_INLINE float VLine::length(float x1, float y1, float x2, float y2) +{ + float x = x2 - x1; + float y = y2 - y1; + + x = x < 0 ? -x : x; + y = y < 0 ? -y : y; + + return (x > y ? x + 0.375f * y : y + 0.375f * x); +} + +inline void VLine::splitAtLength(float lengthAt, VLine &left, VLine &right) const +{ + float len = length(); + float dx = ((mX2 - mX1) / len) * lengthAt; + float dy = ((mY2 - mY1) / len) * lengthAt; + + left.mX1 = mX1; + left.mY1 = mY1; + left.mX2 = left.mX1 + dx; + left.mY2 = left.mY1 + dy; + + right.mX1 = left.mX2; + right.mY1 = left.mY2; + right.mX2 = mX2; + right.mY2 = mY2; +} + +#endif //VLINE_H diff --git a/vendor/github.com/Benau/go_rlottie/vector_vmatrix.cpp b/vendor/github.com/Benau/go_rlottie/vector_vmatrix.cpp new file mode 100644 index 00000000..a06ad1b0 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_vmatrix.cpp @@ -0,0 +1,684 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "vector_vmatrix.h" +#include "vector_vglobal.h" +#include +#include + +V_BEGIN_NAMESPACE + +/* m11 m21 mtx + * m12 m22 mty + * m13 m23 m33 + */ + +inline float VMatrix::determinant() const +{ + return m11 * (m33 * m22 - mty * m23) - m21 * (m33 * m12 - mty * m13) + + mtx * (m23 * m12 - m22 * m13); +} + +bool VMatrix::isAffine() const +{ + return type() < MatrixType::Project; +} + +bool VMatrix::isIdentity() const +{ + return type() == MatrixType::None; +} + +bool VMatrix::isInvertible() const +{ + return !vIsZero(determinant()); +} + +bool VMatrix::isScaling() const +{ + return type() >= MatrixType::Scale; +} +bool VMatrix::isRotating() const +{ + return type() >= MatrixType::Rotate; +} + +bool VMatrix::isTranslating() const +{ + return type() >= MatrixType::Translate; +} + +VMatrix &VMatrix::operator*=(float num) +{ + if (num == 1.) return *this; + + m11 *= num; + m12 *= num; + m13 *= num; + m21 *= num; + m22 *= num; + m23 *= num; + mtx *= num; + mty *= num; + m33 *= num; + if (dirty < MatrixType::Scale) dirty = MatrixType::Scale; + + return *this; +} + +VMatrix &VMatrix::operator/=(float div) +{ + if (div == 0) return *this; + + div = 1 / div; + return operator*=(div); +} + +VMatrix::MatrixType VMatrix::type() const +{ + if (dirty == MatrixType::None || dirty < mType) return mType; + + switch (dirty) { + case MatrixType::Project: + if (!vIsZero(m13) || !vIsZero(m23) || !vIsZero(m33 - 1)) { + mType = MatrixType::Project; + break; + } + VECTOR_FALLTHROUGH + case MatrixType::Shear: + case MatrixType::Rotate: + if (!vIsZero(m12) || !vIsZero(m21)) { + const float dot = m11 * m12 + m21 * m22; + if (vIsZero(dot)) + mType = MatrixType::Rotate; + else + mType = MatrixType::Shear; + break; + } + VECTOR_FALLTHROUGH + case MatrixType::Scale: + if (!vIsZero(m11 - 1) || !vIsZero(m22 - 1)) { + mType = MatrixType::Scale; + break; + } + VECTOR_FALLTHROUGH + case MatrixType::Translate: + if (!vIsZero(mtx) || !vIsZero(mty)) { + mType = MatrixType::Translate; + break; + } + VECTOR_FALLTHROUGH + case MatrixType::None: + mType = MatrixType::None; + break; + } + + dirty = MatrixType::None; + return mType; +} + +VMatrix &VMatrix::translate(float dx, float dy) +{ + if (dx == 0 && dy == 0) return *this; + + switch (type()) { + case MatrixType::None: + mtx = dx; + mty = dy; + break; + case MatrixType::Translate: + mtx += dx; + mty += dy; + break; + case MatrixType::Scale: + mtx += dx * m11; + mty += dy * m22; + break; + case MatrixType::Project: + m33 += dx * m13 + dy * m23; + VECTOR_FALLTHROUGH + case MatrixType::Shear: + case MatrixType::Rotate: + mtx += dx * m11 + dy * m21; + mty += dy * m22 + dx * m12; + break; + } + if (dirty < MatrixType::Translate) dirty = MatrixType::Translate; + return *this; +} + +VMatrix &VMatrix::scale(float sx, float sy) +{ + if (sx == 1 && sy == 1) return *this; + + switch (type()) { + case MatrixType::None: + case MatrixType::Translate: + m11 = sx; + m22 = sy; + break; + case MatrixType::Project: + m13 *= sx; + m23 *= sy; + VECTOR_FALLTHROUGH + case MatrixType::Rotate: + case MatrixType::Shear: + m12 *= sx; + m21 *= sy; + VECTOR_FALLTHROUGH + case MatrixType::Scale: + m11 *= sx; + m22 *= sy; + break; + } + if (dirty < MatrixType::Scale) dirty = MatrixType::Scale; + return *this; +} + +VMatrix 
&VMatrix::shear(float sh, float sv) +{ + if (sh == 0 && sv == 0) return *this; + + switch (type()) { + case MatrixType::None: + case MatrixType::Translate: + m12 = sv; + m21 = sh; + break; + case MatrixType::Scale: + m12 = sv * m22; + m21 = sh * m11; + break; + case MatrixType::Project: { + float tm13 = sv * m23; + float tm23 = sh * m13; + m13 += tm13; + m23 += tm23; + VECTOR_FALLTHROUGH + } + case MatrixType::Rotate: + case MatrixType::Shear: { + float tm11 = sv * m21; + float tm22 = sh * m12; + float tm12 = sv * m22; + float tm21 = sh * m11; + m11 += tm11; + m12 += tm12; + m21 += tm21; + m22 += tm22; + break; + } + } + if (dirty < MatrixType::Shear) dirty = MatrixType::Shear; + return *this; +} + +static const float deg2rad = float(0.017453292519943295769); // pi/180 +static const float inv_dist_to_plane = 1. / 1024.; + +VMatrix &VMatrix::rotate(float a, Axis axis) +{ + if (a == 0) return *this; + + float sina = 0; + float cosa = 0; + if (a == 90. || a == -270.) + sina = 1.; + else if (a == 270. || a == -90.) + sina = -1.; + else if (a == 180.) + cosa = -1.; + else { + float b = deg2rad * a; // convert to radians + sina = std::sin(b); // fast and convenient + cosa = std::cos(b); + } + + if (axis == Axis::Z) { + switch (type()) { + case MatrixType::None: + case MatrixType::Translate: + m11 = cosa; + m12 = sina; + m21 = -sina; + m22 = cosa; + break; + case MatrixType::Scale: { + float tm11 = cosa * m11; + float tm12 = sina * m22; + float tm21 = -sina * m11; + float tm22 = cosa * m22; + m11 = tm11; + m12 = tm12; + m21 = tm21; + m22 = tm22; + break; + } + case MatrixType::Project: { + float tm13 = cosa * m13 + sina * m23; + float tm23 = -sina * m13 + cosa * m23; + m13 = tm13; + m23 = tm23; + VECTOR_FALLTHROUGH + } + case MatrixType::Rotate: + case MatrixType::Shear: { + float tm11 = cosa * m11 + sina * m21; + float tm12 = cosa * m12 + sina * m22; + float tm21 = -sina * m11 + cosa * m21; + float tm22 = -sina * m12 + cosa * m22; + m11 = tm11; + m12 = tm12; + m21 = tm21; + m22 = tm22; + break; + } + } + if (dirty < MatrixType::Rotate) dirty = MatrixType::Rotate; + } else { + VMatrix result; + if (axis == Axis::Y) { + result.m11 = cosa; + result.m13 = -sina * inv_dist_to_plane; + } else { + result.m22 = cosa; + result.m23 = -sina * inv_dist_to_plane; + } + result.mType = MatrixType::Project; + *this = result * *this; + } + + return *this; +} + +VMatrix VMatrix::operator*(const VMatrix &m) const +{ + const MatrixType otherType = m.type(); + if (otherType == MatrixType::None) return *this; + + const MatrixType thisType = type(); + if (thisType == MatrixType::None) return m; + + VMatrix t; + MatrixType type = vMax(thisType, otherType); + switch (type) { + case MatrixType::None: + break; + case MatrixType::Translate: + t.mtx = mtx + m.mtx; + t.mty += mty + m.mty; + break; + case MatrixType::Scale: { + float m11v = m11 * m.m11; + float m22v = m22 * m.m22; + + float m31v = mtx * m.m11 + m.mtx; + float m32v = mty * m.m22 + m.mty; + + t.m11 = m11v; + t.m22 = m22v; + t.mtx = m31v; + t.mty = m32v; + break; + } + case MatrixType::Rotate: + case MatrixType::Shear: { + float m11v = m11 * m.m11 + m12 * m.m21; + float m12v = m11 * m.m12 + m12 * m.m22; + + float m21v = m21 * m.m11 + m22 * m.m21; + float m22v = m21 * m.m12 + m22 * m.m22; + + float m31v = mtx * m.m11 + mty * m.m21 + m.mtx; + float m32v = mtx * m.m12 + mty * m.m22 + m.mty; + + t.m11 = m11v; + t.m12 = m12v; + t.m21 = m21v; + t.m22 = m22v; + t.mtx = m31v; + t.mty = m32v; + break; + } + case MatrixType::Project: { + float m11v = m11 * m.m11 + m12 * 
m.m21 + m13 * m.mtx; + float m12v = m11 * m.m12 + m12 * m.m22 + m13 * m.mty; + float m13v = m11 * m.m13 + m12 * m.m23 + m13 * m.m33; + + float m21v = m21 * m.m11 + m22 * m.m21 + m23 * m.mtx; + float m22v = m21 * m.m12 + m22 * m.m22 + m23 * m.mty; + float m23v = m21 * m.m13 + m22 * m.m23 + m23 * m.m33; + + float m31v = mtx * m.m11 + mty * m.m21 + m33 * m.mtx; + float m32v = mtx * m.m12 + mty * m.m22 + m33 * m.mty; + float m33v = mtx * m.m13 + mty * m.m23 + m33 * m.m33; + + t.m11 = m11v; + t.m12 = m12v; + t.m13 = m13v; + t.m21 = m21v; + t.m22 = m22v; + t.m23 = m23v; + t.mtx = m31v; + t.mty = m32v; + t.m33 = m33v; + } + } + + t.dirty = type; + t.mType = type; + + return t; +} + +VMatrix &VMatrix::operator*=(const VMatrix &o) +{ + const MatrixType otherType = o.type(); + if (otherType == MatrixType::None) return *this; + + const MatrixType thisType = type(); + if (thisType == MatrixType::None) return operator=(o); + + MatrixType t = vMax(thisType, otherType); + switch (t) { + case MatrixType::None: + break; + case MatrixType::Translate: + mtx += o.mtx; + mty += o.mty; + break; + case MatrixType::Scale: { + float m11v = m11 * o.m11; + float m22v = m22 * o.m22; + + float m31v = mtx * o.m11 + o.mtx; + float m32v = mty * o.m22 + o.mty; + + m11 = m11v; + m22 = m22v; + mtx = m31v; + mty = m32v; + break; + } + case MatrixType::Rotate: + case MatrixType::Shear: { + float m11v = m11 * o.m11 + m12 * o.m21; + float m12v = m11 * o.m12 + m12 * o.m22; + + float m21v = m21 * o.m11 + m22 * o.m21; + float m22v = m21 * o.m12 + m22 * o.m22; + + float m31v = mtx * o.m11 + mty * o.m21 + o.mtx; + float m32v = mtx * o.m12 + mty * o.m22 + o.mty; + + m11 = m11v; + m12 = m12v; + m21 = m21v; + m22 = m22v; + mtx = m31v; + mty = m32v; + break; + } + case MatrixType::Project: { + float m11v = m11 * o.m11 + m12 * o.m21 + m13 * o.mtx; + float m12v = m11 * o.m12 + m12 * o.m22 + m13 * o.mty; + float m13v = m11 * o.m13 + m12 * o.m23 + m13 * o.m33; + + float m21v = m21 * o.m11 + m22 * o.m21 + m23 * o.mtx; + float m22v = m21 * o.m12 + m22 * o.m22 + m23 * o.mty; + float m23v = m21 * o.m13 + m22 * o.m23 + m23 * o.m33; + + float m31v = mtx * o.m11 + mty * o.m21 + m33 * o.mtx; + float m32v = mtx * o.m12 + mty * o.m22 + m33 * o.mty; + float m33v = mtx * o.m13 + mty * o.m23 + m33 * o.m33; + + m11 = m11v; + m12 = m12v; + m13 = m13v; + m21 = m21v; + m22 = m22v; + m23 = m23v; + mtx = m31v; + mty = m32v; + m33 = m33v; + } + } + + dirty = t; + mType = t; + + return *this; +} + +VMatrix VMatrix::adjoint() const +{ + float h11, h12, h13, h21, h22, h23, h31, h32, h33; + h11 = m22 * m33 - m23 * mty; + h21 = m23 * mtx - m21 * m33; + h31 = m21 * mty - m22 * mtx; + h12 = m13 * mty - m12 * m33; + h22 = m11 * m33 - m13 * mtx; + h32 = m12 * mtx - m11 * mty; + h13 = m12 * m23 - m13 * m22; + h23 = m13 * m21 - m11 * m23; + h33 = m11 * m22 - m12 * m21; + + VMatrix res; + res.m11 = h11; + res.m12 = h12; + res.m13 = h13; + res.m21 = h21; + res.m22 = h22; + res.m23 = h23; + res.mtx = h31; + res.mty = h32; + res.m33 = h33; + res.mType = MatrixType::None; + res.dirty = MatrixType::Project; + + return res; +} + +VMatrix VMatrix::inverted(bool *invertible) const +{ + VMatrix invert; + bool inv = true; + + switch (type()) { + case MatrixType::None: + break; + case MatrixType::Translate: + invert.mtx = -mtx; + invert.mty = -mty; + break; + case MatrixType::Scale: + inv = !vIsZero(m11); + inv &= !vIsZero(m22); + if (inv) { + invert.m11 = 1.0f / m11; + invert.m22 = 1.0f / m22; + invert.mtx = -mtx * invert.m11; + invert.mty = -mty * invert.m22; + } + break; + 
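+    // The general case below applies the classical adjugate formula
+    // A^-1 = adjoint(A) / det(A): adjoint() builds the transposed cofactor
+    // matrix, so dividing it by the determinant yields the inverse for the
+    // remaining types (Rotate, Shear, Project). Inversion fails only when
+    // the determinant is zero.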
default: + // general case + float det = determinant(); + inv = !vIsZero(det); + if (inv) invert = (adjoint() /= det); + // TODO Test above line + break; + } + + if (invertible) *invertible = inv; + + if (inv) { + // inverting doesn't change the type + invert.mType = mType; + invert.dirty = dirty; + } + + return invert; +} + +bool VMatrix::operator==(const VMatrix &o) const +{ + return fuzzyCompare(o); +} + +bool VMatrix::operator!=(const VMatrix &o) const +{ + return !operator==(o); +} + +bool VMatrix::fuzzyCompare(const VMatrix &o) const +{ + return vCompare(m11, o.m11) && vCompare(m12, o.m12) && + vCompare(m21, o.m21) && vCompare(m22, o.m22) && + vCompare(mtx, o.mtx) && vCompare(mty, o.mty); +} + +#define V_NEAR_CLIP 0.000001f +#ifdef MAP +#undef MAP +#endif +#define MAP(x, y, nx, ny) \ + do { \ + float FX_ = x; \ + float FY_ = y; \ + switch (t) { \ + case MatrixType::None: \ + nx = FX_; \ + ny = FY_; \ + break; \ + case MatrixType::Translate: \ + nx = FX_ + mtx; \ + ny = FY_ + mty; \ + break; \ + case MatrixType::Scale: \ + nx = m11 * FX_ + mtx; \ + ny = m22 * FY_ + mty; \ + break; \ + case MatrixType::Rotate: \ + case MatrixType::Shear: \ + case MatrixType::Project: \ + nx = m11 * FX_ + m21 * FY_ + mtx; \ + ny = m12 * FX_ + m22 * FY_ + mty; \ + if (t == MatrixType::Project) { \ + float w = (m13 * FX_ + m23 * FY_ + m33); \ + if (w < V_NEAR_CLIP) w = V_NEAR_CLIP; \ + w = 1. / w; \ + nx *= w; \ + ny *= w; \ + } \ + } \ + } while (0) + +VRect VMatrix::map(const VRect &rect) const +{ + VMatrix::MatrixType t = type(); + if (t <= MatrixType::Translate) + return rect.translated(std::lround(mtx), std::lround(mty)); + + if (t <= MatrixType::Scale) { + int x = std::lround(m11 * rect.x() + mtx); + int y = std::lround(m22 * rect.y() + mty); + int w = std::lround(m11 * rect.width()); + int h = std::lround(m22 * rect.height()); + if (w < 0) { + w = -w; + x -= w; + } + if (h < 0) { + h = -h; + y -= h; + } + return {x, y, w, h}; + } else if (t < MatrixType::Project) { + // see mapToPolygon for explanations of the algorithm. 
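+        // In short: map all four corners and take the axis-aligned min/max.
+        // Illustrative example: rotating the unit square by 45 degrees sends
+        // its corners to (0,0), (0.71,0.71), (0,1.41) and (-0.71,0.71), so
+        // the returned rect is roughly [-0.71, 0.71] x [0, 1.41].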
+ float x = 0, y = 0; + MAP(rect.left(), rect.top(), x, y); + float xmin = x; + float ymin = y; + float xmax = x; + float ymax = y; + MAP(rect.right() + 1, rect.top(), x, y); + xmin = vMin(xmin, x); + ymin = vMin(ymin, y); + xmax = vMax(xmax, x); + ymax = vMax(ymax, y); + MAP(rect.right() + 1, rect.bottom() + 1, x, y); + xmin = vMin(xmin, x); + ymin = vMin(ymin, y); + xmax = vMax(xmax, x); + ymax = vMax(ymax, y); + MAP(rect.left(), rect.bottom() + 1, x, y); + xmin = vMin(xmin, x); + ymin = vMin(ymin, y); + xmax = vMax(xmax, x); + ymax = vMax(ymax, y); + return VRect(std::lround(xmin), std::lround(ymin), + std::lround(xmax) - std::lround(xmin), + std::lround(ymax) - std::lround(ymin)); + } else { + // Not supported + assert(0); + return {}; + } +} + +VPointF VMatrix::map(const VPointF &p) const +{ + float fx = p.x(); + float fy = p.y(); + + float x = 0, y = 0; + + VMatrix::MatrixType t = type(); + switch (t) { + case MatrixType::None: + x = fx; + y = fy; + break; + case MatrixType::Translate: + x = fx + mtx; + y = fy + mty; + break; + case MatrixType::Scale: + x = m11 * fx + mtx; + y = m22 * fy + mty; + break; + case MatrixType::Rotate: + case MatrixType::Shear: + case MatrixType::Project: + x = m11 * fx + m21 * fy + mtx; + y = m12 * fx + m22 * fy + mty; + if (t == MatrixType::Project) { + float w = 1.0f / (m13 * fx + m23 * fy + m33); + x *= w; + y *= w; + } + } + return {x, y}; +} + +V_END_NAMESPACE diff --git a/vendor/github.com/Benau/go_rlottie/vector_vmatrix.h b/vendor/github.com/Benau/go_rlottie/vector_vmatrix.h new file mode 100644 index 00000000..ee3ad2b5 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_vmatrix.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef VMATRIX_H +#define VMATRIX_H +#include "vector_vglobal.h" +#include "vector_vpoint.h" +#include "vector_vrect.h" + +V_BEGIN_NAMESPACE + +struct VMatrixData; +class VMatrix { +public: + enum class Axis { X, Y, Z }; + enum class MatrixType: unsigned char { + None = 0x00, + Translate = 0x01, + Scale = 0x02, + Rotate = 0x04, + Shear = 0x08, + Project = 0x10 + }; + VMatrix() = default; + bool isAffine() const; + bool isIdentity() const; + bool isInvertible() const; + bool isScaling() const; + bool isRotating() const; + bool isTranslating() const; + MatrixType type() const; + inline float determinant() const; + + float m_11() const { return m11;} + float m_12() const { return m12;} + float m_13() const { return m13;} + + float m_21() const { return m21;} + float m_22() const { return m22;} + float m_23() const { return m23;} + + float m_tx() const { return mtx;} + float m_ty() const { return mty;} + float m_33() const { return m33;} + + VMatrix &translate(VPointF pos) { return translate(pos.x(), pos.y()); } + VMatrix &translate(float dx, float dy); + VMatrix &scale(VPointF s) { return scale(s.x(), s.y()); } + VMatrix &scale(float sx, float sy); + VMatrix &shear(float sh, float sv); + VMatrix &rotate(float a, Axis axis = VMatrix::Axis::Z); + VMatrix &rotateRadians(float a, Axis axis = VMatrix::Axis::Z); + + VPointF map(const VPointF &p) const; + inline VPointF map(float x, float y) const; + VRect map(const VRect &r) const; + + V_REQUIRED_RESULT VMatrix inverted(bool *invertible = nullptr) const; + V_REQUIRED_RESULT VMatrix adjoint() const; + + VMatrix operator*(const VMatrix &o) const; + VMatrix & operator*=(const VMatrix &); + VMatrix & operator*=(float mul); + VMatrix & operator/=(float div); + bool operator==(const VMatrix &) const; + bool operator!=(const VMatrix &) const; + bool fuzzyCompare(const VMatrix &) const; + float scale() const; +private: + friend struct VSpanData; + float m11{1}, m12{0}, m13{0}; + float m21{0}, m22{1}, m23{0}; + float mtx{0}, mty{0}, m33{1}; + mutable MatrixType mType{MatrixType::None}; + mutable MatrixType dirty{MatrixType::None}; +}; + +inline float VMatrix::scale() const +{ + constexpr float SQRT_2 = 1.41421f; + VPointF p1(0, 0); + VPointF p2(SQRT_2, SQRT_2); + p1 = map(p1); + p2 = map(p2); + VPointF final = p2 - p1; + + return std::sqrt(final.x() * final.x() + final.y() * final.y()) / 2.0f; +} + +inline VPointF VMatrix::map(float x, float y) const +{ + return map(VPointF(x, y)); +} + +V_END_NAMESPACE + +#endif // VMATRIX_H diff --git a/vendor/github.com/Benau/go_rlottie/vector_vpainter.cpp b/vendor/github.com/Benau/go_rlottie/vector_vpainter.cpp new file mode 100644 index 00000000..bcabc554 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_vpainter.cpp @@ -0,0 +1,172 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "vector_vpainter.h"
+#include <algorithm>
+
+V_BEGIN_NAMESPACE
+
+void VPainter::drawRle(const VPoint &, const VRle &rle)
+{
+    if (rle.empty()) return;
+    // mSpanData.updateSpanFunc();
+
+    if (!mSpanData.mUnclippedBlendFunc) return;
+
+    // do draw after applying clip.
+    rle.intersect(mSpanData.clipRect(), mSpanData.mUnclippedBlendFunc,
+                  &mSpanData);
+}
+
+void VPainter::drawRle(const VRle &rle, const VRle &clip)
+{
+    if (rle.empty() || clip.empty()) return;
+
+    if (!mSpanData.mUnclippedBlendFunc) return;
+
+    rle.intersect(clip, mSpanData.mUnclippedBlendFunc, &mSpanData);
+}
+
+static void fillRect(const VRect &r, VSpanData *data)
+{
+    auto x1 = std::max(r.x(), 0);
+    auto x2 = std::min(r.x() + r.width(), data->mDrawableSize.width());
+    auto y1 = std::max(r.y(), 0);
+    auto y2 = std::min(r.y() + r.height(), data->mDrawableSize.height());
+
+    if (x2 <= x1 || y2 <= y1) return;
+
+    const int  nspans = 256;
+    VRle::Span spans[nspans];
+
+    int y = y1;
+    while (y < y2) {
+        int n = std::min(nspans, y2 - y);
+        int i = 0;
+        while (i < n) {
+            spans[i].x = short(x1);
+            spans[i].len = ushort(x2 - x1);
+            spans[i].y = short(y + i);
+            spans[i].coverage = 255;
+            ++i;
+        }
+
+        data->mUnclippedBlendFunc(n, spans, data);
+        y += n;
+    }
+}
+
+void VPainter::drawBitmapUntransform(const VRect &  target,
+                                     const VBitmap &bitmap,
+                                     const VRect &  source,
+                                     uint8_t        const_alpha)
+{
+    mSpanData.initTexture(&bitmap, const_alpha, source);
+    if (!mSpanData.mUnclippedBlendFunc) return;
+
+    // update translation matrix for source texture.
+    mSpanData.dx = float(target.x() - source.x());
+    mSpanData.dy = float(target.y() - source.y());
+
+    fillRect(target, &mSpanData);
+}
+
+VPainter::VPainter(VBitmap *buffer)
+{
+    begin(buffer);
+}
+
+bool VPainter::begin(VBitmap *buffer)
+{
+    mBuffer.prepare(buffer);
+    mSpanData.init(&mBuffer);
+    // TODO find a better api to clear the surface
+    mBuffer.clear();
+    return true;
+}
+
+void VPainter::end() {}
+
+void VPainter::setDrawRegion(const VRect &region)
+{
+    mSpanData.setDrawRegion(region);
+}
+
+void VPainter::setBrush(const VBrush &brush)
+{
+    mSpanData.setup(brush);
+}
+
+void VPainter::setBlendMode(BlendMode mode)
+{
+    mSpanData.mBlendMode = mode;
+}
+
+VRect VPainter::clipBoundingRect() const
+{
+    return mSpanData.clipRect();
+}
+
+void VPainter::drawBitmap(const VPoint &point, const VBitmap &bitmap,
+                          const VRect &source, uint8_t const_alpha)
+{
+    if (!bitmap.valid()) return;
+
+    drawBitmap(VRect(point, bitmap.size()),
+               bitmap, source, const_alpha);
+}
+
+void VPainter::drawBitmap(const VRect &target, const VBitmap &bitmap,
+                          const VRect &source, uint8_t const_alpha)
+{
+    if (!bitmap.valid()) return;
+
+    // clear any existing brush data.
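+    // (a bitmap blit sets up its own texture via initTexture(); a brush left
+    // over from an earlier fill would otherwise drive the blend function)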
+ setBrush(VBrush()); + + if (target.size() == source.size()) { + drawBitmapUntransform(target, bitmap, source, const_alpha); + } else { + // @TODO scaling + } +} + +void VPainter::drawBitmap(const VPoint &point, const VBitmap &bitmap, + uint8_t const_alpha) +{ + if (!bitmap.valid()) return; + + drawBitmap(VRect(point, bitmap.size()), + bitmap, bitmap.rect(), + const_alpha); +} + +void VPainter::drawBitmap(const VRect &rect, const VBitmap &bitmap, + uint8_t const_alpha) +{ + if (!bitmap.valid()) return; + + drawBitmap(rect, bitmap, bitmap.rect(), + const_alpha); +} + +V_END_NAMESPACE diff --git a/vendor/github.com/Benau/go_rlottie/vector_vpainter.h b/vendor/github.com/Benau/go_rlottie/vector_vpainter.h new file mode 100644 index 00000000..01cd99a5 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_vpainter.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef VPAINTER_H +#define VPAINTER_H + +#include "vector_vbrush.h" +#include "vector_vpoint.h" +#include "vector_vrle.h" +#include "vector_vdrawhelper.h" + +V_BEGIN_NAMESPACE + +class VBitmap; +class VPainter { +public: + VPainter() = default; + explicit VPainter(VBitmap *buffer); + bool begin(VBitmap *buffer); + void end(); + void setDrawRegion(const VRect ®ion); // sub surface rendering area. 
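+    // drawing API: drawRle() blends pre-rasterized coverage spans and
+    // drawBitmap() blits textures; the active brush, blend mode and clip
+    // live in the shared span-generation state (VSpanData).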
+ void setBrush(const VBrush &brush); + void setBlendMode(BlendMode mode); + void drawRle(const VPoint &pos, const VRle &rle); + void drawRle(const VRle &rle, const VRle &clip); + VRect clipBoundingRect() const; + + void drawBitmap(const VPoint &point, const VBitmap &bitmap, const VRect &source, uint8_t const_alpha = 255); + void drawBitmap(const VRect &target, const VBitmap &bitmap, const VRect &source, uint8_t const_alpha = 255); + void drawBitmap(const VPoint &point, const VBitmap &bitmap, uint8_t const_alpha = 255); + void drawBitmap(const VRect &rect, const VBitmap &bitmap, uint8_t const_alpha = 255); +private: + void drawBitmapUntransform(const VRect &target, const VBitmap &bitmap, + const VRect &source, uint8_t const_alpha); + VRasterBuffer mBuffer; + VSpanData mSpanData; +}; + +V_END_NAMESPACE + +#endif // VPAINTER_H diff --git a/vendor/github.com/Benau/go_rlottie/vector_vpath.cpp b/vendor/github.com/Benau/go_rlottie/vector_vpath.cpp new file mode 100644 index 00000000..470c6d42 --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_vpath.cpp @@ -0,0 +1,709 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+#include "vector_vpath.h"
+#include <cassert>
+#include <iterator>
+#include <vector>
+#include "vector_vbezier.h"
+#include "vector_vdebug.h"
+#include "vector_vline.h"
+#include "vector_vrect.h"
+
+V_BEGIN_NAMESPACE
+
+void VPath::VPathData::transform(const VMatrix &m)
+{
+    for (auto &i : m_points) {
+        i = m.map(i);
+    }
+    mLengthDirty = true;
+}
+
+float VPath::VPathData::length() const
+{
+    if (!mLengthDirty) return mLength;
+
+    mLengthDirty = false;
+    mLength = 0.0;
+
+    size_t i = 0;
+    for (auto e : m_elements) {
+        switch (e) {
+        case VPath::Element::MoveTo:
+            i++;
+            break;
+        case VPath::Element::LineTo: {
+            mLength += VLine(m_points[i - 1], m_points[i]).length();
+            i++;
+            break;
+        }
+        case VPath::Element::CubicTo: {
+            mLength += VBezier::fromPoints(m_points[i - 1], m_points[i],
+                                           m_points[i + 1], m_points[i + 2])
+                           .length();
+            i += 3;
+            break;
+        }
+        case VPath::Element::Close:
+            break;
+        }
+    }
+
+    return mLength;
+}
+
+void VPath::VPathData::checkNewSegment()
+{
+    if (mNewSegment) {
+        moveTo(0, 0);
+        mNewSegment = false;
+    }
+}
+
+void VPath::VPathData::moveTo(float x, float y)
+{
+    mStartPoint = {x, y};
+    mNewSegment = false;
+    m_elements.emplace_back(VPath::Element::MoveTo);
+    m_points.emplace_back(x, y);
+    m_segments++;
+    mLengthDirty = true;
+}
+
+void VPath::VPathData::lineTo(float x, float y)
+{
+    checkNewSegment();
+    m_elements.emplace_back(VPath::Element::LineTo);
+    m_points.emplace_back(x, y);
+    mLengthDirty = true;
+}
+
+void VPath::VPathData::cubicTo(float cx1, float cy1, float cx2, float cy2,
+                               float ex, float ey)
+{
+    checkNewSegment();
+    m_elements.emplace_back(VPath::Element::CubicTo);
+    m_points.emplace_back(cx1, cy1);
+    m_points.emplace_back(cx2, cy2);
+    m_points.emplace_back(ex, ey);
+    mLengthDirty = true;
+}
+
+void VPath::VPathData::close()
+{
+    if (empty()) return;
+
+    const VPointF &lastPt = m_points.back();
+    if (!fuzzyCompare(mStartPoint, lastPt)) {
+        lineTo(mStartPoint.x(), mStartPoint.y());
+    }
+    m_elements.push_back(VPath::Element::Close);
+    mNewSegment = true;
+    mLengthDirty = true;
+}
+
+void VPath::VPathData::reset()
+{
+    if (empty()) return;
+
+    m_elements.clear();
+    m_points.clear();
+    m_segments = 0;
+    mLength = 0;
+    mLengthDirty = false;
+}
+
+size_t VPath::VPathData::segments() const
+{
+    return m_segments;
+}
+
+void VPath::VPathData::reserve(size_t pts, size_t elms)
+{
+    if (m_points.capacity() < m_points.size() + pts)
+        m_points.reserve(m_points.size() + pts);
+    if (m_elements.capacity() < m_elements.size() + elms)
+        m_elements.reserve(m_elements.size() + elms);
+}
+
+static VPointF curvesForArc(const VRectF &, float, float, VPointF *, size_t *);
+static constexpr float PATH_KAPPA = 0.5522847498f;
+static constexpr float K_PI = 3.141592f;
+
+void VPath::VPathData::arcTo(const VRectF &rect, float startAngle,
+                             float sweepLength, bool forceMoveTo)
+{
+    size_t  point_count = 0;
+    VPointF pts[15];
+    VPointF curve_start =
+        curvesForArc(rect, startAngle, sweepLength, pts, &point_count);
+
+    reserve(point_count + 1, point_count / 3 + 1);
+    if (empty() || forceMoveTo) {
+        moveTo(curve_start.x(), curve_start.y());
+    } else {
+        lineTo(curve_start.x(), curve_start.y());
+    }
+    for (size_t i = 0; i < point_count; i += 3) {
+        cubicTo(pts[i].x(), pts[i].y(), pts[i + 1].x(), pts[i + 1].y(),
+                pts[i + 2].x(), pts[i + 2].y());
+    }
+}
+
+void VPath::VPathData::addCircle(float cx, float cy, float radius,
+                                 VPath::Direction dir)
+{
+    addOval(VRectF(cx - radius, cy - radius, 2 * radius, 2 * radius), dir);
+}
+
+void VPath::VPathData::addOval(const VRectF &rect, VPath::Direction dir)
+{
+    if
(rect.empty()) return; + + float x = rect.x(); + float y = rect.y(); + + float w = rect.width(); + float w2 = rect.width() / 2; + float w2k = w2 * PATH_KAPPA; + + float h = rect.height(); + float h2 = rect.height() / 2; + float h2k = h2 * PATH_KAPPA; + + reserve(13, 6); // 1Move + 4Cubic + 1Close + if (dir == VPath::Direction::CW) { + // moveto 12 o'clock. + moveTo(x + w2, y); + // 12 -> 3 o'clock + cubicTo(x + w2 + w2k, y, x + w, y + h2 - h2k, x + w, y + h2); + // 3 -> 6 o'clock + cubicTo(x + w, y + h2 + h2k, x + w2 + w2k, y + h, x + w2, y + h); + // 6 -> 9 o'clock + cubicTo(x + w2 - w2k, y + h, x, y + h2 + h2k, x, y + h2); + // 9 -> 12 o'clock + cubicTo(x, y + h2 - h2k, x + w2 - w2k, y, x + w2, y); + } else { + // moveto 12 o'clock. + moveTo(x + w2, y); + // 12 -> 9 o'clock + cubicTo(x + w2 - w2k, y, x, y + h2 - h2k, x, y + h2); + // 9 -> 6 o'clock + cubicTo(x, y + h2 + h2k, x + w2 - w2k, y + h, x + w2, y + h); + // 6 -> 3 o'clock + cubicTo(x + w2 + w2k, y + h, x + w, y + h2 + h2k, x + w, y + h2); + // 3 -> 12 o'clock + cubicTo(x + w, y + h2 - h2k, x + w2 + w2k, y, x + w2, y); + } + close(); +} + +void VPath::VPathData::addRect(const VRectF &rect, VPath::Direction dir) +{ + float x = rect.x(); + float y = rect.y(); + float w = rect.width(); + float h = rect.height(); + + if (vCompare(w, 0.f) && vCompare(h, 0.f)) return; + + reserve(5, 6); // 1Move + 4Line + 1Close + if (dir == VPath::Direction::CW) { + moveTo(x + w, y); + lineTo(x + w, y + h); + lineTo(x, y + h); + lineTo(x, y); + close(); + } else { + moveTo(x + w, y); + lineTo(x, y); + lineTo(x, y + h); + lineTo(x + w, y + h); + close(); + } +} + +void VPath::VPathData::addRoundRect(const VRectF &rect, float roundness, + VPath::Direction dir) +{ + if (2 * roundness > rect.width()) roundness = rect.width() / 2.0f; + if (2 * roundness > rect.height()) roundness = rect.height() / 2.0f; + addRoundRect(rect, roundness, roundness, dir); +} + +void VPath::VPathData::addRoundRect(const VRectF &rect, float rx, float ry, + VPath::Direction dir) +{ + if (vCompare(rx, 0.f) || vCompare(ry, 0.f)) { + addRect(rect, dir); + return; + } + + float x = rect.x(); + float y = rect.y(); + float w = rect.width(); + float h = rect.height(); + // clamp the rx and ry radius value. 
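+    // (they are doubled below to get the bounding size of each corner arc,
+    // then capped at the rect's width/height so the arcs of opposite
+    // corners can never overlap)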
+ rx = 2 * rx; + ry = 2 * ry; + if (rx > w) rx = w; + if (ry > h) ry = h; + + reserve(17, 10); // 1Move + 4Cubic + 1Close + if (dir == VPath::Direction::CW) { + moveTo(x + w, y + ry / 2.f); + arcTo(VRectF(x + w - rx, y + h - ry, rx, ry), 0, -90, false); + arcTo(VRectF(x, y + h - ry, rx, ry), -90, -90, false); + arcTo(VRectF(x, y, rx, ry), -180, -90, false); + arcTo(VRectF(x + w - rx, y, rx, ry), -270, -90, false); + close(); + } else { + moveTo(x + w, y + ry / 2.f); + arcTo(VRectF(x + w - rx, y, rx, ry), 0, 90, false); + arcTo(VRectF(x, y, rx, ry), 90, 90, false); + arcTo(VRectF(x, y + h - ry, rx, ry), 180, 90, false); + arcTo(VRectF(x + w - rx, y + h - ry, rx, ry), 270, 90, false); + close(); + } +} + +static float tForArcAngle(float angle); +void findEllipseCoords(const VRectF &r, float angle, float length, + VPointF *startPoint, VPointF *endPoint) +{ + if (r.empty()) { + if (startPoint) *startPoint = VPointF(); + if (endPoint) *endPoint = VPointF(); + return; + } + + float w2 = r.width() / 2; + float h2 = r.height() / 2; + + float angles[2] = {angle, angle + length}; + VPointF *points[2] = {startPoint, endPoint}; + + for (int i = 0; i < 2; ++i) { + if (!points[i]) continue; + + float theta = angles[i] - 360 * floorf(angles[i] / 360); + float t = theta / 90; + // truncate + int quadrant = int(t); + t -= quadrant; + + t = tForArcAngle(90 * t); + + // swap x and y? + if (quadrant & 1) t = 1 - t; + + float a, b, c, d; + VBezier::coefficients(t, a, b, c, d); + VPointF p(a + b + c * PATH_KAPPA, d + c + b * PATH_KAPPA); + + // left quadrants + if (quadrant == 1 || quadrant == 2) p.rx() = -p.x(); + + // top quadrants + if (quadrant == 0 || quadrant == 1) p.ry() = -p.y(); + + *points[i] = r.center() + VPointF(w2 * p.x(), h2 * p.y()); + } +} + +static float tForArcAngle(float angle) +{ + float radians, cos_angle, sin_angle, tc, ts, t; + + if (vCompare(angle, 0.f)) return 0; + if (vCompare(angle, 90.0f)) return 1; + + radians = (angle / 180) * K_PI; + + cos_angle = cosf(radians); + sin_angle = sinf(radians); + + // initial guess + tc = angle / 90; + + // do some iterations of newton's method to approximate cos_angle + // finds the zero of the function b.pointAt(tc).x() - cos_angle + tc -= ((((2 - 3 * PATH_KAPPA) * tc + 3 * (PATH_KAPPA - 1)) * tc) * tc + 1 - + cos_angle) // value + / (((6 - 9 * PATH_KAPPA) * tc + 6 * (PATH_KAPPA - 1)) * + tc); // derivative + tc -= ((((2 - 3 * PATH_KAPPA) * tc + 3 * (PATH_KAPPA - 1)) * tc) * tc + 1 - + cos_angle) // value + / (((6 - 9 * PATH_KAPPA) * tc + 6 * (PATH_KAPPA - 1)) * + tc); // derivative + + // initial guess + ts = tc; + // do some iterations of newton's method to approximate sin_angle + // finds the zero of the function b.pointAt(tc).y() - sin_angle + ts -= ((((3 * PATH_KAPPA - 2) * ts - 6 * PATH_KAPPA + 3) * ts + + 3 * PATH_KAPPA) * + ts - + sin_angle) / + (((9 * PATH_KAPPA - 6) * ts + 12 * PATH_KAPPA - 6) * ts + + 3 * PATH_KAPPA); + ts -= ((((3 * PATH_KAPPA - 2) * ts - 6 * PATH_KAPPA + 3) * ts + + 3 * PATH_KAPPA) * + ts - + sin_angle) / + (((9 * PATH_KAPPA - 6) * ts + 12 * PATH_KAPPA - 6) * ts + + 3 * PATH_KAPPA); + + // use the average of the t that best approximates cos_angle + // and the t that best approximates sin_angle + t = 0.5f * (tc + ts); + return t; +} + +// The return value is the starting point of the arc +static VPointF curvesForArc(const VRectF &rect, float startAngle, + float sweepLength, VPointF *curves, + size_t *point_count) +{ + if (rect.empty()) { + return {}; + } + + float x = rect.x(); + float y = rect.y(); + + float w = 
rect.width(); + float w2 = rect.width() / 2; + float w2k = w2 * PATH_KAPPA; + + float h = rect.height(); + float h2 = rect.height() / 2; + float h2k = h2 * PATH_KAPPA; + + VPointF points[16] = { + // start point + VPointF(x + w, y + h2), + + // 0 -> 270 degrees + VPointF(x + w, y + h2 + h2k), VPointF(x + w2 + w2k, y + h), + VPointF(x + w2, y + h), + + // 270 -> 180 degrees + VPointF(x + w2 - w2k, y + h), VPointF(x, y + h2 + h2k), + VPointF(x, y + h2), + + // 180 -> 90 degrees + VPointF(x, y + h2 - h2k), VPointF(x + w2 - w2k, y), VPointF(x + w2, y), + + // 90 -> 0 degrees + VPointF(x + w2 + w2k, y), VPointF(x + w, y + h2 - h2k), + VPointF(x + w, y + h2)}; + + if (sweepLength > 360) + sweepLength = 360; + else if (sweepLength < -360) + sweepLength = -360; + + // Special case fast paths + if (startAngle == 0.0f) { + if (vCompare(sweepLength, 360)) { + for (int i = 11; i >= 0; --i) curves[(*point_count)++] = points[i]; + return points[12]; + } else if (vCompare(sweepLength, -360)) { + for (int i = 1; i <= 12; ++i) curves[(*point_count)++] = points[i]; + return points[0]; + } + } + + int startSegment = int(floorf(startAngle / 90.0f)); + int endSegment = int(floorf((startAngle + sweepLength) / 90.0f)); + + float startT = (startAngle - startSegment * 90) / 90; + float endT = (startAngle + sweepLength - endSegment * 90) / 90; + + int delta = sweepLength > 0 ? 1 : -1; + if (delta < 0) { + startT = 1 - startT; + endT = 1 - endT; + } + + // avoid empty start segment + if (vIsZero(startT - float(1))) { + startT = 0; + startSegment += delta; + } + + // avoid empty end segment + if (vIsZero(endT)) { + endT = 1; + endSegment -= delta; + } + + startT = tForArcAngle(startT * 90); + endT = tForArcAngle(endT * 90); + + const bool splitAtStart = !vIsZero(startT); + const bool splitAtEnd = !vIsZero(endT - float(1)); + + const int end = endSegment + delta; + + // empty arc? + if (startSegment == end) { + const int quadrant = 3 - ((startSegment % 4) + 4) % 4; + const int j = 3 * quadrant; + return delta > 0 ? points[j + 3] : points[j]; + } + + VPointF startPoint, endPoint; + findEllipseCoords(rect, startAngle, sweepLength, &startPoint, &endPoint); + + for (int i = startSegment; i != end; i += delta) { + const int quadrant = 3 - ((i % 4) + 4) % 4; + const int j = 3 * quadrant; + + VBezier b; + if (delta > 0) + b = VBezier::fromPoints(points[j + 3], points[j + 2], points[j + 1], + points[j]); + else + b = VBezier::fromPoints(points[j], points[j + 1], points[j + 2], + points[j + 3]); + + // empty arc? 
+ if (startSegment == endSegment && vCompare(startT, endT)) + return startPoint; + + if (i == startSegment) { + if (i == endSegment && splitAtEnd) + b = b.onInterval(startT, endT); + else if (splitAtStart) + b = b.onInterval(startT, 1); + } else if (i == endSegment && splitAtEnd) { + b = b.onInterval(0, endT); + } + + // push control points + curves[(*point_count)++] = b.pt2(); + curves[(*point_count)++] = b.pt3(); + curves[(*point_count)++] = b.pt4(); + } + + curves[*(point_count)-1] = endPoint; + + return startPoint; +} + +void VPath::VPathData::addPolystar(float points, float innerRadius, + float outerRadius, float innerRoundness, + float outerRoundness, float startAngle, + float cx, float cy, VPath::Direction dir) +{ + const static float POLYSTAR_MAGIC_NUMBER = 0.47829f / 0.28f; + float currentAngle = (startAngle - 90.0f) * K_PI / 180.0f; + float x; + float y; + float partialPointRadius = 0; + float anglePerPoint = (2.0f * K_PI / points); + float halfAnglePerPoint = anglePerPoint / 2.0f; + float partialPointAmount = points - floorf(points); + bool longSegment = false; + size_t numPoints = size_t(ceilf(points) * 2); + float angleDir = ((dir == VPath::Direction::CW) ? 1.0f : -1.0f); + bool hasRoundness = false; + + innerRoundness /= 100.0f; + outerRoundness /= 100.0f; + + if (!vCompare(partialPointAmount, 0)) { + currentAngle += + halfAnglePerPoint * (1.0f - partialPointAmount) * angleDir; + } + + if (!vCompare(partialPointAmount, 0)) { + partialPointRadius = + innerRadius + partialPointAmount * (outerRadius - innerRadius); + x = partialPointRadius * cosf(currentAngle); + y = partialPointRadius * sinf(currentAngle); + currentAngle += anglePerPoint * partialPointAmount / 2.0f * angleDir; + } else { + x = outerRadius * cosf(currentAngle); + y = outerRadius * sinf(currentAngle); + currentAngle += halfAnglePerPoint * angleDir; + } + + if (vIsZero(innerRoundness) && vIsZero(outerRoundness)) { + reserve(numPoints + 2, numPoints + 3); + } else { + reserve(numPoints * 3 + 2, numPoints + 3); + hasRoundness = true; + } + + moveTo(x + cx, y + cy); + + for (size_t i = 0; i < numPoints; i++) { + float radius = longSegment ? outerRadius : innerRadius; + float dTheta = halfAnglePerPoint; + if (!vIsZero(partialPointRadius) && i == numPoints - 2) { + dTheta = anglePerPoint * partialPointAmount / 2.0f; + } + if (!vIsZero(partialPointRadius) && i == numPoints - 1) { + radius = partialPointRadius; + } + float previousX = x; + float previousY = y; + x = radius * cosf(currentAngle); + y = radius * sinf(currentAngle); + + if (hasRoundness) { + float cp1Theta = + (atan2f(previousY, previousX) - K_PI / 2.0f * angleDir); + float cp1Dx = cosf(cp1Theta); + float cp1Dy = sinf(cp1Theta); + float cp2Theta = (atan2f(y, x) - K_PI / 2.0f * angleDir); + float cp2Dx = cosf(cp2Theta); + float cp2Dy = sinf(cp2Theta); + + float cp1Roundness = longSegment ? innerRoundness : outerRoundness; + float cp2Roundness = longSegment ? outerRoundness : innerRoundness; + float cp1Radius = longSegment ? innerRadius : outerRadius; + float cp2Radius = longSegment ? 
outerRadius : innerRadius; + + float cp1x = cp1Radius * cp1Roundness * POLYSTAR_MAGIC_NUMBER * + cp1Dx / points; + float cp1y = cp1Radius * cp1Roundness * POLYSTAR_MAGIC_NUMBER * + cp1Dy / points; + float cp2x = cp2Radius * cp2Roundness * POLYSTAR_MAGIC_NUMBER * + cp2Dx / points; + float cp2y = cp2Radius * cp2Roundness * POLYSTAR_MAGIC_NUMBER * + cp2Dy / points; + + if (!vIsZero(partialPointAmount) && + ((i == 0) || (i == numPoints - 1))) { + cp1x *= partialPointAmount; + cp1y *= partialPointAmount; + cp2x *= partialPointAmount; + cp2y *= partialPointAmount; + } + + cubicTo(previousX - cp1x + cx, previousY - cp1y + cy, x + cp2x + cx, + y + cp2y + cy, x + cx, y + cy); + } else { + lineTo(x + cx, y + cy); + } + + currentAngle += dTheta * angleDir; + longSegment = !longSegment; + } + + close(); +} + +void VPath::VPathData::addPolygon(float points, float radius, float roundness, + float startAngle, float cx, float cy, + VPath::Direction dir) +{ + // TODO: Need to support floating point number for number of points + const static float POLYGON_MAGIC_NUMBER = 0.25; + float currentAngle = (startAngle - 90.0f) * K_PI / 180.0f; + float x; + float y; + float anglePerPoint = 2.0f * K_PI / floorf(points); + size_t numPoints = size_t(floorf(points)); + float angleDir = ((dir == VPath::Direction::CW) ? 1.0f : -1.0f); + bool hasRoundness = false; + + roundness /= 100.0f; + + currentAngle = (currentAngle - 90.0f) * K_PI / 180.0f; + x = radius * cosf(currentAngle); + y = radius * sinf(currentAngle); + currentAngle += anglePerPoint * angleDir; + + if (vIsZero(roundness)) { + reserve(numPoints + 2, numPoints + 3); + } else { + reserve(numPoints * 3 + 2, numPoints + 3); + hasRoundness = true; + } + + moveTo(x + cx, y + cy); + + for (size_t i = 0; i < numPoints; i++) { + float previousX = x; + float previousY = y; + x = (radius * cosf(currentAngle)); + y = (radius * sinf(currentAngle)); + + if (hasRoundness) { + float cp1Theta = + (atan2f(previousY, previousX) - K_PI / 2.0f * angleDir); + float cp1Dx = cosf(cp1Theta); + float cp1Dy = sinf(cp1Theta); + float cp2Theta = atan2f(y, x) - K_PI / 2.0f * angleDir; + float cp2Dx = cosf(cp2Theta); + float cp2Dy = sinf(cp2Theta); + + float cp1x = radius * roundness * POLYGON_MAGIC_NUMBER * cp1Dx; + float cp1y = radius * roundness * POLYGON_MAGIC_NUMBER * cp1Dy; + float cp2x = radius * roundness * POLYGON_MAGIC_NUMBER * cp2Dx; + float cp2y = radius * roundness * POLYGON_MAGIC_NUMBER * cp2Dy; + + cubicTo(previousX - cp1x + cx, previousY - cp1y + cy, x + cp2x + cx, + y + cp2y + cy, x, y); + } else { + lineTo(x + cx, y + cy); + } + + currentAngle += anglePerPoint * angleDir; + } + + close(); +} + +void VPath::VPathData::addPath(const VPathData &path, const VMatrix *m) +{ + size_t segment = path.segments(); + + // make sure enough memory available + if (m_points.capacity() < m_points.size() + path.m_points.size()) + m_points.reserve(m_points.size() + path.m_points.size()); + + if (m_elements.capacity() < m_elements.size() + path.m_elements.size()) + m_elements.reserve(m_elements.size() + path.m_elements.size()); + + if (m) { + for (const auto &i : path.m_points) { + m_points.push_back(m->map(i)); + } + } else { + std::copy(path.m_points.begin(), path.m_points.end(), + back_inserter(m_points)); + } + + std::copy(path.m_elements.begin(), path.m_elements.end(), + back_inserter(m_elements)); + + m_segments += segment; + mLengthDirty = true; +} + +V_END_NAMESPACE diff --git a/vendor/github.com/Benau/go_rlottie/vector_vpath.h b/vendor/github.com/Benau/go_rlottie/vector_vpath.h new 
file mode 100644
index 00000000..17d6687d
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vpath.h
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VPATH_H
+#define VPATH_H
+#include <vector>
+#include "vector_vcowptr.h"
+#include "vector_vmatrix.h"
+#include "vector_vpoint.h"
+#include "vector_vrect.h"
+
+V_BEGIN_NAMESPACE
+
+struct VPathData;
+class VPath {
+public:
+    enum class Direction { CCW, CW };
+
+    enum class Element : uchar { MoveTo, LineTo, CubicTo, Close };
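+    // elements() is a stream of opcodes over the parallel points() array:
+    // MoveTo and LineTo consume one point each, CubicTo consumes three (two
+    // control points plus the end point) and Close consumes none.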
+    bool  empty() const;
+    bool  null() const;
+    void  moveTo(const VPointF &p);
+    void  moveTo(float x, float y);
+    void  lineTo(const VPointF &p);
+    void  lineTo(float x, float y);
+    void  cubicTo(const VPointF &c1, const VPointF &c2, const VPointF &e);
+    void  cubicTo(float c1x, float c1y, float c2x, float c2y, float ex,
+                  float ey);
+    void  arcTo(const VRectF &rect, float startAngle, float sweepLength,
+                bool forceMoveTo);
+    void  close();
+    void  reset();
+    void  reserve(size_t pts, size_t elms);
+    size_t segments() const;
+    void  addCircle(float cx, float cy, float radius,
+                    VPath::Direction dir = Direction::CW);
+    void  addOval(const VRectF &rect, VPath::Direction dir = Direction::CW);
+    void  addRoundRect(const VRectF &rect, float rx, float ry,
+                       VPath::Direction dir = Direction::CW);
+    void  addRoundRect(const VRectF &rect, float roundness,
+                       VPath::Direction dir = Direction::CW);
+    void  addRect(const VRectF &rect, VPath::Direction dir = Direction::CW);
+    void  addPolystar(float points, float innerRadius, float outerRadius,
+                      float innerRoundness, float outerRoundness,
+                      float startAngle, float cx, float cy,
+                      VPath::Direction dir = Direction::CW);
+    void  addPolygon(float points, float radius, float roundness,
+                     float startAngle, float cx, float cy,
+                     VPath::Direction dir = Direction::CW);
+    void  addPath(const VPath &path);
+    void  addPath(const VPath &path, const VMatrix &m);
+    void  transform(const VMatrix &m);
+    float length() const;
+    const std::vector<VPath::Element> &elements() const;
+    const std::vector<VPointF> &points() const;
+    void  clone(const VPath &srcPath);
+    bool  unique() const { return d.unique();}
+    size_t refCount() const { return d.refCount();}
+
+private:
+    struct VPathData {
+        bool  empty() const { return m_elements.empty(); }
+        bool  null() const { return empty() && !m_elements.capacity();}
+        void  moveTo(float x, float y);
+        void  lineTo(float x, float y);
+        void  cubicTo(float cx1, float cy1, float cx2, float cy2, float ex,
+                      float ey);
+        void  close();
+        void  reset();
+        void  reserve(size_t, size_t);
+        void  checkNewSegment();
+        size_t segments() const;
+        void  transform(const VMatrix &m);
+        float length() const;
+        void  addRoundRect(const VRectF &, float, float, VPath::Direction);
+        void  addRoundRect(const VRectF &, float, VPath::Direction);
+        void  addRect(const VRectF &, VPath::Direction);
+        void  arcTo(const VRectF &, float, float, bool);
+        void  addCircle(float, float, float, VPath::Direction);
+        void  addOval(const VRectF &, VPath::Direction);
+        void  addPolystar(float points, float innerRadius, float outerRadius,
+                          float innerRoundness, float outerRoundness,
+                          float startAngle, float cx, float cy,
+                          VPath::Direction dir = Direction::CW);
+        void  addPolygon(float points, float radius, float roundness,
+                         float startAngle, float cx, float cy,
+                         VPath::Direction dir = Direction::CW);
+        void  addPath(const VPathData &path, const VMatrix *m = nullptr);
+        void  clone(const VPath::VPathData &o) { *this = o;}
+        const std::vector<VPath::Element> &elements() const
+        {
+            return m_elements;
+        }
+        const std::vector<VPointF> &points() const { return m_points; }
+
+        std::vector<VPointF>        m_points;
+        std::vector<VPath::Element> m_elements;
+        size_t                      m_segments;
+        VPointF                     mStartPoint;
+        mutable float               mLength{0};
+        mutable bool                mLengthDirty{true};
+        bool                        mNewSegment;
+    };
+
+    vcow_ptr<VPathData> d;
+};
+
+inline bool VPath::empty() const
+{
+    return d->empty();
+}
+
+/*
+ * path is empty as well as null (no memory for data has been allocated yet).
+ */
+inline bool VPath::null() const
+{
+    return d->null();
+}
+
+inline void VPath::moveTo(const VPointF &p)
+{
+    d.write().moveTo(p.x(), p.y());
+}
+
+inline void VPath::lineTo(const VPointF &p)
+{
+    d.write().lineTo(p.x(), p.y());
+}
+
+inline void VPath::close()
+{
+    d.write().close();
+}
+
+inline void VPath::reset()
+{
+    d.write().reset();
+}
+
+inline void VPath::reserve(size_t pts, size_t elms)
+{
+    d.write().reserve(pts, elms);
+}
+
+inline size_t VPath::segments() const
+{
+    return d->segments();
+}
+
+inline float VPath::length() const
+{
+    return d->length();
+}
+
+inline void VPath::cubicTo(const VPointF &c1, const VPointF &c2,
+                           const VPointF &e)
+{
+    d.write().cubicTo(c1.x(), c1.y(), c2.x(), c2.y(), e.x(), e.y());
+}
+
+inline void VPath::lineTo(float x, float y)
+{
+    d.write().lineTo(x, y);
+}
+
+inline void VPath::moveTo(float x, float y)
+{
+    d.write().moveTo(x, y);
+}
+
+inline void VPath::cubicTo(float c1x, float c1y, float c2x, float c2y, float ex,
+                           float ey)
+{
+    d.write().cubicTo(c1x, c1y, c2x, c2y, ex, ey);
+}
+
+inline void VPath::transform(const VMatrix &m)
+{
+    d.write().transform(m);
+}
+
+inline void VPath::arcTo(const VRectF &rect, float startAngle,
+                         float sweepLength, bool forceMoveTo)
+{
+    d.write().arcTo(rect, startAngle, sweepLength, forceMoveTo);
+}
+
+inline void VPath::addRect(const VRectF &rect, VPath::Direction dir)
+{
+    d.write().addRect(rect, dir);
+}
+
+inline void VPath::addRoundRect(const VRectF &rect, float rx, float ry,
+                                VPath::Direction dir)
+{
+    d.write().addRoundRect(rect, rx, ry, dir);
+}
+
+inline void VPath::addRoundRect(const VRectF &rect, float roundness,
+                                VPath::Direction dir)
+{
+    d.write().addRoundRect(rect, roundness, dir);
+}
+
+inline void VPath::addCircle(float cx, float cy, float radius,
+                             VPath::Direction dir)
+{
+    d.write().addCircle(cx, cy, radius, dir);
+}
+
+inline void VPath::addOval(const VRectF &rect, VPath::Direction dir)
+{
+    d.write().addOval(rect, dir);
+}
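+// VPath is copy-on-write: const accessors read through d, while every mutator
+// goes through d.write(), which clones the shared VPathData when the
+// reference count is greater than one (see unique()/refCount() above).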
+inline void VPath::addPolystar(float points, float innerRadius,
+                               float outerRadius, float innerRoundness,
+                               float outerRoundness, float startAngle, float cx,
+                               float cy, VPath::Direction dir)
+{
+    d.write().addPolystar(points, innerRadius, outerRadius, innerRoundness,
+                          outerRoundness, startAngle, cx, cy, dir);
+}
+
+inline void VPath::addPolygon(float points, float radius, float roundness,
+                              float startAngle, float cx, float cy,
+                              VPath::Direction dir)
+{
+    d.write().addPolygon(points, radius, roundness, startAngle, cx, cy, dir);
+}
+
+inline void VPath::addPath(const VPath &path)
+{
+    if (path.empty()) return;
+
+    if (null()) {
+        *this = path;
+    } else {
+        d.write().addPath(path.d.read());
+    }
+}
+
+inline void VPath::addPath(const VPath &path, const VMatrix &m)
+{
+    if (path.empty()) return;
+
+    d.write().addPath(path.d.read(), &m);
+}
+
+inline const std::vector<VPath::Element> &VPath::elements() const
+{
+    return d->elements();
+}
+
+inline const std::vector<VPointF> &VPath::points() const
+{
+    return d->points();
+}
+
+inline void VPath::clone(const VPath &o)
+{
+    d.write().clone(o.d.read());
+}
+
+V_END_NAMESPACE
+
+#endif // VPATH_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vpathmesure.cpp b/vendor/github.com/Benau/go_rlottie/vector_vpathmesure.cpp
new file mode 100644
index 00000000..4288b941
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vpathmesure.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "vector_vpathmesure.h"
+#include <limits>
+#include "vector_vbezier.h"
+#include "vector_vdasher.h"
+
+V_BEGIN_NAMESPACE
+
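+// Example: with mStart = 0.25 and mEnd = 0.75 on a path of length L, trim()
+// below builds the dash pattern {0, 0.25*L, 0.5*L, max}: draw nothing, skip
+// the first quarter, keep the middle half, then skip the rest of the path.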
+/*
+ * start and end value must be normalized to [0 - 1]
+ * Path measure trims the path from [start --> end]
+ * if start > end, it is treated as a loop and trimmed as two segments
+ * [0 --> end] and [start --> 1]
+ */
+VPath VPathMesure::trim(const VPath &path)
+{
+    if (vCompare(mStart, mEnd)) return VPath();
+
+    if ((vCompare(mStart, 0.0f) && (vCompare(mEnd, 1.0f))) ||
+        (vCompare(mStart, 1.0f) && (vCompare(mEnd, 0.0f))))
+        return path;
+
+    float length = path.length();
+
+    if (mStart < mEnd) {
+        float array[4] = {
+            0.0f, length * mStart,  // 1st segment
+            (mEnd - mStart) * length,
+            std::numeric_limits<float>::max(),  // 2nd segment
+        };
+        VDasher dasher(array, 4);
+        dasher.dashed(path, mScratchObject);
+        return mScratchObject;
+    } else {
+        float array[4] = {
+            length * mEnd, (mStart - mEnd) * length,  // 1st segment
+            (1 - mStart) * length,
+            std::numeric_limits<float>::max(),  // 2nd segment
+        };
+        VDasher dasher(array, 4);
+        dasher.dashed(path, mScratchObject);
+        return mScratchObject;
+    }
+}
+
+V_END_NAMESPACE
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vpathmesure.h b/vendor/github.com/Benau/go_rlottie/vector_vpathmesure.h
new file mode 100644
index 00000000..43bd0863
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vpathmesure.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VPATHMESURE_H
+#define VPATHMESURE_H
+
+#include "vector_vpath.h"
+
+V_BEGIN_NAMESPACE
+
+class VPathMesure {
+public:
+    void setRange(float start, float end) {mStart = start; mEnd = end;}
+    void setStart(float start){mStart = start;}
+    void setEnd(float end){mEnd = end;}
+    VPath trim(const VPath &path);
+
+private:
+    float mStart{0.0f};
+    float mEnd{1.0f};
+    VPath mScratchObject;
+};
+
+V_END_NAMESPACE
+
+#endif // VPATHMESURE_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vpoint.h b/vendor/github.com/Benau/go_rlottie/vector_vpoint.h
new file mode 100644
index 00000000..be5c0ff9
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vpoint.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+ + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef VPOINT_H +#define VPOINT_H + +#include "vector_vglobal.h" + +V_BEGIN_NAMESPACE + +class VPointF { +public: + VPointF() = default; + constexpr inline VPointF(float x, float y) noexcept : mx(x), my(y) {} + constexpr inline float x() const noexcept { return mx; } + constexpr inline float y() const noexcept { return my; } + inline float & rx() noexcept { return mx; } + inline float & ry() noexcept { return my; } + inline void setX(float x) { mx = x; } + inline void setY(float y) { my = y; } + inline VPointF operator-() noexcept { return {-mx, -my}; } + inline VPointF & operator+=(const VPointF &p) noexcept; + inline VPointF & operator-=(const VPointF &p) noexcept; + friend const VPointF operator+(const VPointF &p1, const VPointF &p2) + { + return VPointF(p1.mx + p2.mx, p1.my + p2.my); + } + inline friend bool fuzzyCompare(const VPointF &p1, const VPointF &p2); + inline friend VDebug & operator<<(VDebug &os, const VPointF &o); + + friend inline VPointF operator-(const VPointF &p1, const VPointF &p2); + friend inline const VPointF operator*(const VPointF &, float); + friend inline const VPointF operator*(float, const VPointF &); + friend inline const VPointF operator/(const VPointF &, float); + friend inline const VPointF operator/(float, const VPointF &); + +private: + float mx{0}; + float my{0}; +}; + +inline bool fuzzyCompare(const VPointF &p1, const VPointF &p2) +{ + return (vCompare(p1.mx, p2.mx) && vCompare(p1.my, p2.my)); +} + +inline VPointF operator-(const VPointF &p1, const VPointF &p2) +{ + return {p1.mx - p2.mx, p1.my - p2.my}; +} + +inline const VPointF operator*(const VPointF &p, float c) +{ + return VPointF(p.mx * c, p.my * c); +} + +inline const VPointF operator*(float c, const VPointF &p) +{ + return VPointF(p.mx * c, p.my * c); +} + +inline const VPointF operator/(const VPointF &p, float c) +{ + return VPointF(p.mx / c, p.my / c); +} + +inline const VPointF operator/(float c, const VPointF &p) +{ + return VPointF(p.mx / c, p.my / c); +} + +inline VDebug &operator<<(VDebug &os, const VPointF &o) +{ + os << "{P " << o.x() << "," << o.y() << "}"; + return os; +} + +inline VPointF &VPointF::operator+=(const VPointF &p) noexcept +{ + mx += p.mx; + my += p.my; + return *this; +} + +inline VPointF &VPointF::operator-=(const VPointF &p) noexcept +{ + mx -= p.mx; + my -= p.my; + return *this; +} + +class VPoint { +public: + VPoint() = default; + constexpr inline VPoint(int x, int y) noexcept : mx(x), my(y) 
{} + constexpr inline int x() const noexcept { return mx; } + constexpr inline int y() const noexcept { return my; } + inline void setX(int x) { mx = x; } + inline void setY(int y) { my = y; } + inline VPoint & operator+=(const VPoint &p) noexcept; + inline VPoint & operator-=(const VPoint &p) noexcept; + constexpr inline bool operator==(const VPoint &o) const; + constexpr inline bool operator!=(const VPoint &o) const + { + return !(operator==(o)); + } + friend inline VPoint operator-(const VPoint &p1, const VPoint &p2); + inline friend VDebug &operator<<(VDebug &os, const VPoint &o); + +private: + int mx{0}; + int my{0}; +}; +inline VDebug &operator<<(VDebug &os, const VPoint &o) +{ + os << "{P " << o.x() << "," << o.y() << "}"; + return os; +} + +inline VPoint operator-(const VPoint &p1, const VPoint &p2) +{ + return {p1.mx - p2.mx, p1.my - p2.my}; +} + +constexpr inline bool VPoint::operator==(const VPoint &o) const +{ + return (mx == o.x() && my == o.y()); +} + +inline VPoint &VPoint::operator+=(const VPoint &p) noexcept +{ + mx += p.mx; + my += p.my; + return *this; +} + +inline VPoint &VPoint::operator-=(const VPoint &p) noexcept +{ + mx -= p.mx; + my -= p.my; + return *this; +} + +class VSize { +public: + VSize() = default; + constexpr inline VSize(int w, int h) noexcept : mw(w), mh(h) {} + bool empty() const {return (mw <= 0 || mh <= 0);} + constexpr inline int width() const noexcept { return mw; } + constexpr inline int height() const noexcept { return mh; } + inline void setWidth(int w) { mw = w; } + inline void setHeight(int h) { mh = h; } + inline VSize & operator+=(const VSize &p) noexcept; + inline VSize & operator-=(const VSize &p) noexcept; + constexpr inline bool operator==(const VSize &o) const; + constexpr inline bool operator!=(const VSize &o) const + { + return !(operator==(o)); + } + inline friend VDebug &operator<<(VDebug &os, const VSize &o); + +private: + int mw{0}; + int mh{0}; +}; +inline VDebug &operator<<(VDebug &os, const VSize &o) +{ + os << "{P " << o.width() << "," << o.height() << "}"; + return os; +} +constexpr inline bool VSize::operator==(const VSize &o) const +{ + return (mw == o.width() && mh == o.height()); +} + +inline VSize &VSize::operator+=(const VSize &p) noexcept +{ + mw += p.mw; + mh += p.mh; + return *this; +} + +inline VSize &VSize::operator-=(const VSize &p) noexcept +{ + mw -= p.mw; + mh -= p.mh; + return *this; +} + +V_END_NAMESPACE + +#endif // VPOINT_H diff --git a/vendor/github.com/Benau/go_rlottie/vector_vraster.cpp b/vendor/github.com/Benau/go_rlottie/vector_vraster.cpp new file mode 100644 index 00000000..656bc32c --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_vraster.cpp @@ -0,0 +1,563 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "vector_vraster.h"
+#include <climits>
+#include <cstring>
+#include <memory>
+#include "config.h"
+#include "vector_freetype_v_ft_raster.h"
+#include "vector_freetype_v_ft_stroker.h"
+#include "vector_vdebug.h"
+#include "vector_vmatrix.h"
+#include "vector_vpath.h"
+#include "vector_vrle.h"
+
+V_BEGIN_NAMESPACE
+
+template <typename T>
+class dyn_array {
+public:
+    explicit dyn_array(size_t size)
+        : mCapacity(size), mData(std::make_unique<T[]>(mCapacity))
+    {
+    }
+    void reserve(size_t size)
+    {
+        if (mCapacity > size) return;
+        mCapacity = size;
+        mData = std::make_unique<T[]>(mCapacity);
+    }
+    T *        data() const { return mData.get(); }
+    dyn_array &operator=(dyn_array &&) noexcept = delete;
+
+private:
+    size_t               mCapacity{0};
+    std::unique_ptr<T[]> mData{nullptr};
+};
+
+struct FTOutline {
+public:
+    void reset();
+    void grow(size_t, size_t);
+    void convert(const VPath &path);
+    void convert(CapStyle, JoinStyle, float, float);
+    void moveTo(const VPointF &pt);
+    void lineTo(const VPointF &pt);
+    void cubicTo(const VPointF &ctr1, const VPointF &ctr2, const VPointF end);
+    void close();
+    void end();
+    void transform(const VMatrix &m);
+    SW_FT_Pos TO_FT_COORD(float x)
+    {
+        return SW_FT_Pos(x * 64);
+    }  // to freetype 26.6 coordinate.
+    SW_FT_Outline           ft;
+    bool                    closed{false};
+    SW_FT_Stroker_LineCap   ftCap;
+    SW_FT_Stroker_LineJoin  ftJoin;
+    SW_FT_Fixed             ftWidth;
+    SW_FT_Fixed             ftMiterLimit;
+    dyn_array<SW_FT_Vector> mPointMemory{100};
+    dyn_array<char>         mTagMemory{100};
+    dyn_array<short>        mContourMemory{10};
+    dyn_array<char>         mContourFlagMemory{10};
+};
+
+void FTOutline::reset()
+{
+    ft.n_points = ft.n_contours = 0;
+    ft.flags = 0x0;
+}
+
+void FTOutline::grow(size_t points, size_t segments)
+{
+    reset();
+    mPointMemory.reserve(points + segments);
+    mTagMemory.reserve(points + segments);
+    mContourMemory.reserve(segments);
+    mContourFlagMemory.reserve(segments);
+
+    ft.points = mPointMemory.data();
+    ft.tags = mTagMemory.data();
+    ft.contours = mContourMemory.data();
+    ft.contours_flag = mContourFlagMemory.data();
+}
+
+void FTOutline::convert(const VPath &path)
+{
+    const std::vector<VPath::Element> &elements = path.elements();
+    const std::vector<VPointF> &       points = path.points();
+
+    grow(points.size(), path.segments());
+
+    size_t index = 0;
+    for (auto element : elements) {
+        switch (element) {
+        case VPath::Element::MoveTo:
+            moveTo(points[index]);
+            index++;
+            break;
+        case VPath::Element::LineTo:
+            lineTo(points[index]);
+            index++;
+            break;
+        case VPath::Element::CubicTo:
+            cubicTo(points[index], points[index + 1], points[index + 2]);
+            index = index + 3;
+            break;
+        case VPath::Element::Close:
+            close();
+            break;
+        }
+    }
+    end();
+}
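+// Worked example of the fixed-point mapping below: a stroke width of 3.0
+// becomes a pen radius of 1.5, i.e. 1.5 * 64 = 96 in 26.6 format, and a
+// miter limit of 4.0 becomes 4 * 65536 = 262144 in 16.16 format.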
+void FTOutline::convert(CapStyle cap, JoinStyle join, float width,
+                        float miterLimit)
+{
+    // map strokeWidth to freetype. It is used as the radius of the pen,
+    // not the diameter
+    width = width / 2.0f;
+    // convert to freetype co-ordinate
+    // IMP: stroker takes radius in 26.6 co-ordinate
+    ftWidth = SW_FT_Fixed(width * (1 << 6));
+    // IMP: stroker takes miterLimit in 16.16 co-ordinate
+    ftMiterLimit = SW_FT_Fixed(miterLimit * (1 << 16));
+
+    // map to freetype capstyle
+    switch (cap) {
+    case CapStyle::Square:
+        ftCap = SW_FT_STROKER_LINECAP_SQUARE;
+        break;
+    case CapStyle::Round:
+        ftCap = SW_FT_STROKER_LINECAP_ROUND;
+        break;
+    default:
+        ftCap = SW_FT_STROKER_LINECAP_BUTT;
+        break;
+    }
+    switch (join) {
+    case JoinStyle::Bevel:
+        ftJoin = SW_FT_STROKER_LINEJOIN_BEVEL;
+        break;
+    case JoinStyle::Round:
+        ftJoin = SW_FT_STROKER_LINEJOIN_ROUND;
+        break;
+    default:
+        ftJoin = SW_FT_STROKER_LINEJOIN_MITER_FIXED;
+        break;
+    }
+}
+
+void FTOutline::moveTo(const VPointF &pt)
+{
+    assert(ft.n_points <= SHRT_MAX - 1);
+
+    ft.points[ft.n_points].x = TO_FT_COORD(pt.x());
+    ft.points[ft.n_points].y = TO_FT_COORD(pt.y());
+    ft.tags[ft.n_points] = SW_FT_CURVE_TAG_ON;
+    if (ft.n_points) {
+        ft.contours[ft.n_contours] = ft.n_points - 1;
+        ft.n_contours++;
+    }
+    // mark the current contour as open
+    // will be updated if there is a close tag at the end.
+    ft.contours_flag[ft.n_contours] = 1;
+
+    ft.n_points++;
+}
+
+void FTOutline::lineTo(const VPointF &pt)
+{
+    assert(ft.n_points <= SHRT_MAX - 1);
+
+    ft.points[ft.n_points].x = TO_FT_COORD(pt.x());
+    ft.points[ft.n_points].y = TO_FT_COORD(pt.y());
+    ft.tags[ft.n_points] = SW_FT_CURVE_TAG_ON;
+    ft.n_points++;
+}
+
+void FTOutline::cubicTo(const VPointF &cp1, const VPointF &cp2,
+                        const VPointF ep)
+{
+    assert(ft.n_points <= SHRT_MAX - 3);
+
+    ft.points[ft.n_points].x = TO_FT_COORD(cp1.x());
+    ft.points[ft.n_points].y = TO_FT_COORD(cp1.y());
+    ft.tags[ft.n_points] = SW_FT_CURVE_TAG_CUBIC;
+    ft.n_points++;
+
+    ft.points[ft.n_points].x = TO_FT_COORD(cp2.x());
+    ft.points[ft.n_points].y = TO_FT_COORD(cp2.y());
+    ft.tags[ft.n_points] = SW_FT_CURVE_TAG_CUBIC;
+    ft.n_points++;
+
+    ft.points[ft.n_points].x = TO_FT_COORD(ep.x());
+    ft.points[ft.n_points].y = TO_FT_COORD(ep.y());
+    ft.tags[ft.n_points] = SW_FT_CURVE_TAG_ON;
+    ft.n_points++;
+}
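+// close() flips the current contour's flag to "closed" and appends a copy of
+// the contour's first point, so the rasterizer and the stroker always see an
+// explicit closing edge.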
+void FTOutline::close()
+{
+    assert(ft.n_points <= SHRT_MAX - 1);
+
+    // mark the contour as a close path.
+    ft.contours_flag[ft.n_contours] = 0;
+
+    int index;
+    if (ft.n_contours) {
+        index = ft.contours[ft.n_contours - 1] + 1;
+    } else {
+        index = 0;
+    }
+
+    // make sure at least 1 point exists in the segment.
+    if (ft.n_points == index) {
+        closed = false;
+        return;
+    }
+
+    ft.points[ft.n_points].x = ft.points[index].x;
+    ft.points[ft.n_points].y = ft.points[index].y;
+    ft.tags[ft.n_points] = SW_FT_CURVE_TAG_ON;
+    ft.n_points++;
+}
+
+void FTOutline::end()
+{
+    assert(ft.n_contours <= SHRT_MAX - 1);
+
+    if (ft.n_points) {
+        ft.contours[ft.n_contours] = ft.n_points - 1;
+        ft.n_contours++;
+    }
+}
+
+static void rleGenerationCb(int count, const SW_FT_Span *spans, void *user)
+{
+    VRle *rle = static_cast<VRle *>(user);
+    auto *rleSpan = reinterpret_cast<const VRle::Span *>(spans);
+    rle->addSpan(rleSpan, count);
+}
+
+static void bboxCb(int x, int y, int w, int h, void *user)
+{
+    VRle *rle = static_cast<VRle *>(user);
+    rle->setBoundingRect({x, y, w, h});
+}
+
+class SharedRle {
+public:
+    SharedRle() = default;
+    VRle &unsafe() { return _rle; }
+    void  notify()
+    {
+        {
+            std::lock_guard<std::mutex> lock(_mutex);
+            _ready = true;
+        }
+        _cv.notify_one();
+    }
+    void wait()
+    {
+        if (!_pending) return;
+
+        {
+            std::unique_lock<std::mutex> lock(_mutex);
+            while (!_ready) _cv.wait(lock);
+        }
+
+        _pending = false;
+    }
+
+    VRle &get()
+    {
+        wait();
+        return _rle;
+    }
+
+    void reset()
+    {
+        wait();
+        _ready = false;
+        _pending = true;
+    }
+
+private:
+    VRle                    _rle;
+    std::mutex              _mutex;
+    std::condition_variable _cv;
+    bool                    _ready{true};
+    bool                    _pending{false};
+};
+
+struct VRleTask {
+    SharedRle mRle;
+    VPath     mPath;
+    float     mStrokeWidth;
+    float     mMiterLimit;
+    VRect     mClip;
+    FillRule  mFillRule;
+    CapStyle  mCap;
+    JoinStyle mJoin;
+    bool      mGenerateStroke;
+
+    VRle &rle() { return mRle.get(); }
+
+    void update(VPath path, FillRule fillRule, const VRect &clip)
+    {
+        mRle.reset();
+        mPath = std::move(path);
+        mFillRule = fillRule;
+        mClip = clip;
+        mGenerateStroke = false;
+    }
+
+    void update(VPath path, CapStyle cap, JoinStyle join, float width,
+                float miterLimit, const VRect &clip)
+    {
+        mRle.reset();
+        mPath = std::move(path);
+        mCap = cap;
+        mJoin = join;
+        mStrokeWidth = width;
+        mMiterLimit = miterLimit;
+        mClip = clip;
+        mGenerateStroke = true;
+    }
+
+    void render(FTOutline &outRef)
+    {
+        SW_FT_Raster_Params params;
+
+        mRle.unsafe().reset();
+
+        params.flags = SW_FT_RASTER_FLAG_DIRECT | SW_FT_RASTER_FLAG_AA;
+        params.gray_spans = &rleGenerationCb;
+        params.bbox_cb = &bboxCb;
+        params.user = &mRle.unsafe();
+        params.source = &outRef.ft;
+
+        if (!mClip.empty()) {
+            params.flags |= SW_FT_RASTER_FLAG_CLIP;
+
+            params.clip_box.xMin = mClip.left();
+            params.clip_box.yMin = mClip.top();
+            params.clip_box.xMax = mClip.right();
+            params.clip_box.yMax = mClip.bottom();
+        }
+        // compute rle
+        sw_ft_grays_raster.raster_render(nullptr, &params);
+    }
+
+    void operator()(FTOutline &outRef, SW_FT_Stroker &stroker)
+    {
+        if (mPath.points().size() > SHRT_MAX ||
+            mPath.points().size() + mPath.segments() > SHRT_MAX) {
+            return;
+        }
+
+        if (mGenerateStroke) {  // Stroke Task
+            outRef.convert(mPath);
+            outRef.convert(mCap, mJoin, mStrokeWidth, mMiterLimit);
+
+            uint points, contors;
+
+            SW_FT_Stroker_Set(stroker, outRef.ftWidth, outRef.ftCap,
+                              outRef.ftJoin, outRef.ftMiterLimit);
+            SW_FT_Stroker_ParseOutline(stroker, &outRef.ft);
+            SW_FT_Stroker_GetCounts(stroker, &points, &contors);
+
+            outRef.grow(points, contors);
+
+            SW_FT_Stroker_Export(stroker, &outRef.ft);
+
+        } else {  // Fill Task
+            outRef.convert(mPath);
+            int fillRuleFlag = SW_FT_OUTLINE_NONE;
+            switch (mFillRule) {
+            case FillRule::EvenOdd:
+                fillRuleFlag = SW_FT_OUTLINE_EVEN_ODD_FILL;
+                break;
+            default:
+                fillRuleFlag = SW_FT_OUTLINE_NONE;
+                break;
+            }
+            outRef.ft.flags = fillRuleFlag;
+        }
+
+        render(outRef);
+
+        mPath = VPath();
+
+        mRle.notify();
+    }
+};
+
+using VTask = std::shared_ptr<VRleTask>;
+
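+// Two scheduler variants follow: with LOTTIE_THREAD_SUPPORT, one task queue
+// per hardware thread with work stealing (each worker scans all queues with
+// try_pop() before blocking on its own pop()); otherwise a synchronous
+// fallback that runs every task inline on the caller's thread.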
+#ifdef LOTTIE_THREAD_SUPPORT
+
+#include <thread>
+#include "vector_vtaskqueue.h"
+
+class RleTaskScheduler {
+    const unsigned _count{std::thread::hardware_concurrency()};
+
+    std::vector<std::thread>      _threads;
+    std::vector<TaskQueue<VTask>> _q{_count};
+    std::atomic<unsigned>         _index{0};
+
+    void run(unsigned i)
+    {
+        /*
+         * initialize per-thread objects.
+         */
+        FTOutline     outlineRef;
+        SW_FT_Stroker stroker;
+        SW_FT_Stroker_New(&stroker);
+
+        // Task Loop
+        VTask task;
+        while (true) {
+            bool success = false;
+
+            for (unsigned n = 0; n != _count * 2; ++n) {
+                if (_q[(i + n) % _count].try_pop(task)) {
+                    success = true;
+                    break;
+                }
+            }
+
+            if (!success && !_q[i].pop(task)) break;
+
+            (*task)(outlineRef, stroker);
+        }
+
+        // cleanup
+        SW_FT_Stroker_Done(stroker);
+    }
+
+    RleTaskScheduler()
+    {
+        for (unsigned n = 0; n != _count; ++n) {
+            _threads.emplace_back([&, n] { run(n); });
+        }
+    }
+
+public:
+    static RleTaskScheduler &instance()
+    {
+        static RleTaskScheduler singleton;
+        return singleton;
+    }
+
+    ~RleTaskScheduler()
+    {
+        for (auto &e : _q) e.done();
+
+        for (auto &e : _threads) e.join();
+    }
+
+    void process(VTask task)
+    {
+        auto i = _index++;
+
+        for (unsigned n = 0; n != _count; ++n) {
+            if (_q[(i + n) % _count].try_push(std::move(task))) return;
+        }
+
+        if (_count > 0) {
+            _q[i % _count].push(std::move(task));
+        }
+    }
+};
+
+#else
+
+class RleTaskScheduler {
+public:
+    FTOutline     outlineRef{};
+    SW_FT_Stroker stroker;
+
+public:
+    static RleTaskScheduler &instance()
+    {
+        static RleTaskScheduler singleton;
+        return singleton;
+    }
+
+    RleTaskScheduler() { SW_FT_Stroker_New(&stroker); }
+
+    ~RleTaskScheduler() { SW_FT_Stroker_Done(stroker); }
+
+    void process(VTask task) { (*task)(outlineRef, stroker); }
+};
+#endif
+
+struct VRasterizer::VRasterizerImpl {
+    VRleTask mTask;
+
+    VRle &    rle() { return mTask.rle(); }
+    VRleTask &task() { return mTask; }
+};
+
+VRle VRasterizer::rle()
+{
+    if (!d) return VRle();
+    return d->rle();
+}
+
+void VRasterizer::init()
+{
+    if (!d) d = std::make_shared<VRasterizerImpl>();
+}
+
+void VRasterizer::updateRequest()
+{
+    VTask taskObj = VTask(d, &d->task());
+    RleTaskScheduler::instance().process(std::move(taskObj));
+}
+
+void VRasterizer::rasterize(VPath path, FillRule fillRule, const VRect &clip)
+{
+    init();
+    if (path.empty()) {
+        d->rle().reset();
+        return;
+    }
+    d->task().update(std::move(path), fillRule, clip);
+    updateRequest();
+}
+
+void VRasterizer::rasterize(VPath path, CapStyle cap, JoinStyle join,
+                            float width, float miterLimit, const VRect &clip)
+{
+    init();
+    if (path.empty() || vIsZero(width)) {
+        d->rle().reset();
+        return;
+    }
+    d->task().update(std::move(path), cap, join, width, miterLimit, clip);
+    updateRequest();
+}
+
+V_END_NAMESPACE
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VRASTER_H
+#define VRASTER_H
+#include <memory>
+#include "vector_vglobal.h"
+#include "vector_vrect.h"
+
+V_BEGIN_NAMESPACE
+
+class VPath;
+class VRle;
+
+class VRasterizer
+{
+public:
+    void rasterize(VPath path, FillRule fillRule = FillRule::Winding, const VRect &clip = VRect());
+    void rasterize(VPath path, CapStyle cap, JoinStyle join, float width,
+                   float miterLimit, const VRect &clip = VRect());
+    VRle rle();
+private:
+    struct VRasterizerImpl;
+    void init();
+    void updateRequest();
+    std::shared_ptr<VRasterizerImpl> d{nullptr};
+};
+
+V_END_NAMESPACE
+
+#endif // VRASTER_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vrect.cpp b/vendor/github.com/Benau/go_rlottie/vector_vrect.cpp
new file mode 100644
index 00000000..38d29952
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vrect.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "vector_vrect.h"
+#include <algorithm>
+
+VRect VRect::operator&(const VRect &r) const
+{
+    if (empty()) return VRect();
+
+    int l1 = x1;
+    int r1 = x1;
+    if (x2 - x1 + 1 < 0)
+        l1 = x2;
+    else
+        r1 = x2;
+
+    int l2 = r.x1;
+    int r2 = r.x1;
+    if (r.x2 - r.x1 + 1 < 0)
+        l2 = r.x2;
+    else
+        r2 = r.x2;
+
+    if (l1 > r2 || l2 > r1) return VRect();
+
+    int t1 = y1;
+    int b1 = y1;
+    if (y2 - y1 + 1 < 0)
+        t1 = y2;
+    else
+        b1 = y2;
+
+    int t2 = r.y1;
+    int b2 = r.y1;
+    if (r.y2 - r.y1 + 1 < 0)
+        t2 = r.y2;
+    else
+        b2 = r.y2;
+
+    if (t1 > b2 || t2 > b1) return VRect();
+
+    VRect tmp;
+    tmp.x1 = std::max(l1, l2);
+    tmp.x2 = std::min(r1, r2);
+    tmp.y1 = std::max(t1, t2);
+    tmp.y2 = std::min(b1, b2);
+    return tmp;
+}
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vrect.h b/vendor/github.com/Benau/go_rlottie/vector_vrect.h
new file mode 100644
index 00000000..590e2174
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vrect.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */ + +#ifndef VRECT_H +#define VRECT_H +#include "vector_vglobal.h" +#include "vector_vpoint.h" + +V_BEGIN_NAMESPACE +class VRectF; + +class VRect { +public: + VRect() = default; + VRect(int x, int y, int w, int h):x1(x),y1(y),x2(x+w),y2(y+h){} + explicit VRect(VPoint pt, VSize sz):VRect(pt.x(), pt.y(), sz.width(), sz.height()){} + operator VRectF() const; + V_CONSTEXPR bool empty() const {return x1 >= x2 || y1 >= y2;} + V_CONSTEXPR int left() const {return x1;} + V_CONSTEXPR int top() const {return y1;} + V_CONSTEXPR int right() const {return x2;} + V_CONSTEXPR int bottom() const {return y2;} + V_CONSTEXPR int width() const {return x2-x1;} + V_CONSTEXPR int height() const {return y2-y1;} + V_CONSTEXPR int x() const {return x1;} + V_CONSTEXPR int y() const {return y1;} + VSize size() const {return {width(), height()};} + void setLeft(int l) { x1 = l; } + void setTop(int t) { y1 = t; } + void setRight(int r) { x2 = r; } + void setBottom(int b) { y2 = b; } + void setWidth(int w) { x2 = x1 + w; } + void setHeight(int h) { y2 = y1 + h; } + VRect translated(int dx, int dy) const; + void translate(int dx, int dy); + bool contains(const VRect &r, bool proper = false) const; + bool intersects(const VRect &r); + friend V_CONSTEXPR inline bool operator==(const VRect &, + const VRect &) noexcept; + friend V_CONSTEXPR inline bool operator!=(const VRect &, + const VRect &) noexcept; + friend VDebug & operator<<(VDebug &os, const VRect &o); + + VRect intersected(const VRect &r) const; + VRect operator&(const VRect &r) const; + +private: + int x1{0}; + int y1{0}; + int x2{0}; + int y2{0}; +}; + +inline VRect VRect::intersected(const VRect &r) const +{ + return *this & r; +} + +inline bool VRect::intersects(const VRect &r) +{ + return (right() > r.left() && left() < r.right() && bottom() > r.top() && + top() < r.bottom()); +} + +inline VDebug &operator<<(VDebug &os, const VRect &o) +{ + os << "{R " << o.x() << "," << o.y() << "," << o.width() << "," + << o.height() << "}"; + return os; +} +V_CONSTEXPR inline bool operator==(const VRect &r1, const VRect &r2) noexcept +{ + return r1.x1 == r2.x1 && r1.x2 == r2.x2 && r1.y1 == r2.y1 && r1.y2 == r2.y2; +} + +V_CONSTEXPR inline bool operator!=(const VRect &r1, const VRect &r2) noexcept +{ + return r1.x1 != r2.x1 || r1.x2 != r2.x2 || r1.y1 != r2.y1 || r1.y2 != r2.y2; +} + +inline VRect VRect::translated(int dx, int dy) const +{ + return {x1 + dx, y1 + dy, x2 - x1, y2 - y1}; +} + +inline void VRect::translate(int dx, int dy) +{ + x1 += dx; + y1 += dy; + x2 += dx; + y2 += dy; +} + +inline bool VRect::contains(const VRect &r, bool proper) const +{ + return proper ? 
+ ((x1 < r.x1) && (x2 > r.x2) && (y1 < r.y1) && (y2 > r.y2)) : + ((x1 <= r.x1) && (x2 >= r.x2) && (y1 <= r.y1) && (y2 >= r.y2)); +} + +class VRectF { +public: + VRectF() = default; + + VRectF(double x, double y, double w, double h): + x1(float(x)),y1(float(y)), + x2(float(x+w)),y2(float(y+h)){} + operator VRect() const { + return {int(left()), int(right()), int(width()), int(height())}; + } + + V_CONSTEXPR bool empty() const {return x1 >= x2 || y1 >= y2;} + V_CONSTEXPR float left() const {return x1;} + V_CONSTEXPR float top() const {return y1;} + V_CONSTEXPR float right() const {return x2;} + V_CONSTEXPR float bottom() const {return y2;} + V_CONSTEXPR float width() const {return x2-x1;} + V_CONSTEXPR float height() const {return y2-y1;} + V_CONSTEXPR float x() const {return x1;} + V_CONSTEXPR float y() const {return y1;} + V_CONSTEXPR inline VPointF center() const + { + return {x1 + (x2 - x1) / 2.f, y1 + (y2 - y1) / 2.f}; + } + void setLeft(float l) { x1 = l; } + void setTop(float t) { y1 = t; } + void setRight(float r) { x2 = r; } + void setBottom(float b) { y2 = b; } + void setWidth(float w) { x2 = x1 + w; } + void setHeight(float h) { y2 = y1 + h; } + void translate(float dx, float dy) + { + x1 += dx; + y1 += dy; + x2 += dx; + y2 += dy; + } + +private: + float x1{0}; + float y1{0}; + float x2{0}; + float y2{0}; +}; + +inline VRect::operator VRectF() const +{ + return {double(left()), double(right()), double(width()), double(height())}; +} + +V_END_NAMESPACE + +#endif // VRECT_H diff --git a/vendor/github.com/Benau/go_rlottie/vector_vrle.cpp b/vendor/github.com/Benau/go_rlottie/vector_vrle.cpp new file mode 100644 index 00000000..57c7585f --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_vrle.cpp @@ -0,0 +1,748 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in + all + * copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+#include "vector_vrle.h"
+#include "vector_vrect.h"
+#include <algorithm>
+#include <array>
+#include <cassert>
+#include <cstdlib>
+#include <cstring>
+#include <limits>
+#include <vector>
+#include "vector_vdebug.h"
+#include "vector_vglobal.h"
+
+V_BEGIN_NAMESPACE
+
+using Result = std::array<VRle::Span, 255>;
+using rle_view = VRle::View;
+static size_t _opGeneric(rle_view &a, rle_view &b, Result &result,
+                         VRle::Data::Op op);
+static size_t _opIntersect(const VRect &, rle_view &, Result &);
+static size_t _opIntersect(rle_view &, rle_view &, Result &);
+
+static inline uchar divBy255(int x)
+{
+    return (x + (x >> 8) + 0x80) >> 8;
+}
+
+inline static void copy(const VRle::Span *span, size_t count,
+                        std::vector<VRle::Span> &v)
+{
+    // make sure enough memory available
+    if (v.capacity() < v.size() + count) v.reserve(v.size() + count);
+    std::copy(span, span + count, back_inserter(v));
+}
+
+void VRle::Data::addSpan(const VRle::Span *span, size_t count)
+{
+    copy(span, count, mSpans);
+    mBboxDirty = true;
+}
+
+VRect VRle::Data::bbox() const
+{
+    updateBbox();
+    return mBbox;
+}
+
+void VRle::Data::setBbox(const VRect &bbox) const
+{
+    mBboxDirty = false;
+    mBbox = bbox;
+}
+
+void VRle::Data::reset()
+{
+    mSpans.clear();
+    mBbox = VRect();
+    mOffset = VPoint();
+    mBboxDirty = false;
+}
+
+void VRle::Data::clone(const VRle::Data &o)
+{
+    *this = o;
+}
+
+void VRle::Data::translate(const VPoint &p)
+{
+    // take care of last offset if applied
+    mOffset = p - mOffset;
+    int x = mOffset.x();
+    int y = mOffset.y();
+    for (auto &i : mSpans) {
+        i.x = i.x + x;
+        i.y = i.y + y;
+    }
+    updateBbox();
+    mBbox.translate(mOffset.x(), mOffset.y());
+}
+
+void VRle::Data::addRect(const VRect &rect)
+{
+    int x = rect.left();
+    int y = rect.top();
+    int width = rect.width();
+    int height = rect.height();
+
+    mSpans.reserve(size_t(height));
+
+    VRle::Span span;
+    for (int i = 0; i < height; i++) {
+        span.x = x;
+        span.y = y + i;
+        span.len = width;
+        span.coverage = 255;
+        mSpans.push_back(span);
+    }
+    mBbox = rect;
+}
+
+void VRle::Data::updateBbox() const
+{
+    if (!mBboxDirty) return;
+
+    mBboxDirty = false;
+
+    int               l = std::numeric_limits<int>::max();
+    const VRle::Span *span = mSpans.data();
+
+    mBbox = VRect();
+    size_t sz = mSpans.size();
+    if (sz) {
+        int t = span[0].y;
+        int b = span[sz - 1].y;
+        int r = 0;
+        for (size_t i = 0; i < sz; i++) {
+            if (span[i].x < l) l = span[i].x;
+            if (span[i].x + span[i].len > r) r = span[i].x + span[i].len;
+        }
+        mBbox = VRect(l, t, r - l, b - t + 1);
+    }
+}
+
+void VRle::Data::operator*=(uchar alpha)
+{
+    for (auto &i : mSpans) {
+        i.coverage = divBy255(i.coverage * alpha);
+    }
+}
+
+void VRle::Data::opIntersect(const VRect &r, VRle::VRleSpanCb cb,
+                             void *userData) const
+{
+    if (empty()) return;
+
+    if (r.contains(bbox())) {
+        cb(mSpans.size(), mSpans.data(), userData);
+        return;
+    }
+
+    auto   obj = view();
+    Result result;
+    // run till all the spans are processed
+    while (obj.size()) {
+        auto count = _opIntersect(r, obj, result);
+        if (count) cb(count, result.data(), userData);
+    }
+}
+
+// res = a - b;
+void VRle::Data::opSubstract(const VRle::Data &aObj, const VRle::Data &bObj)
+{
+    // if two rle are disjoint
+    if (!aObj.bbox().intersects(bObj.bbox())) {
+        mSpans = aObj.mSpans;
+    } else {
+        auto a = aObj.view();
+        auto b = bObj.view();
+
+        auto aPtr = a.data();
+        auto aEnd = a.data() + a.size();
+        auto bPtr = b.data();
+        auto bEnd = b.data() + b.size();
+
+        // 1. forward a till it intersects with b
+        while ((aPtr != aEnd) && (aPtr->y < bPtr->y)) aPtr++;
+        auto count = aPtr - a.data();
+        if (count) copy(a.data(), count, mSpans);
+
+        // 2.
forward b till it intersects with a + if (aPtr != aEnd) + while ((bPtr != bEnd) && (bPtr->y < aPtr->y)) bPtr++; + + // update a and b object + a = {aPtr, size_t(aEnd - aPtr)}; + b = {bPtr, size_t(bEnd - bPtr)}; + + // 3. calculate the intersect region + Result result; + + // run till all the spans are processed + while (a.size() && b.size()) { + auto count = _opGeneric(a, b, result, Op::Substract); + if (count) copy(result.data(), count, mSpans); + } + + // 4. copy the rest of a + if (a.size()) copy(a.data(), a.size(), mSpans); + } + + mBboxDirty = true; +} + +void VRle::Data::opGeneric(const VRle::Data &aObj, const VRle::Data &bObj, + Op op) +{ + // This routine assumes, obj1(span_y) < obj2(span_y). + + auto a = aObj.view(); + auto b = bObj.view(); + + // reserve some space for the result vector. + mSpans.reserve(a.size() + b.size()); + + // if two rle are disjoint + if (!aObj.bbox().intersects(bObj.bbox())) { + if (a.data()[0].y < b.data()[0].y) { + copy(a.data(), a.size(), mSpans); + copy(b.data(), b.size(), mSpans); + } else { + copy(b.data(), b.size(), mSpans); + copy(a.data(), a.size(), mSpans); + } + } else { + auto aPtr = a.data(); + auto aEnd = a.data() + a.size(); + auto bPtr = b.data(); + auto bEnd = b.data() + b.size(); + + // 1. forward a till it intersects with b + while ((aPtr != aEnd) && (aPtr->y < bPtr->y)) aPtr++; + + auto count = aPtr - a.data(); + if (count) copy(a.data(), count, mSpans); + + // 2. forward b till it intersects with a + if (aPtr != aEnd) + while ((bPtr != bEnd) && (bPtr->y < aPtr->y)) bPtr++; + + count = bPtr - b.data(); + if (count) copy(b.data(), count, mSpans); + + // update a and b object + a = {aPtr, size_t(aEnd - aPtr)}; + b = {bPtr, size_t(bEnd - bPtr)}; + + // 3. calculate the intersect region + Result result; + + // run till all the spans are processed + while (a.size() && b.size()) { + auto count = _opGeneric(a, b, result, op); + if (count) copy(result.data(), count, mSpans); + } + // 3. copy the rest + if (b.size()) copy(b.data(), b.size(), mSpans); + if (a.size()) copy(a.data(), a.size(), mSpans); + } + + mBboxDirty = true; +} + +static inline V_ALWAYS_INLINE void _opIntersectPrepare(VRle::View &a, + VRle::View &b) +{ + auto aPtr = a.data(); + auto aEnd = a.data() + a.size(); + auto bPtr = b.data(); + auto bEnd = b.data() + b.size(); + + // 1. advance a till it intersects with b + while ((aPtr != aEnd) && (aPtr->y < bPtr->y)) aPtr++; + + // 2. 
advance b till it intersects with a + if (aPtr != aEnd) + while ((bPtr != bEnd) && (bPtr->y < aPtr->y)) bPtr++; + + // update a and b object + a = {aPtr, size_t(aEnd - aPtr)}; + b = {bPtr, size_t(bEnd - bPtr)}; +} + +void VRle::Data::opIntersect(VRle::View a, VRle::View b) +{ + _opIntersectPrepare(a, b); + Result result; + while (a.size()) { + auto count = _opIntersect(a, b, result); + if (count) copy(result.data(), count, mSpans); + } + + updateBbox(); +} + +static void _opIntersect(rle_view a, rle_view b, VRle::VRleSpanCb cb, + void *userData) +{ + if (!cb) return; + + _opIntersectPrepare(a, b); + Result result; + while (a.size()) { + auto count = _opIntersect(a, b, result); + if (count) cb(count, result.data(), userData); + } +} + +/* + * This function will clip a rle list with another rle object + * tmp_clip : The rle list that will be use to clip the rle + * tmp_obj : holds the list of spans that has to be clipped + * result : will hold the result after the processing + * NOTE: if the algorithm runs out of the result buffer list + * it will stop and update the tmp_obj with the span list + * that are yet to be processed as well as the tpm_clip object + * with the unprocessed clip spans. + */ + +static size_t _opIntersect(rle_view &obj, rle_view &clip, Result &result) +{ + auto out = result.data(); + auto available = result.max_size(); + auto spans = obj.data(); + auto end = obj.data() + obj.size(); + auto clipSpans = clip.data(); + auto clipEnd = clip.data() + clip.size(); + int sx1, sx2, cx1, cx2, x, len; + + while (available && spans < end) { + if (clipSpans >= clipEnd) { + spans = end; + break; + } + if (clipSpans->y > spans->y) { + ++spans; + continue; + } + if (spans->y != clipSpans->y) { + ++clipSpans; + continue; + } + // assert(spans->y == (clipSpans->y + clip_offset_y)); + sx1 = spans->x; + sx2 = sx1 + spans->len; + cx1 = clipSpans->x; + cx2 = cx1 + clipSpans->len; + + if (cx1 < sx1 && cx2 < sx1) { + ++clipSpans; + continue; + } else if (sx1 < cx1 && sx2 < cx1) { + ++spans; + continue; + } + x = std::max(sx1, cx1); + len = std::min(sx2, cx2) - x; + if (len) { + out->x = std::max(sx1, cx1); + out->len = (std::min(sx2, cx2) - out->x); + out->y = spans->y; + out->coverage = divBy255(spans->coverage * clipSpans->coverage); + ++out; + --available; + } + if (sx2 < cx2) { + ++spans; + } else { + ++clipSpans; + } + } + + // update the obj view yet to be processed + obj = {spans, size_t(end - spans)}; + + // update the clip view yet to be processed + clip = {clipSpans, size_t(clipEnd - clipSpans)}; + + return result.max_size() - available; +} + +/* + * This function will clip a rle list with a given rect + * clip : The clip rect that will be use to clip the rle + * tmp_obj : holds the list of spans that has to be clipped + * result : will hold the result after the processing + * NOTE: if the algorithm runs out of the result buffer list + * it will stop and update the tmp_obj with the span list + * that are yet to be processed + */ +static size_t _opIntersect(const VRect &clip, rle_view &obj, Result &result) +{ + auto out = result.data(); + auto available = result.max_size(); + auto ptr = obj.data(); + auto end = obj.data() + obj.size(); + + const auto minx = clip.left(); + const auto miny = clip.top(); + const auto maxx = clip.right() - 1; + const auto maxy = clip.bottom() - 1; + + while (available && ptr < end) { + const auto &span = *ptr; + if (span.y > maxy) { + ptr = end; // update spans so that we can breakout + break; + } + if (span.y < miny || span.x > maxx || span.x + span.len 
<= minx) {
+            ++ptr;
+            continue;
+        }
+        if (span.x < minx) {
+            out->len = std::min(span.len - (minx - span.x), maxx - minx + 1);
+            out->x = minx;
+        } else {
+            out->x = span.x;
+            out->len = std::min(span.len, ushort(maxx - span.x + 1));
+        }
+        if (out->len != 0) {
+            out->y = span.y;
+            out->coverage = span.coverage;
+            ++out;
+            --available;
+        }
+        ++ptr;
+    }
+
+    // update the span list that yet to be processed
+    obj = {ptr, size_t(end - ptr)};
+
+    return result.max_size() - available;
+}
+
+static void blitXor(VRle::Span *spans, int count, uchar *buffer, int offsetX)
+{
+    while (count--) {
+        int    x = spans->x + offsetX;
+        int    l = spans->len;
+        uchar *ptr = buffer + x;
+        while (l--) {
+            int da = *ptr;
+            *ptr = divBy255((255 - spans->coverage) * (da) +
+                            spans->coverage * (255 - da));
+            ptr++;
+        }
+        spans++;
+    }
+}
+
+static void blitDestinationOut(VRle::Span *spans, int count, uchar *buffer,
+                               int offsetX)
+{
+    while (count--) {
+        int    x = spans->x + offsetX;
+        int    l = spans->len;
+        uchar *ptr = buffer + x;
+        while (l--) {
+            *ptr = divBy255((255 - spans->coverage) * (*ptr));
+            ptr++;
+        }
+        spans++;
+    }
+}
+
+static void blitSrcOver(VRle::Span *spans, int count, uchar *buffer,
+                        int offsetX)
+{
+    while (count--) {
+        int    x = spans->x + offsetX;
+        int    l = spans->len;
+        uchar *ptr = buffer + x;
+        while (l--) {
+            *ptr = spans->coverage + divBy255((255 - spans->coverage) * (*ptr));
+            ptr++;
+        }
+        spans++;
+    }
+}
+
+void blitSrc(VRle::Span *spans, int count, uchar *buffer, int offsetX)
+{
+    while (count--) {
+        int    x = spans->x + offsetX;
+        int    l = spans->len;
+        uchar *ptr = buffer + x;
+        while (l--) {
+            *ptr = std::max(spans->coverage, *ptr);
+            ptr++;
+        }
+        spans++;
+    }
+}
+
+size_t bufferToRle(uchar *buffer, int size, int offsetX, int y, VRle::Span *out)
+{
+    size_t count = 0;
+    uchar  value = buffer[0];
+    int    curIndex = 0;
+
+    // size = offsetX < 0 ? size + offsetX : size;
+    for (int i = 0; i < size; i++) {
+        uchar curValue = buffer[0];
+        if (value != curValue) {
+            if (value) {
+                out->y = y;
+                out->x = offsetX + curIndex;
+                out->len = i - curIndex;
+                out->coverage = value;
+                out++;
+                count++;
+            }
+            curIndex = i;
+            value = curValue;
+        }
+        buffer++;
+    }
+    if (value) {
+        out->y = y;
+        out->x = offsetX + curIndex;
+        out->len = size - curIndex;
+        out->coverage = value;
+        count++;
+    }
+    return count;
+}
+
+struct SpanMerger {
+    explicit SpanMerger(VRle::Data::Op op)
+    {
+        switch (op) {
+        case VRle::Data::Op::Add:
+            _blitter = &blitSrcOver;
+            break;
+        case VRle::Data::Op::Xor:
+            _blitter = &blitXor;
+            break;
+        case VRle::Data::Op::Substract:
+            _blitter = &blitDestinationOut;
+            break;
+        }
+    }
+    using blitter = void (*)(VRle::Span *, int, uchar *, int);
+    blitter _blitter;
+    std::array<VRle::Span, 256> _result;
+    std::array<uchar, 1024>     _buffer;
+    VRle::Span *                _aStart{nullptr};
+    VRle::Span *                _bStart{nullptr};
+
+    void revert(VRle::Span *&aPtr, VRle::Span *&bPtr)
+    {
+        aPtr = _aStart;
+        bPtr = _bStart;
+    }
+    VRle::Span *data() { return _result.data(); }
+    size_t merge(VRle::Span *&aPtr, const VRle::Span *aEnd, VRle::Span *&bPtr,
+                 const VRle::Span *bEnd);
+};
+
+size_t SpanMerger::merge(VRle::Span *&aPtr, const VRle::Span *aEnd,
+                         VRle::Span *&bPtr, const VRle::Span *bEnd)
+{
+    assert(aPtr->y == bPtr->y);
+
+    _aStart = aPtr;
+    _bStart = bPtr;
+    int lb = std::min(aPtr->x, bPtr->x);
+    int y = aPtr->y;
+
+    while (aPtr < aEnd && aPtr->y == y) aPtr++;
+    while (bPtr < bEnd && bPtr->y == y) bPtr++;
+
+    int ub = std::max((aPtr - 1)->x + (aPtr - 1)->len,
+                      (bPtr - 1)->x + (bPtr - 1)->len);
+    int length = (lb < 0) ?
ub + lb : ub - lb; + + if (length <= 0 || size_t(length) >= _buffer.max_size()) { + // can't handle merge . skip + return 0; + } + + // clear buffer + memset(_buffer.data(), 0, length); + + // blit a to buffer + blitSrc(_aStart, aPtr - _aStart, _buffer.data(), -lb); + + // blit b to buffer + _blitter(_bStart, bPtr - _bStart, _buffer.data(), -lb); + + // convert buffer to span + return bufferToRle(_buffer.data(), length, lb, y, _result.data()); +} + +static size_t _opGeneric(rle_view &a, rle_view &b, Result &result, + VRle::Data::Op op) +{ + SpanMerger merger{op}; + + auto out = result.data(); + size_t available = result.max_size(); + auto aPtr = a.data(); + auto aEnd = a.data() + a.size(); + auto bPtr = b.data(); + auto bEnd = b.data() + b.size(); + + // only logic change for substract operation. + const bool keep = op != (VRle::Data::Op::Substract); + + while (available && aPtr < aEnd && bPtr < bEnd) { + if (aPtr->y < bPtr->y) { + *out++ = *aPtr++; + available--; + } else if (bPtr->y < aPtr->y) { + if (keep) { + *out++ = *bPtr; + available--; + } + bPtr++; + } else { // same y + auto count = merger.merge(aPtr, aEnd, bPtr, bEnd); + if (available >= count) { + if (count) { + memcpy(out, merger.data(), count * sizeof(VRle::Span)); + out += count; + available -= count; + } + } else { + // not enough space try next time. + merger.revert(aPtr, bPtr); + break; + } + } + } + // update the span list that yet to be processed + a = {aPtr, size_t(aEnd - aPtr)}; + b = {bPtr, size_t(bEnd - bPtr)}; + + return result.max_size() - available; +} + +/* + * this api makes use of thread_local temporary + * buffer to avoid creating intermediate temporary rle buffer + * the scratch buffer object will grow its size on demand + * so that future call won't need any more memory allocation. + * this function is thread safe as it uses thread_local variable + * which is unique per thread. 
+ */ +static vthread_local VRle::Data Scratch_Object; + +VRle VRle::opGeneric(const VRle &o, Data::Op op) const +{ + if (empty()) return o; + if (o.empty()) return *this; + + Scratch_Object.reset(); + Scratch_Object.opGeneric(d.read(), o.d.read(), op); + + VRle result; + result.d.write() = Scratch_Object; + + return result; +} + +VRle VRle::operator-(const VRle &o) const +{ + if (empty()) return {}; + if (o.empty()) return *this; + + Scratch_Object.reset(); + Scratch_Object.opSubstract(d.read(), o.d.read()); + + VRle result; + result.d.write() = Scratch_Object; + + return result; +} + +VRle VRle::operator&(const VRle &o) const +{ + if (empty() || o.empty()) return {}; + + Scratch_Object.reset(); + Scratch_Object.opIntersect(d.read().view(), o.d.read().view()); + + VRle result; + result.d.write() = Scratch_Object; + + return result; +} + +void VRle::operator&=(const VRle &o) +{ + if (empty()) return; + if (o.empty()) { + reset(); + return; + } + Scratch_Object.reset(); + Scratch_Object.opIntersect(d.read().view(), o.d.read().view()); + d.write() = Scratch_Object; +} + +VRle operator-(const VRect &rect, const VRle &o) +{ + if (rect.empty()) return {}; + + Scratch_Object.reset(); + Scratch_Object.addRect(rect); + + VRle result; + result.d.write().opSubstract(Scratch_Object, o.d.read()); + + return result; +} + +VRle operator&(const VRect &rect, const VRle &o) +{ + if (rect.empty() || o.empty()) return {}; + + Scratch_Object.reset(); + Scratch_Object.addRect(rect); + + VRle result; + result.d.write().opIntersect(Scratch_Object.view(), o.d.read().view()); + + return result; +} + +void VRle::intersect(const VRle &clip, VRleSpanCb cb, void *userData) const +{ + if (empty() || clip.empty()) return; + + _opIntersect(d.read().view(), clip.d.read().view(), cb, userData); +} + +V_END_NAMESPACE diff --git a/vendor/github.com/Benau/go_rlottie/vector_vrle.h b/vendor/github.com/Benau/go_rlottie/vector_vrle.h new file mode 100644 index 00000000..a47893fc --- /dev/null +++ b/vendor/github.com/Benau/go_rlottie/vector_vrle.h @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved. + + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+#ifndef VRLE_H
+#define VRLE_H
+
+#include <vector>
+#include "vector_vcowptr.h"
+#include "vector_vglobal.h"
+#include "vector_vpoint.h"
+#include "vector_vrect.h"
+
+V_BEGIN_NAMESPACE
+
+class VRle {
+public:
+    struct Span {
+        short  x{0};
+        short  y{0};
+        ushort len{0};
+        uchar  coverage{0};
+    };
+    using VRleSpanCb = void (*)(size_t count, const VRle::Span *spans,
+                                void *userData);
+    bool empty() const { return d->empty(); }
+    VRect boundingRect() const { return d->bbox(); }
+    void setBoundingRect(const VRect &bbox) { d->setBbox(bbox); }
+    void addSpan(const VRle::Span *span, size_t count)
+    {
+        d.write().addSpan(span, count);
+    }
+
+    void reset() { d.write().reset(); }
+    void translate(const VPoint &p) { d.write().translate(p); }
+
+    void operator*=(uchar alpha) { d.write() *= alpha; }
+
+    void intersect(const VRect &r, VRleSpanCb cb, void *userData) const;
+    void intersect(const VRle &rle, VRleSpanCb cb, void *userData) const;
+
+    void operator&=(const VRle &o);
+    VRle operator&(const VRle &o) const;
+    VRle operator-(const VRle &o) const;
+    VRle operator+(const VRle &o) const { return opGeneric(o, Data::Op::Add); }
+    VRle operator^(const VRle &o) const { return opGeneric(o, Data::Op::Xor); }
+
+    friend VRle operator-(const VRect &rect, const VRle &o);
+    friend VRle operator&(const VRect &rect, const VRle &o);
+
+    bool   unique() const { return d.unique(); }
+    size_t refCount() const { return d.refCount(); }
+    void   clone(const VRle &o) { d.write().clone(o.d.read()); }
+
+public:
+    struct View {
+        Span * _data;
+        size_t _size;
+        View(const Span *data, size_t sz) : _data((Span *)data), _size(sz) {}
+        Span * data() { return _data; }
+        size_t size() { return _size; }
+    };
+    struct Data {
+        enum class Op { Add, Xor, Substract };
+        VRle::View view() const
+        {
+            return VRle::View(mSpans.data(), mSpans.size());
+        }
+        bool  empty() const { return mSpans.empty(); }
+        void  addSpan(const VRle::Span *span, size_t count);
+        void  updateBbox() const;
+        VRect bbox() const;
+        void  setBbox(const VRect &bbox) const;
+        void  reset();
+        void  translate(const VPoint &p);
+        void  operator*=(uchar alpha);
+        void  opGeneric(const VRle::Data &, const VRle::Data &, Op code);
+        void  opSubstract(const VRle::Data &, const VRle::Data &);
+        void  opIntersect(VRle::View a, VRle::View b);
+        void  opIntersect(const VRect &, VRle::VRleSpanCb, void *) const;
+        void  addRect(const VRect &rect);
+        void  clone(const VRle::Data &);
+
+        std::vector<VRle::Span> mSpans;
+        VPoint                  mOffset;
+        mutable VRect           mBbox;
+        mutable bool            mBboxDirty = true;
+    };
+
+private:
+    VRle opGeneric(const VRle &o, Data::Op opcode) const;
+
+    vcow_ptr<Data> d;
+};
+
+inline void VRle::intersect(const VRect &r, VRleSpanCb cb, void *userData) const
+{
+    d->opIntersect(r, cb, userData);
+}
+
+V_END_NAMESPACE
+
+#endif // VRLE_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vsharedptr.h b/vendor/github.com/Benau/go_rlottie/vector_vsharedptr.h
new file mode 100644
index 00000000..fc0c419c
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vsharedptr.h
@@ -0,0 +1,123 @@
+#ifndef VSHAREDPTR_H
+#define VSHAREDPTR_H
+
+#include <cassert>
+#include <memory>
+#include <atomic>
+
+template <typename T, typename Rc>
+class vshared_ptr {
+    struct model {
+        Rc mRef{1};
+
+        model() = default;
+
+        template <typename... Args>
+        explicit model(Args&&... args) : mValue(std::forward<Args>(args)...){}
+        explicit model(const T& other) : mValue(other){}
+
+        T mValue;
+    };
+    model* mModel{nullptr};
+
+public:
+    using element_type = T;
+
+    vshared_ptr() = default;
+
+    ~vshared_ptr()
+    {
+        unref();
+    }
+
+    template <typename... Args>
+    explicit vshared_ptr(Args&&... args) : mModel(new model(std::forward<Args>(args)...))
+    {
+    }
+
+    vshared_ptr(const vshared_ptr& x) noexcept : vshared_ptr()
+    {
+        if (x.mModel) {
+            mModel = x.mModel;
+            ++mModel->mRef;
+        }
+    }
+
+    vshared_ptr(vshared_ptr&& x) noexcept : vshared_ptr()
+    {
+        if (x.mModel) {
+            mModel = x.mModel;
+            x.mModel = nullptr;
+        }
+    }
+
+    auto operator=(const vshared_ptr& x) noexcept -> vshared_ptr&
+    {
+        unref();
+        mModel = x.mModel;
+        ref();
+        return *this;
+    }
+
+    auto operator=(vshared_ptr&& x) noexcept -> vshared_ptr&
+    {
+        unref();
+        mModel = x.mModel;
+        x.mModel = nullptr;
+        return *this;
+    }
+
+    operator bool() const noexcept {
+        return mModel != nullptr;
+    }
+
+    auto operator*() const noexcept -> element_type& { return read(); }
+
+    auto operator-> () const noexcept -> element_type* { return &read(); }
+
+    std::size_t refCount() const noexcept
+    {
+        assert(mModel);
+
+        return mModel->mRef;
+    }
+
+    bool unique() const noexcept
+    {
+        assert(mModel);
+
+        return mModel->mRef == 1;
+    }
+
+private:
+
+    auto read() const noexcept -> element_type&
+    {
+        assert(mModel);
+
+        return mModel->mValue;
+    }
+
+    void ref()
+    {
+        if (mModel) ++mModel->mRef;
+    }
+
+    void unref()
+    {
+        if (mModel && (--mModel->mRef == 0)) {
+            delete mModel;
+            mModel = nullptr;
+        }
+    }
+};
+
+// atomic ref counted pointer implementation.
+template < typename T>
+using arc_ptr = vshared_ptr<T, std::atomic<std::size_t>>;
+
+// ref counter pointer implementation.
+template < typename T>
+using rc_ptr = vshared_ptr<T, std::size_t>;
+
+#endif // VSHAREDPTR_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vstackallocator.h b/vendor/github.com/Benau/go_rlottie/vector_vstackallocator.h
new file mode 100644
index 00000000..a305b739
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vstackallocator.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VSTACK_ALLOCATOR_H
+#define VSTACK_ALLOCATOR_H
+
+#include <cstddef>
+#include <cassert>
+
+template <std::size_t N, std::size_t alignment = alignof(std::max_align_t)>
+class arena
+{
+    alignas(alignment) char buf_[N];
+    char* ptr_;
+
+public:
+    ~arena() {ptr_ = nullptr;}
+    arena() noexcept : ptr_(buf_) {}
+    arena(const arena&) = delete;
+    arena& operator=(const arena&) = delete;
+
+    template <std::size_t ReqAlign> char* allocate(std::size_t n);
+    void deallocate(char* p, std::size_t n) noexcept;
+
+    static constexpr std::size_t size() noexcept {return N;}
+    std::size_t used() const noexcept {return static_cast<std::size_t>(ptr_ - buf_);}
+    void reset() noexcept {ptr_ = buf_;}
+
+private:
+    static
+    std::size_t
+    align_up(std::size_t n) noexcept
+        {return (n + (alignment-1)) & ~(alignment-1);}
+
+    bool
+    pointer_in_buffer(char* p) noexcept
+        {return buf_ <= p && p <= buf_ + N;}
+};
+
+template <std::size_t N, std::size_t alignment>
+template <std::size_t ReqAlign>
+char*
+arena<N, alignment>::allocate(std::size_t n)
+{
+    static_assert(ReqAlign <= alignment, "alignment is too small for this arena");
+    assert(pointer_in_buffer(ptr_) && "stack_alloc has outlived arena");
+    auto const aligned_n = align_up(n);
+    if (static_cast<decltype(aligned_n)>(buf_ + N - ptr_) >= aligned_n)
+    {
+        char* r = ptr_;
+        ptr_ += aligned_n;
+        return r;
+    }
+
+    static_assert(alignment <= alignof(std::max_align_t), "you've chosen an "
+                  "alignment that is larger than alignof(std::max_align_t), and "
+                  "cannot be guaranteed by normal operator new");
+    return static_cast<char*>(::operator new(n));
+}
+
+template <std::size_t N, std::size_t alignment>
+void
+arena<N, alignment>::deallocate(char* p, std::size_t n) noexcept
+{
+    assert(pointer_in_buffer(ptr_) && "stack_alloc has outlived arena");
+    if (pointer_in_buffer(p))
+    {
+        n = align_up(n);
+        if (p + n == ptr_)
+            ptr_ = p;
+    }
+    else
+        ::operator delete(p);
+}
+
+template <class T, std::size_t N, std::size_t Align = alignof(std::max_align_t)>
+class stack_alloc
+{
+public:
+    using value_type = T;
+    static auto constexpr alignment = Align;
+    static auto constexpr size = N;
+    using arena_type = arena<size, alignment>;
+
+private:
+    arena_type& a_;
+
+public:
+    stack_alloc(const stack_alloc&) = default;
+    stack_alloc& operator=(const stack_alloc&) = delete;
+
+    stack_alloc(arena_type& a) noexcept : a_(a)
+    {
+        static_assert(size % alignment == 0,
+                      "size N needs to be a multiple of alignment Align");
+    }
+    template <class U>
+    stack_alloc(const stack_alloc<U, N, alignment>& a) noexcept
+        : a_(a.a_) {}
+
+    template <class _Up> struct rebind {using other = stack_alloc<_Up, N, alignment>;};
+
+    T* allocate(std::size_t n)
+    {
+        return reinterpret_cast<T*>(a_.template allocate<alignof(T)>(n*sizeof(T)));
+    }
+    void deallocate(T* p, std::size_t n) noexcept
+    {
+        a_.deallocate(reinterpret_cast<char*>(p), n*sizeof(T));
+    }
+
+    template <class T1, std::size_t N1, std::size_t A1, class U, std::size_t M, std::size_t A2>
+    friend
+    bool
+    operator==(const stack_alloc<T1, N1, A1>& x, const stack_alloc<U, M, A2>& y) noexcept;
+
+    template <class U, std::size_t M, std::size_t A> friend class stack_alloc;
+};
+
+template <class T, std::size_t N, std::size_t A1, class U, std::size_t M, std::size_t A2>
+inline
+bool
+operator==(const stack_alloc<T, N, A1>& x, const stack_alloc<U, M, A2>& y) noexcept
+{
+    return N == M && A1 == A2 && &x.a_ == &y.a_;
+}
+
+template <class T, std::size_t N, std::size_t A1, class U, std::size_t M, std::size_t A2>
+inline
+bool
+operator!=(const stack_alloc<T, N, A1>& x, const stack_alloc<U, M, A2>& y) noexcept
+{
+    return !(x == y);
+}
+
+#endif // VSTACK_ALLOCATOR_H
diff --git a/vendor/github.com/Benau/go_rlottie/vector_vtaskqueue.h b/vendor/github.com/Benau/go_rlottie/vector_vtaskqueue.h
new file mode 100644
index 00000000..e505c2f4
--- /dev/null
+++ b/vendor/github.com/Benau/go_rlottie/vector_vtaskqueue.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All rights reserved.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef VTASKQUEUE_H
+#define VTASKQUEUE_H
+
+#include <deque>
+
+template <typename Task>
+class TaskQueue {
+    using lock_t = std::unique_lock<std::mutex>;
+    std::deque<Task>        _q;
+    bool                    _done{false};
+    std::mutex              _mutex;
+    std::condition_variable _ready;
+
+public:
+    bool try_pop(Task &task)
+    {
+        lock_t lock{_mutex, std::try_to_lock};
+        if (!lock || _q.empty()) return false;
+        task = std::move(_q.front());
+        _q.pop_front();
+        return true;
+    }
+
+    bool try_push(Task &&task)
+    {
+        {
+            lock_t lock{_mutex, std::try_to_lock};
+            if (!lock) return false;
+            _q.push_back(std::move(task));
+        }
+        _ready.notify_one();
+        return true;
+    }
+
+    void done()
+    {
+        {
+            lock_t lock{_mutex};
+            _done = true;
+        }
+        _ready.notify_all();
+    }
+
+    bool pop(Task &task)
+    {
+        lock_t lock{_mutex};
+        while (_q.empty() && !_done) _ready.wait(lock);
+        if (_q.empty()) return false;
+        task = std::move(_q.front());
+        _q.pop_front();
+        return true;
+    }
+
+    void push(Task &&task)
+    {
+        {
+            lock_t lock{_mutex};
+            _q.push_back(std::move(task));
+        }
+        _ready.notify_one();
+    }
+
+};
+
+#endif // VTASKQUEUE_H
diff --git a/vendor/github.com/Benau/tgsconverter/LICENSE b/vendor/github.com/Benau/tgsconverter/LICENSE
new file mode 100644
index 00000000..86fd0417
--- /dev/null
+++ b/vendor/github.com/Benau/tgsconverter/LICENSE
@@ -0,0 +1,24 @@
+The MIT License
+
+Copyright (c) 2021, (see AUTHORS)
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
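For orientation, here is a minimal sketch of how the vendored `libtgsconverter` package introduced below is driven. It uses only `NewConverterOptions`, the `Set*` option methods and `ImportFromFile` as defined in `lib.go` further down; the sticker file names are illustrative assumptions, not part of the patch:

```go
package main

import (
	"io/ioutil"
	"log"

	"github.com/Benau/tgsconverter/libtgsconverter"
)

func main() {
	opt := libtgsconverter.NewConverterOptions() // defaults: png, 30 fps, scale 1.0, webp quality 75
	opt.SetExtension("gif")                      // one of: apng, gif, png, webp
	opt.SetFPS(30)

	// "sticker.tgs" / "sticker.gif" are hypothetical paths.
	data, err := libtgsconverter.ImportFromFile("sticker.tgs", opt)
	if err != nil {
		log.Fatal(err)
	}
	if err := ioutil.WriteFile("sticker.gif", data, 0644); err != nil {
		log.Fatal(err)
	}
}
```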
diff --git a/vendor/github.com/Benau/tgsconverter/libtgsconverter/apng.go b/vendor/github.com/Benau/tgsconverter/libtgsconverter/apng.go new file mode 100644 index 00000000..78541533 --- /dev/null +++ b/vendor/github.com/Benau/tgsconverter/libtgsconverter/apng.go @@ -0,0 +1,51 @@ +package libtgsconverter + +import "bytes" +import "image" + +import "github.com/kettek/apng" +import "github.com/av-elier/go-decimal-to-rational" + +type toapng struct { + apng apng.APNG + prev_frame *image.RGBA +} + +func(to_apng *toapng) init(w uint, h uint, options ConverterOptions) { +} + +func(to_apng *toapng) SupportsAnimation() bool { + return true +} + +func (to_apng *toapng) AddFrame(image *image.RGBA, fps uint) error { + if to_apng.prev_frame != nil && sameImage(to_apng.prev_frame, image) { + var idx = len(to_apng.apng.Frames) - 1 + var prev_fps = float64(to_apng.apng.Frames[idx].DelayNumerator) / float64(to_apng.apng.Frames[idx].DelayDenominator) + prev_fps += 1.0 / float64(fps) + rat := dectofrac.NewRatP(prev_fps, 0.001) + to_apng.apng.Frames[idx].DelayNumerator = uint16(rat.Num().Int64()) + to_apng.apng.Frames[idx].DelayDenominator = uint16(rat.Denom().Int64()) + return nil + } + f := apng.Frame{} + f.Image = image + f.DelayNumerator = 1 + f.DelayDenominator = uint16(fps) + f.DisposeOp = apng.DISPOSE_OP_BACKGROUND + f.BlendOp = apng.BLEND_OP_SOURCE + f.IsDefault = false + to_apng.apng.Frames = append(to_apng.apng.Frames, f) + to_apng.prev_frame = image + return nil +} + +func (to_apng *toapng) Result() []byte { + var data []byte + w := bytes.NewBuffer(data) + err := apng.Encode(w, to_apng.apng) + if err != nil { + return nil + } + return w.Bytes() +} diff --git a/vendor/github.com/Benau/tgsconverter/libtgsconverter/gif.go b/vendor/github.com/Benau/tgsconverter/libtgsconverter/gif.go new file mode 100644 index 00000000..c4f5c5d9 --- /dev/null +++ b/vendor/github.com/Benau/tgsconverter/libtgsconverter/gif.go @@ -0,0 +1,81 @@ +package libtgsconverter + +import "bytes" + +import "image" +import "image/color" +import "image/gif" + +type togif struct { + gif gif.GIF + images []image.Image + prev_frame *image.RGBA +} + +func(to_gif *togif) init(w uint, h uint, options ConverterOptions) { + to_gif.gif.Config.Width = int(w) + to_gif.gif.Config.Height = int(h) +} + +func(to_gif *togif) SupportsAnimation() bool { + return true +} + +func (to_gif *togif) AddFrame(image *image.RGBA, fps uint) error { + var fps_int = int(1.0 / float32(fps) * 100.) 
+ if to_gif.prev_frame != nil && sameImage(to_gif.prev_frame, image) { + to_gif.gif.Delay[len(to_gif.gif.Delay) - 1] += fps_int + return nil + } + to_gif.gif.Image = append(to_gif.gif.Image, nil) + to_gif.gif.Delay = append(to_gif.gif.Delay, fps_int) + to_gif.gif.Disposal = append(to_gif.gif.Disposal, gif.DisposalBackground) + to_gif.images = append(to_gif.images, image) + to_gif.prev_frame = image + return nil +} + +func (to_gif *togif) Result() []byte { + q := medianCutQuantizer{mode, nil, false} + p := q.quantizeMultiple(make([]color.Color, 0, 256), to_gif.images) + // Add transparent entry finally + var trans_idx uint8 = 0 + if q.reserveTransparent { + trans_idx = uint8(len(p)) + } + var id_map = make(map[uint32]uint8) + for i, img := range to_gif.images { + pi := image.NewPaletted(img.Bounds(), p) + for y := 0; y < img.Bounds().Dy(); y++ { + for x := 0; x < img.Bounds().Dx(); x++ { + c := img.At(x, y) + cr, cg, cb, ca := c.RGBA() + cid := (cr >> 8) << 16 | cg | (cb >> 8) + if q.reserveTransparent && ca == 0 { + pi.Pix[pi.PixOffset(x, y)] = trans_idx + } else if val, ok := id_map[cid]; ok { + pi.Pix[pi.PixOffset(x, y)] = val + } else { + val := uint8(p.Index(c)) + pi.Pix[pi.PixOffset(x, y)] = val + id_map[cid] = val + } + } + } + to_gif.gif.Image[i] = pi + } + if q.reserveTransparent { + p = append(p, color.RGBA{0, 0, 0, 0}) + } + for _, img := range to_gif.gif.Image { + img.Palette = p + } + to_gif.gif.Config.ColorModel = p + var data []byte + w := bytes.NewBuffer(data) + err := gif.EncodeAll(w, &to_gif.gif) + if err != nil { + return nil + } + return w.Bytes() +} diff --git a/vendor/github.com/Benau/tgsconverter/libtgsconverter/imagewriter.go b/vendor/github.com/Benau/tgsconverter/libtgsconverter/imagewriter.go new file mode 100644 index 00000000..9549e337 --- /dev/null +++ b/vendor/github.com/Benau/tgsconverter/libtgsconverter/imagewriter.go @@ -0,0 +1,40 @@ +package libtgsconverter + +import "image" + +type imageWriter interface { + init(w uint, h uint, options ConverterOptions) + SupportsAnimation() bool + AddFrame(image *image.RGBA, fps uint) error + Result() []byte +} + +func sameImage(a *image.RGBA, b *image.RGBA) bool { + if len(a.Pix) != len(b.Pix) { + return false + } + for i, v := range a.Pix { + if v != b.Pix[i] { + return false + } + } + return true +} + +func newImageWriter(extension string, w uint, h uint, options ConverterOptions) imageWriter { + var writer imageWriter + switch extension { + case "apng": + writer = &toapng{} + case "gif": + writer = &togif{} + case "png": + writer = &topng{} + case "webp": + writer = &towebp{} + default: + return nil + } + writer.init(w, h, options) + return writer +} diff --git a/vendor/github.com/Benau/tgsconverter/libtgsconverter/lib.go b/vendor/github.com/Benau/tgsconverter/libtgsconverter/lib.go new file mode 100644 index 00000000..af6f8ab0 --- /dev/null +++ b/vendor/github.com/Benau/tgsconverter/libtgsconverter/lib.go @@ -0,0 +1,160 @@ +package libtgsconverter + +import "bytes" +import "errors" +import "compress/gzip" +import "image" +import "io/ioutil" + +import "github.com/Benau/go_rlottie" + +type ConverterOptions interface { + SetExtension(ext string) + SetFPS(fps uint) + SetScale(scale float32) + SetWebpQuality(webp_quality float32) + GetExtension() string + GetFPS() uint + GetScale() float32 + GetWebpQuality() float32 +} + +type converter_options struct { + // apng, gif, png or webp + extension string + // Frame per second of output image (if you specify apng, gif or webp) + fps uint + // Scale of image result + scale 
float32 + // Webp encoder quality (0 to 100) + webpQuality float32 +} + +func(opt *converter_options) SetExtension(ext string) { + opt.extension = ext +} + +func(opt *converter_options) SetFPS(fps uint) { + opt.fps = fps +} + +func(opt *converter_options) SetScale(scale float32) { + opt.scale = scale +} + +func(opt *converter_options) SetWebpQuality(webp_quality float32) { + opt.webpQuality = webp_quality +} + +func(opt *converter_options) GetExtension() string { + return opt.extension +} + +func(opt *converter_options) GetFPS() uint { + return opt.fps +} + +func(opt *converter_options) GetScale() float32 { + return opt.scale +} + +func(opt *converter_options) GetWebpQuality() float32 { + return opt.webpQuality +} + +func NewConverterOptions() ConverterOptions { + return &converter_options{"png", 30, 1.0, 75} +} + +func imageFromBuffer(p []byte, w uint, h uint) *image.RGBA { + // rlottie use ARGB32_Premultiplied + for i := 0; i < len(p); i += 4 { + p[i + 0], p[i + 2] = p[i + 2], p[i + 0] + } + m := image.NewRGBA(image.Rect(0, 0, int(w), int(h))) + m.Pix = p + m.Stride = int(w) * 4 + return m +} + +var disabled_cache = false +func ImportFromData(data []byte, options ConverterOptions) ([]byte, error) { + if !disabled_cache { + disabled_cache = true + go_rlottie.LottieConfigureModelCacheSize(0) + } + z, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + return nil, errors.New("Failed to create gzip reader:" + err.Error()) + } + uncompressed, err := ioutil.ReadAll(z) + if err != nil { + return nil, errors.New("Failed to read gzip archive") + } + z.Close() + + animation := go_rlottie.LottieAnimationFromData(string(uncompressed[:]), "", "") + if animation == nil { + return nil, errors.New("Failed to import lottie animation data") + } + + w, h := go_rlottie.LottieAnimationGetSize(animation) + w = uint(float32(w) * options.GetScale()) + h = uint(float32(h) * options.GetScale()) + + frame_rate := go_rlottie.LottieAnimationGetFramerate(animation) + frame_count := go_rlottie.LottieAnimationGetTotalframe(animation) + duration := float32(frame_count) / float32(frame_rate) + var desired_framerate = float32(options.GetFPS()) + // Most (Gif) player doesn't support ~60fps (found in most tgs) + if desired_framerate > 50. { + desired_framerate = 50. 
+ } + step := 1.0 / desired_framerate + + writer := newImageWriter(options.GetExtension(), w, h, options) + if writer == nil { + return nil, errors.New("Failed create imagewriter") + } + + var i float32 + for i = 0.; i < duration; i += step { + frame := go_rlottie.LottieAnimationGetFrameAtPos(animation, i / duration) + buf := make([]byte, w * h * 4) + go_rlottie.LottieAnimationRender(animation, frame, buf, w, h, w * 4) + m := imageFromBuffer(buf, w, h) + err := writer.AddFrame(m, uint(desired_framerate)) + if err != nil { + return nil, errors.New("Failed to add frame:" + err.Error()) + } + if !writer.SupportsAnimation() { + break + } + } + go_rlottie.LottieAnimationDestroy(animation) + return writer.Result(), nil +} + +func ImportFromFile(path string, options ConverterOptions) ([]byte, error) { + tgs, err := ioutil.ReadFile(path) + if err != nil { + return nil, errors.New("Error when opening file:" + err.Error()) + } + return ImportFromData(tgs, options) +} + +func SupportsExtension(extension string) (bool) { + switch extension { + case "apng": + fallthrough + case "gif": + fallthrough + case "png": + fallthrough + case "webp": + return true + default: + return false + } + return false +} diff --git a/vendor/github.com/Benau/tgsconverter/libtgsconverter/png.go b/vendor/github.com/Benau/tgsconverter/libtgsconverter/png.go new file mode 100644 index 00000000..cf492ea4 --- /dev/null +++ b/vendor/github.com/Benau/tgsconverter/libtgsconverter/png.go @@ -0,0 +1,30 @@ +package libtgsconverter + +import "bytes" +import "image" +import "image/png" + +type topng struct { + result []byte +} + +func(to_png *topng) init(w uint, h uint, options ConverterOptions) { +} + +func(to_png *topng) SupportsAnimation() bool { + return false +} + +func (to_png *topng) AddFrame(image *image.RGBA, fps uint) error { + var data []byte + w := bytes.NewBuffer(data) + if err := png.Encode(w, image); err != nil { + return err + } + to_png.result = w.Bytes() + return nil +} + +func (to_png *topng) Result() []byte { + return to_png.result +} diff --git a/vendor/github.com/Benau/tgsconverter/libtgsconverter/quantize_bucket.go b/vendor/github.com/Benau/tgsconverter/libtgsconverter/quantize_bucket.go new file mode 100644 index 00000000..1f00c685 --- /dev/null +++ b/vendor/github.com/Benau/tgsconverter/libtgsconverter/quantize_bucket.go @@ -0,0 +1,119 @@ +package libtgsconverter + +import "image/color" + +type colorAxis uint8 + +// Color axis constants +const ( + red colorAxis = iota + green + blue +) + +type colorPriority struct { + p uint32 + color.RGBA +} + +func (c colorPriority) axis(span colorAxis) uint8 { + switch span { + case red: + return c.R + case green: + return c.G + default: + return c.B + } +} + +type colorBucket []colorPriority + +func (cb colorBucket) partition() (colorBucket, colorBucket) { + mean, span := cb.span() + left, right := 0, len(cb)-1 + for left < right { + cb[left], cb[right] = cb[right], cb[left] + for cb[left].axis(span) < mean && left < right { + left++ + } + for cb[right].axis(span) >= mean && left < right { + right-- + } + } + if left == 0 { + return cb[:1], cb[1:] + } + if left == len(cb)-1 { + return cb[:len(cb)-1], cb[len(cb)-1:] + } + return cb[:left], cb[left:] +} + +func (cb colorBucket) mean() color.RGBA { + var r, g, b uint64 + var p uint64 + for _, c := range cb { + p += uint64(c.p) + r += uint64(c.R) * uint64(c.p) + g += uint64(c.G) * uint64(c.p) + b += uint64(c.B) * uint64(c.p) + } + return color.RGBA{uint8(r / p), uint8(g / p), uint8(b / p), 255} +} + +type constraint struct { + 
min uint8 + max uint8 + vals [256]uint64 +} + +func (c *constraint) update(index uint8, p uint32) { + if index < c.min { + c.min = index + } + if index > c.max { + c.max = index + } + c.vals[index] += uint64(p) +} + +func (c *constraint) span() uint8 { + return c.max - c.min +} + +func (cb colorBucket) span() (uint8, colorAxis) { + var R, G, B constraint + R.min = 255 + G.min = 255 + B.min = 255 + var p uint64 + for _, c := range cb { + R.update(c.R, c.p) + G.update(c.G, c.p) + B.update(c.B, c.p) + p += uint64(c.p) + } + var toCount *constraint + var span colorAxis + if R.span() > G.span() && R.span() > B.span() { + span = red + toCount = &R + } else if G.span() > B.span() { + span = green + toCount = &G + } else { + span = blue + toCount = &B + } + var counted uint64 + var i int + var c uint64 + for i, c = range toCount.vals { + if counted > p/2 || counted+c == p { + break + } + counted += c + } + return uint8(i), span +} diff --git a/vendor/github.com/Benau/tgsconverter/libtgsconverter/quantize_mediancut.go b/vendor/github.com/Benau/tgsconverter/libtgsconverter/quantize_mediancut.go new file mode 100644 index 00000000..850708b9 --- /dev/null +++ b/vendor/github.com/Benau/tgsconverter/libtgsconverter/quantize_mediancut.go @@ -0,0 +1,209 @@ +package libtgsconverter + +import ( + "image" + "image/color" + "sync" +) + +type bucketPool struct { + sync.Pool + maxCap int + m sync.Mutex +} + +func (p *bucketPool) getBucket(c int) colorBucket { + p.m.Lock() + if p.maxCap > c { + p.maxCap = p.maxCap * 99 / 100 + } + if p.maxCap < c { + p.maxCap = c + } + maxCap := p.maxCap + p.m.Unlock() + val := p.Pool.Get() + if val == nil || cap(val.(colorBucket)) < c { + return make(colorBucket, maxCap)[0:c] + } + slice := val.(colorBucket) + slice = slice[0:c] + for i := range slice { + slice[i] = colorPriority{} + } + return slice +} + +var bpool bucketPool + +// aggregationType specifies the type of aggregation to be done +type aggregationType uint8 + +const ( + // Mode - pick the highest priority value + mode aggregationType = iota + // Mean - weighted average all values + mean +) + +// medianCutQuantizer implements the go draw.Quantizer interface using the Median Cut method +type medianCutQuantizer struct { + // The type of aggregation to be used to find final colors + aggregation aggregationType + // The weighting function to use on each pixel + weighting func(image.Image, int, int) uint32 + // Whether need to add a transparent entry after conversion + reserveTransparent bool +} + +//bucketize takes a bucket and performs median cut on it to obtain the target number of grouped buckets +func bucketize(colors colorBucket, num int) (buckets []colorBucket) { + if len(colors) == 0 || num == 0 { + return nil + } + bucket := colors + buckets = make([]colorBucket, 1, num*2) + buckets[0] = bucket + + for len(buckets) < num && len(buckets) < len(colors) { // Limit to palette capacity or number of colors + bucket, buckets = buckets[0], buckets[1:] + if len(bucket) < 2 { + buckets = append(buckets, bucket) + continue + } else if len(bucket) == 2 { + buckets = append(buckets, bucket[:1], bucket[1:]) + continue + } + + left, right := bucket.partition() + buckets = append(buckets, left, right) + } + return +} + +// palettize finds a single color to represent a set of color buckets +func (q* medianCutQuantizer) palettize(p color.Palette, buckets []colorBucket) color.Palette { + for _, bucket := range buckets { + switch q.aggregation { + case mean: + mean := bucket.mean() + p = append(p, mean) + case mode: + var best 
colorPriority + for _, c := range bucket { + if c.p > best.p { + best = c + } + } + p = append(p, best.RGBA) + } + } + return p +} + +// quantizeSlice expands the provided bucket and then palettizes the result +func (q* medianCutQuantizer) quantizeSlice(p color.Palette, colors []colorPriority) color.Palette { + numColors := cap(p) - len(p) + reserveTransparent := q.reserveTransparent + if reserveTransparent { + numColors-- + } + buckets := bucketize(colors, numColors) + p = q.palettize(p, buckets) + return p +} + +func colorAt(m image.Image, x int, y int) color.RGBA { + switch i := m.(type) { + case *image.YCbCr: + yi := i.YOffset(x, y) + ci := i.COffset(x, y) + c := color.YCbCr{ + i.Y[yi], + i.Cb[ci], + i.Cr[ci], + } + return color.RGBA{c.Y, c.Cb, c.Cr, 255} + case *image.RGBA: + ci := i.PixOffset(x, y) + return color.RGBA{i.Pix[ci+0], i.Pix[ci+1], i.Pix[ci+2], i.Pix[ci+3]} + default: + return color.RGBAModel.Convert(i.At(x, y)).(color.RGBA) + } +} + +// buildBucketMultiple creates a prioritized color slice with all the colors in +// the images. +func (q* medianCutQuantizer) buildBucketMultiple(ms []image.Image) (bucket colorBucket) { + if len(ms) < 1 { + return colorBucket{} + } + + bounds := ms[0].Bounds() + size := (bounds.Max.X - bounds.Min.X) * (bounds.Max.Y - bounds.Min.Y) * 2 + sparseBucket := bpool.getBucket(size) + + for _, m := range ms { + for y := bounds.Min.Y; y < bounds.Max.Y; y++ { + for x := bounds.Min.X; x < bounds.Max.X; x++ { + priority := uint32(1) + if q.weighting != nil { + priority = q.weighting(m, x, y) + } + c := colorAt(m, x, y) + if c.A == 0 { + if !q.reserveTransparent { + q.reserveTransparent = true + } + continue + } + if priority != 0 { + index := int(c.R)<<16 | int(c.G)<<8 | int(c.B) + for i := 1; ; i++ { + p := &sparseBucket[index%size] + if p.p == 0 || p.RGBA == c { + *p = colorPriority{p.p + priority, c} + break + } + index += 1 + i + } + } + } + } + } + + bucket = sparseBucket[:0] + switch ms[0].(type) { + case *image.YCbCr: + for _, p := range sparseBucket { + if p.p != 0 { + r, g, b := color.YCbCrToRGB(p.R, p.G, p.B) + bucket = append(bucket, colorPriority{p.p, color.RGBA{r, g, b, p.A}}) + } + } + default: + for _, p := range sparseBucket { + if p.p != 0 { + bucket = append(bucket, p) + } + } + } + return +} + +// Quantize quantizes an image to a palette and returns the palette +func (q* medianCutQuantizer) quantize(p color.Palette, m image.Image) color.Palette { + // Package quantize offers an implementation of the draw.Quantize interface using an optimized Median Cut method, + // including advanced functionality for fine-grained control of color priority + bucket := q.buildBucketMultiple([]image.Image{m}) + defer bpool.Put(bucket) + return q.quantizeSlice(p, bucket) +} + +// QuantizeMultiple quantizes several images at once to a palette and returns +// the palette +func (q* medianCutQuantizer) quantizeMultiple(p color.Palette, m []image.Image) color.Palette { + bucket := q.buildBucketMultiple(m) + defer bpool.Put(bucket) + return q.quantizeSlice(p, bucket) +} diff --git a/vendor/github.com/Benau/tgsconverter/libtgsconverter/webp.go b/vendor/github.com/Benau/tgsconverter/libtgsconverter/webp.go new file mode 100644 index 00000000..60e9887c --- /dev/null +++ b/vendor/github.com/Benau/tgsconverter/libtgsconverter/webp.go @@ -0,0 +1,39 @@ +package libtgsconverter + +import "bytes" +import "image" + +import "github.com/sizeofint/webpanimation" + +type towebp struct { + timestamp int + webpanim *webpanimation.WebpAnimation + config 
webpanimation.WebPConfig
+}
+
+func (to_webp *towebp) init(w uint, h uint, options ConverterOptions) {
+	to_webp.timestamp = 0
+	to_webp.webpanim = webpanimation.NewWebpAnimation(int(w), int(h), 0)
+	to_webp.config = webpanimation.NewWebpConfig()
+	to_webp.config.SetQuality(options.GetWebpQuality())
+}
+
+func (to_webp *towebp) SupportsAnimation() bool {
+	return true
+}
+
+func (to_webp *towebp) AddFrame(image *image.RGBA, fps uint) error {
+	err := to_webp.webpanim.AddFrame(image, to_webp.timestamp, to_webp.config)
+	to_webp.timestamp += int((1.0 / float32(fps)) * 1000.)
+	return err
+}
+
+func (to_webp *towebp) Result() []byte {
+	var buf bytes.Buffer
+	err := to_webp.webpanim.Encode(&buf)
+	if err != nil {
+		return nil
+	}
+	to_webp.webpanim.ReleaseMemory()
+	return buf.Bytes()
+}
diff --git a/vendor/github.com/av-elier/go-decimal-to-rational/.travis.yml b/vendor/github.com/av-elier/go-decimal-to-rational/.travis.yml
new file mode 100644
index 00000000..e3afdd49
--- /dev/null
+++ b/vendor/github.com/av-elier/go-decimal-to-rational/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+
+go:
+  - "1.8"
+  - "1.9"
+  - "1.10"
+  - "1.11"
+
+script:
+  - go test -v ./...
diff --git a/vendor/github.com/av-elier/go-decimal-to-rational/LICENSE b/vendor/github.com/av-elier/go-decimal-to-rational/LICENSE
new file mode 100644
index 00000000..dce70403
--- /dev/null
+++ b/vendor/github.com/av-elier/go-decimal-to-rational/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 Alexey Pozdnyakov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/av-elier/go-decimal-to-rational/README.md b/vendor/github.com/av-elier/go-decimal-to-rational/README.md
new file mode 100644
index 00000000..6a5abe92
--- /dev/null
+++ b/vendor/github.com/av-elier/go-decimal-to-rational/README.md
@@ -0,0 +1,54 @@
+# go-decimal-to-rational
+
+[![Build Status](https://travis-ci.org/av-elier/go-decimal-to-rational.svg?branch=master)](https://travis-ci.org/av-elier/go-decimal-to-rational)
+
+Go library to convert a decimal (float64) into a rational fraction with the required precision
+
+Relies on the [Continued Fraction](http://mathworld.wolfram.com/ContinuedFraction.html) algorithm.
+
+It's sometimes more appropriate than the default big.Rat SetString, because
+you can get `2/3` from `0.6666` by specifying the required precision. With big.Rat SetString
+you can only get `3333/5000`, and have no way to manipulate that (as of Go 1.11).
+
+# Example
+```go
+func ExampleNewRatP() {
+	fmt.Println(NewRatP(0.6666, 0.01).String())
+	fmt.Println(NewRatP(0.981, 0.001).String())
+	fmt.Println(NewRatP(0.75, 0.01).String())
+	// Output:
+	// 2/3
+	// 981/1000
+	// 3/4
+}
+```
+```go
+func ExampleNewRatI() {
+	fmt.Println(NewRatI(0.6667, 3).String())
+	fmt.Println(NewRatI(0.6667, 4).String())
+	// Output:
+	// 2/3
+	// 6667/10000
+}
+```
+
+# Docs
+```
+import dectofrac "github.com/av-elier/go-decimal-to-rational"
+```
+
+#### func NewRatI
+
+```go
+func NewRatI(val float64, iterations int64) *big.Rat
+```
+NewRatI returns a rational from a decimal using `iterations` iterations of
+the Continued Fraction algorithm
+
+#### func NewRatP
+
+```go
+func NewRatP(val float64, stepPrecision float64) *big.Rat
+```
+NewRatP returns a rational from a decimal by iterating until the next
+fraction is less than `stepPrecision`
diff --git a/vendor/github.com/av-elier/go-decimal-to-rational/frac.go b/vendor/github.com/av-elier/go-decimal-to-rational/frac.go
new file mode 100644
index 00000000..a20f8532
--- /dev/null
+++ b/vendor/github.com/av-elier/go-decimal-to-rational/frac.go
@@ -0,0 +1,44 @@
+package dectofrac
+
+import (
+	"math"
+	"math/big"
+)
+
+// MaxIterations is a sane upper limit on the number of iterations in precision mode
+const MaxIterations = 5000
+
+// NewRatI returns a rational from a decimal
+// using `iterations` iterations of the Continued Fraction algorithm
+func NewRatI(val float64, iterations int64) *big.Rat {
+	return NewRat(val, iterations, 0)
+}
+
+// NewRatP returns a rational from a decimal
+// by iterating until the next fraction is less than `stepPrecision`
+func NewRatP(val float64, stepPrecision float64) *big.Rat {
+	return NewRat(val, MaxIterations, stepPrecision)
+}
+
+func NewRat(val float64, iterations int64, stepPrecision float64) *big.Rat {
+	a0 := int64(math.Floor(val))
+	x0 := val - float64(a0)
+	rat := cf(x0, 1, iterations, stepPrecision)
+	return rat.Add(rat, new(big.Rat).SetInt64(a0))
+}
+
+func cf(xi float64, i int64, limit int64, stepPrecision float64) *big.Rat {
+	if i >= limit || xi <= stepPrecision {
+		return big.NewRat(0, 1)
+	}
+
+	inverted := 1 / xi
+	aj := int64(math.Floor(inverted))
+	xj := inverted - float64(aj)
+	ratAJ := new(big.Rat).SetInt64(aj)
+	ratNext := cf(xj, i+1, limit, stepPrecision)
+	res := ratAJ.Add(ratAJ, ratNext)
+	res = res.Inv(res)
+
+	return res
+}
diff --git a/vendor/github.com/kettek/apng/LICENSE b/vendor/github.com/kettek/apng/LICENSE
new file mode 100644
index 00000000..399ef573
--- /dev/null
+++ b/vendor/github.com/kettek/apng/LICENSE
@@ -0,0 +1,30 @@
+Original PNG code Copyright (c) 2009 The Go Authors.
+APNG enhancements Copyright (c) 2018 Ketchetwahmeegwun T. Southall /
+kts of kettek.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/kettek/apng/README.md b/vendor/github.com/kettek/apng/README.md
new file mode 100644
index 00000000..862c3593
--- /dev/null
+++ b/vendor/github.com/kettek/apng/README.md
@@ -0,0 +1,114 @@
+# APNG golang library
+This `apng` package provides methods for decoding and encoding APNG files. It is based upon the work in the official "image/png" package.
+
+See [apngr](https://github.com/kettek/apngr) for an APNG extraction and combination tool using this library.
+
+**NOTE**: The decoder should handle most anything you throw at it, and malformed PNGs should result in an error message. The encoder currently doesn't handle differences between image formats and has not been tested as thoroughly.
+
+If a regular PNG file is read, the first Frame of the APNG returned by `DecodeAll(io.Reader)` will be the PNG data.
+
+## Types
+### APNG
+The APNG type contains the frames of a decoded `.apng` file, along with any important properties. It may also be created and used for Encoding.
+
+| Signature                 | Description                   |
+|---------------------------|-------------------------------|
+| Frames []Frame            | The stored frames of the APNG.|
+| LoopCount uint            | The number of times an animation should be restarted during display. A value of 0 means to loop forever. |
+
+### Frame
+The Frame type contains an individual frame of an APNG. The following table provides the important properties.
+
+| Signature                 | Description      |
+|---------------------------|------------------|
+| Image image.Image         | The frame's image data. |
+| IsDefault bool            | Indicates if this frame is a default image that should not be included as part of the animation frames. May only be true for the first Frame. |
+| XOffset int               | The x offset of the frame. |
+| YOffset int               | The y offset of the frame. |
+| DelayNumerator uint16     | The delay numerator. |
+| DelayDenominator uint16   | The delay denominator. |
+| DisposeOp byte            | The frame disposal operation. May be `apng.DISPOSE_OP_NONE`, `apng.DISPOSE_OP_BACKGROUND`, or `apng.DISPOSE_OP_PREVIOUS`. See the [APNG Specification](https://wiki.mozilla.org/APNG_Specification#.60fcTL.60:_The_Frame_Control_Chunk) for more information. |
+| BlendOp byte              | The frame blending operation. May be `apng.BLEND_OP_SOURCE` or `apng.BLEND_OP_OVER`. See the [APNG Specification](https://wiki.mozilla.org/APNG_Specification#.60fcTL.60:_The_Frame_Control_Chunk) for more information. |
+
+## Methods
+### DecodeAll(io.Reader) (APNG, error)
+This method returns an APNG type containing the frames and associated data within the passed file.
+ +### Example +```go +package main + +import ( + "github.com/kettek/apng" + "os" + "log" +) + +func main() { + // Open our animated PNG file + f, err := os.Open("animation.png") + if err != nil { + panic(err) + } + defer f.Close() + // Decode all frames into an APNG + a, err := apng.DecodeAll(f) + if err != nil { + panic(err) + } + // Print some information on the APNG + log.Printf("Found %d frames\n", len(a.Frames)) + for i, frame := range a.Frames { + b := frame.Image.Bounds() + log.Printf("Frame %d: %dx%d\n", i, b.Max.X, b.Max.Y) + } +} + +``` + +### Decode(io.Reader) (image.Image, error) +This method returns the Image of the default frame of an APNG file. + +### Encode(io.Writer, APNG) error +This method writes the passed APNG object to the given io.Writer as an APNG binary file. + +### Example +```go +package main + +import ( + "github.com/kettek/apng" + "image/png" + "os" +) + +func main() { + // Define our variables + output := "animation.png" + images := [4]string{"0.png", "1.png", "2.png", "3.png"} + a := apng.APNG{ + Frames: make([]apng.Frame, len(images)), + } + // Open our file for writing + out, err := os.Create(output) + if err != nil { + panic(err) + } + defer out.Close() + // Assign each decoded PNG's Image to the appropriate Frame Image + for i, s := range images { + in, err := os.Open(s) + if err != nil { + panic(err) + } + defer in.Close() + m, err := png.Decode(in) + if err != nil { + panic(err) + } + a.Frames[i].Image = m + } + // Write APNG to our output file + apng.Encode(out, a) +} +``` diff --git a/vendor/github.com/kettek/apng/apng.go b/vendor/github.com/kettek/apng/apng.go new file mode 100644 index 00000000..ee75fbd0 --- /dev/null +++ b/vendor/github.com/kettek/apng/apng.go @@ -0,0 +1,13 @@ +// Copyright 2018 kts of kettek / Ketchetwahmeegwun Tecumseh Southall. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package apng + +type APNG struct { + Frames []Frame + // LoopCount defines the number of times an animation will be + // restarted during display. + // A LoopCount of 0 means to loop forever + LoopCount uint +} diff --git a/vendor/github.com/kettek/apng/frame.go b/vendor/github.com/kettek/apng/frame.go new file mode 100644 index 00000000..31c21eda --- /dev/null +++ b/vendor/github.com/kettek/apng/frame.go @@ -0,0 +1,47 @@ +// Copyright 2018 kts of kettek / Ketchetwahmeegwun Tecumseh Southall. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package apng + +import ( + "image" +) + +// dispose_op values, as per the APNG spec. +const ( + DISPOSE_OP_NONE = 0 + DISPOSE_OP_BACKGROUND = 1 + DISPOSE_OP_PREVIOUS = 2 +) + +// blend_op values, as per the APNG spec. +const ( + BLEND_OP_SOURCE = 0 + BLEND_OP_OVER = 1 +) + +type Frame struct { + Image image.Image + width, height int + XOffset, YOffset int + DelayNumerator uint16 + DelayDenominator uint16 + DisposeOp byte + BlendOp byte + // IsDefault indicates if the Frame is a default image that + // should not be used in the animation. IsDefault can only + // be true on the first frame. + IsDefault bool +} + +// GetDelay returns the number of seconds in the frame. 
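+// As per the APNG spec, a DelayDenominator of 0 is treated as 100, so a
+// frame with DelayNumerator 3 and DelayDenominator 0 displays for 0.03s.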
+func (f *Frame) GetDelay() float64 { + d := uint16(0) + if f.DelayDenominator == 0 { + d = 100 + } else { + d = f.DelayDenominator + } + return float64(f.DelayNumerator) / float64(d) +} diff --git a/vendor/github.com/kettek/apng/paeth.go b/vendor/github.com/kettek/apng/paeth.go new file mode 100644 index 00000000..adcf311d --- /dev/null +++ b/vendor/github.com/kettek/apng/paeth.go @@ -0,0 +1,71 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package apng + +// intSize is either 32 or 64. +const intSize = 32 << (^uint(0) >> 63) + +func abs(x int) int { + // m := -1 if x < 0. m := 0 otherwise. + m := x >> (intSize - 1) + + // In two's complement representation, the negative number + // of any number (except the smallest one) can be computed + // by flipping all the bits and add 1. This is faster than + // code with a branch. + // See Hacker's Delight, section 2-4. + return (x ^ m) - m +} + +// paeth implements the Paeth filter function, as per the PNG specification. +func paeth(a, b, c uint8) uint8 { + // This is an optimized version of the sample code in the PNG spec. + // For example, the sample code starts with: + // p := int(a) + int(b) - int(c) + // pa := abs(p - int(a)) + // but the optimized form uses fewer arithmetic operations: + // pa := int(b) - int(c) + // pa = abs(pa) + pc := int(c) + pa := int(b) - pc + pb := int(a) - pc + pc = abs(pa + pb) + pa = abs(pa) + pb = abs(pb) + if pa <= pb && pa <= pc { + return a + } else if pb <= pc { + return b + } + return c +} + +// filterPaeth applies the Paeth filter to the cdat slice. +// cdat is the current row's data, pdat is the previous row's data. +func filterPaeth(cdat, pdat []byte, bytesPerPixel int) { + var a, b, c, pa, pb, pc int + for i := 0; i < bytesPerPixel; i++ { + a, c = 0, 0 + for j := i; j < len(cdat); j += bytesPerPixel { + b = int(pdat[j]) + pa = b - c + pb = a - c + pc = abs(pa + pb) + pa = abs(pa) + pb = abs(pb) + if pa <= pb && pa <= pc { + // No-op. + } else if pb <= pc { + a = b + } else { + a = c + } + a += int(cdat[j]) + a &= 0xff + cdat[j] = uint8(a) + c = b + } + } +} diff --git a/vendor/github.com/kettek/apng/reader.go b/vendor/github.com/kettek/apng/reader.go new file mode 100644 index 00000000..fd6ccee3 --- /dev/null +++ b/vendor/github.com/kettek/apng/reader.go @@ -0,0 +1,1141 @@ +// Original PNG code Copyright 2009 The Go Authors. +// Additional APNG enhancements Copyright 2018 Ketchetwahmeegwun +// Tecumseh Southall / kts of kettek. +// All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package apng implements an APNG image decoder. +// +// The PNG specification is at https://www.w3.org/TR/PNG/. +// The APNG specification is at https://wiki.mozilla.org/APNG_Specification +package apng + +import ( + "compress/zlib" + "encoding/binary" + "fmt" + "hash" + "hash/crc32" + "image" + "image/color" + "io" +) + +// Color type, as per the PNG spec. +const ( + ctGrayscale = 0 + ctTrueColor = 2 + ctPaletted = 3 + ctGrayscaleAlpha = 4 + ctTrueColorAlpha = 6 +) + +// A cb is a combination of color type and bit depth. +const ( + cbInvalid = iota + cbG1 + cbG2 + cbG4 + cbG8 + cbGA8 + cbTC8 + cbP1 + cbP2 + cbP4 + cbP8 + cbTCA8 + cbG16 + cbGA16 + cbTC16 + cbTCA16 +) + +func cbPaletted(cb int) bool { + return cbP1 <= cb && cb <= cbP8 +} + +// Filter type, as per the PNG spec. 
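+// Each row of pixel data is preceded by one of these filter bytes; the
+// filter predicts bytes from their left and upper neighbours so that the
+// residuals compress better under zlib (see filterPaeth in paeth.go).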
+const ( + ftNone = 0 + ftSub = 1 + ftUp = 2 + ftAverage = 3 + ftPaeth = 4 + nFilter = 5 +) + +// Interlace type. +const ( + itNone = 0 + itAdam7 = 1 +) + +// interlaceScan defines the placement and size of a pass for Adam7 interlacing. +type interlaceScan struct { + xFactor, yFactor, xOffset, yOffset int +} + +// interlacing defines Adam7 interlacing, with 7 passes of reduced images. +// See https://www.w3.org/TR/PNG/#8Interlace +var interlacing = []interlaceScan{ + {8, 8, 0, 0}, + {8, 8, 4, 0}, + {4, 8, 0, 4}, + {4, 4, 2, 0}, + {2, 4, 0, 2}, + {2, 2, 1, 0}, + {1, 2, 0, 1}, +} + +// Decoding stage. +// The PNG specification says that the IHDR, PLTE (if present), tRNS (if +// present), IDAT and IEND chunks must appear in that order. There may be +// multiple IDAT chunks, and IDAT chunks must be sequential (i.e. they may not +// have any other chunks between them). +// https://www.w3.org/TR/PNG/#5ChunkOrdering +const ( + dsStart = iota + dsSeenIHDR + dsSeenPLTE + dsSeentRNS + dsSeenacTL + dsSeenIDAT + dsSeenfdAT + dsSeenIEND +) + +const pngHeader = "\x89PNG\r\n\x1a\n" + +type decoder struct { + r io.Reader + num_frames uint32 + a APNG + frame_index int + crc hash.Hash32 + width, height int + depth int + palette color.Palette + cb int + stage int + idatLength uint32 + tmp [3 * 256]byte + interlace int + + // useTransparent and transparent are used for grayscale and truecolor + // transparency, as opposed to palette transparency. + useTransparent bool + transparent [6]byte +} + +// A FormatError reports that the input is not a valid PNG. +type FormatError string + +func (e FormatError) Error() string { return "apng: invalid format: " + string(e) } + +var chunkOrderError = FormatError("chunk out of order") + +// An UnsupportedError reports that the input uses a valid but unimplemented PNG feature. +type UnsupportedError string + +func (e UnsupportedError) Error() string { return "apng: unsupported feature: " + string(e) } + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func (d *decoder) parseIHDR(length uint32) error { + if length != 13 { + return FormatError("bad IHDR length") + } + if _, err := io.ReadFull(d.r, d.tmp[:13]); err != nil { + return err + } + d.crc.Write(d.tmp[:13]) + if d.tmp[10] != 0 { + return UnsupportedError("compression method") + } + if d.tmp[11] != 0 { + return UnsupportedError("filter method") + } + if d.tmp[12] != itNone && d.tmp[12] != itAdam7 { + return FormatError("invalid interlace method") + } + d.interlace = int(d.tmp[12]) + + w := int32(binary.BigEndian.Uint32(d.tmp[0:4])) + h := int32(binary.BigEndian.Uint32(d.tmp[4:8])) + if w <= 0 || h <= 0 { + return FormatError("non-positive dimension") + } + nPixels := int64(w) * int64(h) + if nPixels != int64(int(nPixels)) { + return UnsupportedError("dimension overflow") + } + // There can be up to 8 bytes per pixel, for 16 bits per channel RGBA. 
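+	// The check below catches int64 overflow: if nPixels*8 wraps around,
+	// dividing by 8 no longer recovers nPixels.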
+	if nPixels != (nPixels*8)/8 {
+		return UnsupportedError("dimension overflow")
+	}
+
+	d.cb = cbInvalid
+	d.depth = int(d.tmp[8])
+	switch d.depth {
+	case 1:
+		switch d.tmp[9] {
+		case ctGrayscale:
+			d.cb = cbG1
+		case ctPaletted:
+			d.cb = cbP1
+		}
+	case 2:
+		switch d.tmp[9] {
+		case ctGrayscale:
+			d.cb = cbG2
+		case ctPaletted:
+			d.cb = cbP2
+		}
+	case 4:
+		switch d.tmp[9] {
+		case ctGrayscale:
+			d.cb = cbG4
+		case ctPaletted:
+			d.cb = cbP4
+		}
+	case 8:
+		switch d.tmp[9] {
+		case ctGrayscale:
+			d.cb = cbG8
+		case ctTrueColor:
+			d.cb = cbTC8
+		case ctPaletted:
+			d.cb = cbP8
+		case ctGrayscaleAlpha:
+			d.cb = cbGA8
+		case ctTrueColorAlpha:
+			d.cb = cbTCA8
+		}
+	case 16:
+		switch d.tmp[9] {
+		case ctGrayscale:
+			d.cb = cbG16
+		case ctTrueColor:
+			d.cb = cbTC16
+		case ctGrayscaleAlpha:
+			d.cb = cbGA16
+		case ctTrueColorAlpha:
+			d.cb = cbTCA16
+		}
+	}
+	if d.cb == cbInvalid {
+		return UnsupportedError(fmt.Sprintf("bit depth %d, color type %d", d.tmp[8], d.tmp[9]))
+	}
+	d.a.Frames[0].width, d.a.Frames[0].height = int(w), int(h)
+	return d.verifyChecksum()
+}
+
+func (d *decoder) parsePLTE(length uint32) error {
+	np := int(length / 3) // The number of palette entries.
+	if length%3 != 0 || np <= 0 || np > 256 || np > 1<<uint(d.depth) {
+		return FormatError("bad PLTE length")
+	}
+	n, err := io.ReadFull(d.r, d.tmp[:3*np])
+	if err != nil {
+		return err
+	}
+	d.crc.Write(d.tmp[:n])
+	switch d.cb {
+	case cbP1, cbP2, cbP4, cbP8:
+		d.palette = make(color.Palette, 256)
+		for i := 0; i < np; i++ {
+			d.palette[i] = color.RGBA{d.tmp[3*i+0], d.tmp[3*i+1], d.tmp[3*i+2], 0xff}
+		}
+		for i := np; i < 256; i++ {
+			// Initialize the rest of the palette to opaque black. The spec (section
+			// 11.2.3) says that "any out-of-range pixel value found in the image data
+			// is an error", but some real-world PNG files have out-of-range pixel
+			// values. We fall back to opaque black, the same as libpng 1.5.13;
+			// ImageMagick 6.5.7 returns an error.
+			d.palette[i] = color.RGBA{0x00, 0x00, 0x00, 0xff}
+		}
+		d.palette = d.palette[:np]
+	case cbTC8, cbTCA8, cbTC16, cbTCA16:
+		// As per the PNG spec, a PLTE chunk is optional (and for practical purposes,
+		// ignorable) for the ctTrueColor and ctTrueColorAlpha color types (section 4.1.2).
+	default:
+		return FormatError("PLTE, color type mismatch")
+	}
+	return d.verifyChecksum()
+}
+
+func (d *decoder) parsetRNS(length uint32) error {
+	switch d.cb {
+	case cbG1, cbG2, cbG4, cbG8, cbG16:
+		if length != 2 {
+			return FormatError("bad tRNS length")
+		}
+		n, err := io.ReadFull(d.r, d.tmp[:length])
+		if err != nil {
+			return err
+		}
+		d.crc.Write(d.tmp[:n])
+
+		copy(d.transparent[:], d.tmp[:length])
+		switch d.cb {
+		case cbG1:
+			d.transparent[1] *= 0xff
+		case cbG2:
+			d.transparent[1] *= 0x55
+		case cbG4:
+			d.transparent[1] *= 0x11
+		}
+		d.useTransparent = true
+
+	case cbTC8, cbTC16:
+		if length != 6 {
+			return FormatError("bad tRNS length")
+		}
+		n, err := io.ReadFull(d.r, d.tmp[:length])
+		if err != nil {
+			return err
+		}
+		d.crc.Write(d.tmp[:n])
+
+		copy(d.transparent[:], d.tmp[:length])
+		d.useTransparent = true
+
+	case cbP1, cbP2, cbP4, cbP8:
+		if length > 256 {
+			return FormatError("bad tRNS length")
+		}
+		n, err := io.ReadFull(d.r, d.tmp[:length])
+		if err != nil {
+			return err
+		}
+		d.crc.Write(d.tmp[:n])
+
+		if len(d.palette) < n {
+			d.palette = d.palette[:n]
+		}
+		for i := 0; i < n; i++ {
+			rgba := d.palette[i].(color.RGBA)
+			d.palette[i] = color.NRGBA{rgba.R, rgba.G, rgba.B, d.tmp[i]}
+		}
+
+	default:
+		return FormatError("tRNS, color type mismatch")
+	}
+	return d.verifyChecksum()
+}
+
+// Read presents one or more IDAT chunks as one continuous stream (minus the
+// intermediate chunk headers and footers). If the PNG data looked like:
+//   ... len0 IDAT xxx crc0 len1 IDAT yy crc1 len2 IEND crc2
+// then this reader presents xxxyy. For well-formed PNG data, the decoder state
+// immediately before the first Read call is that d.r is positioned between the
+// first IDAT and xxx, and the decoder state immediately after the last Read
+// call is that d.r is positioned between yy and crc1.
+func (d *decoder) Read(p []byte) (int, error) {
+	if len(p) == 0 {
+		return 0, nil
+	}
+	for d.idatLength == 0 {
+		// We have exhausted an IDAT chunk. Verify the checksum of that chunk.
+		if err := d.verifyChecksum(); err != nil {
+			return 0, err
+		}
+		// Read the length and chunk type of the next chunk, and check that
+		// it is an IDAT chunk.
+		if _, err := io.ReadFull(d.r, d.tmp[:8]); err != nil {
+			return 0, err
+		}
+
+		if d.stage < dsSeenfdAT {
+			d.idatLength = binary.BigEndian.Uint32(d.tmp[:4])
+			if string(d.tmp[4:8]) != "IDAT" {
+				return 0, FormatError(fmt.Sprintf("expected IDAT, found %s", string(d.tmp[4:8])))
+			}
+		} else {
+			d.idatLength = binary.BigEndian.Uint32(d.tmp[:4]) - 4
+			if string(d.tmp[4:8]) != "fdAT" {
+				return 0, FormatError(fmt.Sprintf("expected fdAT, found %s", string(d.tmp[4:8])))
+			}
+		}
+		d.crc.Reset()
+		d.crc.Write(d.tmp[4:8])
+		if d.stage >= dsSeenfdAT {
+			if _, err := io.ReadFull(d.r, d.tmp[:4]); err != nil {
+				return 0, err
+			}
+			d.crc.Write(d.tmp[:4])
+		}
+	}
+	if int(d.idatLength) < 0 {
+		return 0, UnsupportedError("IDAT/fdAT chunk length overflow")
+	}
+	n, err := d.r.Read(p[:min(len(p), int(d.idatLength))])
+	d.crc.Write(p[:n])
+	d.idatLength -= uint32(n)
+	return n, err
+}
+
+// decode decodes the IDAT data into an image.
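+// The decoder itself is handed to zlib.NewReader, so decompression pulls its
+// input straight from the IDAT/fdAT chunk stream via d.Read above.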
+func (d *decoder) decode() (image.Image, error) { + r, err := zlib.NewReader(d) + if err != nil { + return nil, err + } + defer r.Close() + var img image.Image + if d.interlace == itNone { + img, err = d.readImagePass(r, 0, false) + if err != nil { + return nil, err + } + } else if d.interlace == itAdam7 { + // Allocate a blank image of the full size. + img, err = d.readImagePass(nil, 0, true) + if err != nil { + return nil, err + } + for pass := 0; pass < 7; pass++ { + imagePass, err := d.readImagePass(r, pass, false) + if err != nil { + return nil, err + } + if imagePass != nil { + d.mergePassInto(img, imagePass, pass) + } + } + } + + // Check for EOF, to verify the zlib checksum. + n := 0 + for i := 0; n == 0 && err == nil; i++ { + if i == 100 { + return nil, io.ErrNoProgress + } + n, err = r.Read(d.tmp[:1]) + } + if err != nil && err != io.EOF { + return nil, FormatError(err.Error()) + } + if n != 0 || d.idatLength != 0 { + return nil, FormatError("too much pixel data") + } + + return img, nil +} + +// readImagePass reads a single image pass, sized according to the pass number. +func (d *decoder) readImagePass(r io.Reader, pass int, allocateOnly bool) (image.Image, error) { + bitsPerPixel := 0 + pixOffset := 0 + var ( + gray *image.Gray + rgba *image.RGBA + paletted *image.Paletted + nrgba *image.NRGBA + gray16 *image.Gray16 + rgba64 *image.RGBA64 + nrgba64 *image.NRGBA64 + img image.Image + width int + height int + ) + width = d.a.Frames[d.frame_index].width + height = d.a.Frames[d.frame_index].height + if d.interlace == itAdam7 && !allocateOnly { + p := interlacing[pass] + // Add the multiplication factor and subtract one, effectively rounding up. + width = (width - p.xOffset + p.xFactor - 1) / p.xFactor + height = (height - p.yOffset + p.yFactor - 1) / p.yFactor + // A PNG image can't have zero width or height, but for an interlaced + // image, an individual pass might have zero width or height. If so, we + // shouldn't even read a per-row filter type byte, so return early. 
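+		// For example, Adam7 pass 2 has xOffset 4 and xFactor 8, so for a 4x4
+		// image its width is (4-4+8-1)/8 = 0 and the pass contributes no pixels.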
+ if width == 0 || height == 0 { + return nil, nil + } + } + switch d.cb { + case cbG1, cbG2, cbG4, cbG8: + bitsPerPixel = d.depth + if d.useTransparent { + nrgba = image.NewNRGBA(image.Rect(0, 0, width, height)) + img = nrgba + } else { + gray = image.NewGray(image.Rect(0, 0, width, height)) + img = gray + } + case cbGA8: + bitsPerPixel = 16 + nrgba = image.NewNRGBA(image.Rect(0, 0, width, height)) + img = nrgba + case cbTC8: + bitsPerPixel = 24 + if d.useTransparent { + nrgba = image.NewNRGBA(image.Rect(0, 0, width, height)) + img = nrgba + } else { + rgba = image.NewRGBA(image.Rect(0, 0, width, height)) + img = rgba + } + case cbP1, cbP2, cbP4, cbP8: + bitsPerPixel = d.depth + paletted = image.NewPaletted(image.Rect(0, 0, width, height), d.palette) + img = paletted + case cbTCA8: + bitsPerPixel = 32 + nrgba = image.NewNRGBA(image.Rect(0, 0, width, height)) + img = nrgba + case cbG16: + bitsPerPixel = 16 + if d.useTransparent { + nrgba64 = image.NewNRGBA64(image.Rect(0, 0, width, height)) + img = nrgba64 + } else { + gray16 = image.NewGray16(image.Rect(0, 0, width, height)) + img = gray16 + } + case cbGA16: + bitsPerPixel = 32 + nrgba64 = image.NewNRGBA64(image.Rect(0, 0, width, height)) + img = nrgba64 + case cbTC16: + bitsPerPixel = 48 + if d.useTransparent { + nrgba64 = image.NewNRGBA64(image.Rect(0, 0, width, height)) + img = nrgba64 + } else { + rgba64 = image.NewRGBA64(image.Rect(0, 0, width, height)) + img = rgba64 + } + case cbTCA16: + bitsPerPixel = 64 + nrgba64 = image.NewNRGBA64(image.Rect(0, 0, width, height)) + img = nrgba64 + } + if allocateOnly { + return img, nil + } + bytesPerPixel := (bitsPerPixel + 7) / 8 + + // The +1 is for the per-row filter type, which is at cr[0]. + rowSize := 1 + (bitsPerPixel*width+7)/8 + // cr and pr are the bytes for the current and previous row. + cr := make([]uint8, rowSize) + pr := make([]uint8, rowSize) + + for y := 0; y < height; y++ { + // Read the decompressed bytes. + _, err := io.ReadFull(r, cr) + if err != nil { + if err == io.EOF || err == io.ErrUnexpectedEOF { + return nil, FormatError("not enough pixel data") + } + return nil, err + } + + // Apply the filter. + cdat := cr[1:] + pdat := pr[1:] + switch cr[0] { + case ftNone: + // No-op. + case ftSub: + for i := bytesPerPixel; i < len(cdat); i++ { + cdat[i] += cdat[i-bytesPerPixel] + } + case ftUp: + for i, p := range pdat { + cdat[i] += p + } + case ftAverage: + // The first column has no column to the left of it, so it is a + // special case. We know that the first column exists because we + // check above that width != 0, and so len(cdat) != 0. + for i := 0; i < bytesPerPixel; i++ { + cdat[i] += pdat[i] / 2 + } + for i := bytesPerPixel; i < len(cdat); i++ { + cdat[i] += uint8((int(cdat[i-bytesPerPixel]) + int(pdat[i])) / 2) + } + case ftPaeth: + filterPaeth(cdat, pdat, bytesPerPixel) + default: + return nil, FormatError("bad filter type") + } + + // Convert from bytes to colors. 
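+		// cdat now holds the defiltered row; each d.cb case below unpacks it
+		// at its bit depth, expanding tRNS transparency into an alpha channel
+		// when useTransparent is set.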
+ switch d.cb { + case cbG1: + if d.useTransparent { + ty := d.transparent[1] + for x := 0; x < width; x += 8 { + b := cdat[x/8] + for x2 := 0; x2 < 8 && x+x2 < width; x2++ { + ycol := (b >> 7) * 0xff + acol := uint8(0xff) + if ycol == ty { + acol = 0x00 + } + nrgba.SetNRGBA(x+x2, y, color.NRGBA{ycol, ycol, ycol, acol}) + b <<= 1 + } + } + } else { + for x := 0; x < width; x += 8 { + b := cdat[x/8] + for x2 := 0; x2 < 8 && x+x2 < width; x2++ { + gray.SetGray(x+x2, y, color.Gray{(b >> 7) * 0xff}) + b <<= 1 + } + } + } + case cbG2: + if d.useTransparent { + ty := d.transparent[1] + for x := 0; x < width; x += 4 { + b := cdat[x/4] + for x2 := 0; x2 < 4 && x+x2 < width; x2++ { + ycol := (b >> 6) * 0x55 + acol := uint8(0xff) + if ycol == ty { + acol = 0x00 + } + nrgba.SetNRGBA(x+x2, y, color.NRGBA{ycol, ycol, ycol, acol}) + b <<= 2 + } + } + } else { + for x := 0; x < width; x += 4 { + b := cdat[x/4] + for x2 := 0; x2 < 4 && x+x2 < width; x2++ { + gray.SetGray(x+x2, y, color.Gray{(b >> 6) * 0x55}) + b <<= 2 + } + } + } + case cbG4: + if d.useTransparent { + ty := d.transparent[1] + for x := 0; x < width; x += 2 { + b := cdat[x/2] + for x2 := 0; x2 < 2 && x+x2 < width; x2++ { + ycol := (b >> 4) * 0x11 + acol := uint8(0xff) + if ycol == ty { + acol = 0x00 + } + nrgba.SetNRGBA(x+x2, y, color.NRGBA{ycol, ycol, ycol, acol}) + b <<= 4 + } + } + } else { + for x := 0; x < width; x += 2 { + b := cdat[x/2] + for x2 := 0; x2 < 2 && x+x2 < width; x2++ { + gray.SetGray(x+x2, y, color.Gray{(b >> 4) * 0x11}) + b <<= 4 + } + } + } + case cbG8: + if d.useTransparent { + ty := d.transparent[1] + for x := 0; x < width; x++ { + ycol := cdat[x] + acol := uint8(0xff) + if ycol == ty { + acol = 0x00 + } + nrgba.SetNRGBA(x, y, color.NRGBA{ycol, ycol, ycol, acol}) + } + } else { + copy(gray.Pix[pixOffset:], cdat) + pixOffset += gray.Stride + } + case cbGA8: + for x := 0; x < width; x++ { + ycol := cdat[2*x+0] + nrgba.SetNRGBA(x, y, color.NRGBA{ycol, ycol, ycol, cdat[2*x+1]}) + } + case cbTC8: + if d.useTransparent { + pix, i, j := nrgba.Pix, pixOffset, 0 + tr, tg, tb := d.transparent[1], d.transparent[3], d.transparent[5] + for x := 0; x < width; x++ { + r := cdat[j+0] + g := cdat[j+1] + b := cdat[j+2] + a := uint8(0xff) + if r == tr && g == tg && b == tb { + a = 0x00 + } + pix[i+0] = r + pix[i+1] = g + pix[i+2] = b + pix[i+3] = a + i += 4 + j += 3 + } + pixOffset += nrgba.Stride + } else { + pix, i, j := rgba.Pix, pixOffset, 0 + for x := 0; x < width; x++ { + pix[i+0] = cdat[j+0] + pix[i+1] = cdat[j+1] + pix[i+2] = cdat[j+2] + pix[i+3] = 0xff + i += 4 + j += 3 + } + pixOffset += rgba.Stride + } + case cbP1: + for x := 0; x < width; x += 8 { + b := cdat[x/8] + for x2 := 0; x2 < 8 && x+x2 < width; x2++ { + idx := b >> 7 + if len(paletted.Palette) <= int(idx) { + paletted.Palette = paletted.Palette[:int(idx)+1] + } + paletted.SetColorIndex(x+x2, y, idx) + b <<= 1 + } + } + case cbP2: + for x := 0; x < width; x += 4 { + b := cdat[x/4] + for x2 := 0; x2 < 4 && x+x2 < width; x2++ { + idx := b >> 6 + if len(paletted.Palette) <= int(idx) { + paletted.Palette = paletted.Palette[:int(idx)+1] + } + paletted.SetColorIndex(x+x2, y, idx) + b <<= 2 + } + } + case cbP4: + for x := 0; x < width; x += 2 { + b := cdat[x/2] + for x2 := 0; x2 < 2 && x+x2 < width; x2++ { + idx := b >> 4 + if len(paletted.Palette) <= int(idx) { + paletted.Palette = paletted.Palette[:int(idx)+1] + } + paletted.SetColorIndex(x+x2, y, idx) + b <<= 4 + } + } + case cbP8: + if len(paletted.Palette) != 255 { + for x := 0; x < width; x++ { + if 
len(paletted.Palette) <= int(cdat[x]) { + paletted.Palette = paletted.Palette[:int(cdat[x])+1] + } + } + } + copy(paletted.Pix[pixOffset:], cdat) + pixOffset += paletted.Stride + case cbTCA8: + copy(nrgba.Pix[pixOffset:], cdat) + pixOffset += nrgba.Stride + case cbG16: + if d.useTransparent { + ty := uint16(d.transparent[0])<<8 | uint16(d.transparent[1]) + for x := 0; x < width; x++ { + ycol := uint16(cdat[2*x+0])<<8 | uint16(cdat[2*x+1]) + acol := uint16(0xffff) + if ycol == ty { + acol = 0x0000 + } + nrgba64.SetNRGBA64(x, y, color.NRGBA64{ycol, ycol, ycol, acol}) + } + } else { + for x := 0; x < width; x++ { + ycol := uint16(cdat[2*x+0])<<8 | uint16(cdat[2*x+1]) + gray16.SetGray16(x, y, color.Gray16{ycol}) + } + } + case cbGA16: + for x := 0; x < width; x++ { + ycol := uint16(cdat[4*x+0])<<8 | uint16(cdat[4*x+1]) + acol := uint16(cdat[4*x+2])<<8 | uint16(cdat[4*x+3]) + nrgba64.SetNRGBA64(x, y, color.NRGBA64{ycol, ycol, ycol, acol}) + } + case cbTC16: + if d.useTransparent { + tr := uint16(d.transparent[0])<<8 | uint16(d.transparent[1]) + tg := uint16(d.transparent[2])<<8 | uint16(d.transparent[3]) + tb := uint16(d.transparent[4])<<8 | uint16(d.transparent[5]) + for x := 0; x < width; x++ { + rcol := uint16(cdat[6*x+0])<<8 | uint16(cdat[6*x+1]) + gcol := uint16(cdat[6*x+2])<<8 | uint16(cdat[6*x+3]) + bcol := uint16(cdat[6*x+4])<<8 | uint16(cdat[6*x+5]) + acol := uint16(0xffff) + if rcol == tr && gcol == tg && bcol == tb { + acol = 0x0000 + } + nrgba64.SetNRGBA64(x, y, color.NRGBA64{rcol, gcol, bcol, acol}) + } + } else { + for x := 0; x < width; x++ { + rcol := uint16(cdat[6*x+0])<<8 | uint16(cdat[6*x+1]) + gcol := uint16(cdat[6*x+2])<<8 | uint16(cdat[6*x+3]) + bcol := uint16(cdat[6*x+4])<<8 | uint16(cdat[6*x+5]) + rgba64.SetRGBA64(x, y, color.RGBA64{rcol, gcol, bcol, 0xffff}) + } + } + case cbTCA16: + for x := 0; x < width; x++ { + rcol := uint16(cdat[8*x+0])<<8 | uint16(cdat[8*x+1]) + gcol := uint16(cdat[8*x+2])<<8 | uint16(cdat[8*x+3]) + bcol := uint16(cdat[8*x+4])<<8 | uint16(cdat[8*x+5]) + acol := uint16(cdat[8*x+6])<<8 | uint16(cdat[8*x+7]) + nrgba64.SetNRGBA64(x, y, color.NRGBA64{rcol, gcol, bcol, acol}) + } + } + + // The current row for y is the previous row for y+1. + pr, cr = cr, pr + } + + return img, nil +} + +// mergePassInto merges a single pass into a full sized image. 
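+// Destination pixels land at (x*xFactor+xOffset, y*yFactor+yOffset), which
+// inverts the coordinate reduction performed in readImagePass.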
+func (d *decoder) mergePassInto(dst image.Image, src image.Image, pass int) { + p := interlacing[pass] + var ( + srcPix []uint8 + dstPix []uint8 + stride int + rect image.Rectangle + bytesPerPixel int + ) + switch target := dst.(type) { + case *image.Alpha: + srcPix = src.(*image.Alpha).Pix + dstPix, stride, rect = target.Pix, target.Stride, target.Rect + bytesPerPixel = 1 + case *image.Alpha16: + srcPix = src.(*image.Alpha16).Pix + dstPix, stride, rect = target.Pix, target.Stride, target.Rect + bytesPerPixel = 2 + case *image.Gray: + srcPix = src.(*image.Gray).Pix + dstPix, stride, rect = target.Pix, target.Stride, target.Rect + bytesPerPixel = 1 + case *image.Gray16: + srcPix = src.(*image.Gray16).Pix + dstPix, stride, rect = target.Pix, target.Stride, target.Rect + bytesPerPixel = 2 + case *image.NRGBA: + srcPix = src.(*image.NRGBA).Pix + dstPix, stride, rect = target.Pix, target.Stride, target.Rect + bytesPerPixel = 4 + case *image.NRGBA64: + srcPix = src.(*image.NRGBA64).Pix + dstPix, stride, rect = target.Pix, target.Stride, target.Rect + bytesPerPixel = 8 + case *image.Paletted: + srcPix = src.(*image.Paletted).Pix + dstPix, stride, rect = target.Pix, target.Stride, target.Rect + bytesPerPixel = 1 + case *image.RGBA: + srcPix = src.(*image.RGBA).Pix + dstPix, stride, rect = target.Pix, target.Stride, target.Rect + bytesPerPixel = 4 + case *image.RGBA64: + srcPix = src.(*image.RGBA64).Pix + dstPix, stride, rect = target.Pix, target.Stride, target.Rect + bytesPerPixel = 8 + } + s, bounds := 0, src.Bounds() + for y := bounds.Min.Y; y < bounds.Max.Y; y++ { + dBase := (y*p.yFactor+p.yOffset-rect.Min.Y)*stride + (p.xOffset-rect.Min.X)*bytesPerPixel + for x := bounds.Min.X; x < bounds.Max.X; x++ { + d := dBase + x*p.xFactor*bytesPerPixel + copy(dstPix[d:], srcPix[s:s+bytesPerPixel]) + s += bytesPerPixel + } + } +} + +func (d *decoder) parseacTL(length uint32) (err error) { + if length != 8 { + return FormatError("bad acTL length") + } + if _, err := io.ReadFull(d.r, d.tmp[:8]); err != nil { + return err + } + + d.num_frames = binary.BigEndian.Uint32(d.tmp[:4]) + d.a.LoopCount = uint(binary.BigEndian.Uint32(d.tmp[4:8])) + + d.crc.Write(d.tmp[:8]) + return d.verifyChecksum() +} + +func (d *decoder) parsefcTL(length uint32) (err error) { + if length != 26 { + return FormatError("bad fcTL length") + } + if _, err := io.ReadFull(d.r, d.tmp[:26]); err != nil { + return err + } + + if d.frame_index >= len(d.a.Frames) { + d.a.Frames = append(d.a.Frames, Frame{}) + } + + d.a.Frames[d.frame_index].IsDefault = false + d.a.Frames[d.frame_index].width = int(int32(binary.BigEndian.Uint32(d.tmp[4:8]))) + d.a.Frames[d.frame_index].height = int(int32(binary.BigEndian.Uint32(d.tmp[8:12]))) + d.a.Frames[d.frame_index].XOffset = int(binary.BigEndian.Uint32(d.tmp[12:16])) + d.a.Frames[d.frame_index].YOffset = int(binary.BigEndian.Uint32(d.tmp[16:20])) + d.a.Frames[d.frame_index].DelayNumerator = binary.BigEndian.Uint16(d.tmp[20:22]) + d.a.Frames[d.frame_index].DelayDenominator = binary.BigEndian.Uint16(d.tmp[22:24]) + d.a.Frames[d.frame_index].DisposeOp = byte(d.tmp[24]) + d.a.Frames[d.frame_index].BlendOp = byte(d.tmp[25]) + + d.crc.Write(d.tmp[:26]) + return d.verifyChecksum() +} + +func (d *decoder) parsefdAT(length uint32) (err error) { + if _, err := io.ReadFull(d.r, d.tmp[:4]); err != nil { + return err + } + d.crc.Write(d.tmp[:4]) + d.idatLength = length - 4 + d.a.Frames[d.frame_index].Image, err = d.decode() + if err != nil { + return err + } + return d.verifyChecksum() +} + +func (d *decoder) 
parseIDAT(length uint32) (err error) { + d.idatLength = length + d.a.Frames[d.frame_index].Image, err = d.decode() + if err != nil { + return err + } + return d.verifyChecksum() +} + +func (d *decoder) parseIEND(length uint32) error { + if length != 0 { + return FormatError("bad IEND length") + } + return d.verifyChecksum() +} + +func (d *decoder) parseChunk() error { + // Read the length and chunk type. + n, err := io.ReadFull(d.r, d.tmp[:8]) + if err != nil { + return err + } + length := binary.BigEndian.Uint32(d.tmp[:4]) + d.crc.Reset() + d.crc.Write(d.tmp[4:8]) + + // Read the chunk data. + switch string(d.tmp[4:8]) { + case "IHDR": + if d.stage != dsStart { + return chunkOrderError + } + d.stage = dsSeenIHDR + return d.parseIHDR(length) + case "PLTE": + if d.stage != dsSeenIHDR { + return chunkOrderError + } + d.stage = dsSeenPLTE + return d.parsePLTE(length) + case "tRNS": + if cbPaletted(d.cb) { + if d.stage != dsSeenPLTE { + return chunkOrderError + } + } else if d.stage != dsSeenIHDR { + return chunkOrderError + } + d.stage = dsSeentRNS + return d.parsetRNS(length) + case "acTL": + if d.stage >= dsSeenIDAT { + return chunkOrderError + } + return d.parseacTL(length) + case "fcTL": + if d.stage >= dsSeenIDAT { + d.frame_index = d.frame_index + 1 + } + return d.parsefcTL(length) + case "fdAT": + if d.stage < dsSeenIDAT { + return chunkOrderError + } + d.stage = dsSeenfdAT + return d.parsefdAT(length) + case "IDAT": + if d.stage < dsSeenIHDR || d.stage > dsSeenIDAT || (d.stage == dsSeenIHDR && cbPaletted(d.cb)) { + return chunkOrderError + } else if d.stage == dsSeenIDAT { + // Ignore trailing zero-length or garbage IDAT chunks. + // + // This does not affect valid PNG images that contain multiple IDAT + // chunks, since the first call to parseIDAT below will consume all + // consecutive IDAT chunks required for decoding the image. + break + } + d.stage = dsSeenIDAT + return d.parseIDAT(length) + case "IEND": + if d.stage < dsSeenIDAT { + return chunkOrderError + } + d.stage = dsSeenIEND + return d.parseIEND(length) + } + if length > 0x7fffffff { + return FormatError(fmt.Sprintf("Bad chunk length: %d", length)) + } + // Ignore this chunk (of a known length). + var ignored [4096]byte + for length > 0 { + n, err = io.ReadFull(d.r, ignored[:min(len(ignored), int(length))]) + if err != nil { + return err + } + d.crc.Write(ignored[:n]) + length -= uint32(n) + } + return d.verifyChecksum() +} + +func (d *decoder) verifyChecksum() error { + if _, err := io.ReadFull(d.r, d.tmp[:4]); err != nil { + return err + } + if binary.BigEndian.Uint32(d.tmp[:4]) != d.crc.Sum32() { + return FormatError("invalid checksum") + } + return nil +} + +func (d *decoder) checkHeader() error { + _, err := io.ReadFull(d.r, d.tmp[:len(pngHeader)]) + if err != nil { + return err + } + if string(d.tmp[:len(pngHeader)]) != pngHeader { + return FormatError("not a PNG file") + } + return nil +} + +// Decode reads an APNG file from r and returns it as an APNG +// Type. If the first frame returns true for IsDefault(), that +// frame should not be part of the a. +// The type of Image returned depends on the PNG contents. 
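+// For a plain PNG input the returned APNG holds exactly one Frame with the
+// still image and IsDefault left true.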
+func DecodeAll(r io.Reader) (APNG, error) { + d := &decoder{ + r: r, + crc: crc32.NewIEEE(), + frame_index: 0, + a: APNG{Frames: make([]Frame, 1)}, + } + d.a.Frames[0].IsDefault = true + if err := d.checkHeader(); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return d.a, err + } + for d.stage != dsSeenIEND { + if err := d.parseChunk(); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return d.a, err + } + } + return d.a, nil +} + +// Decode reads an APNG file from r and returns the default image. +func Decode(r io.Reader) (image.Image, error) { + a, err := DecodeAll(r) + if err != nil { + return nil, err + } + return a.Frames[0].Image, nil +} + +// DecodeConfig returns the color model and dimensions of a PNG image without +// decoding the entire image. +func DecodeConfig(r io.Reader) (image.Config, error) { + d := &decoder{ + r: r, + crc: crc32.NewIEEE(), + } + if err := d.checkHeader(); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return image.Config{}, err + } + for { + if err := d.parseChunk(); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return image.Config{}, err + } + paletted := cbPaletted(d.cb) + if d.stage == dsSeenIHDR && !paletted { + break + } + if d.stage == dsSeenPLTE && paletted { + break + } + } + var cm color.Model + switch d.cb { + case cbG1, cbG2, cbG4, cbG8: + cm = color.GrayModel + case cbGA8: + cm = color.NRGBAModel + case cbTC8: + cm = color.RGBAModel + case cbP1, cbP2, cbP4, cbP8: + cm = d.palette + case cbTCA8: + cm = color.NRGBAModel + case cbG16: + cm = color.Gray16Model + case cbGA16: + cm = color.NRGBA64Model + case cbTC16: + cm = color.RGBA64Model + case cbTCA16: + cm = color.NRGBA64Model + } + return image.Config{ + ColorModel: cm, + Width: int(d.a.Frames[0].width), + Height: int(d.a.Frames[0].height), + }, nil +} + +func init() { + image.RegisterFormat("apng", pngHeader, Decode, DecodeConfig) +} diff --git a/vendor/github.com/kettek/apng/writer.go b/vendor/github.com/kettek/apng/writer.go new file mode 100644 index 00000000..f7eee931 --- /dev/null +++ b/vendor/github.com/kettek/apng/writer.go @@ -0,0 +1,713 @@ +// Original PNG code Copyright 2009 The Go Authors. +// Additional APNG enhancements Copyright 2018 Ketchetwahmeegwun +// Tecumseh Southall / kts of kettek. +// All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package apng + +import ( + "bufio" + "compress/zlib" + "encoding/binary" + "hash/crc32" + "image" + "image/color" + "io" + "strconv" +) + +// Encoder configures encoding PNG images. +type Encoder struct { + CompressionLevel CompressionLevel + + // BufferPool optionally specifies a buffer pool to get temporary + // EncoderBuffers when encoding an image. + BufferPool EncoderBufferPool +} + +// EncoderBufferPool is an interface for getting and returning temporary +// instances of the EncoderBuffer struct. This can be used to reuse buffers +// when encoding multiple images. +type EncoderBufferPool interface { + Get() *EncoderBuffer + Put(*EncoderBuffer) +} + +// EncoderBuffer holds the buffers used for encoding PNG images. 
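+// Encode converts it to and from the unexported encoder type, so callers
+// can recycle buffers through an EncoderBufferPool without depending on
+// the encoder's internals.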
+type EncoderBuffer encoder + +type encoder struct { + enc *Encoder + w io.Writer + a APNG + write_type int // 0 = IDAT, 1 = fdAT + seq int + cb int + err error + header [8]byte + footer [4]byte + tmp [4 * 256]byte + cr [nFilter][]uint8 + pr []uint8 + zw *zlib.Writer + zwLevel int + bw *bufio.Writer +} + +type CompressionLevel int + +const ( + DefaultCompression CompressionLevel = 0 + NoCompression CompressionLevel = -1 + BestSpeed CompressionLevel = -2 + BestCompression CompressionLevel = -3 + + // Positive CompressionLevel values are reserved to mean a numeric zlib + // compression level, although that is not implemented yet. +) + +type opaquer interface { + Opaque() bool +} + +// Returns whether or not the image is fully opaque. +func opaque(m image.Image) bool { + if o, ok := m.(opaquer); ok { + return o.Opaque() + } + b := m.Bounds() + for y := b.Min.Y; y < b.Max.Y; y++ { + for x := b.Min.X; x < b.Max.X; x++ { + _, _, _, a := m.At(x, y).RGBA() + if a != 0xffff { + return false + } + } + } + return true +} + +// The absolute value of a byte interpreted as a signed int8. +func abs8(d uint8) int { + if d < 128 { + return int(d) + } + return 256 - int(d) +} + +func (e *encoder) writeChunk(b []byte, name string) { + if e.err != nil { + return + } + n := uint32(len(b)) + if int(n) != len(b) { + e.err = UnsupportedError(name + " chunk is too large: " + strconv.Itoa(len(b))) + return + } + binary.BigEndian.PutUint32(e.header[:4], n) + e.header[4] = name[0] + e.header[5] = name[1] + e.header[6] = name[2] + e.header[7] = name[3] + crc := crc32.NewIEEE() + crc.Write(e.header[4:8]) + crc.Write(b) + binary.BigEndian.PutUint32(e.footer[:4], crc.Sum32()) + + _, e.err = e.w.Write(e.header[:8]) + if e.err != nil { + return + } + _, e.err = e.w.Write(b) + if e.err != nil { + return + } + _, e.err = e.w.Write(e.footer[:4]) +} + +func (e *encoder) writeIHDR() { + b := e.a.Frames[0].Image.Bounds() + binary.BigEndian.PutUint32(e.tmp[0:4], uint32(b.Dx())) + binary.BigEndian.PutUint32(e.tmp[4:8], uint32(b.Dy())) + // Set bit depth and color type. 
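+	// e.tmp[8] holds the bit depth byte and e.tmp[9] the PNG color type; the
+	// pairs below mirror the decoder's cb* constants.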
+ switch e.cb { + case cbG8: + e.tmp[8] = 8 + e.tmp[9] = ctGrayscale + case cbTC8: + e.tmp[8] = 8 + e.tmp[9] = ctTrueColor + case cbP8: + e.tmp[8] = 8 + e.tmp[9] = ctPaletted + case cbP4: + e.tmp[8] = 4 + e.tmp[9] = ctPaletted + case cbP2: + e.tmp[8] = 2 + e.tmp[9] = ctPaletted + case cbP1: + e.tmp[8] = 1 + e.tmp[9] = ctPaletted + case cbTCA8: + e.tmp[8] = 8 + e.tmp[9] = ctTrueColorAlpha + case cbG16: + e.tmp[8] = 16 + e.tmp[9] = ctGrayscale + case cbTC16: + e.tmp[8] = 16 + e.tmp[9] = ctTrueColor + case cbTCA16: + e.tmp[8] = 16 + e.tmp[9] = ctTrueColorAlpha + } + e.tmp[10] = 0 // default compression method + e.tmp[11] = 0 // default filter method + e.tmp[12] = 0 // non-interlaced + e.writeChunk(e.tmp[:13], "IHDR") +} + +func (e *encoder) writeacTL() { + binary.BigEndian.PutUint32(e.tmp[0:4], uint32(len(e.a.Frames))) + binary.BigEndian.PutUint32(e.tmp[4:8], uint32(e.a.LoopCount)) + e.writeChunk(e.tmp[:8], "acTL") +} + +func (e *encoder) writefcTL(f Frame) { + binary.BigEndian.PutUint32(e.tmp[0:4], uint32(e.seq)) + e.seq = e.seq + 1 + b := f.Image.Bounds() + binary.BigEndian.PutUint32(e.tmp[4:8], uint32(b.Dx())) + binary.BigEndian.PutUint32(e.tmp[8:12], uint32(b.Dy())) + binary.BigEndian.PutUint32(e.tmp[12:16], uint32(f.XOffset)) + binary.BigEndian.PutUint32(e.tmp[16:20], uint32(f.YOffset)) + binary.BigEndian.PutUint16(e.tmp[20:22], uint16(f.DelayNumerator)) + binary.BigEndian.PutUint16(e.tmp[22:24], uint16(f.DelayDenominator)) + e.tmp[24] = f.DisposeOp + e.tmp[25] = f.BlendOp + e.writeChunk(e.tmp[:26], "fcTL") +} + +func (e *encoder) writefdATs(f Frame) { + e.write_type = 1 + if e.err != nil { + return + } + if e.bw == nil { + e.bw = bufio.NewWriterSize(e, 1<<15) + } else { + e.bw.Reset(e) + } + e.err = e.writeImage(e.bw, f.Image, e.cb, levelToZlib(e.enc.CompressionLevel)) + if e.err != nil { + return + } + e.err = e.bw.Flush() +} + +func (e *encoder) writePLTEAndTRNS(p color.Palette) { + if len(p) < 1 || len(p) > 256 { + e.err = FormatError("bad palette length: " + strconv.Itoa(len(p))) + return + } + last := -1 + for i, c := range p { + c1 := color.NRGBAModel.Convert(c).(color.NRGBA) + e.tmp[3*i+0] = c1.R + e.tmp[3*i+1] = c1.G + e.tmp[3*i+2] = c1.B + if c1.A != 0xff { + last = i + } + e.tmp[3*256+i] = c1.A + } + e.writeChunk(e.tmp[:3*len(p)], "PLTE") + if last != -1 { + e.writeChunk(e.tmp[3*256:3*256+1+last], "tRNS") + } +} + +// An encoder is an io.Writer that satisfies writes by writing PNG IDAT chunks, +// including an 8-byte header and 4-byte CRC checksum per Write call. Such calls +// should be relatively infrequent, since writeIDATs uses a bufio.Writer. +// +// This method should only be called from writeIDATs (via writeImage). +// No other code should treat an encoder as an io.Writer. +func (e *encoder) Write(b []byte) (int, error) { + if e.write_type == 0 { + e.writeChunk(b, "IDAT") + } else { + c := make([]byte, 4) + binary.BigEndian.PutUint32(c[0:4], uint32(e.seq)) + e.seq = e.seq + 1 + b = append(c, b...) + e.writeChunk(b, "fdAT") + } + if e.err != nil { + return 0, e.err + } + return len(b), nil +} + +// Chooses the filter to use for encoding the current row, and applies it. +// The return value is the index of the filter and also of the row in cr that has had it applied. +func filter(cr *[nFilter][]byte, pr []byte, bpp int) int { + // We try all five filter types, and pick the one that minimizes the sum of absolute differences. 
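+	// Residuals are interpreted as signed int8 values (see abs8), so rows of
+	// small positive and negative deltas score near zero.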
+ // This is the same heuristic that libpng uses, although the filters are attempted in order of + // estimated most likely to be minimal (ftUp, ftPaeth, ftNone, ftSub, ftAverage), rather than + // in their enumeration order (ftNone, ftSub, ftUp, ftAverage, ftPaeth). + cdat0 := cr[0][1:] + cdat1 := cr[1][1:] + cdat2 := cr[2][1:] + cdat3 := cr[3][1:] + cdat4 := cr[4][1:] + pdat := pr[1:] + n := len(cdat0) + + // The up filter. + sum := 0 + for i := 0; i < n; i++ { + cdat2[i] = cdat0[i] - pdat[i] + sum += abs8(cdat2[i]) + } + best := sum + filter := ftUp + + // The Paeth filter. + sum = 0 + for i := 0; i < bpp; i++ { + cdat4[i] = cdat0[i] - pdat[i] + sum += abs8(cdat4[i]) + } + for i := bpp; i < n; i++ { + cdat4[i] = cdat0[i] - paeth(cdat0[i-bpp], pdat[i], pdat[i-bpp]) + sum += abs8(cdat4[i]) + if sum >= best { + break + } + } + if sum < best { + best = sum + filter = ftPaeth + } + + // The none filter. + sum = 0 + for i := 0; i < n; i++ { + sum += abs8(cdat0[i]) + if sum >= best { + break + } + } + if sum < best { + best = sum + filter = ftNone + } + + // The sub filter. + sum = 0 + for i := 0; i < bpp; i++ { + cdat1[i] = cdat0[i] + sum += abs8(cdat1[i]) + } + for i := bpp; i < n; i++ { + cdat1[i] = cdat0[i] - cdat0[i-bpp] + sum += abs8(cdat1[i]) + if sum >= best { + break + } + } + if sum < best { + best = sum + filter = ftSub + } + + // The average filter. + sum = 0 + for i := 0; i < bpp; i++ { + cdat3[i] = cdat0[i] - pdat[i]/2 + sum += abs8(cdat3[i]) + } + for i := bpp; i < n; i++ { + cdat3[i] = cdat0[i] - uint8((int(cdat0[i-bpp])+int(pdat[i]))/2) + sum += abs8(cdat3[i]) + if sum >= best { + break + } + } + if sum < best { + best = sum + filter = ftAverage + } + + return filter +} + +func zeroMemory(v []uint8) { + for i := range v { + v[i] = 0 + } +} + +func (e *encoder) writeImage(w io.Writer, m image.Image, cb int, level int) error { + if e.zw == nil || e.zwLevel != level { + zw, err := zlib.NewWriterLevel(w, level) + if err != nil { + return err + } + e.zw = zw + e.zwLevel = level + } else { + e.zw.Reset(w) + } + defer e.zw.Close() + + bitsPerPixel := 0 + + switch cb { + case cbG8: + bitsPerPixel = 8 + case cbTC8: + bitsPerPixel = 24 + case cbP8: + bitsPerPixel = 8 + case cbP4: + bitsPerPixel = 4 + case cbP2: + bitsPerPixel = 2 + case cbP1: + bitsPerPixel = 1 + case cbTCA8: + bitsPerPixel = 32 + case cbTC16: + bitsPerPixel = 48 + case cbTCA16: + bitsPerPixel = 64 + case cbG16: + bitsPerPixel = 16 + } + + // cr[*] and pr are the bytes for the current and previous row. + // cr[0] is unfiltered (or equivalently, filtered with the ftNone filter). + // cr[ft], for non-zero filter types ft, are buffers for transforming cr[0] under the + // other PNG filter types. These buffers are allocated once and re-used for each row. + // The +1 is for the per-row filter type, which is at cr[*][0]. + b := m.Bounds() + sz := 1 + (bitsPerPixel*b.Dx()+7)/8 + for i := range e.cr { + if cap(e.cr[i]) < sz { + e.cr[i] = make([]uint8, sz) + } else { + e.cr[i] = e.cr[i][:sz] + } + e.cr[i][0] = uint8(i) + } + cr := e.cr + if cap(e.pr) < sz { + e.pr = make([]uint8, sz) + } else { + e.pr = e.pr[:sz] + zeroMemory(e.pr) + } + pr := e.pr + + gray, _ := m.(*image.Gray) + rgba, _ := m.(*image.RGBA) + paletted, _ := m.(*image.Paletted) + nrgba, _ := m.(*image.NRGBA) + + for y := b.Min.Y; y < b.Max.Y; y++ { + // Convert from colors to bytes. 
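+		// i starts at 1 because cr[0][0] holds the per-row filter type.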
+		i := 1
+		switch cb {
+		case cbG8:
+			if gray != nil {
+				offset := (y - b.Min.Y) * gray.Stride
+				copy(cr[0][1:], gray.Pix[offset:offset+b.Dx()])
+			} else {
+				for x := b.Min.X; x < b.Max.X; x++ {
+					c := color.GrayModel.Convert(m.At(x, y)).(color.Gray)
+					cr[0][i] = c.Y
+					i++
+				}
+			}
+		case cbTC8:
+			// We have previously verified that the alpha value is fully opaque.
+			cr0 := cr[0]
+			stride, pix := 0, []byte(nil)
+			if rgba != nil {
+				stride, pix = rgba.Stride, rgba.Pix
+			} else if nrgba != nil {
+				stride, pix = nrgba.Stride, nrgba.Pix
+			}
+			if stride != 0 {
+				j0 := (y - b.Min.Y) * stride
+				j1 := j0 + b.Dx()*4
+				for j := j0; j < j1; j += 4 {
+					cr0[i+0] = pix[j+0]
+					cr0[i+1] = pix[j+1]
+					cr0[i+2] = pix[j+2]
+					i += 3
+				}
+			} else {
+				for x := b.Min.X; x < b.Max.X; x++ {
+					r, g, b, _ := m.At(x, y).RGBA()
+					cr0[i+0] = uint8(r >> 8)
+					cr0[i+1] = uint8(g >> 8)
+					cr0[i+2] = uint8(b >> 8)
+					i += 3
+				}
+			}
+		case cbP8:
+			if paletted != nil {
+				offset := (y - b.Min.Y) * paletted.Stride
+				copy(cr[0][1:], paletted.Pix[offset:offset+b.Dx()])
+			} else {
+				pi := m.(image.PalettedImage)
+				for x := b.Min.X; x < b.Max.X; x++ {
+					cr[0][i] = pi.ColorIndexAt(x, y)
+					i += 1
+				}
+			}
+
+		case cbP4, cbP2, cbP1:
+			pi := m.(image.PalettedImage)
+
+			var a uint8
+			var c int
+			pixelsPerByte := 8 / bitsPerPixel
+			for x := b.Min.X; x < b.Max.X; x++ {
+				a = a<<uint(bitsPerPixel) | pi.ColorIndexAt(x, y)
+				c++
+				if c == pixelsPerByte {
+					cr[0][i] = a
+					i += 1
+					a = 0
+					c = 0
+				}
+			}
+			if c != 0 {
+				for c != pixelsPerByte {
+					a = a << uint(bitsPerPixel)
+					c++
+				}
+				cr[0][i] = a
+			}
+		case cbTCA8:
+			if nrgba != nil {
+				offset := (y - b.Min.Y) * nrgba.Stride
+				copy(cr[0][1:], nrgba.Pix[offset:offset+b.Dx()*4])
+			} else {
+				// Convert from image.Image (which is alpha-premultiplied) to PNG's non-alpha-premultiplied.
+				for x := b.Min.X; x < b.Max.X; x++ {
+					c := color.NRGBAModel.Convert(m.At(x, y)).(color.NRGBA)
+					cr[0][i+0] = c.R
+					cr[0][i+1] = c.G
+					cr[0][i+2] = c.B
+					cr[0][i+3] = c.A
+					i += 4
+				}
+			}
+		case cbG16:
+			for x := b.Min.X; x < b.Max.X; x++ {
+				c := color.Gray16Model.Convert(m.At(x, y)).(color.Gray16)
+				cr[0][i+0] = uint8(c.Y >> 8)
+				cr[0][i+1] = uint8(c.Y)
+				i += 2
+			}
+		case cbTC16:
+			// We have previously verified that the alpha value is fully opaque.
+			for x := b.Min.X; x < b.Max.X; x++ {
+				r, g, b, _ := m.At(x, y).RGBA()
+				cr[0][i+0] = uint8(r >> 8)
+				cr[0][i+1] = uint8(r)
+				cr[0][i+2] = uint8(g >> 8)
+				cr[0][i+3] = uint8(g)
+				cr[0][i+4] = uint8(b >> 8)
+				cr[0][i+5] = uint8(b)
+				i += 6
+			}
+		case cbTCA16:
+			// Convert from image.Image (which is alpha-premultiplied) to PNG's non-alpha-premultiplied.
+			for x := b.Min.X; x < b.Max.X; x++ {
+				c := color.NRGBA64Model.Convert(m.At(x, y)).(color.NRGBA64)
+				cr[0][i+0] = uint8(c.R >> 8)
+				cr[0][i+1] = uint8(c.R)
+				cr[0][i+2] = uint8(c.G >> 8)
+				cr[0][i+3] = uint8(c.G)
+				cr[0][i+4] = uint8(c.B >> 8)
+				cr[0][i+5] = uint8(c.B)
+				cr[0][i+6] = uint8(c.A >> 8)
+				cr[0][i+7] = uint8(c.A)
+				i += 8
+			}
+		}
+
+		// Apply the filter.
+		// Skip filter for NoCompression and paletted images (cbP8) as
+		// "filters are rarely useful on palette images" and will result
+		// in larger files (see http://www.libpng.org/pub/png/book/chapter09.html).
+		f := ftNone
+		if level != zlib.NoCompression && cb != cbP8 && cb != cbP4 && cb != cbP2 && cb != cbP1 {
+			// Since we skip paletted images we don't have to worry about
+			// bitsPerPixel not being a multiple of 8
+			bpp := bitsPerPixel / 8
+			f = filter(&cr, pr, bpp)
+		}
+
+		// Write the compressed bytes.
+		if _, err := e.zw.Write(cr[f]); err != nil {
+			return err
+		}
+
+		// The current row for y is the previous row for y+1.
+		pr, cr[0] = cr[0], pr
+	}
+	return nil
+}
+
+// Write the actual image data to one or more IDAT chunks.
+func (e *encoder) writeIDATs() {
+	e.write_type = 0
+	if e.err != nil {
+		return
+	}
+	if e.bw == nil {
+		e.bw = bufio.NewWriterSize(e, 1<<15)
+	} else {
+		e.bw.Reset(e)
+	}
+	e.err = e.writeImage(e.bw, e.a.Frames[0].Image, e.cb, levelToZlib(e.enc.CompressionLevel))
+	if e.err != nil {
+		return
+	}
+	e.err = e.bw.Flush()
+}
+
+// This function is required because we want the zero value of
+// Encoder.CompressionLevel to map to zlib.DefaultCompression.
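+// The CompressionLevel constants are therefore zero or negative, and are
+// remapped here onto the corresponding compress/zlib levels.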
+func levelToZlib(l CompressionLevel) int { + switch l { + case DefaultCompression: + return zlib.DefaultCompression + case NoCompression: + return zlib.NoCompression + case BestSpeed: + return zlib.BestSpeed + case BestCompression: + return zlib.BestCompression + default: + return zlib.DefaultCompression + } +} + +func (e *encoder) writeIEND() { e.writeChunk(nil, "IEND") } + +// Encode writes the APNG a to w in PNG format. Any Image may be +// encoded, but images that are not image.NRGBA might be encoded lossily. +func Encode(w io.Writer, a APNG) error { + var e Encoder + return e.Encode(w, a) +} + +// Encode writes the Animation a to w in PNG format. +func (enc *Encoder) Encode(w io.Writer, a APNG) error { + // Obviously, negative widths and heights are invalid. Furthermore, the PNG + // spec section 11.2.2 says that zero is invalid. Excessively large images are + // also rejected. + mw, mh := int64(a.Frames[0].Image.Bounds().Dx()), int64(a.Frames[0].Image.Bounds().Dy()) + if mw <= 0 || mh <= 0 || mw >= 1<<32 || mh >= 1<<32 { + return FormatError("invalid image size: " + strconv.FormatInt(mw, 10) + "x" + strconv.FormatInt(mh, 10)) + } + + var e *encoder + if enc.BufferPool != nil { + buffer := enc.BufferPool.Get() + e = (*encoder)(buffer) + + } + if e == nil { + e = &encoder{} + } + if enc.BufferPool != nil { + defer enc.BufferPool.Put((*EncoderBuffer)(e)) + } + + e.enc = enc + e.w = w + e.a = a + + var pal color.Palette + // cbP8 encoding needs PalettedImage's ColorIndexAt method. + if _, ok := a.Frames[0].Image.(image.PalettedImage); ok { + pal, _ = a.Frames[0].Image.ColorModel().(color.Palette) + } + if pal != nil { + if len(pal) <= 2 { + e.cb = cbP1 + } else if len(pal) <= 4 { + e.cb = cbP2 + } else if len(pal) <= 16 { + e.cb = cbP4 + } else { + e.cb = cbP8 + } + } else { + switch a.Frames[0].Image.ColorModel() { + case color.GrayModel: + e.cb = cbG8 + case color.Gray16Model: + e.cb = cbG16 + case color.RGBAModel, color.NRGBAModel, color.AlphaModel: + isOpaque := true + for _, v := range a.Frames { + if !opaque(v.Image) { + isOpaque = false + break + } + } + if isOpaque { + e.cb = cbTC8 + } else { + e.cb = cbTCA8 + } + default: + isOpaque := true + for _, v := range a.Frames { + if !opaque(v.Image) { + isOpaque = false + break + } + } + if isOpaque { + e.cb = cbTC16 + } else { + e.cb = cbTCA16 + } + } + } + + _, e.err = io.WriteString(w, pngHeader) + e.writeIHDR() + if pal != nil { + e.writePLTEAndTRNS(pal) + } + if len(e.a.Frames) > 1 { + e.writeacTL() + } + if !e.a.Frames[0].IsDefault { + e.writefcTL(e.a.Frames[0]) + } + e.writeIDATs() + for i := 0; i < len(e.a.Frames); i = i + 1 { + if i != 0 && !e.a.Frames[i].IsDefault { + e.writefcTL(e.a.Frames[i]) + e.writefdATs(e.a.Frames[i]) + } + } + e.writeIEND() + return e.err +} diff --git a/vendor/github.com/sizeofint/webpanimation/.gitignore b/vendor/github.com/sizeofint/webpanimation/.gitignore new file mode 100644 index 00000000..723ef36f --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/.gitignore @@ -0,0 +1 @@ +.idea \ No newline at end of file diff --git a/vendor/github.com/sizeofint/webpanimation/LICENSE b/vendor/github.com/sizeofint/webpanimation/LICENSE new file mode 100644 index 00000000..9b3f99d6 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2021, george.imerlishvili@gmail.com. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of {organization}. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/sizeofint/webpanimation/capi.go b/vendor/github.com/sizeofint/webpanimation/capi.go new file mode 100644 index 00000000..eac21b5e --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/capi.go @@ -0,0 +1,362 @@ +package webpanimation + +/* +#cgo CFLAGS: -Wno-pointer-sign -DHAVE_CONFIG_H +#cgo !windows LDFLAGS: -lm + +#include "webp_encode.h" +#include "webp_mux.h" +*/ +import "C" +import ( + "errors" + "unsafe" +) + +type WebPMuxError int + +const ( + WebpMuxAbiVersion = 0x0108 + WebpEncoderAbiVersion = 0x020f +) + +const ( + WebpMuxOk = WebPMuxError(C.WEBP_MUX_OK) + WebpMuxNotFound = WebPMuxError(C.WEBP_MUX_NOT_FOUND) + WebpMuxInvalidArgument = WebPMuxError(C.WEBP_MUX_INVALID_ARGUMENT) + WebpMuxBadData = WebPMuxError(C.WEBP_MUX_BAD_DATA) + WebpMuxMemoryError = WebPMuxError(C.WEBP_MUX_MEMORY_ERROR) + WebpMuxNotEnoughData = WebPMuxError(C.WEBP_MUX_NOT_ENOUGH_DATA) +) + +type WebPPicture C.WebPPicture +type WebPAnimEncoder C.WebPAnimEncoder +type WebPAnimEncoderOptions C.WebPAnimEncoderOptions +type WebPData C.WebPData +type WebPMux C.WebPMux +type WebPMuxAnimParams C.WebPMuxAnimParams +type webPConfig struct { + webpConfig *C.WebPConfig +} + +type WebPConfig interface { + getRawPointer() *C.WebPConfig + SetLossless(v int) + GetLossless() int + SetMethod(v int) + SetImageHint(v int) + SetTargetSize(v int) + SetTargetPSNR(v float32) + SetSegments(v int) + SetSnsStrength(v int) + SetFilterStrength(v int) + SetFilterSharpness(v int) + SetAutofilter(v int) + SetAlphaCompression(v int) + SetAlphaFiltering(v int) + SetPass(v int) + SetShowCompressed(v int) + SetPreprocessing(v int) + SetPartitions(v int) + SetPartitionLimit(v int) + SetEmulateJpegSize(v int) + SetThreadLevel(v int) + SetLowMemory(v int) + SetNearLossless(v int) + SetExact(v int) + SetUseDeltaPalette(v int) + SetUseSharpYuv(v int) + SetAlphaQuality(v int) + SetFilterType(v int) + SetQuality(v float32) +} + +func WebPDataClear(webPData *WebPData) { + C.WebPDataClear((*C.WebPData)(unsafe.Pointer(webPData))) +} + +func WebPMuxDelete(webPMux *WebPMux) { + 
C.WebPMuxDelete((*C.WebPMux)(unsafe.Pointer(webPMux))) +} + +func WebPPictureFree(webPPicture *WebPPicture) { + C.WebPPictureFree((*C.WebPPicture)(unsafe.Pointer(webPPicture))) +} + +func WebPAnimEncoderDelete(webPAnimEncoder *WebPAnimEncoder) { + C.WebPAnimEncoderDelete((*C.WebPAnimEncoder)(unsafe.Pointer(webPAnimEncoder))) +} + +func (wmap *WebPMuxAnimParams) SetBgcolor(v uint32) { + ((*C.WebPMuxAnimParams)(wmap)).bgcolor = (C.uint32_t)(v) +} + +func (wmap *WebPMuxAnimParams) SetLoopCount(v int) { + (*C.WebPMuxAnimParams)(wmap).loop_count = (C.int)(v) +} + +func WebPPictureInit(webPPicture *WebPPicture) int { + return int(C.WebPPictureInit((*C.WebPPicture)(unsafe.Pointer(webPPicture)))) +} + +func (wpp *WebPPicture) SetWidth(v int) { + ((*C.WebPPicture)(wpp)).width = (C.int)(v) +} + +func (wpp *WebPPicture) SetHeight(v int) { + ((*C.WebPPicture)(wpp)).height = (C.int)(v) +} + +func (wpp WebPPicture) GetWidth() int { + return int(((C.WebPPicture)(wpp)).width) +} + +func (wpp WebPPicture) GetHeight() int { + return int(((C.WebPPicture)(wpp)).height) +} + +func (wpp *WebPPicture) SetUseArgb(v int) { + ((*C.WebPPicture)(wpp)).use_argb = (C.int)(v) +} + +func (wpd WebPData) GetBytes() []byte { + return C.GoBytes(unsafe.Pointer(((C.WebPData)(wpd)).bytes), (C.int)(((C.WebPData)(wpd)).size)) +} + +func WebPDataInit(webPData *WebPData) { + C.WebPDataInit((*C.WebPData)(unsafe.Pointer(webPData))) +} + +// NewWebpConfig create webpconfig instance +func NewWebpConfig() WebPConfig { + webpcfg := &webPConfig{} + webpcfg.webpConfig = &C.WebPConfig{} + WebPConfigInitInternal(webpcfg) + return webpcfg +} + +func WebPConfigInitInternal(config WebPConfig) int { + return int(C.WebPConfigInitInternal( + config.getRawPointer(), + (C.WebPPreset)(0), + (C.float)(75.0), + (C.int)(WebpEncoderAbiVersion), + )) +} + +func (webpCfg *webPConfig) getRawPointer() *C.WebPConfig { + return webpCfg.webpConfig +} + +func (webpCfg *webPConfig) SetLossless(v int) { + webpCfg.webpConfig.lossless = (C.int)(v) +} + +func (webpCfg *webPConfig) GetLossless() int { + return int(webpCfg.webpConfig.lossless) +} + +func (webpCfg *webPConfig) SetMethod(v int) { + webpCfg.webpConfig.method = (C.int)(v) +} + +func (webpCfg *webPConfig) SetImageHint(v int) { + webpCfg.webpConfig.image_hint = (C.WebPImageHint)(v) +} + +func (webpCfg *webPConfig) SetTargetSize(v int) { + webpCfg.webpConfig.target_size = (C.int)(v) +} + +func (webpCfg *webPConfig) SetTargetPSNR(v float32) { + webpCfg.webpConfig.target_PSNR = (C.float)(v) +} + +func (webpCfg *webPConfig) SetSegments(v int) { + webpCfg.webpConfig.segments = (C.int)(v) +} + +func (webpCfg *webPConfig) SetSnsStrength(v int) { + webpCfg.webpConfig.sns_strength = (C.int)(v) +} + +func (webpCfg *webPConfig) SetFilterStrength(v int) { + webpCfg.webpConfig.filter_strength = (C.int)(v) +} + +func (webpCfg *webPConfig) SetFilterSharpness(v int) { + webpCfg.webpConfig.filter_sharpness = (C.int)(v) +} + +func (webpCfg *webPConfig) SetAutofilter(v int) { + webpCfg.webpConfig.autofilter = (C.int)(v) +} + +func (webpCfg *webPConfig) SetAlphaCompression(v int) { + webpCfg.webpConfig.alpha_compression = (C.int)(v) +} + +func (webpCfg *webPConfig) SetAlphaFiltering(v int) { + webpCfg.webpConfig.alpha_filtering = (C.int)(v) +} + +func (webpCfg *webPConfig) SetPass(v int) { + webpCfg.webpConfig.pass = (C.int)(v) +} + +func (webpCfg *webPConfig) SetShowCompressed(v int) { + webpCfg.webpConfig.show_compressed = (C.int)(v) +} + +func (webpCfg *webPConfig) SetPreprocessing(v int) { + webpCfg.webpConfig.preprocessing 
= (C.int)(v) +} + +func (webpCfg *webPConfig) SetPartitions(v int) { + webpCfg.webpConfig.partitions = (C.int)(v) +} + +func (webpCfg *webPConfig) SetPartitionLimit(v int) { + webpCfg.webpConfig.partition_limit = (C.int)(v) +} + +func (webpCfg *webPConfig) SetEmulateJpegSize(v int) { + webpCfg.webpConfig.emulate_jpeg_size = (C.int)(v) +} + +func (webpCfg *webPConfig) SetThreadLevel(v int) { + webpCfg.webpConfig.thread_level = (C.int)(v) +} + +func (webpCfg *webPConfig) SetLowMemory(v int) { + webpCfg.webpConfig.low_memory = (C.int)(v) +} + +func (webpCfg *webPConfig) SetNearLossless(v int) { + webpCfg.webpConfig.near_lossless = (C.int)(v) +} + +func (webpCfg *webPConfig) SetExact(v int) { + webpCfg.webpConfig.exact = (C.int)(v) +} + +func (webpCfg *webPConfig) SetUseDeltaPalette(v int) { + webpCfg.webpConfig.use_delta_palette = (C.int)(v) +} + +func (webpCfg *webPConfig) SetUseSharpYuv(v int) { + webpCfg.webpConfig.use_sharp_yuv = (C.int)(v) +} + +func (webpCfg *webPConfig) SetAlphaQuality(v int) { + webpCfg.webpConfig.alpha_quality = (C.int)(v) +} + +func (webpCfg *webPConfig) SetFilterType(v int) { + webpCfg.webpConfig.filter_type = (C.int)(v) +} + +func (webpCfg *webPConfig) SetQuality(v float32) { + webpCfg.webpConfig.quality = (C.float)(v) +} + +func (encOptions *WebPAnimEncoderOptions) GetAnimParams() WebPMuxAnimParams { + return WebPMuxAnimParams(((*C.WebPAnimEncoderOptions)(encOptions)).anim_params) +} + +func (encOptions *WebPAnimEncoderOptions) SetAnimParams(v WebPMuxAnimParams) { + ((*C.WebPAnimEncoderOptions)(encOptions)).anim_params = (C.WebPMuxAnimParams)(v) +} + +func (encOptions *WebPAnimEncoderOptions) SetMinimizeSize(v int) { + ((*C.WebPAnimEncoderOptions)(encOptions)).minimize_size = (C.int)(v) +} + +func (encOptions *WebPAnimEncoderOptions) SetKmin(v int) { + ((*C.WebPAnimEncoderOptions)(encOptions)).kmin = (C.int)(v) +} + +func (encOptions *WebPAnimEncoderOptions) SetKmax(v int) { + ((*C.WebPAnimEncoderOptions)(encOptions)).kmax = (C.int)(v) +} + +func (encOptions *WebPAnimEncoderOptions) SetAllowMixed(v int) { + ((*C.WebPAnimEncoderOptions)(encOptions)).allow_mixed = (C.int)(v) +} + +func (encOptions *WebPAnimEncoderOptions) SetVerbose(v int) { + ((*C.WebPAnimEncoderOptions)(encOptions)).verbose = (C.int)(v) +} + +func WebPAnimEncoderOptionsInitInternal(webPAnimEncoderOptions *WebPAnimEncoderOptions) int { + return int(C.WebPAnimEncoderOptionsInitInternal( + (*C.WebPAnimEncoderOptions)(unsafe.Pointer(webPAnimEncoderOptions)), + (C.int)(WebpMuxAbiVersion), + )) +} + +func WebPPictureImportRGBA(data []byte, stride int, webPPicture *WebPPicture) error { + res := int(C.WebPPictureImportRGBA( + (*C.WebPPicture)(unsafe.Pointer(webPPicture)), + (*C.uint8_t)(unsafe.Pointer(&data[0])), + (C.int)(stride), + )) + if res == 0 { + return errors.New("error: WebPPictureImportBGRA") + } + return nil +} + +func WebPAnimEncoderNewInternal(width, height int, webPAnimEncoderOptions *WebPAnimEncoderOptions) *WebPAnimEncoder { + return (*WebPAnimEncoder)(C.WebPAnimEncoderNewInternal( + (C.int)(width), + (C.int)(height), + (*C.WebPAnimEncoderOptions)(unsafe.Pointer(webPAnimEncoderOptions)), + (C.int)(WebpMuxAbiVersion), + )) +} + +func WebPAnimEncoderAdd(webPAnimEncoder *WebPAnimEncoder, webPPicture *WebPPicture, timestamp int, webpcfg WebPConfig) int { + return int(C.WebPAnimEncoderAdd( + (*C.WebPAnimEncoder)(unsafe.Pointer(webPAnimEncoder)), + (*C.WebPPicture)(unsafe.Pointer(webPPicture)), + (C.int)(timestamp), + webpcfg.getRawPointer(), + )) +} + +func 
WebPAnimEncoderAssemble(webPAnimEncoder *WebPAnimEncoder, webPData *WebPData) int {
+	return int(C.WebPAnimEncoderAssemble(
+		(*C.WebPAnimEncoder)(unsafe.Pointer(webPAnimEncoder)),
+		(*C.WebPData)(unsafe.Pointer(webPData)),
+	))
+}
+
+func WebPMuxCreateInternal(webPData *WebPData, copyData int) *WebPMux {
+	return (*WebPMux)(C.WebPMuxCreateInternal(
+		(*C.WebPData)(unsafe.Pointer(webPData)),
+		(C.int)(copyData),
+		(C.int)(WebpMuxAbiVersion),
+	))
+}
+
+func WebPMuxSetAnimationParams(webPMux *WebPMux, webPMuxAnimParams *WebPMuxAnimParams) WebPMuxError {
+	return (WebPMuxError)(C.WebPMuxSetAnimationParams(
+		(*C.WebPMux)(unsafe.Pointer(webPMux)),
+		(*C.WebPMuxAnimParams)(unsafe.Pointer(webPMuxAnimParams)),
+	))
+}
+
+func WebPMuxGetAnimationParams(webPMux *WebPMux, webPMuxAnimParams *WebPMuxAnimParams) WebPMuxError {
+	return (WebPMuxError)(C.WebPMuxGetAnimationParams(
+		(*C.WebPMux)(unsafe.Pointer(webPMux)),
+		(*C.WebPMuxAnimParams)(unsafe.Pointer(webPMuxAnimParams)),
+	))
+}
+
+func WebPMuxAssemble(webPMux *WebPMux, webPData *WebPData) WebPMuxError {
+	return (WebPMuxError)(C.WebPMuxAssemble(
+		(*C.WebPMux)(unsafe.Pointer(webPMux)),
+		(*C.WebPData)(unsafe.Pointer(webPData)),
+	))
+}
diff --git a/vendor/github.com/sizeofint/webpanimation/config.h b/vendor/github.com/sizeofint/webpanimation/config.h
new file mode 100644
index 00000000..b0762a0d
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/config.h
@@ -0,0 +1,4 @@
+#ifndef WEBANIMATION_HPP
+#define WEBANIMATION_HPP
+#define WEBP_USE_THREAD
+#endif
diff --git a/vendor/github.com/sizeofint/webpanimation/dec_alpha_dec.c b/vendor/github.com/sizeofint/webpanimation/dec_alpha_dec.c
new file mode 100644
index 00000000..91a68c7a
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dec_alpha_dec.c
@@ -0,0 +1,232 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Alpha-plane decompression.
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <stdlib.h>
+#include "dec_alphai_dec.h"
+#include "dec_vp8i_dec.h"
+#include "dec_vp8li_dec.h"
+#include "dsp_dsp.h"
+#include "utils_quant_levels_dec_utils.h"
+#include "utils_utils.h"
+#include "webp_format_constants.h"
+
+//------------------------------------------------------------------------------
+// ALPHDecoder object.
+
+// Allocates a new alpha decoder instance.
+static ALPHDecoder* ALPHNew(void) {
+  ALPHDecoder* const dec = (ALPHDecoder*)WebPSafeCalloc(1ULL, sizeof(*dec));
+  return dec;
+}
+
+// Clears and deallocates an alpha decoder instance.
+static void ALPHDelete(ALPHDecoder* const dec) {
+  if (dec != NULL) {
+    VP8LDelete(dec->vp8l_dec_);
+    dec->vp8l_dec_ = NULL;
+    WebPSafeFree(dec);
+  }
+}
+
+//------------------------------------------------------------------------------
+// Decoding.
+
+// Initialize alpha decoding by parsing the alpha header and decoding the image
+// header for alpha data stored using lossless compression.
+// Returns false in case of error in alpha header (data too short, invalid
+// compression method or filter, error in lossless header data etc).
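Concretely, the whole alpha-chunk header that ALPHInit() parses below fits in a single byte, split into four 2-bit fields. A minimal Go sketch of the same unpacking (the 0x05 input byte is a made-up example, not taken from the sources above):

    package main

    import "fmt"

    func main() {
        hdr := byte(0x05)           // made-up sample: lossless method + horizontal pre-filter
        method := (hdr >> 0) & 0x03 // 0 = no compression, 1 = ALPHA_LOSSLESS_COMPRESSION
        filt := (hdr >> 2) & 0x03   // WEBP_FILTER_TYPE applied before compression
        prep := (hdr >> 4) & 0x03   // pre-processing: level dequantization on or off
        rsrv := (hdr >> 6) & 0x03   // reserved; ALPHInit rejects the chunk unless 0
        fmt.Println(method, filt, prep, rsrv) // 1 1 0 0
    }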
+static int ALPHInit(ALPHDecoder* const dec, const uint8_t* data, + size_t data_size, const VP8Io* const src_io, + uint8_t* output) { + int ok = 0; + const uint8_t* const alpha_data = data + ALPHA_HEADER_LEN; + const size_t alpha_data_size = data_size - ALPHA_HEADER_LEN; + int rsrv; + VP8Io* const io = &dec->io_; + + assert(data != NULL && output != NULL && src_io != NULL); + + VP8FiltersInit(); + dec->output_ = output; + dec->width_ = src_io->width; + dec->height_ = src_io->height; + assert(dec->width_ > 0 && dec->height_ > 0); + + if (data_size <= ALPHA_HEADER_LEN) { + return 0; + } + + dec->method_ = (data[0] >> 0) & 0x03; + dec->filter_ = (WEBP_FILTER_TYPE)((data[0] >> 2) & 0x03); + dec->pre_processing_ = (data[0] >> 4) & 0x03; + rsrv = (data[0] >> 6) & 0x03; + if (dec->method_ < ALPHA_NO_COMPRESSION || + dec->method_ > ALPHA_LOSSLESS_COMPRESSION || + dec->filter_ >= WEBP_FILTER_LAST || + dec->pre_processing_ > ALPHA_PREPROCESSED_LEVELS || + rsrv != 0) { + return 0; + } + + // Copy the necessary parameters from src_io to io + VP8InitIo(io); + WebPInitCustomIo(NULL, io); + io->opaque = dec; + io->width = src_io->width; + io->height = src_io->height; + + io->use_cropping = src_io->use_cropping; + io->crop_left = src_io->crop_left; + io->crop_right = src_io->crop_right; + io->crop_top = src_io->crop_top; + io->crop_bottom = src_io->crop_bottom; + // No need to copy the scaling parameters. + + if (dec->method_ == ALPHA_NO_COMPRESSION) { + const size_t alpha_decoded_size = dec->width_ * dec->height_; + ok = (alpha_data_size >= alpha_decoded_size); + } else { + assert(dec->method_ == ALPHA_LOSSLESS_COMPRESSION); + ok = VP8LDecodeAlphaHeader(dec, alpha_data, alpha_data_size); + } + + return ok; +} + +// Decodes, unfilters and dequantizes *at least* 'num_rows' rows of alpha +// starting from row number 'row'. It assumes that rows up to (row - 1) have +// already been decoded. +// Returns false in case of bitstream error. 
+static int ALPHDecode(VP8Decoder* const dec, int row, int num_rows) { + ALPHDecoder* const alph_dec = dec->alph_dec_; + const int width = alph_dec->width_; + const int height = alph_dec->io_.crop_bottom; + if (alph_dec->method_ == ALPHA_NO_COMPRESSION) { + int y; + const uint8_t* prev_line = dec->alpha_prev_line_; + const uint8_t* deltas = dec->alpha_data_ + ALPHA_HEADER_LEN + row * width; + uint8_t* dst = dec->alpha_plane_ + row * width; + assert(deltas <= &dec->alpha_data_[dec->alpha_data_size_]); + if (alph_dec->filter_ != WEBP_FILTER_NONE) { + assert(WebPUnfilters[alph_dec->filter_] != NULL); + for (y = 0; y < num_rows; ++y) { + WebPUnfilters[alph_dec->filter_](prev_line, deltas, dst, width); + prev_line = dst; + dst += width; + deltas += width; + } + } else { + for (y = 0; y < num_rows; ++y) { + memcpy(dst, deltas, width * sizeof(*dst)); + prev_line = dst; + dst += width; + deltas += width; + } + } + dec->alpha_prev_line_ = prev_line; + } else { // alph_dec->method_ == ALPHA_LOSSLESS_COMPRESSION + assert(alph_dec->vp8l_dec_ != NULL); + if (!VP8LDecodeAlphaImageStream(alph_dec, row + num_rows)) { + return 0; + } + } + + if (row + num_rows >= height) { + dec->is_alpha_decoded_ = 1; + } + return 1; +} + +static int AllocateAlphaPlane(VP8Decoder* const dec, const VP8Io* const io) { + const int stride = io->width; + const int height = io->crop_bottom; + const uint64_t alpha_size = (uint64_t)stride * height; + assert(dec->alpha_plane_mem_ == NULL); + dec->alpha_plane_mem_ = + (uint8_t*)WebPSafeMalloc(alpha_size, sizeof(*dec->alpha_plane_)); + if (dec->alpha_plane_mem_ == NULL) { + return 0; + } + dec->alpha_plane_ = dec->alpha_plane_mem_; + dec->alpha_prev_line_ = NULL; + return 1; +} + +void WebPDeallocateAlphaMemory(VP8Decoder* const dec) { + assert(dec != NULL); + WebPSafeFree(dec->alpha_plane_mem_); + dec->alpha_plane_mem_ = NULL; + dec->alpha_plane_ = NULL; + ALPHDelete(dec->alph_dec_); + dec->alph_dec_ = NULL; +} + +//------------------------------------------------------------------------------ +// Main entry point. + +const uint8_t* VP8DecompressAlphaRows(VP8Decoder* const dec, + const VP8Io* const io, + int row, int num_rows) { + const int width = io->width; + const int height = io->crop_bottom; + + assert(dec != NULL && io != NULL); + + if (row < 0 || num_rows <= 0 || row + num_rows > height) { + return NULL; // sanity check. + } + + if (!dec->is_alpha_decoded_) { + if (dec->alph_dec_ == NULL) { // Initialize decoder. + dec->alph_dec_ = ALPHNew(); + if (dec->alph_dec_ == NULL) return NULL; + if (!AllocateAlphaPlane(dec, io)) goto Error; + if (!ALPHInit(dec->alph_dec_, dec->alpha_data_, dec->alpha_data_size_, + io, dec->alpha_plane_)) { + goto Error; + } + // if we allowed use of alpha dithering, check whether it's needed at all + if (dec->alph_dec_->pre_processing_ != ALPHA_PREPROCESSED_LEVELS) { + dec->alpha_dithering_ = 0; // disable dithering + } else { + num_rows = height - row; // decode everything in one pass + } + } + + assert(dec->alph_dec_ != NULL); + assert(row + num_rows <= height); + if (!ALPHDecode(dec, row, num_rows)) goto Error; + + if (dec->is_alpha_decoded_) { // finished? + ALPHDelete(dec->alph_dec_); + dec->alph_dec_ = NULL; + if (dec->alpha_dithering_ > 0) { + uint8_t* const alpha = dec->alpha_plane_ + io->crop_top * width + + io->crop_left; + if (!WebPDequantizeLevels(alpha, + io->crop_right - io->crop_left, + io->crop_bottom - io->crop_top, + width, dec->alpha_dithering_)) { + goto Error; + } + } + } + } + + // Return a pointer to the current decoded row. 
+  return dec->alpha_plane_ + row * width;
+
+ Error:
+  WebPDeallocateAlphaMemory(dec);
+  return NULL;
+}
diff --git a/vendor/github.com/sizeofint/webpanimation/dec_alphai_dec.h b/vendor/github.com/sizeofint/webpanimation/dec_alphai_dec.h
new file mode 100644
index 00000000..df0ccc07
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dec_alphai_dec.h
@@ -0,0 +1,54 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Alpha decoder: internal header.
+//
+// Author: Urvang (urvang@google.com)
+
+#ifndef WEBP_DEC_ALPHAI_DEC_H_
+#define WEBP_DEC_ALPHAI_DEC_H_
+
+#include "dec_webpi_dec.h"
+#include "utils_filters_utils.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct VP8LDecoder;  // Defined in dec/vp8li.h.
+
+typedef struct ALPHDecoder ALPHDecoder;
+struct ALPHDecoder {
+  int width_;
+  int height_;
+  int method_;
+  WEBP_FILTER_TYPE filter_;
+  int pre_processing_;
+  struct VP8LDecoder* vp8l_dec_;
+  VP8Io io_;
+  int use_8b_decode_;  // Although alpha channel requires only 1 byte per
+                       // pixel, sometimes VP8LDecoder may need to allocate
+                       // 4 bytes per pixel internally during decode.
+  uint8_t* output_;
+  const uint8_t* prev_line_;   // last output row (or NULL)
+};
+
+//------------------------------------------------------------------------------
+// internal functions. Not public.
+
+// Deallocate memory associated to dec->alpha_plane_ decoding
+void WebPDeallocateAlphaMemory(VP8Decoder* const dec);
+
+//------------------------------------------------------------------------------
+
+#ifdef __cplusplus
+}    // extern "C"
+#endif
+
+#endif  // WEBP_DEC_ALPHAI_DEC_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/dec_buffer_dec.c b/vendor/github.com/sizeofint/webpanimation/dec_buffer_dec.c
new file mode 100644
index 00000000..7fcf451d
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dec_buffer_dec.c
@@ -0,0 +1,312 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Everything about WebPDecBuffer
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <stdlib.h>
+
+#include "dec_vp8i_dec.h"
+#include "dec_webpi_dec.h"
+#include "utils_utils.h"
+
+//------------------------------------------------------------------------------
+// WebPDecBuffer
+
+// Number of bytes per pixel for the different color-spaces.
+static const uint8_t kModeBpp[MODE_LAST] = {
+  3, 4, 3, 4, 4, 2, 2,
+  4, 4, 4, 2,    // pre-multiplied modes
+  1, 1 };
+
+// Check that webp_csp_mode is within the bounds of WEBP_CSP_MODE.
+// Convert to an integer to handle both the unsigned/signed enum cases
+// without the need for casting to remove type limit warnings.
+static int IsValidColorspace(int webp_csp_mode) { + return (webp_csp_mode >= MODE_RGB && webp_csp_mode < MODE_LAST); +} + +// strictly speaking, the very last (or first, if flipped) row +// doesn't require padding. +#define MIN_BUFFER_SIZE(WIDTH, HEIGHT, STRIDE) \ + ((uint64_t)(STRIDE) * ((HEIGHT) - 1) + (WIDTH)) + +static VP8StatusCode CheckDecBuffer(const WebPDecBuffer* const buffer) { + int ok = 1; + const WEBP_CSP_MODE mode = buffer->colorspace; + const int width = buffer->width; + const int height = buffer->height; + if (!IsValidColorspace(mode)) { + ok = 0; + } else if (!WebPIsRGBMode(mode)) { // YUV checks + const WebPYUVABuffer* const buf = &buffer->u.YUVA; + const int uv_width = (width + 1) / 2; + const int uv_height = (height + 1) / 2; + const int y_stride = abs(buf->y_stride); + const int u_stride = abs(buf->u_stride); + const int v_stride = abs(buf->v_stride); + const int a_stride = abs(buf->a_stride); + const uint64_t y_size = MIN_BUFFER_SIZE(width, height, y_stride); + const uint64_t u_size = MIN_BUFFER_SIZE(uv_width, uv_height, u_stride); + const uint64_t v_size = MIN_BUFFER_SIZE(uv_width, uv_height, v_stride); + const uint64_t a_size = MIN_BUFFER_SIZE(width, height, a_stride); + ok &= (y_size <= buf->y_size); + ok &= (u_size <= buf->u_size); + ok &= (v_size <= buf->v_size); + ok &= (y_stride >= width); + ok &= (u_stride >= uv_width); + ok &= (v_stride >= uv_width); + ok &= (buf->y != NULL); + ok &= (buf->u != NULL); + ok &= (buf->v != NULL); + if (mode == MODE_YUVA) { + ok &= (a_stride >= width); + ok &= (a_size <= buf->a_size); + ok &= (buf->a != NULL); + } + } else { // RGB checks + const WebPRGBABuffer* const buf = &buffer->u.RGBA; + const int stride = abs(buf->stride); + const uint64_t size = + MIN_BUFFER_SIZE(width * kModeBpp[mode], height, stride); + ok &= (size <= buf->size); + ok &= (stride >= width * kModeBpp[mode]); + ok &= (buf->rgba != NULL); + } + return ok ? VP8_STATUS_OK : VP8_STATUS_INVALID_PARAM; +} +#undef MIN_BUFFER_SIZE + +static VP8StatusCode AllocateBuffer(WebPDecBuffer* const buffer) { + const int w = buffer->width; + const int h = buffer->height; + const WEBP_CSP_MODE mode = buffer->colorspace; + + if (w <= 0 || h <= 0 || !IsValidColorspace(mode)) { + return VP8_STATUS_INVALID_PARAM; + } + + if (buffer->is_external_memory <= 0 && buffer->private_memory == NULL) { + uint8_t* output; + int uv_stride = 0, a_stride = 0; + uint64_t uv_size = 0, a_size = 0, total_size; + // We need memory and it hasn't been allocated yet. + // => initialize output buffer, now that dimensions are known. 
+ int stride; + uint64_t size; + + if ((uint64_t)w * kModeBpp[mode] >= (1ull << 32)) { + return VP8_STATUS_INVALID_PARAM; + } + stride = w * kModeBpp[mode]; + size = (uint64_t)stride * h; + if (!WebPIsRGBMode(mode)) { + uv_stride = (w + 1) / 2; + uv_size = (uint64_t)uv_stride * ((h + 1) / 2); + if (mode == MODE_YUVA) { + a_stride = w; + a_size = (uint64_t)a_stride * h; + } + } + total_size = size + 2 * uv_size + a_size; + + // Security/sanity checks + output = (uint8_t*)WebPSafeMalloc(total_size, sizeof(*output)); + if (output == NULL) { + return VP8_STATUS_OUT_OF_MEMORY; + } + buffer->private_memory = output; + + if (!WebPIsRGBMode(mode)) { // YUVA initialization + WebPYUVABuffer* const buf = &buffer->u.YUVA; + buf->y = output; + buf->y_stride = stride; + buf->y_size = (size_t)size; + buf->u = output + size; + buf->u_stride = uv_stride; + buf->u_size = (size_t)uv_size; + buf->v = output + size + uv_size; + buf->v_stride = uv_stride; + buf->v_size = (size_t)uv_size; + if (mode == MODE_YUVA) { + buf->a = output + size + 2 * uv_size; + } + buf->a_size = (size_t)a_size; + buf->a_stride = a_stride; + } else { // RGBA initialization + WebPRGBABuffer* const buf = &buffer->u.RGBA; + buf->rgba = output; + buf->stride = stride; + buf->size = (size_t)size; + } + } + return CheckDecBuffer(buffer); +} + +VP8StatusCode WebPFlipBuffer(WebPDecBuffer* const buffer) { + if (buffer == NULL) { + return VP8_STATUS_INVALID_PARAM; + } + if (WebPIsRGBMode(buffer->colorspace)) { + WebPRGBABuffer* const buf = &buffer->u.RGBA; + buf->rgba += (buffer->height - 1) * buf->stride; + buf->stride = -buf->stride; + } else { + WebPYUVABuffer* const buf = &buffer->u.YUVA; + const int H = buffer->height; + buf->y += (H - 1) * buf->y_stride; + buf->y_stride = -buf->y_stride; + buf->u += ((H - 1) >> 1) * buf->u_stride; + buf->u_stride = -buf->u_stride; + buf->v += ((H - 1) >> 1) * buf->v_stride; + buf->v_stride = -buf->v_stride; + if (buf->a != NULL) { + buf->a += (H - 1) * buf->a_stride; + buf->a_stride = -buf->a_stride; + } + } + return VP8_STATUS_OK; +} + +VP8StatusCode WebPAllocateDecBuffer(int width, int height, + const WebPDecoderOptions* const options, + WebPDecBuffer* const buffer) { + VP8StatusCode status; + if (buffer == NULL || width <= 0 || height <= 0) { + return VP8_STATUS_INVALID_PARAM; + } + if (options != NULL) { // First, apply options if there is any. + if (options->use_cropping) { + const int cw = options->crop_width; + const int ch = options->crop_height; + const int x = options->crop_left & ~1; + const int y = options->crop_top & ~1; + if (x < 0 || y < 0 || cw <= 0 || ch <= 0 || + x + cw > width || y + ch > height) { + return VP8_STATUS_INVALID_PARAM; // out of frame boundary. + } + width = cw; + height = ch; + } + + if (options->use_scaling) { +#if !defined(WEBP_REDUCE_SIZE) + int scaled_width = options->scaled_width; + int scaled_height = options->scaled_height; + if (!WebPRescalerGetScaledDimensions( + width, height, &scaled_width, &scaled_height)) { + return VP8_STATUS_INVALID_PARAM; + } + width = scaled_width; + height = scaled_height; +#else + return VP8_STATUS_INVALID_PARAM; // rescaling not supported +#endif + } + } + buffer->width = width; + buffer->height = height; + + // Then, allocate buffer for real. + status = AllocateBuffer(buffer); + if (status != VP8_STATUS_OK) return status; + + // Use the stride trick if vertical flip is needed. 
+ if (options != NULL && options->flip) { + status = WebPFlipBuffer(buffer); + } + return status; +} + +//------------------------------------------------------------------------------ +// constructors / destructors + +int WebPInitDecBufferInternal(WebPDecBuffer* buffer, int version) { + if (WEBP_ABI_IS_INCOMPATIBLE(version, WEBP_DECODER_ABI_VERSION)) { + return 0; // version mismatch + } + if (buffer == NULL) return 0; + memset(buffer, 0, sizeof(*buffer)); + return 1; +} + +void WebPFreeDecBuffer(WebPDecBuffer* buffer) { + if (buffer != NULL) { + if (buffer->is_external_memory <= 0) { + WebPSafeFree(buffer->private_memory); + } + buffer->private_memory = NULL; + } +} + +void WebPCopyDecBuffer(const WebPDecBuffer* const src, + WebPDecBuffer* const dst) { + if (src != NULL && dst != NULL) { + *dst = *src; + if (src->private_memory != NULL) { + dst->is_external_memory = 1; // dst buffer doesn't own the memory. + dst->private_memory = NULL; + } + } +} + +// Copy and transfer ownership from src to dst (beware of parameter order!) +void WebPGrabDecBuffer(WebPDecBuffer* const src, WebPDecBuffer* const dst) { + if (src != NULL && dst != NULL) { + *dst = *src; + if (src->private_memory != NULL) { + src->is_external_memory = 1; // src relinquishes ownership + src->private_memory = NULL; + } + } +} + +VP8StatusCode WebPCopyDecBufferPixels(const WebPDecBuffer* const src_buf, + WebPDecBuffer* const dst_buf) { + assert(src_buf != NULL && dst_buf != NULL); + assert(src_buf->colorspace == dst_buf->colorspace); + + dst_buf->width = src_buf->width; + dst_buf->height = src_buf->height; + if (CheckDecBuffer(dst_buf) != VP8_STATUS_OK) { + return VP8_STATUS_INVALID_PARAM; + } + if (WebPIsRGBMode(src_buf->colorspace)) { + const WebPRGBABuffer* const src = &src_buf->u.RGBA; + const WebPRGBABuffer* const dst = &dst_buf->u.RGBA; + WebPCopyPlane(src->rgba, src->stride, dst->rgba, dst->stride, + src_buf->width * kModeBpp[src_buf->colorspace], + src_buf->height); + } else { + const WebPYUVABuffer* const src = &src_buf->u.YUVA; + const WebPYUVABuffer* const dst = &dst_buf->u.YUVA; + WebPCopyPlane(src->y, src->y_stride, dst->y, dst->y_stride, + src_buf->width, src_buf->height); + WebPCopyPlane(src->u, src->u_stride, dst->u, dst->u_stride, + (src_buf->width + 1) / 2, (src_buf->height + 1) / 2); + WebPCopyPlane(src->v, src->v_stride, dst->v, dst->v_stride, + (src_buf->width + 1) / 2, (src_buf->height + 1) / 2); + if (WebPIsAlphaMode(src_buf->colorspace)) { + WebPCopyPlane(src->a, src->a_stride, dst->a, dst->a_stride, + src_buf->width, src_buf->height); + } + } + return VP8_STATUS_OK; +} + +int WebPAvoidSlowMemory(const WebPDecBuffer* const output, + const WebPBitstreamFeatures* const features) { + assert(output != NULL); + return (output->is_external_memory >= 2) && + WebPIsPremultipliedMode(output->colorspace) && + (features != NULL && features->has_alpha); +} + +//------------------------------------------------------------------------------ diff --git a/vendor/github.com/sizeofint/webpanimation/dec_common_dec.h b/vendor/github.com/sizeofint/webpanimation/dec_common_dec.h new file mode 100644 index 00000000..b158550a --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dec_common_dec.h @@ -0,0 +1,54 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. 
All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Definitions and macros common to encoding and decoding
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#ifndef WEBP_DEC_COMMON_DEC_H_
+#define WEBP_DEC_COMMON_DEC_H_
+
+// intra prediction modes
+enum { B_DC_PRED = 0,   // 4x4 modes
+       B_TM_PRED = 1,
+       B_VE_PRED = 2,
+       B_HE_PRED = 3,
+       B_RD_PRED = 4,
+       B_VR_PRED = 5,
+       B_LD_PRED = 6,
+       B_VL_PRED = 7,
+       B_HD_PRED = 8,
+       B_HU_PRED = 9,
+       NUM_BMODES = B_HU_PRED + 1 - B_DC_PRED,  // = 10
+
+       // Luma16 or UV modes
+       DC_PRED = B_DC_PRED, V_PRED = B_VE_PRED,
+       H_PRED = B_HE_PRED, TM_PRED = B_TM_PRED,
+       B_PRED = NUM_BMODES,   // refined I4x4 mode
+       NUM_PRED_MODES = 4,
+
+       // special modes
+       B_DC_PRED_NOTOP = 4,
+       B_DC_PRED_NOLEFT = 5,
+       B_DC_PRED_NOTOPLEFT = 6,
+       NUM_B_DC_MODES = 7 };
+
+enum { MB_FEATURE_TREE_PROBS = 3,
+       NUM_MB_SEGMENTS = 4,
+       NUM_REF_LF_DELTAS = 4,
+       NUM_MODE_LF_DELTAS = 4,   // I4x4, ZERO, *, SPLIT
+       MAX_NUM_PARTITIONS = 8,
+       // Probabilities
+       NUM_TYPES = 4,   // 0: i16-AC,  1: i16-DC,  2: chroma-AC,  3: i4-AC
+       NUM_BANDS = 8,
+       NUM_CTX = 3,
+       NUM_PROBAS = 11
+     };
+
+#endif  // WEBP_DEC_COMMON_DEC_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/dec_frame_dec.c b/vendor/github.com/sizeofint/webpanimation/dec_frame_dec.c
new file mode 100644
index 00000000..195e503b
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dec_frame_dec.c
@@ -0,0 +1,803 @@
+// Copyright 2010 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Frame-reconstruction function. Memory allocation.
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <stdlib.h>
+#include "dec_vp8i_dec.h"
+#include "utils_utils.h"
+
+//------------------------------------------------------------------------------
+// Main reconstruction function.
+
+static const uint16_t kScan[16] = {
+  0 +  0 * BPS,  4 +  0 * BPS,  8 +  0 * BPS, 12 +  0 * BPS,
+  0 +  4 * BPS,  4 +  4 * BPS,  8 +  4 * BPS, 12 +  4 * BPS,
+  0 +  8 * BPS,  4 +  8 * BPS,  8 +  8 * BPS, 12 +  8 * BPS,
+  0 + 12 * BPS,  4 + 12 * BPS,  8 + 12 * BPS, 12 + 12 * BPS
+};
+
+static int CheckMode(int mb_x, int mb_y, int mode) {
+  if (mode == B_DC_PRED) {
+    if (mb_x == 0) {
+      return (mb_y == 0) ? B_DC_PRED_NOTOPLEFT : B_DC_PRED_NOLEFT;
+    } else {
+      return (mb_y == 0) ? B_DC_PRED_NOTOP : B_DC_PRED;
+    }
+  }
+  return mode;
+}
+
+static void Copy32b(uint8_t* const dst, const uint8_t* const src) {
+  memcpy(dst, src, 4);
+}
+
+static WEBP_INLINE void DoTransform(uint32_t bits, const int16_t* const src,
+                                    uint8_t* const dst) {
+  switch (bits >> 30) {
+    case 3:
+      VP8Transform(src, dst, 0);
+      break;
+    case 2:
+      VP8TransformAC3(src, dst);
+      break;
+    case 1:
+      VP8TransformDC(src, dst);
+      break;
+    default:
+      break;
+  }
+}
+
+static void DoUVTransform(uint32_t bits, const int16_t* const src,
+                          uint8_t* const dst) {
+  if (bits & 0xff) {    // any non-zero coeff at all?
+    if (bits & 0xaa) {  // any non-zero AC coefficient?
+ VP8TransformUV(src, dst); // note we don't use the AC3 variant for U/V + } else { + VP8TransformDCUV(src, dst); + } + } +} + +static void ReconstructRow(const VP8Decoder* const dec, + const VP8ThreadContext* ctx) { + int j; + int mb_x; + const int mb_y = ctx->mb_y_; + const int cache_id = ctx->id_; + uint8_t* const y_dst = dec->yuv_b_ + Y_OFF; + uint8_t* const u_dst = dec->yuv_b_ + U_OFF; + uint8_t* const v_dst = dec->yuv_b_ + V_OFF; + + // Initialize left-most block. + for (j = 0; j < 16; ++j) { + y_dst[j * BPS - 1] = 129; + } + for (j = 0; j < 8; ++j) { + u_dst[j * BPS - 1] = 129; + v_dst[j * BPS - 1] = 129; + } + + // Init top-left sample on left column too. + if (mb_y > 0) { + y_dst[-1 - BPS] = u_dst[-1 - BPS] = v_dst[-1 - BPS] = 129; + } else { + // we only need to do this init once at block (0,0). + // Afterward, it remains valid for the whole topmost row. + memset(y_dst - BPS - 1, 127, 16 + 4 + 1); + memset(u_dst - BPS - 1, 127, 8 + 1); + memset(v_dst - BPS - 1, 127, 8 + 1); + } + + // Reconstruct one row. + for (mb_x = 0; mb_x < dec->mb_w_; ++mb_x) { + const VP8MBData* const block = ctx->mb_data_ + mb_x; + + // Rotate in the left samples from previously decoded block. We move four + // pixels at a time for alignment reason, and because of in-loop filter. + if (mb_x > 0) { + for (j = -1; j < 16; ++j) { + Copy32b(&y_dst[j * BPS - 4], &y_dst[j * BPS + 12]); + } + for (j = -1; j < 8; ++j) { + Copy32b(&u_dst[j * BPS - 4], &u_dst[j * BPS + 4]); + Copy32b(&v_dst[j * BPS - 4], &v_dst[j * BPS + 4]); + } + } + { + // bring top samples into the cache + VP8TopSamples* const top_yuv = dec->yuv_t_ + mb_x; + const int16_t* const coeffs = block->coeffs_; + uint32_t bits = block->non_zero_y_; + int n; + + if (mb_y > 0) { + memcpy(y_dst - BPS, top_yuv[0].y, 16); + memcpy(u_dst - BPS, top_yuv[0].u, 8); + memcpy(v_dst - BPS, top_yuv[0].v, 8); + } + + // predict and add residuals + if (block->is_i4x4_) { // 4x4 + uint32_t* const top_right = (uint32_t*)(y_dst - BPS + 16); + + if (mb_y > 0) { + if (mb_x >= dec->mb_w_ - 1) { // on rightmost border + memset(top_right, top_yuv[0].y[15], sizeof(*top_right)); + } else { + memcpy(top_right, top_yuv[1].y, sizeof(*top_right)); + } + } + // replicate the top-right pixels below + top_right[BPS] = top_right[2 * BPS] = top_right[3 * BPS] = top_right[0]; + + // predict and add residuals for all 4x4 blocks in turn. + for (n = 0; n < 16; ++n, bits <<= 2) { + uint8_t* const dst = y_dst + kScan[n]; + VP8PredLuma4[block->imodes_[n]](dst); + DoTransform(bits, coeffs + n * 16, dst); + } + } else { // 16x16 + const int pred_func = CheckMode(mb_x, mb_y, block->imodes_[0]); + VP8PredLuma16[pred_func](y_dst); + if (bits != 0) { + for (n = 0; n < 16; ++n, bits <<= 2) { + DoTransform(bits, coeffs + n * 16, y_dst + kScan[n]); + } + } + } + { + // Chroma + const uint32_t bits_uv = block->non_zero_uv_; + const int pred_func = CheckMode(mb_x, mb_y, block->uvmode_); + VP8PredChroma8[pred_func](u_dst); + VP8PredChroma8[pred_func](v_dst); + DoUVTransform(bits_uv >> 0, coeffs + 16 * 16, u_dst); + DoUVTransform(bits_uv >> 8, coeffs + 20 * 16, v_dst); + } + + // stash away top samples for next block + if (mb_y < dec->mb_h_ - 1) { + memcpy(top_yuv[0].y, y_dst + 15 * BPS, 16); + memcpy(top_yuv[0].u, u_dst + 7 * BPS, 8); + memcpy(top_yuv[0].v, v_dst + 7 * BPS, 8); + } + } + // Transfer reconstructed samples from yuv_b_ cache to final destination. 
+ { + const int y_offset = cache_id * 16 * dec->cache_y_stride_; + const int uv_offset = cache_id * 8 * dec->cache_uv_stride_; + uint8_t* const y_out = dec->cache_y_ + mb_x * 16 + y_offset; + uint8_t* const u_out = dec->cache_u_ + mb_x * 8 + uv_offset; + uint8_t* const v_out = dec->cache_v_ + mb_x * 8 + uv_offset; + for (j = 0; j < 16; ++j) { + memcpy(y_out + j * dec->cache_y_stride_, y_dst + j * BPS, 16); + } + for (j = 0; j < 8; ++j) { + memcpy(u_out + j * dec->cache_uv_stride_, u_dst + j * BPS, 8); + memcpy(v_out + j * dec->cache_uv_stride_, v_dst + j * BPS, 8); + } + } + } +} + +//------------------------------------------------------------------------------ +// Filtering + +// kFilterExtraRows[] = How many extra lines are needed on the MB boundary +// for caching, given a filtering level. +// Simple filter: up to 2 luma samples are read and 1 is written. +// Complex filter: up to 4 luma samples are read and 3 are written. Same for +// U/V, so it's 8 samples total (because of the 2x upsampling). +static const uint8_t kFilterExtraRows[3] = { 0, 2, 8 }; + +static void DoFilter(const VP8Decoder* const dec, int mb_x, int mb_y) { + const VP8ThreadContext* const ctx = &dec->thread_ctx_; + const int cache_id = ctx->id_; + const int y_bps = dec->cache_y_stride_; + const VP8FInfo* const f_info = ctx->f_info_ + mb_x; + uint8_t* const y_dst = dec->cache_y_ + cache_id * 16 * y_bps + mb_x * 16; + const int ilevel = f_info->f_ilevel_; + const int limit = f_info->f_limit_; + if (limit == 0) { + return; + } + assert(limit >= 3); + if (dec->filter_type_ == 1) { // simple + if (mb_x > 0) { + VP8SimpleHFilter16(y_dst, y_bps, limit + 4); + } + if (f_info->f_inner_) { + VP8SimpleHFilter16i(y_dst, y_bps, limit); + } + if (mb_y > 0) { + VP8SimpleVFilter16(y_dst, y_bps, limit + 4); + } + if (f_info->f_inner_) { + VP8SimpleVFilter16i(y_dst, y_bps, limit); + } + } else { // complex + const int uv_bps = dec->cache_uv_stride_; + uint8_t* const u_dst = dec->cache_u_ + cache_id * 8 * uv_bps + mb_x * 8; + uint8_t* const v_dst = dec->cache_v_ + cache_id * 8 * uv_bps + mb_x * 8; + const int hev_thresh = f_info->hev_thresh_; + if (mb_x > 0) { + VP8HFilter16(y_dst, y_bps, limit + 4, ilevel, hev_thresh); + VP8HFilter8(u_dst, v_dst, uv_bps, limit + 4, ilevel, hev_thresh); + } + if (f_info->f_inner_) { + VP8HFilter16i(y_dst, y_bps, limit, ilevel, hev_thresh); + VP8HFilter8i(u_dst, v_dst, uv_bps, limit, ilevel, hev_thresh); + } + if (mb_y > 0) { + VP8VFilter16(y_dst, y_bps, limit + 4, ilevel, hev_thresh); + VP8VFilter8(u_dst, v_dst, uv_bps, limit + 4, ilevel, hev_thresh); + } + if (f_info->f_inner_) { + VP8VFilter16i(y_dst, y_bps, limit, ilevel, hev_thresh); + VP8VFilter8i(u_dst, v_dst, uv_bps, limit, ilevel, hev_thresh); + } + } +} + +// Filter the decoded macroblock row (if needed) +static void FilterRow(const VP8Decoder* const dec) { + int mb_x; + const int mb_y = dec->thread_ctx_.mb_y_; + assert(dec->thread_ctx_.filter_row_); + for (mb_x = dec->tl_mb_x_; mb_x < dec->br_mb_x_; ++mb_x) { + DoFilter(dec, mb_x, mb_y); + } +} + +//------------------------------------------------------------------------------ +// Precompute the filtering strength for each segment and each i4x4/i16x16 mode. 
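As a worked example of the clamping performed by PrecomputeFilterStrengths() just below, here is a Go sketch of the same arithmetic; the level and sharpness inputs are arbitrary sample values, not taken from a real bitstream:

    package main

    import "fmt"

    func main() {
        level, sharpness := 30, 5 // sample inputs from the filter header
        ilevel := level
        if sharpness > 0 {
            if sharpness > 4 {
                ilevel >>= 2 // 30 -> 7
            } else {
                ilevel >>= 1
            }
            if ilevel > 9-sharpness {
                ilevel = 9 - sharpness // capped at 4 here
            }
        }
        if ilevel < 1 {
            ilevel = 1
        }
        limit := 2*level + ilevel // f_limit_: 64, handed to the edge filters
        hev := 0                  // high-edge-variance threshold
        if level >= 40 {
            hev = 2
        } else if level >= 15 {
            hev = 1
        }
        fmt.Println(ilevel, limit, hev) // 4 64 1
    }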
+ +static void PrecomputeFilterStrengths(VP8Decoder* const dec) { + if (dec->filter_type_ > 0) { + int s; + const VP8FilterHeader* const hdr = &dec->filter_hdr_; + for (s = 0; s < NUM_MB_SEGMENTS; ++s) { + int i4x4; + // First, compute the initial level + int base_level; + if (dec->segment_hdr_.use_segment_) { + base_level = dec->segment_hdr_.filter_strength_[s]; + if (!dec->segment_hdr_.absolute_delta_) { + base_level += hdr->level_; + } + } else { + base_level = hdr->level_; + } + for (i4x4 = 0; i4x4 <= 1; ++i4x4) { + VP8FInfo* const info = &dec->fstrengths_[s][i4x4]; + int level = base_level; + if (hdr->use_lf_delta_) { + level += hdr->ref_lf_delta_[0]; + if (i4x4) { + level += hdr->mode_lf_delta_[0]; + } + } + level = (level < 0) ? 0 : (level > 63) ? 63 : level; + if (level > 0) { + int ilevel = level; + if (hdr->sharpness_ > 0) { + if (hdr->sharpness_ > 4) { + ilevel >>= 2; + } else { + ilevel >>= 1; + } + if (ilevel > 9 - hdr->sharpness_) { + ilevel = 9 - hdr->sharpness_; + } + } + if (ilevel < 1) ilevel = 1; + info->f_ilevel_ = ilevel; + info->f_limit_ = 2 * level + ilevel; + info->hev_thresh_ = (level >= 40) ? 2 : (level >= 15) ? 1 : 0; + } else { + info->f_limit_ = 0; // no filtering + } + info->f_inner_ = i4x4; + } + } + } +} + +//------------------------------------------------------------------------------ +// Dithering + +// minimal amp that will provide a non-zero dithering effect +#define MIN_DITHER_AMP 4 + +#define DITHER_AMP_TAB_SIZE 12 +static const uint8_t kQuantToDitherAmp[DITHER_AMP_TAB_SIZE] = { + // roughly, it's dqm->uv_mat_[1] + 8, 7, 6, 4, 4, 2, 2, 2, 1, 1, 1, 1 +}; + +void VP8InitDithering(const WebPDecoderOptions* const options, + VP8Decoder* const dec) { + assert(dec != NULL); + if (options != NULL) { + const int d = options->dithering_strength; + const int max_amp = (1 << VP8_RANDOM_DITHER_FIX) - 1; + const int f = (d < 0) ? 0 : (d > 100) ? max_amp : (d * max_amp / 100); + if (f > 0) { + int s; + int all_amp = 0; + for (s = 0; s < NUM_MB_SEGMENTS; ++s) { + VP8QuantMatrix* const dqm = &dec->dqm_[s]; + if (dqm->uv_quant_ < DITHER_AMP_TAB_SIZE) { + const int idx = (dqm->uv_quant_ < 0) ? 
0 : dqm->uv_quant_; + dqm->dither_ = (f * kQuantToDitherAmp[idx]) >> 3; + } + all_amp |= dqm->dither_; + } + if (all_amp != 0) { + VP8InitRandom(&dec->dithering_rg_, 1.0f); + dec->dither_ = 1; + } + } + // potentially allow alpha dithering + dec->alpha_dithering_ = options->alpha_dithering_strength; + if (dec->alpha_dithering_ > 100) { + dec->alpha_dithering_ = 100; + } else if (dec->alpha_dithering_ < 0) { + dec->alpha_dithering_ = 0; + } + } +} + +// Convert to range: [-2,2] for dither=50, [-4,4] for dither=100 +static void Dither8x8(VP8Random* const rg, uint8_t* dst, int bps, int amp) { + uint8_t dither[64]; + int i; + for (i = 0; i < 8 * 8; ++i) { + dither[i] = VP8RandomBits2(rg, VP8_DITHER_AMP_BITS + 1, amp); + } + VP8DitherCombine8x8(dither, dst, bps); +} + +static void DitherRow(VP8Decoder* const dec) { + int mb_x; + assert(dec->dither_); + for (mb_x = dec->tl_mb_x_; mb_x < dec->br_mb_x_; ++mb_x) { + const VP8ThreadContext* const ctx = &dec->thread_ctx_; + const VP8MBData* const data = ctx->mb_data_ + mb_x; + const int cache_id = ctx->id_; + const int uv_bps = dec->cache_uv_stride_; + if (data->dither_ >= MIN_DITHER_AMP) { + uint8_t* const u_dst = dec->cache_u_ + cache_id * 8 * uv_bps + mb_x * 8; + uint8_t* const v_dst = dec->cache_v_ + cache_id * 8 * uv_bps + mb_x * 8; + Dither8x8(&dec->dithering_rg_, u_dst, uv_bps, data->dither_); + Dither8x8(&dec->dithering_rg_, v_dst, uv_bps, data->dither_); + } + } +} + +//------------------------------------------------------------------------------ +// This function is called after a row of macroblocks is finished decoding. +// It also takes into account the following restrictions: +// * In case of in-loop filtering, we must hold off sending some of the bottom +// pixels as they are yet unfiltered. They will be when the next macroblock +// row is decoded. Meanwhile, we must preserve them by rotating them in the +// cache area. This doesn't hold for the very bottom row of the uncropped +// picture of course. +// * we must clip the remaining pixels against the cropping area. The VP8Io +// struct must have the following fields set correctly before calling put(): + +#define MACROBLOCK_VPOS(mb_y) ((mb_y) * 16) // vertical position of a MB + +// Finalize and transmit a complete row. Return false in case of user-abort. 
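To make the holdback arithmetic in FinishRow() below concrete, a small Go sketch (assuming the complex filter, i.e. kFilterExtraRows[2] == 8, and an interior macroblock row):

    package main

    import "fmt"

    func main() {
        const extra = 8            // kFilterExtraRows[2]: rows withheld under the complex filter
        mbY := 2                   // an interior macroblock row (neither first nor last)
        yStart := mbY*16 - extra   // 24: resume where the previous call stopped
        yEnd := (mbY+1)*16 - extra // 40: the bottom 8 rows stay cached, still unfiltered
        fmt.Println(yStart, yEnd)  // rows [24,40) are handed to io->put()
    }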
+static int FinishRow(void* arg1, void* arg2) { + VP8Decoder* const dec = (VP8Decoder*)arg1; + VP8Io* const io = (VP8Io*)arg2; + int ok = 1; + const VP8ThreadContext* const ctx = &dec->thread_ctx_; + const int cache_id = ctx->id_; + const int extra_y_rows = kFilterExtraRows[dec->filter_type_]; + const int ysize = extra_y_rows * dec->cache_y_stride_; + const int uvsize = (extra_y_rows / 2) * dec->cache_uv_stride_; + const int y_offset = cache_id * 16 * dec->cache_y_stride_; + const int uv_offset = cache_id * 8 * dec->cache_uv_stride_; + uint8_t* const ydst = dec->cache_y_ - ysize + y_offset; + uint8_t* const udst = dec->cache_u_ - uvsize + uv_offset; + uint8_t* const vdst = dec->cache_v_ - uvsize + uv_offset; + const int mb_y = ctx->mb_y_; + const int is_first_row = (mb_y == 0); + const int is_last_row = (mb_y >= dec->br_mb_y_ - 1); + + if (dec->mt_method_ == 2) { + ReconstructRow(dec, ctx); + } + + if (ctx->filter_row_) { + FilterRow(dec); + } + + if (dec->dither_) { + DitherRow(dec); + } + + if (io->put != NULL) { + int y_start = MACROBLOCK_VPOS(mb_y); + int y_end = MACROBLOCK_VPOS(mb_y + 1); + if (!is_first_row) { + y_start -= extra_y_rows; + io->y = ydst; + io->u = udst; + io->v = vdst; + } else { + io->y = dec->cache_y_ + y_offset; + io->u = dec->cache_u_ + uv_offset; + io->v = dec->cache_v_ + uv_offset; + } + + if (!is_last_row) { + y_end -= extra_y_rows; + } + if (y_end > io->crop_bottom) { + y_end = io->crop_bottom; // make sure we don't overflow on last row. + } + // If dec->alpha_data_ is not NULL, we have some alpha plane present. + io->a = NULL; + if (dec->alpha_data_ != NULL && y_start < y_end) { + io->a = VP8DecompressAlphaRows(dec, io, y_start, y_end - y_start); + if (io->a == NULL) { + return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR, + "Could not decode alpha data."); + } + } + if (y_start < io->crop_top) { + const int delta_y = io->crop_top - y_start; + y_start = io->crop_top; + assert(!(delta_y & 1)); + io->y += dec->cache_y_stride_ * delta_y; + io->u += dec->cache_uv_stride_ * (delta_y >> 1); + io->v += dec->cache_uv_stride_ * (delta_y >> 1); + if (io->a != NULL) { + io->a += io->width * delta_y; + } + } + if (y_start < y_end) { + io->y += io->crop_left; + io->u += io->crop_left >> 1; + io->v += io->crop_left >> 1; + if (io->a != NULL) { + io->a += io->crop_left; + } + io->mb_y = y_start - io->crop_top; + io->mb_w = io->crop_right - io->crop_left; + io->mb_h = y_end - y_start; + ok = io->put(io); + } + } + // rotate top samples if needed + if (cache_id + 1 == dec->num_caches_) { + if (!is_last_row) { + memcpy(dec->cache_y_ - ysize, ydst + 16 * dec->cache_y_stride_, ysize); + memcpy(dec->cache_u_ - uvsize, udst + 8 * dec->cache_uv_stride_, uvsize); + memcpy(dec->cache_v_ - uvsize, vdst + 8 * dec->cache_uv_stride_, uvsize); + } + } + + return ok; +} + +#undef MACROBLOCK_VPOS + +//------------------------------------------------------------------------------ + +int VP8ProcessRow(VP8Decoder* const dec, VP8Io* const io) { + int ok = 1; + VP8ThreadContext* const ctx = &dec->thread_ctx_; + const int filter_row = + (dec->filter_type_ > 0) && + (dec->mb_y_ >= dec->tl_mb_y_) && (dec->mb_y_ <= dec->br_mb_y_); + if (dec->mt_method_ == 0) { + // ctx->id_ and ctx->f_info_ are already set + ctx->mb_y_ = dec->mb_y_; + ctx->filter_row_ = filter_row; + ReconstructRow(dec, ctx); + ok = FinishRow(dec, io); + } else { + WebPWorker* const worker = &dec->worker_; + // Finish previous job *before* updating context + ok &= WebPGetWorkerInterface()->Sync(worker); + assert(worker->status_ == 
OK); + if (ok) { // spawn a new deblocking/output job + ctx->io_ = *io; + ctx->id_ = dec->cache_id_; + ctx->mb_y_ = dec->mb_y_; + ctx->filter_row_ = filter_row; + if (dec->mt_method_ == 2) { // swap macroblock data + VP8MBData* const tmp = ctx->mb_data_; + ctx->mb_data_ = dec->mb_data_; + dec->mb_data_ = tmp; + } else { + // perform reconstruction directly in main thread + ReconstructRow(dec, ctx); + } + if (filter_row) { // swap filter info + VP8FInfo* const tmp = ctx->f_info_; + ctx->f_info_ = dec->f_info_; + dec->f_info_ = tmp; + } + // (reconstruct)+filter in parallel + WebPGetWorkerInterface()->Launch(worker); + if (++dec->cache_id_ == dec->num_caches_) { + dec->cache_id_ = 0; + } + } + } + return ok; +} + +//------------------------------------------------------------------------------ +// Finish setting up the decoding parameter once user's setup() is called. + +VP8StatusCode VP8EnterCritical(VP8Decoder* const dec, VP8Io* const io) { + // Call setup() first. This may trigger additional decoding features on 'io'. + // Note: Afterward, we must call teardown() no matter what. + if (io->setup != NULL && !io->setup(io)) { + VP8SetError(dec, VP8_STATUS_USER_ABORT, "Frame setup failed"); + return dec->status_; + } + + // Disable filtering per user request + if (io->bypass_filtering) { + dec->filter_type_ = 0; + } + + // Define the area where we can skip in-loop filtering, in case of cropping. + // + // 'Simple' filter reads two luma samples outside of the macroblock + // and filters one. It doesn't filter the chroma samples. Hence, we can + // avoid doing the in-loop filtering before crop_top/crop_left position. + // For the 'Complex' filter, 3 samples are read and up to 3 are filtered. + // Means: there's a dependency chain that goes all the way up to the + // top-left corner of the picture (MB #0). We must filter all the previous + // macroblocks. + { + const int extra_pixels = kFilterExtraRows[dec->filter_type_]; + if (dec->filter_type_ == 2) { + // For complex filter, we need to preserve the dependency chain. + dec->tl_mb_x_ = 0; + dec->tl_mb_y_ = 0; + } else { + // For simple filter, we can filter only the cropped region. + // We include 'extra_pixels' on the other side of the boundary, since + // vertical or horizontal filtering of the previous macroblock can + // modify some abutting pixels. + dec->tl_mb_x_ = (io->crop_left - extra_pixels) >> 4; + dec->tl_mb_y_ = (io->crop_top - extra_pixels) >> 4; + if (dec->tl_mb_x_ < 0) dec->tl_mb_x_ = 0; + if (dec->tl_mb_y_ < 0) dec->tl_mb_y_ = 0; + } + // We need some 'extra' pixels on the right/bottom. + dec->br_mb_y_ = (io->crop_bottom + 15 + extra_pixels) >> 4; + dec->br_mb_x_ = (io->crop_right + 15 + extra_pixels) >> 4; + if (dec->br_mb_x_ > dec->mb_w_) { + dec->br_mb_x_ = dec->mb_w_; + } + if (dec->br_mb_y_ > dec->mb_h_) { + dec->br_mb_y_ = dec->mb_h_; + } + } + PrecomputeFilterStrengths(dec); + return VP8_STATUS_OK; +} + +int VP8ExitCritical(VP8Decoder* const dec, VP8Io* const io) { + int ok = 1; + if (dec->mt_method_ > 0) { + ok = WebPGetWorkerInterface()->Sync(&dec->worker_); + } + + if (io->teardown != NULL) { + io->teardown(io); + } + return ok; +} + +//------------------------------------------------------------------------------ +// For multi-threaded decoding we need to use 3 rows of 16 pixels as delay line. +// +// Reason is: the deblocking filter cannot deblock the bottom horizontal edges +// immediately, and needs to wait for first few rows of the next macroblock to +// be decoded. 
+// Hence, deblocking lags behind by 4 or 8 pixels (depending on strength).
+// With two threads, the vertical positions of the rows being decoded are:
+// Decode:  [ 0..15][16..31][32..47][48..63][64..79][...
+// Deblock:         [ 0..11][12..27][28..43][44..59][...
+// If we use two threads and two caches of 16 pixels, the sequence would be:
+// Decode:  [ 0..15][16..31][ 0..15!!][16..31][ 0..15][...
+// Deblock:         [ 0..11][12..27!!][-4..11][12..27][...
+// The problem occurs during rows [12..15!!], where both the decoding and
+// deblocking threads would be writing simultaneously.
+// With 3 cache lines, one gets a safe write pattern:
+// Decode:  [ 0..15][16..31][32..47][ 0..15][16..31][32..47][0..
+// Deblock:         [ 0..11][12..27][28..43][-4..11][12..27][28...
+// Note that multi-threaded output _without_ deblocking can make use of two
+// cache lines of 16 pixels only, since there's no lagging behind. The decoding
+// and output processes then never write concurrently:
+// Decode:  [ 0..15][16..31][ 0..15][16..31][...
+// io->put: [ 0..15][16..31][ 0..15][...
+
+#define MT_CACHE_LINES 3
+#define ST_CACHE_LINES 1   // 1 cache row only for single-threaded case
+
+// Initialize multi/single-thread worker
+static int InitThreadContext(VP8Decoder* const dec) {
+  dec->cache_id_ = 0;
+  if (dec->mt_method_ > 0) {
+    WebPWorker* const worker = &dec->worker_;
+    if (!WebPGetWorkerInterface()->Reset(worker)) {
+      return VP8SetError(dec, VP8_STATUS_OUT_OF_MEMORY,
+                         "thread initialization failed.");
+    }
+    worker->data1 = dec;
+    worker->data2 = (void*)&dec->thread_ctx_.io_;
+    worker->hook = FinishRow;
+    dec->num_caches_ =
+        (dec->filter_type_ > 0) ? MT_CACHE_LINES : MT_CACHE_LINES - 1;
+  } else {
+    dec->num_caches_ = ST_CACHE_LINES;
+  }
+  return 1;
+}
+
+int VP8GetThreadMethod(const WebPDecoderOptions* const options,
+                       const WebPHeaderStructure* const headers,
+                       int width, int height) {
+  if (options == NULL || options->use_threads == 0) {
+    return 0;
+  }
+  (void)headers;
+  (void)width;
+  (void)height;
+  assert(headers == NULL || !headers->is_lossless);
+#if defined(WEBP_USE_THREAD)
+  if (width >= MIN_WIDTH_FOR_THREADS) return 2;
+#endif
+  return 0;
+}
+
+#undef MT_CACHE_LINES
+#undef ST_CACHE_LINES
+
+//------------------------------------------------------------------------------
+// Memory setup
+
+static int AllocateMemory(VP8Decoder* const dec) {
+  const int num_caches = dec->num_caches_;
+  const int mb_w = dec->mb_w_;
+  // Note: we use 'size_t' when there's no overflow risk, uint64_t otherwise.
+  const size_t intra_pred_mode_size = 4 * mb_w * sizeof(uint8_t);
+  const size_t top_size = sizeof(VP8TopSamples) * mb_w;
+  const size_t mb_info_size = (mb_w + 1) * sizeof(VP8MB);
+  const size_t f_info_size =
+      (dec->filter_type_ > 0) ?
+          mb_w * (dec->mt_method_ > 0 ? 2 : 1) * sizeof(VP8FInfo)
+        : 0;
+  const size_t yuv_size = YUV_SIZE * sizeof(*dec->yuv_b_);
+  const size_t mb_data_size =
+      (dec->mt_method_ == 2 ? 2 : 1) * mb_w * sizeof(*dec->mb_data_);
+  const size_t cache_height = (16 * num_caches
+                               + kFilterExtraRows[dec->filter_type_]) * 3 / 2;
+  const size_t cache_size = top_size * cache_height;
+  // alpha_size is the only one that scales as width x height.
+  const uint64_t alpha_size = (dec->alpha_data_ != NULL) ?
+      (uint64_t)dec->pic_hdr_.width_ * dec->pic_hdr_.height_ : 0ULL;
+  const uint64_t needed = (uint64_t)intra_pred_mode_size
+                        + top_size + mb_info_size + f_info_size
+                        + yuv_size + mb_data_size
+                        + cache_size + alpha_size + WEBP_ALIGN_CST;
+  uint8_t* mem;
+
+  if (needed != (size_t)needed) return 0;  // check for overflow
+  if (needed > dec->mem_size_) {
+    WebPSafeFree(dec->mem_);
+    dec->mem_size_ = 0;
+    dec->mem_ = WebPSafeMalloc(needed, sizeof(uint8_t));
+    if (dec->mem_ == NULL) {
+      return VP8SetError(dec, VP8_STATUS_OUT_OF_MEMORY,
+                         "no memory during frame initialization.");
+    }
+    // down-cast is ok, thanks to WebPSafeMalloc() above.
+    dec->mem_size_ = (size_t)needed;
+  }
+
+  mem = (uint8_t*)dec->mem_;
+  dec->intra_t_ = mem;
+  mem += intra_pred_mode_size;
+
+  dec->yuv_t_ = (VP8TopSamples*)mem;
+  mem += top_size;
+
+  dec->mb_info_ = ((VP8MB*)mem) + 1;
+  mem += mb_info_size;
+
+  dec->f_info_ = f_info_size ? (VP8FInfo*)mem : NULL;
+  mem += f_info_size;
+  dec->thread_ctx_.id_ = 0;
+  dec->thread_ctx_.f_info_ = dec->f_info_;
+  if (dec->filter_type_ > 0 && dec->mt_method_ > 0) {
+    // secondary cache line. The deblocking process needs to make use of the
+    // filtering strength from the previous macroblock row, while the new ones
+    // are being decoded in parallel. We'll just swap the pointers.
+    dec->thread_ctx_.f_info_ += mb_w;
+  }
+
+  mem = (uint8_t*)WEBP_ALIGN(mem);
+  assert((yuv_size & WEBP_ALIGN_CST) == 0);
+  dec->yuv_b_ = mem;
+  mem += yuv_size;
+
+  dec->mb_data_ = (VP8MBData*)mem;
+  dec->thread_ctx_.mb_data_ = (VP8MBData*)mem;
+  if (dec->mt_method_ == 2) {
+    dec->thread_ctx_.mb_data_ += mb_w;
+  }
+  mem += mb_data_size;
+
+  dec->cache_y_stride_ = 16 * mb_w;
+  dec->cache_uv_stride_ = 8 * mb_w;
+  {
+    const int extra_rows = kFilterExtraRows[dec->filter_type_];
+    const int extra_y = extra_rows * dec->cache_y_stride_;
+    const int extra_uv = (extra_rows / 2) * dec->cache_uv_stride_;
+    dec->cache_y_ = mem + extra_y;
+    dec->cache_u_ = dec->cache_y_
+                  + 16 * num_caches * dec->cache_y_stride_ + extra_uv;
+    dec->cache_v_ = dec->cache_u_
+                  + 8 * num_caches * dec->cache_uv_stride_ + extra_uv;
+    dec->cache_id_ = 0;
+  }
+  mem += cache_size;
+
+  // alpha plane
+  dec->alpha_plane_ = alpha_size ? mem : NULL;
+  mem += alpha_size;
+  assert(mem <= (uint8_t*)dec->mem_ + dec->mem_size_);
+
+  // note: left/top-info is initialized once and for all.
+  memset(dec->mb_info_ - 1, 0, mb_info_size);
+  VP8InitScanline(dec);   // initialize left too.
+
+  // initialize top
+  memset(dec->intra_t_, B_DC_PRED, intra_pred_mode_size);
+
+  return 1;
+}
+
+static void InitIo(VP8Decoder* const dec, VP8Io* io) {
+  // prepare 'io'
+  io->mb_y = 0;
+  io->y = dec->cache_y_;
+  io->u = dec->cache_u_;
+  io->v = dec->cache_v_;
+  io->y_stride = dec->cache_y_stride_;
+  io->uv_stride = dec->cache_uv_stride_;
+  io->a = NULL;
+}
+
+int VP8InitFrame(VP8Decoder* const dec, VP8Io* const io) {
+  if (!InitThreadContext(dec)) return 0;  // call first. Sets dec->num_caches_.
+  if (!AllocateMemory(dec)) return 0;
+  InitIo(dec, io);
+  VP8DspInit();  // Init critical function pointers and look-up tables.
+  return 1;
+}
+
+//------------------------------------------------------------------------------
diff --git a/vendor/github.com/sizeofint/webpanimation/dec_idec_dec.c b/vendor/github.com/sizeofint/webpanimation/dec_idec_dec.c
new file mode 100644
index 00000000..53fbc8ad
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dec_idec_dec.c
@@ -0,0 +1,908 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Incremental decoding
+//
+// Author: somnath@google.com (Somnath Banerjee)
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "dec_alphai_dec.h"
+#include "dec_webpi_dec.h"
+#include "dec_vp8i_dec.h"
+#include "utils_utils.h"
+
+// In append mode, buffer allocations increase as multiples of this value.
+// Needs to be a power of 2.
+#define CHUNK_SIZE 4096
+#define MAX_MB_SIZE 4096
+
+//------------------------------------------------------------------------------
+// Data structures for memory and states
+
+// Decoding states. State normally flows as:
+// WEBP_HEADER->VP8_HEADER->VP8_PARTS0->VP8_DATA->DONE for a lossy image, and
+// WEBP_HEADER->VP8L_HEADER->VP8L_DATA->DONE for a lossless image.
+// If there is any error, the decoder goes into state ERROR.
+typedef enum {
+  STATE_WEBP_HEADER,  // All the data before that of the VP8/VP8L chunk.
+  STATE_VP8_HEADER,   // The VP8 Frame header (within the VP8 chunk).
+  STATE_VP8_PARTS0,
+  STATE_VP8_DATA,
+  STATE_VP8L_HEADER,
+  STATE_VP8L_DATA,
+  STATE_DONE,
+  STATE_ERROR
+} DecState;
+
+// Operating state for the MemBuffer
+typedef enum {
+  MEM_MODE_NONE = 0,
+  MEM_MODE_APPEND,
+  MEM_MODE_MAP
+} MemBufferMode;
+
+// storage for partition #0 and partial data (in a rolling fashion)
+typedef struct {
+  MemBufferMode mode_;  // Operation mode
+  size_t start_;        // start location of the data to be decoded
+  size_t end_;          // end location
+  size_t buf_size_;     // size of the allocated buffer
+  uint8_t* buf_;        // We don't own this buffer in case WebPIUpdate()
+
+  size_t part0_size_;         // size of partition #0
+  const uint8_t* part0_buf_;  // buffer to store partition #0
+} MemBuffer;
+
+struct WebPIDecoder {
+  DecState state_;        // current decoding state
+  WebPDecParams params_;  // Params to store output info
+  int is_lossless_;       // for down-casting 'dec_'.
+  void* dec_;             // either a VP8Decoder or a VP8LDecoder instance
+  VP8Io io_;
+
+  MemBuffer mem_;         // input memory buffer.
+  WebPDecBuffer output_;  // output buffer (when no external one is supplied,
+                          // or if the external one has slow-memory)
+  WebPDecBuffer* final_output_;  // Slow-memory output to copy to eventually.
+  size_t chunk_size_;     // Compressed VP8/VP8L size extracted from Header.
+
+  int last_mb_y_;         // last row reached for intra-mode decoding
+};
+
+// MB context to restore in case VP8DecodeMB() fails
+typedef struct {
+  VP8MB left_;
+  VP8MB info_;
+  VP8BitReader token_br_;
+} MBContext;
+
+//------------------------------------------------------------------------------
+// MemBuffer: incoming data handling
+
+static WEBP_INLINE size_t MemDataSize(const MemBuffer* mem) {
+  return (mem->end_ - mem->start_);
+}
+
+// Check if we need to preserve the compressed alpha data, as it may not have
+// been decoded yet.
+static int NeedCompressedAlpha(const WebPIDecoder* const idec) {
+  if (idec->state_ == STATE_WEBP_HEADER) {
+    // We haven't parsed the headers yet, so we don't know whether the image is
+    // lossy or lossless. This also means that we haven't parsed the ALPH chunk.
+    return 0;
+  }
+  if (idec->is_lossless_) {
+    return 0;  // ALPH chunk is not present for lossless images.
+  } else {
+    const VP8Decoder* const dec = (VP8Decoder*)idec->dec_;
+    assert(dec != NULL);  // Must be true as idec->state_ != STATE_WEBP_HEADER.
+    return (dec->alpha_data_ != NULL) && !dec->is_alpha_decoded_;
+  }
+}
+
+static void DoRemap(WebPIDecoder* const idec, ptrdiff_t offset) {
+  MemBuffer* const mem = &idec->mem_;
+  const uint8_t* const new_base = mem->buf_ + mem->start_;
+  // note: for VP8, setting up idec->io_ is only really needed at the beginning
+  // of the decoding, until partition #0 is complete.
+  idec->io_.data = new_base;
+  idec->io_.data_size = MemDataSize(mem);
+
+  if (idec->dec_ != NULL) {
+    if (!idec->is_lossless_) {
+      VP8Decoder* const dec = (VP8Decoder*)idec->dec_;
+      const uint32_t last_part = dec->num_parts_minus_one_;
+      if (offset != 0) {
+        uint32_t p;
+        for (p = 0; p <= last_part; ++p) {
+          VP8RemapBitReader(dec->parts_ + p, offset);
+        }
+        // Remap partition #0 data pointer to new offset, but only in MAP
+        // mode (in APPEND mode, partition #0 is copied into fixed memory).
+        if (mem->mode_ == MEM_MODE_MAP) {
+          VP8RemapBitReader(&dec->br_, offset);
+        }
+      }
+      {
+        const uint8_t* const last_start = dec->parts_[last_part].buf_;
+        VP8BitReaderSetBuffer(&dec->parts_[last_part], last_start,
+                              mem->buf_ + mem->end_ - last_start);
+      }
+      if (NeedCompressedAlpha(idec)) {
+        ALPHDecoder* const alph_dec = dec->alph_dec_;
+        dec->alpha_data_ += offset;
+        if (alph_dec != NULL && alph_dec->vp8l_dec_ != NULL) {
+          if (alph_dec->method_ == ALPHA_LOSSLESS_COMPRESSION) {
+            VP8LDecoder* const alph_vp8l_dec = alph_dec->vp8l_dec_;
+            assert(dec->alpha_data_size_ >= ALPHA_HEADER_LEN);
+            VP8LBitReaderSetBuffer(&alph_vp8l_dec->br_,
+                                   dec->alpha_data_ + ALPHA_HEADER_LEN,
+                                   dec->alpha_data_size_ - ALPHA_HEADER_LEN);
+          } else {  // alph_dec->method_ == ALPHA_NO_COMPRESSION
+            // Nothing special to do in this case.
+          }
+        }
+      }
+    } else {    // Resize lossless bitreader
+      VP8LDecoder* const dec = (VP8LDecoder*)idec->dec_;
+      VP8LBitReaderSetBuffer(&dec->br_, new_base, MemDataSize(mem));
+    }
+  }
+}
+
+// Appends data to the end of MemBuffer->buf_. It expands the allocated memory
+// size if required, and also updates the VP8BitReader pointers if new memory
+// is allocated.
+static int AppendToMemBuffer(WebPIDecoder* const idec,
+                             const uint8_t* const data, size_t data_size) {
+  VP8Decoder* const dec = (VP8Decoder*)idec->dec_;
+  MemBuffer* const mem = &idec->mem_;
+  const int need_compressed_alpha = NeedCompressedAlpha(idec);
+  const uint8_t* const old_start =
+      (mem->buf_ == NULL) ? NULL : mem->buf_ + mem->start_;
+  const uint8_t* const old_base =
+      need_compressed_alpha ? dec->alpha_data_ : old_start;
+  assert(mem->buf_ != NULL || mem->start_ == 0);
+  assert(mem->mode_ == MEM_MODE_APPEND);
+  if (data_size > MAX_CHUNK_PAYLOAD) {
+    // security safeguard: trying to allocate more than what the format
+    // allows for a chunk should be considered a red flag.
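+    // (MAX_CHUNK_PAYLOAD follows from the RIFF container, whose chunk sizes
+    // are stored as 32-bit values; a larger request can never be legitimate.)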
+ return 0; + } + + if (mem->end_ + data_size > mem->buf_size_) { // Need some free memory + const size_t new_mem_start = old_start - old_base; + const size_t current_size = MemDataSize(mem) + new_mem_start; + const uint64_t new_size = (uint64_t)current_size + data_size; + const uint64_t extra_size = (new_size + CHUNK_SIZE - 1) & ~(CHUNK_SIZE - 1); + uint8_t* const new_buf = + (uint8_t*)WebPSafeMalloc(extra_size, sizeof(*new_buf)); + if (new_buf == NULL) return 0; + if (old_base != NULL) memcpy(new_buf, old_base, current_size); + WebPSafeFree(mem->buf_); + mem->buf_ = new_buf; + mem->buf_size_ = (size_t)extra_size; + mem->start_ = new_mem_start; + mem->end_ = current_size; + } + + assert(mem->buf_ != NULL); + memcpy(mem->buf_ + mem->end_, data, data_size); + mem->end_ += data_size; + assert(mem->end_ <= mem->buf_size_); + + DoRemap(idec, mem->buf_ + mem->start_ - old_start); + return 1; +} + +static int RemapMemBuffer(WebPIDecoder* const idec, + const uint8_t* const data, size_t data_size) { + MemBuffer* const mem = &idec->mem_; + const uint8_t* const old_buf = mem->buf_; + const uint8_t* const old_start = + (old_buf == NULL) ? NULL : old_buf + mem->start_; + assert(old_buf != NULL || mem->start_ == 0); + assert(mem->mode_ == MEM_MODE_MAP); + + if (data_size < mem->buf_size_) return 0; // can't remap to a shorter buffer! + + mem->buf_ = (uint8_t*)data; + mem->end_ = mem->buf_size_ = data_size; + + DoRemap(idec, mem->buf_ + mem->start_ - old_start); + return 1; +} + +static void InitMemBuffer(MemBuffer* const mem) { + mem->mode_ = MEM_MODE_NONE; + mem->buf_ = NULL; + mem->buf_size_ = 0; + mem->part0_buf_ = NULL; + mem->part0_size_ = 0; +} + +static void ClearMemBuffer(MemBuffer* const mem) { + assert(mem); + if (mem->mode_ == MEM_MODE_APPEND) { + WebPSafeFree(mem->buf_); + WebPSafeFree((void*)mem->part0_buf_); + } +} + +static int CheckMemBufferMode(MemBuffer* const mem, MemBufferMode expected) { + if (mem->mode_ == MEM_MODE_NONE) { + mem->mode_ = expected; // switch to the expected mode + } else if (mem->mode_ != expected) { + return 0; // we mixed the modes => error + } + assert(mem->mode_ == expected); // mode is ok + return 1; +} + +// To be called last. 
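+// Flips the output if requested, then copies it into the user's slow-memory
+// buffer if one was set up in NewDecoder().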
+static VP8StatusCode FinishDecoding(WebPIDecoder* const idec) { + const WebPDecoderOptions* const options = idec->params_.options; + WebPDecBuffer* const output = idec->params_.output; + + idec->state_ = STATE_DONE; + if (options != NULL && options->flip) { + const VP8StatusCode status = WebPFlipBuffer(output); + if (status != VP8_STATUS_OK) return status; + } + if (idec->final_output_ != NULL) { + WebPCopyDecBufferPixels(output, idec->final_output_); // do the slow-copy + WebPFreeDecBuffer(&idec->output_); + *output = *idec->final_output_; + idec->final_output_ = NULL; + } + return VP8_STATUS_OK; +} + +//------------------------------------------------------------------------------ +// Macroblock-decoding contexts + +static void SaveContext(const VP8Decoder* dec, const VP8BitReader* token_br, + MBContext* const context) { + context->left_ = dec->mb_info_[-1]; + context->info_ = dec->mb_info_[dec->mb_x_]; + context->token_br_ = *token_br; +} + +static void RestoreContext(const MBContext* context, VP8Decoder* const dec, + VP8BitReader* const token_br) { + dec->mb_info_[-1] = context->left_; + dec->mb_info_[dec->mb_x_] = context->info_; + *token_br = context->token_br_; +} + +//------------------------------------------------------------------------------ + +static VP8StatusCode IDecError(WebPIDecoder* const idec, VP8StatusCode error) { + if (idec->state_ == STATE_VP8_DATA) { + // Synchronize the thread, clean-up and check for errors. + VP8ExitCritical((VP8Decoder*)idec->dec_, &idec->io_); + } + idec->state_ = STATE_ERROR; + return error; +} + +static void ChangeState(WebPIDecoder* const idec, DecState new_state, + size_t consumed_bytes) { + MemBuffer* const mem = &idec->mem_; + idec->state_ = new_state; + mem->start_ += consumed_bytes; + assert(mem->start_ <= mem->end_); + idec->io_.data = mem->buf_ + mem->start_; + idec->io_.data_size = MemDataSize(mem); +} + +// Headers +static VP8StatusCode DecodeWebPHeaders(WebPIDecoder* const idec) { + MemBuffer* const mem = &idec->mem_; + const uint8_t* data = mem->buf_ + mem->start_; + size_t curr_size = MemDataSize(mem); + VP8StatusCode status; + WebPHeaderStructure headers; + + headers.data = data; + headers.data_size = curr_size; + headers.have_all_data = 0; + status = WebPParseHeaders(&headers); + if (status == VP8_STATUS_NOT_ENOUGH_DATA) { + return VP8_STATUS_SUSPENDED; // We haven't found a VP8 chunk yet. + } else if (status != VP8_STATUS_OK) { + return IDecError(idec, status); + } + + idec->chunk_size_ = headers.compressed_size; + idec->is_lossless_ = headers.is_lossless; + if (!idec->is_lossless_) { + VP8Decoder* const dec = VP8New(); + if (dec == NULL) { + return VP8_STATUS_OUT_OF_MEMORY; + } + idec->dec_ = dec; + dec->alpha_data_ = headers.alpha_data; + dec->alpha_data_size_ = headers.alpha_data_size; + ChangeState(idec, STATE_VP8_HEADER, headers.offset); + } else { + VP8LDecoder* const dec = VP8LNew(); + if (dec == NULL) { + return VP8_STATUS_OUT_OF_MEMORY; + } + idec->dec_ = dec; + ChangeState(idec, STATE_VP8L_HEADER, headers.offset); + } + return VP8_STATUS_OK; +} + +static VP8StatusCode DecodeVP8FrameHeader(WebPIDecoder* const idec) { + const uint8_t* data = idec->mem_.buf_ + idec->mem_.start_; + const size_t curr_size = MemDataSize(&idec->mem_); + int width, height; + uint32_t bits; + + if (curr_size < VP8_FRAME_HEADER_SIZE) { + // Not enough data bytes to extract VP8 Frame Header. 
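+    // (VP8_FRAME_HEADER_SIZE is 10 bytes: the 3-byte frame tag, the 3-byte
+    // start code, plus 2 + 2 bytes of width/height.)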
+ return VP8_STATUS_SUSPENDED; + } + if (!VP8GetInfo(data, curr_size, idec->chunk_size_, &width, &height)) { + return IDecError(idec, VP8_STATUS_BITSTREAM_ERROR); + } + + bits = data[0] | (data[1] << 8) | (data[2] << 16); + idec->mem_.part0_size_ = (bits >> 5) + VP8_FRAME_HEADER_SIZE; + + idec->io_.data = data; + idec->io_.data_size = curr_size; + idec->state_ = STATE_VP8_PARTS0; + return VP8_STATUS_OK; +} + +// Partition #0 +static VP8StatusCode CopyParts0Data(WebPIDecoder* const idec) { + VP8Decoder* const dec = (VP8Decoder*)idec->dec_; + VP8BitReader* const br = &dec->br_; + const size_t part_size = br->buf_end_ - br->buf_; + MemBuffer* const mem = &idec->mem_; + assert(!idec->is_lossless_); + assert(mem->part0_buf_ == NULL); + // the following is a format limitation, no need for runtime check: + assert(part_size <= mem->part0_size_); + if (part_size == 0) { // can't have zero-size partition #0 + return VP8_STATUS_BITSTREAM_ERROR; + } + if (mem->mode_ == MEM_MODE_APPEND) { + // We copy and grab ownership of the partition #0 data. + uint8_t* const part0_buf = (uint8_t*)WebPSafeMalloc(1ULL, part_size); + if (part0_buf == NULL) { + return VP8_STATUS_OUT_OF_MEMORY; + } + memcpy(part0_buf, br->buf_, part_size); + mem->part0_buf_ = part0_buf; + VP8BitReaderSetBuffer(br, part0_buf, part_size); + } else { + // Else: just keep pointers to the partition #0's data in dec_->br_. + } + mem->start_ += part_size; + return VP8_STATUS_OK; +} + +static VP8StatusCode DecodePartition0(WebPIDecoder* const idec) { + VP8Decoder* const dec = (VP8Decoder*)idec->dec_; + VP8Io* const io = &idec->io_; + const WebPDecParams* const params = &idec->params_; + WebPDecBuffer* const output = params->output; + + // Wait till we have enough data for the whole partition #0 + if (MemDataSize(&idec->mem_) < idec->mem_.part0_size_) { + return VP8_STATUS_SUSPENDED; + } + + if (!VP8GetHeaders(dec, io)) { + const VP8StatusCode status = dec->status_; + if (status == VP8_STATUS_SUSPENDED || + status == VP8_STATUS_NOT_ENOUGH_DATA) { + // treating NOT_ENOUGH_DATA as SUSPENDED state + return VP8_STATUS_SUSPENDED; + } + return IDecError(idec, status); + } + + // Allocate/Verify output buffer now + dec->status_ = WebPAllocateDecBuffer(io->width, io->height, params->options, + output); + if (dec->status_ != VP8_STATUS_OK) { + return IDecError(idec, dec->status_); + } + // This change must be done before calling VP8InitFrame() + dec->mt_method_ = VP8GetThreadMethod(params->options, NULL, + io->width, io->height); + VP8InitDithering(params->options, dec); + + dec->status_ = CopyParts0Data(idec); + if (dec->status_ != VP8_STATUS_OK) { + return IDecError(idec, dec->status_); + } + + // Finish setting up the decoding parameters. Will call io->setup(). + if (VP8EnterCritical(dec, io) != VP8_STATUS_OK) { + return IDecError(idec, dec->status_); + } + + // Note: past this point, teardown() must always be called + // in case of error. + idec->state_ = STATE_VP8_DATA; + // Allocate memory and prepare everything. + if (!VP8InitFrame(dec, io)) { + return IDecError(idec, dec->status_); + } + return VP8_STATUS_OK; +} + +// Remaining partitions +static VP8StatusCode DecodeRemaining(WebPIDecoder* const idec) { + VP8Decoder* const dec = (VP8Decoder*)idec->dec_; + VP8Io* const io = &idec->io_; + + // Make sure partition #0 has been read before, to set dec to ready_. 
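+  // (dec->ready_ is set at the end of VP8GetHeaders(), called from
+  // DecodePartition0() above.)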
+  if (!dec->ready_) {
+    return IDecError(idec, VP8_STATUS_BITSTREAM_ERROR);
+  }
+  for (; dec->mb_y_ < dec->mb_h_; ++dec->mb_y_) {
+    if (idec->last_mb_y_ != dec->mb_y_) {
+      if (!VP8ParseIntraModeRow(&dec->br_, dec)) {
+        // note: normally, an error shouldn't occur since we already have the
+        // whole partition #0 available here in DecodeRemaining(). Reaching EOF
+        // while reading intra modes really means a BITSTREAM_ERROR.
+        return IDecError(idec, VP8_STATUS_BITSTREAM_ERROR);
+      }
+      idec->last_mb_y_ = dec->mb_y_;
+    }
+    for (; dec->mb_x_ < dec->mb_w_; ++dec->mb_x_) {
+      VP8BitReader* const token_br =
+          &dec->parts_[dec->mb_y_ & dec->num_parts_minus_one_];
+      MBContext context;
+      SaveContext(dec, token_br, &context);
+      if (!VP8DecodeMB(dec, token_br)) {
+        // We shouldn't fail when more than MAX_MB_SIZE bytes were available
+        if (dec->num_parts_minus_one_ == 0 &&
+            MemDataSize(&idec->mem_) > MAX_MB_SIZE) {
+          return IDecError(idec, VP8_STATUS_BITSTREAM_ERROR);
+        }
+        // Synchronize the threads.
+        if (dec->mt_method_ > 0) {
+          if (!WebPGetWorkerInterface()->Sync(&dec->worker_)) {
+            return IDecError(idec, VP8_STATUS_BITSTREAM_ERROR);
+          }
+        }
+        RestoreContext(&context, dec, token_br);
+        return VP8_STATUS_SUSPENDED;
+      }
+      // Release buffer only if there is only one partition
+      if (dec->num_parts_minus_one_ == 0) {
+        idec->mem_.start_ = token_br->buf_ - idec->mem_.buf_;
+        assert(idec->mem_.start_ <= idec->mem_.end_);
+      }
+    }
+    VP8InitScanline(dec);   // Prepare for next scanline
+
+    // Reconstruct, filter and emit the row.
+    if (!VP8ProcessRow(dec, io)) {
+      return IDecError(idec, VP8_STATUS_USER_ABORT);
+    }
+  }
+  // Synchronize the thread and check for errors.
+  if (!VP8ExitCritical(dec, io)) {
+    idec->state_ = STATE_ERROR;  // prevent re-entry in IDecError
+    return IDecError(idec, VP8_STATUS_USER_ABORT);
+  }
+  dec->ready_ = 0;
+  return FinishDecoding(idec);
+}
+
+static VP8StatusCode ErrorStatusLossless(WebPIDecoder* const idec,
+                                         VP8StatusCode status) {
+  if (status == VP8_STATUS_SUSPENDED || status == VP8_STATUS_NOT_ENOUGH_DATA) {
+    return VP8_STATUS_SUSPENDED;
+  }
+  return IDecError(idec, status);
+}
+
+static VP8StatusCode DecodeVP8LHeader(WebPIDecoder* const idec) {
+  VP8Io* const io = &idec->io_;
+  VP8LDecoder* const dec = (VP8LDecoder*)idec->dec_;
+  const WebPDecParams* const params = &idec->params_;
+  WebPDecBuffer* const output = params->output;
+  size_t curr_size = MemDataSize(&idec->mem_);
+  assert(idec->is_lossless_);
+
+  // Wait until there's enough data for decoding the header.
+  if (curr_size < (idec->chunk_size_ >> 3)) {
+    dec->status_ = VP8_STATUS_SUSPENDED;
+    return ErrorStatusLossless(idec, dec->status_);
+  }
+
+  if (!VP8LDecodeHeader(dec, io)) {
+    if (dec->status_ == VP8_STATUS_BITSTREAM_ERROR &&
+        curr_size < idec->chunk_size_) {
+      dec->status_ = VP8_STATUS_SUSPENDED;
+    }
+    return ErrorStatusLossless(idec, dec->status_);
+  }
+  // Allocate/verify output buffer now.
+  dec->status_ = WebPAllocateDecBuffer(io->width, io->height, params->options,
+                                       output);
+  if (dec->status_ != VP8_STATUS_OK) {
+    return IDecError(idec, dec->status_);
+  }
+
+  idec->state_ = STATE_VP8L_DATA;
+  return VP8_STATUS_OK;
+}
+
+static VP8StatusCode DecodeVP8LData(WebPIDecoder* const idec) {
+  VP8LDecoder* const dec = (VP8LDecoder*)idec->dec_;
+  const size_t curr_size = MemDataSize(&idec->mem_);
+  assert(idec->is_lossless_);
+
+  // Switch to incremental decoding if we don't have all the bytes available.
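+  // (When incremental, the lossless decoder saves its internal state and
+  // reports VP8_STATUS_SUSPENDED when it runs out of data, so decoding can
+  // resume on the next WebPIAppend()/WebPIUpdate() call.)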
+ dec->incremental_ = (curr_size < idec->chunk_size_); + + if (!VP8LDecodeImage(dec)) { + return ErrorStatusLossless(idec, dec->status_); + } + assert(dec->status_ == VP8_STATUS_OK || dec->status_ == VP8_STATUS_SUSPENDED); + return (dec->status_ == VP8_STATUS_SUSPENDED) ? dec->status_ + : FinishDecoding(idec); +} + + // Main decoding loop +static VP8StatusCode IDecode(WebPIDecoder* idec) { + VP8StatusCode status = VP8_STATUS_SUSPENDED; + + if (idec->state_ == STATE_WEBP_HEADER) { + status = DecodeWebPHeaders(idec); + } else { + if (idec->dec_ == NULL) { + return VP8_STATUS_SUSPENDED; // can't continue if we have no decoder. + } + } + if (idec->state_ == STATE_VP8_HEADER) { + status = DecodeVP8FrameHeader(idec); + } + if (idec->state_ == STATE_VP8_PARTS0) { + status = DecodePartition0(idec); + } + if (idec->state_ == STATE_VP8_DATA) { + const VP8Decoder* const dec = (VP8Decoder*)idec->dec_; + if (dec == NULL) { + return VP8_STATUS_SUSPENDED; // can't continue if we have no decoder. + } + status = DecodeRemaining(idec); + } + if (idec->state_ == STATE_VP8L_HEADER) { + status = DecodeVP8LHeader(idec); + } + if (idec->state_ == STATE_VP8L_DATA) { + status = DecodeVP8LData(idec); + } + return status; +} + +//------------------------------------------------------------------------------ +// Internal constructor + +static WebPIDecoder* NewDecoder(WebPDecBuffer* const output_buffer, + const WebPBitstreamFeatures* const features) { + WebPIDecoder* idec = (WebPIDecoder*)WebPSafeCalloc(1ULL, sizeof(*idec)); + if (idec == NULL) { + return NULL; + } + + idec->state_ = STATE_WEBP_HEADER; + idec->chunk_size_ = 0; + + idec->last_mb_y_ = -1; + + InitMemBuffer(&idec->mem_); + WebPInitDecBuffer(&idec->output_); + VP8InitIo(&idec->io_); + + WebPResetDecParams(&idec->params_); + if (output_buffer == NULL || WebPAvoidSlowMemory(output_buffer, features)) { + idec->params_.output = &idec->output_; + idec->final_output_ = output_buffer; + if (output_buffer != NULL) { + idec->params_.output->colorspace = output_buffer->colorspace; + } + } else { + idec->params_.output = output_buffer; + idec->final_output_ = NULL; + } + WebPInitCustomIo(&idec->params_, &idec->io_); // Plug the I/O functions. + + return idec; +} + +//------------------------------------------------------------------------------ +// Public functions + +WebPIDecoder* WebPINewDecoder(WebPDecBuffer* output_buffer) { + return NewDecoder(output_buffer, NULL); +} + +WebPIDecoder* WebPIDecode(const uint8_t* data, size_t data_size, + WebPDecoderConfig* config) { + WebPIDecoder* idec; + WebPBitstreamFeatures tmp_features; + WebPBitstreamFeatures* const features = + (config == NULL) ? &tmp_features : &config->input; + memset(&tmp_features, 0, sizeof(tmp_features)); + + // Parse the bitstream's features, if requested: + if (data != NULL && data_size > 0) { + if (WebPGetFeatures(data, data_size, features) != VP8_STATUS_OK) { + return NULL; + } + } + + // Create an instance of the incremental decoder + idec = (config != NULL) ? NewDecoder(&config->output, features) + : NewDecoder(NULL, features); + if (idec == NULL) { + return NULL; + } + // Finish initialization + if (config != NULL) { + idec->params_.options = &config->options; + } + return idec; +} + +void WebPIDelete(WebPIDecoder* idec) { + if (idec == NULL) return; + if (idec->dec_ != NULL) { + if (!idec->is_lossless_) { + if (idec->state_ == STATE_VP8_DATA) { + // Synchronize the thread, clean-up and check for errors. 
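+      // (Same clean-up path as IDecError(): the worker thread must be joined
+      // and io->teardown() invoked before the decoder is freed.)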
+      VP8ExitCritical((VP8Decoder*)idec->dec_, &idec->io_);
+    }
+    VP8Delete((VP8Decoder*)idec->dec_);
+  } else {
+    VP8LDelete((VP8LDecoder*)idec->dec_);
+  }
+  ClearMemBuffer(&idec->mem_);
+  WebPFreeDecBuffer(&idec->output_);
+  WebPSafeFree(idec);
+}
+
+//------------------------------------------------------------------------------
+// Wrappers around WebPINewDecoder
+
+WebPIDecoder* WebPINewRGB(WEBP_CSP_MODE csp, uint8_t* output_buffer,
+                          size_t output_buffer_size, int output_stride) {
+  const int is_external_memory = (output_buffer != NULL) ? 1 : 0;
+  WebPIDecoder* idec;
+
+  if (csp >= MODE_YUV) return NULL;
+  if (is_external_memory == 0) {    // Overwrite parameters to sane values.
+    output_buffer_size = 0;
+    output_stride = 0;
+  } else {  // A buffer was passed. Validate the other params.
+    if (output_stride == 0 || output_buffer_size == 0) {
+      return NULL;   // invalid parameter.
+    }
+  }
+  idec = WebPINewDecoder(NULL);
+  if (idec == NULL) return NULL;
+  idec->output_.colorspace = csp;
+  idec->output_.is_external_memory = is_external_memory;
+  idec->output_.u.RGBA.rgba = output_buffer;
+  idec->output_.u.RGBA.stride = output_stride;
+  idec->output_.u.RGBA.size = output_buffer_size;
+  return idec;
+}
+
+WebPIDecoder* WebPINewYUVA(uint8_t* luma, size_t luma_size, int luma_stride,
+                           uint8_t* u, size_t u_size, int u_stride,
+                           uint8_t* v, size_t v_size, int v_stride,
+                           uint8_t* a, size_t a_size, int a_stride) {
+  const int is_external_memory = (luma != NULL) ? 1 : 0;
+  WebPIDecoder* idec;
+  WEBP_CSP_MODE colorspace;
+
+  if (is_external_memory == 0) {    // Overwrite parameters to sane values.
+    luma_size = u_size = v_size = a_size = 0;
+    luma_stride = u_stride = v_stride = a_stride = 0;
+    u = v = a = NULL;
+    colorspace = MODE_YUVA;
+  } else {  // A luma buffer was passed. Validate the other parameters.
+    if (u == NULL || v == NULL) return NULL;
+    if (luma_size == 0 || u_size == 0 || v_size == 0) return NULL;
+    if (luma_stride == 0 || u_stride == 0 || v_stride == 0) return NULL;
+    if (a != NULL) {
+      if (a_size == 0 || a_stride == 0) return NULL;
+    }
+    colorspace = (a == NULL) ?
MODE_YUV : MODE_YUVA; + } + + idec = WebPINewDecoder(NULL); + if (idec == NULL) return NULL; + + idec->output_.colorspace = colorspace; + idec->output_.is_external_memory = is_external_memory; + idec->output_.u.YUVA.y = luma; + idec->output_.u.YUVA.y_stride = luma_stride; + idec->output_.u.YUVA.y_size = luma_size; + idec->output_.u.YUVA.u = u; + idec->output_.u.YUVA.u_stride = u_stride; + idec->output_.u.YUVA.u_size = u_size; + idec->output_.u.YUVA.v = v; + idec->output_.u.YUVA.v_stride = v_stride; + idec->output_.u.YUVA.v_size = v_size; + idec->output_.u.YUVA.a = a; + idec->output_.u.YUVA.a_stride = a_stride; + idec->output_.u.YUVA.a_size = a_size; + return idec; +} + +WebPIDecoder* WebPINewYUV(uint8_t* luma, size_t luma_size, int luma_stride, + uint8_t* u, size_t u_size, int u_stride, + uint8_t* v, size_t v_size, int v_stride) { + return WebPINewYUVA(luma, luma_size, luma_stride, + u, u_size, u_stride, + v, v_size, v_stride, + NULL, 0, 0); +} + +//------------------------------------------------------------------------------ + +static VP8StatusCode IDecCheckStatus(const WebPIDecoder* const idec) { + assert(idec); + if (idec->state_ == STATE_ERROR) { + return VP8_STATUS_BITSTREAM_ERROR; + } + if (idec->state_ == STATE_DONE) { + return VP8_STATUS_OK; + } + return VP8_STATUS_SUSPENDED; +} + +VP8StatusCode WebPIAppend(WebPIDecoder* idec, + const uint8_t* data, size_t data_size) { + VP8StatusCode status; + if (idec == NULL || data == NULL) { + return VP8_STATUS_INVALID_PARAM; + } + status = IDecCheckStatus(idec); + if (status != VP8_STATUS_SUSPENDED) { + return status; + } + // Check mixed calls between RemapMemBuffer and AppendToMemBuffer. + if (!CheckMemBufferMode(&idec->mem_, MEM_MODE_APPEND)) { + return VP8_STATUS_INVALID_PARAM; + } + // Append data to memory buffer + if (!AppendToMemBuffer(idec, data, data_size)) { + return VP8_STATUS_OUT_OF_MEMORY; + } + return IDecode(idec); +} + +VP8StatusCode WebPIUpdate(WebPIDecoder* idec, + const uint8_t* data, size_t data_size) { + VP8StatusCode status; + if (idec == NULL || data == NULL) { + return VP8_STATUS_INVALID_PARAM; + } + status = IDecCheckStatus(idec); + if (status != VP8_STATUS_SUSPENDED) { + return status; + } + // Check mixed calls between RemapMemBuffer and AppendToMemBuffer. 
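+  // (WebPIUpdate() assumes one caller-owned, growing buffer: MEM_MODE_MAP.
+  // WebPIAppend() copies the data internally: MEM_MODE_APPEND. Mixing the two
+  // calls on the same decoder is rejected.)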
+ if (!CheckMemBufferMode(&idec->mem_, MEM_MODE_MAP)) { + return VP8_STATUS_INVALID_PARAM; + } + // Make the memory buffer point to the new buffer + if (!RemapMemBuffer(idec, data, data_size)) { + return VP8_STATUS_INVALID_PARAM; + } + return IDecode(idec); +} + +//------------------------------------------------------------------------------ + +static const WebPDecBuffer* GetOutputBuffer(const WebPIDecoder* const idec) { + if (idec == NULL || idec->dec_ == NULL) { + return NULL; + } + if (idec->state_ <= STATE_VP8_PARTS0) { + return NULL; + } + if (idec->final_output_ != NULL) { + return NULL; // not yet slow-copied + } + return idec->params_.output; +} + +const WebPDecBuffer* WebPIDecodedArea(const WebPIDecoder* idec, + int* left, int* top, + int* width, int* height) { + const WebPDecBuffer* const src = GetOutputBuffer(idec); + if (left != NULL) *left = 0; + if (top != NULL) *top = 0; + if (src != NULL) { + if (width != NULL) *width = src->width; + if (height != NULL) *height = idec->params_.last_y; + } else { + if (width != NULL) *width = 0; + if (height != NULL) *height = 0; + } + return src; +} + +uint8_t* WebPIDecGetRGB(const WebPIDecoder* idec, int* last_y, + int* width, int* height, int* stride) { + const WebPDecBuffer* const src = GetOutputBuffer(idec); + if (src == NULL) return NULL; + if (src->colorspace >= MODE_YUV) { + return NULL; + } + + if (last_y != NULL) *last_y = idec->params_.last_y; + if (width != NULL) *width = src->width; + if (height != NULL) *height = src->height; + if (stride != NULL) *stride = src->u.RGBA.stride; + + return src->u.RGBA.rgba; +} + +uint8_t* WebPIDecGetYUVA(const WebPIDecoder* idec, int* last_y, + uint8_t** u, uint8_t** v, uint8_t** a, + int* width, int* height, + int* stride, int* uv_stride, int* a_stride) { + const WebPDecBuffer* const src = GetOutputBuffer(idec); + if (src == NULL) return NULL; + if (src->colorspace < MODE_YUV) { + return NULL; + } + + if (last_y != NULL) *last_y = idec->params_.last_y; + if (u != NULL) *u = src->u.YUVA.u; + if (v != NULL) *v = src->u.YUVA.v; + if (a != NULL) *a = src->u.YUVA.a; + if (width != NULL) *width = src->width; + if (height != NULL) *height = src->height; + if (stride != NULL) *stride = src->u.YUVA.y_stride; + if (uv_stride != NULL) *uv_stride = src->u.YUVA.u_stride; + if (a_stride != NULL) *a_stride = src->u.YUVA.a_stride; + + return src->u.YUVA.y; +} + +int WebPISetIOHooks(WebPIDecoder* const idec, + VP8IoPutHook put, + VP8IoSetupHook setup, + VP8IoTeardownHook teardown, + void* user_data) { + if (idec == NULL || idec->state_ > STATE_WEBP_HEADER) { + return 0; + } + + idec->io_.put = put; + idec->io_.setup = setup; + idec->io_.teardown = teardown; + idec->io_.opaque = user_data; + + return 1; +} diff --git a/vendor/github.com/sizeofint/webpanimation/dec_io_dec.c b/vendor/github.com/sizeofint/webpanimation/dec_io_dec.c new file mode 100644 index 00000000..e2aa333e --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dec_io_dec.c @@ -0,0 +1,644 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// functions for sample output. 
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <assert.h>
+#include <stdlib.h>
+#include "dec_vp8i_dec.h"
+#include "dec_webpi_dec.h"
+#include "dsp_dsp.h"
+#include "dsp_yuv.h"
+#include "utils_utils.h"
+
+//------------------------------------------------------------------------------
+// Main YUV<->RGB conversion functions
+
+static int EmitYUV(const VP8Io* const io, WebPDecParams* const p) {
+  WebPDecBuffer* output = p->output;
+  const WebPYUVABuffer* const buf = &output->u.YUVA;
+  uint8_t* const y_dst = buf->y + (size_t)io->mb_y * buf->y_stride;
+  uint8_t* const u_dst = buf->u + (size_t)(io->mb_y >> 1) * buf->u_stride;
+  uint8_t* const v_dst = buf->v + (size_t)(io->mb_y >> 1) * buf->v_stride;
+  const int mb_w = io->mb_w;
+  const int mb_h = io->mb_h;
+  const int uv_w = (mb_w + 1) / 2;
+  const int uv_h = (mb_h + 1) / 2;
+  WebPCopyPlane(io->y, io->y_stride, y_dst, buf->y_stride, mb_w, mb_h);
+  WebPCopyPlane(io->u, io->uv_stride, u_dst, buf->u_stride, uv_w, uv_h);
+  WebPCopyPlane(io->v, io->uv_stride, v_dst, buf->v_stride, uv_w, uv_h);
+  return io->mb_h;
+}
+
+// Point-sampling U/V sampler.
+static int EmitSampledRGB(const VP8Io* const io, WebPDecParams* const p) {
+  WebPDecBuffer* const output = p->output;
+  WebPRGBABuffer* const buf = &output->u.RGBA;
+  uint8_t* const dst = buf->rgba + (size_t)io->mb_y * buf->stride;
+  WebPSamplerProcessPlane(io->y, io->y_stride,
+                          io->u, io->v, io->uv_stride,
+                          dst, buf->stride, io->mb_w, io->mb_h,
+                          WebPSamplers[output->colorspace]);
+  return io->mb_h;
+}
+
+//------------------------------------------------------------------------------
+// Fancy upsampling
+
+#ifdef FANCY_UPSAMPLING
+static int EmitFancyRGB(const VP8Io* const io, WebPDecParams* const p) {
+  int num_lines_out = io->mb_h;   // a priori guess
+  const WebPRGBABuffer* const buf = &p->output->u.RGBA;
+  uint8_t* dst = buf->rgba + (size_t)io->mb_y * buf->stride;
+  WebPUpsampleLinePairFunc upsample = WebPUpsamplers[p->output->colorspace];
+  const uint8_t* cur_y = io->y;
+  const uint8_t* cur_u = io->u;
+  const uint8_t* cur_v = io->v;
+  const uint8_t* top_u = p->tmp_u;
+  const uint8_t* top_v = p->tmp_v;
+  int y = io->mb_y;
+  const int y_end = io->mb_y + io->mb_h;
+  const int mb_w = io->mb_w;
+  const int uv_w = (mb_w + 1) / 2;
+
+  if (y == 0) {
+    // The first line is special cased. We mirror the u/v samples at the
+    // boundary.
+    upsample(cur_y, NULL, cur_u, cur_v, cur_u, cur_v, dst, NULL, mb_w);
+  } else {
+    // We can finish the left-over line from the previous call.
+    upsample(p->tmp_y, cur_y, top_u, top_v, cur_u, cur_v,
+             dst - buf->stride, dst, mb_w);
+    ++num_lines_out;
+  }
+  // Loop over each pair of output rows.
+  for (; y + 2 < y_end; y += 2) {
+    top_u = cur_u;
+    top_v = cur_v;
+    cur_u += io->uv_stride;
+    cur_v += io->uv_stride;
+    dst += 2 * buf->stride;
+    cur_y += 2 * io->y_stride;
+    upsample(cur_y - io->y_stride, cur_y,
+             top_u, top_v, cur_u, cur_v,
+             dst - buf->stride, dst, mb_w);
+  }
+  // move to last row
+  cur_y += io->y_stride;
+  if (io->crop_top + y_end < io->crop_bottom) {
+    // Save the unfinished samples for the next call (as we're not done yet).
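+    // (p->tmp_y/tmp_u/tmp_v are the one-row scratch buffers that
+    // CustomSetup() allocates for exactly this carry-over.)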
+    memcpy(p->tmp_y, cur_y, mb_w * sizeof(*p->tmp_y));
+    memcpy(p->tmp_u, cur_u, uv_w * sizeof(*p->tmp_u));
+    memcpy(p->tmp_v, cur_v, uv_w * sizeof(*p->tmp_v));
+    // The fancy upsampler leaves a row unfinished behind
+    // (except for the very last row)
+    num_lines_out--;
+  } else {
+    // Process the very last row of an even-sized picture
+    if (!(y_end & 1)) {
+      upsample(cur_y, NULL, cur_u, cur_v, cur_u, cur_v,
+               dst + buf->stride, NULL, mb_w);
+    }
+  }
+  return num_lines_out;
+}
+
+#endif    /* FANCY_UPSAMPLING */
+
+//------------------------------------------------------------------------------
+
+static void FillAlphaPlane(uint8_t* dst, int w, int h, int stride) {
+  int j;
+  for (j = 0; j < h; ++j) {
+    memset(dst, 0xff, w * sizeof(*dst));
+    dst += stride;
+  }
+}
+
+static int EmitAlphaYUV(const VP8Io* const io, WebPDecParams* const p,
+                        int expected_num_lines_out) {
+  const uint8_t* alpha = io->a;
+  const WebPYUVABuffer* const buf = &p->output->u.YUVA;
+  const int mb_w = io->mb_w;
+  const int mb_h = io->mb_h;
+  uint8_t* dst = buf->a + (size_t)io->mb_y * buf->a_stride;
+  int j;
+  (void)expected_num_lines_out;
+  assert(expected_num_lines_out == mb_h);
+  if (alpha != NULL) {
+    for (j = 0; j < mb_h; ++j) {
+      memcpy(dst, alpha, mb_w * sizeof(*dst));
+      alpha += io->width;
+      dst += buf->a_stride;
+    }
+  } else if (buf->a != NULL) {
+    // the user requested alpha, but there is none; set it to opaque.
+    FillAlphaPlane(dst, mb_w, mb_h, buf->a_stride);
+  }
+  return 0;
+}
+
+static int GetAlphaSourceRow(const VP8Io* const io,
+                             const uint8_t** alpha, int* const num_rows) {
+  int start_y = io->mb_y;
+  *num_rows = io->mb_h;
+
+  // Compensate for the 1-line delay of the fancy upscaler.
+  // This is similar to EmitFancyRGB().
+  if (io->fancy_upsampling) {
+    if (start_y == 0) {
+      // We don't process the last row yet. It'll be done during the next call.
+      --*num_rows;
+    } else {
+      --start_y;
+      // Fortunately, *alpha data is persistent, so we can go back
+      // one row and finish alpha blending, now that the fancy upscaler
+      // completed the YUV->RGB interpolation.
+      *alpha -= io->width;
+    }
+    if (io->crop_top + io->mb_y + io->mb_h == io->crop_bottom) {
+      // If it's the very last call, we process all the remaining rows!
+      *num_rows = io->crop_bottom - io->crop_top - start_y;
+    }
+  }
+  return start_y;
+}
+
+static int EmitAlphaRGB(const VP8Io* const io, WebPDecParams* const p,
+                        int expected_num_lines_out) {
+  const uint8_t* alpha = io->a;
+  if (alpha != NULL) {
+    const int mb_w = io->mb_w;
+    const WEBP_CSP_MODE colorspace = p->output->colorspace;
+    const int alpha_first =
+        (colorspace == MODE_ARGB || colorspace == MODE_Argb);
+    const WebPRGBABuffer* const buf = &p->output->u.RGBA;
+    int num_rows;
+    const size_t start_y = GetAlphaSourceRow(io, &alpha, &num_rows);
+    uint8_t* const base_rgba = buf->rgba + start_y * buf->stride;
+    uint8_t* const dst = base_rgba + (alpha_first ? 0 : 3);
+    const int has_alpha = WebPDispatchAlpha(alpha, io->width, mb_w,
+                                            num_rows, dst, buf->stride);
+    (void)expected_num_lines_out;
+    assert(expected_num_lines_out == num_rows);
+    // has_alpha is true if there's non-trivial alpha to premultiply with.
+    if (has_alpha && WebPIsPremultipliedMode(colorspace)) {
+      WebPApplyAlphaMultiply(base_rgba, alpha_first,
+                             mb_w, num_rows, buf->stride);
+    }
+  }
+  return 0;
+}
+
+static int EmitAlphaRGBA4444(const VP8Io* const io, WebPDecParams* const p,
+                             int expected_num_lines_out) {
+  const uint8_t* alpha = io->a;
+  if (alpha != NULL) {
+    const int mb_w = io->mb_w;
+    const WEBP_CSP_MODE colorspace = p->output->colorspace;
+    const WebPRGBABuffer* const buf = &p->output->u.RGBA;
+    int num_rows;
+    const size_t start_y = GetAlphaSourceRow(io, &alpha, &num_rows);
+    uint8_t* const base_rgba = buf->rgba + start_y * buf->stride;
+#if (WEBP_SWAP_16BIT_CSP == 1)
+    uint8_t* alpha_dst = base_rgba;
+#else
+    uint8_t* alpha_dst = base_rgba + 1;
+#endif
+    uint32_t alpha_mask = 0x0f;
+    int i, j;
+    for (j = 0; j < num_rows; ++j) {
+      for (i = 0; i < mb_w; ++i) {
+        // Fill in the alpha value (converted to 4 bits).
+        const uint32_t alpha_value = alpha[i] >> 4;
+        alpha_dst[2 * i] = (alpha_dst[2 * i] & 0xf0) | alpha_value;
+        alpha_mask &= alpha_value;
+      }
+      alpha += io->width;
+      alpha_dst += buf->stride;
+    }
+    (void)expected_num_lines_out;
+    assert(expected_num_lines_out == num_rows);
+    if (alpha_mask != 0x0f && WebPIsPremultipliedMode(colorspace)) {
+      WebPApplyAlphaMultiply4444(base_rgba, mb_w, num_rows, buf->stride);
+    }
+  }
+  return 0;
+}
+
+//------------------------------------------------------------------------------
+// YUV rescaling (no final RGB conversion needed)
+
+#if !defined(WEBP_REDUCE_SIZE)
+static int Rescale(const uint8_t* src, int src_stride,
+                   int new_lines, WebPRescaler* const wrk) {
+  int num_lines_out = 0;
+  while (new_lines > 0) {    // import new contributions of source rows.
+    const int lines_in = WebPRescalerImport(wrk, new_lines, src, src_stride);
+    src += lines_in * src_stride;
+    new_lines -= lines_in;
+    num_lines_out += WebPRescalerExport(wrk);   // emit output row(s)
+  }
+  return num_lines_out;
+}
+
+static int EmitRescaledYUV(const VP8Io* const io, WebPDecParams* const p) {
+  const int mb_h = io->mb_h;
+  const int uv_mb_h = (mb_h + 1) >> 1;
+  WebPRescaler* const scaler = p->scaler_y;
+  int num_lines_out = 0;
+  if (WebPIsAlphaMode(p->output->colorspace) && io->a != NULL) {
+    // Before rescaling, we premultiply the luma directly into the io->y
+    // internal buffer. This is OK since these samples are not used for
+    // intra-prediction (the top samples are saved in cache_y_/u_/v_).
+    // We need to cast the const away, though.
+    WebPMultRows((uint8_t*)io->y, io->y_stride,
+                 io->a, io->width, io->mb_w, mb_h, 0);
+  }
+  num_lines_out = Rescale(io->y, io->y_stride, mb_h, scaler);
+  Rescale(io->u, io->uv_stride, uv_mb_h, p->scaler_u);
+  Rescale(io->v, io->uv_stride, uv_mb_h, p->scaler_v);
+  return num_lines_out;
+}
+
+static int EmitRescaledAlphaYUV(const VP8Io* const io, WebPDecParams* const p,
+                                int expected_num_lines_out) {
+  const WebPYUVABuffer* const buf = &p->output->u.YUVA;
+  uint8_t* const dst_a = buf->a + (size_t)p->last_y * buf->a_stride;
+  if (io->a != NULL) {
+    uint8_t* const dst_y = buf->y + (size_t)p->last_y * buf->y_stride;
+    const int num_lines_out = Rescale(io->a, io->width, io->mb_h, p->scaler_a);
+    assert(expected_num_lines_out == num_lines_out);
+    if (num_lines_out > 0) {   // unmultiply the Y
+      WebPMultRows(dst_y, buf->y_stride, dst_a, buf->a_stride,
+                   p->scaler_a->dst_width, num_lines_out, 1);
+    }
+  } else if (buf->a != NULL) {
+    // the user requested alpha, but there is none; set it to opaque.
+    assert(p->last_y + expected_num_lines_out <= io->scaled_height);
+    FillAlphaPlane(dst_a, io->scaled_width, expected_num_lines_out,
+                   buf->a_stride);
+  }
+  return 0;
+}
+
+static int InitYUVRescaler(const VP8Io* const io, WebPDecParams* const p) {
+  const int has_alpha = WebPIsAlphaMode(p->output->colorspace);
+  const WebPYUVABuffer* const buf = &p->output->u.YUVA;
+  const int out_width = io->scaled_width;
+  const int out_height = io->scaled_height;
+  const int uv_out_width = (out_width + 1) >> 1;
+  const int uv_out_height = (out_height + 1) >> 1;
+  const int uv_in_width = (io->mb_w + 1) >> 1;
+  const int uv_in_height = (io->mb_h + 1) >> 1;
+  const size_t work_size = 2 * out_width;  // scratch memory for luma rescaler
+  const size_t uv_work_size = 2 * uv_out_width;  // and for each of the u/v ones
+  size_t tmp_size, rescaler_size;
+  rescaler_t* work;
+  WebPRescaler* scalers;
+  const int num_rescalers = has_alpha ? 4 : 3;
+
+  tmp_size = (work_size + 2 * uv_work_size) * sizeof(*work);
+  if (has_alpha) {
+    tmp_size += work_size * sizeof(*work);
+  }
+  rescaler_size = num_rescalers * sizeof(*p->scaler_y) + WEBP_ALIGN_CST;
+
+  p->memory = WebPSafeMalloc(1ULL, tmp_size + rescaler_size);
+  if (p->memory == NULL) {
+    return 0;   // memory error
+  }
+  work = (rescaler_t*)p->memory;
+
+  scalers = (WebPRescaler*)WEBP_ALIGN((const uint8_t*)work + tmp_size);
+  p->scaler_y = &scalers[0];
+  p->scaler_u = &scalers[1];
+  p->scaler_v = &scalers[2];
+  p->scaler_a = has_alpha ? &scalers[3] : NULL;
+
+  WebPRescalerInit(p->scaler_y, io->mb_w, io->mb_h,
+                   buf->y, out_width, out_height, buf->y_stride, 1,
+                   work);
+  WebPRescalerInit(p->scaler_u, uv_in_width, uv_in_height,
+                   buf->u, uv_out_width, uv_out_height, buf->u_stride, 1,
+                   work + work_size);
+  WebPRescalerInit(p->scaler_v, uv_in_width, uv_in_height,
+                   buf->v, uv_out_width, uv_out_height, buf->v_stride, 1,
+                   work + work_size + uv_work_size);
+  p->emit = EmitRescaledYUV;
+
+  if (has_alpha) {
+    WebPRescalerInit(p->scaler_a, io->mb_w, io->mb_h,
+                     buf->a, out_width, out_height, buf->a_stride, 1,
+                     work + work_size + 2 * uv_work_size);
+    p->emit_alpha = EmitRescaledAlphaYUV;
+    WebPInitAlphaProcessing();
+  }
+  return 1;
+}
+
+//------------------------------------------------------------------------------
+// RGBA rescaling
+
+static int ExportRGB(WebPDecParams* const p, int y_pos) {
+  const WebPYUV444Converter convert =
+      WebPYUV444Converters[p->output->colorspace];
+  const WebPRGBABuffer* const buf = &p->output->u.RGBA;
+  uint8_t* dst = buf->rgba + (size_t)y_pos * buf->stride;
+  int num_lines_out = 0;
+  // For RGB rescaling, because of the YUV420 subsampling, the current U/V
+  // scan position can be +1/-1 line relative to the Y one. Hence the double
+  // test below.
+ while (WebPRescalerHasPendingOutput(p->scaler_y) && + WebPRescalerHasPendingOutput(p->scaler_u)) { + assert(y_pos + num_lines_out < p->output->height); + assert(p->scaler_u->y_accum == p->scaler_v->y_accum); + WebPRescalerExportRow(p->scaler_y); + WebPRescalerExportRow(p->scaler_u); + WebPRescalerExportRow(p->scaler_v); + convert(p->scaler_y->dst, p->scaler_u->dst, p->scaler_v->dst, + dst, p->scaler_y->dst_width); + dst += buf->stride; + ++num_lines_out; + } + return num_lines_out; +} + +static int EmitRescaledRGB(const VP8Io* const io, WebPDecParams* const p) { + const int mb_h = io->mb_h; + const int uv_mb_h = (mb_h + 1) >> 1; + int j = 0, uv_j = 0; + int num_lines_out = 0; + while (j < mb_h) { + const int y_lines_in = + WebPRescalerImport(p->scaler_y, mb_h - j, + io->y + (size_t)j * io->y_stride, io->y_stride); + j += y_lines_in; + if (WebPRescaleNeededLines(p->scaler_u, uv_mb_h - uv_j)) { + const int u_lines_in = WebPRescalerImport( + p->scaler_u, uv_mb_h - uv_j, io->u + (size_t)uv_j * io->uv_stride, + io->uv_stride); + const int v_lines_in = WebPRescalerImport( + p->scaler_v, uv_mb_h - uv_j, io->v + (size_t)uv_j * io->uv_stride, + io->uv_stride); + (void)v_lines_in; // remove a gcc warning + assert(u_lines_in == v_lines_in); + uv_j += u_lines_in; + } + num_lines_out += ExportRGB(p, p->last_y + num_lines_out); + } + return num_lines_out; +} + +static int ExportAlpha(WebPDecParams* const p, int y_pos, int max_lines_out) { + const WebPRGBABuffer* const buf = &p->output->u.RGBA; + uint8_t* const base_rgba = buf->rgba + (size_t)y_pos * buf->stride; + const WEBP_CSP_MODE colorspace = p->output->colorspace; + const int alpha_first = + (colorspace == MODE_ARGB || colorspace == MODE_Argb); + uint8_t* dst = base_rgba + (alpha_first ? 0 : 3); + int num_lines_out = 0; + const int is_premult_alpha = WebPIsPremultipliedMode(colorspace); + uint32_t non_opaque = 0; + const int width = p->scaler_a->dst_width; + + while (WebPRescalerHasPendingOutput(p->scaler_a) && + num_lines_out < max_lines_out) { + assert(y_pos + num_lines_out < p->output->height); + WebPRescalerExportRow(p->scaler_a); + non_opaque |= WebPDispatchAlpha(p->scaler_a->dst, 0, width, 1, dst, 0); + dst += buf->stride; + ++num_lines_out; + } + if (is_premult_alpha && non_opaque) { + WebPApplyAlphaMultiply(base_rgba, alpha_first, + width, num_lines_out, buf->stride); + } + return num_lines_out; +} + +static int ExportAlphaRGBA4444(WebPDecParams* const p, int y_pos, + int max_lines_out) { + const WebPRGBABuffer* const buf = &p->output->u.RGBA; + uint8_t* const base_rgba = buf->rgba + (size_t)y_pos * buf->stride; +#if (WEBP_SWAP_16BIT_CSP == 1) + uint8_t* alpha_dst = base_rgba; +#else + uint8_t* alpha_dst = base_rgba + 1; +#endif + int num_lines_out = 0; + const WEBP_CSP_MODE colorspace = p->output->colorspace; + const int width = p->scaler_a->dst_width; + const int is_premult_alpha = WebPIsPremultipliedMode(colorspace); + uint32_t alpha_mask = 0x0f; + + while (WebPRescalerHasPendingOutput(p->scaler_a) && + num_lines_out < max_lines_out) { + int i; + assert(y_pos + num_lines_out < p->output->height); + WebPRescalerExportRow(p->scaler_a); + for (i = 0; i < width; ++i) { + // Fill in the alpha value (converted to 4 bits). 
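+      // e.g. an 8-bit alpha of 0xc8 becomes 0x0c; the 0xf0 mask below
+      // preserves the color nibble sharing the byte while the low nibble
+      // receives the alpha.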
+ const uint32_t alpha_value = p->scaler_a->dst[i] >> 4; + alpha_dst[2 * i] = (alpha_dst[2 * i] & 0xf0) | alpha_value; + alpha_mask &= alpha_value; + } + alpha_dst += buf->stride; + ++num_lines_out; + } + if (is_premult_alpha && alpha_mask != 0x0f) { + WebPApplyAlphaMultiply4444(base_rgba, width, num_lines_out, buf->stride); + } + return num_lines_out; +} + +static int EmitRescaledAlphaRGB(const VP8Io* const io, WebPDecParams* const p, + int expected_num_out_lines) { + if (io->a != NULL) { + WebPRescaler* const scaler = p->scaler_a; + int lines_left = expected_num_out_lines; + const int y_end = p->last_y + lines_left; + while (lines_left > 0) { + const int64_t row_offset = (int64_t)scaler->src_y - io->mb_y; + WebPRescalerImport(scaler, io->mb_h + io->mb_y - scaler->src_y, + io->a + row_offset * io->width, io->width); + lines_left -= p->emit_alpha_row(p, y_end - lines_left, lines_left); + } + } + return 0; +} + +static int InitRGBRescaler(const VP8Io* const io, WebPDecParams* const p) { + const int has_alpha = WebPIsAlphaMode(p->output->colorspace); + const int out_width = io->scaled_width; + const int out_height = io->scaled_height; + const int uv_in_width = (io->mb_w + 1) >> 1; + const int uv_in_height = (io->mb_h + 1) >> 1; + const size_t work_size = 2 * out_width; // scratch memory for one rescaler + rescaler_t* work; // rescalers work area + uint8_t* tmp; // tmp storage for scaled YUV444 samples before RGB conversion + size_t tmp_size1, tmp_size2, total_size, rescaler_size; + WebPRescaler* scalers; + const int num_rescalers = has_alpha ? 4 : 3; + + tmp_size1 = 3 * work_size; + tmp_size2 = 3 * out_width; + if (has_alpha) { + tmp_size1 += work_size; + tmp_size2 += out_width; + } + total_size = tmp_size1 * sizeof(*work) + tmp_size2 * sizeof(*tmp); + rescaler_size = num_rescalers * sizeof(*p->scaler_y) + WEBP_ALIGN_CST; + + p->memory = WebPSafeMalloc(1ULL, total_size + rescaler_size); + if (p->memory == NULL) { + return 0; // memory error + } + work = (rescaler_t*)p->memory; + tmp = (uint8_t*)(work + tmp_size1); + + scalers = (WebPRescaler*)WEBP_ALIGN((const uint8_t*)work + total_size); + p->scaler_y = &scalers[0]; + p->scaler_u = &scalers[1]; + p->scaler_v = &scalers[2]; + p->scaler_a = has_alpha ? 
&scalers[3] : NULL; + + WebPRescalerInit(p->scaler_y, io->mb_w, io->mb_h, + tmp + 0 * out_width, out_width, out_height, 0, 1, + work + 0 * work_size); + WebPRescalerInit(p->scaler_u, uv_in_width, uv_in_height, + tmp + 1 * out_width, out_width, out_height, 0, 1, + work + 1 * work_size); + WebPRescalerInit(p->scaler_v, uv_in_width, uv_in_height, + tmp + 2 * out_width, out_width, out_height, 0, 1, + work + 2 * work_size); + p->emit = EmitRescaledRGB; + WebPInitYUV444Converters(); + + if (has_alpha) { + WebPRescalerInit(p->scaler_a, io->mb_w, io->mb_h, + tmp + 3 * out_width, out_width, out_height, 0, 1, + work + 3 * work_size); + p->emit_alpha = EmitRescaledAlphaRGB; + if (p->output->colorspace == MODE_RGBA_4444 || + p->output->colorspace == MODE_rgbA_4444) { + p->emit_alpha_row = ExportAlphaRGBA4444; + } else { + p->emit_alpha_row = ExportAlpha; + } + WebPInitAlphaProcessing(); + } + return 1; +} + +#endif // WEBP_REDUCE_SIZE + +//------------------------------------------------------------------------------ +// Default custom functions + +static int CustomSetup(VP8Io* io) { + WebPDecParams* const p = (WebPDecParams*)io->opaque; + const WEBP_CSP_MODE colorspace = p->output->colorspace; + const int is_rgb = WebPIsRGBMode(colorspace); + const int is_alpha = WebPIsAlphaMode(colorspace); + + p->memory = NULL; + p->emit = NULL; + p->emit_alpha = NULL; + p->emit_alpha_row = NULL; + if (!WebPIoInitFromOptions(p->options, io, is_alpha ? MODE_YUV : MODE_YUVA)) { + return 0; + } + if (is_alpha && WebPIsPremultipliedMode(colorspace)) { + WebPInitUpsamplers(); + } + if (io->use_scaling) { +#if !defined(WEBP_REDUCE_SIZE) + const int ok = is_rgb ? InitRGBRescaler(io, p) : InitYUVRescaler(io, p); + if (!ok) { + return 0; // memory error + } +#else + return 0; // rescaling support not compiled +#endif + } else { + if (is_rgb) { + WebPInitSamplers(); + p->emit = EmitSampledRGB; // default + if (io->fancy_upsampling) { +#ifdef FANCY_UPSAMPLING + const int uv_width = (io->mb_w + 1) >> 1; + p->memory = WebPSafeMalloc(1ULL, (size_t)(io->mb_w + 2 * uv_width)); + if (p->memory == NULL) { + return 0; // memory error. + } + p->tmp_y = (uint8_t*)p->memory; + p->tmp_u = p->tmp_y + io->mb_w; + p->tmp_v = p->tmp_u + uv_width; + p->emit = EmitFancyRGB; + WebPInitUpsamplers(); +#endif + } + } else { + p->emit = EmitYUV; + } + if (is_alpha) { // need transparency output + p->emit_alpha = + (colorspace == MODE_RGBA_4444 || colorspace == MODE_rgbA_4444) ? + EmitAlphaRGBA4444 + : is_rgb ? 
EmitAlphaRGB + : EmitAlphaYUV; + if (is_rgb) { + WebPInitAlphaProcessing(); + } + } + } + + return 1; +} + +//------------------------------------------------------------------------------ + +static int CustomPut(const VP8Io* io) { + WebPDecParams* const p = (WebPDecParams*)io->opaque; + const int mb_w = io->mb_w; + const int mb_h = io->mb_h; + int num_lines_out; + assert(!(io->mb_y & 1)); + + if (mb_w <= 0 || mb_h <= 0) { + return 0; + } + num_lines_out = p->emit(io, p); + if (p->emit_alpha != NULL) { + p->emit_alpha(io, p, num_lines_out); + } + p->last_y += num_lines_out; + return 1; +} + +//------------------------------------------------------------------------------ + +static void CustomTeardown(const VP8Io* io) { + WebPDecParams* const p = (WebPDecParams*)io->opaque; + WebPSafeFree(p->memory); + p->memory = NULL; +} + +//------------------------------------------------------------------------------ +// Main entry point + +void WebPInitCustomIo(WebPDecParams* const params, VP8Io* const io) { + io->put = CustomPut; + io->setup = CustomSetup; + io->teardown = CustomTeardown; + io->opaque = params; +} + +//------------------------------------------------------------------------------ diff --git a/vendor/github.com/sizeofint/webpanimation/dec_quant_dec.c b/vendor/github.com/sizeofint/webpanimation/dec_quant_dec.c new file mode 100644 index 00000000..080951b4 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dec_quant_dec.c @@ -0,0 +1,115 @@ +// Copyright 2010 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Quantizer initialization +// +// Author: Skal (pascal.massimino@gmail.com) + +#include "dec_vp8i_dec.h" + +static WEBP_INLINE int clip(int v, int M) { + return v < 0 ? 0 : v > M ? 
M : v; +} + +// Paragraph 14.1 +static const uint8_t kDcTable[128] = { + 4, 5, 6, 7, 8, 9, 10, 10, + 11, 12, 13, 14, 15, 16, 17, 17, + 18, 19, 20, 20, 21, 21, 22, 22, + 23, 23, 24, 25, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, + 37, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, + 59, 60, 61, 62, 63, 64, 65, 66, + 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, + 91, 93, 95, 96, 98, 100, 101, 102, + 104, 106, 108, 110, 112, 114, 116, 118, + 122, 124, 126, 128, 130, 132, 134, 136, + 138, 140, 143, 145, 148, 151, 154, 157 +}; + +static const uint16_t kAcTable[128] = { + 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 60, + 62, 64, 66, 68, 70, 72, 74, 76, + 78, 80, 82, 84, 86, 88, 90, 92, + 94, 96, 98, 100, 102, 104, 106, 108, + 110, 112, 114, 116, 119, 122, 125, 128, + 131, 134, 137, 140, 143, 146, 149, 152, + 155, 158, 161, 164, 167, 170, 173, 177, + 181, 185, 189, 193, 197, 201, 205, 209, + 213, 217, 221, 225, 229, 234, 239, 245, + 249, 254, 259, 264, 269, 274, 279, 284 +}; + +//------------------------------------------------------------------------------ +// Paragraph 9.6 + +void VP8ParseQuant(VP8Decoder* const dec) { + VP8BitReader* const br = &dec->br_; + const int base_q0 = VP8GetValue(br, 7, "global-header"); + const int dqy1_dc = VP8Get(br, "global-header") ? + VP8GetSignedValue(br, 4, "global-header") : 0; + const int dqy2_dc = VP8Get(br, "global-header") ? + VP8GetSignedValue(br, 4, "global-header") : 0; + const int dqy2_ac = VP8Get(br, "global-header") ? + VP8GetSignedValue(br, 4, "global-header") : 0; + const int dquv_dc = VP8Get(br, "global-header") ? + VP8GetSignedValue(br, 4, "global-header") : 0; + const int dquv_ac = VP8Get(br, "global-header") ? + VP8GetSignedValue(br, 4, "global-header") : 0; + + const VP8SegmentHeader* const hdr = &dec->segment_hdr_; + int i; + + for (i = 0; i < NUM_MB_SEGMENTS; ++i) { + int q; + if (hdr->use_segment_) { + q = hdr->quantizer_[i]; + if (!hdr->absolute_delta_) { + q += base_q0; + } + } else { + if (i > 0) { + dec->dqm_[i] = dec->dqm_[0]; + continue; + } else { + q = base_q0; + } + } + { + VP8QuantMatrix* const m = &dec->dqm_[i]; + m->y1_mat_[0] = kDcTable[clip(q + dqy1_dc, 127)]; + m->y1_mat_[1] = kAcTable[clip(q + 0, 127)]; + + m->y2_mat_[0] = kDcTable[clip(q + dqy2_dc, 127)] * 2; + // For all x in [0..284], x*155/100 is bitwise equal to (x*101581) >> 16. + // The smallest precision for that is '(x*6349) >> 12' but 16 is a good + // word size. + m->y2_mat_[1] = (kAcTable[clip(q + dqy2_ac, 127)] * 101581) >> 16; + if (m->y2_mat_[1] < 8) m->y2_mat_[1] = 8; + + m->uv_mat_[0] = kDcTable[clip(q + dquv_dc, 117)]; + m->uv_mat_[1] = kAcTable[clip(q + dquv_ac, 127)]; + + m->uv_quant_ = q + dquv_ac; // for dithering strength evaluation + } + } +} + +//------------------------------------------------------------------------------ + diff --git a/vendor/github.com/sizeofint/webpanimation/dec_tree_dec.c b/vendor/github.com/sizeofint/webpanimation/dec_tree_dec.c new file mode 100644 index 00000000..7325bb49 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dec_tree_dec.c @@ -0,0 +1,537 @@ +// Copyright 2010 Google Inc. All Rights Reserved. 
+// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Coding trees and probas +// +// Author: Skal (pascal.massimino@gmail.com) + +#include "dec_vp8i_dec.h" +#include "utils_bit_reader_inl_utils.h" + +#if !defined(USE_GENERIC_TREE) +#if !defined(__arm__) && !defined(_M_ARM) && !defined(__aarch64__) +// using a table is ~1-2% slower on ARM. Prefer the coded-tree approach then. +#define USE_GENERIC_TREE 1 // ALTERNATE_CODE +#else +#define USE_GENERIC_TREE 0 +#endif +#endif // USE_GENERIC_TREE + +#if (USE_GENERIC_TREE == 1) +static const int8_t kYModesIntra4[18] = { + -B_DC_PRED, 1, + -B_TM_PRED, 2, + -B_VE_PRED, 3, + 4, 6, + -B_HE_PRED, 5, + -B_RD_PRED, -B_VR_PRED, + -B_LD_PRED, 7, + -B_VL_PRED, 8, + -B_HD_PRED, -B_HU_PRED +}; +#endif + +//------------------------------------------------------------------------------ +// Default probabilities + +// Paragraph 13.5 +static const uint8_t + CoeffsProba0[NUM_TYPES][NUM_BANDS][NUM_CTX][NUM_PROBAS] = { + { { { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 } + }, + { { 253, 136, 254, 255, 228, 219, 128, 128, 128, 128, 128 }, + { 189, 129, 242, 255, 227, 213, 255, 219, 128, 128, 128 }, + { 106, 126, 227, 252, 214, 209, 255, 255, 128, 128, 128 } + }, + { { 1, 98, 248, 255, 236, 226, 255, 255, 128, 128, 128 }, + { 181, 133, 238, 254, 221, 234, 255, 154, 128, 128, 128 }, + { 78, 134, 202, 247, 198, 180, 255, 219, 128, 128, 128 }, + }, + { { 1, 185, 249, 255, 243, 255, 128, 128, 128, 128, 128 }, + { 184, 150, 247, 255, 236, 224, 128, 128, 128, 128, 128 }, + { 77, 110, 216, 255, 236, 230, 128, 128, 128, 128, 128 }, + }, + { { 1, 101, 251, 255, 241, 255, 128, 128, 128, 128, 128 }, + { 170, 139, 241, 252, 236, 209, 255, 255, 128, 128, 128 }, + { 37, 116, 196, 243, 228, 255, 255, 255, 128, 128, 128 } + }, + { { 1, 204, 254, 255, 245, 255, 128, 128, 128, 128, 128 }, + { 207, 160, 250, 255, 238, 128, 128, 128, 128, 128, 128 }, + { 102, 103, 231, 255, 211, 171, 128, 128, 128, 128, 128 } + }, + { { 1, 152, 252, 255, 240, 255, 128, 128, 128, 128, 128 }, + { 177, 135, 243, 255, 234, 225, 128, 128, 128, 128, 128 }, + { 80, 129, 211, 255, 194, 224, 128, 128, 128, 128, 128 } + }, + { { 1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 246, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 } + } + }, + { { { 198, 35, 237, 223, 193, 187, 162, 160, 145, 155, 62 }, + { 131, 45, 198, 221, 172, 176, 220, 157, 252, 221, 1 }, + { 68, 47, 146, 208, 149, 167, 221, 162, 255, 223, 128 } + }, + { { 1, 149, 241, 255, 221, 224, 255, 255, 128, 128, 128 }, + { 184, 141, 234, 253, 222, 220, 255, 199, 128, 128, 128 }, + { 81, 99, 181, 242, 176, 190, 249, 202, 255, 255, 128 } + }, + { { 1, 129, 232, 253, 214, 197, 242, 196, 255, 255, 128 }, + { 99, 121, 210, 250, 201, 198, 255, 202, 128, 128, 128 }, + { 23, 91, 163, 242, 170, 187, 247, 210, 255, 255, 128 } + }, + { { 1, 200, 246, 255, 234, 255, 128, 128, 128, 128, 128 }, + { 109, 178, 241, 255, 231, 245, 255, 255, 128, 128, 128 }, + { 44, 130, 201, 253, 205, 192, 255, 255, 128, 128, 
128 } + }, + { { 1, 132, 239, 251, 219, 209, 255, 165, 128, 128, 128 }, + { 94, 136, 225, 251, 218, 190, 255, 255, 128, 128, 128 }, + { 22, 100, 174, 245, 186, 161, 255, 199, 128, 128, 128 } + }, + { { 1, 182, 249, 255, 232, 235, 128, 128, 128, 128, 128 }, + { 124, 143, 241, 255, 227, 234, 128, 128, 128, 128, 128 }, + { 35, 77, 181, 251, 193, 211, 255, 205, 128, 128, 128 } + }, + { { 1, 157, 247, 255, 236, 231, 255, 255, 128, 128, 128 }, + { 121, 141, 235, 255, 225, 227, 255, 255, 128, 128, 128 }, + { 45, 99, 188, 251, 195, 217, 255, 224, 128, 128, 128 } + }, + { { 1, 1, 251, 255, 213, 255, 128, 128, 128, 128, 128 }, + { 203, 1, 248, 255, 255, 128, 128, 128, 128, 128, 128 }, + { 137, 1, 177, 255, 224, 255, 128, 128, 128, 128, 128 } + } + }, + { { { 253, 9, 248, 251, 207, 208, 255, 192, 128, 128, 128 }, + { 175, 13, 224, 243, 193, 185, 249, 198, 255, 255, 128 }, + { 73, 17, 171, 221, 161, 179, 236, 167, 255, 234, 128 } + }, + { { 1, 95, 247, 253, 212, 183, 255, 255, 128, 128, 128 }, + { 239, 90, 244, 250, 211, 209, 255, 255, 128, 128, 128 }, + { 155, 77, 195, 248, 188, 195, 255, 255, 128, 128, 128 } + }, + { { 1, 24, 239, 251, 218, 219, 255, 205, 128, 128, 128 }, + { 201, 51, 219, 255, 196, 186, 128, 128, 128, 128, 128 }, + { 69, 46, 190, 239, 201, 218, 255, 228, 128, 128, 128 } + }, + { { 1, 191, 251, 255, 255, 128, 128, 128, 128, 128, 128 }, + { 223, 165, 249, 255, 213, 255, 128, 128, 128, 128, 128 }, + { 141, 124, 248, 255, 255, 128, 128, 128, 128, 128, 128 } + }, + { { 1, 16, 248, 255, 255, 128, 128, 128, 128, 128, 128 }, + { 190, 36, 230, 255, 236, 255, 128, 128, 128, 128, 128 }, + { 149, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 } + }, + { { 1, 226, 255, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 247, 192, 255, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 240, 128, 255, 128, 128, 128, 128, 128, 128, 128, 128 } + }, + { { 1, 134, 252, 255, 255, 128, 128, 128, 128, 128, 128 }, + { 213, 62, 250, 255, 255, 128, 128, 128, 128, 128, 128 }, + { 55, 93, 255, 128, 128, 128, 128, 128, 128, 128, 128 } + }, + { { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 } + } + }, + { { { 202, 24, 213, 235, 186, 191, 220, 160, 240, 175, 255 }, + { 126, 38, 182, 232, 169, 184, 228, 174, 255, 187, 128 }, + { 61, 46, 138, 219, 151, 178, 240, 170, 255, 216, 128 } + }, + { { 1, 112, 230, 250, 199, 191, 247, 159, 255, 255, 128 }, + { 166, 109, 228, 252, 211, 215, 255, 174, 128, 128, 128 }, + { 39, 77, 162, 232, 172, 180, 245, 178, 255, 255, 128 } + }, + { { 1, 52, 220, 246, 198, 199, 249, 220, 255, 255, 128 }, + { 124, 74, 191, 243, 183, 193, 250, 221, 255, 255, 128 }, + { 24, 71, 130, 219, 154, 170, 243, 182, 255, 255, 128 } + }, + { { 1, 182, 225, 249, 219, 240, 255, 224, 128, 128, 128 }, + { 149, 150, 226, 252, 216, 205, 255, 171, 128, 128, 128 }, + { 28, 108, 170, 242, 183, 194, 254, 223, 255, 255, 128 } + }, + { { 1, 81, 230, 252, 204, 203, 255, 192, 128, 128, 128 }, + { 123, 102, 209, 247, 188, 196, 255, 233, 128, 128, 128 }, + { 20, 95, 153, 243, 164, 173, 255, 203, 128, 128, 128 } + }, + { { 1, 222, 248, 255, 216, 213, 128, 128, 128, 128, 128 }, + { 168, 175, 246, 252, 235, 205, 255, 255, 128, 128, 128 }, + { 47, 116, 215, 255, 211, 212, 255, 255, 128, 128, 128 } + }, + { { 1, 121, 236, 253, 212, 214, 255, 255, 128, 128, 128 }, + { 141, 84, 213, 252, 201, 202, 255, 219, 128, 128, 128 }, + { 42, 80, 160, 240, 162, 185, 255, 205, 128, 128, 128 } + }, + { { 1, 1, 255, 128, 
128, 128, 128, 128, 128, 128, 128 }, + { 244, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 238, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 } + } + } +}; + +// Paragraph 11.5 +static const uint8_t kBModesProba[NUM_BMODES][NUM_BMODES][NUM_BMODES - 1] = { + { { 231, 120, 48, 89, 115, 113, 120, 152, 112 }, + { 152, 179, 64, 126, 170, 118, 46, 70, 95 }, + { 175, 69, 143, 80, 85, 82, 72, 155, 103 }, + { 56, 58, 10, 171, 218, 189, 17, 13, 152 }, + { 114, 26, 17, 163, 44, 195, 21, 10, 173 }, + { 121, 24, 80, 195, 26, 62, 44, 64, 85 }, + { 144, 71, 10, 38, 171, 213, 144, 34, 26 }, + { 170, 46, 55, 19, 136, 160, 33, 206, 71 }, + { 63, 20, 8, 114, 114, 208, 12, 9, 226 }, + { 81, 40, 11, 96, 182, 84, 29, 16, 36 } }, + { { 134, 183, 89, 137, 98, 101, 106, 165, 148 }, + { 72, 187, 100, 130, 157, 111, 32, 75, 80 }, + { 66, 102, 167, 99, 74, 62, 40, 234, 128 }, + { 41, 53, 9, 178, 241, 141, 26, 8, 107 }, + { 74, 43, 26, 146, 73, 166, 49, 23, 157 }, + { 65, 38, 105, 160, 51, 52, 31, 115, 128 }, + { 104, 79, 12, 27, 217, 255, 87, 17, 7 }, + { 87, 68, 71, 44, 114, 51, 15, 186, 23 }, + { 47, 41, 14, 110, 182, 183, 21, 17, 194 }, + { 66, 45, 25, 102, 197, 189, 23, 18, 22 } }, + { { 88, 88, 147, 150, 42, 46, 45, 196, 205 }, + { 43, 97, 183, 117, 85, 38, 35, 179, 61 }, + { 39, 53, 200, 87, 26, 21, 43, 232, 171 }, + { 56, 34, 51, 104, 114, 102, 29, 93, 77 }, + { 39, 28, 85, 171, 58, 165, 90, 98, 64 }, + { 34, 22, 116, 206, 23, 34, 43, 166, 73 }, + { 107, 54, 32, 26, 51, 1, 81, 43, 31 }, + { 68, 25, 106, 22, 64, 171, 36, 225, 114 }, + { 34, 19, 21, 102, 132, 188, 16, 76, 124 }, + { 62, 18, 78, 95, 85, 57, 50, 48, 51 } }, + { { 193, 101, 35, 159, 215, 111, 89, 46, 111 }, + { 60, 148, 31, 172, 219, 228, 21, 18, 111 }, + { 112, 113, 77, 85, 179, 255, 38, 120, 114 }, + { 40, 42, 1, 196, 245, 209, 10, 25, 109 }, + { 88, 43, 29, 140, 166, 213, 37, 43, 154 }, + { 61, 63, 30, 155, 67, 45, 68, 1, 209 }, + { 100, 80, 8, 43, 154, 1, 51, 26, 71 }, + { 142, 78, 78, 16, 255, 128, 34, 197, 171 }, + { 41, 40, 5, 102, 211, 183, 4, 1, 221 }, + { 51, 50, 17, 168, 209, 192, 23, 25, 82 } }, + { { 138, 31, 36, 171, 27, 166, 38, 44, 229 }, + { 67, 87, 58, 169, 82, 115, 26, 59, 179 }, + { 63, 59, 90, 180, 59, 166, 93, 73, 154 }, + { 40, 40, 21, 116, 143, 209, 34, 39, 175 }, + { 47, 15, 16, 183, 34, 223, 49, 45, 183 }, + { 46, 17, 33, 183, 6, 98, 15, 32, 183 }, + { 57, 46, 22, 24, 128, 1, 54, 17, 37 }, + { 65, 32, 73, 115, 28, 128, 23, 128, 205 }, + { 40, 3, 9, 115, 51, 192, 18, 6, 223 }, + { 87, 37, 9, 115, 59, 77, 64, 21, 47 } }, + { { 104, 55, 44, 218, 9, 54, 53, 130, 226 }, + { 64, 90, 70, 205, 40, 41, 23, 26, 57 }, + { 54, 57, 112, 184, 5, 41, 38, 166, 213 }, + { 30, 34, 26, 133, 152, 116, 10, 32, 134 }, + { 39, 19, 53, 221, 26, 114, 32, 73, 255 }, + { 31, 9, 65, 234, 2, 15, 1, 118, 73 }, + { 75, 32, 12, 51, 192, 255, 160, 43, 51 }, + { 88, 31, 35, 67, 102, 85, 55, 186, 85 }, + { 56, 21, 23, 111, 59, 205, 45, 37, 192 }, + { 55, 38, 70, 124, 73, 102, 1, 34, 98 } }, + { { 125, 98, 42, 88, 104, 85, 117, 175, 82 }, + { 95, 84, 53, 89, 128, 100, 113, 101, 45 }, + { 75, 79, 123, 47, 51, 128, 81, 171, 1 }, + { 57, 17, 5, 71, 102, 57, 53, 41, 49 }, + { 38, 33, 13, 121, 57, 73, 26, 1, 85 }, + { 41, 10, 67, 138, 77, 110, 90, 47, 114 }, + { 115, 21, 2, 10, 102, 255, 166, 23, 6 }, + { 101, 29, 16, 10, 85, 128, 101, 196, 26 }, + { 57, 18, 10, 102, 102, 213, 34, 20, 43 }, + { 117, 20, 15, 36, 163, 128, 68, 1, 26 } }, + { { 102, 61, 71, 37, 34, 53, 31, 243, 192 }, + { 69, 60, 71, 38, 73, 119, 28, 222, 37 }, + { 68, 45, 128, 34, 1, 47, 
11, 245, 171 }, + { 62, 17, 19, 70, 146, 85, 55, 62, 70 }, + { 37, 43, 37, 154, 100, 163, 85, 160, 1 }, + { 63, 9, 92, 136, 28, 64, 32, 201, 85 }, + { 75, 15, 9, 9, 64, 255, 184, 119, 16 }, + { 86, 6, 28, 5, 64, 255, 25, 248, 1 }, + { 56, 8, 17, 132, 137, 255, 55, 116, 128 }, + { 58, 15, 20, 82, 135, 57, 26, 121, 40 } }, + { { 164, 50, 31, 137, 154, 133, 25, 35, 218 }, + { 51, 103, 44, 131, 131, 123, 31, 6, 158 }, + { 86, 40, 64, 135, 148, 224, 45, 183, 128 }, + { 22, 26, 17, 131, 240, 154, 14, 1, 209 }, + { 45, 16, 21, 91, 64, 222, 7, 1, 197 }, + { 56, 21, 39, 155, 60, 138, 23, 102, 213 }, + { 83, 12, 13, 54, 192, 255, 68, 47, 28 }, + { 85, 26, 85, 85, 128, 128, 32, 146, 171 }, + { 18, 11, 7, 63, 144, 171, 4, 4, 246 }, + { 35, 27, 10, 146, 174, 171, 12, 26, 128 } }, + { { 190, 80, 35, 99, 180, 80, 126, 54, 45 }, + { 85, 126, 47, 87, 176, 51, 41, 20, 32 }, + { 101, 75, 128, 139, 118, 146, 116, 128, 85 }, + { 56, 41, 15, 176, 236, 85, 37, 9, 62 }, + { 71, 30, 17, 119, 118, 255, 17, 18, 138 }, + { 101, 38, 60, 138, 55, 70, 43, 26, 142 }, + { 146, 36, 19, 30, 171, 255, 97, 27, 20 }, + { 138, 45, 61, 62, 219, 1, 81, 188, 64 }, + { 32, 41, 20, 117, 151, 142, 20, 21, 163 }, + { 112, 19, 12, 61, 195, 128, 48, 4, 24 } } +}; + +void VP8ResetProba(VP8Proba* const proba) { + memset(proba->segments_, 255u, sizeof(proba->segments_)); + // proba->bands_[][] is initialized later +} + +static void ParseIntraMode(VP8BitReader* const br, + VP8Decoder* const dec, int mb_x) { + uint8_t* const top = dec->intra_t_ + 4 * mb_x; + uint8_t* const left = dec->intra_l_; + VP8MBData* const block = dec->mb_data_ + mb_x; + + // Note: we don't save segment map (yet), as we don't expect + // to decode more than 1 keyframe. + if (dec->segment_hdr_.update_map_) { + // Hardcoded tree parsing + block->segment_ = !VP8GetBit(br, dec->proba_.segments_[0], "segments") + ? VP8GetBit(br, dec->proba_.segments_[1], "segments") + : VP8GetBit(br, dec->proba_.segments_[2], "segments") + 2; + } else { + block->segment_ = 0; // default for intra + } + if (dec->use_skip_proba_) block->skip_ = VP8GetBit(br, dec->skip_p_, "skip"); + + block->is_i4x4_ = !VP8GetBit(br, 145, "block-size"); + if (!block->is_i4x4_) { + // Hardcoded 16x16 intra-mode decision tree. + const int ymode = + VP8GetBit(br, 156, "pred-modes") ? + (VP8GetBit(br, 128, "pred-modes") ? TM_PRED : H_PRED) : + (VP8GetBit(br, 163, "pred-modes") ? V_PRED : DC_PRED); + block->imodes_[0] = ymode; + memset(top, ymode, 4 * sizeof(*top)); + memset(left, ymode, 4 * sizeof(*left)); + } else { + uint8_t* modes = block->imodes_; + int y; + for (y = 0; y < 4; ++y) { + int ymode = left[y]; + int x; + for (x = 0; x < 4; ++x) { + const uint8_t* const prob = kBModesProba[top[x]][ymode]; +#if (USE_GENERIC_TREE == 1) + // Generic tree-parsing + int i = kYModesIntra4[VP8GetBit(br, prob[0], "pred-modes")]; + while (i > 0) { + i = kYModesIntra4[2 * i + VP8GetBit(br, prob[i], "pred-modes")]; + } + ymode = -i; +#else + // Hardcoded tree parsing + ymode = !VP8GetBit(br, prob[0], "pred-modes") ? B_DC_PRED : + !VP8GetBit(br, prob[1], "pred-modes") ? B_TM_PRED : + !VP8GetBit(br, prob[2], "pred-modes") ? B_VE_PRED : + !VP8GetBit(br, prob[3], "pred-modes") ? + (!VP8GetBit(br, prob[4], "pred-modes") ? B_HE_PRED : + (!VP8GetBit(br, prob[5], "pred-modes") ? B_RD_PRED + : B_VR_PRED)) : + (!VP8GetBit(br, prob[6], "pred-modes") ? B_LD_PRED : + (!VP8GetBit(br, prob[7], "pred-modes") ? B_VL_PRED : + (!VP8GetBit(br, prob[8], "pred-modes") ? 
B_HD_PRED + : B_HU_PRED)) + ); +#endif // USE_GENERIC_TREE + top[x] = ymode; + } + memcpy(modes, top, 4 * sizeof(*top)); + modes += 4; + left[y] = ymode; + } + } + // Hardcoded UVMode decision tree + block->uvmode_ = !VP8GetBit(br, 142, "pred-modes-uv") ? DC_PRED + : !VP8GetBit(br, 114, "pred-modes-uv") ? V_PRED + : VP8GetBit(br, 183, "pred-modes-uv") ? TM_PRED : H_PRED; +} + +int VP8ParseIntraModeRow(VP8BitReader* const br, VP8Decoder* const dec) { + int mb_x; + for (mb_x = 0; mb_x < dec->mb_w_; ++mb_x) { + ParseIntraMode(br, dec, mb_x); + } + return !dec->br_.eof_; +} + +//------------------------------------------------------------------------------ +// Paragraph 13 + +static const uint8_t + CoeffsUpdateProba[NUM_TYPES][NUM_BANDS][NUM_CTX][NUM_PROBAS] = { + { { { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 176, 246, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 223, 241, 252, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 249, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 244, 252, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 234, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 246, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 239, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 251, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 251, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 254, 253, 255, 254, 255, 255, 255, 255, 255, 255 }, + { 250, 255, 254, 255, 254, 255, 255, 255, 255, 255, 255 }, + { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + } + }, + { { { 217, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 225, 252, 241, 253, 255, 255, 254, 255, 255, 255, 255 }, + { 234, 250, 241, 250, 253, 255, 253, 254, 255, 255, 255 } + }, + { { 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 223, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 238, 253, 254, 254, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 249, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 253, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 247, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 252, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 
254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + } + }, + { { { 186, 251, 250, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 234, 251, 244, 254, 255, 255, 255, 255, 255, 255, 255 }, + { 251, 251, 243, 253, 254, 255, 254, 255, 255, 255, 255 } + }, + { { 255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 236, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 251, 253, 253, 254, 254, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + } + }, + { { { 248, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 250, 254, 252, 254, 255, 255, 255, 255, 255, 255, 255 }, + { 248, 254, 249, 253, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 246, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 252, 254, 251, 254, 254, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 254, 252, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 248, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 253, 255, 254, 254, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 245, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 253, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 251, 253, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 252, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 252, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 249, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 255, 253, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + } + } +}; + +// Paragraph 9.9 + +static const uint8_t kBands[16 + 1] = { + 0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7, + 0 // extra entry as sentinel +}; + +void VP8ParseProba(VP8BitReader* const br, VP8Decoder* const dec) { + VP8Proba* const proba = &dec->proba_; + 
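+ // Worked note on the update rule below (RFC 6386, section 13.4): for each + // position (t,b,c,p) one bool is read, with probability + // CoeffsUpdateProba[t][b][c][p]/256 of being 0. Only when that bool is 1 is + // a fresh 8-bit literal read to replace the baseline value from + // CoeffsProba0. With an update proba of 255, for instance, the baseline is + // kept roughly 255 times out of 256.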
int t, b, c, p; + for (t = 0; t < NUM_TYPES; ++t) { + for (b = 0; b < NUM_BANDS; ++b) { + for (c = 0; c < NUM_CTX; ++c) { + for (p = 0; p < NUM_PROBAS; ++p) { + const int v = + VP8GetBit(br, CoeffsUpdateProba[t][b][c][p], "global-header") ? + VP8GetValue(br, 8, "global-header") : + CoeffsProba0[t][b][c][p]; + proba->bands_[t][b].probas_[c][p] = v; + } + } + } + for (b = 0; b < 16 + 1; ++b) { + proba->bands_ptr_[t][b] = &proba->bands_[t][kBands[b]]; + } + } + dec->use_skip_proba_ = VP8Get(br, "global-header"); + if (dec->use_skip_proba_) { + dec->skip_p_ = VP8GetValue(br, 8, "global-header"); + } +} diff --git a/vendor/github.com/sizeofint/webpanimation/dec_vp8_dec.c b/vendor/github.com/sizeofint/webpanimation/dec_vp8_dec.c new file mode 100644 index 00000000..6db26a78 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dec_vp8_dec.c @@ -0,0 +1,722 @@ +// Copyright 2010 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// main entry for the decoder +// +// Author: Skal (pascal.massimino@gmail.com) + +#include <stdlib.h> + +#include "dec_alphai_dec.h" +#include "dec_vp8i_dec.h" +#include "dec_vp8li_dec.h" +#include "dec_webpi_dec.h" +#include "utils_bit_reader_inl_utils.h" +#include "utils_utils.h" + +//------------------------------------------------------------------------------ + +int WebPGetDecoderVersion(void) { + return (DEC_MAJ_VERSION << 16) | (DEC_MIN_VERSION << 8) | DEC_REV_VERSION; +} + +//------------------------------------------------------------------------------ +// Signature and pointer-to-function for GetCoeffs() variants below. + +typedef int (*GetCoeffsFunc)(VP8BitReader* const br, + const VP8BandProbas* const prob[], + int ctx, const quant_t dq, int n, int16_t* out); +static volatile GetCoeffsFunc GetCoeffs = NULL; + +static void InitGetCoeffs(void); + +//------------------------------------------------------------------------------ +// VP8Decoder + +static void SetOk(VP8Decoder* const dec) { + dec->status_ = VP8_STATUS_OK; + dec->error_msg_ = "OK"; +} + +int VP8InitIoInternal(VP8Io* const io, int version) { + if (WEBP_ABI_IS_INCOMPATIBLE(version, WEBP_DECODER_ABI_VERSION)) { + return 0; // mismatch error + } + if (io != NULL) { + memset(io, 0, sizeof(*io)); + } + return 1; +} + +VP8Decoder* VP8New(void) { + VP8Decoder* const dec = (VP8Decoder*)WebPSafeCalloc(1ULL, sizeof(*dec)); + if (dec != NULL) { + SetOk(dec); + WebPGetWorkerInterface()->Init(&dec->worker_); + dec->ready_ = 0; + dec->num_parts_minus_one_ = 0; + InitGetCoeffs(); + } + return dec; +} + +VP8StatusCode VP8Status(VP8Decoder* const dec) { + if (!dec) return VP8_STATUS_INVALID_PARAM; + return dec->status_; +} + +const char* VP8StatusMessage(VP8Decoder* const dec) { + if (dec == NULL) return "no object"; + if (!dec->error_msg_) return "OK"; + return dec->error_msg_; +} + +void VP8Delete(VP8Decoder* const dec) { + if (dec != NULL) { + VP8Clear(dec); + WebPSafeFree(dec); + } +} + +int VP8SetError(VP8Decoder* const dec, + VP8StatusCode error, const char* const msg) { + // The oldest error reported takes precedence over the new one.
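+ // Usage sketch (hypothetical caller, not part of this file): because + // VP8SetError() always returns 0, parsing code can record the error and + // bail out in a single statement, and the caller can query it afterwards: + // + //   if (!VP8GetHeaders(dec, &io)) { + //     fprintf(stderr, "decode failed: %s\n", VP8StatusMessage(dec)); + //   }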
+ if (dec->status_ == VP8_STATUS_OK) { + dec->status_ = error; + dec->error_msg_ = msg; + dec->ready_ = 0; + } + return 0; +} + +//------------------------------------------------------------------------------ + +int VP8CheckSignature(const uint8_t* const data, size_t data_size) { + return (data_size >= 3 && + data[0] == 0x9d && data[1] == 0x01 && data[2] == 0x2a); +} + +int VP8GetInfo(const uint8_t* data, size_t data_size, size_t chunk_size, + int* const width, int* const height) { + if (data == NULL || data_size < VP8_FRAME_HEADER_SIZE) { + return 0; // not enough data + } + // check signature + if (!VP8CheckSignature(data + 3, data_size - 3)) { + return 0; // Wrong signature. + } else { + const uint32_t bits = data[0] | (data[1] << 8) | (data[2] << 16); + const int key_frame = !(bits & 1); + const int w = ((data[7] << 8) | data[6]) & 0x3fff; + const int h = ((data[9] << 8) | data[8]) & 0x3fff; + + if (!key_frame) { // Not a keyframe. + return 0; + } + + if (((bits >> 1) & 7) > 3) { + return 0; // unknown profile + } + if (!((bits >> 4) & 1)) { + return 0; // first frame is invisible! + } + if (((bits >> 5)) >= chunk_size) { // partition_length + return 0; // inconsistent size information. + } + if (w == 0 || h == 0) { + return 0; // A width or height of zero is not supported. + } + + if (width) { + *width = w; + } + if (height) { + *height = h; + } + + return 1; + } +} + +//------------------------------------------------------------------------------ +// Header parsing + +static void ResetSegmentHeader(VP8SegmentHeader* const hdr) { + assert(hdr != NULL); + hdr->use_segment_ = 0; + hdr->update_map_ = 0; + hdr->absolute_delta_ = 1; + memset(hdr->quantizer_, 0, sizeof(hdr->quantizer_)); + memset(hdr->filter_strength_, 0, sizeof(hdr->filter_strength_)); +} + +// Paragraph 9.3 +static int ParseSegmentHeader(VP8BitReader* br, + VP8SegmentHeader* hdr, VP8Proba* proba) { + assert(br != NULL); + assert(hdr != NULL); + hdr->use_segment_ = VP8Get(br, "global-header"); + if (hdr->use_segment_) { + hdr->update_map_ = VP8Get(br, "global-header"); + if (VP8Get(br, "global-header")) { // update data + int s; + hdr->absolute_delta_ = VP8Get(br, "global-header"); + for (s = 0; s < NUM_MB_SEGMENTS; ++s) { + hdr->quantizer_[s] = VP8Get(br, "global-header") ? + VP8GetSignedValue(br, 7, "global-header") : 0; + } + for (s = 0; s < NUM_MB_SEGMENTS; ++s) { + hdr->filter_strength_[s] = VP8Get(br, "global-header") ? + VP8GetSignedValue(br, 6, "global-header") : 0; + } + } + if (hdr->update_map_) { + int s; + for (s = 0; s < MB_FEATURE_TREE_PROBS; ++s) { + proba->segments_[s] = VP8Get(br, "global-header") ? + VP8GetValue(br, 8, "global-header") : 255u; + } + } + } else { + hdr->update_map_ = 0; + } + return !br->eof_; +} + +// Paragraph 9.5 +// This function returns VP8_STATUS_SUSPENDED if we don't have all the +// necessary data in 'buf'. +// This case is not necessarily an error (for incremental decoding). +// Still, no bitreader is ever initialized to make it possible to read +// unavailable memory. +// If we don't even have the partitions' sizes, then VP8_STATUS_NOT_ENOUGH_DATA +// is returned, and this is an unrecoverable error. +// If the partitions were positioned ok, VP8_STATUS_OK is returned.
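+// Layout illustration (sizes hypothetical): with four partitions, 'buf' +// begins with three little-endian uint24 sizes, one per partition except the +// last, which simply gets whatever bytes remain: +// +//   | sz0 | sz1 | sz2 | part 0 (sz0 B) | part 1 (sz1 B) | part 2 (sz2 B) | part 3 (rest) | +// +// with each size decoded as sz[0] | (sz[1] << 8) | (sz[2] << 16).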
+static VP8StatusCode ParsePartitions(VP8Decoder* const dec, + const uint8_t* buf, size_t size) { + VP8BitReader* const br = &dec->br_; + const uint8_t* sz = buf; + const uint8_t* buf_end = buf + size; + const uint8_t* part_start; + size_t size_left = size; + size_t last_part; + size_t p; + + dec->num_parts_minus_one_ = (1 << VP8GetValue(br, 2, "global-header")) - 1; + last_part = dec->num_parts_minus_one_; + if (size < 3 * last_part) { + // we can't even read the sizes with sz[]! That's a failure. + return VP8_STATUS_NOT_ENOUGH_DATA; + } + part_start = buf + last_part * 3; + size_left -= last_part * 3; + for (p = 0; p < last_part; ++p) { + size_t psize = sz[0] | (sz[1] << 8) | (sz[2] << 16); + if (psize > size_left) psize = size_left; + VP8InitBitReader(dec->parts_ + p, part_start, psize); + part_start += psize; + size_left -= psize; + sz += 3; + } + VP8InitBitReader(dec->parts_ + last_part, part_start, size_left); + return (part_start < buf_end) ? VP8_STATUS_OK : + VP8_STATUS_SUSPENDED; // Init is ok, but there's not enough data +} + +// Paragraph 9.4 +static int ParseFilterHeader(VP8BitReader* br, VP8Decoder* const dec) { + VP8FilterHeader* const hdr = &dec->filter_hdr_; + hdr->simple_ = VP8Get(br, "global-header"); + hdr->level_ = VP8GetValue(br, 6, "global-header"); + hdr->sharpness_ = VP8GetValue(br, 3, "global-header"); + hdr->use_lf_delta_ = VP8Get(br, "global-header"); + if (hdr->use_lf_delta_) { + if (VP8Get(br, "global-header")) { // update lf-delta? + int i; + for (i = 0; i < NUM_REF_LF_DELTAS; ++i) { + if (VP8Get(br, "global-header")) { + hdr->ref_lf_delta_[i] = VP8GetSignedValue(br, 6, "global-header"); + } + } + for (i = 0; i < NUM_MODE_LF_DELTAS; ++i) { + if (VP8Get(br, "global-header")) { + hdr->mode_lf_delta_[i] = VP8GetSignedValue(br, 6, "global-header"); + } + } + } + } + dec->filter_type_ = (hdr->level_ == 0) ? 0 : hdr->simple_ ? 
1 : 2; + return !br->eof_; +} + +// Topmost call +int VP8GetHeaders(VP8Decoder* const dec, VP8Io* const io) { + const uint8_t* buf; + size_t buf_size; + VP8FrameHeader* frm_hdr; + VP8PictureHeader* pic_hdr; + VP8BitReader* br; + VP8StatusCode status; + + if (dec == NULL) { + return 0; + } + SetOk(dec); + if (io == NULL) { + return VP8SetError(dec, VP8_STATUS_INVALID_PARAM, + "null VP8Io passed to VP8GetHeaders()"); + } + buf = io->data; + buf_size = io->data_size; + if (buf_size < 4) { + return VP8SetError(dec, VP8_STATUS_NOT_ENOUGH_DATA, + "Truncated header."); + } + + // Paragraph 9.1 + { + const uint32_t bits = buf[0] | (buf[1] << 8) | (buf[2] << 16); + frm_hdr = &dec->frm_hdr_; + frm_hdr->key_frame_ = !(bits & 1); + frm_hdr->profile_ = (bits >> 1) & 7; + frm_hdr->show_ = (bits >> 4) & 1; + frm_hdr->partition_length_ = (bits >> 5); + if (frm_hdr->profile_ > 3) { + return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR, + "Incorrect keyframe parameters."); + } + if (!frm_hdr->show_) { + return VP8SetError(dec, VP8_STATUS_UNSUPPORTED_FEATURE, + "Frame not displayable."); + } + buf += 3; + buf_size -= 3; + } + + pic_hdr = &dec->pic_hdr_; + if (frm_hdr->key_frame_) { + // Paragraph 9.2 + if (buf_size < 7) { + return VP8SetError(dec, VP8_STATUS_NOT_ENOUGH_DATA, + "cannot parse picture header"); + } + if (!VP8CheckSignature(buf, buf_size)) { + return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR, + "Bad code word"); + } + pic_hdr->width_ = ((buf[4] << 8) | buf[3]) & 0x3fff; + pic_hdr->xscale_ = buf[4] >> 6; // ratio: 1, 5/4 5/3 or 2 + pic_hdr->height_ = ((buf[6] << 8) | buf[5]) & 0x3fff; + pic_hdr->yscale_ = buf[6] >> 6; + buf += 7; + buf_size -= 7; + + dec->mb_w_ = (pic_hdr->width_ + 15) >> 4; + dec->mb_h_ = (pic_hdr->height_ + 15) >> 4; + + // Setup default output area (can be later modified during io->setup()) + io->width = pic_hdr->width_; + io->height = pic_hdr->height_; + // IMPORTANT! use some sane dimensions in crop_* and scaled_* fields. + // So they can be used interchangeably without always testing for + // 'use_cropping'. + io->use_cropping = 0; + io->crop_top = 0; + io->crop_left = 0; + io->crop_right = io->width; + io->crop_bottom = io->height; + io->use_scaling = 0; + io->scaled_width = io->width; + io->scaled_height = io->height; + + io->mb_w = io->width; // sanity check + io->mb_h = io->height; // ditto + + VP8ResetProba(&dec->proba_); + ResetSegmentHeader(&dec->segment_hdr_); + } + + // Check if we have all the partition #0 available, and initialize dec->br_ + // to read this partition (and this partition only). 
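+ // Recap of the 3-byte frame tag parsed in Paragraph 9.1 above: bit 0 is the + // keyframe flag (0 means keyframe, hence the '!'), bits 1..3 the profile, + // bit 4 the show_frame flag, and bits 5..23 partition_length_. E.g. the tag + // bytes 0x50 0x07 0x00 give bits == 0x0750: a shown keyframe, profile 0, + // with a 58-byte first partition.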
+ if (frm_hdr->partition_length_ > buf_size) { + return VP8SetError(dec, VP8_STATUS_NOT_ENOUGH_DATA, + "bad partition length"); + } + + br = &dec->br_; + VP8InitBitReader(br, buf, frm_hdr->partition_length_); + buf += frm_hdr->partition_length_; + buf_size -= frm_hdr->partition_length_; + + if (frm_hdr->key_frame_) { + pic_hdr->colorspace_ = VP8Get(br, "global-header"); + pic_hdr->clamp_type_ = VP8Get(br, "global-header"); + } + if (!ParseSegmentHeader(br, &dec->segment_hdr_, &dec->proba_)) { + return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR, + "cannot parse segment header"); + } + // Filter specs + if (!ParseFilterHeader(br, dec)) { + return VP8SetError(dec, VP8_STATUS_BITSTREAM_ERROR, + "cannot parse filter header"); + } + status = ParsePartitions(dec, buf, buf_size); + if (status != VP8_STATUS_OK) { + return VP8SetError(dec, status, "cannot parse partitions"); + } + + // quantizer change + VP8ParseQuant(dec); + + // Frame buffer marking + if (!frm_hdr->key_frame_) { + return VP8SetError(dec, VP8_STATUS_UNSUPPORTED_FEATURE, + "Not a key frame."); + } + + VP8Get(br, "global-header"); // ignore the value of update_proba_ + + VP8ParseProba(br, dec); + + // sanitized state + dec->ready_ = 1; + return 1; +} + +//------------------------------------------------------------------------------ +// Residual decoding (Paragraph 13.2 / 13.3) + +static const uint8_t kCat3[] = { 173, 148, 140, 0 }; +static const uint8_t kCat4[] = { 176, 155, 140, 135, 0 }; +static const uint8_t kCat5[] = { 180, 157, 141, 134, 130, 0 }; +static const uint8_t kCat6[] = + { 254, 254, 243, 230, 196, 177, 153, 140, 133, 130, 129, 0 }; +static const uint8_t* const kCat3456[] = { kCat3, kCat4, kCat5, kCat6 }; +static const uint8_t kZigzag[16] = { + 0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15 +}; + +// See section 13-2: http://tools.ietf.org/html/rfc6386#section-13.2 +static int GetLargeValue(VP8BitReader* const br, const uint8_t* const p) { + int v; + if (!VP8GetBit(br, p[3], "coeffs")) { + if (!VP8GetBit(br, p[4], "coeffs")) { + v = 2; + } else { + v = 3 + VP8GetBit(br, p[5], "coeffs"); + } + } else { + if (!VP8GetBit(br, p[6], "coeffs")) { + if (!VP8GetBit(br, p[7], "coeffs")) { + v = 5 + VP8GetBit(br, 159, "coeffs"); + } else { + v = 7 + 2 * VP8GetBit(br, 165, "coeffs"); + v += VP8GetBit(br, 145, "coeffs"); + } + } else { + const uint8_t* tab; + const int bit1 = VP8GetBit(br, p[8], "coeffs"); + const int bit0 = VP8GetBit(br, p[9 + bit1], "coeffs"); + const int cat = 2 * bit1 + bit0; + v = 0; + for (tab = kCat3456[cat]; *tab; ++tab) { + v += v + VP8GetBit(br, *tab, "coeffs"); + } + v += 3 + (8 << cat); + } + } + return v; +} + +// Returns the position of the last non-zero coeff plus one +static int GetCoeffsFast(VP8BitReader* const br, + const VP8BandProbas* const prob[], + int ctx, const quant_t dq, int n, int16_t* out) { + const uint8_t* p = prob[n]->probas_[ctx]; + for (; n < 16; ++n) { + if (!VP8GetBit(br, p[0], "coeffs")) { + return n; // previous coeff was last non-zero coeff + } + while (!VP8GetBit(br, p[1], "coeffs")) { // sequence of zero coeffs + p = prob[++n]->probas_[0]; + if (n == 16) return 16; + } + { // non zero coeff + const VP8ProbaArray* const p_ctx = &prob[n + 1]->probas_[0]; + int v; + if (!VP8GetBit(br, p[2], "coeffs")) { + v = 1; + p = p_ctx[1]; + } else { + v = GetLargeValue(br, p); + p = p_ctx[2]; + } + out[kZigzag[n]] = VP8GetSigned(br, v, "coeffs") * dq[n > 0]; + } + } + return 16; +} + +// This version of GetCoeffs() uses VP8GetBitAlt() which is an alternate version +// of 
VP8GetBit() targeting specific platforms. +static int GetCoeffsAlt(VP8BitReader* const br, + const VP8BandProbas* const prob[], + int ctx, const quant_t dq, int n, int16_t* out) { + const uint8_t* p = prob[n]->probas_[ctx]; + for (; n < 16; ++n) { + if (!VP8GetBitAlt(br, p[0], "coeffs")) { + return n; // previous coeff was last non-zero coeff + } + while (!VP8GetBitAlt(br, p[1], "coeffs")) { // sequence of zero coeffs + p = prob[++n]->probas_[0]; + if (n == 16) return 16; + } + { // non zero coeff + const VP8ProbaArray* const p_ctx = &prob[n + 1]->probas_[0]; + int v; + if (!VP8GetBitAlt(br, p[2], "coeffs")) { + v = 1; + p = p_ctx[1]; + } else { + v = GetLargeValue(br, p); + p = p_ctx[2]; + } + out[kZigzag[n]] = VP8GetSigned(br, v, "coeffs") * dq[n > 0]; + } + } + return 16; +} + +WEBP_DSP_INIT_FUNC(InitGetCoeffs) { + if (VP8GetCPUInfo != NULL && VP8GetCPUInfo(kSlowSSSE3)) { + GetCoeffs = GetCoeffsAlt; + } else { + GetCoeffs = GetCoeffsFast; + } +} + +static WEBP_INLINE uint32_t NzCodeBits(uint32_t nz_coeffs, int nz, int dc_nz) { + nz_coeffs <<= 2; + nz_coeffs |= (nz > 3) ? 3 : (nz > 1) ? 2 : dc_nz; + return nz_coeffs; +} + +static int ParseResiduals(VP8Decoder* const dec, + VP8MB* const mb, VP8BitReader* const token_br) { + const VP8BandProbas* (* const bands)[16 + 1] = dec->proba_.bands_ptr_; + const VP8BandProbas* const * ac_proba; + VP8MBData* const block = dec->mb_data_ + dec->mb_x_; + const VP8QuantMatrix* const q = &dec->dqm_[block->segment_]; + int16_t* dst = block->coeffs_; + VP8MB* const left_mb = dec->mb_info_ - 1; + uint8_t tnz, lnz; + uint32_t non_zero_y = 0; + uint32_t non_zero_uv = 0; + int x, y, ch; + uint32_t out_t_nz, out_l_nz; + int first; + + memset(dst, 0, 384 * sizeof(*dst)); + if (!block->is_i4x4_) { // parse DC + int16_t dc[16] = { 0 }; + const int ctx = mb->nz_dc_ + left_mb->nz_dc_; + const int nz = GetCoeffs(token_br, bands[1], ctx, q->y2_mat_, 0, dc); + mb->nz_dc_ = left_mb->nz_dc_ = (nz > 0); + if (nz > 1) { // more than just the DC -> perform the full transform + VP8TransformWHT(dc, dst); + } else { // only DC is non-zero -> inlined simplified transform + int i; + const int dc0 = (dc[0] + 3) >> 3; + for (i = 0; i < 16 * 16; i += 16) dst[i] = dc0; + } + first = 1; + ac_proba = bands[0]; + } else { + first = 0; + ac_proba = bands[3]; + } + + tnz = mb->nz_ & 0x0f; + lnz = left_mb->nz_ & 0x0f; + for (y = 0; y < 4; ++y) { + int l = lnz & 1; + uint32_t nz_coeffs = 0; + for (x = 0; x < 4; ++x) { + const int ctx = l + (tnz & 1); + const int nz = GetCoeffs(token_br, ac_proba, ctx, q->y1_mat_, first, dst); + l = (nz > first); + tnz = (tnz >> 1) | (l << 7); + nz_coeffs = NzCodeBits(nz_coeffs, nz, dst[0] != 0); + dst += 16; + } + tnz >>= 4; + lnz = (lnz >> 1) | (l << 7); + non_zero_y = (non_zero_y << 8) | nz_coeffs; + } + out_t_nz = tnz; + out_l_nz = lnz >> 4; + + for (ch = 0; ch < 4; ch += 2) { + uint32_t nz_coeffs = 0; + tnz = mb->nz_ >> (4 + ch); + lnz = left_mb->nz_ >> (4 + ch); + for (y = 0; y < 2; ++y) { + int l = lnz & 1; + for (x = 0; x < 2; ++x) { + const int ctx = l + (tnz & 1); + const int nz = GetCoeffs(token_br, bands[2], ctx, q->uv_mat_, 0, dst); + l = (nz > 0); + tnz = (tnz >> 1) | (l << 3); + nz_coeffs = NzCodeBits(nz_coeffs, nz, dst[0] != 0); + dst += 16; + } + tnz >>= 2; + lnz = (lnz >> 1) | (l << 5); + } + // Note: we don't really need the per-4x4 details for U/V blocks.
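+ // How the 2-bit codes accumulate (see NzCodeBits() above): per 4x4 block, + // nz_coeffs is shifted left by two and the new code is OR'ed in: 3 if more + // than three coefficients are set, 2 for two or three, else the DC flag. + // E.g. four blocks coding 0, 1, 2, 3 in that order yield + // nz_coeffs == 0b00011011 == 0x1b.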
+ non_zero_uv |= nz_coeffs << (4 * ch); + out_t_nz |= (tnz << 4) << ch; + out_l_nz |= (lnz & 0xf0) << ch; + } + mb->nz_ = out_t_nz; + left_mb->nz_ = out_l_nz; + + block->non_zero_y_ = non_zero_y; + block->non_zero_uv_ = non_zero_uv; + + // We look at the mode-code of each block and check if some blocks have less + // than three non-zero coeffs (code < 2). This is to avoid dithering flat and + // empty blocks. + block->dither_ = (non_zero_uv & 0xaaaa) ? 0 : q->dither_; + + return !(non_zero_y | non_zero_uv); // will be used for further optimization +} + +//------------------------------------------------------------------------------ +// Main loop + +int VP8DecodeMB(VP8Decoder* const dec, VP8BitReader* const token_br) { + VP8MB* const left = dec->mb_info_ - 1; + VP8MB* const mb = dec->mb_info_ + dec->mb_x_; + VP8MBData* const block = dec->mb_data_ + dec->mb_x_; + int skip = dec->use_skip_proba_ ? block->skip_ : 0; + + if (!skip) { + skip = ParseResiduals(dec, mb, token_br); + } else { + left->nz_ = mb->nz_ = 0; + if (!block->is_i4x4_) { + left->nz_dc_ = mb->nz_dc_ = 0; + } + block->non_zero_y_ = 0; + block->non_zero_uv_ = 0; + block->dither_ = 0; + } + + if (dec->filter_type_ > 0) { // store filter info + VP8FInfo* const finfo = dec->f_info_ + dec->mb_x_; + *finfo = dec->fstrengths_[block->segment_][block->is_i4x4_]; + finfo->f_inner_ |= !skip; + } + + return !token_br->eof_; +} + +void VP8InitScanline(VP8Decoder* const dec) { + VP8MB* const left = dec->mb_info_ - 1; + left->nz_ = 0; + left->nz_dc_ = 0; + memset(dec->intra_l_, B_DC_PRED, sizeof(dec->intra_l_)); + dec->mb_x_ = 0; +} + +static int ParseFrame(VP8Decoder* const dec, VP8Io* io) { + for (dec->mb_y_ = 0; dec->mb_y_ < dec->br_mb_y_; ++dec->mb_y_) { + // Parse bitstream for this row. + VP8BitReader* const token_br = + &dec->parts_[dec->mb_y_ & dec->num_parts_minus_one_]; + if (!VP8ParseIntraModeRow(&dec->br_, dec)) { + return VP8SetError(dec, VP8_STATUS_NOT_ENOUGH_DATA, + "Premature end-of-partition0 encountered."); + } + for (; dec->mb_x_ < dec->mb_w_; ++dec->mb_x_) { + if (!VP8DecodeMB(dec, token_br)) { + return VP8SetError(dec, VP8_STATUS_NOT_ENOUGH_DATA, + "Premature end-of-file encountered."); + } + } + VP8InitScanline(dec); // Prepare for next scanline + + // Reconstruct, filter and emit the row. + if (!VP8ProcessRow(dec, io)) { + return VP8SetError(dec, VP8_STATUS_USER_ABORT, "Output aborted."); + } + } + if (dec->mt_method_ > 0) { + if (!WebPGetWorkerInterface()->Sync(&dec->worker_)) return 0; + } + + return 1; +} + +// Main entry point +int VP8Decode(VP8Decoder* const dec, VP8Io* const io) { + int ok = 0; + if (dec == NULL) { + return 0; + } + if (io == NULL) { + return VP8SetError(dec, VP8_STATUS_INVALID_PARAM, + "NULL VP8Io parameter in VP8Decode()."); + } + + if (!dec->ready_) { + if (!VP8GetHeaders(dec, io)) { + return 0; + } + } + assert(dec->ready_); + + // Finish setting up the decoding parameter. Will call io->setup(). + ok = (VP8EnterCritical(dec, io) == VP8_STATUS_OK); + if (ok) { // good to go. + // Will allocate memory and prepare everything. + if (ok) ok = VP8InitFrame(dec, io); + + // Main decoding loop + if (ok) ok = ParseFrame(dec, io); + + // Exit. 
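+ // VP8ExitCritical() must run even if VP8InitFrame() or ParseFrame() failed + // above, so that the io->teardown() hook is still invoked; hence the '&=' + // accumulation instead of an early return.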
+ ok &= VP8ExitCritical(dec, io); + } + + if (!ok) { + VP8Clear(dec); + return 0; + } + + dec->ready_ = 0; + return ok; +} + +void VP8Clear(VP8Decoder* const dec) { + if (dec == NULL) { + return; + } + WebPGetWorkerInterface()->End(&dec->worker_); + WebPDeallocateAlphaMemory(dec); + WebPSafeFree(dec->mem_); + dec->mem_ = NULL; + dec->mem_size_ = 0; + memset(&dec->br_, 0, sizeof(dec->br_)); + dec->ready_ = 0; +} + +//------------------------------------------------------------------------------ diff --git a/vendor/github.com/sizeofint/webpanimation/dec_vp8_dec.h b/vendor/github.com/sizeofint/webpanimation/dec_vp8_dec.h new file mode 100644 index 00000000..cbd99264 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dec_vp8_dec.h @@ -0,0 +1,185 @@ +// Copyright 2010 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Low-level API for VP8 decoder +// +// Author: Skal (pascal.massimino@gmail.com) + +#ifndef WEBP_DEC_VP8_DEC_H_ +#define WEBP_DEC_VP8_DEC_H_ + +#include "webp_decode.h" + +#ifdef __cplusplus +extern "C" { +#endif + +//------------------------------------------------------------------------------ +// Lower-level API +// +// These functions provide fine-grained control of the decoding process. +// The call flow should resemble: +// +// VP8Io io; +// VP8InitIo(&io); +// io.data = data; +// io.data_size = size; +// /* customize io's functions (setup()/put()/teardown()) if needed. */ +// +// VP8Decoder* dec = VP8New(); +// int ok = VP8Decode(dec, &io); +// if (!ok) printf("Error: %s\n", VP8StatusMessage(dec)); +// VP8Delete(dec); +// return ok; + +// Input / Output +typedef struct VP8Io VP8Io; +typedef int (*VP8IoPutHook)(const VP8Io* io); +typedef int (*VP8IoSetupHook)(VP8Io* io); +typedef void (*VP8IoTeardownHook)(const VP8Io* io); + +struct VP8Io { + // set by VP8GetHeaders() + int width, height; // picture dimensions, in pixels (invariable). + // These are the original, uncropped dimensions. + // The actual area passed to put() is stored + // in mb_w / mb_h fields. + + // set before calling put() + int mb_y; // position of the current rows (in pixels) + int mb_w; // number of columns in the sample + int mb_h; // number of rows in the sample + const uint8_t* y, *u, *v; // rows to copy (in yuv420 format) + int y_stride; // row stride for luma + int uv_stride; // row stride for chroma + + void* opaque; // user data + + // called when fresh samples are available. Currently, samples are in + // YUV420 format, and can be up to width x 24 in size (depending on the + // in-loop filtering level, e.g.). Should return false in case of error + // or abort request. The actual size of the area to update is mb_w x mb_h + // in size, taking cropping into account. + VP8IoPutHook put; + + // called just before starting to decode the blocks. + // Must return false in case of setup error, true otherwise. If false is + // returned, teardown() will NOT be called. But if the setup succeeded + // and true is returned, then teardown() will always be called afterward. + VP8IoSetupHook setup; + + // Called just after block decoding is finished (or when an error occurred + // during put()). 
Is NOT called if setup() failed. + VP8IoTeardownHook teardown; + + // this is a recommendation for the user-side yuv->rgb converter. This flag + // is set when calling setup() hook and can be overwritten by it. It then + // can be taken into consideration during the put() method. + int fancy_upsampling; + + // Input buffer. + size_t data_size; + const uint8_t* data; + + // If true, in-loop filtering will not be performed even if present in the + // bitstream. Switching off filtering may speed up decoding at the expense + // of more visible blocking. Note that output will also be non-compliant + // with the VP8 specifications. + int bypass_filtering; + + // Cropping parameters. + int use_cropping; + int crop_left, crop_right, crop_top, crop_bottom; + + // Scaling parameters. + int use_scaling; + int scaled_width, scaled_height; + + // If non NULL, pointer to the alpha data (if present) corresponding to the + // start of the current row (That is: it is pre-offset by mb_y and takes + // cropping into account). + const uint8_t* a; +}; + +// Internal, version-checked, entry point +int VP8InitIoInternal(VP8Io* const, int); + +// Set the custom IO function pointers and user-data. The setter for IO hooks +// should be called before initiating incremental decoding. Returns true if +// WebPIDecoder object is successfully modified, false otherwise. +int WebPISetIOHooks(WebPIDecoder* const idec, + VP8IoPutHook put, + VP8IoSetupHook setup, + VP8IoTeardownHook teardown, + void* user_data); + +// Main decoding object. This is an opaque structure. +typedef struct VP8Decoder VP8Decoder; + +// Create a new decoder object. +VP8Decoder* VP8New(void); + +// Must be called to make sure 'io' is initialized properly. +// Returns false in case of version mismatch. Upon such failure, no other +// decoding function should be called (VP8Decode, VP8GetHeaders, ...) +static WEBP_INLINE int VP8InitIo(VP8Io* const io) { + return VP8InitIoInternal(io, WEBP_DECODER_ABI_VERSION); +} + +// Decode the VP8 frame header. Returns true if ok. +// Note: 'io->data' must be pointing to the start of the VP8 frame header. +int VP8GetHeaders(VP8Decoder* const dec, VP8Io* const io); + +// Decode a picture. Will call VP8GetHeaders() if it wasn't done already. +// Returns false in case of error. +int VP8Decode(VP8Decoder* const dec, VP8Io* const io); + +// Return current status of the decoder: +VP8StatusCode VP8Status(VP8Decoder* const dec); + +// return readable string corresponding to the last status. +const char* VP8StatusMessage(VP8Decoder* const dec); + +// Resets the decoder in its initial state, reclaiming memory. +// Not a mandatory call between calls to VP8Decode(). +void VP8Clear(VP8Decoder* const dec); + +// Destroy the decoder object. +void VP8Delete(VP8Decoder* const dec); + +//------------------------------------------------------------------------------ +// Miscellaneous VP8/VP8L bitstream probing functions. + +// Returns true if the next 3 bytes in data contain the VP8 signature. +WEBP_EXTERN int VP8CheckSignature(const uint8_t* const data, size_t data_size); + +// Validates the VP8 data-header and retrieves basic header information viz +// width and height. Returns 0 in case of formatting error. *width/*height +// can be passed NULL. +WEBP_EXTERN int VP8GetInfo( + const uint8_t* data, + size_t data_size, // data available so far + size_t chunk_size, // total data size expected in the chunk + int* const width, int* const height); + +// Returns true if the next byte(s) in data is a VP8L signature. 
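+// (A VP8L stream begins with the single signature byte 0x2f; the lossy VP8 +// signature checked by VP8CheckSignature() above is the 3-byte sequence +// 0x9d 0x01 0x2a.)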
+WEBP_EXTERN int VP8LCheckSignature(const uint8_t* const data, size_t size); + +// Validates the VP8L data-header and retrieves basic header information viz +// width, height and alpha. Returns 0 in case of formatting error. +// width/height/has_alpha can be passed NULL. +WEBP_EXTERN int VP8LGetInfo( + const uint8_t* data, size_t data_size, // data available so far + int* const width, int* const height, int* const has_alpha); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // WEBP_DEC_VP8_DEC_H_ diff --git a/vendor/github.com/sizeofint/webpanimation/dec_vp8i_dec.h b/vendor/github.com/sizeofint/webpanimation/dec_vp8i_dec.h new file mode 100644 index 00000000..f369817c --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dec_vp8i_dec.h @@ -0,0 +1,319 @@ +// Copyright 2010 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// VP8 decoder: internal header. +// +// Author: Skal (pascal.massimino@gmail.com) + +#ifndef WEBP_DEC_VP8I_DEC_H_ +#define WEBP_DEC_VP8I_DEC_H_ + +#include <string.h> // for memcpy() +#include "dec_common_dec.h" +#include "dec_vp8li_dec.h" +#include "utils_bit_reader_utils.h" +#include "utils_random_utils.h" +#include "utils_thread_utils.h" +#include "dsp_dsp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +//------------------------------------------------------------------------------ +// Various defines and enums + +// version numbers +#define DEC_MAJ_VERSION 1 +#define DEC_MIN_VERSION 2 +#define DEC_REV_VERSION 0 + +// YUV-cache parameters. Cache is 32-bytes wide (= one cacheline). +// Constraints are: We need to store one 16x16 block of luma samples (y), +// and two 8x8 chroma blocks (u/v). These had better be 16-byte aligned, +// in order to be SIMD-friendly. We also need to store the top, left and +// top-left samples (from previously decoded blocks), along with four +// extra top-right samples for luma (intra4x4 prediction only). +// One possible layout is, using 32 * (17 + 9) bytes: +// +// .+------ <- only 1 pixel high +// .|yyyyt. +// .|yyyyt. +// .|yyyyt. +// .|yyyy.. +// .+--.+-- <- only 1 pixel high +// .|uu.|vv +// .|uu.|vv +// +// Every character is a 4x4 block, with legend: +// '.'
= unused +// 'y' = y-samples 'u' = u-samples 'v' = u-samples +// '|' = left sample, '-' = top sample, '+' = top-left sample +// 't' = extra top-right sample for 4x4 modes +#define YUV_SIZE (BPS * 17 + BPS * 9) +#define Y_OFF (BPS * 1 + 8) +#define U_OFF (Y_OFF + BPS * 16 + BPS) +#define V_OFF (U_OFF + 16) + +// minimal width under which lossy multi-threading is always disabled +#define MIN_WIDTH_FOR_THREADS 512 + +//------------------------------------------------------------------------------ +// Headers + +typedef struct { + uint8_t key_frame_; + uint8_t profile_; + uint8_t show_; + uint32_t partition_length_; +} VP8FrameHeader; + +typedef struct { + uint16_t width_; + uint16_t height_; + uint8_t xscale_; + uint8_t yscale_; + uint8_t colorspace_; // 0 = YCbCr + uint8_t clamp_type_; +} VP8PictureHeader; + +// segment features +typedef struct { + int use_segment_; + int update_map_; // whether to update the segment map or not + int absolute_delta_; // absolute or delta values for quantizer and filter + int8_t quantizer_[NUM_MB_SEGMENTS]; // quantization changes + int8_t filter_strength_[NUM_MB_SEGMENTS]; // filter strength for segments +} VP8SegmentHeader; + +// probas associated to one of the contexts +typedef uint8_t VP8ProbaArray[NUM_PROBAS]; + +typedef struct { // all the probas associated to one band + VP8ProbaArray probas_[NUM_CTX]; +} VP8BandProbas; + +// Struct collecting all frame-persistent probabilities. +typedef struct { + uint8_t segments_[MB_FEATURE_TREE_PROBS]; + // Type: 0:Intra16-AC 1:Intra16-DC 2:Chroma 3:Intra4 + VP8BandProbas bands_[NUM_TYPES][NUM_BANDS]; + const VP8BandProbas* bands_ptr_[NUM_TYPES][16 + 1]; +} VP8Proba; + +// Filter parameters +typedef struct { + int simple_; // 0=complex, 1=simple + int level_; // [0..63] + int sharpness_; // [0..7] + int use_lf_delta_; + int ref_lf_delta_[NUM_REF_LF_DELTAS]; + int mode_lf_delta_[NUM_MODE_LF_DELTAS]; +} VP8FilterHeader; + +//------------------------------------------------------------------------------ +// Informations about the macroblocks. + +typedef struct { // filter specs + uint8_t f_limit_; // filter limit in [3..189], or 0 if no filtering + uint8_t f_ilevel_; // inner limit in [1..63] + uint8_t f_inner_; // do inner filtering? + uint8_t hev_thresh_; // high edge variance threshold in [0..2] +} VP8FInfo; + +typedef struct { // Top/Left Contexts used for syntax-parsing + uint8_t nz_; // non-zero AC/DC coeffs (4bit for luma + 4bit for chroma) + uint8_t nz_dc_; // non-zero DC coeff (1bit) +} VP8MB; + +// Dequantization matrices +typedef int quant_t[2]; // [DC / AC]. Can be 'uint16_t[2]' too (~slower). +typedef struct { + quant_t y1_mat_, y2_mat_, uv_mat_; + + int uv_quant_; // U/V quantizer value + int dither_; // dithering amplitude (0 = off, max=255) +} VP8QuantMatrix; + +// Data needed to reconstruct a macroblock +typedef struct { + int16_t coeffs_[384]; // 384 coeffs = (16+4+4) * 4*4 + uint8_t is_i4x4_; // true if intra4x4 + uint8_t imodes_[16]; // one 16x16 mode (#0) or sixteen 4x4 modes + uint8_t uvmode_; // chroma prediction mode + // bit-wise info about the content of each sub-4x4 blocks (in decoding order). + // Each of the 4x4 blocks for y/u/v is associated with a 2b code according to: + // code=0 -> no coefficient + // code=1 -> only DC + // code=2 -> first three coefficients are non-zero + // code=3 -> more than three coefficients are non-zero + // This allows to call specialized transform functions. 
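+  // Illustrative only (no such helper exists in this file): assuming the 2b
+  // codes are packed LSB-first, the code for the k-th 4x4 luma block would be
+  //   const int code = (non_zero_y_ >> (2 * k)) & 3;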
+ uint32_t non_zero_y_; + uint32_t non_zero_uv_; + uint8_t dither_; // local dithering strength (deduced from non_zero_*) + uint8_t skip_; + uint8_t segment_; +} VP8MBData; + +// Persistent information needed by the parallel processing +typedef struct { + int id_; // cache row to process (in [0..2]) + int mb_y_; // macroblock position of the row + int filter_row_; // true if row-filtering is needed + VP8FInfo* f_info_; // filter strengths (swapped with dec->f_info_) + VP8MBData* mb_data_; // reconstruction data (swapped with dec->mb_data_) + VP8Io io_; // copy of the VP8Io to pass to put() +} VP8ThreadContext; + +// Saved top samples, per macroblock. Fits into a cache-line. +typedef struct { + uint8_t y[16], u[8], v[8]; +} VP8TopSamples; + +//------------------------------------------------------------------------------ +// VP8Decoder: the main opaque structure handed over to user + +struct VP8Decoder { + VP8StatusCode status_; + int ready_; // true if ready to decode a picture with VP8Decode() + const char* error_msg_; // set when status_ is not OK. + + // Main data source + VP8BitReader br_; + + // headers + VP8FrameHeader frm_hdr_; + VP8PictureHeader pic_hdr_; + VP8FilterHeader filter_hdr_; + VP8SegmentHeader segment_hdr_; + + // Worker + WebPWorker worker_; + int mt_method_; // multi-thread method: 0=off, 1=[parse+recon][filter] + // 2=[parse][recon+filter] + int cache_id_; // current cache row + int num_caches_; // number of cached rows of 16 pixels (1, 2 or 3) + VP8ThreadContext thread_ctx_; // Thread context + + // dimension, in macroblock units. + int mb_w_, mb_h_; + + // Macroblock to process/filter, depending on cropping and filter_type. + int tl_mb_x_, tl_mb_y_; // top-left MB that must be in-loop filtered + int br_mb_x_, br_mb_y_; // last bottom-right MB that must be decoded + + // number of partitions minus one. + uint32_t num_parts_minus_one_; + // per-partition boolean decoders. + VP8BitReader parts_[MAX_NUM_PARTITIONS]; + + // Dithering strength, deduced from decoding options + int dither_; // whether to use dithering or not + VP8Random dithering_rg_; // random generator for dithering + + // dequantization (one set of DC/AC dequant factor per segment) + VP8QuantMatrix dqm_[NUM_MB_SEGMENTS]; + + // probabilities + VP8Proba proba_; + int use_skip_proba_; + uint8_t skip_p_; + + // Boundary data cache and persistent buffers. + uint8_t* intra_t_; // top intra modes values: 4 * mb_w_ + uint8_t intra_l_[4]; // left intra modes values + + VP8TopSamples* yuv_t_; // top y/u/v samples + + VP8MB* mb_info_; // contextual macroblock info (mb_w_ + 1) + VP8FInfo* f_info_; // filter strength info + uint8_t* yuv_b_; // main block for Y/U/V (size = YUV_SIZE) + + uint8_t* cache_y_; // macroblock row for storing unfiltered samples + uint8_t* cache_u_; + uint8_t* cache_v_; + int cache_y_stride_; + int cache_uv_stride_; + + // main memory chunk for the above data. Persistent. + void* mem_; + size_t mem_size_; + + // Per macroblock non-persistent infos. 
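+  // (With multi-threading enabled, mb_data_ below is swapped with the copy
+  // held in thread_ctx_ above, so parsing of one row can overlap
+  // reconstruction/filtering of the previous one; see VP8ThreadContext.)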
+ int mb_x_, mb_y_; // current position, in macroblock units + VP8MBData* mb_data_; // parsed reconstruction data + + // Filtering side-info + int filter_type_; // 0=off, 1=simple, 2=complex + VP8FInfo fstrengths_[NUM_MB_SEGMENTS][2]; // precalculated per-segment/type + + // Alpha + struct ALPHDecoder* alph_dec_; // alpha-plane decoder object + const uint8_t* alpha_data_; // compressed alpha data (if present) + size_t alpha_data_size_; + int is_alpha_decoded_; // true if alpha_data_ is decoded in alpha_plane_ + uint8_t* alpha_plane_mem_; // memory allocated for alpha_plane_ + uint8_t* alpha_plane_; // output. Persistent, contains the whole data. + const uint8_t* alpha_prev_line_; // last decoded alpha row (or NULL) + int alpha_dithering_; // derived from decoding options (0=off, 100=full) +}; + +//------------------------------------------------------------------------------ +// internal functions. Not public. + +// in vp8.c +int VP8SetError(VP8Decoder* const dec, + VP8StatusCode error, const char* const msg); + +// in tree.c +void VP8ResetProba(VP8Proba* const proba); +void VP8ParseProba(VP8BitReader* const br, VP8Decoder* const dec); +// parses one row of intra mode data in partition 0, returns !eof +int VP8ParseIntraModeRow(VP8BitReader* const br, VP8Decoder* const dec); + +// in quant.c +void VP8ParseQuant(VP8Decoder* const dec); + +// in frame.c +int VP8InitFrame(VP8Decoder* const dec, VP8Io* const io); +// Call io->setup() and finish setting up scan parameters. +// After this call returns, one must always call VP8ExitCritical() with the +// same parameters. Both functions should be used in pair. Returns VP8_STATUS_OK +// if ok, otherwise sets and returns the error status on *dec. +VP8StatusCode VP8EnterCritical(VP8Decoder* const dec, VP8Io* const io); +// Must always be called in pair with VP8EnterCritical(). +// Returns false in case of error. +int VP8ExitCritical(VP8Decoder* const dec, VP8Io* const io); +// Return the multi-threading method to use (0=off), depending +// on options and bitstream size. Only for lossy decoding. +int VP8GetThreadMethod(const WebPDecoderOptions* const options, + const WebPHeaderStructure* const headers, + int width, int height); +// Initialize dithering post-process if needed. +void VP8InitDithering(const WebPDecoderOptions* const options, + VP8Decoder* const dec); +// Process the last decoded row (filtering + output). +int VP8ProcessRow(VP8Decoder* const dec, VP8Io* const io); +// To be called at the start of a new scanline, to initialize predictors. +void VP8InitScanline(VP8Decoder* const dec); +// Decode one macroblock. Returns false if there is not enough data. +int VP8DecodeMB(VP8Decoder* const dec, VP8BitReader* const token_br); + +// in alpha.c +const uint8_t* VP8DecompressAlphaRows(VP8Decoder* const dec, + const VP8Io* const io, + int row, int num_rows); + +//------------------------------------------------------------------------------ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // WEBP_DEC_VP8I_DEC_H_ diff --git a/vendor/github.com/sizeofint/webpanimation/dec_vp8l_dec.c b/vendor/github.com/sizeofint/webpanimation/dec_vp8l_dec.c new file mode 100644 index 00000000..e1a8a11a --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dec_vp8l_dec.c @@ -0,0 +1,1738 @@ +// Copyright 2012 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. 
An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// main entry for the decoder
+//
+// Authors: Vikas Arora (vikaas.arora@gmail.com)
+//          Jyrki Alakuijala (jyrki@google.com)
+
+#include <stdlib.h>
+
+#include "dec_alphai_dec.h"
+#include "dec_vp8li_dec.h"
+#include "dsp_dsp.h"
+#include "dsp_lossless.h"
+#include "dsp_lossless_common.h"
+#include "dsp_yuv.h"
+#include "utils_endian_inl_utils.h"
+#include "utils_huffman_utils.h"
+#include "utils_utils.h"
+
+#define NUM_ARGB_CACHE_ROWS 16
+
+static const int kCodeLengthLiterals = 16;
+static const int kCodeLengthRepeatCode = 16;
+static const uint8_t kCodeLengthExtraBits[3] = { 2, 3, 7 };
+static const uint8_t kCodeLengthRepeatOffsets[3] = { 3, 3, 11 };
+
+// -----------------------------------------------------------------------------
+// Five Huffman codes are used at each meta code:
+//  1. green + length prefix codes + color cache codes,
+//  2. alpha,
+//  3. red,
+//  4. blue, and,
+//  5. distance prefix codes.
+typedef enum {
+  GREEN = 0,
+  RED   = 1,
+  BLUE  = 2,
+  ALPHA = 3,
+  DIST  = 4
+} HuffIndex;
+
+static const uint16_t kAlphabetSize[HUFFMAN_CODES_PER_META_CODE] = {
+  NUM_LITERAL_CODES + NUM_LENGTH_CODES,
+  NUM_LITERAL_CODES, NUM_LITERAL_CODES, NUM_LITERAL_CODES,
+  NUM_DISTANCE_CODES
+};
+
+static const uint8_t kLiteralMap[HUFFMAN_CODES_PER_META_CODE] = {
+  0, 1, 1, 1, 0
+};
+
+#define NUM_CODE_LENGTH_CODES 19
+static const uint8_t kCodeLengthCodeOrder[NUM_CODE_LENGTH_CODES] = {
+  17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
+};
+
+#define CODE_TO_PLANE_CODES 120
+static const uint8_t kCodeToPlane[CODE_TO_PLANE_CODES] = {
+  0x18, 0x07, 0x17, 0x19, 0x28, 0x06, 0x27, 0x29, 0x16, 0x1a,
+  0x26, 0x2a, 0x38, 0x05, 0x37, 0x39, 0x15, 0x1b, 0x36, 0x3a,
+  0x25, 0x2b, 0x48, 0x04, 0x47, 0x49, 0x14, 0x1c, 0x35, 0x3b,
+  0x46, 0x4a, 0x24, 0x2c, 0x58, 0x45, 0x4b, 0x34, 0x3c, 0x03,
+  0x57, 0x59, 0x13, 0x1d, 0x56, 0x5a, 0x23, 0x2d, 0x44, 0x4c,
+  0x55, 0x5b, 0x33, 0x3d, 0x68, 0x02, 0x67, 0x69, 0x12, 0x1e,
+  0x66, 0x6a, 0x22, 0x2e, 0x54, 0x5c, 0x43, 0x4d, 0x65, 0x6b,
+  0x32, 0x3e, 0x78, 0x01, 0x77, 0x79, 0x53, 0x5d, 0x11, 0x1f,
+  0x64, 0x6c, 0x42, 0x4e, 0x76, 0x7a, 0x21, 0x2f, 0x75, 0x7b,
+  0x31, 0x3f, 0x63, 0x6d, 0x52, 0x5e, 0x00, 0x74, 0x7c, 0x41,
+  0x4f, 0x10, 0x20, 0x62, 0x6e, 0x30, 0x73, 0x7d, 0x51, 0x5f,
+  0x40, 0x72, 0x7e, 0x61, 0x6f, 0x50, 0x71, 0x7f, 0x60, 0x70
+};
+
+// Memory needed for lookup tables of one Huffman tree group. Red, blue, alpha
+// and distance alphabets are constant (256 for red, blue and alpha, 40 for
+// distance) and lookup table sizes for them in worst case are 630 and 410
+// respectively. Size of green alphabet depends on color cache size and is equal
+// to 256 (green component values) + 24 (length prefix values)
+// + color_cache_size (between 0 and 2048).
+// All values computed for 8-bit first level lookup with Mark Adler's tool: +// http://www.hdfgroup.org/ftp/lib-external/zlib/zlib-1.2.5/examples/enough.c +#define FIXED_TABLE_SIZE (630 * 3 + 410) +static const uint16_t kTableSize[12] = { + FIXED_TABLE_SIZE + 654, + FIXED_TABLE_SIZE + 656, + FIXED_TABLE_SIZE + 658, + FIXED_TABLE_SIZE + 662, + FIXED_TABLE_SIZE + 670, + FIXED_TABLE_SIZE + 686, + FIXED_TABLE_SIZE + 718, + FIXED_TABLE_SIZE + 782, + FIXED_TABLE_SIZE + 912, + FIXED_TABLE_SIZE + 1168, + FIXED_TABLE_SIZE + 1680, + FIXED_TABLE_SIZE + 2704 +}; + +static int DecodeImageStream(int xsize, int ysize, + int is_level0, + VP8LDecoder* const dec, + uint32_t** const decoded_data); + +//------------------------------------------------------------------------------ + +int VP8LCheckSignature(const uint8_t* const data, size_t size) { + return (size >= VP8L_FRAME_HEADER_SIZE && + data[0] == VP8L_MAGIC_BYTE && + (data[4] >> 5) == 0); // version +} + +static int ReadImageInfo(VP8LBitReader* const br, + int* const width, int* const height, + int* const has_alpha) { + if (VP8LReadBits(br, 8) != VP8L_MAGIC_BYTE) return 0; + *width = VP8LReadBits(br, VP8L_IMAGE_SIZE_BITS) + 1; + *height = VP8LReadBits(br, VP8L_IMAGE_SIZE_BITS) + 1; + *has_alpha = VP8LReadBits(br, 1); + if (VP8LReadBits(br, VP8L_VERSION_BITS) != 0) return 0; + return !br->eos_; +} + +int VP8LGetInfo(const uint8_t* data, size_t data_size, + int* const width, int* const height, int* const has_alpha) { + if (data == NULL || data_size < VP8L_FRAME_HEADER_SIZE) { + return 0; // not enough data + } else if (!VP8LCheckSignature(data, data_size)) { + return 0; // bad signature + } else { + int w, h, a; + VP8LBitReader br; + VP8LInitBitReader(&br, data, data_size); + if (!ReadImageInfo(&br, &w, &h, &a)) { + return 0; + } + if (width != NULL) *width = w; + if (height != NULL) *height = h; + if (has_alpha != NULL) *has_alpha = a; + return 1; + } +} + +//------------------------------------------------------------------------------ + +static WEBP_INLINE int GetCopyDistance(int distance_symbol, + VP8LBitReader* const br) { + int extra_bits, offset; + if (distance_symbol < 4) { + return distance_symbol + 1; + } + extra_bits = (distance_symbol - 2) >> 1; + offset = (2 + (distance_symbol & 1)) << extra_bits; + return offset + VP8LReadBits(br, extra_bits) + 1; +} + +static WEBP_INLINE int GetCopyLength(int length_symbol, + VP8LBitReader* const br) { + // Length and distance prefixes are encoded the same way. + return GetCopyDistance(length_symbol, br); +} + +static WEBP_INLINE int PlaneCodeToDistance(int xsize, int plane_code) { + if (plane_code > CODE_TO_PLANE_CODES) { + return plane_code - CODE_TO_PLANE_CODES; + } else { + const int dist_code = kCodeToPlane[plane_code - 1]; + const int yoffset = dist_code >> 4; + const int xoffset = 8 - (dist_code & 0xf); + const int dist = yoffset * xsize + xoffset; + return (dist >= 1) ? dist : 1; // dist<1 can happen if xsize is very small + } +} + +//------------------------------------------------------------------------------ +// Decodes the next Huffman code from bit-stream. +// FillBitWindow(br) needs to be called at minimum every second call +// to ReadSymbol, in order to pre-fetch enough bits. 
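+// A minimal usage sketch (illustrative, not part of this file): two symbols
+// can be decoded per refill,
+//   VP8LFillBitWindow(br);
+//   s0 = ReadSymbol(htrees[GREEN], br);
+//   s1 = ReadSymbol(htrees[RED], br);   // still covered by the prefetch
+// which is the pattern DecodeImageData() below follows for literals.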
+static WEBP_INLINE int ReadSymbol(const HuffmanCode* table, + VP8LBitReader* const br) { + int nbits; + uint32_t val = VP8LPrefetchBits(br); + table += val & HUFFMAN_TABLE_MASK; + nbits = table->bits - HUFFMAN_TABLE_BITS; + if (nbits > 0) { + VP8LSetBitPos(br, br->bit_pos_ + HUFFMAN_TABLE_BITS); + val = VP8LPrefetchBits(br); + table += table->value; + table += val & ((1 << nbits) - 1); + } + VP8LSetBitPos(br, br->bit_pos_ + table->bits); + return table->value; +} + +// Reads packed symbol depending on GREEN channel +#define BITS_SPECIAL_MARKER 0x100 // something large enough (and a bit-mask) +#define PACKED_NON_LITERAL_CODE 0 // must be < NUM_LITERAL_CODES +static WEBP_INLINE int ReadPackedSymbols(const HTreeGroup* group, + VP8LBitReader* const br, + uint32_t* const dst) { + const uint32_t val = VP8LPrefetchBits(br) & (HUFFMAN_PACKED_TABLE_SIZE - 1); + const HuffmanCode32 code = group->packed_table[val]; + assert(group->use_packed_table); + if (code.bits < BITS_SPECIAL_MARKER) { + VP8LSetBitPos(br, br->bit_pos_ + code.bits); + *dst = code.value; + return PACKED_NON_LITERAL_CODE; + } else { + VP8LSetBitPos(br, br->bit_pos_ + code.bits - BITS_SPECIAL_MARKER); + assert(code.value >= NUM_LITERAL_CODES); + return code.value; + } +} + +static int AccumulateHCode(HuffmanCode hcode, int shift, + HuffmanCode32* const huff) { + huff->bits += hcode.bits; + huff->value |= (uint32_t)hcode.value << shift; + assert(huff->bits <= HUFFMAN_TABLE_BITS); + return hcode.bits; +} + +static void BuildPackedTable(HTreeGroup* const htree_group) { + uint32_t code; + for (code = 0; code < HUFFMAN_PACKED_TABLE_SIZE; ++code) { + uint32_t bits = code; + HuffmanCode32* const huff = &htree_group->packed_table[bits]; + HuffmanCode hcode = htree_group->htrees[GREEN][bits]; + if (hcode.value >= NUM_LITERAL_CODES) { + huff->bits = hcode.bits + BITS_SPECIAL_MARKER; + huff->value = hcode.value; + } else { + huff->bits = 0; + huff->value = 0; + bits >>= AccumulateHCode(hcode, 8, huff); + bits >>= AccumulateHCode(htree_group->htrees[RED][bits], 16, huff); + bits >>= AccumulateHCode(htree_group->htrees[BLUE][bits], 0, huff); + bits >>= AccumulateHCode(htree_group->htrees[ALPHA][bits], 24, huff); + (void)bits; + } + } +} + +static int ReadHuffmanCodeLengths( + VP8LDecoder* const dec, const int* const code_length_code_lengths, + int num_symbols, int* const code_lengths) { + int ok = 0; + VP8LBitReader* const br = &dec->br_; + int symbol; + int max_symbol; + int prev_code_len = DEFAULT_CODE_LENGTH; + HuffmanCode table[1 << LENGTHS_TABLE_BITS]; + + if (!VP8LBuildHuffmanTable(table, LENGTHS_TABLE_BITS, + code_length_code_lengths, + NUM_CODE_LENGTH_CODES)) { + goto End; + } + + if (VP8LReadBits(br, 1)) { // use length + const int length_nbits = 2 + 2 * VP8LReadBits(br, 3); + max_symbol = 2 + VP8LReadBits(br, length_nbits); + if (max_symbol > num_symbols) { + goto End; + } + } else { + max_symbol = num_symbols; + } + + symbol = 0; + while (symbol < num_symbols) { + const HuffmanCode* p; + int code_len; + if (max_symbol-- == 0) break; + VP8LFillBitWindow(br); + p = &table[VP8LPrefetchBits(br) & LENGTHS_TABLE_MASK]; + VP8LSetBitPos(br, br->bit_pos_ + p->bits); + code_len = p->value; + if (code_len < kCodeLengthLiterals) { + code_lengths[symbol++] = code_len; + if (code_len != 0) prev_code_len = code_len; + } else { + const int use_prev = (code_len == kCodeLengthRepeatCode); + const int slot = code_len - kCodeLengthLiterals; + const int extra_bits = kCodeLengthExtraBits[slot]; + const int repeat_offset = kCodeLengthRepeatOffsets[slot]; + 
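// Repeat-code recap, derived from the tables near the top of this file:
+      // code 16 repeats the previous code length 3..6 times (2 extra bits),
+      // code 17 emits 3..10 zero lengths (3 extra bits), and code 18 emits
+      // 11..138 zero lengths (7 extra bits).
+      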
int repeat = VP8LReadBits(br, extra_bits) + repeat_offset; + if (symbol + repeat > num_symbols) { + goto End; + } else { + const int length = use_prev ? prev_code_len : 0; + while (repeat-- > 0) code_lengths[symbol++] = length; + } + } + } + ok = 1; + + End: + if (!ok) dec->status_ = VP8_STATUS_BITSTREAM_ERROR; + return ok; +} + +// 'code_lengths' is pre-allocated temporary buffer, used for creating Huffman +// tree. +static int ReadHuffmanCode(int alphabet_size, VP8LDecoder* const dec, + int* const code_lengths, HuffmanCode* const table) { + int ok = 0; + int size = 0; + VP8LBitReader* const br = &dec->br_; + const int simple_code = VP8LReadBits(br, 1); + + memset(code_lengths, 0, alphabet_size * sizeof(*code_lengths)); + + if (simple_code) { // Read symbols, codes & code lengths directly. + const int num_symbols = VP8LReadBits(br, 1) + 1; + const int first_symbol_len_code = VP8LReadBits(br, 1); + // The first code is either 1 bit or 8 bit code. + int symbol = VP8LReadBits(br, (first_symbol_len_code == 0) ? 1 : 8); + code_lengths[symbol] = 1; + // The second code (if present), is always 8 bit long. + if (num_symbols == 2) { + symbol = VP8LReadBits(br, 8); + code_lengths[symbol] = 1; + } + ok = 1; + } else { // Decode Huffman-coded code lengths. + int i; + int code_length_code_lengths[NUM_CODE_LENGTH_CODES] = { 0 }; + const int num_codes = VP8LReadBits(br, 4) + 4; + if (num_codes > NUM_CODE_LENGTH_CODES) { + dec->status_ = VP8_STATUS_BITSTREAM_ERROR; + return 0; + } + + for (i = 0; i < num_codes; ++i) { + code_length_code_lengths[kCodeLengthCodeOrder[i]] = VP8LReadBits(br, 3); + } + ok = ReadHuffmanCodeLengths(dec, code_length_code_lengths, alphabet_size, + code_lengths); + } + + ok = ok && !br->eos_; + if (ok) { + size = VP8LBuildHuffmanTable(table, HUFFMAN_TABLE_BITS, + code_lengths, alphabet_size); + } + if (!ok || size == 0) { + dec->status_ = VP8_STATUS_BITSTREAM_ERROR; + return 0; + } + return size; +} + +static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize, + int color_cache_bits, int allow_recursion) { + int i, j; + VP8LBitReader* const br = &dec->br_; + VP8LMetadata* const hdr = &dec->hdr_; + uint32_t* huffman_image = NULL; + HTreeGroup* htree_groups = NULL; + HuffmanCode* huffman_tables = NULL; + HuffmanCode* huffman_table = NULL; + int num_htree_groups = 1; + int num_htree_groups_max = 1; + int max_alphabet_size = 0; + int* code_lengths = NULL; + const int table_size = kTableSize[color_cache_bits]; + int* mapping = NULL; + int ok = 0; + + if (allow_recursion && VP8LReadBits(br, 1)) { + // use meta Huffman codes. + const int huffman_precision = VP8LReadBits(br, 3) + 2; + const int huffman_xsize = VP8LSubSampleSize(xsize, huffman_precision); + const int huffman_ysize = VP8LSubSampleSize(ysize, huffman_precision); + const int huffman_pixs = huffman_xsize * huffman_ysize; + if (!DecodeImageStream(huffman_xsize, huffman_ysize, 0, dec, + &huffman_image)) { + goto Error; + } + hdr->huffman_subsample_bits_ = huffman_precision; + for (i = 0; i < huffman_pixs; ++i) { + // The huffman data is stored in red and green bytes. + const int group = (huffman_image[i] >> 8) & 0xffff; + huffman_image[i] = group; + if (group >= num_htree_groups_max) { + num_htree_groups_max = group + 1; + } + } + // Check the validity of num_htree_groups_max. If it seems too big, use a + // smaller value for later. This will prevent big memory allocations to end + // up with a bad bitstream anyway. + // The value of 1000 is totally arbitrary. 
We know that num_htree_groups_max + // is smaller than (1 << 16) and should be smaller than the number of pixels + // (though the format allows it to be bigger). + if (num_htree_groups_max > 1000 || num_htree_groups_max > xsize * ysize) { + // Create a mapping from the used indices to the minimal set of used + // values [0, num_htree_groups) + mapping = (int*)WebPSafeMalloc(num_htree_groups_max, sizeof(*mapping)); + if (mapping == NULL) { + dec->status_ = VP8_STATUS_OUT_OF_MEMORY; + goto Error; + } + // -1 means a value is unmapped, and therefore unused in the Huffman + // image. + memset(mapping, 0xff, num_htree_groups_max * sizeof(*mapping)); + for (num_htree_groups = 0, i = 0; i < huffman_pixs; ++i) { + // Get the current mapping for the group and remap the Huffman image. + int* const mapped_group = &mapping[huffman_image[i]]; + if (*mapped_group == -1) *mapped_group = num_htree_groups++; + huffman_image[i] = *mapped_group; + } + } else { + num_htree_groups = num_htree_groups_max; + } + } + + if (br->eos_) goto Error; + + // Find maximum alphabet size for the htree group. + for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; ++j) { + int alphabet_size = kAlphabetSize[j]; + if (j == 0 && color_cache_bits > 0) { + alphabet_size += 1 << color_cache_bits; + } + if (max_alphabet_size < alphabet_size) { + max_alphabet_size = alphabet_size; + } + } + + code_lengths = (int*)WebPSafeCalloc((uint64_t)max_alphabet_size, + sizeof(*code_lengths)); + huffman_tables = (HuffmanCode*)WebPSafeMalloc(num_htree_groups * table_size, + sizeof(*huffman_tables)); + htree_groups = VP8LHtreeGroupsNew(num_htree_groups); + + if (htree_groups == NULL || code_lengths == NULL || huffman_tables == NULL) { + dec->status_ = VP8_STATUS_OUT_OF_MEMORY; + goto Error; + } + + huffman_table = huffman_tables; + for (i = 0; i < num_htree_groups_max; ++i) { + // If the index "i" is unused in the Huffman image, just make sure the + // coefficients are valid but do not store them. + if (mapping != NULL && mapping[i] == -1) { + for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; ++j) { + int alphabet_size = kAlphabetSize[j]; + if (j == 0 && color_cache_bits > 0) { + alphabet_size += (1 << color_cache_bits); + } + // Passing in NULL so that nothing gets filled. + if (!ReadHuffmanCode(alphabet_size, dec, code_lengths, NULL)) { + goto Error; + } + } + } else { + HTreeGroup* const htree_group = + &htree_groups[(mapping == NULL) ? 
i : mapping[i]]; + HuffmanCode** const htrees = htree_group->htrees; + int size; + int total_size = 0; + int is_trivial_literal = 1; + int max_bits = 0; + for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; ++j) { + int alphabet_size = kAlphabetSize[j]; + htrees[j] = huffman_table; + if (j == 0 && color_cache_bits > 0) { + alphabet_size += (1 << color_cache_bits); + } + size = ReadHuffmanCode(alphabet_size, dec, code_lengths, huffman_table); + if (size == 0) { + goto Error; + } + if (is_trivial_literal && kLiteralMap[j] == 1) { + is_trivial_literal = (huffman_table->bits == 0); + } + total_size += huffman_table->bits; + huffman_table += size; + if (j <= ALPHA) { + int local_max_bits = code_lengths[0]; + int k; + for (k = 1; k < alphabet_size; ++k) { + if (code_lengths[k] > local_max_bits) { + local_max_bits = code_lengths[k]; + } + } + max_bits += local_max_bits; + } + } + htree_group->is_trivial_literal = is_trivial_literal; + htree_group->is_trivial_code = 0; + if (is_trivial_literal) { + const int red = htrees[RED][0].value; + const int blue = htrees[BLUE][0].value; + const int alpha = htrees[ALPHA][0].value; + htree_group->literal_arb = ((uint32_t)alpha << 24) | (red << 16) | blue; + if (total_size == 0 && htrees[GREEN][0].value < NUM_LITERAL_CODES) { + htree_group->is_trivial_code = 1; + htree_group->literal_arb |= htrees[GREEN][0].value << 8; + } + } + htree_group->use_packed_table = + !htree_group->is_trivial_code && (max_bits < HUFFMAN_PACKED_BITS); + if (htree_group->use_packed_table) BuildPackedTable(htree_group); + } + } + ok = 1; + + // All OK. Finalize pointers. + hdr->huffman_image_ = huffman_image; + hdr->num_htree_groups_ = num_htree_groups; + hdr->htree_groups_ = htree_groups; + hdr->huffman_tables_ = huffman_tables; + + Error: + WebPSafeFree(code_lengths); + WebPSafeFree(mapping); + if (!ok) { + WebPSafeFree(huffman_image); + WebPSafeFree(huffman_tables); + VP8LHtreeGroupsFree(htree_groups); + } + return ok; +} + +//------------------------------------------------------------------------------ +// Scaling. + +#if !defined(WEBP_REDUCE_SIZE) +static int AllocateAndInitRescaler(VP8LDecoder* const dec, VP8Io* const io) { + const int num_channels = 4; + const int in_width = io->mb_w; + const int out_width = io->scaled_width; + const int in_height = io->mb_h; + const int out_height = io->scaled_height; + const uint64_t work_size = 2 * num_channels * (uint64_t)out_width; + rescaler_t* work; // Rescaler work area. + const uint64_t scaled_data_size = (uint64_t)out_width; + uint32_t* scaled_data; // Temporary storage for scaled BGRA data. 
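+  // Layout sketch of the single allocation made below (element counts, not
+  // bytes): [WebPRescaler][work: 2 * 4 * out_width rescaler_t]
+  //         [scaled_data: out_width uint32_t]
+  // e.g. out_width == 100 gives 800 work entries and 100 temporary pixels.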
+ const uint64_t memory_size = sizeof(*dec->rescaler) + + work_size * sizeof(*work) + + scaled_data_size * sizeof(*scaled_data); + uint8_t* memory = (uint8_t*)WebPSafeMalloc(memory_size, sizeof(*memory)); + if (memory == NULL) { + dec->status_ = VP8_STATUS_OUT_OF_MEMORY; + return 0; + } + assert(dec->rescaler_memory == NULL); + dec->rescaler_memory = memory; + + dec->rescaler = (WebPRescaler*)memory; + memory += sizeof(*dec->rescaler); + work = (rescaler_t*)memory; + memory += work_size * sizeof(*work); + scaled_data = (uint32_t*)memory; + + WebPRescalerInit(dec->rescaler, in_width, in_height, (uint8_t*)scaled_data, + out_width, out_height, 0, num_channels, work); + return 1; +} +#endif // WEBP_REDUCE_SIZE + +//------------------------------------------------------------------------------ +// Export to ARGB + +#if !defined(WEBP_REDUCE_SIZE) + +// We have special "export" function since we need to convert from BGRA +static int Export(WebPRescaler* const rescaler, WEBP_CSP_MODE colorspace, + int rgba_stride, uint8_t* const rgba) { + uint32_t* const src = (uint32_t*)rescaler->dst; + const int dst_width = rescaler->dst_width; + int num_lines_out = 0; + while (WebPRescalerHasPendingOutput(rescaler)) { + uint8_t* const dst = rgba + num_lines_out * rgba_stride; + WebPRescalerExportRow(rescaler); + WebPMultARGBRow(src, dst_width, 1); + VP8LConvertFromBGRA(src, dst_width, colorspace, dst); + ++num_lines_out; + } + return num_lines_out; +} + +// Emit scaled rows. +static int EmitRescaledRowsRGBA(const VP8LDecoder* const dec, + uint8_t* in, int in_stride, int mb_h, + uint8_t* const out, int out_stride) { + const WEBP_CSP_MODE colorspace = dec->output_->colorspace; + int num_lines_in = 0; + int num_lines_out = 0; + while (num_lines_in < mb_h) { + uint8_t* const row_in = in + num_lines_in * in_stride; + uint8_t* const row_out = out + num_lines_out * out_stride; + const int lines_left = mb_h - num_lines_in; + const int needed_lines = WebPRescaleNeededLines(dec->rescaler, lines_left); + int lines_imported; + assert(needed_lines > 0 && needed_lines <= lines_left); + WebPMultARGBRows(row_in, in_stride, + dec->rescaler->src_width, needed_lines, 0); + lines_imported = + WebPRescalerImport(dec->rescaler, lines_left, row_in, in_stride); + assert(lines_imported == needed_lines); + num_lines_in += lines_imported; + num_lines_out += Export(dec->rescaler, colorspace, out_stride, row_out); + } + return num_lines_out; +} + +#endif // WEBP_REDUCE_SIZE + +// Emit rows without any scaling. +static int EmitRows(WEBP_CSP_MODE colorspace, + const uint8_t* row_in, int in_stride, + int mb_w, int mb_h, + uint8_t* const out, int out_stride) { + int lines = mb_h; + uint8_t* row_out = out; + while (lines-- > 0) { + VP8LConvertFromBGRA((const uint32_t*)row_in, mb_w, colorspace, row_out); + row_in += in_stride; + row_out += out_stride; + } + return mb_h; // Num rows out == num rows in. 
+} + +//------------------------------------------------------------------------------ +// Export to YUVA + +static void ConvertToYUVA(const uint32_t* const src, int width, int y_pos, + const WebPDecBuffer* const output) { + const WebPYUVABuffer* const buf = &output->u.YUVA; + + // first, the luma plane + WebPConvertARGBToY(src, buf->y + y_pos * buf->y_stride, width); + + // then U/V planes + { + uint8_t* const u = buf->u + (y_pos >> 1) * buf->u_stride; + uint8_t* const v = buf->v + (y_pos >> 1) * buf->v_stride; + // even lines: store values + // odd lines: average with previous values + WebPConvertARGBToUV(src, u, v, width, !(y_pos & 1)); + } + // Lastly, store alpha if needed. + if (buf->a != NULL) { + uint8_t* const a = buf->a + y_pos * buf->a_stride; +#if defined(WORDS_BIGENDIAN) + WebPExtractAlpha((uint8_t*)src + 0, 0, width, 1, a, 0); +#else + WebPExtractAlpha((uint8_t*)src + 3, 0, width, 1, a, 0); +#endif + } +} + +static int ExportYUVA(const VP8LDecoder* const dec, int y_pos) { + WebPRescaler* const rescaler = dec->rescaler; + uint32_t* const src = (uint32_t*)rescaler->dst; + const int dst_width = rescaler->dst_width; + int num_lines_out = 0; + while (WebPRescalerHasPendingOutput(rescaler)) { + WebPRescalerExportRow(rescaler); + WebPMultARGBRow(src, dst_width, 1); + ConvertToYUVA(src, dst_width, y_pos, dec->output_); + ++y_pos; + ++num_lines_out; + } + return num_lines_out; +} + +static int EmitRescaledRowsYUVA(const VP8LDecoder* const dec, + uint8_t* in, int in_stride, int mb_h) { + int num_lines_in = 0; + int y_pos = dec->last_out_row_; + while (num_lines_in < mb_h) { + const int lines_left = mb_h - num_lines_in; + const int needed_lines = WebPRescaleNeededLines(dec->rescaler, lines_left); + int lines_imported; + WebPMultARGBRows(in, in_stride, dec->rescaler->src_width, needed_lines, 0); + lines_imported = + WebPRescalerImport(dec->rescaler, lines_left, in, in_stride); + assert(lines_imported == needed_lines); + num_lines_in += lines_imported; + in += needed_lines * in_stride; + y_pos += ExportYUVA(dec, y_pos); + } + return y_pos; +} + +static int EmitRowsYUVA(const VP8LDecoder* const dec, + const uint8_t* in, int in_stride, + int mb_w, int num_rows) { + int y_pos = dec->last_out_row_; + while (num_rows-- > 0) { + ConvertToYUVA((const uint32_t*)in, mb_w, y_pos, dec->output_); + in += in_stride; + ++y_pos; + } + return y_pos; +} + +//------------------------------------------------------------------------------ +// Cropping. + +// Sets io->mb_y, io->mb_h & io->mb_w according to start row, end row and +// crop options. Also updates the input data pointer, so that it points to the +// start of the cropped window. Note that pixels are in ARGB format even if +// 'in_data' is uint8_t*. +// Returns true if the crop window is not empty. +static int SetCropWindow(VP8Io* const io, int y_start, int y_end, + uint8_t** const in_data, int pixel_stride) { + assert(y_start < y_end); + assert(io->crop_left < io->crop_right); + if (y_end > io->crop_bottom) { + y_end = io->crop_bottom; // make sure we don't overflow on last row. + } + if (y_start < io->crop_top) { + const int delta = io->crop_top - y_start; + y_start = io->crop_top; + *in_data += delta * pixel_stride; + } + if (y_start >= y_end) return 0; // Crop window is empty. + + *in_data += io->crop_left * sizeof(uint32_t); + + io->mb_y = y_start - io->crop_top; + io->mb_w = io->crop_right - io->crop_left; + io->mb_h = y_end - y_start; + return 1; // Non-empty crop window. 
+} + +//------------------------------------------------------------------------------ + +static WEBP_INLINE int GetMetaIndex( + const uint32_t* const image, int xsize, int bits, int x, int y) { + if (bits == 0) return 0; + return image[xsize * (y >> bits) + (x >> bits)]; +} + +static WEBP_INLINE HTreeGroup* GetHtreeGroupForPos(VP8LMetadata* const hdr, + int x, int y) { + const int meta_index = GetMetaIndex(hdr->huffman_image_, hdr->huffman_xsize_, + hdr->huffman_subsample_bits_, x, y); + assert(meta_index < hdr->num_htree_groups_); + return hdr->htree_groups_ + meta_index; +} + +//------------------------------------------------------------------------------ +// Main loop, with custom row-processing function + +typedef void (*ProcessRowsFunc)(VP8LDecoder* const dec, int row); + +static void ApplyInverseTransforms(VP8LDecoder* const dec, + int start_row, int num_rows, + const uint32_t* const rows) { + int n = dec->next_transform_; + const int cache_pixs = dec->width_ * num_rows; + const int end_row = start_row + num_rows; + const uint32_t* rows_in = rows; + uint32_t* const rows_out = dec->argb_cache_; + + // Inverse transforms. + while (n-- > 0) { + VP8LTransform* const transform = &dec->transforms_[n]; + VP8LInverseTransform(transform, start_row, end_row, rows_in, rows_out); + rows_in = rows_out; + } + if (rows_in != rows_out) { + // No transform called, hence just copy. + memcpy(rows_out, rows_in, cache_pixs * sizeof(*rows_out)); + } +} + +// Processes (transforms, scales & color-converts) the rows decoded after the +// last call. +static void ProcessRows(VP8LDecoder* const dec, int row) { + const uint32_t* const rows = dec->pixels_ + dec->width_ * dec->last_row_; + const int num_rows = row - dec->last_row_; + + assert(row <= dec->io_->crop_bottom); + // We can't process more than NUM_ARGB_CACHE_ROWS at a time (that's the size + // of argb_cache_), but we currently don't need more than that. + assert(num_rows <= NUM_ARGB_CACHE_ROWS); + if (num_rows > 0) { // Emit output. + VP8Io* const io = dec->io_; + uint8_t* rows_data = (uint8_t*)dec->argb_cache_; + const int in_stride = io->width * sizeof(uint32_t); // in unit of RGBA + ApplyInverseTransforms(dec, dec->last_row_, num_rows, rows); + if (!SetCropWindow(io, dec->last_row_, row, &rows_data, in_stride)) { + // Nothing to output (this time). + } else { + const WebPDecBuffer* const output = dec->output_; + if (WebPIsRGBMode(output->colorspace)) { // convert to RGBA + const WebPRGBABuffer* const buf = &output->u.RGBA; + uint8_t* const rgba = buf->rgba + dec->last_out_row_ * buf->stride; + const int num_rows_out = +#if !defined(WEBP_REDUCE_SIZE) + io->use_scaling ? + EmitRescaledRowsRGBA(dec, rows_data, in_stride, io->mb_h, + rgba, buf->stride) : +#endif // WEBP_REDUCE_SIZE + EmitRows(output->colorspace, rows_data, in_stride, + io->mb_w, io->mb_h, rgba, buf->stride); + // Update 'last_out_row_'. + dec->last_out_row_ += num_rows_out; + } else { // convert to YUVA + dec->last_out_row_ = io->use_scaling ? + EmitRescaledRowsYUVA(dec, rows_data, in_stride, io->mb_h) : + EmitRowsYUVA(dec, rows_data, in_stride, io->mb_w, io->mb_h); + } + assert(dec->last_out_row_ <= output->height); + } + } + + // Update 'last_row_'. + dec->last_row_ = row; + assert(dec->last_row_ <= dec->height_); +} + +// Row-processing for the special case when alpha data contains only one +// transform (color indexing), and trivial non-green literals. 
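+// "Trivial non-green literals" means every red/blue/alpha tree in every
+// htree group is a single zero-bit symbol and no color cache is in use, so
+// only the green channel carries data; Is8bOptimizable() below performs
+// exactly that check.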
+static int Is8bOptimizable(const VP8LMetadata* const hdr) { + int i; + if (hdr->color_cache_size_ > 0) return 0; + // When the Huffman tree contains only one symbol, we can skip the + // call to ReadSymbol() for red/blue/alpha channels. + for (i = 0; i < hdr->num_htree_groups_; ++i) { + HuffmanCode** const htrees = hdr->htree_groups_[i].htrees; + if (htrees[RED][0].bits > 0) return 0; + if (htrees[BLUE][0].bits > 0) return 0; + if (htrees[ALPHA][0].bits > 0) return 0; + } + return 1; +} + +static void AlphaApplyFilter(ALPHDecoder* const alph_dec, + int first_row, int last_row, + uint8_t* out, int stride) { + if (alph_dec->filter_ != WEBP_FILTER_NONE) { + int y; + const uint8_t* prev_line = alph_dec->prev_line_; + assert(WebPUnfilters[alph_dec->filter_] != NULL); + for (y = first_row; y < last_row; ++y) { + WebPUnfilters[alph_dec->filter_](prev_line, out, out, stride); + prev_line = out; + out += stride; + } + alph_dec->prev_line_ = prev_line; + } +} + +static void ExtractPalettedAlphaRows(VP8LDecoder* const dec, int last_row) { + // For vertical and gradient filtering, we need to decode the part above the + // crop_top row, in order to have the correct spatial predictors. + ALPHDecoder* const alph_dec = (ALPHDecoder*)dec->io_->opaque; + const int top_row = + (alph_dec->filter_ == WEBP_FILTER_NONE || + alph_dec->filter_ == WEBP_FILTER_HORIZONTAL) ? dec->io_->crop_top + : dec->last_row_; + const int first_row = (dec->last_row_ < top_row) ? top_row : dec->last_row_; + assert(last_row <= dec->io_->crop_bottom); + if (last_row > first_row) { + // Special method for paletted alpha data. We only process the cropped area. + const int width = dec->io_->width; + uint8_t* out = alph_dec->output_ + width * first_row; + const uint8_t* const in = + (uint8_t*)dec->pixels_ + dec->width_ * first_row; + VP8LTransform* const transform = &dec->transforms_[0]; + assert(dec->next_transform_ == 1); + assert(transform->type_ == COLOR_INDEXING_TRANSFORM); + VP8LColorIndexInverseTransformAlpha(transform, first_row, last_row, + in, out); + AlphaApplyFilter(alph_dec, first_row, last_row, out, width); + } + dec->last_row_ = dec->last_out_row_ = last_row; +} + +//------------------------------------------------------------------------------ +// Helper functions for fast pattern copy (8b and 32b) + +// cyclic rotation of pattern word +static WEBP_INLINE uint32_t Rotate8b(uint32_t V) { +#if defined(WORDS_BIGENDIAN) + return ((V & 0xff000000u) >> 24) | (V << 8); +#else + return ((V & 0xffu) << 24) | (V >> 8); +#endif +} + +// copy 1, 2 or 4-bytes pattern +static WEBP_INLINE void CopySmallPattern8b(const uint8_t* src, uint8_t* dst, + int length, uint32_t pattern) { + int i; + // align 'dst' to 4-bytes boundary. Adjust the pattern along the way. + while ((uintptr_t)dst & 3) { + *dst++ = *src++; + pattern = Rotate8b(pattern); + --length; + } + // Copy the pattern 4 bytes at a time. + for (i = 0; i < (length >> 2); ++i) { + ((uint32_t*)dst)[i] = pattern; + } + // Finish with left-overs. 'pattern' is still correctly positioned, + // so no Rotate8b() call is needed. 
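+  // Worked example: with 'dst' already aligned and length == 11, the loop
+  // above stores two whole words (8 bytes), 'i <<= 2' yields 8, and the tail
+  // below copies bytes 8..10 one at a time.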
+ for (i <<= 2; i < length; ++i) { + dst[i] = src[i]; + } +} + +static WEBP_INLINE void CopyBlock8b(uint8_t* const dst, int dist, int length) { + const uint8_t* src = dst - dist; + if (length >= 8) { + uint32_t pattern = 0; + switch (dist) { + case 1: + pattern = src[0]; +#if defined(__arm__) || defined(_M_ARM) // arm doesn't like multiply that much + pattern |= pattern << 8; + pattern |= pattern << 16; +#elif defined(WEBP_USE_MIPS_DSP_R2) + __asm__ volatile ("replv.qb %0, %0" : "+r"(pattern)); +#else + pattern = 0x01010101u * pattern; +#endif + break; + case 2: +#if !defined(WORDS_BIGENDIAN) + memcpy(&pattern, src, sizeof(uint16_t)); +#else + pattern = ((uint32_t)src[0] << 8) | src[1]; +#endif +#if defined(__arm__) || defined(_M_ARM) + pattern |= pattern << 16; +#elif defined(WEBP_USE_MIPS_DSP_R2) + __asm__ volatile ("replv.ph %0, %0" : "+r"(pattern)); +#else + pattern = 0x00010001u * pattern; +#endif + break; + case 4: + memcpy(&pattern, src, sizeof(uint32_t)); + break; + default: + goto Copy; + } + CopySmallPattern8b(src, dst, length, pattern); + return; + } + Copy: + if (dist >= length) { // no overlap -> use memcpy() + memcpy(dst, src, length * sizeof(*dst)); + } else { + int i; + for (i = 0; i < length; ++i) dst[i] = src[i]; + } +} + +// copy pattern of 1 or 2 uint32_t's +static WEBP_INLINE void CopySmallPattern32b(const uint32_t* src, + uint32_t* dst, + int length, uint64_t pattern) { + int i; + if ((uintptr_t)dst & 4) { // Align 'dst' to 8-bytes boundary. + *dst++ = *src++; + pattern = (pattern >> 32) | (pattern << 32); + --length; + } + assert(0 == ((uintptr_t)dst & 7)); + for (i = 0; i < (length >> 1); ++i) { + ((uint64_t*)dst)[i] = pattern; // Copy the pattern 8 bytes at a time. + } + if (length & 1) { // Finish with left-over. + dst[i << 1] = src[i << 1]; + } +} + +static WEBP_INLINE void CopyBlock32b(uint32_t* const dst, + int dist, int length) { + const uint32_t* const src = dst - dist; + if (dist <= 2 && length >= 4 && ((uintptr_t)dst & 3) == 0) { + uint64_t pattern; + if (dist == 1) { + pattern = (uint64_t)src[0]; + pattern |= pattern << 32; + } else { + memcpy(&pattern, src, sizeof(pattern)); + } + CopySmallPattern32b(src, dst, length, pattern); + } else if (dist >= length) { // no overlap + memcpy(dst, src, length * sizeof(*dst)); + } else { + int i; + for (i = 0; i < length; ++i) dst[i] = src[i]; + } +} + +//------------------------------------------------------------------------------ + +static int DecodeAlphaData(VP8LDecoder* const dec, uint8_t* const data, + int width, int height, int last_row) { + int ok = 1; + int row = dec->last_pixel_ / width; + int col = dec->last_pixel_ % width; + VP8LBitReader* const br = &dec->br_; + VP8LMetadata* const hdr = &dec->hdr_; + int pos = dec->last_pixel_; // current position + const int end = width * height; // End of data + const int last = width * last_row; // Last pixel to decode + const int len_code_limit = NUM_LITERAL_CODES + NUM_LENGTH_CODES; + const int mask = hdr->huffman_mask_; + const HTreeGroup* htree_group = + (pos < last) ? GetHtreeGroupForPos(hdr, col, row) : NULL; + assert(pos <= end); + assert(last_row <= height); + assert(Is8bOptimizable(hdr)); + + while (!br->eos_ && pos < last) { + int code; + // Only update when changing tile. 
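+    // (mask is hdr->huffman_mask_, i.e. (1 << huffman_subsample_bits_) - 1;
+    // with 4 subsample bits the htree group is re-fetched every 16 pixels,
+    // matching the meta-Huffman tile width.)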
+ if ((col & mask) == 0) { + htree_group = GetHtreeGroupForPos(hdr, col, row); + } + assert(htree_group != NULL); + VP8LFillBitWindow(br); + code = ReadSymbol(htree_group->htrees[GREEN], br); + if (code < NUM_LITERAL_CODES) { // Literal + data[pos] = code; + ++pos; + ++col; + if (col >= width) { + col = 0; + ++row; + if (row <= last_row && (row % NUM_ARGB_CACHE_ROWS == 0)) { + ExtractPalettedAlphaRows(dec, row); + } + } + } else if (code < len_code_limit) { // Backward reference + int dist_code, dist; + const int length_sym = code - NUM_LITERAL_CODES; + const int length = GetCopyLength(length_sym, br); + const int dist_symbol = ReadSymbol(htree_group->htrees[DIST], br); + VP8LFillBitWindow(br); + dist_code = GetCopyDistance(dist_symbol, br); + dist = PlaneCodeToDistance(width, dist_code); + if (pos >= dist && end - pos >= length) { + CopyBlock8b(data + pos, dist, length); + } else { + ok = 0; + goto End; + } + pos += length; + col += length; + while (col >= width) { + col -= width; + ++row; + if (row <= last_row && (row % NUM_ARGB_CACHE_ROWS == 0)) { + ExtractPalettedAlphaRows(dec, row); + } + } + if (pos < last && (col & mask)) { + htree_group = GetHtreeGroupForPos(hdr, col, row); + } + } else { // Not reached + ok = 0; + goto End; + } + br->eos_ = VP8LIsEndOfStream(br); + } + // Process the remaining rows corresponding to last row-block. + ExtractPalettedAlphaRows(dec, row > last_row ? last_row : row); + + End: + br->eos_ = VP8LIsEndOfStream(br); + if (!ok || (br->eos_ && pos < end)) { + ok = 0; + dec->status_ = br->eos_ ? VP8_STATUS_SUSPENDED + : VP8_STATUS_BITSTREAM_ERROR; + } else { + dec->last_pixel_ = pos; + } + return ok; +} + +static void SaveState(VP8LDecoder* const dec, int last_pixel) { + assert(dec->incremental_); + dec->saved_br_ = dec->br_; + dec->saved_last_pixel_ = last_pixel; + if (dec->hdr_.color_cache_size_ > 0) { + VP8LColorCacheCopy(&dec->hdr_.color_cache_, &dec->hdr_.saved_color_cache_); + } +} + +static void RestoreState(VP8LDecoder* const dec) { + assert(dec->br_.eos_); + dec->status_ = VP8_STATUS_SUSPENDED; + dec->br_ = dec->saved_br_; + dec->last_pixel_ = dec->saved_last_pixel_; + if (dec->hdr_.color_cache_size_ > 0) { + VP8LColorCacheCopy(&dec->hdr_.saved_color_cache_, &dec->hdr_.color_cache_); + } +} + +#define SYNC_EVERY_N_ROWS 8 // minimum number of rows between check-points +static int DecodeImageData(VP8LDecoder* const dec, uint32_t* const data, + int width, int height, int last_row, + ProcessRowsFunc process_func) { + int row = dec->last_pixel_ / width; + int col = dec->last_pixel_ % width; + VP8LBitReader* const br = &dec->br_; + VP8LMetadata* const hdr = &dec->hdr_; + uint32_t* src = data + dec->last_pixel_; + uint32_t* last_cached = src; + uint32_t* const src_end = data + width * height; // End of data + uint32_t* const src_last = data + width * last_row; // Last pixel to decode + const int len_code_limit = NUM_LITERAL_CODES + NUM_LENGTH_CODES; + const int color_cache_limit = len_code_limit + hdr->color_cache_size_; + int next_sync_row = dec->incremental_ ? row : 1 << 24; + VP8LColorCache* const color_cache = + (hdr->color_cache_size_ > 0) ? &hdr->color_cache_ : NULL; + const int mask = hdr->huffman_mask_; + const HTreeGroup* htree_group = + (src < src_last) ? 
GetHtreeGroupForPos(hdr, col, row) : NULL; + assert(dec->last_row_ < last_row); + assert(src_last <= src_end); + + while (src < src_last) { + int code; + if (row >= next_sync_row) { + SaveState(dec, (int)(src - data)); + next_sync_row = row + SYNC_EVERY_N_ROWS; + } + // Only update when changing tile. Note we could use this test: + // if "((((prev_col ^ col) | prev_row ^ row)) > mask)" -> tile changed + // but that's actually slower and needs storing the previous col/row. + if ((col & mask) == 0) { + htree_group = GetHtreeGroupForPos(hdr, col, row); + } + assert(htree_group != NULL); + if (htree_group->is_trivial_code) { + *src = htree_group->literal_arb; + goto AdvanceByOne; + } + VP8LFillBitWindow(br); + if (htree_group->use_packed_table) { + code = ReadPackedSymbols(htree_group, br, src); + if (VP8LIsEndOfStream(br)) break; + if (code == PACKED_NON_LITERAL_CODE) goto AdvanceByOne; + } else { + code = ReadSymbol(htree_group->htrees[GREEN], br); + } + if (VP8LIsEndOfStream(br)) break; + if (code < NUM_LITERAL_CODES) { // Literal + if (htree_group->is_trivial_literal) { + *src = htree_group->literal_arb | (code << 8); + } else { + int red, blue, alpha; + red = ReadSymbol(htree_group->htrees[RED], br); + VP8LFillBitWindow(br); + blue = ReadSymbol(htree_group->htrees[BLUE], br); + alpha = ReadSymbol(htree_group->htrees[ALPHA], br); + if (VP8LIsEndOfStream(br)) break; + *src = ((uint32_t)alpha << 24) | (red << 16) | (code << 8) | blue; + } + AdvanceByOne: + ++src; + ++col; + if (col >= width) { + col = 0; + ++row; + if (process_func != NULL) { + if (row <= last_row && (row % NUM_ARGB_CACHE_ROWS == 0)) { + process_func(dec, row); + } + } + if (color_cache != NULL) { + while (last_cached < src) { + VP8LColorCacheInsert(color_cache, *last_cached++); + } + } + } + } else if (code < len_code_limit) { // Backward reference + int dist_code, dist; + const int length_sym = code - NUM_LITERAL_CODES; + const int length = GetCopyLength(length_sym, br); + const int dist_symbol = ReadSymbol(htree_group->htrees[DIST], br); + VP8LFillBitWindow(br); + dist_code = GetCopyDistance(dist_symbol, br); + dist = PlaneCodeToDistance(width, dist_code); + + if (VP8LIsEndOfStream(br)) break; + if (src - data < (ptrdiff_t)dist || src_end - src < (ptrdiff_t)length) { + goto Error; + } else { + CopyBlock32b(src, dist, length); + } + src += length; + col += length; + while (col >= width) { + col -= width; + ++row; + if (process_func != NULL) { + if (row <= last_row && (row % NUM_ARGB_CACHE_ROWS == 0)) { + process_func(dec, row); + } + } + } + // Because of the check done above (before 'src' was incremented by + // 'length'), the following holds true. + assert(src <= src_end); + if (col & mask) htree_group = GetHtreeGroupForPos(hdr, col, row); + if (color_cache != NULL) { + while (last_cached < src) { + VP8LColorCacheInsert(color_cache, *last_cached++); + } + } + } else if (code < color_cache_limit) { // Color cache + const int key = code - len_code_limit; + assert(color_cache != NULL); + while (last_cached < src) { + VP8LColorCacheInsert(color_cache, *last_cached++); + } + *src = VP8LColorCacheLookup(color_cache, key); + goto AdvanceByOne; + } else { // Not reached + goto Error; + } + } + + br->eos_ = VP8LIsEndOfStream(br); + if (dec->incremental_ && br->eos_ && src < src_end) { + RestoreState(dec); + } else if (!br->eos_) { + // Process the remaining rows corresponding to last row-block. + if (process_func != NULL) { + process_func(dec, row > last_row ? 
last_row : row); + } + dec->status_ = VP8_STATUS_OK; + dec->last_pixel_ = (int)(src - data); // end-of-scan marker + } else { + // if not incremental, and we are past the end of buffer (eos_=1), then this + // is a real bitstream error. + goto Error; + } + return 1; + + Error: + dec->status_ = VP8_STATUS_BITSTREAM_ERROR; + return 0; +} + +// ----------------------------------------------------------------------------- +// VP8LTransform + +static void ClearTransform(VP8LTransform* const transform) { + WebPSafeFree(transform->data_); + transform->data_ = NULL; +} + +// For security reason, we need to remap the color map to span +// the total possible bundled values, and not just the num_colors. +static int ExpandColorMap(int num_colors, VP8LTransform* const transform) { + int i; + const int final_num_colors = 1 << (8 >> transform->bits_); + uint32_t* const new_color_map = + (uint32_t*)WebPSafeMalloc((uint64_t)final_num_colors, + sizeof(*new_color_map)); + if (new_color_map == NULL) { + return 0; + } else { + uint8_t* const data = (uint8_t*)transform->data_; + uint8_t* const new_data = (uint8_t*)new_color_map; + new_color_map[0] = transform->data_[0]; + for (i = 4; i < 4 * num_colors; ++i) { + // Equivalent to AddPixelEq(), on a byte-basis. + new_data[i] = (data[i] + new_data[i - 4]) & 0xff; + } + for (; i < 4 * final_num_colors; ++i) { + new_data[i] = 0; // black tail. + } + WebPSafeFree(transform->data_); + transform->data_ = new_color_map; + } + return 1; +} + +static int ReadTransform(int* const xsize, int const* ysize, + VP8LDecoder* const dec) { + int ok = 1; + VP8LBitReader* const br = &dec->br_; + VP8LTransform* transform = &dec->transforms_[dec->next_transform_]; + const VP8LImageTransformType type = + (VP8LImageTransformType)VP8LReadBits(br, 2); + + // Each transform type can only be present once in the stream. + if (dec->transforms_seen_ & (1U << type)) { + return 0; // Already there, let's not accept the second same transform. + } + dec->transforms_seen_ |= (1U << type); + + transform->type_ = type; + transform->xsize_ = *xsize; + transform->ysize_ = *ysize; + transform->data_ = NULL; + ++dec->next_transform_; + assert(dec->next_transform_ <= NUM_TRANSFORMS); + + switch (type) { + case PREDICTOR_TRANSFORM: + case CROSS_COLOR_TRANSFORM: + transform->bits_ = VP8LReadBits(br, 3) + 2; + ok = DecodeImageStream(VP8LSubSampleSize(transform->xsize_, + transform->bits_), + VP8LSubSampleSize(transform->ysize_, + transform->bits_), + 0, dec, &transform->data_); + break; + case COLOR_INDEXING_TRANSFORM: { + const int num_colors = VP8LReadBits(br, 8) + 1; + const int bits = (num_colors > 16) ? 0 + : (num_colors > 4) ? 1 + : (num_colors > 2) ? 
2 + : 3; + *xsize = VP8LSubSampleSize(transform->xsize_, bits); + transform->bits_ = bits; + ok = DecodeImageStream(num_colors, 1, 0, dec, &transform->data_); + ok = ok && ExpandColorMap(num_colors, transform); + break; + } + case SUBTRACT_GREEN: + break; + default: + assert(0); // can't happen + break; + } + + return ok; +} + +// ----------------------------------------------------------------------------- +// VP8LMetadata + +static void InitMetadata(VP8LMetadata* const hdr) { + assert(hdr != NULL); + memset(hdr, 0, sizeof(*hdr)); +} + +static void ClearMetadata(VP8LMetadata* const hdr) { + assert(hdr != NULL); + + WebPSafeFree(hdr->huffman_image_); + WebPSafeFree(hdr->huffman_tables_); + VP8LHtreeGroupsFree(hdr->htree_groups_); + VP8LColorCacheClear(&hdr->color_cache_); + VP8LColorCacheClear(&hdr->saved_color_cache_); + InitMetadata(hdr); +} + +// ----------------------------------------------------------------------------- +// VP8LDecoder + +VP8LDecoder* VP8LNew(void) { + VP8LDecoder* const dec = (VP8LDecoder*)WebPSafeCalloc(1ULL, sizeof(*dec)); + if (dec == NULL) return NULL; + dec->status_ = VP8_STATUS_OK; + dec->state_ = READ_DIM; + + VP8LDspInit(); // Init critical function pointers. + + return dec; +} + +void VP8LClear(VP8LDecoder* const dec) { + int i; + if (dec == NULL) return; + ClearMetadata(&dec->hdr_); + + WebPSafeFree(dec->pixels_); + dec->pixels_ = NULL; + for (i = 0; i < dec->next_transform_; ++i) { + ClearTransform(&dec->transforms_[i]); + } + dec->next_transform_ = 0; + dec->transforms_seen_ = 0; + + WebPSafeFree(dec->rescaler_memory); + dec->rescaler_memory = NULL; + + dec->output_ = NULL; // leave no trace behind +} + +void VP8LDelete(VP8LDecoder* const dec) { + if (dec != NULL) { + VP8LClear(dec); + WebPSafeFree(dec); + } +} + +static void UpdateDecoder(VP8LDecoder* const dec, int width, int height) { + VP8LMetadata* const hdr = &dec->hdr_; + const int num_bits = hdr->huffman_subsample_bits_; + dec->width_ = width; + dec->height_ = height; + + hdr->huffman_xsize_ = VP8LSubSampleSize(width, num_bits); + hdr->huffman_mask_ = (num_bits == 0) ? ~0 : (1 << num_bits) - 1; +} + +static int DecodeImageStream(int xsize, int ysize, + int is_level0, + VP8LDecoder* const dec, + uint32_t** const decoded_data) { + int ok = 1; + int transform_xsize = xsize; + int transform_ysize = ysize; + VP8LBitReader* const br = &dec->br_; + VP8LMetadata* const hdr = &dec->hdr_; + uint32_t* data = NULL; + int color_cache_bits = 0; + + // Read the transforms (may recurse). + if (is_level0) { + while (ok && VP8LReadBits(br, 1)) { + ok = ReadTransform(&transform_xsize, &transform_ysize, dec); + } + } + + // Color cache + if (ok && VP8LReadBits(br, 1)) { + color_cache_bits = VP8LReadBits(br, 4); + ok = (color_cache_bits >= 1 && color_cache_bits <= MAX_CACHE_BITS); + if (!ok) { + dec->status_ = VP8_STATUS_BITSTREAM_ERROR; + goto End; + } + } + + // Read the Huffman codes (may recurse). 
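+  // (Recursion is bounded: nested streams are decoded with is_level0 == 0,
+  // so they may declare neither transforms nor a meta-Huffman image of
+  // their own.)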
+ ok = ok && ReadHuffmanCodes(dec, transform_xsize, transform_ysize, + color_cache_bits, is_level0); + if (!ok) { + dec->status_ = VP8_STATUS_BITSTREAM_ERROR; + goto End; + } + + // Finish setting up the color-cache + if (color_cache_bits > 0) { + hdr->color_cache_size_ = 1 << color_cache_bits; + if (!VP8LColorCacheInit(&hdr->color_cache_, color_cache_bits)) { + dec->status_ = VP8_STATUS_OUT_OF_MEMORY; + ok = 0; + goto End; + } + } else { + hdr->color_cache_size_ = 0; + } + UpdateDecoder(dec, transform_xsize, transform_ysize); + + if (is_level0) { // level 0 complete + dec->state_ = READ_HDR; + goto End; + } + + { + const uint64_t total_size = (uint64_t)transform_xsize * transform_ysize; + data = (uint32_t*)WebPSafeMalloc(total_size, sizeof(*data)); + if (data == NULL) { + dec->status_ = VP8_STATUS_OUT_OF_MEMORY; + ok = 0; + goto End; + } + } + + // Use the Huffman trees to decode the LZ77 encoded data. + ok = DecodeImageData(dec, data, transform_xsize, transform_ysize, + transform_ysize, NULL); + ok = ok && !br->eos_; + + End: + if (!ok) { + WebPSafeFree(data); + ClearMetadata(hdr); + } else { + if (decoded_data != NULL) { + *decoded_data = data; + } else { + // We allocate image data in this function only for transforms. At level 0 + // (that is: not the transforms), we shouldn't have allocated anything. + assert(data == NULL); + assert(is_level0); + } + dec->last_pixel_ = 0; // Reset for future DECODE_DATA_FUNC() calls. + if (!is_level0) ClearMetadata(hdr); // Clean up temporary data behind. + } + return ok; +} + +//------------------------------------------------------------------------------ +// Allocate internal buffers dec->pixels_ and dec->argb_cache_. +static int AllocateInternalBuffers32b(VP8LDecoder* const dec, int final_width) { + const uint64_t num_pixels = (uint64_t)dec->width_ * dec->height_; + // Scratch buffer corresponding to top-prediction row for transforming the + // first row in the row-blocks. Not needed for paletted alpha. + const uint64_t cache_top_pixels = (uint16_t)final_width; + // Scratch buffer for temporary BGRA storage. Not needed for paletted alpha. + const uint64_t cache_pixels = (uint64_t)final_width * NUM_ARGB_CACHE_ROWS; + const uint64_t total_num_pixels = + num_pixels + cache_top_pixels + cache_pixels; + + assert(dec->width_ <= final_width); + dec->pixels_ = (uint32_t*)WebPSafeMalloc(total_num_pixels, sizeof(uint32_t)); + if (dec->pixels_ == NULL) { + dec->argb_cache_ = NULL; // for sanity check + dec->status_ = VP8_STATUS_OUT_OF_MEMORY; + return 0; + } + dec->argb_cache_ = dec->pixels_ + num_pixels + cache_top_pixels; + return 1; +} + +static int AllocateInternalBuffers8b(VP8LDecoder* const dec) { + const uint64_t total_num_pixels = (uint64_t)dec->width_ * dec->height_; + dec->argb_cache_ = NULL; // for sanity check + dec->pixels_ = (uint32_t*)WebPSafeMalloc(total_num_pixels, sizeof(uint8_t)); + if (dec->pixels_ == NULL) { + dec->status_ = VP8_STATUS_OUT_OF_MEMORY; + return 0; + } + return 1; +} + +//------------------------------------------------------------------------------ + +// Special row-processing that only stores the alpha data. +static void ExtractAlphaRows(VP8LDecoder* const dec, int last_row) { + int cur_row = dec->last_row_; + int num_rows = last_row - cur_row; + const uint32_t* in = dec->pixels_ + dec->width_ * cur_row; + + assert(last_row <= dec->io_->crop_bottom); + while (num_rows > 0) { + const int num_rows_to_process = + (num_rows > NUM_ARGB_CACHE_ROWS) ? 
NUM_ARGB_CACHE_ROWS : num_rows; + // Extract alpha (which is stored in the green plane). + ALPHDecoder* const alph_dec = (ALPHDecoder*)dec->io_->opaque; + uint8_t* const output = alph_dec->output_; + const int width = dec->io_->width; // the final width (!= dec->width_) + const int cache_pixs = width * num_rows_to_process; + uint8_t* const dst = output + width * cur_row; + const uint32_t* const src = dec->argb_cache_; + ApplyInverseTransforms(dec, cur_row, num_rows_to_process, in); + WebPExtractGreen(src, dst, cache_pixs); + AlphaApplyFilter(alph_dec, + cur_row, cur_row + num_rows_to_process, dst, width); + num_rows -= num_rows_to_process; + in += num_rows_to_process * dec->width_; + cur_row += num_rows_to_process; + } + assert(cur_row == last_row); + dec->last_row_ = dec->last_out_row_ = last_row; +} + +int VP8LDecodeAlphaHeader(ALPHDecoder* const alph_dec, + const uint8_t* const data, size_t data_size) { + int ok = 0; + VP8LDecoder* dec = VP8LNew(); + + if (dec == NULL) return 0; + + assert(alph_dec != NULL); + + dec->width_ = alph_dec->width_; + dec->height_ = alph_dec->height_; + dec->io_ = &alph_dec->io_; + dec->io_->opaque = alph_dec; + dec->io_->width = alph_dec->width_; + dec->io_->height = alph_dec->height_; + + dec->status_ = VP8_STATUS_OK; + VP8LInitBitReader(&dec->br_, data, data_size); + + if (!DecodeImageStream(alph_dec->width_, alph_dec->height_, 1, dec, NULL)) { + goto Err; + } + + // Special case: if alpha data uses only the color indexing transform and + // doesn't use color cache (a frequent case), we will use DecodeAlphaData() + // method that only needs allocation of 1 byte per pixel (alpha channel). + if (dec->next_transform_ == 1 && + dec->transforms_[0].type_ == COLOR_INDEXING_TRANSFORM && + Is8bOptimizable(&dec->hdr_)) { + alph_dec->use_8b_decode_ = 1; + ok = AllocateInternalBuffers8b(dec); + } else { + // Allocate internal buffers (note that dec->width_ may have changed here). + alph_dec->use_8b_decode_ = 0; + ok = AllocateInternalBuffers32b(dec, alph_dec->width_); + } + + if (!ok) goto Err; + + // Only set here, once we are sure it is valid (to avoid thread races). + alph_dec->vp8l_dec_ = dec; + return 1; + + Err: + VP8LDelete(dec); + return 0; +} + +int VP8LDecodeAlphaImageStream(ALPHDecoder* const alph_dec, int last_row) { + VP8LDecoder* const dec = alph_dec->vp8l_dec_; + assert(dec != NULL); + assert(last_row <= dec->height_); + + if (dec->last_row_ >= last_row) { + return 1; // done + } + + if (!alph_dec->use_8b_decode_) WebPInitAlphaProcessing(); + + // Decode (with special row processing). + return alph_dec->use_8b_decode_ ? 
+ DecodeAlphaData(dec, (uint8_t*)dec->pixels_, dec->width_, dec->height_, + last_row) : + DecodeImageData(dec, dec->pixels_, dec->width_, dec->height_, + last_row, ExtractAlphaRows); +} + +//------------------------------------------------------------------------------ + +int VP8LDecodeHeader(VP8LDecoder* const dec, VP8Io* const io) { + int width, height, has_alpha; + + if (dec == NULL) return 0; + if (io == NULL) { + dec->status_ = VP8_STATUS_INVALID_PARAM; + return 0; + } + + dec->io_ = io; + dec->status_ = VP8_STATUS_OK; + VP8LInitBitReader(&dec->br_, io->data, io->data_size); + if (!ReadImageInfo(&dec->br_, &width, &height, &has_alpha)) { + dec->status_ = VP8_STATUS_BITSTREAM_ERROR; + goto Error; + } + dec->state_ = READ_DIM; + io->width = width; + io->height = height; + + if (!DecodeImageStream(width, height, 1, dec, NULL)) goto Error; + return 1; + + Error: + VP8LClear(dec); + assert(dec->status_ != VP8_STATUS_OK); + return 0; +} + +int VP8LDecodeImage(VP8LDecoder* const dec) { + VP8Io* io = NULL; + WebPDecParams* params = NULL; + + // Sanity checks. + if (dec == NULL) return 0; + + assert(dec->hdr_.huffman_tables_ != NULL); + assert(dec->hdr_.htree_groups_ != NULL); + assert(dec->hdr_.num_htree_groups_ > 0); + + io = dec->io_; + assert(io != NULL); + params = (WebPDecParams*)io->opaque; + assert(params != NULL); + + // Initialization. + if (dec->state_ != READ_DATA) { + dec->output_ = params->output; + assert(dec->output_ != NULL); + + if (!WebPIoInitFromOptions(params->options, io, MODE_BGRA)) { + dec->status_ = VP8_STATUS_INVALID_PARAM; + goto Err; + } + + if (!AllocateInternalBuffers32b(dec, io->width)) goto Err; + +#if !defined(WEBP_REDUCE_SIZE) + if (io->use_scaling && !AllocateAndInitRescaler(dec, io)) goto Err; +#else + if (io->use_scaling) { + dec->status_ = VP8_STATUS_INVALID_PARAM; + goto Err; + } +#endif + if (io->use_scaling || WebPIsPremultipliedMode(dec->output_->colorspace)) { + // need the alpha-multiply functions for premultiplied output or rescaling + WebPInitAlphaProcessing(); + } + + if (!WebPIsRGBMode(dec->output_->colorspace)) { + WebPInitConvertARGBToYUV(); + if (dec->output_->u.YUVA.a != NULL) WebPInitAlphaProcessing(); + } + if (dec->incremental_) { + if (dec->hdr_.color_cache_size_ > 0 && + dec->hdr_.saved_color_cache_.colors_ == NULL) { + if (!VP8LColorCacheInit(&dec->hdr_.saved_color_cache_, + dec->hdr_.color_cache_.hash_bits_)) { + dec->status_ = VP8_STATUS_OUT_OF_MEMORY; + goto Err; + } + } + } + dec->state_ = READ_DATA; + } + + // Decode. + if (!DecodeImageData(dec, dec->pixels_, dec->width_, dec->height_, + io->crop_bottom, ProcessRows)) { + goto Err; + } + + params->last_y = dec->last_out_row_; + return 1; + + Err: + VP8LClear(dec); + assert(dec->status_ != VP8_STATUS_OK); + return 0; +} + +//------------------------------------------------------------------------------ diff --git a/vendor/github.com/sizeofint/webpanimation/dec_vp8li_dec.h b/vendor/github.com/sizeofint/webpanimation/dec_vp8li_dec.h new file mode 100644 index 00000000..02e3b9cc --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dec_vp8li_dec.h @@ -0,0 +1,135 @@ +// Copyright 2012 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. 
+// -----------------------------------------------------------------------------
+//
+// Lossless decoder: internal header.
+//
+// Author: Skal (pascal.massimino@gmail.com)
+//         Vikas Arora (vikaas.arora@gmail.com)
+
+#ifndef WEBP_DEC_VP8LI_DEC_H_
+#define WEBP_DEC_VP8LI_DEC_H_
+
+#include <string.h>   // for memcpy()
+#include "dec_webpi_dec.h"
+#include "utils_bit_reader_utils.h"
+#include "utils_color_cache_utils.h"
+#include "utils_huffman_utils.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum {
+  READ_DATA = 0,
+  READ_HDR = 1,
+  READ_DIM = 2
+} VP8LDecodeState;
+
+typedef struct VP8LTransform VP8LTransform;
+struct VP8LTransform {
+  VP8LImageTransformType type_;   // transform type.
+  int bits_;                      // subsampling bits defining transform window.
+  int xsize_;                     // transform window X index.
+  int ysize_;                     // transform window Y index.
+  uint32_t* data_;                // transform data.
+};
+
+typedef struct {
+  int color_cache_size_;
+  VP8LColorCache color_cache_;
+  VP8LColorCache saved_color_cache_;  // for incremental
+
+  int huffman_mask_;
+  int huffman_subsample_bits_;
+  int huffman_xsize_;
+  uint32_t* huffman_image_;
+  int num_htree_groups_;
+  HTreeGroup* htree_groups_;
+  HuffmanCode* huffman_tables_;
+} VP8LMetadata;
+
+typedef struct VP8LDecoder VP8LDecoder;
+struct VP8LDecoder {
+  VP8StatusCode status_;
+  VP8LDecodeState state_;
+  VP8Io* io_;
+
+  const WebPDecBuffer* output_;  // shortcut to io->opaque->output
+
+  uint32_t* pixels_;      // Internal data: either uint8_t* for alpha
+                          // or uint32_t* for BGRA.
+  uint32_t* argb_cache_;  // Scratch buffer for temporary BGRA storage.
+
+  VP8LBitReader br_;
+  int incremental_;         // if true, incremental decoding is expected
+  VP8LBitReader saved_br_;  // note: could be local variables too
+  int saved_last_pixel_;
+
+  int width_;
+  int height_;
+  int last_row_;      // last input row decoded so far.
+  int last_pixel_;    // last pixel decoded so far. However, it may
+                      // not be transformed, scaled and
+                      // color-converted yet.
+  int last_out_row_;  // last row output so far.
+
+  VP8LMetadata hdr_;
+
+  int next_transform_;
+  VP8LTransform transforms_[NUM_TRANSFORMS];
+  // or'd bitset storing the transforms types.
+  uint32_t transforms_seen_;
+
+  uint8_t* rescaler_memory;  // Working memory for rescaling work.
+  WebPRescaler* rescaler;    // Common rescaler for all channels.
+};
+
+//------------------------------------------------------------------------------
+// internal functions. Not public.
+
+struct ALPHDecoder;  // Defined in dec/alphai.h.
+
+// in vp8l.c
+
+// Decodes image header for alpha data stored using lossless compression.
+// Returns false in case of error.
+int VP8LDecodeAlphaHeader(struct ALPHDecoder* const alph_dec,
+                          const uint8_t* const data, size_t data_size);
+
+// Decodes *at least* 'last_row' rows of alpha. If some of the initial rows are
+// already decoded in previous call(s), it will resume decoding from where it
+// was paused.
+// Returns false in case of bitstream error.
+int VP8LDecodeAlphaImageStream(struct ALPHDecoder* const alph_dec,
+                               int last_row);
+
+// Allocates and initializes a new lossless decoder instance.
+VP8LDecoder* VP8LNew(void);
+
+// Decodes the image header. Returns false in case of error.
+int VP8LDecodeHeader(VP8LDecoder* const dec, VP8Io* const io);
+
+// Decodes an image. It's required to decode the lossless header before calling
+// this function. Returns false in case of error, with updated dec->status_.
+int VP8LDecodeImage(VP8LDecoder* const dec);
+
+// Resets the decoder in its initial state, reclaiming memory.
+// Preserves the dec->status_ value.
+void VP8LClear(VP8LDecoder* const dec);
+
+// Clears and deallocates a lossless decoder instance.
+void VP8LDelete(VP8LDecoder* const dec);
+
+//------------------------------------------------------------------------------
+
+#ifdef __cplusplus
+}    // extern "C"
+#endif
+
+#endif  // WEBP_DEC_VP8LI_DEC_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/dec_webp_dec.c b/vendor/github.com/sizeofint/webpanimation/dec_webp_dec.c
new file mode 100644
index 00000000..d3ff89a1
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dec_webp_dec.c
@@ -0,0 +1,845 @@
+// Copyright 2010 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Main decoding functions for WEBP images.
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <stdlib.h>
+
+#include "dec_vp8i_dec.h"
+#include "dec_vp8li_dec.h"
+#include "dec_webpi_dec.h"
+#include "utils_utils.h"
+#include "webp_mux_types.h"
+
+//------------------------------------------------------------------------------
+// RIFF layout is:
+//   Offset  tag
+//   0...3   "RIFF" 4-byte tag
+//   4...7   size of image data (including metadata) starting at offset 8
+//   8...11  "WEBP"  our form-type signature
+// The RIFF container (12 bytes) is followed by appropriate chunks:
+//   12..15  "VP8 ": 4-bytes tags, signaling the use of VP8 video format
+//   16..19  size of the raw VP8 image data, starting at offset 20
+//   20....  the VP8 bytes
+// Or,
+//   12..15  "VP8L": 4-bytes tags, signaling the use of VP8L lossless format
+//   16..19  size of the raw VP8L image data, starting at offset 20
+//   20....  the VP8L bytes
+// Or,
+//   12..15  "VP8X": 4-bytes tags, describing the extended-VP8 chunk.
+//   16..19  size of the VP8X chunk starting at offset 20.
+//   20..23  VP8X flags bit-map corresponding to the chunk-types present.
+//   24..26  Width of the Canvas Image.
+//   27..29  Height of the Canvas Image.
+// There can be extra chunks after the "VP8X" chunk (ICCP, ANMF, VP8, VP8L,
+// XMP, EXIF ...)
+// All sizes are in little-endian order.
+// Note: chunk data size must be padded to multiple of 2 when written.
+
+// Validates the RIFF container (if detected) and skips over it.
+// If a RIFF container is detected, returns:
+//     VP8_STATUS_BITSTREAM_ERROR for invalid header,
+//     VP8_STATUS_NOT_ENOUGH_DATA for truncated data if have_all_data is true,
+// and VP8_STATUS_OK otherwise.
+// In case there are not enough bytes (partial RIFF container), return 0 for
+// *riff_size. Else return the RIFF size extracted from the header.
+static VP8StatusCode ParseRIFF(const uint8_t** const data,
+                               size_t* const data_size, int have_all_data,
+                               size_t* const riff_size) {
+  assert(data != NULL);
+  assert(data_size != NULL);
+  assert(riff_size != NULL);
+
+  *riff_size = 0;  // Default: no RIFF present.
+  if (*data_size >= RIFF_HEADER_SIZE && !memcmp(*data, "RIFF", TAG_SIZE)) {
+    if (memcmp(*data + 8, "WEBP", TAG_SIZE)) {
+      return VP8_STATUS_BITSTREAM_ERROR;  // Wrong image file signature.
+    } else {
+      const uint32_t size = GetLE32(*data + TAG_SIZE);
+      // Check that we have at least one chunk (i.e. "WEBP" + "VP8?nnnn").
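+      // NOTE(editor, not upstream libwebp): the RIFF size field counts
+      // everything after offset 8, so a usable stream needs at least the
+      // 4-byte "WEBP" form tag plus one 8-byte chunk header
+      // ("VP8 "/"VP8L"/"VP8X" + its size), which is exactly the
+      // TAG_SIZE + CHUNK_HEADER_SIZE lower bound checked below.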
+ if (size < TAG_SIZE + CHUNK_HEADER_SIZE) { + return VP8_STATUS_BITSTREAM_ERROR; + } + if (size > MAX_CHUNK_PAYLOAD) { + return VP8_STATUS_BITSTREAM_ERROR; + } + if (have_all_data && (size > *data_size - CHUNK_HEADER_SIZE)) { + return VP8_STATUS_NOT_ENOUGH_DATA; // Truncated bitstream. + } + // We have a RIFF container. Skip it. + *riff_size = size; + *data += RIFF_HEADER_SIZE; + *data_size -= RIFF_HEADER_SIZE; + } + } + return VP8_STATUS_OK; +} + +// Validates the VP8X header and skips over it. +// Returns VP8_STATUS_BITSTREAM_ERROR for invalid VP8X header, +// VP8_STATUS_NOT_ENOUGH_DATA in case of insufficient data, and +// VP8_STATUS_OK otherwise. +// If a VP8X chunk is found, found_vp8x is set to true and *width_ptr, +// *height_ptr and *flags_ptr are set to the corresponding values extracted +// from the VP8X chunk. +static VP8StatusCode ParseVP8X(const uint8_t** const data, + size_t* const data_size, + int* const found_vp8x, + int* const width_ptr, int* const height_ptr, + uint32_t* const flags_ptr) { + const uint32_t vp8x_size = CHUNK_HEADER_SIZE + VP8X_CHUNK_SIZE; + assert(data != NULL); + assert(data_size != NULL); + assert(found_vp8x != NULL); + + *found_vp8x = 0; + + if (*data_size < CHUNK_HEADER_SIZE) { + return VP8_STATUS_NOT_ENOUGH_DATA; // Insufficient data. + } + + if (!memcmp(*data, "VP8X", TAG_SIZE)) { + int width, height; + uint32_t flags; + const uint32_t chunk_size = GetLE32(*data + TAG_SIZE); + if (chunk_size != VP8X_CHUNK_SIZE) { + return VP8_STATUS_BITSTREAM_ERROR; // Wrong chunk size. + } + + // Verify if enough data is available to validate the VP8X chunk. + if (*data_size < vp8x_size) { + return VP8_STATUS_NOT_ENOUGH_DATA; // Insufficient data. + } + flags = GetLE32(*data + 8); + width = 1 + GetLE24(*data + 12); + height = 1 + GetLE24(*data + 15); + if (width * (uint64_t)height >= MAX_IMAGE_AREA) { + return VP8_STATUS_BITSTREAM_ERROR; // image is too large + } + + if (flags_ptr != NULL) *flags_ptr = flags; + if (width_ptr != NULL) *width_ptr = width; + if (height_ptr != NULL) *height_ptr = height; + // Skip over VP8X header bytes. + *data += vp8x_size; + *data_size -= vp8x_size; + *found_vp8x = 1; + } + return VP8_STATUS_OK; +} + +// Skips to the next VP8/VP8L chunk header in the data given the size of the +// RIFF chunk 'riff_size'. +// Returns VP8_STATUS_BITSTREAM_ERROR if any invalid chunk size is encountered, +// VP8_STATUS_NOT_ENOUGH_DATA in case of insufficient data, and +// VP8_STATUS_OK otherwise. +// If an alpha chunk is found, *alpha_data and *alpha_size are set +// appropriately. +static VP8StatusCode ParseOptionalChunks(const uint8_t** const data, + size_t* const data_size, + size_t const riff_size, + const uint8_t** const alpha_data, + size_t* const alpha_size) { + const uint8_t* buf; + size_t buf_size; + uint32_t total_size = TAG_SIZE + // "WEBP". + CHUNK_HEADER_SIZE + // "VP8Xnnnn". + VP8X_CHUNK_SIZE; // data. + assert(data != NULL); + assert(data_size != NULL); + buf = *data; + buf_size = *data_size; + + assert(alpha_data != NULL); + assert(alpha_size != NULL); + *alpha_data = NULL; + *alpha_size = 0; + + while (1) { + uint32_t chunk_size; + uint32_t disk_chunk_size; // chunk_size with padding + + *data = buf; + *data_size = buf_size; + + if (buf_size < CHUNK_HEADER_SIZE) { // Insufficient data. + return VP8_STATUS_NOT_ENOUGH_DATA; + } + + chunk_size = GetLE32(buf + TAG_SIZE); + if (chunk_size > MAX_CHUNK_PAYLOAD) { + return VP8_STATUS_BITSTREAM_ERROR; // Not a valid chunk size. 
+ } + // For odd-sized chunk-payload, there's one byte padding at the end. + disk_chunk_size = (CHUNK_HEADER_SIZE + chunk_size + 1) & ~1; + total_size += disk_chunk_size; + + // Check that total bytes skipped so far does not exceed riff_size. + if (riff_size > 0 && (total_size > riff_size)) { + return VP8_STATUS_BITSTREAM_ERROR; // Not a valid chunk size. + } + + // Start of a (possibly incomplete) VP8/VP8L chunk implies that we have + // parsed all the optional chunks. + // Note: This check must occur before the check 'buf_size < disk_chunk_size' + // below to allow incomplete VP8/VP8L chunks. + if (!memcmp(buf, "VP8 ", TAG_SIZE) || + !memcmp(buf, "VP8L", TAG_SIZE)) { + return VP8_STATUS_OK; + } + + if (buf_size < disk_chunk_size) { // Insufficient data. + return VP8_STATUS_NOT_ENOUGH_DATA; + } + + if (!memcmp(buf, "ALPH", TAG_SIZE)) { // A valid ALPH header. + *alpha_data = buf + CHUNK_HEADER_SIZE; + *alpha_size = chunk_size; + } + + // We have a full and valid chunk; skip it. + buf += disk_chunk_size; + buf_size -= disk_chunk_size; + } +} + +// Validates the VP8/VP8L Header ("VP8 nnnn" or "VP8L nnnn") and skips over it. +// Returns VP8_STATUS_BITSTREAM_ERROR for invalid (chunk larger than +// riff_size) VP8/VP8L header, +// VP8_STATUS_NOT_ENOUGH_DATA in case of insufficient data, and +// VP8_STATUS_OK otherwise. +// If a VP8/VP8L chunk is found, *chunk_size is set to the total number of bytes +// extracted from the VP8/VP8L chunk header. +// The flag '*is_lossless' is set to 1 in case of VP8L chunk / raw VP8L data. +static VP8StatusCode ParseVP8Header(const uint8_t** const data_ptr, + size_t* const data_size, int have_all_data, + size_t riff_size, size_t* const chunk_size, + int* const is_lossless) { + const uint8_t* const data = *data_ptr; + const int is_vp8 = !memcmp(data, "VP8 ", TAG_SIZE); + const int is_vp8l = !memcmp(data, "VP8L", TAG_SIZE); + const uint32_t minimal_size = + TAG_SIZE + CHUNK_HEADER_SIZE; // "WEBP" + "VP8 nnnn" OR + // "WEBP" + "VP8Lnnnn" + assert(data != NULL); + assert(data_size != NULL); + assert(chunk_size != NULL); + assert(is_lossless != NULL); + + if (*data_size < CHUNK_HEADER_SIZE) { + return VP8_STATUS_NOT_ENOUGH_DATA; // Insufficient data. + } + + if (is_vp8 || is_vp8l) { + // Bitstream contains VP8/VP8L header. + const uint32_t size = GetLE32(data + TAG_SIZE); + if ((riff_size >= minimal_size) && (size > riff_size - minimal_size)) { + return VP8_STATUS_BITSTREAM_ERROR; // Inconsistent size information. + } + if (have_all_data && (size > *data_size - CHUNK_HEADER_SIZE)) { + return VP8_STATUS_NOT_ENOUGH_DATA; // Truncated bitstream. + } + // Skip over CHUNK_HEADER_SIZE bytes from VP8/VP8L Header. + *chunk_size = size; + *data_ptr += CHUNK_HEADER_SIZE; + *data_size -= CHUNK_HEADER_SIZE; + *is_lossless = is_vp8l; + } else { + // Raw VP8/VP8L bitstream (no header). + *is_lossless = VP8LCheckSignature(data, *data_size); + *chunk_size = *data_size; + } + + return VP8_STATUS_OK; +} + +//------------------------------------------------------------------------------ + +// Fetch '*width', '*height', '*has_alpha' and fill out 'headers' based on +// 'data'. All the output parameters may be NULL. If 'headers' is NULL only the +// minimal amount will be read to fetch the remaining parameters. +// If 'headers' is non-NULL this function will attempt to locate both alpha +// data (with or without a VP8X chunk) and the bitstream chunk (VP8/VP8L). 
+// Note: The following chunk sequences (before the raw VP8/VP8L data) are +// considered valid by this function: +// RIFF + VP8(L) +// RIFF + VP8X + (optional chunks) + VP8(L) +// ALPH + VP8 <-- Not a valid WebP format: only allowed for internal purpose. +// VP8(L) <-- Not a valid WebP format: only allowed for internal purpose. +static VP8StatusCode ParseHeadersInternal(const uint8_t* data, + size_t data_size, + int* const width, + int* const height, + int* const has_alpha, + int* const has_animation, + int* const format, + WebPHeaderStructure* const headers) { + int canvas_width = 0; + int canvas_height = 0; + int image_width = 0; + int image_height = 0; + int found_riff = 0; + int found_vp8x = 0; + int animation_present = 0; + const int have_all_data = (headers != NULL) ? headers->have_all_data : 0; + + VP8StatusCode status; + WebPHeaderStructure hdrs; + + if (data == NULL || data_size < RIFF_HEADER_SIZE) { + return VP8_STATUS_NOT_ENOUGH_DATA; + } + memset(&hdrs, 0, sizeof(hdrs)); + hdrs.data = data; + hdrs.data_size = data_size; + + // Skip over RIFF header. + status = ParseRIFF(&data, &data_size, have_all_data, &hdrs.riff_size); + if (status != VP8_STATUS_OK) { + return status; // Wrong RIFF header / insufficient data. + } + found_riff = (hdrs.riff_size > 0); + + // Skip over VP8X. + { + uint32_t flags = 0; + status = ParseVP8X(&data, &data_size, &found_vp8x, + &canvas_width, &canvas_height, &flags); + if (status != VP8_STATUS_OK) { + return status; // Wrong VP8X / insufficient data. + } + animation_present = !!(flags & ANIMATION_FLAG); + if (!found_riff && found_vp8x) { + // Note: This restriction may be removed in the future, if it becomes + // necessary to send VP8X chunk to the decoder. + return VP8_STATUS_BITSTREAM_ERROR; + } + if (has_alpha != NULL) *has_alpha = !!(flags & ALPHA_FLAG); + if (has_animation != NULL) *has_animation = animation_present; + if (format != NULL) *format = 0; // default = undefined + + image_width = canvas_width; + image_height = canvas_height; + if (found_vp8x && animation_present && headers == NULL) { + status = VP8_STATUS_OK; + goto ReturnWidthHeight; // Just return features from VP8X header. + } + } + + if (data_size < TAG_SIZE) { + status = VP8_STATUS_NOT_ENOUGH_DATA; + goto ReturnWidthHeight; + } + + // Skip over optional chunks if data started with "RIFF + VP8X" or "ALPH". + if ((found_riff && found_vp8x) || + (!found_riff && !found_vp8x && !memcmp(data, "ALPH", TAG_SIZE))) { + status = ParseOptionalChunks(&data, &data_size, hdrs.riff_size, + &hdrs.alpha_data, &hdrs.alpha_data_size); + if (status != VP8_STATUS_OK) { + goto ReturnWidthHeight; // Invalid chunk size / insufficient data. + } + } + + // Skip over VP8/VP8L header. + status = ParseVP8Header(&data, &data_size, have_all_data, hdrs.riff_size, + &hdrs.compressed_size, &hdrs.is_lossless); + if (status != VP8_STATUS_OK) { + goto ReturnWidthHeight; // Wrong VP8/VP8L chunk-header / insufficient data. + } + if (hdrs.compressed_size > MAX_CHUNK_PAYLOAD) { + return VP8_STATUS_BITSTREAM_ERROR; + } + + if (format != NULL && !animation_present) { + *format = hdrs.is_lossless ? 2 : 1; + } + + if (!hdrs.is_lossless) { + if (data_size < VP8_FRAME_HEADER_SIZE) { + status = VP8_STATUS_NOT_ENOUGH_DATA; + goto ReturnWidthHeight; + } + // Validates raw VP8 data. 
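+    // NOTE(editor, not upstream libwebp): VP8GetInfo() only parses the
+    // fixed-size uncompressed VP8 frame header, which is why
+    // VP8_FRAME_HEADER_SIZE bytes are enough here to recover the frame
+    // dimensions without touching any coefficient data.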
+ if (!VP8GetInfo(data, data_size, (uint32_t)hdrs.compressed_size, + &image_width, &image_height)) { + return VP8_STATUS_BITSTREAM_ERROR; + } + } else { + if (data_size < VP8L_FRAME_HEADER_SIZE) { + status = VP8_STATUS_NOT_ENOUGH_DATA; + goto ReturnWidthHeight; + } + // Validates raw VP8L data. + if (!VP8LGetInfo(data, data_size, &image_width, &image_height, has_alpha)) { + return VP8_STATUS_BITSTREAM_ERROR; + } + } + // Validates image size coherency. + if (found_vp8x) { + if (canvas_width != image_width || canvas_height != image_height) { + return VP8_STATUS_BITSTREAM_ERROR; + } + } + if (headers != NULL) { + *headers = hdrs; + headers->offset = data - headers->data; + assert((uint64_t)(data - headers->data) < MAX_CHUNK_PAYLOAD); + assert(headers->offset == headers->data_size - data_size); + } + ReturnWidthHeight: + if (status == VP8_STATUS_OK || + (status == VP8_STATUS_NOT_ENOUGH_DATA && found_vp8x && headers == NULL)) { + if (has_alpha != NULL) { + // If the data did not contain a VP8X/VP8L chunk the only definitive way + // to set this is by looking for alpha data (from an ALPH chunk). + *has_alpha |= (hdrs.alpha_data != NULL); + } + if (width != NULL) *width = image_width; + if (height != NULL) *height = image_height; + return VP8_STATUS_OK; + } else { + return status; + } +} + +VP8StatusCode WebPParseHeaders(WebPHeaderStructure* const headers) { + // status is marked volatile as a workaround for a clang-3.8 (aarch64) bug + volatile VP8StatusCode status; + int has_animation = 0; + assert(headers != NULL); + // fill out headers, ignore width/height/has_alpha. + status = ParseHeadersInternal(headers->data, headers->data_size, + NULL, NULL, NULL, &has_animation, + NULL, headers); + if (status == VP8_STATUS_OK || status == VP8_STATUS_NOT_ENOUGH_DATA) { + // The WebPDemux API + libwebp can be used to decode individual + // uncomposited frames or the WebPAnimDecoder can be used to fully + // reconstruct them (see webp/demux.h). + if (has_animation) { + status = VP8_STATUS_UNSUPPORTED_FEATURE; + } + } + return status; +} + +//------------------------------------------------------------------------------ +// WebPDecParams + +void WebPResetDecParams(WebPDecParams* const params) { + if (params != NULL) { + memset(params, 0, sizeof(*params)); + } +} + +//------------------------------------------------------------------------------ +// "Into" decoding variants + +// Main flow +static VP8StatusCode DecodeInto(const uint8_t* const data, size_t data_size, + WebPDecParams* const params) { + VP8StatusCode status; + VP8Io io; + WebPHeaderStructure headers; + + headers.data = data; + headers.data_size = data_size; + headers.have_all_data = 1; + status = WebPParseHeaders(&headers); // Process Pre-VP8 chunks. + if (status != VP8_STATUS_OK) { + return status; + } + + assert(params != NULL); + VP8InitIo(&io); + io.data = headers.data + headers.offset; + io.data_size = headers.data_size - headers.offset; + WebPInitCustomIo(params, &io); // Plug the I/O functions. + + if (!headers.is_lossless) { + VP8Decoder* const dec = VP8New(); + if (dec == NULL) { + return VP8_STATUS_OUT_OF_MEMORY; + } + dec->alpha_data_ = headers.alpha_data; + dec->alpha_data_size_ = headers.alpha_data_size; + + // Decode bitstream header, update io->width/io->height. + if (!VP8GetHeaders(dec, &io)) { + status = dec->status_; // An error occurred. Grab error status. + } else { + // Allocate/check output buffers. 
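+        // NOTE(editor, not upstream libwebp): on success the decoder writes
+        // straight into params->output; that buffer may wrap caller-provided
+        // external memory (is_external_memory, see DecodeIntoRGBABuffer()
+        // further down), so no extra copy is made on this path.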
+        status = WebPAllocateDecBuffer(io.width, io.height, params->options,
+                                       params->output);
+        if (status == VP8_STATUS_OK) {  // Decode
+          // This change must be done before calling VP8Decode()
+          dec->mt_method_ = VP8GetThreadMethod(params->options, &headers,
+                                               io.width, io.height);
+          VP8InitDithering(params->options, dec);
+          if (!VP8Decode(dec, &io)) {
+            status = dec->status_;
+          }
+        }
+      }
+      VP8Delete(dec);
+    } else {
+      VP8LDecoder* const dec = VP8LNew();
+      if (dec == NULL) {
+        return VP8_STATUS_OUT_OF_MEMORY;
+      }
+      if (!VP8LDecodeHeader(dec, &io)) {
+        status = dec->status_;   // An error occurred. Grab error status.
+      } else {
+        // Allocate/check output buffers.
+        status = WebPAllocateDecBuffer(io.width, io.height, params->options,
+                                       params->output);
+        if (status == VP8_STATUS_OK) {  // Decode
+          if (!VP8LDecodeImage(dec)) {
+            status = dec->status_;
+          }
+        }
+      }
+      VP8LDelete(dec);
+    }
+
+  if (status != VP8_STATUS_OK) {
+    WebPFreeDecBuffer(params->output);
+  } else {
+    if (params->options != NULL && params->options->flip) {
+      // This restores the original stride values if options->flip was used
+      // during the call to WebPAllocateDecBuffer above.
+      status = WebPFlipBuffer(params->output);
+    }
+  }
+  return status;
+}
+
+// Helpers
+static uint8_t* DecodeIntoRGBABuffer(WEBP_CSP_MODE colorspace,
+                                     const uint8_t* const data,
+                                     size_t data_size,
+                                     uint8_t* const rgba,
+                                     int stride, size_t size) {
+  WebPDecParams params;
+  WebPDecBuffer buf;
+  if (rgba == NULL) {
+    return NULL;
+  }
+  WebPInitDecBuffer(&buf);
+  WebPResetDecParams(&params);
+  params.output = &buf;
+  buf.colorspace = colorspace;
+  buf.u.RGBA.rgba = rgba;
+  buf.u.RGBA.stride = stride;
+  buf.u.RGBA.size = size;
+  buf.is_external_memory = 1;
+  if (DecodeInto(data, data_size, &params) != VP8_STATUS_OK) {
+    return NULL;
+  }
+  return rgba;
+}
+
+uint8_t* WebPDecodeRGBInto(const uint8_t* data, size_t data_size,
+                           uint8_t* output, size_t size, int stride) {
+  return DecodeIntoRGBABuffer(MODE_RGB, data, data_size, output, stride, size);
+}
+
+uint8_t* WebPDecodeRGBAInto(const uint8_t* data, size_t data_size,
+                            uint8_t* output, size_t size, int stride) {
+  return DecodeIntoRGBABuffer(MODE_RGBA, data, data_size, output, stride, size);
+}
+
+uint8_t* WebPDecodeARGBInto(const uint8_t* data, size_t data_size,
+                            uint8_t* output, size_t size, int stride) {
+  return DecodeIntoRGBABuffer(MODE_ARGB, data, data_size, output, stride, size);
+}
+
+uint8_t* WebPDecodeBGRInto(const uint8_t* data, size_t data_size,
+                           uint8_t* output, size_t size, int stride) {
+  return DecodeIntoRGBABuffer(MODE_BGR, data, data_size, output, stride, size);
+}
+
+uint8_t* WebPDecodeBGRAInto(const uint8_t* data, size_t data_size,
+                            uint8_t* output, size_t size, int stride) {
+  return DecodeIntoRGBABuffer(MODE_BGRA, data, data_size, output, stride, size);
+}
+
+uint8_t* WebPDecodeYUVInto(const uint8_t* data, size_t data_size,
+                           uint8_t* luma, size_t luma_size, int luma_stride,
+                           uint8_t* u, size_t u_size, int u_stride,
+                           uint8_t* v, size_t v_size, int v_stride) {
+  WebPDecParams params;
+  WebPDecBuffer output;
+  if (luma == NULL) return NULL;
+  WebPInitDecBuffer(&output);
+  WebPResetDecParams(&params);
+  params.output = &output;
+  output.colorspace = MODE_YUV;
+  output.u.YUVA.y = luma;
+  output.u.YUVA.y_stride = luma_stride;
+  output.u.YUVA.y_size = luma_size;
+  output.u.YUVA.u = u;
+  output.u.YUVA.u_stride = u_stride;
+  output.u.YUVA.u_size = u_size;
+  output.u.YUVA.v = v;
+  output.u.YUVA.v_stride = v_stride;
+  output.u.YUVA.v_size = v_size;
+  output.is_external_memory = 1;
+  if (DecodeInto(data,
+                 data_size, &params) != VP8_STATUS_OK) {
+    return NULL;
+  }
+  return luma;
+}
+
+//------------------------------------------------------------------------------
+
+static uint8_t* Decode(WEBP_CSP_MODE mode, const uint8_t* const data,
+                       size_t data_size, int* const width, int* const height,
+                       WebPDecBuffer* const keep_info) {
+  WebPDecParams params;
+  WebPDecBuffer output;
+
+  WebPInitDecBuffer(&output);
+  WebPResetDecParams(&params);
+  params.output = &output;
+  output.colorspace = mode;
+
+  // Retrieve (and report back) the required dimensions from bitstream.
+  if (!WebPGetInfo(data, data_size, &output.width, &output.height)) {
+    return NULL;
+  }
+  if (width != NULL) *width = output.width;
+  if (height != NULL) *height = output.height;
+
+  // Decode
+  if (DecodeInto(data, data_size, &params) != VP8_STATUS_OK) {
+    return NULL;
+  }
+  if (keep_info != NULL) {    // keep track of the side-info
+    WebPCopyDecBuffer(&output, keep_info);
+  }
+  // return decoded samples (don't clear 'output'!)
+  return WebPIsRGBMode(mode) ? output.u.RGBA.rgba : output.u.YUVA.y;
+}
+
+uint8_t* WebPDecodeRGB(const uint8_t* data, size_t data_size,
+                       int* width, int* height) {
+  return Decode(MODE_RGB, data, data_size, width, height, NULL);
+}
+
+uint8_t* WebPDecodeRGBA(const uint8_t* data, size_t data_size,
+                        int* width, int* height) {
+  return Decode(MODE_RGBA, data, data_size, width, height, NULL);
+}
+
+uint8_t* WebPDecodeARGB(const uint8_t* data, size_t data_size,
+                        int* width, int* height) {
+  return Decode(MODE_ARGB, data, data_size, width, height, NULL);
+}
+
+uint8_t* WebPDecodeBGR(const uint8_t* data, size_t data_size,
+                       int* width, int* height) {
+  return Decode(MODE_BGR, data, data_size, width, height, NULL);
+}
+
+uint8_t* WebPDecodeBGRA(const uint8_t* data, size_t data_size,
+                        int* width, int* height) {
+  return Decode(MODE_BGRA, data, data_size, width, height, NULL);
+}
+
+uint8_t* WebPDecodeYUV(const uint8_t* data, size_t data_size,
+                       int* width, int* height, uint8_t** u, uint8_t** v,
+                       int* stride, int* uv_stride) {
+  WebPDecBuffer output;   // only to preserve the side-infos
+  uint8_t* const out = Decode(MODE_YUV, data, data_size,
+                              width, height, &output);
+
+  if (out != NULL) {
+    const WebPYUVABuffer* const buf = &output.u.YUVA;
+    *u = buf->u;
+    *v = buf->v;
+    *stride = buf->y_stride;
+    *uv_stride = buf->u_stride;
+    assert(buf->u_stride == buf->v_stride);
+  }
+  return out;
+}
+
+static void DefaultFeatures(WebPBitstreamFeatures* const features) {
+  assert(features != NULL);
+  memset(features, 0, sizeof(*features));
+}
+
+static VP8StatusCode GetFeatures(const uint8_t* const data, size_t data_size,
+                                 WebPBitstreamFeatures* const features) {
+  if (features == NULL || data == NULL) {
+    return VP8_STATUS_INVALID_PARAM;
+  }
+  DefaultFeatures(features);
+
+  // Only parse enough of the data to retrieve the features.
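+  // NOTE(editor, not upstream libwebp): passing headers == NULL below asks
+  // ParseHeadersInternal() to stop as soon as width/height/alpha/animation/
+  // format are known, without locating the alpha and VP8/VP8L payload chunks.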
+  return ParseHeadersInternal(data, data_size,
+                              &features->width, &features->height,
+                              &features->has_alpha, &features->has_animation,
+                              &features->format, NULL);
+}
+
+//------------------------------------------------------------------------------
+// WebPGetInfo()
+
+int WebPGetInfo(const uint8_t* data, size_t data_size,
+                int* width, int* height) {
+  WebPBitstreamFeatures features;
+
+  if (GetFeatures(data, data_size, &features) != VP8_STATUS_OK) {
+    return 0;
+  }
+
+  if (width != NULL) {
+    *width = features.width;
+  }
+  if (height != NULL) {
+    *height = features.height;
+  }
+
+  return 1;
+}
+
+//------------------------------------------------------------------------------
+// Advanced decoding API
+
+int WebPInitDecoderConfigInternal(WebPDecoderConfig* config,
+                                  int version) {
+  if (WEBP_ABI_IS_INCOMPATIBLE(version, WEBP_DECODER_ABI_VERSION)) {
+    return 0;   // version mismatch
+  }
+  if (config == NULL) {
+    return 0;
+  }
+  memset(config, 0, sizeof(*config));
+  DefaultFeatures(&config->input);
+  WebPInitDecBuffer(&config->output);
+  return 1;
+}
+
+VP8StatusCode WebPGetFeaturesInternal(const uint8_t* data, size_t data_size,
+                                      WebPBitstreamFeatures* features,
+                                      int version) {
+  if (WEBP_ABI_IS_INCOMPATIBLE(version, WEBP_DECODER_ABI_VERSION)) {
+    return VP8_STATUS_INVALID_PARAM;   // version mismatch
+  }
+  if (features == NULL) {
+    return VP8_STATUS_INVALID_PARAM;
+  }
+  return GetFeatures(data, data_size, features);
+}
+
+VP8StatusCode WebPDecode(const uint8_t* data, size_t data_size,
+                         WebPDecoderConfig* config) {
+  WebPDecParams params;
+  VP8StatusCode status;
+
+  if (config == NULL) {
+    return VP8_STATUS_INVALID_PARAM;
+  }
+
+  status = GetFeatures(data, data_size, &config->input);
+  if (status != VP8_STATUS_OK) {
+    if (status == VP8_STATUS_NOT_ENOUGH_DATA) {
+      return VP8_STATUS_BITSTREAM_ERROR;  // Not-enough-data treated as error.
+    }
+    return status;
+  }
+
+  WebPResetDecParams(&params);
+  params.options = &config->options;
+  params.output = &config->output;
+  if (WebPAvoidSlowMemory(params.output, &config->input)) {
+    // decoding to slow memory: use a temporary in-mem buffer to decode into.
+    WebPDecBuffer in_mem_buffer;
+    WebPInitDecBuffer(&in_mem_buffer);
+    in_mem_buffer.colorspace = config->output.colorspace;
+    in_mem_buffer.width = config->input.width;
+    in_mem_buffer.height = config->input.height;
+    params.output = &in_mem_buffer;
+    status = DecodeInto(data, data_size, &params);
+    if (status == VP8_STATUS_OK) {  // do the slow-copy
+      status = WebPCopyDecBufferPixels(&in_mem_buffer, &config->output);
+    }
+    WebPFreeDecBuffer(&in_mem_buffer);
+  } else {
+    status = DecodeInto(data, data_size, &params);
+  }
+
+  return status;
+}
+
+//------------------------------------------------------------------------------
+// Cropping and rescaling.
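+// NOTE(editor): illustrative caller-side sketch (not part of upstream
+// libwebp) of the options interpreted below; crop a 100x100 window whose
+// top-left corner is at (8, 8):
+//   WebPDecoderConfig config;
+//   WebPInitDecoderConfig(&config);
+//   config.options.use_cropping = 1;
+//   config.options.crop_left   = 8;
+//   config.options.crop_top    = 8;
+//   config.options.crop_width  = 100;
+//   config.options.crop_height = 100;
+//   WebPDecode(data, data_size, &config);  // 'data'/'data_size' from caller
+// For non-RGB sources the crop origin is snapped to even coordinates, as
+// WebPIoInitFromOptions() below requires for YUV420.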
+ +int WebPIoInitFromOptions(const WebPDecoderOptions* const options, + VP8Io* const io, WEBP_CSP_MODE src_colorspace) { + const int W = io->width; + const int H = io->height; + int x = 0, y = 0, w = W, h = H; + + // Cropping + io->use_cropping = (options != NULL) && (options->use_cropping > 0); + if (io->use_cropping) { + w = options->crop_width; + h = options->crop_height; + x = options->crop_left; + y = options->crop_top; + if (!WebPIsRGBMode(src_colorspace)) { // only snap for YUV420 + x &= ~1; + y &= ~1; + } + if (x < 0 || y < 0 || w <= 0 || h <= 0 || x + w > W || y + h > H) { + return 0; // out of frame boundary error + } + } + io->crop_left = x; + io->crop_top = y; + io->crop_right = x + w; + io->crop_bottom = y + h; + io->mb_w = w; + io->mb_h = h; + + // Scaling + io->use_scaling = (options != NULL) && (options->use_scaling > 0); + if (io->use_scaling) { + int scaled_width = options->scaled_width; + int scaled_height = options->scaled_height; + if (!WebPRescalerGetScaledDimensions(w, h, &scaled_width, &scaled_height)) { + return 0; + } + io->scaled_width = scaled_width; + io->scaled_height = scaled_height; + } + + // Filter + io->bypass_filtering = (options != NULL) && options->bypass_filtering; + + // Fancy upsampler +#ifdef FANCY_UPSAMPLING + io->fancy_upsampling = (options == NULL) || (!options->no_fancy_upsampling); +#endif + + if (io->use_scaling) { + // disable filter (only for large downscaling ratio). + io->bypass_filtering = (io->scaled_width < W * 3 / 4) && + (io->scaled_height < H * 3 / 4); + io->fancy_upsampling = 0; + } + return 1; +} + +//------------------------------------------------------------------------------ diff --git a/vendor/github.com/sizeofint/webpanimation/dec_webpi_dec.h b/vendor/github.com/sizeofint/webpanimation/dec_webpi_dec.h new file mode 100644 index 00000000..916d0a6f --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dec_webpi_dec.h @@ -0,0 +1,133 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Internal header: WebP decoding parameters and custom IO on buffer +// +// Author: somnath@google.com (Somnath Banerjee) + +#ifndef WEBP_DEC_WEBPI_DEC_H_ +#define WEBP_DEC_WEBPI_DEC_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "utils_rescaler_utils.h" +#include "dec_vp8_dec.h" + +//------------------------------------------------------------------------------ +// WebPDecParams: Decoding output parameters. Transient internal object. + +typedef struct WebPDecParams WebPDecParams; +typedef int (*OutputFunc)(const VP8Io* const io, WebPDecParams* const p); +typedef int (*OutputAlphaFunc)(const VP8Io* const io, WebPDecParams* const p, + int expected_num_out_lines); +typedef int (*OutputRowFunc)(WebPDecParams* const p, int y_pos, + int max_out_lines); + +struct WebPDecParams { + WebPDecBuffer* output; // output buffer. 
+ uint8_t* tmp_y, *tmp_u, *tmp_v; // cache for the fancy upsampler + // or used for tmp rescaling + + int last_y; // coordinate of the line that was last output + const WebPDecoderOptions* options; // if not NULL, use alt decoding features + + WebPRescaler* scaler_y, *scaler_u, *scaler_v, *scaler_a; // rescalers + void* memory; // overall scratch memory for the output work. + + OutputFunc emit; // output RGB or YUV samples + OutputAlphaFunc emit_alpha; // output alpha channel + OutputRowFunc emit_alpha_row; // output one line of rescaled alpha values +}; + +// Should be called first, before any use of the WebPDecParams object. +void WebPResetDecParams(WebPDecParams* const params); + +//------------------------------------------------------------------------------ +// Header parsing helpers + +// Structure storing a description of the RIFF headers. +typedef struct { + const uint8_t* data; // input buffer + size_t data_size; // input buffer size + int have_all_data; // true if all data is known to be available + size_t offset; // offset to main data chunk (VP8 or VP8L) + const uint8_t* alpha_data; // points to alpha chunk (if present) + size_t alpha_data_size; // alpha chunk size + size_t compressed_size; // VP8/VP8L compressed data size + size_t riff_size; // size of the riff payload (or 0 if absent) + int is_lossless; // true if a VP8L chunk is present +} WebPHeaderStructure; + +// Skips over all valid chunks prior to the first VP8/VP8L frame header. +// Returns: VP8_STATUS_OK, VP8_STATUS_BITSTREAM_ERROR (invalid header/chunk), +// VP8_STATUS_NOT_ENOUGH_DATA (partial input) or VP8_STATUS_UNSUPPORTED_FEATURE +// in the case of non-decodable features (animation for instance). +// In 'headers', compressed_size, offset, alpha_data, alpha_size, and lossless +// fields are updated appropriately upon success. +VP8StatusCode WebPParseHeaders(WebPHeaderStructure* const headers); + +//------------------------------------------------------------------------------ +// Misc utils + +// Initializes VP8Io with custom setup, io and teardown functions. The default +// hooks will use the supplied 'params' as io->opaque handle. +void WebPInitCustomIo(WebPDecParams* const params, VP8Io* const io); + +// Setup crop_xxx fields, mb_w and mb_h in io. 'src_colorspace' refers +// to the *compressed* format, not the output one. +int WebPIoInitFromOptions(const WebPDecoderOptions* const options, + VP8Io* const io, WEBP_CSP_MODE src_colorspace); + +//------------------------------------------------------------------------------ +// Internal functions regarding WebPDecBuffer memory (in buffer.c). +// Don't really need to be externally visible for now. + +// Prepare 'buffer' with the requested initial dimensions width/height. +// If no external storage is supplied, initializes buffer by allocating output +// memory and setting up the stride information. Validate the parameters. Return +// an error code in case of problem (no memory, or invalid stride / size / +// dimension / etc.). If *options is not NULL, also verify that the options' +// parameters are valid and apply them to the width/height dimensions of the +// output buffer. This takes cropping / scaling / rotation into account. +// Also incorporates the options->flip flag to flip the buffer parameters if +// needed. +VP8StatusCode WebPAllocateDecBuffer(int width, int height, + const WebPDecoderOptions* const options, + WebPDecBuffer* const buffer); + +// Flip buffer vertically by negating the various strides. 
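+// NOTE(editor, assumption about the accompanying buffer code, which is not
+// shown in this hunk): the flip is typically done by pointing each plane at
+// its last row and negating the stride, so no pixels are copied.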
+VP8StatusCode WebPFlipBuffer(WebPDecBuffer* const buffer);
+
+// Copy 'src' into 'dst' buffer, making sure 'dst' is not marked as owner of the
+// memory (still held by 'src'). No pixels are copied.
+void WebPCopyDecBuffer(const WebPDecBuffer* const src,
+                       WebPDecBuffer* const dst);
+
+// Copy and transfer ownership from src to dst (beware of parameter order!)
+void WebPGrabDecBuffer(WebPDecBuffer* const src, WebPDecBuffer* const dst);
+
+// Copy pixels from 'src' into a *preallocated* 'dst' buffer. Returns
+// VP8_STATUS_INVALID_PARAM if the 'dst' is not set up correctly for the copy.
+VP8StatusCode WebPCopyDecBufferPixels(const WebPDecBuffer* const src,
+                                      WebPDecBuffer* const dst);
+
+// Returns true if decoding will be slow with the current configuration
+// and bitstream features.
+int WebPAvoidSlowMemory(const WebPDecBuffer* const output,
+                        const WebPBitstreamFeatures* const features);
+
+//------------------------------------------------------------------------------
+
+#ifdef __cplusplus
+}    // extern "C"
+#endif
+
+#endif  // WEBP_DEC_WEBPI_DEC_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/demux_anim_decode.c b/vendor/github.com/sizeofint/webpanimation/demux_anim_decode.c
new file mode 100644
index 00000000..938e2d20
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/demux_anim_decode.c
@@ -0,0 +1,457 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// AnimDecoder implementation.
+//
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <assert.h>
+#include <string.h>
+
+#include "utils_utils.h"
+#include "webp_decode.h"
+#include "webp_demux.h"
+
+#define NUM_CHANNELS 4
+
+typedef void (*BlendRowFunc)(uint32_t* const, const uint32_t* const, int);
+static void BlendPixelRowNonPremult(uint32_t* const src,
+                                    const uint32_t* const dst, int num_pixels);
+static void BlendPixelRowPremult(uint32_t* const src, const uint32_t* const dst,
+                                 int num_pixels);
+
+struct WebPAnimDecoder {
+  WebPDemuxer* demux_;             // Demuxer created from given WebP bitstream.
+  WebPDecoderConfig config_;       // Decoder config.
+  // Note: we use a pointer to a function blending multiple pixels at a time to
+  // allow possible inlining of per-pixel blending function.
+  BlendRowFunc blend_func_;        // Pointer to the chosen blend row function.
+  WebPAnimInfo info_;              // Global info about the animation.
+  uint8_t* curr_frame_;            // Current canvas (not disposed).
+  uint8_t* prev_frame_disposed_;   // Previous canvas (properly disposed).
+  int prev_frame_timestamp_;       // Previous frame timestamp (milliseconds).
+  WebPIterator prev_iter_;         // Iterator object for previous frame.
+  int prev_frame_was_keyframe_;    // True if previous frame was a keyframe.
+  int next_frame_;                 // Index of the next frame to be decoded
+                                   // (starting from 1).
+}; + +static void DefaultDecoderOptions(WebPAnimDecoderOptions* const dec_options) { + dec_options->color_mode = MODE_RGBA; + dec_options->use_threads = 0; +} + +int WebPAnimDecoderOptionsInitInternal(WebPAnimDecoderOptions* dec_options, + int abi_version) { + if (dec_options == NULL || + WEBP_ABI_IS_INCOMPATIBLE(abi_version, WEBP_DEMUX_ABI_VERSION)) { + return 0; + } + DefaultDecoderOptions(dec_options); + return 1; +} + +static int ApplyDecoderOptions(const WebPAnimDecoderOptions* const dec_options, + WebPAnimDecoder* const dec) { + WEBP_CSP_MODE mode; + WebPDecoderConfig* config = &dec->config_; + assert(dec_options != NULL); + + mode = dec_options->color_mode; + if (mode != MODE_RGBA && mode != MODE_BGRA && + mode != MODE_rgbA && mode != MODE_bgrA) { + return 0; + } + dec->blend_func_ = (mode == MODE_RGBA || mode == MODE_BGRA) + ? &BlendPixelRowNonPremult + : &BlendPixelRowPremult; + WebPInitDecoderConfig(config); + config->output.colorspace = mode; + config->output.is_external_memory = 1; + config->options.use_threads = dec_options->use_threads; + // Note: config->output.u.RGBA is set at the time of decoding each frame. + return 1; +} + +WebPAnimDecoder* WebPAnimDecoderNewInternal( + const WebPData* webp_data, const WebPAnimDecoderOptions* dec_options, + int abi_version) { + WebPAnimDecoderOptions options; + WebPAnimDecoder* dec = NULL; + if (webp_data == NULL || + WEBP_ABI_IS_INCOMPATIBLE(abi_version, WEBP_DEMUX_ABI_VERSION)) { + return NULL; + } + + // Note: calloc() so that the pointer members are initialized to NULL. + dec = (WebPAnimDecoder*)WebPSafeCalloc(1ULL, sizeof(*dec)); + if (dec == NULL) goto Error; + + if (dec_options != NULL) { + options = *dec_options; + } else { + DefaultDecoderOptions(&options); + } + if (!ApplyDecoderOptions(&options, dec)) goto Error; + + dec->demux_ = WebPDemux(webp_data); + if (dec->demux_ == NULL) goto Error; + + dec->info_.canvas_width = WebPDemuxGetI(dec->demux_, WEBP_FF_CANVAS_WIDTH); + dec->info_.canvas_height = WebPDemuxGetI(dec->demux_, WEBP_FF_CANVAS_HEIGHT); + dec->info_.loop_count = WebPDemuxGetI(dec->demux_, WEBP_FF_LOOP_COUNT); + dec->info_.bgcolor = WebPDemuxGetI(dec->demux_, WEBP_FF_BACKGROUND_COLOR); + dec->info_.frame_count = WebPDemuxGetI(dec->demux_, WEBP_FF_FRAME_COUNT); + + // Note: calloc() because we fill frame with zeroes as well. + dec->curr_frame_ = (uint8_t*)WebPSafeCalloc( + dec->info_.canvas_width * NUM_CHANNELS, dec->info_.canvas_height); + if (dec->curr_frame_ == NULL) goto Error; + dec->prev_frame_disposed_ = (uint8_t*)WebPSafeCalloc( + dec->info_.canvas_width * NUM_CHANNELS, dec->info_.canvas_height); + if (dec->prev_frame_disposed_ == NULL) goto Error; + + WebPAnimDecoderReset(dec); + return dec; + + Error: + WebPAnimDecoderDelete(dec); + return NULL; +} + +int WebPAnimDecoderGetInfo(const WebPAnimDecoder* dec, WebPAnimInfo* info) { + if (dec == NULL || info == NULL) return 0; + *info = dec->info_; + return 1; +} + +// Returns true if the frame covers the full canvas. +static int IsFullFrame(int width, int height, int canvas_width, + int canvas_height) { + return (width == canvas_width && height == canvas_height); +} + +// Clear the canvas to transparent. +static int ZeroFillCanvas(uint8_t* buf, uint32_t canvas_width, + uint32_t canvas_height) { + const uint64_t size = + (uint64_t)canvas_width * canvas_height * NUM_CHANNELS * sizeof(*buf); + if (size != (size_t)size) return 0; + memset(buf, 0, (size_t)size); + return 1; +} + +// Clear given frame rectangle to transparent. 
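+// NOTE(editor, not upstream libwebp): used by WebPAnimDecoderGetNext() below
+// to implement WEBP_MUX_DISPOSE_BACKGROUND: only the previous frame's
+// rectangle is reset to transparent, never the whole canvas.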
+static void ZeroFillFrameRect(uint8_t* buf, int buf_stride, int x_offset, + int y_offset, int width, int height) { + int j; + assert(width * NUM_CHANNELS <= buf_stride); + buf += y_offset * buf_stride + x_offset * NUM_CHANNELS; + for (j = 0; j < height; ++j) { + memset(buf, 0, width * NUM_CHANNELS); + buf += buf_stride; + } +} + +// Copy width * height pixels from 'src' to 'dst'. +static int CopyCanvas(const uint8_t* src, uint8_t* dst, + uint32_t width, uint32_t height) { + const uint64_t size = (uint64_t)width * height * NUM_CHANNELS; + if (size != (size_t)size) return 0; + assert(src != NULL && dst != NULL); + memcpy(dst, src, (size_t)size); + return 1; +} + +// Returns true if the current frame is a key-frame. +static int IsKeyFrame(const WebPIterator* const curr, + const WebPIterator* const prev, + int prev_frame_was_key_frame, + int canvas_width, int canvas_height) { + if (curr->frame_num == 1) { + return 1; + } else if ((!curr->has_alpha || curr->blend_method == WEBP_MUX_NO_BLEND) && + IsFullFrame(curr->width, curr->height, + canvas_width, canvas_height)) { + return 1; + } else { + return (prev->dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) && + (IsFullFrame(prev->width, prev->height, canvas_width, + canvas_height) || + prev_frame_was_key_frame); + } +} + + +// Blend a single channel of 'src' over 'dst', given their alpha channel values. +// 'src' and 'dst' are assumed to be NOT pre-multiplied by alpha. +static uint8_t BlendChannelNonPremult(uint32_t src, uint8_t src_a, + uint32_t dst, uint8_t dst_a, + uint32_t scale, int shift) { + const uint8_t src_channel = (src >> shift) & 0xff; + const uint8_t dst_channel = (dst >> shift) & 0xff; + const uint32_t blend_unscaled = src_channel * src_a + dst_channel * dst_a; + assert(blend_unscaled < (1ULL << 32) / scale); + return (blend_unscaled * scale) >> 24; +} + +// Blend 'src' over 'dst' assuming they are NOT pre-multiplied by alpha. +static uint32_t BlendPixelNonPremult(uint32_t src, uint32_t dst) { + const uint8_t src_a = (src >> 24) & 0xff; + + if (src_a == 0) { + return dst; + } else { + const uint8_t dst_a = (dst >> 24) & 0xff; + // This is the approximate integer arithmetic for the actual formula: + // dst_factor_a = (dst_a * (255 - src_a)) / 255. + const uint8_t dst_factor_a = (dst_a * (256 - src_a)) >> 8; + const uint8_t blend_a = src_a + dst_factor_a; + const uint32_t scale = (1UL << 24) / blend_a; + + const uint8_t blend_r = + BlendChannelNonPremult(src, src_a, dst, dst_factor_a, scale, 0); + const uint8_t blend_g = + BlendChannelNonPremult(src, src_a, dst, dst_factor_a, scale, 8); + const uint8_t blend_b = + BlendChannelNonPremult(src, src_a, dst, dst_factor_a, scale, 16); + assert(src_a + dst_factor_a < 256); + + return (blend_r << 0) | + (blend_g << 8) | + (blend_b << 16) | + ((uint32_t)blend_a << 24); + } +} + +// Blend 'num_pixels' in 'src' over 'dst' assuming they are NOT pre-multiplied +// by alpha. +static void BlendPixelRowNonPremult(uint32_t* const src, + const uint32_t* const dst, int num_pixels) { + int i; + for (i = 0; i < num_pixels; ++i) { + const uint8_t src_alpha = (src[i] >> 24) & 0xff; + if (src_alpha != 0xff) { + src[i] = BlendPixelNonPremult(src[i], dst[i]); + } + } +} + +// Individually multiply each channel in 'pix' by 'scale'. 
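+// NOTE(editor, not upstream libwebp): this is a SIMD-within-a-register trick.
+// The 0x00FF00FF mask splits the pixel into its two even bytes (rb) and two
+// odd bytes (ag), so one 32-bit multiply scales two 8-bit channels at once;
+// with scale <= 256 each 16-bit lane cannot overflow (255 * 256 < 65536), and
+// the shift by 8 renormalizes. BlendPixelPremult() below calls this with
+// scale = 256 - src_a, i.e. the standard premultiplied-alpha 'over' operator.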
+static WEBP_INLINE uint32_t ChannelwiseMultiply(uint32_t pix, uint32_t scale) {
+  uint32_t mask = 0x00FF00FF;
+  uint32_t rb = ((pix & mask) * scale) >> 8;
+  uint32_t ag = ((pix >> 8) & mask) * scale;
+  return (rb & mask) | (ag & ~mask);
+}
+
+// Blend 'src' over 'dst' assuming they are pre-multiplied by alpha.
+static uint32_t BlendPixelPremult(uint32_t src, uint32_t dst) {
+  const uint8_t src_a = (src >> 24) & 0xff;
+  return src + ChannelwiseMultiply(dst, 256 - src_a);
+}
+
+// Blend 'num_pixels' in 'src' over 'dst' assuming they are pre-multiplied by
+// alpha.
+static void BlendPixelRowPremult(uint32_t* const src, const uint32_t* const dst,
+                                 int num_pixels) {
+  int i;
+  for (i = 0; i < num_pixels; ++i) {
+    const uint8_t src_alpha = (src[i] >> 24) & 0xff;
+    if (src_alpha != 0xff) {
+      src[i] = BlendPixelPremult(src[i], dst[i]);
+    }
+  }
+}
+
+// Returns two ranges (<left, width> pairs) at row 'canvas_y', that belong to
+// 'src' but not 'dst'. A point range is empty if the corresponding width is 0.
+static void FindBlendRangeAtRow(const WebPIterator* const src,
+                                const WebPIterator* const dst, int canvas_y,
+                                int* const left1, int* const width1,
+                                int* const left2, int* const width2) {
+  const int src_max_x = src->x_offset + src->width;
+  const int dst_max_x = dst->x_offset + dst->width;
+  const int dst_max_y = dst->y_offset + dst->height;
+  assert(canvas_y >= src->y_offset && canvas_y < (src->y_offset + src->height));
+  *left1 = -1;
+  *width1 = 0;
+  *left2 = -1;
+  *width2 = 0;
+
+  if (canvas_y < dst->y_offset || canvas_y >= dst_max_y ||
+      src->x_offset >= dst_max_x || src_max_x <= dst->x_offset) {
+    *left1 = src->x_offset;
+    *width1 = src->width;
+    return;
+  }
+
+  if (src->x_offset < dst->x_offset) {
+    *left1 = src->x_offset;
+    *width1 = dst->x_offset - src->x_offset;
+  }
+
+  if (src_max_x > dst_max_x) {
+    *left2 = dst_max_x;
+    *width2 = src_max_x - dst_max_x;
+  }
+}
+
+int WebPAnimDecoderGetNext(WebPAnimDecoder* dec,
+                           uint8_t** buf_ptr, int* timestamp_ptr) {
+  WebPIterator iter;
+  uint32_t width;
+  uint32_t height;
+  int is_key_frame;
+  int timestamp;
+  BlendRowFunc blend_row;
+
+  if (dec == NULL || buf_ptr == NULL || timestamp_ptr == NULL) return 0;
+  if (!WebPAnimDecoderHasMoreFrames(dec)) return 0;
+
+  width = dec->info_.canvas_width;
+  height = dec->info_.canvas_height;
+  blend_row = dec->blend_func_;
+
+  // Get compressed frame.
+  if (!WebPDemuxGetFrame(dec->demux_, dec->next_frame_, &iter)) {
+    return 0;
+  }
+  timestamp = dec->prev_frame_timestamp_ + iter.duration;
+
+  // Initialize.
+  is_key_frame = IsKeyFrame(&iter, &dec->prev_iter_,
+                            dec->prev_frame_was_keyframe_, width, height);
+  if (is_key_frame) {
+    if (!ZeroFillCanvas(dec->curr_frame_, width, height)) {
+      goto Error;
+    }
+  } else {
+    if (!CopyCanvas(dec->prev_frame_disposed_, dec->curr_frame_,
+                    width, height)) {
+      goto Error;
+    }
+  }
+
+  // Decode.
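+  // NOTE(editor, not upstream libwebp): the fragment is decoded directly into
+  // the canvas: buf->rgba is aimed at (x_offset, y_offset) inside curr_frame_
+  // while the stride stays the full canvas stride, so no per-frame scratch
+  // buffer is needed.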
+  {
+    const uint8_t* in = iter.fragment.bytes;
+    const size_t in_size = iter.fragment.size;
+    const uint32_t stride = width * NUM_CHANNELS;  // at most 25 + 2 bits
+    const uint64_t out_offset = (uint64_t)iter.y_offset * stride +
+                                (uint64_t)iter.x_offset * NUM_CHANNELS;  // 53b
+    const uint64_t size = (uint64_t)iter.height * stride;  // at most 25 + 27b
+    WebPDecoderConfig* const config = &dec->config_;
+    WebPRGBABuffer* const buf = &config->output.u.RGBA;
+    if ((size_t)size != size) goto Error;
+    buf->stride = (int)stride;
+    buf->size = (size_t)size;
+    buf->rgba = dec->curr_frame_ + out_offset;
+
+    if (WebPDecode(in, in_size, config) != VP8_STATUS_OK) {
+      goto Error;
+    }
+  }
+
+  // During the decoding of current frame, we may have set some pixels to be
+  // transparent (i.e. alpha < 255). However, the value of each of these
+  // pixels should have been determined by blending it against the value of
+  // that pixel in the previous frame if the blend method is WEBP_MUX_BLEND.
+  if (iter.frame_num > 1 && iter.blend_method == WEBP_MUX_BLEND &&
+      !is_key_frame) {
+    if (dec->prev_iter_.dispose_method == WEBP_MUX_DISPOSE_NONE) {
+      int y;
+      // Blend transparent pixels with pixels in previous canvas.
+      for (y = 0; y < iter.height; ++y) {
+        const size_t offset =
+            (iter.y_offset + y) * width + iter.x_offset;
+        blend_row((uint32_t*)dec->curr_frame_ + offset,
+                  (uint32_t*)dec->prev_frame_disposed_ + offset, iter.width);
+      }
+    } else {
+      int y;
+      assert(dec->prev_iter_.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND);
+      // We need to blend a transparent pixel with its value just after
+      // initialization. That is, blend it with:
+      // * Fully transparent pixel if it belongs to prevRect <-- No-op.
+      // * The pixel in the previous canvas otherwise <-- Need alpha-blending.
+      for (y = 0; y < iter.height; ++y) {
+        const int canvas_y = iter.y_offset + y;
+        int left1, width1, left2, width2;
+        FindBlendRangeAtRow(&iter, &dec->prev_iter_, canvas_y, &left1, &width1,
+                            &left2, &width2);
+        if (width1 > 0) {
+          const size_t offset1 = canvas_y * width + left1;
+          blend_row((uint32_t*)dec->curr_frame_ + offset1,
+                    (uint32_t*)dec->prev_frame_disposed_ + offset1, width1);
+        }
+        if (width2 > 0) {
+          const size_t offset2 = canvas_y * width + left2;
+          blend_row((uint32_t*)dec->curr_frame_ + offset2,
+                    (uint32_t*)dec->prev_frame_disposed_ + offset2, width2);
+        }
+      }
+    }
+  }
+
+  // Update info of the previous frame and dispose it for the next iteration.
+  dec->prev_frame_timestamp_ = timestamp;
+  WebPDemuxReleaseIterator(&dec->prev_iter_);
+  dec->prev_iter_ = iter;
+  dec->prev_frame_was_keyframe_ = is_key_frame;
+  CopyCanvas(dec->curr_frame_, dec->prev_frame_disposed_, width, height);
+  if (dec->prev_iter_.dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) {
+    ZeroFillFrameRect(dec->prev_frame_disposed_, width * NUM_CHANNELS,
+                      dec->prev_iter_.x_offset, dec->prev_iter_.y_offset,
+                      dec->prev_iter_.width, dec->prev_iter_.height);
+  }
+  ++dec->next_frame_;
+
+  // All OK, fill in the values.
+  *buf_ptr = dec->curr_frame_;
+  *timestamp_ptr = timestamp;
+  return 1;
+
+ Error:
+  WebPDemuxReleaseIterator(&iter);
+  return 0;
+}
+
+int WebPAnimDecoderHasMoreFrames(const WebPAnimDecoder* dec) {
+  if (dec == NULL) return 0;
+  return (dec->next_frame_ <= (int)dec->info_.frame_count);
+}
+
+void WebPAnimDecoderReset(WebPAnimDecoder* dec) {
+  if (dec != NULL) {
+    dec->prev_frame_timestamp_ = 0;
+    WebPDemuxReleaseIterator(&dec->prev_iter_);
+    memset(&dec->prev_iter_, 0, sizeof(dec->prev_iter_));
+    dec->prev_frame_was_keyframe_ = 0;
+    dec->next_frame_ = 1;
+  }
+}
+
+const WebPDemuxer* WebPAnimDecoderGetDemuxer(const WebPAnimDecoder* dec) {
+  if (dec == NULL) return NULL;
+  return dec->demux_;
+}
+
+void WebPAnimDecoderDelete(WebPAnimDecoder* dec) {
+  if (dec != NULL) {
+    WebPDemuxReleaseIterator(&dec->prev_iter_);
+    WebPDemuxDelete(dec->demux_);
+    WebPSafeFree(dec->curr_frame_);
+    WebPSafeFree(dec->prev_frame_disposed_);
+    WebPSafeFree(dec);
+  }
+}
diff --git a/vendor/github.com/sizeofint/webpanimation/demux_demux.c b/vendor/github.com/sizeofint/webpanimation/demux_demux.c
new file mode 100644
index 00000000..c334309e
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/demux_demux.c
@@ -0,0 +1,972 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// WebP container demux.
+//
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "utils_utils.h"
+#include "webp_decode.h"
+#include "webp_demux.h"
+#include "webp_format_constants.h"
+
+#define DMUX_MAJ_VERSION 1
+#define DMUX_MIN_VERSION 2
+#define DMUX_REV_VERSION 0
+
+typedef struct {
+  size_t start_;        // start location of the data
+  size_t end_;          // end location
+  size_t riff_end_;     // riff chunk end location, can be > end_.
+  size_t buf_size_;     // size of the buffer
+  const uint8_t* buf_;
+} MemBuffer;
+
+typedef struct {
+  size_t offset_;
+  size_t size_;
+} ChunkData;
+
+typedef struct Frame {
+  int x_offset_, y_offset_;
+  int width_, height_;
+  int has_alpha_;
+  int duration_;
+  WebPMuxAnimDispose dispose_method_;
+  WebPMuxAnimBlend blend_method_;
+  int frame_num_;
+  int complete_;   // img_components_ contains a full image.
+ ChunkData img_components_[2]; // 0=VP8{,L} 1=ALPH + struct Frame* next_; +} Frame; + +typedef struct Chunk { + ChunkData data_; + struct Chunk* next_; +} Chunk; + +struct WebPDemuxer { + MemBuffer mem_; + WebPDemuxState state_; + int is_ext_format_; + uint32_t feature_flags_; + int canvas_width_, canvas_height_; + int loop_count_; + uint32_t bgcolor_; + int num_frames_; + Frame* frames_; + Frame** frames_tail_; + Chunk* chunks_; // non-image chunks + Chunk** chunks_tail_; +}; + +typedef enum { + PARSE_OK, + PARSE_NEED_MORE_DATA, + PARSE_ERROR +} ParseStatus; + +typedef struct ChunkParser { + uint8_t id[4]; + ParseStatus (*parse)(WebPDemuxer* const dmux); + int (*valid)(const WebPDemuxer* const dmux); +} ChunkParser; + +static ParseStatus ParseSingleImage(WebPDemuxer* const dmux); +static ParseStatus ParseVP8X(WebPDemuxer* const dmux); +static int IsValidSimpleFormat(const WebPDemuxer* const dmux); +static int IsValidExtendedFormat(const WebPDemuxer* const dmux); + +static const ChunkParser kMasterChunks[] = { + { { 'V', 'P', '8', ' ' }, ParseSingleImage, IsValidSimpleFormat }, + { { 'V', 'P', '8', 'L' }, ParseSingleImage, IsValidSimpleFormat }, + { { 'V', 'P', '8', 'X' }, ParseVP8X, IsValidExtendedFormat }, + { { '0', '0', '0', '0' }, NULL, NULL }, +}; + +//------------------------------------------------------------------------------ + +int WebPGetDemuxVersion(void) { + return (DMUX_MAJ_VERSION << 16) | (DMUX_MIN_VERSION << 8) | DMUX_REV_VERSION; +} + +// ----------------------------------------------------------------------------- +// MemBuffer + +static int RemapMemBuffer(MemBuffer* const mem, + const uint8_t* data, size_t size) { + if (size < mem->buf_size_) return 0; // can't remap to a shorter buffer! + + mem->buf_ = data; + mem->end_ = mem->buf_size_ = size; + return 1; +} + +static int InitMemBuffer(MemBuffer* const mem, + const uint8_t* data, size_t size) { + memset(mem, 0, sizeof(*mem)); + return RemapMemBuffer(mem, data, size); +} + +// Return the remaining data size available in 'mem'. +static WEBP_INLINE size_t MemDataSize(const MemBuffer* const mem) { + return (mem->end_ - mem->start_); +} + +// Return true if 'size' exceeds the end of the RIFF chunk. +static WEBP_INLINE int SizeIsInvalid(const MemBuffer* const mem, size_t size) { + return (size > mem->riff_end_ - mem->start_); +} + +static WEBP_INLINE void Skip(MemBuffer* const mem, size_t size) { + mem->start_ += size; +} + +static WEBP_INLINE void Rewind(MemBuffer* const mem, size_t size) { + mem->start_ -= size; +} + +static WEBP_INLINE const uint8_t* GetBuffer(MemBuffer* const mem) { + return mem->buf_ + mem->start_; +} + +// Read from 'mem' and skip the read bytes. 
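+// Each helper below returns the next 1, 2, 3 or 4 bytes of 'mem' interpreted
+// as a little-endian value (per the RIFF convention) and advances the read
+// position past the bytes consumed.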
+static WEBP_INLINE uint8_t ReadByte(MemBuffer* const mem) { + const uint8_t byte = mem->buf_[mem->start_]; + Skip(mem, 1); + return byte; +} + +static WEBP_INLINE int ReadLE16s(MemBuffer* const mem) { + const uint8_t* const data = mem->buf_ + mem->start_; + const int val = GetLE16(data); + Skip(mem, 2); + return val; +} + +static WEBP_INLINE int ReadLE24s(MemBuffer* const mem) { + const uint8_t* const data = mem->buf_ + mem->start_; + const int val = GetLE24(data); + Skip(mem, 3); + return val; +} + +static WEBP_INLINE uint32_t ReadLE32(MemBuffer* const mem) { + const uint8_t* const data = mem->buf_ + mem->start_; + const uint32_t val = GetLE32(data); + Skip(mem, 4); + return val; +} + +// ----------------------------------------------------------------------------- +// Secondary chunk parsing + +static void AddChunk(WebPDemuxer* const dmux, Chunk* const chunk) { + *dmux->chunks_tail_ = chunk; + chunk->next_ = NULL; + dmux->chunks_tail_ = &chunk->next_; +} + +// Add a frame to the end of the list, ensuring the last frame is complete. +// Returns true on success, false otherwise. +static int AddFrame(WebPDemuxer* const dmux, Frame* const frame) { + const Frame* const last_frame = *dmux->frames_tail_; + if (last_frame != NULL && !last_frame->complete_) return 0; + + *dmux->frames_tail_ = frame; + frame->next_ = NULL; + dmux->frames_tail_ = &frame->next_; + return 1; +} + +static void SetFrameInfo(size_t start_offset, size_t size, + int frame_num, int complete, + const WebPBitstreamFeatures* const features, + Frame* const frame) { + frame->img_components_[0].offset_ = start_offset; + frame->img_components_[0].size_ = size; + frame->width_ = features->width; + frame->height_ = features->height; + frame->has_alpha_ |= features->has_alpha; + frame->frame_num_ = frame_num; + frame->complete_ = complete; +} + +// Store image bearing chunks to 'frame'. 'min_size' is an optional size +// requirement, it may be zero. +static ParseStatus StoreFrame(int frame_num, uint32_t min_size, + MemBuffer* const mem, Frame* const frame) { + int alpha_chunks = 0; + int image_chunks = 0; + int done = (MemDataSize(mem) < CHUNK_HEADER_SIZE || + MemDataSize(mem) < min_size); + ParseStatus status = PARSE_OK; + + if (done) return PARSE_NEED_MORE_DATA; + + do { + const size_t chunk_start_offset = mem->start_; + const uint32_t fourcc = ReadLE32(mem); + const uint32_t payload_size = ReadLE32(mem); + const uint32_t payload_size_padded = payload_size + (payload_size & 1); + const size_t payload_available = (payload_size_padded > MemDataSize(mem)) + ? MemDataSize(mem) : payload_size_padded; + const size_t chunk_size = CHUNK_HEADER_SIZE + payload_available; + + if (payload_size > MAX_CHUNK_PAYLOAD) return PARSE_ERROR; + if (SizeIsInvalid(mem, payload_size_padded)) return PARSE_ERROR; + if (payload_size_padded > MemDataSize(mem)) status = PARSE_NEED_MORE_DATA; + + switch (fourcc) { + case MKFOURCC('A', 'L', 'P', 'H'): + if (alpha_chunks == 0) { + ++alpha_chunks; + frame->img_components_[1].offset_ = chunk_start_offset; + frame->img_components_[1].size_ = chunk_size; + frame->has_alpha_ = 1; + frame->frame_num_ = frame_num; + Skip(mem, payload_available); + } else { + goto Done; + } + break; + case MKFOURCC('V', 'P', '8', 'L'): + if (alpha_chunks > 0) return PARSE_ERROR; // VP8L has its own alpha + // fall through + case MKFOURCC('V', 'P', '8', ' '): + if (image_chunks == 0) { + // Extract the bitstream features, tolerating failures when the data + // is incomplete. 
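+          // (WebPGetFeatures() only inspects the bitstream header, so it can
+          // still report width/height/alpha for a chunk whose payload is
+          // truncated; only a header failure with enough data is fatal.)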
+          WebPBitstreamFeatures features;
+          const VP8StatusCode vp8_status =
+              WebPGetFeatures(mem->buf_ + chunk_start_offset, chunk_size,
+                              &features);
+          if (status == PARSE_NEED_MORE_DATA &&
+              vp8_status == VP8_STATUS_NOT_ENOUGH_DATA) {
+            return PARSE_NEED_MORE_DATA;
+          } else if (vp8_status != VP8_STATUS_OK) {
+            // We have enough data, and yet WebPGetFeatures() failed.
+            return PARSE_ERROR;
+          }
+          ++image_chunks;
+          SetFrameInfo(chunk_start_offset, chunk_size, frame_num,
+                       status == PARSE_OK, &features, frame);
+          Skip(mem, payload_available);
+        } else {
+          goto Done;
+        }
+        break;
+ Done:
+      default:
+        // Restore fourcc/size when moving up one level in parsing.
+        Rewind(mem, CHUNK_HEADER_SIZE);
+        done = 1;
+        break;
+    }
+
+    if (mem->start_ == mem->riff_end_) {
+      done = 1;
+    } else if (MemDataSize(mem) < CHUNK_HEADER_SIZE) {
+      status = PARSE_NEED_MORE_DATA;
+    }
+  } while (!done && status == PARSE_OK);
+
+  return status;
+}
+
+// Creates a new Frame if 'actual_size' is within bounds and 'mem' contains
+// enough data ('min_size') to parse the payload.
+// Returns PARSE_OK on success with *frame pointing to the new Frame.
+// Returns PARSE_NEED_MORE_DATA with insufficient data, PARSE_ERROR otherwise.
+static ParseStatus NewFrame(const MemBuffer* const mem,
+                            uint32_t min_size, uint32_t actual_size,
+                            Frame** frame) {
+  if (SizeIsInvalid(mem, min_size)) return PARSE_ERROR;
+  if (actual_size < min_size) return PARSE_ERROR;
+  if (MemDataSize(mem) < min_size) return PARSE_NEED_MORE_DATA;
+
+  *frame = (Frame*)WebPSafeCalloc(1ULL, sizeof(**frame));
+  return (*frame == NULL) ? PARSE_ERROR : PARSE_OK;
+}
+
+// Parse an 'ANMF' chunk and any image bearing chunks that immediately follow.
+// 'frame_chunk_size' is the previously validated, padded chunk size.
+static ParseStatus ParseAnimationFrame(
+    WebPDemuxer* const dmux, uint32_t frame_chunk_size) {
+  const int is_animation = !!(dmux->feature_flags_ & ANIMATION_FLAG);
+  const uint32_t anmf_payload_size = frame_chunk_size - ANMF_CHUNK_SIZE;
+  int added_frame = 0;
+  int bits;
+  MemBuffer* const mem = &dmux->mem_;
+  Frame* frame;
+  size_t start_offset;
+  ParseStatus status =
+      NewFrame(mem, ANMF_CHUNK_SIZE, frame_chunk_size, &frame);
+  if (status != PARSE_OK) return status;
+
+  frame->x_offset_ = 2 * ReadLE24s(mem);
+  frame->y_offset_ = 2 * ReadLE24s(mem);
+  frame->width_ = 1 + ReadLE24s(mem);
+  frame->height_ = 1 + ReadLE24s(mem);
+  frame->duration_ = ReadLE24s(mem);
+  bits = ReadByte(mem);
+  frame->dispose_method_ =
+      (bits & 1) ? WEBP_MUX_DISPOSE_BACKGROUND : WEBP_MUX_DISPOSE_NONE;
+  frame->blend_method_ = (bits & 2) ? WEBP_MUX_NO_BLEND : WEBP_MUX_BLEND;
+  if (frame->width_ * (uint64_t)frame->height_ >= MAX_IMAGE_AREA) {
+    WebPSafeFree(frame);
+    return PARSE_ERROR;
+  }
+
+  // Store a frame only if the animation flag is set and some data for
+  // this frame is available.
+  start_offset = mem->start_;
+  status = StoreFrame(dmux->num_frames_ + 1, anmf_payload_size, mem, frame);
+  if (status != PARSE_ERROR && mem->start_ - start_offset > anmf_payload_size) {
+    status = PARSE_ERROR;
+  }
+  if (status != PARSE_ERROR && is_animation && frame->frame_num_ > 0) {
+    added_frame = AddFrame(dmux, frame);
+    if (added_frame) {
+      ++dmux->num_frames_;
+    } else {
+      status = PARSE_ERROR;
+    }
+  }
+
+  if (!added_frame) WebPSafeFree(frame);
+  return status;
+}
+
+// General chunk storage, starting with the header at 'start_offset', allowing
+// the user to request the payload via a fourcc string. 'size' includes the
+// header and the unpadded payload size.
+// Returns true on success, false otherwise. +static int StoreChunk(WebPDemuxer* const dmux, + size_t start_offset, uint32_t size) { + Chunk* const chunk = (Chunk*)WebPSafeCalloc(1ULL, sizeof(*chunk)); + if (chunk == NULL) return 0; + + chunk->data_.offset_ = start_offset; + chunk->data_.size_ = size; + AddChunk(dmux, chunk); + return 1; +} + +// ----------------------------------------------------------------------------- +// Primary chunk parsing + +static ParseStatus ReadHeader(MemBuffer* const mem) { + const size_t min_size = RIFF_HEADER_SIZE + CHUNK_HEADER_SIZE; + uint32_t riff_size; + + // Basic file level validation. + if (MemDataSize(mem) < min_size) return PARSE_NEED_MORE_DATA; + if (memcmp(GetBuffer(mem), "RIFF", CHUNK_SIZE_BYTES) || + memcmp(GetBuffer(mem) + CHUNK_HEADER_SIZE, "WEBP", CHUNK_SIZE_BYTES)) { + return PARSE_ERROR; + } + + riff_size = GetLE32(GetBuffer(mem) + TAG_SIZE); + if (riff_size < CHUNK_HEADER_SIZE) return PARSE_ERROR; + if (riff_size > MAX_CHUNK_PAYLOAD) return PARSE_ERROR; + + // There's no point in reading past the end of the RIFF chunk + mem->riff_end_ = riff_size + CHUNK_HEADER_SIZE; + if (mem->buf_size_ > mem->riff_end_) { + mem->buf_size_ = mem->end_ = mem->riff_end_; + } + + Skip(mem, RIFF_HEADER_SIZE); + return PARSE_OK; +} + +static ParseStatus ParseSingleImage(WebPDemuxer* const dmux) { + const size_t min_size = CHUNK_HEADER_SIZE; + MemBuffer* const mem = &dmux->mem_; + Frame* frame; + ParseStatus status; + int image_added = 0; + + if (dmux->frames_ != NULL) return PARSE_ERROR; + if (SizeIsInvalid(mem, min_size)) return PARSE_ERROR; + if (MemDataSize(mem) < min_size) return PARSE_NEED_MORE_DATA; + + frame = (Frame*)WebPSafeCalloc(1ULL, sizeof(*frame)); + if (frame == NULL) return PARSE_ERROR; + + // For the single image case we allow parsing of a partial frame, so no + // minimum size is imposed here. + status = StoreFrame(1, 0, &dmux->mem_, frame); + if (status != PARSE_ERROR) { + const int has_alpha = !!(dmux->feature_flags_ & ALPHA_FLAG); + // Clear any alpha when the alpha flag is missing. + if (!has_alpha && frame->img_components_[1].size_ > 0) { + frame->img_components_[1].offset_ = 0; + frame->img_components_[1].size_ = 0; + frame->has_alpha_ = 0; + } + + // Use the frame width/height as the canvas values for non-vp8x files. + // Also, set ALPHA_FLAG if this is a lossless image with alpha. + if (!dmux->is_ext_format_ && frame->width_ > 0 && frame->height_ > 0) { + dmux->state_ = WEBP_DEMUX_PARSED_HEADER; + dmux->canvas_width_ = frame->width_; + dmux->canvas_height_ = frame->height_; + dmux->feature_flags_ |= frame->has_alpha_ ? 
ALPHA_FLAG : 0; + } + if (!AddFrame(dmux, frame)) { + status = PARSE_ERROR; // last frame was left incomplete + } else { + image_added = 1; + dmux->num_frames_ = 1; + } + } + + if (!image_added) WebPSafeFree(frame); + return status; +} + +static ParseStatus ParseVP8XChunks(WebPDemuxer* const dmux) { + const int is_animation = !!(dmux->feature_flags_ & ANIMATION_FLAG); + MemBuffer* const mem = &dmux->mem_; + int anim_chunks = 0; + ParseStatus status = PARSE_OK; + + do { + int store_chunk = 1; + const size_t chunk_start_offset = mem->start_; + const uint32_t fourcc = ReadLE32(mem); + const uint32_t chunk_size = ReadLE32(mem); + const uint32_t chunk_size_padded = chunk_size + (chunk_size & 1); + + if (chunk_size > MAX_CHUNK_PAYLOAD) return PARSE_ERROR; + if (SizeIsInvalid(mem, chunk_size_padded)) return PARSE_ERROR; + + switch (fourcc) { + case MKFOURCC('V', 'P', '8', 'X'): { + return PARSE_ERROR; + } + case MKFOURCC('A', 'L', 'P', 'H'): + case MKFOURCC('V', 'P', '8', ' '): + case MKFOURCC('V', 'P', '8', 'L'): { + // check that this isn't an animation (all frames should be in an ANMF). + if (anim_chunks > 0 || is_animation) return PARSE_ERROR; + + Rewind(mem, CHUNK_HEADER_SIZE); + status = ParseSingleImage(dmux); + break; + } + case MKFOURCC('A', 'N', 'I', 'M'): { + if (chunk_size_padded < ANIM_CHUNK_SIZE) return PARSE_ERROR; + + if (MemDataSize(mem) < chunk_size_padded) { + status = PARSE_NEED_MORE_DATA; + } else if (anim_chunks == 0) { + ++anim_chunks; + dmux->bgcolor_ = ReadLE32(mem); + dmux->loop_count_ = ReadLE16s(mem); + Skip(mem, chunk_size_padded - ANIM_CHUNK_SIZE); + } else { + store_chunk = 0; + goto Skip; + } + break; + } + case MKFOURCC('A', 'N', 'M', 'F'): { + if (anim_chunks == 0) return PARSE_ERROR; // 'ANIM' precedes frames. + status = ParseAnimationFrame(dmux, chunk_size_padded); + break; + } + case MKFOURCC('I', 'C', 'C', 'P'): { + store_chunk = !!(dmux->feature_flags_ & ICCP_FLAG); + goto Skip; + } + case MKFOURCC('E', 'X', 'I', 'F'): { + store_chunk = !!(dmux->feature_flags_ & EXIF_FLAG); + goto Skip; + } + case MKFOURCC('X', 'M', 'P', ' '): { + store_chunk = !!(dmux->feature_flags_ & XMP_FLAG); + goto Skip; + } + Skip: + default: { + if (chunk_size_padded <= MemDataSize(mem)) { + if (store_chunk) { + // Store only the chunk header and unpadded size as only the payload + // will be returned to the user. + if (!StoreChunk(dmux, chunk_start_offset, + CHUNK_HEADER_SIZE + chunk_size)) { + return PARSE_ERROR; + } + } + Skip(mem, chunk_size_padded); + } else { + status = PARSE_NEED_MORE_DATA; + } + } + } + + if (mem->start_ == mem->riff_end_) { + break; + } else if (MemDataSize(mem) < CHUNK_HEADER_SIZE) { + status = PARSE_NEED_MORE_DATA; + } + } while (status == PARSE_OK); + + return status; +} + +static ParseStatus ParseVP8X(WebPDemuxer* const dmux) { + MemBuffer* const mem = &dmux->mem_; + uint32_t vp8x_size; + + if (MemDataSize(mem) < CHUNK_HEADER_SIZE) return PARSE_NEED_MORE_DATA; + + dmux->is_ext_format_ = 1; + Skip(mem, TAG_SIZE); // VP8X + vp8x_size = ReadLE32(mem); + if (vp8x_size > MAX_CHUNK_PAYLOAD) return PARSE_ERROR; + if (vp8x_size < VP8X_CHUNK_SIZE) return PARSE_ERROR; + vp8x_size += vp8x_size & 1; + if (SizeIsInvalid(mem, vp8x_size)) return PARSE_ERROR; + if (MemDataSize(mem) < vp8x_size) return PARSE_NEED_MORE_DATA; + + dmux->feature_flags_ = ReadByte(mem); + Skip(mem, 3); // Reserved. 
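+  // Canvas dimensions are stored minus one as 24-bit little-endian values,
+  // so the decoded range is 1..2^24 and a zero-sized canvas cannot occur.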
+ dmux->canvas_width_ = 1 + ReadLE24s(mem); + dmux->canvas_height_ = 1 + ReadLE24s(mem); + if (dmux->canvas_width_ * (uint64_t)dmux->canvas_height_ >= MAX_IMAGE_AREA) { + return PARSE_ERROR; // image final dimension is too large + } + Skip(mem, vp8x_size - VP8X_CHUNK_SIZE); // skip any trailing data. + dmux->state_ = WEBP_DEMUX_PARSED_HEADER; + + if (SizeIsInvalid(mem, CHUNK_HEADER_SIZE)) return PARSE_ERROR; + if (MemDataSize(mem) < CHUNK_HEADER_SIZE) return PARSE_NEED_MORE_DATA; + + return ParseVP8XChunks(dmux); +} + +// ----------------------------------------------------------------------------- +// Format validation + +static int IsValidSimpleFormat(const WebPDemuxer* const dmux) { + const Frame* const frame = dmux->frames_; + if (dmux->state_ == WEBP_DEMUX_PARSING_HEADER) return 1; + + if (dmux->canvas_width_ <= 0 || dmux->canvas_height_ <= 0) return 0; + if (dmux->state_ == WEBP_DEMUX_DONE && frame == NULL) return 0; + + if (frame->width_ <= 0 || frame->height_ <= 0) return 0; + return 1; +} + +// If 'exact' is true, check that the image resolution matches the canvas. +// If 'exact' is false, check that the x/y offsets do not exceed the canvas. +static int CheckFrameBounds(const Frame* const frame, int exact, + int canvas_width, int canvas_height) { + if (exact) { + if (frame->x_offset_ != 0 || frame->y_offset_ != 0) { + return 0; + } + if (frame->width_ != canvas_width || frame->height_ != canvas_height) { + return 0; + } + } else { + if (frame->x_offset_ < 0 || frame->y_offset_ < 0) return 0; + if (frame->width_ + frame->x_offset_ > canvas_width) return 0; + if (frame->height_ + frame->y_offset_ > canvas_height) return 0; + } + return 1; +} + +static int IsValidExtendedFormat(const WebPDemuxer* const dmux) { + const int is_animation = !!(dmux->feature_flags_ & ANIMATION_FLAG); + const Frame* f = dmux->frames_; + + if (dmux->state_ == WEBP_DEMUX_PARSING_HEADER) return 1; + + if (dmux->canvas_width_ <= 0 || dmux->canvas_height_ <= 0) return 0; + if (dmux->loop_count_ < 0) return 0; + if (dmux->state_ == WEBP_DEMUX_DONE && dmux->frames_ == NULL) return 0; + if (dmux->feature_flags_ & ~ALL_VALID_FLAGS) return 0; // invalid bitstream + + while (f != NULL) { + const int cur_frame_set = f->frame_num_; + int frame_count = 0; + + // Check frame properties. + for (; f != NULL && f->frame_num_ == cur_frame_set; f = f->next_) { + const ChunkData* const image = f->img_components_; + const ChunkData* const alpha = f->img_components_ + 1; + + if (!is_animation && f->frame_num_ > 1) return 0; + + if (f->complete_) { + if (alpha->size_ == 0 && image->size_ == 0) return 0; + // Ensure alpha precedes image bitstream. + if (alpha->size_ > 0 && alpha->offset_ > image->offset_) { + return 0; + } + + if (f->width_ <= 0 || f->height_ <= 0) return 0; + } else { + // There shouldn't be a partial frame in a complete file. + if (dmux->state_ == WEBP_DEMUX_DONE) return 0; + + // Ensure alpha precedes image bitstream. + if (alpha->size_ > 0 && image->size_ > 0 && + alpha->offset_ > image->offset_) { + return 0; + } + // There shouldn't be any frames after an incomplete one. 
+ if (f->next_ != NULL) return 0; + } + + if (f->width_ > 0 && f->height_ > 0 && + !CheckFrameBounds(f, !is_animation, + dmux->canvas_width_, dmux->canvas_height_)) { + return 0; + } + + ++frame_count; + } + } + return 1; +} + +// ----------------------------------------------------------------------------- +// WebPDemuxer object + +static void InitDemux(WebPDemuxer* const dmux, const MemBuffer* const mem) { + dmux->state_ = WEBP_DEMUX_PARSING_HEADER; + dmux->loop_count_ = 1; + dmux->bgcolor_ = 0xFFFFFFFF; // White background by default. + dmux->canvas_width_ = -1; + dmux->canvas_height_ = -1; + dmux->frames_tail_ = &dmux->frames_; + dmux->chunks_tail_ = &dmux->chunks_; + dmux->mem_ = *mem; +} + +static ParseStatus CreateRawImageDemuxer(MemBuffer* const mem, + WebPDemuxer** demuxer) { + WebPBitstreamFeatures features; + const VP8StatusCode status = + WebPGetFeatures(mem->buf_, mem->buf_size_, &features); + *demuxer = NULL; + if (status != VP8_STATUS_OK) { + return (status == VP8_STATUS_NOT_ENOUGH_DATA) ? PARSE_NEED_MORE_DATA + : PARSE_ERROR; + } + + { + WebPDemuxer* const dmux = (WebPDemuxer*)WebPSafeCalloc(1ULL, sizeof(*dmux)); + Frame* const frame = (Frame*)WebPSafeCalloc(1ULL, sizeof(*frame)); + if (dmux == NULL || frame == NULL) goto Error; + InitDemux(dmux, mem); + SetFrameInfo(0, mem->buf_size_, 1 /*frame_num*/, 1 /*complete*/, &features, + frame); + if (!AddFrame(dmux, frame)) goto Error; + dmux->state_ = WEBP_DEMUX_DONE; + dmux->canvas_width_ = frame->width_; + dmux->canvas_height_ = frame->height_; + dmux->feature_flags_ |= frame->has_alpha_ ? ALPHA_FLAG : 0; + dmux->num_frames_ = 1; + assert(IsValidSimpleFormat(dmux)); + *demuxer = dmux; + return PARSE_OK; + + Error: + WebPSafeFree(dmux); + WebPSafeFree(frame); + return PARSE_ERROR; + } +} + +WebPDemuxer* WebPDemuxInternal(const WebPData* data, int allow_partial, + WebPDemuxState* state, int version) { + const ChunkParser* parser; + int partial; + ParseStatus status = PARSE_ERROR; + MemBuffer mem; + WebPDemuxer* dmux; + + if (state != NULL) *state = WEBP_DEMUX_PARSE_ERROR; + + if (WEBP_ABI_IS_INCOMPATIBLE(version, WEBP_DEMUX_ABI_VERSION)) return NULL; + if (data == NULL || data->bytes == NULL || data->size == 0) return NULL; + + if (!InitMemBuffer(&mem, data->bytes, data->size)) return NULL; + status = ReadHeader(&mem); + if (status != PARSE_OK) { + // If parsing of the webp file header fails attempt to handle a raw + // VP8/VP8L frame. Note 'allow_partial' is ignored in this case. + if (status == PARSE_ERROR) { + status = CreateRawImageDemuxer(&mem, &dmux); + if (status == PARSE_OK) { + if (state != NULL) *state = WEBP_DEMUX_DONE; + return dmux; + } + } + if (state != NULL) { + *state = (status == PARSE_NEED_MORE_DATA) ? 
WEBP_DEMUX_PARSING_HEADER + : WEBP_DEMUX_PARSE_ERROR; + } + return NULL; + } + + partial = (mem.buf_size_ < mem.riff_end_); + if (!allow_partial && partial) return NULL; + + dmux = (WebPDemuxer*)WebPSafeCalloc(1ULL, sizeof(*dmux)); + if (dmux == NULL) return NULL; + InitDemux(dmux, &mem); + + status = PARSE_ERROR; + for (parser = kMasterChunks; parser->parse != NULL; ++parser) { + if (!memcmp(parser->id, GetBuffer(&dmux->mem_), TAG_SIZE)) { + status = parser->parse(dmux); + if (status == PARSE_OK) dmux->state_ = WEBP_DEMUX_DONE; + if (status == PARSE_NEED_MORE_DATA && !partial) status = PARSE_ERROR; + if (status != PARSE_ERROR && !parser->valid(dmux)) status = PARSE_ERROR; + if (status == PARSE_ERROR) dmux->state_ = WEBP_DEMUX_PARSE_ERROR; + break; + } + } + if (state != NULL) *state = dmux->state_; + + if (status == PARSE_ERROR) { + WebPDemuxDelete(dmux); + return NULL; + } + return dmux; +} + +void WebPDemuxDelete(WebPDemuxer* dmux) { + Chunk* c; + Frame* f; + if (dmux == NULL) return; + + for (f = dmux->frames_; f != NULL;) { + Frame* const cur_frame = f; + f = f->next_; + WebPSafeFree(cur_frame); + } + for (c = dmux->chunks_; c != NULL;) { + Chunk* const cur_chunk = c; + c = c->next_; + WebPSafeFree(cur_chunk); + } + WebPSafeFree(dmux); +} + +// ----------------------------------------------------------------------------- + +uint32_t WebPDemuxGetI(const WebPDemuxer* dmux, WebPFormatFeature feature) { + if (dmux == NULL) return 0; + + switch (feature) { + case WEBP_FF_FORMAT_FLAGS: return dmux->feature_flags_; + case WEBP_FF_CANVAS_WIDTH: return (uint32_t)dmux->canvas_width_; + case WEBP_FF_CANVAS_HEIGHT: return (uint32_t)dmux->canvas_height_; + case WEBP_FF_LOOP_COUNT: return (uint32_t)dmux->loop_count_; + case WEBP_FF_BACKGROUND_COLOR: return dmux->bgcolor_; + case WEBP_FF_FRAME_COUNT: return (uint32_t)dmux->num_frames_; + } + return 0; +} + +// ----------------------------------------------------------------------------- +// Frame iteration + +static const Frame* GetFrame(const WebPDemuxer* const dmux, int frame_num) { + const Frame* f; + for (f = dmux->frames_; f != NULL; f = f->next_) { + if (frame_num == f->frame_num_) break; + } + return f; +} + +static const uint8_t* GetFramePayload(const uint8_t* const mem_buf, + const Frame* const frame, + size_t* const data_size) { + *data_size = 0; + if (frame != NULL) { + const ChunkData* const image = frame->img_components_; + const ChunkData* const alpha = frame->img_components_ + 1; + size_t start_offset = image->offset_; + *data_size = image->size_; + + // if alpha exists it precedes image, update the size allowing for + // intervening chunks. + if (alpha->size_ > 0) { + const size_t inter_size = (image->offset_ > 0) + ? image->offset_ - (alpha->offset_ + alpha->size_) + : 0; + start_offset = alpha->offset_; + *data_size += alpha->size_ + inter_size; + } + return mem_buf + start_offset; + } + return NULL; +} + +// Create a whole 'frame' from VP8 (+ alpha) or lossless. 
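+// The iterator's fragment is a single contiguous byte range: when an ALPH
+// chunk is present, the range starts there and runs through the end of the
+// image bitstream, including any chunks sitting between the two (see
+// GetFramePayload() above).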
+static int SynthesizeFrame(const WebPDemuxer* const dmux, + const Frame* const frame, + WebPIterator* const iter) { + const uint8_t* const mem_buf = dmux->mem_.buf_; + size_t payload_size = 0; + const uint8_t* const payload = GetFramePayload(mem_buf, frame, &payload_size); + if (payload == NULL) return 0; + assert(frame != NULL); + + iter->frame_num = frame->frame_num_; + iter->num_frames = dmux->num_frames_; + iter->x_offset = frame->x_offset_; + iter->y_offset = frame->y_offset_; + iter->width = frame->width_; + iter->height = frame->height_; + iter->has_alpha = frame->has_alpha_; + iter->duration = frame->duration_; + iter->dispose_method = frame->dispose_method_; + iter->blend_method = frame->blend_method_; + iter->complete = frame->complete_; + iter->fragment.bytes = payload; + iter->fragment.size = payload_size; + return 1; +} + +static int SetFrame(int frame_num, WebPIterator* const iter) { + const Frame* frame; + const WebPDemuxer* const dmux = (WebPDemuxer*)iter->private_; + if (dmux == NULL || frame_num < 0) return 0; + if (frame_num > dmux->num_frames_) return 0; + if (frame_num == 0) frame_num = dmux->num_frames_; + + frame = GetFrame(dmux, frame_num); + if (frame == NULL) return 0; + + return SynthesizeFrame(dmux, frame, iter); +} + +int WebPDemuxGetFrame(const WebPDemuxer* dmux, int frame, WebPIterator* iter) { + if (iter == NULL) return 0; + + memset(iter, 0, sizeof(*iter)); + iter->private_ = (void*)dmux; + return SetFrame(frame, iter); +} + +int WebPDemuxNextFrame(WebPIterator* iter) { + if (iter == NULL) return 0; + return SetFrame(iter->frame_num + 1, iter); +} + +int WebPDemuxPrevFrame(WebPIterator* iter) { + if (iter == NULL) return 0; + if (iter->frame_num <= 1) return 0; + return SetFrame(iter->frame_num - 1, iter); +} + +void WebPDemuxReleaseIterator(WebPIterator* iter) { + (void)iter; +} + +// ----------------------------------------------------------------------------- +// Chunk iteration + +static int ChunkCount(const WebPDemuxer* const dmux, const char fourcc[4]) { + const uint8_t* const mem_buf = dmux->mem_.buf_; + const Chunk* c; + int count = 0; + for (c = dmux->chunks_; c != NULL; c = c->next_) { + const uint8_t* const header = mem_buf + c->data_.offset_; + if (!memcmp(header, fourcc, TAG_SIZE)) ++count; + } + return count; +} + +static const Chunk* GetChunk(const WebPDemuxer* const dmux, + const char fourcc[4], int chunk_num) { + const uint8_t* const mem_buf = dmux->mem_.buf_; + const Chunk* c; + int count = 0; + for (c = dmux->chunks_; c != NULL; c = c->next_) { + const uint8_t* const header = mem_buf + c->data_.offset_; + if (!memcmp(header, fourcc, TAG_SIZE)) ++count; + if (count == chunk_num) break; + } + return c; +} + +static int SetChunk(const char fourcc[4], int chunk_num, + WebPChunkIterator* const iter) { + const WebPDemuxer* const dmux = (WebPDemuxer*)iter->private_; + int count; + + if (dmux == NULL || fourcc == NULL || chunk_num < 0) return 0; + count = ChunkCount(dmux, fourcc); + if (count == 0) return 0; + if (chunk_num == 0) chunk_num = count; + + if (chunk_num <= count) { + const uint8_t* const mem_buf = dmux->mem_.buf_; + const Chunk* const chunk = GetChunk(dmux, fourcc, chunk_num); + iter->chunk.bytes = mem_buf + chunk->data_.offset_ + CHUNK_HEADER_SIZE; + iter->chunk.size = chunk->data_.size_ - CHUNK_HEADER_SIZE; + iter->num_chunks = count; + iter->chunk_num = chunk_num; + return 1; + } + return 0; +} + +int WebPDemuxGetChunk(const WebPDemuxer* dmux, + const char fourcc[4], int chunk_num, + WebPChunkIterator* iter) { + if (iter == NULL) 
    return 0;
+
+  memset(iter, 0, sizeof(*iter));
+  iter->private_ = (void*)dmux;
+  return SetChunk(fourcc, chunk_num, iter);
+}
+
+int WebPDemuxNextChunk(WebPChunkIterator* iter) {
+  if (iter != NULL) {
+    const char* const fourcc =
+        (const char*)iter->chunk.bytes - CHUNK_HEADER_SIZE;
+    return SetChunk(fourcc, iter->chunk_num + 1, iter);
+  }
+  return 0;
+}
+
+int WebPDemuxPrevChunk(WebPChunkIterator* iter) {
+  if (iter != NULL && iter->chunk_num > 1) {
+    const char* const fourcc =
+        (const char*)iter->chunk.bytes - CHUNK_HEADER_SIZE;
+    return SetChunk(fourcc, iter->chunk_num - 1, iter);
+  }
+  return 0;
+}
+
+void WebPDemuxReleaseChunkIterator(WebPChunkIterator* iter) {
+  (void)iter;
+}
+
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_alpha_processing.c b/vendor/github.com/sizeofint/webpanimation/dsp_alpha_processing.c
new file mode 100644
index 00000000..d5f16e6a
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_alpha_processing.c
@@ -0,0 +1,480 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Utilities for processing transparent channel.
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <assert.h>
+#include "dsp_dsp.h"
+
+// Tables can be faster on some platforms but incur some extra binary size (~2k).
+#if !defined(USE_TABLES_FOR_ALPHA_MULT)
+#define USE_TABLES_FOR_ALPHA_MULT 0   // ALTERNATE_CODE
+#endif
+
+
+// -----------------------------------------------------------------------------
+
+#define MFIX 24    // 24bit fixed-point arithmetic
+#define HALF ((1u << MFIX) >> 1)
+#define KINV_255 ((1u << MFIX) / 255u)
+
+static uint32_t Mult(uint8_t x, uint32_t mult) {
+  const uint32_t v = (x * mult + HALF) >> MFIX;
+  assert(v <= 255);  // <- 24bit precision is enough to ensure that.
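+  // Worst case for the forward scale: mult == 255 * KINV_255 == 0xFFFFFF,
+  // and (255 * 0xFFFFFF + HALF) >> MFIX is still 255.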
+ return v; +} + +#if (USE_TABLES_FOR_ALPHA_MULT == 1) + +static const uint32_t kMultTables[2][256] = { + { // (255u << MFIX) / alpha + 0x00000000, 0xff000000, 0x7f800000, 0x55000000, 0x3fc00000, 0x33000000, + 0x2a800000, 0x246db6db, 0x1fe00000, 0x1c555555, 0x19800000, 0x172e8ba2, + 0x15400000, 0x139d89d8, 0x1236db6d, 0x11000000, 0x0ff00000, 0x0f000000, + 0x0e2aaaaa, 0x0d6bca1a, 0x0cc00000, 0x0c249249, 0x0b9745d1, 0x0b1642c8, + 0x0aa00000, 0x0a333333, 0x09cec4ec, 0x0971c71c, 0x091b6db6, 0x08cb08d3, + 0x08800000, 0x0839ce73, 0x07f80000, 0x07ba2e8b, 0x07800000, 0x07492492, + 0x07155555, 0x06e45306, 0x06b5e50d, 0x0689d89d, 0x06600000, 0x063831f3, + 0x06124924, 0x05ee23b8, 0x05cba2e8, 0x05aaaaaa, 0x058b2164, 0x056cefa8, + 0x05500000, 0x05343eb1, 0x05199999, 0x05000000, 0x04e76276, 0x04cfb2b7, + 0x04b8e38e, 0x04a2e8ba, 0x048db6db, 0x0479435e, 0x04658469, 0x045270d0, + 0x04400000, 0x042e29f7, 0x041ce739, 0x040c30c3, 0x03fc0000, 0x03ec4ec4, + 0x03dd1745, 0x03ce540f, 0x03c00000, 0x03b21642, 0x03a49249, 0x03976fc6, + 0x038aaaaa, 0x037e3f1f, 0x03722983, 0x03666666, 0x035af286, 0x034fcace, + 0x0344ec4e, 0x033a5440, 0x03300000, 0x0325ed09, 0x031c18f9, 0x0312818a, + 0x03092492, 0x03000000, 0x02f711dc, 0x02ee5846, 0x02e5d174, 0x02dd7baf, + 0x02d55555, 0x02cd5cd5, 0x02c590b2, 0x02bdef7b, 0x02b677d4, 0x02af286b, + 0x02a80000, 0x02a0fd5c, 0x029a1f58, 0x029364d9, 0x028ccccc, 0x0286562d, + 0x02800000, 0x0279c952, 0x0273b13b, 0x026db6db, 0x0267d95b, 0x026217ec, + 0x025c71c7, 0x0256e62a, 0x0251745d, 0x024c1bac, 0x0246db6d, 0x0241b2f9, + 0x023ca1af, 0x0237a6f4, 0x0232c234, 0x022df2df, 0x02293868, 0x02249249, + 0x02200000, 0x021b810e, 0x021714fb, 0x0212bb51, 0x020e739c, 0x020a3d70, + 0x02061861, 0x02020408, 0x01fe0000, 0x01fa0be8, 0x01f62762, 0x01f25213, + 0x01ee8ba2, 0x01ead3ba, 0x01e72a07, 0x01e38e38, 0x01e00000, 0x01dc7f10, + 0x01d90b21, 0x01d5a3e9, 0x01d24924, 0x01cefa8d, 0x01cbb7e3, 0x01c880e5, + 0x01c55555, 0x01c234f7, 0x01bf1f8f, 0x01bc14e5, 0x01b914c1, 0x01b61eed, + 0x01b33333, 0x01b05160, 0x01ad7943, 0x01aaaaaa, 0x01a7e567, 0x01a5294a, + 0x01a27627, 0x019fcbd2, 0x019d2a20, 0x019a90e7, 0x01980000, 0x01957741, + 0x0192f684, 0x01907da4, 0x018e0c7c, 0x018ba2e8, 0x018940c5, 0x0186e5f0, + 0x01849249, 0x018245ae, 0x01800000, 0x017dc11f, 0x017b88ee, 0x0179574e, + 0x01772c23, 0x01750750, 0x0172e8ba, 0x0170d045, 0x016ebdd7, 0x016cb157, + 0x016aaaaa, 0x0168a9b9, 0x0166ae6a, 0x0164b8a7, 0x0162c859, 0x0160dd67, + 0x015ef7bd, 0x015d1745, 0x015b3bea, 0x01596596, 0x01579435, 0x0155c7b4, + 0x01540000, 0x01523d03, 0x01507eae, 0x014ec4ec, 0x014d0fac, 0x014b5edc, + 0x0149b26c, 0x01480a4a, 0x01466666, 0x0144c6af, 0x01432b16, 0x0141938b, + 0x01400000, 0x013e7063, 0x013ce4a9, 0x013b5cc0, 0x0139d89d, 0x01385830, + 0x0136db6d, 0x01356246, 0x0133ecad, 0x01327a97, 0x01310bf6, 0x012fa0be, + 0x012e38e3, 0x012cd459, 0x012b7315, 0x012a150a, 0x0128ba2e, 0x01276276, + 0x01260dd6, 0x0124bc44, 0x01236db6, 0x01222222, 0x0120d97c, 0x011f93bc, + 0x011e50d7, 0x011d10c4, 0x011bd37a, 0x011a98ef, 0x0119611a, 0x01182bf2, + 0x0116f96f, 0x0115c988, 0x01149c34, 0x0113716a, 0x01124924, 0x01112358, + 0x01100000, 0x010edf12, 0x010dc087, 0x010ca458, 0x010b8a7d, 0x010a72f0, + 0x01095da8, 0x01084a9f, 0x010739ce, 0x01062b2e, 0x01051eb8, 0x01041465, + 0x01030c30, 0x01020612, 0x01010204, 0x01000000 }, + { // alpha * KINV_255 + 0x00000000, 0x00010101, 0x00020202, 0x00030303, 0x00040404, 0x00050505, + 0x00060606, 0x00070707, 0x00080808, 0x00090909, 0x000a0a0a, 0x000b0b0b, + 0x000c0c0c, 0x000d0d0d, 0x000e0e0e, 0x000f0f0f, 0x00101010, 0x00111111, + 0x00121212, 
0x00131313, 0x00141414, 0x00151515, 0x00161616, 0x00171717, + 0x00181818, 0x00191919, 0x001a1a1a, 0x001b1b1b, 0x001c1c1c, 0x001d1d1d, + 0x001e1e1e, 0x001f1f1f, 0x00202020, 0x00212121, 0x00222222, 0x00232323, + 0x00242424, 0x00252525, 0x00262626, 0x00272727, 0x00282828, 0x00292929, + 0x002a2a2a, 0x002b2b2b, 0x002c2c2c, 0x002d2d2d, 0x002e2e2e, 0x002f2f2f, + 0x00303030, 0x00313131, 0x00323232, 0x00333333, 0x00343434, 0x00353535, + 0x00363636, 0x00373737, 0x00383838, 0x00393939, 0x003a3a3a, 0x003b3b3b, + 0x003c3c3c, 0x003d3d3d, 0x003e3e3e, 0x003f3f3f, 0x00404040, 0x00414141, + 0x00424242, 0x00434343, 0x00444444, 0x00454545, 0x00464646, 0x00474747, + 0x00484848, 0x00494949, 0x004a4a4a, 0x004b4b4b, 0x004c4c4c, 0x004d4d4d, + 0x004e4e4e, 0x004f4f4f, 0x00505050, 0x00515151, 0x00525252, 0x00535353, + 0x00545454, 0x00555555, 0x00565656, 0x00575757, 0x00585858, 0x00595959, + 0x005a5a5a, 0x005b5b5b, 0x005c5c5c, 0x005d5d5d, 0x005e5e5e, 0x005f5f5f, + 0x00606060, 0x00616161, 0x00626262, 0x00636363, 0x00646464, 0x00656565, + 0x00666666, 0x00676767, 0x00686868, 0x00696969, 0x006a6a6a, 0x006b6b6b, + 0x006c6c6c, 0x006d6d6d, 0x006e6e6e, 0x006f6f6f, 0x00707070, 0x00717171, + 0x00727272, 0x00737373, 0x00747474, 0x00757575, 0x00767676, 0x00777777, + 0x00787878, 0x00797979, 0x007a7a7a, 0x007b7b7b, 0x007c7c7c, 0x007d7d7d, + 0x007e7e7e, 0x007f7f7f, 0x00808080, 0x00818181, 0x00828282, 0x00838383, + 0x00848484, 0x00858585, 0x00868686, 0x00878787, 0x00888888, 0x00898989, + 0x008a8a8a, 0x008b8b8b, 0x008c8c8c, 0x008d8d8d, 0x008e8e8e, 0x008f8f8f, + 0x00909090, 0x00919191, 0x00929292, 0x00939393, 0x00949494, 0x00959595, + 0x00969696, 0x00979797, 0x00989898, 0x00999999, 0x009a9a9a, 0x009b9b9b, + 0x009c9c9c, 0x009d9d9d, 0x009e9e9e, 0x009f9f9f, 0x00a0a0a0, 0x00a1a1a1, + 0x00a2a2a2, 0x00a3a3a3, 0x00a4a4a4, 0x00a5a5a5, 0x00a6a6a6, 0x00a7a7a7, + 0x00a8a8a8, 0x00a9a9a9, 0x00aaaaaa, 0x00ababab, 0x00acacac, 0x00adadad, + 0x00aeaeae, 0x00afafaf, 0x00b0b0b0, 0x00b1b1b1, 0x00b2b2b2, 0x00b3b3b3, + 0x00b4b4b4, 0x00b5b5b5, 0x00b6b6b6, 0x00b7b7b7, 0x00b8b8b8, 0x00b9b9b9, + 0x00bababa, 0x00bbbbbb, 0x00bcbcbc, 0x00bdbdbd, 0x00bebebe, 0x00bfbfbf, + 0x00c0c0c0, 0x00c1c1c1, 0x00c2c2c2, 0x00c3c3c3, 0x00c4c4c4, 0x00c5c5c5, + 0x00c6c6c6, 0x00c7c7c7, 0x00c8c8c8, 0x00c9c9c9, 0x00cacaca, 0x00cbcbcb, + 0x00cccccc, 0x00cdcdcd, 0x00cecece, 0x00cfcfcf, 0x00d0d0d0, 0x00d1d1d1, + 0x00d2d2d2, 0x00d3d3d3, 0x00d4d4d4, 0x00d5d5d5, 0x00d6d6d6, 0x00d7d7d7, + 0x00d8d8d8, 0x00d9d9d9, 0x00dadada, 0x00dbdbdb, 0x00dcdcdc, 0x00dddddd, + 0x00dedede, 0x00dfdfdf, 0x00e0e0e0, 0x00e1e1e1, 0x00e2e2e2, 0x00e3e3e3, + 0x00e4e4e4, 0x00e5e5e5, 0x00e6e6e6, 0x00e7e7e7, 0x00e8e8e8, 0x00e9e9e9, + 0x00eaeaea, 0x00ebebeb, 0x00ececec, 0x00ededed, 0x00eeeeee, 0x00efefef, + 0x00f0f0f0, 0x00f1f1f1, 0x00f2f2f2, 0x00f3f3f3, 0x00f4f4f4, 0x00f5f5f5, + 0x00f6f6f6, 0x00f7f7f7, 0x00f8f8f8, 0x00f9f9f9, 0x00fafafa, 0x00fbfbfb, + 0x00fcfcfc, 0x00fdfdfd, 0x00fefefe, 0x00ffffff } +}; + +static WEBP_INLINE uint32_t GetScale(uint32_t a, int inverse) { + return kMultTables[!inverse][a]; +} + +#else + +static WEBP_INLINE uint32_t GetScale(uint32_t a, int inverse) { + return inverse ? 
(255u << MFIX) / a : a * KINV_255; +} + +#endif // USE_TABLES_FOR_ALPHA_MULT + +void WebPMultARGBRow_C(uint32_t* const ptr, int width, int inverse) { + int x; + for (x = 0; x < width; ++x) { + const uint32_t argb = ptr[x]; + if (argb < 0xff000000u) { // alpha < 255 + if (argb <= 0x00ffffffu) { // alpha == 0 + ptr[x] = 0; + } else { + const uint32_t alpha = (argb >> 24) & 0xff; + const uint32_t scale = GetScale(alpha, inverse); + uint32_t out = argb & 0xff000000u; + out |= Mult(argb >> 0, scale) << 0; + out |= Mult(argb >> 8, scale) << 8; + out |= Mult(argb >> 16, scale) << 16; + ptr[x] = out; + } + } + } +} + +void WebPMultRow_C(uint8_t* const ptr, const uint8_t* const alpha, + int width, int inverse) { + int x; + for (x = 0; x < width; ++x) { + const uint32_t a = alpha[x]; + if (a != 255) { + if (a == 0) { + ptr[x] = 0; + } else { + const uint32_t scale = GetScale(a, inverse); + ptr[x] = Mult(ptr[x], scale); + } + } + } +} + +#undef KINV_255 +#undef HALF +#undef MFIX + +void (*WebPMultARGBRow)(uint32_t* const ptr, int width, int inverse); +void (*WebPMultRow)(uint8_t* const ptr, const uint8_t* const alpha, + int width, int inverse); + +//------------------------------------------------------------------------------ +// Generic per-plane calls + +void WebPMultARGBRows(uint8_t* ptr, int stride, int width, int num_rows, + int inverse) { + int n; + for (n = 0; n < num_rows; ++n) { + WebPMultARGBRow((uint32_t*)ptr, width, inverse); + ptr += stride; + } +} + +void WebPMultRows(uint8_t* ptr, int stride, + const uint8_t* alpha, int alpha_stride, + int width, int num_rows, int inverse) { + int n; + for (n = 0; n < num_rows; ++n) { + WebPMultRow(ptr, alpha, width, inverse); + ptr += stride; + alpha += alpha_stride; + } +} + +//------------------------------------------------------------------------------ +// Premultiplied modes + +// non dithered-modes + +// (x * a * 32897) >> 23 is bit-wise equivalent to (int)(x * a / 255.) +// for all 8bit x or a. For bit-wise equivalence to (int)(x * a / 255. + .5), +// one can use instead: (x * a * 65793 + (1 << 23)) >> 24 +#if 1 // (int)(x * a / 255.) +#define MULTIPLIER(a) ((a) * 32897U) +#define PREMULTIPLY(x, m) (((x) * (m)) >> 23) +#else // (int)(x * a / 255. + .5) +#define MULTIPLIER(a) ((a) * 65793U) +#define PREMULTIPLY(x, m) (((x) * (m) + (1U << 23)) >> 24) +#endif + +#if !WEBP_NEON_OMIT_C_CODE +static void ApplyAlphaMultiply_C(uint8_t* rgba, int alpha_first, + int w, int h, int stride) { + while (h-- > 0) { + uint8_t* const rgb = rgba + (alpha_first ? 1 : 0); + const uint8_t* const alpha = rgba + (alpha_first ? 
0 : 3); + int i; + for (i = 0; i < w; ++i) { + const uint32_t a = alpha[4 * i]; + if (a != 0xff) { + const uint32_t mult = MULTIPLIER(a); + rgb[4 * i + 0] = PREMULTIPLY(rgb[4 * i + 0], mult); + rgb[4 * i + 1] = PREMULTIPLY(rgb[4 * i + 1], mult); + rgb[4 * i + 2] = PREMULTIPLY(rgb[4 * i + 2], mult); + } + } + rgba += stride; + } +} +#endif // !WEBP_NEON_OMIT_C_CODE +#undef MULTIPLIER +#undef PREMULTIPLY + +// rgbA4444 + +#define MULTIPLIER(a) ((a) * 0x1111) // 0x1111 ~= (1 << 16) / 15 + +static WEBP_INLINE uint8_t dither_hi(uint8_t x) { + return (x & 0xf0) | (x >> 4); +} + +static WEBP_INLINE uint8_t dither_lo(uint8_t x) { + return (x & 0x0f) | (x << 4); +} + +static WEBP_INLINE uint8_t multiply(uint8_t x, uint32_t m) { + return (x * m) >> 16; +} + +static WEBP_INLINE void ApplyAlphaMultiply4444_C(uint8_t* rgba4444, + int w, int h, int stride, + int rg_byte_pos /* 0 or 1 */) { + while (h-- > 0) { + int i; + for (i = 0; i < w; ++i) { + const uint32_t rg = rgba4444[2 * i + rg_byte_pos]; + const uint32_t ba = rgba4444[2 * i + (rg_byte_pos ^ 1)]; + const uint8_t a = ba & 0x0f; + const uint32_t mult = MULTIPLIER(a); + const uint8_t r = multiply(dither_hi(rg), mult); + const uint8_t g = multiply(dither_lo(rg), mult); + const uint8_t b = multiply(dither_hi(ba), mult); + rgba4444[2 * i + rg_byte_pos] = (r & 0xf0) | ((g >> 4) & 0x0f); + rgba4444[2 * i + (rg_byte_pos ^ 1)] = (b & 0xf0) | a; + } + rgba4444 += stride; + } +} +#undef MULTIPLIER + +static void ApplyAlphaMultiply_16b_C(uint8_t* rgba4444, + int w, int h, int stride) { +#if (WEBP_SWAP_16BIT_CSP == 1) + ApplyAlphaMultiply4444_C(rgba4444, w, h, stride, 1); +#else + ApplyAlphaMultiply4444_C(rgba4444, w, h, stride, 0); +#endif +} + +#if !WEBP_NEON_OMIT_C_CODE +static int DispatchAlpha_C(const uint8_t* alpha, int alpha_stride, + int width, int height, + uint8_t* dst, int dst_stride) { + uint32_t alpha_mask = 0xff; + int i, j; + + for (j = 0; j < height; ++j) { + for (i = 0; i < width; ++i) { + const uint32_t alpha_value = alpha[i]; + dst[4 * i] = alpha_value; + alpha_mask &= alpha_value; + } + alpha += alpha_stride; + dst += dst_stride; + } + + return (alpha_mask != 0xff); +} + +static void DispatchAlphaToGreen_C(const uint8_t* alpha, int alpha_stride, + int width, int height, + uint32_t* dst, int dst_stride) { + int i, j; + for (j = 0; j < height; ++j) { + for (i = 0; i < width; ++i) { + dst[i] = alpha[i] << 8; // leave A/R/B channels zero'd. 
+ } + alpha += alpha_stride; + dst += dst_stride; + } +} + +static int ExtractAlpha_C(const uint8_t* argb, int argb_stride, + int width, int height, + uint8_t* alpha, int alpha_stride) { + uint8_t alpha_mask = 0xff; + int i, j; + + for (j = 0; j < height; ++j) { + for (i = 0; i < width; ++i) { + const uint8_t alpha_value = argb[4 * i]; + alpha[i] = alpha_value; + alpha_mask &= alpha_value; + } + argb += argb_stride; + alpha += alpha_stride; + } + return (alpha_mask == 0xff); +} + +static void ExtractGreen_C(const uint32_t* argb, uint8_t* alpha, int size) { + int i; + for (i = 0; i < size; ++i) alpha[i] = argb[i] >> 8; +} +#endif // !WEBP_NEON_OMIT_C_CODE + +//------------------------------------------------------------------------------ + +static int HasAlpha8b_C(const uint8_t* src, int length) { + while (length-- > 0) if (*src++ != 0xff) return 1; + return 0; +} + +static int HasAlpha32b_C(const uint8_t* src, int length) { + int x; + for (x = 0; length-- > 0; x += 4) if (src[x] != 0xff) return 1; + return 0; +} + +static void AlphaReplace_C(uint32_t* src, int length, uint32_t color) { + int x; + for (x = 0; x < length; ++x) if ((src[x] >> 24) == 0) src[x] = color; +} + +//------------------------------------------------------------------------------ +// Simple channel manipulations. + +static WEBP_INLINE uint32_t MakeARGB32(int a, int r, int g, int b) { + return (((uint32_t)a << 24) | (r << 16) | (g << 8) | b); +} + +#ifdef WORDS_BIGENDIAN +static void PackARGB_C(const uint8_t* a, const uint8_t* r, const uint8_t* g, + const uint8_t* b, int len, uint32_t* out) { + int i; + for (i = 0; i < len; ++i) { + out[i] = MakeARGB32(a[4 * i], r[4 * i], g[4 * i], b[4 * i]); + } +} +#endif + +static void PackRGB_C(const uint8_t* r, const uint8_t* g, const uint8_t* b, + int len, int step, uint32_t* out) { + int i, offset = 0; + for (i = 0; i < len; ++i) { + out[i] = MakeARGB32(0xff, r[offset], g[offset], b[offset]); + offset += step; + } +} + +void (*WebPApplyAlphaMultiply)(uint8_t*, int, int, int, int); +void (*WebPApplyAlphaMultiply4444)(uint8_t*, int, int, int); +int (*WebPDispatchAlpha)(const uint8_t*, int, int, int, uint8_t*, int); +void (*WebPDispatchAlphaToGreen)(const uint8_t*, int, int, int, uint32_t*, int); +int (*WebPExtractAlpha)(const uint8_t*, int, int, int, uint8_t*, int); +void (*WebPExtractGreen)(const uint32_t* argb, uint8_t* alpha, int size); +#ifdef WORDS_BIGENDIAN +void (*WebPPackARGB)(const uint8_t* a, const uint8_t* r, const uint8_t* g, + const uint8_t* b, int, uint32_t*); +#endif +void (*WebPPackRGB)(const uint8_t* r, const uint8_t* g, const uint8_t* b, + int len, int step, uint32_t* out); + +int (*WebPHasAlpha8b)(const uint8_t* src, int length); +int (*WebPHasAlpha32b)(const uint8_t* src, int length); +void (*WebPAlphaReplace)(uint32_t* src, int length, uint32_t color); + +//------------------------------------------------------------------------------ +// Init function + +extern void WebPInitAlphaProcessingMIPSdspR2(void); +extern void WebPInitAlphaProcessingSSE2(void); +extern void WebPInitAlphaProcessingSSE41(void); +extern void WebPInitAlphaProcessingNEON(void); + +WEBP_DSP_INIT_FUNC(WebPInitAlphaProcessing) { + WebPMultARGBRow = WebPMultARGBRow_C; + WebPMultRow = WebPMultRow_C; + WebPApplyAlphaMultiply4444 = ApplyAlphaMultiply_16b_C; + +#ifdef WORDS_BIGENDIAN + WebPPackARGB = PackARGB_C; +#endif + WebPPackRGB = PackRGB_C; +#if !WEBP_NEON_OMIT_C_CODE + WebPApplyAlphaMultiply = ApplyAlphaMultiply_C; + WebPDispatchAlpha = DispatchAlpha_C; + WebPDispatchAlphaToGreen = 
DispatchAlphaToGreen_C; + WebPExtractAlpha = ExtractAlpha_C; + WebPExtractGreen = ExtractGreen_C; +#endif + + WebPHasAlpha8b = HasAlpha8b_C; + WebPHasAlpha32b = HasAlpha32b_C; + WebPAlphaReplace = AlphaReplace_C; + + // If defined, use CPUInfo() to overwrite some pointers with faster versions. + if (VP8GetCPUInfo != NULL) { +#if defined(WEBP_USE_SSE2) + if (VP8GetCPUInfo(kSSE2)) { + WebPInitAlphaProcessingSSE2(); +#if defined(WEBP_USE_SSE41) + if (VP8GetCPUInfo(kSSE4_1)) { + WebPInitAlphaProcessingSSE41(); + } +#endif + } +#endif +#if defined(WEBP_USE_MIPS_DSP_R2) + if (VP8GetCPUInfo(kMIPSdspR2)) { + WebPInitAlphaProcessingMIPSdspR2(); + } +#endif + } + +#if defined(WEBP_USE_NEON) + if (WEBP_NEON_OMIT_C_CODE || + (VP8GetCPUInfo != NULL && VP8GetCPUInfo(kNEON))) { + WebPInitAlphaProcessingNEON(); + } +#endif + + assert(WebPMultARGBRow != NULL); + assert(WebPMultRow != NULL); + assert(WebPApplyAlphaMultiply != NULL); + assert(WebPApplyAlphaMultiply4444 != NULL); + assert(WebPDispatchAlpha != NULL); + assert(WebPDispatchAlphaToGreen != NULL); + assert(WebPExtractAlpha != NULL); + assert(WebPExtractGreen != NULL); +#ifdef WORDS_BIGENDIAN + assert(WebPPackARGB != NULL); +#endif + assert(WebPPackRGB != NULL); + assert(WebPHasAlpha8b != NULL); + assert(WebPHasAlpha32b != NULL); + assert(WebPAlphaReplace != NULL); +} diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_alpha_processing_mips_dsp_r2.c b/vendor/github.com/sizeofint/webpanimation/dsp_alpha_processing_mips_dsp_r2.c new file mode 100644 index 00000000..3d19d6c5 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_alpha_processing_mips_dsp_r2.c @@ -0,0 +1,228 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Utilities for processing transparent channel. 
+// +// Author(s): Branimir Vasic (branimir.vasic@imgtec.com) +// Djordje Pesut (djordje.pesut@imgtec.com) + +#include "dsp_dsp.h" + +#if defined(WEBP_USE_MIPS_DSP_R2) + +static int DispatchAlpha_MIPSdspR2(const uint8_t* alpha, int alpha_stride, + int width, int height, + uint8_t* dst, int dst_stride) { + uint32_t alpha_mask = 0xffffffff; + int i, j, temp0; + + for (j = 0; j < height; ++j) { + uint8_t* pdst = dst; + const uint8_t* palpha = alpha; + for (i = 0; i < (width >> 2); ++i) { + int temp1, temp2, temp3; + + __asm__ volatile ( + "ulw %[temp0], 0(%[palpha]) \n\t" + "addiu %[palpha], %[palpha], 4 \n\t" + "addiu %[pdst], %[pdst], 16 \n\t" + "srl %[temp1], %[temp0], 8 \n\t" + "srl %[temp2], %[temp0], 16 \n\t" + "srl %[temp3], %[temp0], 24 \n\t" + "and %[alpha_mask], %[alpha_mask], %[temp0] \n\t" + "sb %[temp0], -16(%[pdst]) \n\t" + "sb %[temp1], -12(%[pdst]) \n\t" + "sb %[temp2], -8(%[pdst]) \n\t" + "sb %[temp3], -4(%[pdst]) \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [palpha]"+r"(palpha), [pdst]"+r"(pdst), + [alpha_mask]"+r"(alpha_mask) + : + : "memory" + ); + } + + for (i = 0; i < (width & 3); ++i) { + __asm__ volatile ( + "lbu %[temp0], 0(%[palpha]) \n\t" + "addiu %[palpha], %[palpha], 1 \n\t" + "sb %[temp0], 0(%[pdst]) \n\t" + "and %[alpha_mask], %[alpha_mask], %[temp0] \n\t" + "addiu %[pdst], %[pdst], 4 \n\t" + : [temp0]"=&r"(temp0), [palpha]"+r"(palpha), [pdst]"+r"(pdst), + [alpha_mask]"+r"(alpha_mask) + : + : "memory" + ); + } + alpha += alpha_stride; + dst += dst_stride; + } + + __asm__ volatile ( + "ext %[temp0], %[alpha_mask], 0, 16 \n\t" + "srl %[alpha_mask], %[alpha_mask], 16 \n\t" + "and %[alpha_mask], %[alpha_mask], %[temp0] \n\t" + "ext %[temp0], %[alpha_mask], 0, 8 \n\t" + "srl %[alpha_mask], %[alpha_mask], 8 \n\t" + "and %[alpha_mask], %[alpha_mask], %[temp0] \n\t" + : [temp0]"=&r"(temp0), [alpha_mask]"+r"(alpha_mask) + : + ); + + return (alpha_mask != 0xff); +} + +static void MultARGBRow_MIPSdspR2(uint32_t* const ptr, int width, + int inverse) { + int x; + const uint32_t c_00ffffff = 0x00ffffffu; + const uint32_t c_ff000000 = 0xff000000u; + const uint32_t c_8000000 = 0x00800000u; + const uint32_t c_8000080 = 0x00800080u; + for (x = 0; x < width; ++x) { + const uint32_t argb = ptr[x]; + if (argb < 0xff000000u) { // alpha < 255 + if (argb <= 0x00ffffffu) { // alpha == 0 + ptr[x] = 0; + } else { + int temp0, temp1, temp2, temp3, alpha; + __asm__ volatile ( + "srl %[alpha], %[argb], 24 \n\t" + "replv.qb %[temp0], %[alpha] \n\t" + "and %[temp0], %[temp0], %[c_00ffffff] \n\t" + "beqz %[inverse], 0f \n\t" + "divu $zero, %[c_ff000000], %[alpha] \n\t" + "mflo %[temp0] \n\t" + "0: \n\t" + "andi %[temp1], %[argb], 0xff \n\t" + "ext %[temp2], %[argb], 8, 8 \n\t" + "ext %[temp3], %[argb], 16, 8 \n\t" + "mul %[temp1], %[temp1], %[temp0] \n\t" + "mul %[temp2], %[temp2], %[temp0] \n\t" + "mul %[temp3], %[temp3], %[temp0] \n\t" + "precrq.ph.w %[temp1], %[temp2], %[temp1] \n\t" + "addu %[temp3], %[temp3], %[c_8000000] \n\t" + "addu %[temp1], %[temp1], %[c_8000080] \n\t" + "precrq.ph.w %[temp3], %[argb], %[temp3] \n\t" + "precrq.qb.ph %[temp1], %[temp3], %[temp1] \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [alpha]"=&r"(alpha) + : [inverse]"r"(inverse), [c_00ffffff]"r"(c_00ffffff), + [c_8000000]"r"(c_8000000), [c_8000080]"r"(c_8000080), + [c_ff000000]"r"(c_ff000000), [argb]"r"(argb) + : "memory", "hi", "lo" + ); + ptr[x] = temp1; + } + } + } +} + +#ifdef WORDS_BIGENDIAN +static void 
PackARGB_MIPSdspR2(const uint8_t* a, const uint8_t* r, + const uint8_t* g, const uint8_t* b, int len, + uint32_t* out) { + int temp0, temp1, temp2, temp3, offset; + const int rest = len & 1; + const uint32_t* const loop_end = out + len - rest; + const int step = 4; + __asm__ volatile ( + "xor %[offset], %[offset], %[offset] \n\t" + "beq %[loop_end], %[out], 0f \n\t" + "2: \n\t" + "lbux %[temp0], %[offset](%[a]) \n\t" + "lbux %[temp1], %[offset](%[r]) \n\t" + "lbux %[temp2], %[offset](%[g]) \n\t" + "lbux %[temp3], %[offset](%[b]) \n\t" + "ins %[temp1], %[temp0], 16, 16 \n\t" + "ins %[temp3], %[temp2], 16, 16 \n\t" + "addiu %[out], %[out], 4 \n\t" + "precr.qb.ph %[temp0], %[temp1], %[temp3] \n\t" + "sw %[temp0], -4(%[out]) \n\t" + "addu %[offset], %[offset], %[step] \n\t" + "bne %[loop_end], %[out], 2b \n\t" + "0: \n\t" + "beq %[rest], $zero, 1f \n\t" + "lbux %[temp0], %[offset](%[a]) \n\t" + "lbux %[temp1], %[offset](%[r]) \n\t" + "lbux %[temp2], %[offset](%[g]) \n\t" + "lbux %[temp3], %[offset](%[b]) \n\t" + "ins %[temp1], %[temp0], 16, 16 \n\t" + "ins %[temp3], %[temp2], 16, 16 \n\t" + "precr.qb.ph %[temp0], %[temp1], %[temp3] \n\t" + "sw %[temp0], 0(%[out]) \n\t" + "1: \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [offset]"=&r"(offset), [out]"+&r"(out) + : [a]"r"(a), [r]"r"(r), [g]"r"(g), [b]"r"(b), [step]"r"(step), + [loop_end]"r"(loop_end), [rest]"r"(rest) + : "memory" + ); +} +#endif // WORDS_BIGENDIAN + +static void PackRGB_MIPSdspR2(const uint8_t* r, const uint8_t* g, + const uint8_t* b, int len, int step, + uint32_t* out) { + int temp0, temp1, temp2, offset; + const int rest = len & 1; + const int a = 0xff; + const uint32_t* const loop_end = out + len - rest; + __asm__ volatile ( + "xor %[offset], %[offset], %[offset] \n\t" + "beq %[loop_end], %[out], 0f \n\t" + "2: \n\t" + "lbux %[temp0], %[offset](%[r]) \n\t" + "lbux %[temp1], %[offset](%[g]) \n\t" + "lbux %[temp2], %[offset](%[b]) \n\t" + "ins %[temp0], %[a], 16, 16 \n\t" + "ins %[temp2], %[temp1], 16, 16 \n\t" + "addiu %[out], %[out], 4 \n\t" + "precr.qb.ph %[temp0], %[temp0], %[temp2] \n\t" + "sw %[temp0], -4(%[out]) \n\t" + "addu %[offset], %[offset], %[step] \n\t" + "bne %[loop_end], %[out], 2b \n\t" + "0: \n\t" + "beq %[rest], $zero, 1f \n\t" + "lbux %[temp0], %[offset](%[r]) \n\t" + "lbux %[temp1], %[offset](%[g]) \n\t" + "lbux %[temp2], %[offset](%[b]) \n\t" + "ins %[temp0], %[a], 16, 16 \n\t" + "ins %[temp2], %[temp1], 16, 16 \n\t" + "precr.qb.ph %[temp0], %[temp0], %[temp2] \n\t" + "sw %[temp0], 0(%[out]) \n\t" + "1: \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [offset]"=&r"(offset), [out]"+&r"(out) + : [a]"r"(a), [r]"r"(r), [g]"r"(g), [b]"r"(b), [step]"r"(step), + [loop_end]"r"(loop_end), [rest]"r"(rest) + : "memory" + ); +} + +//------------------------------------------------------------------------------ +// Entry point + +extern void WebPInitAlphaProcessingMIPSdspR2(void); + +WEBP_TSAN_IGNORE_FUNCTION void WebPInitAlphaProcessingMIPSdspR2(void) { + WebPDispatchAlpha = DispatchAlpha_MIPSdspR2; + WebPMultARGBRow = MultARGBRow_MIPSdspR2; +#ifdef WORDS_BIGENDIAN + WebPPackARGB = PackARGB_MIPSdspR2; +#endif + WebPPackRGB = PackRGB_MIPSdspR2; +} + +#else // !WEBP_USE_MIPS_DSP_R2 + +WEBP_DSP_INIT_STUB(WebPInitAlphaProcessingMIPSdspR2) + +#endif // WEBP_USE_MIPS_DSP_R2 diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_alpha_processing_neon.c b/vendor/github.com/sizeofint/webpanimation/dsp_alpha_processing_neon.c new file mode 100644 
index 00000000..81915502 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_alpha_processing_neon.c @@ -0,0 +1,191 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Utilities for processing transparent channel, NEON version. +// +// Author: Skal (pascal.massimino@gmail.com) + +#include "dsp_dsp.h" + +#if defined(WEBP_USE_NEON) + +#include "dsp_neon.h" + +//------------------------------------------------------------------------------ + +#define MULTIPLIER(a) ((a) * 0x8081) +#define PREMULTIPLY(x, m) (((x) * (m)) >> 23) + +#define MULTIPLY_BY_ALPHA(V, ALPHA, OTHER) do { \ + const uint8x8_t alpha = (V).val[(ALPHA)]; \ + const uint16x8_t r1 = vmull_u8((V).val[1], alpha); \ + const uint16x8_t g1 = vmull_u8((V).val[2], alpha); \ + const uint16x8_t b1 = vmull_u8((V).val[(OTHER)], alpha); \ + /* we use: v / 255 = (v + 1 + (v >> 8)) >> 8 */ \ + const uint16x8_t r2 = vsraq_n_u16(r1, r1, 8); \ + const uint16x8_t g2 = vsraq_n_u16(g1, g1, 8); \ + const uint16x8_t b2 = vsraq_n_u16(b1, b1, 8); \ + const uint16x8_t r3 = vaddq_u16(r2, kOne); \ + const uint16x8_t g3 = vaddq_u16(g2, kOne); \ + const uint16x8_t b3 = vaddq_u16(b2, kOne); \ + (V).val[1] = vshrn_n_u16(r3, 8); \ + (V).val[2] = vshrn_n_u16(g3, 8); \ + (V).val[(OTHER)] = vshrn_n_u16(b3, 8); \ +} while (0) + +static void ApplyAlphaMultiply_NEON(uint8_t* rgba, int alpha_first, + int w, int h, int stride) { + const uint16x8_t kOne = vdupq_n_u16(1u); + while (h-- > 0) { + uint32_t* const rgbx = (uint32_t*)rgba; + int i = 0; + if (alpha_first) { + for (; i + 8 <= w; i += 8) { + // load aaaa...|rrrr...|gggg...|bbbb... + uint8x8x4_t RGBX = vld4_u8((const uint8_t*)(rgbx + i)); + MULTIPLY_BY_ALPHA(RGBX, 0, 3); + vst4_u8((uint8_t*)(rgbx + i), RGBX); + } + } else { + for (; i + 8 <= w; i += 8) { + uint8x8x4_t RGBX = vld4_u8((const uint8_t*)(rgbx + i)); + MULTIPLY_BY_ALPHA(RGBX, 3, 0); + vst4_u8((uint8_t*)(rgbx + i), RGBX); + } + } + // Finish with left-overs. + for (; i < w; ++i) { + uint8_t* const rgb = rgba + (alpha_first ? 1 : 0); + const uint8_t* const alpha = rgba + (alpha_first ? 0 : 3); + const uint32_t a = alpha[4 * i]; + if (a != 0xff) { + const uint32_t mult = MULTIPLIER(a); + rgb[4 * i + 0] = PREMULTIPLY(rgb[4 * i + 0], mult); + rgb[4 * i + 1] = PREMULTIPLY(rgb[4 * i + 1], mult); + rgb[4 * i + 2] = PREMULTIPLY(rgb[4 * i + 2], mult); + } + } + rgba += stride; + } +} +#undef MULTIPLY_BY_ALPHA +#undef MULTIPLIER +#undef PREMULTIPLY + +//------------------------------------------------------------------------------ + +static int DispatchAlpha_NEON(const uint8_t* alpha, int alpha_stride, + int width, int height, + uint8_t* dst, int dst_stride) { + uint32_t alpha_mask = 0xffffffffu; + uint8x8_t mask8 = vdup_n_u8(0xff); + uint32_t tmp[2]; + int i, j; + for (j = 0; j < height; ++j) { + // We don't know if alpha is first or last in dst[] (depending on rgbA/Argb + // mode). So we must be sure dst[4*i + 8 - 1] is writable for the store. + // Hence the test with 'width - 1' instead of just 'width'. 
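+    // (With 'i + 8 <= width - 1', i.e. i <= width - 9, the 32-byte vst4_u8
+    // below ends at byte offset 4 * i + 31 <= 4 * width - 5 from dst, which
+    // stays inside the row even when dst points at the last byte of the
+    // first pixel.)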
+ for (i = 0; i + 8 <= width - 1; i += 8) { + uint8x8x4_t rgbX = vld4_u8((const uint8_t*)(dst + 4 * i)); + const uint8x8_t alphas = vld1_u8(alpha + i); + rgbX.val[0] = alphas; + vst4_u8((uint8_t*)(dst + 4 * i), rgbX); + mask8 = vand_u8(mask8, alphas); + } + for (; i < width; ++i) { + const uint32_t alpha_value = alpha[i]; + dst[4 * i] = alpha_value; + alpha_mask &= alpha_value; + } + alpha += alpha_stride; + dst += dst_stride; + } + vst1_u8((uint8_t*)tmp, mask8); + alpha_mask &= tmp[0]; + alpha_mask &= tmp[1]; + return (alpha_mask != 0xffffffffu); +} + +static void DispatchAlphaToGreen_NEON(const uint8_t* alpha, int alpha_stride, + int width, int height, + uint32_t* dst, int dst_stride) { + int i, j; + uint8x8x4_t greens; // leave A/R/B channels zero'd. + greens.val[0] = vdup_n_u8(0); + greens.val[2] = vdup_n_u8(0); + greens.val[3] = vdup_n_u8(0); + for (j = 0; j < height; ++j) { + for (i = 0; i + 8 <= width; i += 8) { + greens.val[1] = vld1_u8(alpha + i); + vst4_u8((uint8_t*)(dst + i), greens); + } + for (; i < width; ++i) dst[i] = alpha[i] << 8; + alpha += alpha_stride; + dst += dst_stride; + } +} + +static int ExtractAlpha_NEON(const uint8_t* argb, int argb_stride, + int width, int height, + uint8_t* alpha, int alpha_stride) { + uint32_t alpha_mask = 0xffffffffu; + uint8x8_t mask8 = vdup_n_u8(0xff); + uint32_t tmp[2]; + int i, j; + for (j = 0; j < height; ++j) { + // We don't know if alpha is first or last in dst[] (depending on rgbA/Argb + // mode). So we must be sure dst[4*i + 8 - 1] is writable for the store. + // Hence the test with 'width - 1' instead of just 'width'. + for (i = 0; i + 8 <= width - 1; i += 8) { + const uint8x8x4_t rgbX = vld4_u8((const uint8_t*)(argb + 4 * i)); + const uint8x8_t alphas = rgbX.val[0]; + vst1_u8((uint8_t*)(alpha + i), alphas); + mask8 = vand_u8(mask8, alphas); + } + for (; i < width; ++i) { + alpha[i] = argb[4 * i]; + alpha_mask &= alpha[i]; + } + argb += argb_stride; + alpha += alpha_stride; + } + vst1_u8((uint8_t*)tmp, mask8); + alpha_mask &= tmp[0]; + alpha_mask &= tmp[1]; + return (alpha_mask == 0xffffffffu); +} + +static void ExtractGreen_NEON(const uint32_t* argb, + uint8_t* alpha, int size) { + int i; + for (i = 0; i + 16 <= size; i += 16) { + const uint8x16x4_t rgbX = vld4q_u8((const uint8_t*)(argb + i)); + const uint8x16_t greens = rgbX.val[1]; + vst1q_u8(alpha + i, greens); + } + for (; i < size; ++i) alpha[i] = (argb[i] >> 8) & 0xff; +} + +//------------------------------------------------------------------------------ + +extern void WebPInitAlphaProcessingNEON(void); + +WEBP_TSAN_IGNORE_FUNCTION void WebPInitAlphaProcessingNEON(void) { + WebPApplyAlphaMultiply = ApplyAlphaMultiply_NEON; + WebPDispatchAlpha = DispatchAlpha_NEON; + WebPDispatchAlphaToGreen = DispatchAlphaToGreen_NEON; + WebPExtractAlpha = ExtractAlpha_NEON; + WebPExtractGreen = ExtractGreen_NEON; +} + +#else // !WEBP_USE_NEON + +WEBP_DSP_INIT_STUB(WebPInitAlphaProcessingNEON) + +#endif // WEBP_USE_NEON diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_alpha_processing_sse2.c b/vendor/github.com/sizeofint/webpanimation/dsp_alpha_processing_sse2.c new file mode 100644 index 00000000..22ba6ba5 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_alpha_processing_sse2.c @@ -0,0 +1,365 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. 
An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Utilities for processing transparent channel.
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include "dsp_dsp.h"
+
+#if defined(WEBP_USE_SSE2)
+#include <emmintrin.h>
+
+//------------------------------------------------------------------------------
+
+static int DispatchAlpha_SSE2(const uint8_t* alpha, int alpha_stride,
+                              int width, int height,
+                              uint8_t* dst, int dst_stride) {
+  // alpha_and stores an 'and' operation of all the alpha[] values. The final
+  // value is not 0xff if any of the alpha[] is not equal to 0xff.
+  uint32_t alpha_and = 0xff;
+  int i, j;
+  const __m128i zero = _mm_setzero_si128();
+  const __m128i rgb_mask = _mm_set1_epi32(0xffffff00u);  // to preserve RGB
+  const __m128i all_0xff = _mm_set_epi32(0, 0, ~0u, ~0u);
+  __m128i all_alphas = all_0xff;
+
+  // We must be able to access 3 extra bytes after the last written byte
+  // 'dst[4 * width - 4]', because we don't know if alpha is the first or the
+  // last byte of the quadruplet.
+  const int limit = (width - 1) & ~7;
+
+  for (j = 0; j < height; ++j) {
+    __m128i* out = (__m128i*)dst;
+    for (i = 0; i < limit; i += 8) {
+      // load 8 alpha bytes
+      const __m128i a0 = _mm_loadl_epi64((const __m128i*)&alpha[i]);
+      const __m128i a1 = _mm_unpacklo_epi8(a0, zero);
+      const __m128i a2_lo = _mm_unpacklo_epi16(a1, zero);
+      const __m128i a2_hi = _mm_unpackhi_epi16(a1, zero);
+      // load 8 dst pixels (32 bytes)
+      const __m128i b0_lo = _mm_loadu_si128(out + 0);
+      const __m128i b0_hi = _mm_loadu_si128(out + 1);
+      // mask dst alpha values
+      const __m128i b1_lo = _mm_and_si128(b0_lo, rgb_mask);
+      const __m128i b1_hi = _mm_and_si128(b0_hi, rgb_mask);
+      // combine
+      const __m128i b2_lo = _mm_or_si128(b1_lo, a2_lo);
+      const __m128i b2_hi = _mm_or_si128(b1_hi, a2_hi);
+      // store
+      _mm_storeu_si128(out + 0, b2_lo);
+      _mm_storeu_si128(out + 1, b2_hi);
+      // accumulate eight alpha 'and' in parallel
+      all_alphas = _mm_and_si128(all_alphas, a0);
+      out += 2;
+    }
+    for (; i < width; ++i) {
+      const uint32_t alpha_value = alpha[i];
+      dst[4 * i] = alpha_value;
+      alpha_and &= alpha_value;
+    }
+    alpha += alpha_stride;
+    dst += dst_stride;
+  }
+  // Combine the eight alpha 'and' into an 8-bit mask.
+  alpha_and &= _mm_movemask_epi8(_mm_cmpeq_epi8(all_alphas, all_0xff));
+  return (alpha_and != 0xff);
+}
+
+static void DispatchAlphaToGreen_SSE2(const uint8_t* alpha, int alpha_stride,
+                                      int width, int height,
+                                      uint32_t* dst, int dst_stride) {
+  int i, j;
+  const __m128i zero = _mm_setzero_si128();
+  const int limit = width & ~15;
+  for (j = 0; j < height; ++j) {
+    for (i = 0; i < limit; i += 16) {  // process 16 alpha bytes
+      const __m128i a0 = _mm_loadu_si128((const __m128i*)&alpha[i]);
+      const __m128i a1 = _mm_unpacklo_epi8(zero, a0);  // note the 'zero' first!
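+      // (_mm_unpacklo_epi8(zero, a0) interleaves a zero byte *below* each
+      // alpha byte, so every 16-bit lane holds alpha << 8; the unpacks that
+      // follow widen this to the 32-bit value alpha << 8, i.e. the green
+      // slot, matching the scalar tail 'dst[i] = alpha[i] << 8' below.)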
+ const __m128i b1 = _mm_unpackhi_epi8(zero, a0); + const __m128i a2_lo = _mm_unpacklo_epi16(a1, zero); + const __m128i b2_lo = _mm_unpacklo_epi16(b1, zero); + const __m128i a2_hi = _mm_unpackhi_epi16(a1, zero); + const __m128i b2_hi = _mm_unpackhi_epi16(b1, zero); + _mm_storeu_si128((__m128i*)&dst[i + 0], a2_lo); + _mm_storeu_si128((__m128i*)&dst[i + 4], a2_hi); + _mm_storeu_si128((__m128i*)&dst[i + 8], b2_lo); + _mm_storeu_si128((__m128i*)&dst[i + 12], b2_hi); + } + for (; i < width; ++i) dst[i] = alpha[i] << 8; + alpha += alpha_stride; + dst += dst_stride; + } +} + +static int ExtractAlpha_SSE2(const uint8_t* argb, int argb_stride, + int width, int height, + uint8_t* alpha, int alpha_stride) { + // alpha_and stores an 'and' operation of all the alpha[] values. The final + // value is not 0xff if any of the alpha[] is not equal to 0xff. + uint32_t alpha_and = 0xff; + int i, j; + const __m128i a_mask = _mm_set1_epi32(0xffu); // to preserve alpha + const __m128i all_0xff = _mm_set_epi32(0, 0, ~0u, ~0u); + __m128i all_alphas = all_0xff; + + // We must be able to access 3 extra bytes after the last written byte + // 'src[4 * width - 4]', because we don't know if alpha is the first or the + // last byte of the quadruplet. + const int limit = (width - 1) & ~7; + + for (j = 0; j < height; ++j) { + const __m128i* src = (const __m128i*)argb; + for (i = 0; i < limit; i += 8) { + // load 32 argb bytes + const __m128i a0 = _mm_loadu_si128(src + 0); + const __m128i a1 = _mm_loadu_si128(src + 1); + const __m128i b0 = _mm_and_si128(a0, a_mask); + const __m128i b1 = _mm_and_si128(a1, a_mask); + const __m128i c0 = _mm_packs_epi32(b0, b1); + const __m128i d0 = _mm_packus_epi16(c0, c0); + // store + _mm_storel_epi64((__m128i*)&alpha[i], d0); + // accumulate eight alpha 'and' in parallel + all_alphas = _mm_and_si128(all_alphas, d0); + src += 2; + } + for (; i < width; ++i) { + const uint32_t alpha_value = argb[4 * i]; + alpha[i] = alpha_value; + alpha_and &= alpha_value; + } + argb += argb_stride; + alpha += alpha_stride; + } + // Combine the eight alpha 'and' into a 8-bit mask. + alpha_and &= _mm_movemask_epi8(_mm_cmpeq_epi8(all_alphas, all_0xff)); + return (alpha_and == 0xff); +} + +//------------------------------------------------------------------------------ +// Non-dither premultiplied modes + +#define MULTIPLIER(a) ((a) * 0x8081) +#define PREMULTIPLY(x, m) (((x) * (m)) >> 23) + +// We can't use a 'const int' for the SHUFFLE value, because it has to be an +// immediate in the _mm_shufflexx_epi16() instruction. We really need a macro. +// We use: v / 255 = (v * 0x8081) >> 23, where v = alpha * {r,g,b} is a 16bit +// value. 
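+// The identity can be checked exhaustively. An illustrative standalone
+// sanity check (a sketch, not part of the upstream sources; disabled like
+// the debug helpers elsewhere in this package):
+#if 0
+#include <assert.h>
+static void CheckPremultiplyIdentity(void) {
+  uint32_t v;  // v = alpha * channel, with both factors in [0, 255]
+  for (v = 0; v <= 255u * 255u; ++v) {
+    // the MULTIPLIER/PREMULTIPLY trick used above
+    assert(v / 255u == ((v * 0x8081u) >> 23));
+  }
+}
+#endif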
+#define APPLY_ALPHA(RGBX, SHUFFLE) do { \ + const __m128i argb0 = _mm_loadu_si128((const __m128i*)&(RGBX)); \ + const __m128i argb1_lo = _mm_unpacklo_epi8(argb0, zero); \ + const __m128i argb1_hi = _mm_unpackhi_epi8(argb0, zero); \ + const __m128i alpha0_lo = _mm_or_si128(argb1_lo, kMask); \ + const __m128i alpha0_hi = _mm_or_si128(argb1_hi, kMask); \ + const __m128i alpha1_lo = _mm_shufflelo_epi16(alpha0_lo, SHUFFLE); \ + const __m128i alpha1_hi = _mm_shufflelo_epi16(alpha0_hi, SHUFFLE); \ + const __m128i alpha2_lo = _mm_shufflehi_epi16(alpha1_lo, SHUFFLE); \ + const __m128i alpha2_hi = _mm_shufflehi_epi16(alpha1_hi, SHUFFLE); \ + /* alpha2 = [ff a0 a0 a0][ff a1 a1 a1] */ \ + const __m128i A0_lo = _mm_mullo_epi16(alpha2_lo, argb1_lo); \ + const __m128i A0_hi = _mm_mullo_epi16(alpha2_hi, argb1_hi); \ + const __m128i A1_lo = _mm_mulhi_epu16(A0_lo, kMult); \ + const __m128i A1_hi = _mm_mulhi_epu16(A0_hi, kMult); \ + const __m128i A2_lo = _mm_srli_epi16(A1_lo, 7); \ + const __m128i A2_hi = _mm_srli_epi16(A1_hi, 7); \ + const __m128i A3 = _mm_packus_epi16(A2_lo, A2_hi); \ + _mm_storeu_si128((__m128i*)&(RGBX), A3); \ +} while (0) + +static void ApplyAlphaMultiply_SSE2(uint8_t* rgba, int alpha_first, + int w, int h, int stride) { + const __m128i zero = _mm_setzero_si128(); + const __m128i kMult = _mm_set1_epi16(0x8081u); + const __m128i kMask = _mm_set_epi16(0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0); + const int kSpan = 4; + while (h-- > 0) { + uint32_t* const rgbx = (uint32_t*)rgba; + int i; + if (!alpha_first) { + for (i = 0; i + kSpan <= w; i += kSpan) { + APPLY_ALPHA(rgbx[i], _MM_SHUFFLE(2, 3, 3, 3)); + } + } else { + for (i = 0; i + kSpan <= w; i += kSpan) { + APPLY_ALPHA(rgbx[i], _MM_SHUFFLE(0, 0, 0, 1)); + } + } + // Finish with left-overs. + for (; i < w; ++i) { + uint8_t* const rgb = rgba + (alpha_first ? 1 : 0); + const uint8_t* const alpha = rgba + (alpha_first ? 0 : 3); + const uint32_t a = alpha[4 * i]; + if (a != 0xff) { + const uint32_t mult = MULTIPLIER(a); + rgb[4 * i + 0] = PREMULTIPLY(rgb[4 * i + 0], mult); + rgb[4 * i + 1] = PREMULTIPLY(rgb[4 * i + 1], mult); + rgb[4 * i + 2] = PREMULTIPLY(rgb[4 * i + 2], mult); + } + } + rgba += stride; + } +} +#undef MULTIPLIER +#undef PREMULTIPLY + +//------------------------------------------------------------------------------ +// Alpha detection + +static int HasAlpha8b_SSE2(const uint8_t* src, int length) { + const __m128i all_0xff = _mm_set1_epi8((char)0xff); + int i = 0; + for (; i + 16 <= length; i += 16) { + const __m128i v = _mm_loadu_si128((const __m128i*)(src + i)); + const __m128i bits = _mm_cmpeq_epi8(v, all_0xff); + const int mask = _mm_movemask_epi8(bits); + if (mask != 0xffff) return 1; + } + for (; i < length; ++i) if (src[i] != 0xff) return 1; + return 0; +} + +static int HasAlpha32b_SSE2(const uint8_t* src, int length) { + const __m128i alpha_mask = _mm_set1_epi32(0xff); + const __m128i all_0xff = _mm_set1_epi8((char)0xff); + int i = 0; + // We don't know if we can access the last 3 bytes after the last alpha + // value 'src[4 * length - 4]' (because we don't know if alpha is the first + // or the last byte of the quadruplet). Hence the '-3' protection below. 
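+  // (After the rescale below, the last 16-byte load of each vector loop ends
+  // exactly at byte 4 * length - 4, the last alpha value that is guaranteed
+  // readable, and the scalar tail steps by 4 up to that same byte.)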
+ length = length * 4 - 3; // size in bytes + for (; i + 64 <= length; i += 64) { + const __m128i a0 = _mm_loadu_si128((const __m128i*)(src + i + 0)); + const __m128i a1 = _mm_loadu_si128((const __m128i*)(src + i + 16)); + const __m128i a2 = _mm_loadu_si128((const __m128i*)(src + i + 32)); + const __m128i a3 = _mm_loadu_si128((const __m128i*)(src + i + 48)); + const __m128i b0 = _mm_and_si128(a0, alpha_mask); + const __m128i b1 = _mm_and_si128(a1, alpha_mask); + const __m128i b2 = _mm_and_si128(a2, alpha_mask); + const __m128i b3 = _mm_and_si128(a3, alpha_mask); + const __m128i c0 = _mm_packs_epi32(b0, b1); + const __m128i c1 = _mm_packs_epi32(b2, b3); + const __m128i d = _mm_packus_epi16(c0, c1); + const __m128i bits = _mm_cmpeq_epi8(d, all_0xff); + const int mask = _mm_movemask_epi8(bits); + if (mask != 0xffff) return 1; + } + for (; i + 32 <= length; i += 32) { + const __m128i a0 = _mm_loadu_si128((const __m128i*)(src + i + 0)); + const __m128i a1 = _mm_loadu_si128((const __m128i*)(src + i + 16)); + const __m128i b0 = _mm_and_si128(a0, alpha_mask); + const __m128i b1 = _mm_and_si128(a1, alpha_mask); + const __m128i c = _mm_packs_epi32(b0, b1); + const __m128i d = _mm_packus_epi16(c, c); + const __m128i bits = _mm_cmpeq_epi8(d, all_0xff); + const int mask = _mm_movemask_epi8(bits); + if (mask != 0xffff) return 1; + } + for (; i <= length; i += 4) if (src[i] != 0xff) return 1; + return 0; +} + +static void AlphaReplace_SSE2(uint32_t* src, int length, uint32_t color) { + const __m128i m_color = _mm_set1_epi32(color); + const __m128i zero = _mm_setzero_si128(); + int i = 0; + for (; i + 8 <= length; i += 8) { + const __m128i a0 = _mm_loadu_si128((const __m128i*)(src + i + 0)); + const __m128i a1 = _mm_loadu_si128((const __m128i*)(src + i + 4)); + const __m128i b0 = _mm_srai_epi32(a0, 24); + const __m128i b1 = _mm_srai_epi32(a1, 24); + const __m128i c0 = _mm_cmpeq_epi32(b0, zero); + const __m128i c1 = _mm_cmpeq_epi32(b1, zero); + const __m128i d0 = _mm_and_si128(c0, m_color); + const __m128i d1 = _mm_and_si128(c1, m_color); + const __m128i e0 = _mm_andnot_si128(c0, a0); + const __m128i e1 = _mm_andnot_si128(c1, a1); + _mm_storeu_si128((__m128i*)(src + i + 0), _mm_or_si128(d0, e0)); + _mm_storeu_si128((__m128i*)(src + i + 4), _mm_or_si128(d1, e1)); + } + for (; i < length; ++i) if ((src[i] >> 24) == 0) src[i] = color; +} + +// ----------------------------------------------------------------------------- +// Apply alpha value to rows + +static void MultARGBRow_SSE2(uint32_t* const ptr, int width, int inverse) { + int x = 0; + if (!inverse) { + const int kSpan = 2; + const __m128i zero = _mm_setzero_si128(); + const __m128i k128 = _mm_set1_epi16(128); + const __m128i kMult = _mm_set1_epi16(0x0101); + const __m128i kMask = _mm_set_epi16(0, 0xff, 0, 0, 0, 0xff, 0, 0); + for (x = 0; x + kSpan <= width; x += kSpan) { + // To compute 'result = (int)(a * x / 255. 
+ .5)', we use:
+      // tmp = a * v + 128, result = (tmp * 0x0101u) >> 16
+      const __m128i A0 = _mm_loadl_epi64((const __m128i*)&ptr[x]);
+      const __m128i A1 = _mm_unpacklo_epi8(A0, zero);
+      const __m128i A2 = _mm_or_si128(A1, kMask);
+      const __m128i A3 = _mm_shufflelo_epi16(A2, _MM_SHUFFLE(2, 3, 3, 3));
+      const __m128i A4 = _mm_shufflehi_epi16(A3, _MM_SHUFFLE(2, 3, 3, 3));
+      // here, A4 = [ff a0 a0 a0][ff a1 a1 a1]
+      const __m128i A5 = _mm_mullo_epi16(A4, A1);
+      const __m128i A6 = _mm_add_epi16(A5, k128);
+      const __m128i A7 = _mm_mulhi_epu16(A6, kMult);
+      const __m128i A10 = _mm_packus_epi16(A7, zero);
+      _mm_storel_epi64((__m128i*)&ptr[x], A10);
+    }
+  }
+  width -= x;
+  if (width > 0) WebPMultARGBRow_C(ptr + x, width, inverse);
+}
+
+static void MultRow_SSE2(uint8_t* const ptr, const uint8_t* const alpha,
+                         int width, int inverse) {
+  int x = 0;
+  if (!inverse) {
+    const __m128i zero = _mm_setzero_si128();
+    const __m128i k128 = _mm_set1_epi16(128);
+    const __m128i kMult = _mm_set1_epi16(0x0101);
+    for (x = 0; x + 8 <= width; x += 8) {
+      const __m128i v0 = _mm_loadl_epi64((__m128i*)&ptr[x]);
+      const __m128i a0 = _mm_loadl_epi64((const __m128i*)&alpha[x]);
+      const __m128i v1 = _mm_unpacklo_epi8(v0, zero);
+      const __m128i a1 = _mm_unpacklo_epi8(a0, zero);
+      const __m128i v2 = _mm_mullo_epi16(v1, a1);
+      const __m128i v3 = _mm_add_epi16(v2, k128);
+      const __m128i v4 = _mm_mulhi_epu16(v3, kMult);
+      const __m128i v5 = _mm_packus_epi16(v4, zero);
+      _mm_storel_epi64((__m128i*)&ptr[x], v5);
+    }
+  }
+  width -= x;
+  if (width > 0) WebPMultRow_C(ptr + x, alpha + x, width, inverse);
+}
+
+//------------------------------------------------------------------------------
+// Entry point
+
+extern void WebPInitAlphaProcessingSSE2(void);
+
+WEBP_TSAN_IGNORE_FUNCTION void WebPInitAlphaProcessingSSE2(void) {
+  WebPMultARGBRow = MultARGBRow_SSE2;
+  WebPMultRow = MultRow_SSE2;
+  WebPApplyAlphaMultiply = ApplyAlphaMultiply_SSE2;
+  WebPDispatchAlpha = DispatchAlpha_SSE2;
+  WebPDispatchAlphaToGreen = DispatchAlphaToGreen_SSE2;
+  WebPExtractAlpha = ExtractAlpha_SSE2;
+
+  WebPHasAlpha8b = HasAlpha8b_SSE2;
+  WebPHasAlpha32b = HasAlpha32b_SSE2;
+  WebPAlphaReplace = AlphaReplace_SSE2;
+}
+
+#else  // !WEBP_USE_SSE2
+
+WEBP_DSP_INIT_STUB(WebPInitAlphaProcessingSSE2)
+
+#endif  // WEBP_USE_SSE2
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_alpha_processing_sse41.c b/vendor/github.com/sizeofint/webpanimation/dsp_alpha_processing_sse41.c
new file mode 100644
index 00000000..802e9424
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_alpha_processing_sse41.c
@@ -0,0 +1,92 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Utilities for processing transparent channel, SSE4.1 variant.
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include "dsp_dsp.h"
+
+#if defined(WEBP_USE_SSE41)
+
+#include <smmintrin.h>
+
+//------------------------------------------------------------------------------
+
+static int ExtractAlpha_SSE41(const uint8_t* argb, int argb_stride,
+                              int width, int height,
+                              uint8_t* alpha, int alpha_stride) {
+  // alpha_and stores an 'and' operation of all the alpha[] values.
The final + // value is not 0xff if any of the alpha[] is not equal to 0xff. + uint32_t alpha_and = 0xff; + int i, j; + const __m128i all_0xff = _mm_set1_epi32(~0u); + __m128i all_alphas = all_0xff; + + // We must be able to access 3 extra bytes after the last written byte + // 'src[4 * width - 4]', because we don't know if alpha is the first or the + // last byte of the quadruplet. + const int limit = (width - 1) & ~15; + const __m128i kCstAlpha0 = _mm_set_epi8(-1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, 12, 8, 4, 0); + const __m128i kCstAlpha1 = _mm_set_epi8(-1, -1, -1, -1, -1, -1, -1, -1, + 12, 8, 4, 0, -1, -1, -1, -1); + const __m128i kCstAlpha2 = _mm_set_epi8(-1, -1, -1, -1, 12, 8, 4, 0, + -1, -1, -1, -1, -1, -1, -1, -1); + const __m128i kCstAlpha3 = _mm_set_epi8(12, 8, 4, 0, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1); + for (j = 0; j < height; ++j) { + const __m128i* src = (const __m128i*)argb; + for (i = 0; i < limit; i += 16) { + // load 64 argb bytes + const __m128i a0 = _mm_loadu_si128(src + 0); + const __m128i a1 = _mm_loadu_si128(src + 1); + const __m128i a2 = _mm_loadu_si128(src + 2); + const __m128i a3 = _mm_loadu_si128(src + 3); + const __m128i b0 = _mm_shuffle_epi8(a0, kCstAlpha0); + const __m128i b1 = _mm_shuffle_epi8(a1, kCstAlpha1); + const __m128i b2 = _mm_shuffle_epi8(a2, kCstAlpha2); + const __m128i b3 = _mm_shuffle_epi8(a3, kCstAlpha3); + const __m128i c0 = _mm_or_si128(b0, b1); + const __m128i c1 = _mm_or_si128(b2, b3); + const __m128i d0 = _mm_or_si128(c0, c1); + // store + _mm_storeu_si128((__m128i*)&alpha[i], d0); + // accumulate sixteen alpha 'and' in parallel + all_alphas = _mm_and_si128(all_alphas, d0); + src += 4; + } + for (; i < width; ++i) { + const uint32_t alpha_value = argb[4 * i]; + alpha[i] = alpha_value; + alpha_and &= alpha_value; + } + argb += argb_stride; + alpha += alpha_stride; + } + // Combine the sixteen alpha 'and' into an 8-bit mask. + alpha_and |= 0xff00u; // pretend the upper bits [8..15] were tested ok. + alpha_and &= _mm_movemask_epi8(_mm_cmpeq_epi8(all_alphas, all_0xff)); + return (alpha_and == 0xffffu); +} + +//------------------------------------------------------------------------------ +// Entry point + +extern void WebPInitAlphaProcessingSSE41(void); + +WEBP_TSAN_IGNORE_FUNCTION void WebPInitAlphaProcessingSSE41(void) { + WebPExtractAlpha = ExtractAlpha_SSE41; +} + +#else // !WEBP_USE_SSE41 + +WEBP_DSP_INIT_STUB(WebPInitAlphaProcessingSSE41) + +#endif // WEBP_USE_SSE41 diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_common_sse2.h b/vendor/github.com/sizeofint/webpanimation/dsp_common_sse2.h new file mode 100644 index 00000000..e9f1ebff --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_common_sse2.h @@ -0,0 +1,194 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// SSE2 code common to several files. 
+//
+// Author: Vincent Rabaud (vrabaud@google.com)
+
+#ifndef WEBP_DSP_COMMON_SSE2_H_
+#define WEBP_DSP_COMMON_SSE2_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(WEBP_USE_SSE2)
+
+#include <emmintrin.h>
+
+//------------------------------------------------------------------------------
+// Quite useful macro for debugging. Left here for convenience.
+
+#if 0
+#include <stdio.h>
+static WEBP_INLINE void PrintReg(const __m128i r, const char* const name,
+                                 int size) {
+  int n;
+  union {
+    __m128i r;
+    uint8_t i8[16];
+    uint16_t i16[8];
+    uint32_t i32[4];
+    uint64_t i64[2];
+  } tmp;
+  tmp.r = r;
+  fprintf(stderr, "%s\t: ", name);
+  if (size == 8) {
+    for (n = 0; n < 16; ++n) fprintf(stderr, "%.2x ", tmp.i8[n]);
+  } else if (size == 16) {
+    for (n = 0; n < 8; ++n) fprintf(stderr, "%.4x ", tmp.i16[n]);
+  } else if (size == 32) {
+    for (n = 0; n < 4; ++n) fprintf(stderr, "%.8x ", tmp.i32[n]);
+  } else {
+    for (n = 0; n < 2; ++n) fprintf(stderr, "%.16lx ", tmp.i64[n]);
+  }
+  fprintf(stderr, "\n");
+}
+#endif
+
+//------------------------------------------------------------------------------
+// Math functions.
+
+// Return the sum of all the 8b in the register.
+static WEBP_INLINE int VP8HorizontalAdd8b(const __m128i* const a) {
+  const __m128i zero = _mm_setzero_si128();
+  const __m128i sad8x2 = _mm_sad_epu8(*a, zero);
+  // sum the two sads: sad8x2[0:1] + sad8x2[8:9]
+  const __m128i sum = _mm_add_epi32(sad8x2, _mm_shuffle_epi32(sad8x2, 2));
+  return _mm_cvtsi128_si32(sum);
+}
+
+// Transpose two 4x4 16b matrices horizontally stored in registers.
+static WEBP_INLINE void VP8Transpose_2_4x4_16b(
+    const __m128i* const in0, const __m128i* const in1,
+    const __m128i* const in2, const __m128i* const in3, __m128i* const out0,
+    __m128i* const out1, __m128i* const out2, __m128i* const out3) {
+  // Transpose the two 4x4.
+  // a00 a01 a02 a03   b00 b01 b02 b03
+  // a10 a11 a12 a13   b10 b11 b12 b13
+  // a20 a21 a22 a23   b20 b21 b22 b23
+  // a30 a31 a32 a33   b30 b31 b32 b33
+  const __m128i transpose0_0 = _mm_unpacklo_epi16(*in0, *in1);
+  const __m128i transpose0_1 = _mm_unpacklo_epi16(*in2, *in3);
+  const __m128i transpose0_2 = _mm_unpackhi_epi16(*in0, *in1);
+  const __m128i transpose0_3 = _mm_unpackhi_epi16(*in2, *in3);
+  // a00 a10 a01 a11   a02 a12 a03 a13
+  // a20 a30 a21 a31   a22 a32 a23 a33
+  // b00 b10 b01 b11   b02 b12 b03 b13
+  // b20 b30 b21 b31   b22 b32 b23 b33
+  const __m128i transpose1_0 = _mm_unpacklo_epi32(transpose0_0, transpose0_1);
+  const __m128i transpose1_1 = _mm_unpacklo_epi32(transpose0_2, transpose0_3);
+  const __m128i transpose1_2 = _mm_unpackhi_epi32(transpose0_0, transpose0_1);
+  const __m128i transpose1_3 = _mm_unpackhi_epi32(transpose0_2, transpose0_3);
+  // a00 a10 a20 a30   a01 a11 a21 a31
+  // b00 b10 b20 b30   b01 b11 b21 b31
+  // a02 a12 a22 a32   a03 a13 a23 a33
+  // b02 b12 b22 b32   b03 b13 b23 b33
+  *out0 = _mm_unpacklo_epi64(transpose1_0, transpose1_1);
+  *out1 = _mm_unpackhi_epi64(transpose1_0, transpose1_1);
+  *out2 = _mm_unpacklo_epi64(transpose1_2, transpose1_3);
+  *out3 = _mm_unpackhi_epi64(transpose1_2, transpose1_3);
+  // a00 a10 a20 a30   b00 b10 b20 b30
+  // a01 a11 a21 a31   b01 b11 b21 b31
+  // a02 a12 a22 a32   b02 b12 b22 b32
+  // a03 a13 a23 a33   b03 b13 b23 b33
+}
+
+//------------------------------------------------------------------------------
+// Channel mixing.
+
+// Function used several times in VP8PlanarTo24b.
+// It samples the in buffer as follows: one every two unsigned char is stored
+// at the beginning of the buffer, while the other half is stored at the end.
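+// (Concretely: masking each 16-bit lane with 0x00ff and packing with
+// _mm_packus_epi16 keeps the even-indexed bytes of IN0..IN5 in OUT0..OUT2,
+// while shifting each lane right by 8 before packing keeps the odd-indexed
+// bytes in OUT3..OUT5.)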
+#define VP8PlanarTo24bHelper(IN, OUT) \ + do { \ + const __m128i v_mask = _mm_set1_epi16(0x00ff); \ + /* Take one every two upper 8b values.*/ \ + (OUT##0) = _mm_packus_epi16(_mm_and_si128((IN##0), v_mask), \ + _mm_and_si128((IN##1), v_mask)); \ + (OUT##1) = _mm_packus_epi16(_mm_and_si128((IN##2), v_mask), \ + _mm_and_si128((IN##3), v_mask)); \ + (OUT##2) = _mm_packus_epi16(_mm_and_si128((IN##4), v_mask), \ + _mm_and_si128((IN##5), v_mask)); \ + /* Take one every two lower 8b values.*/ \ + (OUT##3) = _mm_packus_epi16(_mm_srli_epi16((IN##0), 8), \ + _mm_srli_epi16((IN##1), 8)); \ + (OUT##4) = _mm_packus_epi16(_mm_srli_epi16((IN##2), 8), \ + _mm_srli_epi16((IN##3), 8)); \ + (OUT##5) = _mm_packus_epi16(_mm_srli_epi16((IN##4), 8), \ + _mm_srli_epi16((IN##5), 8)); \ + } while (0) + +// Pack the planar buffers +// rrrr... rrrr... gggg... gggg... bbbb... bbbb.... +// triplet by triplet in the output buffer rgb as rgbrgbrgbrgb ... +static WEBP_INLINE void VP8PlanarTo24b_SSE2( + __m128i* const in0, __m128i* const in1, __m128i* const in2, + __m128i* const in3, __m128i* const in4, __m128i* const in5) { + // The input is 6 registers of sixteen 8b but for the sake of explanation, + // let's take 6 registers of four 8b values. + // To pack, we will keep taking one every two 8b integer and move it + // around as follows: + // Input: + // r0r1r2r3 | r4r5r6r7 | g0g1g2g3 | g4g5g6g7 | b0b1b2b3 | b4b5b6b7 + // Split the 6 registers in two sets of 3 registers: the first set as the even + // 8b bytes, the second the odd ones: + // r0r2r4r6 | g0g2g4g6 | b0b2b4b6 | r1r3r5r7 | g1g3g5g7 | b1b3b5b7 + // Repeat the same permutations twice more: + // r0r4g0g4 | b0b4r1r5 | g1g5b1b5 | r2r6g2g6 | b2b6r3r7 | g3g7b3b7 + // r0g0b0r1 | g1b1r2g2 | b2r3g3b3 | r4g4b4r5 | g5b5r6g6 | b6r7g7b7 + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5; + VP8PlanarTo24bHelper(*in, tmp); + VP8PlanarTo24bHelper(tmp, *in); + VP8PlanarTo24bHelper(*in, tmp); + // We need to do it two more times than the example as we have sixteen bytes. + { + __m128i out0, out1, out2, out3, out4, out5; + VP8PlanarTo24bHelper(tmp, out); + VP8PlanarTo24bHelper(out, *in); + } +} + +#undef VP8PlanarTo24bHelper + +// Convert four packed four-channel buffers like argbargbargbargb... into the +// split channels aaaaa ... rrrr ... gggg .... bbbbb ...... +static WEBP_INLINE void VP8L32bToPlanar_SSE2(__m128i* const in0, + __m128i* const in1, + __m128i* const in2, + __m128i* const in3) { + // Column-wise transpose. + const __m128i A0 = _mm_unpacklo_epi8(*in0, *in1); + const __m128i A1 = _mm_unpackhi_epi8(*in0, *in1); + const __m128i A2 = _mm_unpacklo_epi8(*in2, *in3); + const __m128i A3 = _mm_unpackhi_epi8(*in2, *in3); + const __m128i B0 = _mm_unpacklo_epi8(A0, A1); + const __m128i B1 = _mm_unpackhi_epi8(A0, A1); + const __m128i B2 = _mm_unpacklo_epi8(A2, A3); + const __m128i B3 = _mm_unpackhi_epi8(A2, A3); + // C0 = g7 g6 ... g1 g0 | b7 b6 ... b1 b0 + // C1 = a7 a6 ... a1 a0 | r7 r6 ... r1 r0 + const __m128i C0 = _mm_unpacklo_epi8(B0, B1); + const __m128i C1 = _mm_unpackhi_epi8(B0, B1); + const __m128i C2 = _mm_unpacklo_epi8(B2, B3); + const __m128i C3 = _mm_unpackhi_epi8(B2, B3); + // Gather the channels. 
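+  // (Per the layout above, C1/C3 carry the r bytes in their low halves and
+  // the a bytes in their high halves, while C0/C2 carry b low and g high;
+  // the 64-bit unpacks below splice the matching halves into the four
+  // planes.)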
+  *in0 = _mm_unpackhi_epi64(C1, C3);
+  *in1 = _mm_unpacklo_epi64(C1, C3);
+  *in2 = _mm_unpackhi_epi64(C0, C2);
+  *in3 = _mm_unpacklo_epi64(C0, C2);
+}
+
+#endif  // WEBP_USE_SSE2
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // WEBP_DSP_COMMON_SSE2_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_common_sse41.h b/vendor/github.com/sizeofint/webpanimation/dsp_common_sse41.h
new file mode 100644
index 00000000..2f173c02
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_common_sse41.h
@@ -0,0 +1,132 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// SSE4 code common to several files.
+//
+// Author: Vincent Rabaud (vrabaud@google.com)
+
+#ifndef WEBP_DSP_COMMON_SSE41_H_
+#define WEBP_DSP_COMMON_SSE41_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(WEBP_USE_SSE41)
+#include <smmintrin.h>
+
+//------------------------------------------------------------------------------
+// Channel mixing.
+// Shuffles the input buffer as A0 0 0 A1 0 0 A2 ...
+#define WEBP_SSE41_SHUFF(OUT, IN0, IN1)    \
+  OUT##0 = _mm_shuffle_epi8(*IN0, shuff0); \
+  OUT##1 = _mm_shuffle_epi8(*IN0, shuff1); \
+  OUT##2 = _mm_shuffle_epi8(*IN0, shuff2); \
+  OUT##3 = _mm_shuffle_epi8(*IN1, shuff0); \
+  OUT##4 = _mm_shuffle_epi8(*IN1, shuff1); \
+  OUT##5 = _mm_shuffle_epi8(*IN1, shuff2);
+
+// Pack the planar buffers
+// rrrr... rrrr... gggg... gggg... bbbb... bbbb....
+// triplet by triplet in the output buffer rgb as rgbrgbrgbrgb ...
+static WEBP_INLINE void VP8PlanarTo24b_SSE41(
+    __m128i* const in0, __m128i* const in1, __m128i* const in2,
+    __m128i* const in3, __m128i* const in4, __m128i* const in5) {
+  __m128i R0, R1, R2, R3, R4, R5;
+  __m128i G0, G1, G2, G3, G4, G5;
+  __m128i B0, B1, B2, B3, B4, B5;
+
+  // Process R.
+  {
+    const __m128i shuff0 = _mm_set_epi8(
+        5, -1, -1, 4, -1, -1, 3, -1, -1, 2, -1, -1, 1, -1, -1, 0);
+    const __m128i shuff1 = _mm_set_epi8(
+        -1, 10, -1, -1, 9, -1, -1, 8, -1, -1, 7, -1, -1, 6, -1, -1);
+    const __m128i shuff2 = _mm_set_epi8(
+        -1, -1, 15, -1, -1, 14, -1, -1, 13, -1, -1, 12, -1, -1, 11, -1);
+    WEBP_SSE41_SHUFF(R, in0, in1)
+  }
+
+  // Process G.
+  {
+    // Same as before, just shifted to the left by one and including the right
+    // padding.
+    const __m128i shuff0 = _mm_set_epi8(
+        -1, -1, 4, -1, -1, 3, -1, -1, 2, -1, -1, 1, -1, -1, 0, -1);
+    const __m128i shuff1 = _mm_set_epi8(
+        10, -1, -1, 9, -1, -1, 8, -1, -1, 7, -1, -1, 6, -1, -1, 5);
+    const __m128i shuff2 = _mm_set_epi8(
+        -1, 15, -1, -1, 14, -1, -1, 13, -1, -1, 12, -1, -1, 11, -1, -1);
+    WEBP_SSE41_SHUFF(G, in2, in3)
+  }
+
+  // Process B.
+  {
+    const __m128i shuff0 = _mm_set_epi8(
+        -1, 4, -1, -1, 3, -1, -1, 2, -1, -1, 1, -1, -1, 0, -1, -1);
+    const __m128i shuff1 = _mm_set_epi8(
+        -1, -1, 9, -1, -1, 8, -1, -1, 7, -1, -1, 6, -1, -1, 5, -1);
+    const __m128i shuff2 = _mm_set_epi8(
+        15, -1, -1, 14, -1, -1, 13, -1, -1, 12, -1, -1, 11, -1, -1, 10);
+    WEBP_SSE41_SHUFF(B, in4, in5)
+  }
+
+  // OR the different channels.
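+  // (Every -1 index in the shuffles above produces a zero byte, so each of
+  // R, G and B already has the other two channels' byte slots zeroed and a
+  // plain OR interleaves them into rgbrgb... without further masking.)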
+  {
+    const __m128i RG0 = _mm_or_si128(R0, G0);
+    const __m128i RG1 = _mm_or_si128(R1, G1);
+    const __m128i RG2 = _mm_or_si128(R2, G2);
+    const __m128i RG3 = _mm_or_si128(R3, G3);
+    const __m128i RG4 = _mm_or_si128(R4, G4);
+    const __m128i RG5 = _mm_or_si128(R5, G5);
+    *in0 = _mm_or_si128(RG0, B0);
+    *in1 = _mm_or_si128(RG1, B1);
+    *in2 = _mm_or_si128(RG2, B2);
+    *in3 = _mm_or_si128(RG3, B3);
+    *in4 = _mm_or_si128(RG4, B4);
+    *in5 = _mm_or_si128(RG5, B5);
+  }
+}
+
+#undef WEBP_SSE41_SHUFF
+
+// Convert four packed four-channel buffers like argbargbargbargb... into the
+// split channels aaaaa ... rrrr ... gggg .... bbbbb ......
+static WEBP_INLINE void VP8L32bToPlanar_SSE41(__m128i* const in0,
+                                              __m128i* const in1,
+                                              __m128i* const in2,
+                                              __m128i* const in3) {
+  // aaaarrrrggggbbbb
+  const __m128i shuff0 =
+      _mm_set_epi8(15, 11, 7, 3, 14, 10, 6, 2, 13, 9, 5, 1, 12, 8, 4, 0);
+  const __m128i A0 = _mm_shuffle_epi8(*in0, shuff0);
+  const __m128i A1 = _mm_shuffle_epi8(*in1, shuff0);
+  const __m128i A2 = _mm_shuffle_epi8(*in2, shuff0);
+  const __m128i A3 = _mm_shuffle_epi8(*in3, shuff0);
+  // A0A1R0R1
+  // G0G1B0B1
+  // A2A3R2R3
+  // G2G3B2B3
+  const __m128i B0 = _mm_unpacklo_epi32(A0, A1);
+  const __m128i B1 = _mm_unpackhi_epi32(A0, A1);
+  const __m128i B2 = _mm_unpacklo_epi32(A2, A3);
+  const __m128i B3 = _mm_unpackhi_epi32(A2, A3);
+  *in3 = _mm_unpacklo_epi64(B0, B2);
+  *in2 = _mm_unpackhi_epi64(B0, B2);
+  *in1 = _mm_unpacklo_epi64(B1, B3);
+  *in0 = _mm_unpackhi_epi64(B1, B3);
+}
+
+#endif  // WEBP_USE_SSE41
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // WEBP_DSP_COMMON_SSE41_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_cost.c b/vendor/github.com/sizeofint/webpanimation/dsp_cost.c
new file mode 100644
index 00000000..403e6211
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_cost.c
@@ -0,0 +1,411 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// ----------------------------------------------------------------------------- +// +// Author: Skal (pascal.massimino@gmail.com) + +#include "dsp_dsp.h" +#include "enc_cost_enc.h" + +//------------------------------------------------------------------------------ +// Boolean-cost cost table + +const uint16_t VP8EntropyCost[256] = { + 1792, 1792, 1792, 1536, 1536, 1408, 1366, 1280, 1280, 1216, + 1178, 1152, 1110, 1076, 1061, 1024, 1024, 992, 968, 951, + 939, 911, 896, 878, 871, 854, 838, 820, 811, 794, + 786, 768, 768, 752, 740, 732, 720, 709, 704, 690, + 683, 672, 666, 655, 647, 640, 631, 622, 615, 607, + 598, 592, 586, 576, 572, 564, 559, 555, 547, 541, + 534, 528, 522, 512, 512, 504, 500, 494, 488, 483, + 477, 473, 467, 461, 458, 452, 448, 443, 438, 434, + 427, 424, 419, 415, 410, 406, 403, 399, 394, 390, + 384, 384, 377, 374, 370, 366, 362, 359, 355, 351, + 347, 342, 342, 336, 333, 330, 326, 323, 320, 316, + 312, 308, 305, 302, 299, 296, 293, 288, 287, 283, + 280, 277, 274, 272, 268, 266, 262, 256, 256, 256, + 251, 248, 245, 242, 240, 237, 234, 232, 228, 226, + 223, 221, 218, 216, 214, 211, 208, 205, 203, 201, + 198, 196, 192, 191, 188, 187, 183, 181, 179, 176, + 175, 171, 171, 168, 165, 163, 160, 159, 156, 154, + 152, 150, 148, 146, 144, 142, 139, 138, 135, 133, + 131, 128, 128, 125, 123, 121, 119, 117, 115, 113, + 111, 110, 107, 105, 103, 102, 100, 98, 96, 94, + 92, 91, 89, 86, 86, 83, 82, 80, 77, 76, + 74, 73, 71, 69, 67, 66, 64, 63, 61, 59, + 57, 55, 54, 52, 51, 49, 47, 46, 44, 43, + 41, 40, 38, 36, 35, 33, 32, 30, 29, 27, + 25, 24, 22, 21, 19, 18, 16, 15, 13, 12, + 10, 9, 7, 6, 4, 3 +}; + +//------------------------------------------------------------------------------ +// Level cost tables + +// fixed costs for coding levels, deduce from the coding tree. +// This is only the part that doesn't depend on the probability state. 
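+// (These combine with the probability-dependent tables at run time: the
+// VP8LevelCost() helper declared in enc_cost_enc.h adds
+// VP8LevelFixedCosts[level] to t[level], with level clamped to
+// MAX_VARIABLE_LEVEL, which is how the GetResidualCost functions below
+// consume this table.)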
+const uint16_t VP8LevelFixedCosts[MAX_LEVEL + 1] = { + 0, 256, 256, 256, 256, 432, 618, 630, + 731, 640, 640, 828, 901, 948, 1021, 1101, + 1174, 1221, 1294, 1042, 1085, 1115, 1158, 1202, + 1245, 1275, 1318, 1337, 1380, 1410, 1453, 1497, + 1540, 1570, 1613, 1280, 1295, 1317, 1332, 1358, + 1373, 1395, 1410, 1454, 1469, 1491, 1506, 1532, + 1547, 1569, 1584, 1601, 1616, 1638, 1653, 1679, + 1694, 1716, 1731, 1775, 1790, 1812, 1827, 1853, + 1868, 1890, 1905, 1727, 1733, 1742, 1748, 1759, + 1765, 1774, 1780, 1800, 1806, 1815, 1821, 1832, + 1838, 1847, 1853, 1878, 1884, 1893, 1899, 1910, + 1916, 1925, 1931, 1951, 1957, 1966, 1972, 1983, + 1989, 1998, 2004, 2027, 2033, 2042, 2048, 2059, + 2065, 2074, 2080, 2100, 2106, 2115, 2121, 2132, + 2138, 2147, 2153, 2178, 2184, 2193, 2199, 2210, + 2216, 2225, 2231, 2251, 2257, 2266, 2272, 2283, + 2289, 2298, 2304, 2168, 2174, 2183, 2189, 2200, + 2206, 2215, 2221, 2241, 2247, 2256, 2262, 2273, + 2279, 2288, 2294, 2319, 2325, 2334, 2340, 2351, + 2357, 2366, 2372, 2392, 2398, 2407, 2413, 2424, + 2430, 2439, 2445, 2468, 2474, 2483, 2489, 2500, + 2506, 2515, 2521, 2541, 2547, 2556, 2562, 2573, + 2579, 2588, 2594, 2619, 2625, 2634, 2640, 2651, + 2657, 2666, 2672, 2692, 2698, 2707, 2713, 2724, + 2730, 2739, 2745, 2540, 2546, 2555, 2561, 2572, + 2578, 2587, 2593, 2613, 2619, 2628, 2634, 2645, + 2651, 2660, 2666, 2691, 2697, 2706, 2712, 2723, + 2729, 2738, 2744, 2764, 2770, 2779, 2785, 2796, + 2802, 2811, 2817, 2840, 2846, 2855, 2861, 2872, + 2878, 2887, 2893, 2913, 2919, 2928, 2934, 2945, + 2951, 2960, 2966, 2991, 2997, 3006, 3012, 3023, + 3029, 3038, 3044, 3064, 3070, 3079, 3085, 3096, + 3102, 3111, 3117, 2981, 2987, 2996, 3002, 3013, + 3019, 3028, 3034, 3054, 3060, 3069, 3075, 3086, + 3092, 3101, 3107, 3132, 3138, 3147, 3153, 3164, + 3170, 3179, 3185, 3205, 3211, 3220, 3226, 3237, + 3243, 3252, 3258, 3281, 3287, 3296, 3302, 3313, + 3319, 3328, 3334, 3354, 3360, 3369, 3375, 3386, + 3392, 3401, 3407, 3432, 3438, 3447, 3453, 3464, + 3470, 3479, 3485, 3505, 3511, 3520, 3526, 3537, + 3543, 3552, 3558, 2816, 2822, 2831, 2837, 2848, + 2854, 2863, 2869, 2889, 2895, 2904, 2910, 2921, + 2927, 2936, 2942, 2967, 2973, 2982, 2988, 2999, + 3005, 3014, 3020, 3040, 3046, 3055, 3061, 3072, + 3078, 3087, 3093, 3116, 3122, 3131, 3137, 3148, + 3154, 3163, 3169, 3189, 3195, 3204, 3210, 3221, + 3227, 3236, 3242, 3267, 3273, 3282, 3288, 3299, + 3305, 3314, 3320, 3340, 3346, 3355, 3361, 3372, + 3378, 3387, 3393, 3257, 3263, 3272, 3278, 3289, + 3295, 3304, 3310, 3330, 3336, 3345, 3351, 3362, + 3368, 3377, 3383, 3408, 3414, 3423, 3429, 3440, + 3446, 3455, 3461, 3481, 3487, 3496, 3502, 3513, + 3519, 3528, 3534, 3557, 3563, 3572, 3578, 3589, + 3595, 3604, 3610, 3630, 3636, 3645, 3651, 3662, + 3668, 3677, 3683, 3708, 3714, 3723, 3729, 3740, + 3746, 3755, 3761, 3781, 3787, 3796, 3802, 3813, + 3819, 3828, 3834, 3629, 3635, 3644, 3650, 3661, + 3667, 3676, 3682, 3702, 3708, 3717, 3723, 3734, + 3740, 3749, 3755, 3780, 3786, 3795, 3801, 3812, + 3818, 3827, 3833, 3853, 3859, 3868, 3874, 3885, + 3891, 3900, 3906, 3929, 3935, 3944, 3950, 3961, + 3967, 3976, 3982, 4002, 4008, 4017, 4023, 4034, + 4040, 4049, 4055, 4080, 4086, 4095, 4101, 4112, + 4118, 4127, 4133, 4153, 4159, 4168, 4174, 4185, + 4191, 4200, 4206, 4070, 4076, 4085, 4091, 4102, + 4108, 4117, 4123, 4143, 4149, 4158, 4164, 4175, + 4181, 4190, 4196, 4221, 4227, 4236, 4242, 4253, + 4259, 4268, 4274, 4294, 4300, 4309, 4315, 4326, + 4332, 4341, 4347, 4370, 4376, 4385, 4391, 4402, + 4408, 4417, 4423, 4443, 4449, 4458, 4464, 4475, + 4481, 4490, 
4496, 4521, 4527, 4536, 4542, 4553, + 4559, 4568, 4574, 4594, 4600, 4609, 4615, 4626, + 4632, 4641, 4647, 3515, 3521, 3530, 3536, 3547, + 3553, 3562, 3568, 3588, 3594, 3603, 3609, 3620, + 3626, 3635, 3641, 3666, 3672, 3681, 3687, 3698, + 3704, 3713, 3719, 3739, 3745, 3754, 3760, 3771, + 3777, 3786, 3792, 3815, 3821, 3830, 3836, 3847, + 3853, 3862, 3868, 3888, 3894, 3903, 3909, 3920, + 3926, 3935, 3941, 3966, 3972, 3981, 3987, 3998, + 4004, 4013, 4019, 4039, 4045, 4054, 4060, 4071, + 4077, 4086, 4092, 3956, 3962, 3971, 3977, 3988, + 3994, 4003, 4009, 4029, 4035, 4044, 4050, 4061, + 4067, 4076, 4082, 4107, 4113, 4122, 4128, 4139, + 4145, 4154, 4160, 4180, 4186, 4195, 4201, 4212, + 4218, 4227, 4233, 4256, 4262, 4271, 4277, 4288, + 4294, 4303, 4309, 4329, 4335, 4344, 4350, 4361, + 4367, 4376, 4382, 4407, 4413, 4422, 4428, 4439, + 4445, 4454, 4460, 4480, 4486, 4495, 4501, 4512, + 4518, 4527, 4533, 4328, 4334, 4343, 4349, 4360, + 4366, 4375, 4381, 4401, 4407, 4416, 4422, 4433, + 4439, 4448, 4454, 4479, 4485, 4494, 4500, 4511, + 4517, 4526, 4532, 4552, 4558, 4567, 4573, 4584, + 4590, 4599, 4605, 4628, 4634, 4643, 4649, 4660, + 4666, 4675, 4681, 4701, 4707, 4716, 4722, 4733, + 4739, 4748, 4754, 4779, 4785, 4794, 4800, 4811, + 4817, 4826, 4832, 4852, 4858, 4867, 4873, 4884, + 4890, 4899, 4905, 4769, 4775, 4784, 4790, 4801, + 4807, 4816, 4822, 4842, 4848, 4857, 4863, 4874, + 4880, 4889, 4895, 4920, 4926, 4935, 4941, 4952, + 4958, 4967, 4973, 4993, 4999, 5008, 5014, 5025, + 5031, 5040, 5046, 5069, 5075, 5084, 5090, 5101, + 5107, 5116, 5122, 5142, 5148, 5157, 5163, 5174, + 5180, 5189, 5195, 5220, 5226, 5235, 5241, 5252, + 5258, 5267, 5273, 5293, 5299, 5308, 5314, 5325, + 5331, 5340, 5346, 4604, 4610, 4619, 4625, 4636, + 4642, 4651, 4657, 4677, 4683, 4692, 4698, 4709, + 4715, 4724, 4730, 4755, 4761, 4770, 4776, 4787, + 4793, 4802, 4808, 4828, 4834, 4843, 4849, 4860, + 4866, 4875, 4881, 4904, 4910, 4919, 4925, 4936, + 4942, 4951, 4957, 4977, 4983, 4992, 4998, 5009, + 5015, 5024, 5030, 5055, 5061, 5070, 5076, 5087, + 5093, 5102, 5108, 5128, 5134, 5143, 5149, 5160, + 5166, 5175, 5181, 5045, 5051, 5060, 5066, 5077, + 5083, 5092, 5098, 5118, 5124, 5133, 5139, 5150, + 5156, 5165, 5171, 5196, 5202, 5211, 5217, 5228, + 5234, 5243, 5249, 5269, 5275, 5284, 5290, 5301, + 5307, 5316, 5322, 5345, 5351, 5360, 5366, 5377, + 5383, 5392, 5398, 5418, 5424, 5433, 5439, 5450, + 5456, 5465, 5471, 5496, 5502, 5511, 5517, 5528, + 5534, 5543, 5549, 5569, 5575, 5584, 5590, 5601, + 5607, 5616, 5622, 5417, 5423, 5432, 5438, 5449, + 5455, 5464, 5470, 5490, 5496, 5505, 5511, 5522, + 5528, 5537, 5543, 5568, 5574, 5583, 5589, 5600, + 5606, 5615, 5621, 5641, 5647, 5656, 5662, 5673, + 5679, 5688, 5694, 5717, 5723, 5732, 5738, 5749, + 5755, 5764, 5770, 5790, 5796, 5805, 5811, 5822, + 5828, 5837, 5843, 5868, 5874, 5883, 5889, 5900, + 5906, 5915, 5921, 5941, 5947, 5956, 5962, 5973, + 5979, 5988, 5994, 5858, 5864, 5873, 5879, 5890, + 5896, 5905, 5911, 5931, 5937, 5946, 5952, 5963, + 5969, 5978, 5984, 6009, 6015, 6024, 6030, 6041, + 6047, 6056, 6062, 6082, 6088, 6097, 6103, 6114, + 6120, 6129, 6135, 6158, 6164, 6173, 6179, 6190, + 6196, 6205, 6211, 6231, 6237, 6246, 6252, 6263, + 6269, 6278, 6284, 6309, 6315, 6324, 6330, 6341, + 6347, 6356, 6362, 6382, 6388, 6397, 6403, 6414, + 6420, 6429, 6435, 3515, 3521, 3530, 3536, 3547, + 3553, 3562, 3568, 3588, 3594, 3603, 3609, 3620, + 3626, 3635, 3641, 3666, 3672, 3681, 3687, 3698, + 3704, 3713, 3719, 3739, 3745, 3754, 3760, 3771, + 3777, 3786, 3792, 3815, 3821, 3830, 3836, 3847, + 3853, 3862, 
3868, 3888, 3894, 3903, 3909, 3920, + 3926, 3935, 3941, 3966, 3972, 3981, 3987, 3998, + 4004, 4013, 4019, 4039, 4045, 4054, 4060, 4071, + 4077, 4086, 4092, 3956, 3962, 3971, 3977, 3988, + 3994, 4003, 4009, 4029, 4035, 4044, 4050, 4061, + 4067, 4076, 4082, 4107, 4113, 4122, 4128, 4139, + 4145, 4154, 4160, 4180, 4186, 4195, 4201, 4212, + 4218, 4227, 4233, 4256, 4262, 4271, 4277, 4288, + 4294, 4303, 4309, 4329, 4335, 4344, 4350, 4361, + 4367, 4376, 4382, 4407, 4413, 4422, 4428, 4439, + 4445, 4454, 4460, 4480, 4486, 4495, 4501, 4512, + 4518, 4527, 4533, 4328, 4334, 4343, 4349, 4360, + 4366, 4375, 4381, 4401, 4407, 4416, 4422, 4433, + 4439, 4448, 4454, 4479, 4485, 4494, 4500, 4511, + 4517, 4526, 4532, 4552, 4558, 4567, 4573, 4584, + 4590, 4599, 4605, 4628, 4634, 4643, 4649, 4660, + 4666, 4675, 4681, 4701, 4707, 4716, 4722, 4733, + 4739, 4748, 4754, 4779, 4785, 4794, 4800, 4811, + 4817, 4826, 4832, 4852, 4858, 4867, 4873, 4884, + 4890, 4899, 4905, 4769, 4775, 4784, 4790, 4801, + 4807, 4816, 4822, 4842, 4848, 4857, 4863, 4874, + 4880, 4889, 4895, 4920, 4926, 4935, 4941, 4952, + 4958, 4967, 4973, 4993, 4999, 5008, 5014, 5025, + 5031, 5040, 5046, 5069, 5075, 5084, 5090, 5101, + 5107, 5116, 5122, 5142, 5148, 5157, 5163, 5174, + 5180, 5189, 5195, 5220, 5226, 5235, 5241, 5252, + 5258, 5267, 5273, 5293, 5299, 5308, 5314, 5325, + 5331, 5340, 5346, 4604, 4610, 4619, 4625, 4636, + 4642, 4651, 4657, 4677, 4683, 4692, 4698, 4709, + 4715, 4724, 4730, 4755, 4761, 4770, 4776, 4787, + 4793, 4802, 4808, 4828, 4834, 4843, 4849, 4860, + 4866, 4875, 4881, 4904, 4910, 4919, 4925, 4936, + 4942, 4951, 4957, 4977, 4983, 4992, 4998, 5009, + 5015, 5024, 5030, 5055, 5061, 5070, 5076, 5087, + 5093, 5102, 5108, 5128, 5134, 5143, 5149, 5160, + 5166, 5175, 5181, 5045, 5051, 5060, 5066, 5077, + 5083, 5092, 5098, 5118, 5124, 5133, 5139, 5150, + 5156, 5165, 5171, 5196, 5202, 5211, 5217, 5228, + 5234, 5243, 5249, 5269, 5275, 5284, 5290, 5301, + 5307, 5316, 5322, 5345, 5351, 5360, 5366, 5377, + 5383, 5392, 5398, 5418, 5424, 5433, 5439, 5450, + 5456, 5465, 5471, 5496, 5502, 5511, 5517, 5528, + 5534, 5543, 5549, 5569, 5575, 5584, 5590, 5601, + 5607, 5616, 5622, 5417, 5423, 5432, 5438, 5449, + 5455, 5464, 5470, 5490, 5496, 5505, 5511, 5522, + 5528, 5537, 5543, 5568, 5574, 5583, 5589, 5600, + 5606, 5615, 5621, 5641, 5647, 5656, 5662, 5673, + 5679, 5688, 5694, 5717, 5723, 5732, 5738, 5749, + 5755, 5764, 5770, 5790, 5796, 5805, 5811, 5822, + 5828, 5837, 5843, 5868, 5874, 5883, 5889, 5900, + 5906, 5915, 5921, 5941, 5947, 5956, 5962, 5973, + 5979, 5988, 5994, 5858, 5864, 5873, 5879, 5890, + 5896, 5905, 5911, 5931, 5937, 5946, 5952, 5963, + 5969, 5978, 5984, 6009, 6015, 6024, 6030, 6041, + 6047, 6056, 6062, 6082, 6088, 6097, 6103, 6114, + 6120, 6129, 6135, 6158, 6164, 6173, 6179, 6190, + 6196, 6205, 6211, 6231, 6237, 6246, 6252, 6263, + 6269, 6278, 6284, 6309, 6315, 6324, 6330, 6341, + 6347, 6356, 6362, 6382, 6388, 6397, 6403, 6414, + 6420, 6429, 6435, 5303, 5309, 5318, 5324, 5335, + 5341, 5350, 5356, 5376, 5382, 5391, 5397, 5408, + 5414, 5423, 5429, 5454, 5460, 5469, 5475, 5486, + 5492, 5501, 5507, 5527, 5533, 5542, 5548, 5559, + 5565, 5574, 5580, 5603, 5609, 5618, 5624, 5635, + 5641, 5650, 5656, 5676, 5682, 5691, 5697, 5708, + 5714, 5723, 5729, 5754, 5760, 5769, 5775, 5786, + 5792, 5801, 5807, 5827, 5833, 5842, 5848, 5859, + 5865, 5874, 5880, 5744, 5750, 5759, 5765, 5776, + 5782, 5791, 5797, 5817, 5823, 5832, 5838, 5849, + 5855, 5864, 5870, 5895, 5901, 5910, 5916, 5927, + 5933, 5942, 5948, 5968, 5974, 5983, 5989, 6000, + 6006, 6015, 
6021, 6044, 6050, 6059, 6065, 6076, + 6082, 6091, 6097, 6117, 6123, 6132, 6138, 6149, + 6155, 6164, 6170, 6195, 6201, 6210, 6216, 6227, + 6233, 6242, 6248, 6268, 6274, 6283, 6289, 6300, + 6306, 6315, 6321, 6116, 6122, 6131, 6137, 6148, + 6154, 6163, 6169, 6189, 6195, 6204, 6210, 6221, + 6227, 6236, 6242, 6267, 6273, 6282, 6288, 6299, + 6305, 6314, 6320, 6340, 6346, 6355, 6361, 6372, + 6378, 6387, 6393, 6416, 6422, 6431, 6437, 6448, + 6454, 6463, 6469, 6489, 6495, 6504, 6510, 6521, + 6527, 6536, 6542, 6567, 6573, 6582, 6588, 6599, + 6605, 6614, 6620, 6640, 6646, 6655, 6661, 6672, + 6678, 6687, 6693, 6557, 6563, 6572, 6578, 6589, + 6595, 6604, 6610, 6630, 6636, 6645, 6651, 6662, + 6668, 6677, 6683, 6708, 6714, 6723, 6729, 6740, + 6746, 6755, 6761, 6781, 6787, 6796, 6802, 6813, + 6819, 6828, 6834, 6857, 6863, 6872, 6878, 6889, + 6895, 6904, 6910, 6930, 6936, 6945, 6951, 6962, + 6968, 6977, 6983, 7008, 7014, 7023, 7029, 7040, + 7046, 7055, 7061, 7081, 7087, 7096, 7102, 7113, + 7119, 7128, 7134, 6392, 6398, 6407, 6413, 6424, + 6430, 6439, 6445, 6465, 6471, 6480, 6486, 6497, + 6503, 6512, 6518, 6543, 6549, 6558, 6564, 6575, + 6581, 6590, 6596, 6616, 6622, 6631, 6637, 6648, + 6654, 6663, 6669, 6692, 6698, 6707, 6713, 6724, + 6730, 6739, 6745, 6765, 6771, 6780, 6786, 6797, + 6803, 6812, 6818, 6843, 6849, 6858, 6864, 6875, + 6881, 6890, 6896, 6916, 6922, 6931, 6937, 6948, + 6954, 6963, 6969, 6833, 6839, 6848, 6854, 6865, + 6871, 6880, 6886, 6906, 6912, 6921, 6927, 6938, + 6944, 6953, 6959, 6984, 6990, 6999, 7005, 7016, + 7022, 7031, 7037, 7057, 7063, 7072, 7078, 7089, + 7095, 7104, 7110, 7133, 7139, 7148, 7154, 7165, + 7171, 7180, 7186, 7206, 7212, 7221, 7227, 7238, + 7244, 7253, 7259, 7284, 7290, 7299, 7305, 7316, + 7322, 7331, 7337, 7357, 7363, 7372, 7378, 7389, + 7395, 7404, 7410, 7205, 7211, 7220, 7226, 7237, + 7243, 7252, 7258, 7278, 7284, 7293, 7299, 7310, + 7316, 7325, 7331, 7356, 7362, 7371, 7377, 7388, + 7394, 7403, 7409, 7429, 7435, 7444, 7450, 7461, + 7467, 7476, 7482, 7505, 7511, 7520, 7526, 7537, + 7543, 7552, 7558, 7578, 7584, 7593, 7599, 7610, + 7616, 7625, 7631, 7656, 7662, 7671, 7677, 7688, + 7694, 7703, 7709, 7729, 7735, 7744, 7750, 7761 +}; + +//------------------------------------------------------------------------------ +// Tables for level coding + +const uint8_t VP8EncBands[16 + 1] = { + 0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7, + 0 // sentinel +}; + +//------------------------------------------------------------------------------ +// Mode costs + +static int GetResidualCost_C(int ctx0, const VP8Residual* const res) { + int n = res->first; + // should be prob[VP8EncBands[n]], but it's equivalent for n=0 or 1 + const int p0 = res->prob[n][ctx0][0]; + CostArrayPtr const costs = res->costs; + const uint16_t* t = costs[n][ctx0]; + // bit_cost(1, p0) is already incorporated in t[] tables, but only if ctx != 0 + // (as required by the syntax). For ctx0 == 0, we need to add it here or it'll + // be missing during the loop. + int cost = (ctx0 == 0) ? VP8BitCost(1, p0) : 0; + + if (res->last < 0) { + return VP8BitCost(0, p0); + } + for (; n < res->last; ++n) { + const int v = abs(res->coeffs[n]); + const int ctx = (v >= 2) ? 2 : v; + cost += VP8LevelCost(t, v); + t = costs[n + 1][ctx]; + } + // Last coefficient is always non-zero + { + const int v = abs(res->coeffs[n]); + assert(v != 0); + cost += VP8LevelCost(t, v); + if (n < 15) { + const int b = VP8EncBands[n + 1]; + const int ctx = (v == 1) ? 
1 : 2; + const int last_p0 = res->prob[b][ctx][0]; + cost += VP8BitCost(0, last_p0); + } + } + return cost; +} + +static void SetResidualCoeffs_C(const int16_t* const coeffs, + VP8Residual* const res) { + int n; + res->last = -1; + assert(res->first == 0 || coeffs[0] == 0); + for (n = 15; n >= 0; --n) { + if (coeffs[n]) { + res->last = n; + break; + } + } + res->coeffs = coeffs; +} + +//------------------------------------------------------------------------------ +// init function + +VP8GetResidualCostFunc VP8GetResidualCost; +VP8SetResidualCoeffsFunc VP8SetResidualCoeffs; + +extern void VP8EncDspCostInitMIPS32(void); +extern void VP8EncDspCostInitMIPSdspR2(void); +extern void VP8EncDspCostInitSSE2(void); +extern void VP8EncDspCostInitNEON(void); + +WEBP_DSP_INIT_FUNC(VP8EncDspCostInit) { + VP8GetResidualCost = GetResidualCost_C; + VP8SetResidualCoeffs = SetResidualCoeffs_C; + + // If defined, use CPUInfo() to overwrite some pointers with faster versions. + if (VP8GetCPUInfo != NULL) { +#if defined(WEBP_USE_MIPS32) + if (VP8GetCPUInfo(kMIPS32)) { + VP8EncDspCostInitMIPS32(); + } +#endif +#if defined(WEBP_USE_MIPS_DSP_R2) + if (VP8GetCPUInfo(kMIPSdspR2)) { + VP8EncDspCostInitMIPSdspR2(); + } +#endif +#if defined(WEBP_USE_SSE2) + if (VP8GetCPUInfo(kSSE2)) { + VP8EncDspCostInitSSE2(); + } +#endif +#if defined(WEBP_USE_NEON) + if (VP8GetCPUInfo(kNEON)) { + VP8EncDspCostInitNEON(); + } +#endif + } +} + +//------------------------------------------------------------------------------ diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_cost_mips32.c b/vendor/github.com/sizeofint/webpanimation/dsp_cost_mips32.c new file mode 100644 index 00000000..9b0b227c --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_cost_mips32.c @@ -0,0 +1,154 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Author: Djordje Pesut (djordje.pesut@imgtec.com) + +#include "dsp_dsp.h" + +#if defined(WEBP_USE_MIPS32) + +#include "enc_cost_enc.h" + +static int GetResidualCost_MIPS32(int ctx0, const VP8Residual* const res) { + int temp0, temp1; + int v_reg, ctx_reg; + int n = res->first; + // should be prob[VP8EncBands[n]], but it's equivalent for n=0 or 1 + int p0 = res->prob[n][ctx0][0]; + CostArrayPtr const costs = res->costs; + const uint16_t* t = costs[n][ctx0]; + // bit_cost(1, p0) is already incorporated in t[] tables, but only if ctx != 0 + // (as required by the syntax). For ctx0 == 0, we need to add it here or it'll + // be missing during the loop. + int cost = (ctx0 == 0) ? 
VP8BitCost(1, p0) : 0; + const int16_t* res_coeffs = res->coeffs; + const int res_last = res->last; + const int const_max_level = MAX_VARIABLE_LEVEL; + const int const_2 = 2; + const uint16_t** p_costs = &costs[n][0]; + const size_t inc_p_costs = NUM_CTX * sizeof(*p_costs); + + if (res->last < 0) { + return VP8BitCost(0, p0); + } + + __asm__ volatile ( + ".set push \n\t" + ".set noreorder \n\t" + "subu %[temp1], %[res_last], %[n] \n\t" + "sll %[temp0], %[n], 1 \n\t" + "blez %[temp1], 2f \n\t" + " addu %[res_coeffs], %[res_coeffs], %[temp0] \n\t" + "1: \n\t" + "lh %[v_reg], 0(%[res_coeffs]) \n\t" + "addiu %[n], %[n], 1 \n\t" + "negu %[temp0], %[v_reg] \n\t" + "slti %[temp1], %[v_reg], 0 \n\t" + "movn %[v_reg], %[temp0], %[temp1] \n\t" + "sltiu %[temp0], %[v_reg], 2 \n\t" + "move %[ctx_reg], %[v_reg] \n\t" + "movz %[ctx_reg], %[const_2], %[temp0] \n\t" + "sll %[temp1], %[v_reg], 1 \n\t" + "addu %[temp1], %[temp1], %[VP8LevelFixedCosts] \n\t" + "lhu %[temp1], 0(%[temp1]) \n\t" + "slt %[temp0], %[v_reg], %[const_max_level] \n\t" + "movz %[v_reg], %[const_max_level], %[temp0] \n\t" + "addu %[cost], %[cost], %[temp1] \n\t" + "sll %[v_reg], %[v_reg], 1 \n\t" + "sll %[ctx_reg], %[ctx_reg], 2 \n\t" + "addu %[v_reg], %[v_reg], %[t] \n\t" + "lhu %[temp0], 0(%[v_reg]) \n\t" + "addu %[p_costs], %[p_costs], %[inc_p_costs] \n\t" + "addu %[t], %[p_costs], %[ctx_reg] \n\t" + "addu %[cost], %[cost], %[temp0] \n\t" + "addiu %[res_coeffs], %[res_coeffs], 2 \n\t" + "bne %[n], %[res_last], 1b \n\t" + " lw %[t], 0(%[t]) \n\t" + "2: \n\t" + ".set pop \n\t" + : [cost]"+&r"(cost), [t]"+&r"(t), [n]"+&r"(n), [v_reg]"=&r"(v_reg), + [ctx_reg]"=&r"(ctx_reg), [p_costs]"+&r"(p_costs), [temp0]"=&r"(temp0), + [temp1]"=&r"(temp1), [res_coeffs]"+&r"(res_coeffs) + : [const_2]"r"(const_2), [const_max_level]"r"(const_max_level), + [VP8LevelFixedCosts]"r"(VP8LevelFixedCosts), [res_last]"r"(res_last), + [inc_p_costs]"r"(inc_p_costs) + : "memory" + ); + + // Last coefficient is always non-zero + { + const int v = abs(res->coeffs[n]); + assert(v != 0); + cost += VP8LevelCost(t, v); + if (n < 15) { + const int b = VP8EncBands[n + 1]; + const int ctx = (v == 1) ? 
1 : 2; + const int last_p0 = res->prob[b][ctx][0]; + cost += VP8BitCost(0, last_p0); + } + } + return cost; +} + +static void SetResidualCoeffs_MIPS32(const int16_t* const coeffs, + VP8Residual* const res) { + const int16_t* p_coeffs = (int16_t*)coeffs; + int temp0, temp1, temp2, n, n1; + assert(res->first == 0 || coeffs[0] == 0); + + __asm__ volatile ( + ".set push \n\t" + ".set noreorder \n\t" + "addiu %[p_coeffs], %[p_coeffs], 28 \n\t" + "li %[n], 15 \n\t" + "li %[temp2], -1 \n\t" + "0: \n\t" + "ulw %[temp0], 0(%[p_coeffs]) \n\t" + "beqz %[temp0], 1f \n\t" +#if defined(WORDS_BIGENDIAN) + " sll %[temp1], %[temp0], 16 \n\t" +#else + " srl %[temp1], %[temp0], 16 \n\t" +#endif + "addiu %[n1], %[n], -1 \n\t" + "movz %[temp0], %[n1], %[temp1] \n\t" + "movn %[temp0], %[n], %[temp1] \n\t" + "j 2f \n\t" + " addiu %[temp2], %[temp0], 0 \n\t" + "1: \n\t" + "addiu %[n], %[n], -2 \n\t" + "bgtz %[n], 0b \n\t" + " addiu %[p_coeffs], %[p_coeffs], -4 \n\t" + "2: \n\t" + ".set pop \n\t" + : [p_coeffs]"+&r"(p_coeffs), [temp0]"=&r"(temp0), + [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [n]"=&r"(n), [n1]"=&r"(n1) + : + : "memory" + ); + res->last = temp2; + res->coeffs = coeffs; +} + +//------------------------------------------------------------------------------ +// Entry point + +extern void VP8EncDspCostInitMIPS32(void); + +WEBP_TSAN_IGNORE_FUNCTION void VP8EncDspCostInitMIPS32(void) { + VP8GetResidualCost = GetResidualCost_MIPS32; + VP8SetResidualCoeffs = SetResidualCoeffs_MIPS32; +} + +#else // !WEBP_USE_MIPS32 + +WEBP_DSP_INIT_STUB(VP8EncDspCostInitMIPS32) + +#endif // WEBP_USE_MIPS32 diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_cost_mips_dsp_r2.c b/vendor/github.com/sizeofint/webpanimation/dsp_cost_mips_dsp_r2.c new file mode 100644 index 00000000..4de73d4a --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_cost_mips_dsp_r2.c @@ -0,0 +1,107 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Author: Djordje Pesut (djordje.pesut@imgtec.com) + +#include "dsp_dsp.h" + +#if defined(WEBP_USE_MIPS_DSP_R2) + +#include "enc_cost_enc.h" + +static int GetResidualCost_MIPSdspR2(int ctx0, const VP8Residual* const res) { + int temp0, temp1; + int v_reg, ctx_reg; + int n = res->first; + // should be prob[VP8EncBands[n]], but it's equivalent for n=0 or 1 + int p0 = res->prob[n][ctx0][0]; + CostArrayPtr const costs = res->costs; + const uint16_t* t = costs[n][ctx0]; + // bit_cost(1, p0) is already incorporated in t[] tables, but only if ctx != 0 + // (as required by the syntax). For ctx0 == 0, we need to add it here or it'll + // be missing during the loop. + int cost = (ctx0 == 0) ? 
VP8BitCost(1, p0) : 0; + const int16_t* res_coeffs = res->coeffs; + const int res_last = res->last; + const int const_max_level = MAX_VARIABLE_LEVEL; + const int const_2 = 2; + const uint16_t** p_costs = &costs[n][0]; + const size_t inc_p_costs = NUM_CTX * sizeof(*p_costs); + + if (res->last < 0) { + return VP8BitCost(0, p0); + } + + __asm__ volatile ( + ".set push \n\t" + ".set noreorder \n\t" + "subu %[temp1], %[res_last], %[n] \n\t" + "blez %[temp1], 2f \n\t" + " nop \n\t" + "1: \n\t" + "sll %[temp0], %[n], 1 \n\t" + "lhx %[v_reg], %[temp0](%[res_coeffs]) \n\t" + "addiu %[n], %[n], 1 \n\t" + "absq_s.w %[v_reg], %[v_reg] \n\t" + "sltiu %[temp0], %[v_reg], 2 \n\t" + "move %[ctx_reg], %[v_reg] \n\t" + "movz %[ctx_reg], %[const_2], %[temp0] \n\t" + "sll %[temp1], %[v_reg], 1 \n\t" + "lhx %[temp1], %[temp1](%[VP8LevelFixedCosts]) \n\t" + "slt %[temp0], %[v_reg], %[const_max_level] \n\t" + "movz %[v_reg], %[const_max_level], %[temp0] \n\t" + "addu %[cost], %[cost], %[temp1] \n\t" + "sll %[v_reg], %[v_reg], 1 \n\t" + "sll %[ctx_reg], %[ctx_reg], 2 \n\t" + "lhx %[temp0], %[v_reg](%[t]) \n\t" + "addu %[p_costs], %[p_costs], %[inc_p_costs] \n\t" + "addu %[t], %[p_costs], %[ctx_reg] \n\t" + "addu %[cost], %[cost], %[temp0] \n\t" + "bne %[n], %[res_last], 1b \n\t" + " lw %[t], 0(%[t]) \n\t" + "2: \n\t" + ".set pop \n\t" + : [cost]"+&r"(cost), [t]"+&r"(t), [n]"+&r"(n), [v_reg]"=&r"(v_reg), + [ctx_reg]"=&r"(ctx_reg), [p_costs]"+&r"(p_costs), [temp0]"=&r"(temp0), + [temp1]"=&r"(temp1) + : [const_2]"r"(const_2), [const_max_level]"r"(const_max_level), + [VP8LevelFixedCosts]"r"(VP8LevelFixedCosts), [res_last]"r"(res_last), + [res_coeffs]"r"(res_coeffs), [inc_p_costs]"r"(inc_p_costs) + : "memory" + ); + + // Last coefficient is always non-zero + { + const int v = abs(res->coeffs[n]); + assert(v != 0); + cost += VP8LevelCost(t, v); + if (n < 15) { + const int b = VP8EncBands[n + 1]; + const int ctx = (v == 1) ? 1 : 2; + const int last_p0 = res->prob[b][ctx][0]; + cost += VP8BitCost(0, last_p0); + } + } + return cost; +} + +//------------------------------------------------------------------------------ +// Entry point + +extern void VP8EncDspCostInitMIPSdspR2(void); + +WEBP_TSAN_IGNORE_FUNCTION void VP8EncDspCostInitMIPSdspR2(void) { + VP8GetResidualCost = GetResidualCost_MIPSdspR2; +} + +#else // !WEBP_USE_MIPS_DSP_R2 + +WEBP_DSP_INIT_STUB(VP8EncDspCostInitMIPSdspR2) + +#endif // WEBP_USE_MIPS_DSP_R2 diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_cost_neon.c b/vendor/github.com/sizeofint/webpanimation/dsp_cost_neon.c new file mode 100644 index 00000000..5f6369f8 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_cost_neon.c @@ -0,0 +1,122 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. 
+// ----------------------------------------------------------------------------- +// +// ARM NEON version of cost functions + +#include "dsp_dsp.h" + +#if defined(WEBP_USE_NEON) + +#include "dsp_neon.h" +#include "enc_cost_enc.h" + +static const uint8_t position[16] = { 1, 2, 3, 4, 5, 6, 7, 8, + 9, 10, 11, 12, 13, 14, 15, 16 }; + +static void SetResidualCoeffs_NEON(const int16_t* const coeffs, + VP8Residual* const res) { + const int16x8_t minus_one = vdupq_n_s16(-1); + const int16x8_t coeffs_0 = vld1q_s16(coeffs); + const int16x8_t coeffs_1 = vld1q_s16(coeffs + 8); + const uint16x8_t eob_0 = vtstq_s16(coeffs_0, minus_one); + const uint16x8_t eob_1 = vtstq_s16(coeffs_1, minus_one); + const uint8x16_t eob = vcombine_u8(vqmovn_u16(eob_0), vqmovn_u16(eob_1)); + const uint8x16_t masked = vandq_u8(eob, vld1q_u8(position)); + +#ifdef __aarch64__ + res->last = vmaxvq_u8(masked) - 1; +#else + const uint8x8_t eob_8x8 = vmax_u8(vget_low_u8(masked), vget_high_u8(masked)); + const uint16x8_t eob_16x8 = vmovl_u8(eob_8x8); + const uint16x4_t eob_16x4 = + vmax_u16(vget_low_u16(eob_16x8), vget_high_u16(eob_16x8)); + const uint32x4_t eob_32x4 = vmovl_u16(eob_16x4); + uint32x2_t eob_32x2 = + vmax_u32(vget_low_u32(eob_32x4), vget_high_u32(eob_32x4)); + eob_32x2 = vpmax_u32(eob_32x2, eob_32x2); + + vst1_lane_s32(&res->last, vreinterpret_s32_u32(eob_32x2), 0); + --res->last; +#endif // __aarch64__ + + res->coeffs = coeffs; +} + +static int GetResidualCost_NEON(int ctx0, const VP8Residual* const res) { + uint8_t levels[16], ctxs[16]; + uint16_t abs_levels[16]; + int n = res->first; + // should be prob[VP8EncBands[n]], but it's equivalent for n=0 or 1 + const int p0 = res->prob[n][ctx0][0]; + CostArrayPtr const costs = res->costs; + const uint16_t* t = costs[n][ctx0]; + // bit_cost(1, p0) is already incorporated in t[] tables, but only if ctx != 0 + // (as required by the syntax). For ctx0 == 0, we need to add it here or it'll + // be missing during the loop. + int cost = (ctx0 == 0) ? VP8BitCost(1, p0) : 0; + + if (res->last < 0) { + return VP8BitCost(0, p0); + } + + { // precompute clamped levels and contexts, packed to 8b. 
+    const uint8x16_t kCst2 = vdupq_n_u8(2);
+    const uint8x16_t kCst67 = vdupq_n_u8(MAX_VARIABLE_LEVEL);
+    const int16x8_t c0 = vld1q_s16(res->coeffs);
+    const int16x8_t c1 = vld1q_s16(res->coeffs + 8);
+    const uint16x8_t E0 = vreinterpretq_u16_s16(vabsq_s16(c0));
+    const uint16x8_t E1 = vreinterpretq_u16_s16(vabsq_s16(c1));
+    const uint8x16_t F = vcombine_u8(vqmovn_u16(E0), vqmovn_u16(E1));
+    const uint8x16_t G = vminq_u8(F, kCst2);   // context = 0,1,2
+    const uint8x16_t H = vminq_u8(F, kCst67);  // clamp_level in [0..67]
+
+    vst1q_u8(ctxs, G);
+    vst1q_u8(levels, H);
+
+    vst1q_u16(abs_levels, E0);
+    vst1q_u16(abs_levels + 8, E1);
+  }
+  for (; n < res->last; ++n) {
+    const int ctx = ctxs[n];
+    const int level = levels[n];
+    const int flevel = abs_levels[n];  // full level
+    cost += VP8LevelFixedCosts[flevel] + t[level];  // simplified VP8LevelCost()
+    t = costs[n + 1][ctx];
+  }
+  // Last coefficient is always non-zero
+  {
+    const int level = levels[n];
+    const int flevel = abs_levels[n];
+    assert(flevel != 0);
+    cost += VP8LevelFixedCosts[flevel] + t[level];
+    if (n < 15) {
+      const int b = VP8EncBands[n + 1];
+      const int ctx = ctxs[n];
+      const int last_p0 = res->prob[b][ctx][0];
+      cost += VP8BitCost(0, last_p0);
+    }
+  }
+  return cost;
+}
+
+//------------------------------------------------------------------------------
+// Entry point
+
+extern void VP8EncDspCostInitNEON(void);
+
+WEBP_TSAN_IGNORE_FUNCTION void VP8EncDspCostInitNEON(void) {
+  VP8SetResidualCoeffs = SetResidualCoeffs_NEON;
+  VP8GetResidualCost = GetResidualCost_NEON;
+}
+
+#else  // !WEBP_USE_NEON
+
+WEBP_DSP_INIT_STUB(VP8EncDspCostInitNEON)
+
+#endif  // WEBP_USE_NEON
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_cost_sse2.c b/vendor/github.com/sizeofint/webpanimation/dsp_cost_sse2.c
new file mode 100644
index 00000000..c59cf30b
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_cost_sse2.c
@@ -0,0 +1,119 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// SSE2 version of cost functions
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include "dsp_dsp.h"
+
+#if defined(WEBP_USE_SSE2)
+#include <emmintrin.h>
+
+#include "enc_cost_enc.h"
+#include "enc_vp8i_enc.h"
+#include "utils_utils.h"
+
+//------------------------------------------------------------------------------
+
+static void SetResidualCoeffs_SSE2(const int16_t* const coeffs,
+                                   VP8Residual* const res) {
+  const __m128i c0 = _mm_loadu_si128((const __m128i*)(coeffs + 0));
+  const __m128i c1 = _mm_loadu_si128((const __m128i*)(coeffs + 8));
+  // Use SSE2 to compare 16 values with a single instruction.
+  const __m128i zero = _mm_setzero_si128();
+  const __m128i m0 = _mm_packs_epi16(c0, c1);
+  const __m128i m1 = _mm_cmpeq_epi8(m0, zero);
+  // Get the comparison results as a bitmask into 16bits. Negate the mask to get
+  // the position of entries that are not equal to zero. We don't need to mask
+  // out least significant bits according to res->first, since coeffs[0] is 0
+  // if res->first > 0.
+ const uint32_t mask = 0x0000ffffu ^ (uint32_t)_mm_movemask_epi8(m1); + // The position of the most significant non-zero bit indicates the position of + // the last non-zero value. + assert(res->first == 0 || coeffs[0] == 0); + res->last = mask ? BitsLog2Floor(mask) : -1; + res->coeffs = coeffs; +} + +static int GetResidualCost_SSE2(int ctx0, const VP8Residual* const res) { + uint8_t levels[16], ctxs[16]; + uint16_t abs_levels[16]; + int n = res->first; + // should be prob[VP8EncBands[n]], but it's equivalent for n=0 or 1 + const int p0 = res->prob[n][ctx0][0]; + CostArrayPtr const costs = res->costs; + const uint16_t* t = costs[n][ctx0]; + // bit_cost(1, p0) is already incorporated in t[] tables, but only if ctx != 0 + // (as required by the syntax). For ctx0 == 0, we need to add it here or it'll + // be missing during the loop. + int cost = (ctx0 == 0) ? VP8BitCost(1, p0) : 0; + + if (res->last < 0) { + return VP8BitCost(0, p0); + } + + { // precompute clamped levels and contexts, packed to 8b. + const __m128i zero = _mm_setzero_si128(); + const __m128i kCst2 = _mm_set1_epi8(2); + const __m128i kCst67 = _mm_set1_epi8(MAX_VARIABLE_LEVEL); + const __m128i c0 = _mm_loadu_si128((const __m128i*)&res->coeffs[0]); + const __m128i c1 = _mm_loadu_si128((const __m128i*)&res->coeffs[8]); + const __m128i D0 = _mm_sub_epi16(zero, c0); + const __m128i D1 = _mm_sub_epi16(zero, c1); + const __m128i E0 = _mm_max_epi16(c0, D0); // abs(v), 16b + const __m128i E1 = _mm_max_epi16(c1, D1); + const __m128i F = _mm_packs_epi16(E0, E1); + const __m128i G = _mm_min_epu8(F, kCst2); // context = 0,1,2 + const __m128i H = _mm_min_epu8(F, kCst67); // clamp_level in [0..67] + + _mm_storeu_si128((__m128i*)&ctxs[0], G); + _mm_storeu_si128((__m128i*)&levels[0], H); + + _mm_storeu_si128((__m128i*)&abs_levels[0], E0); + _mm_storeu_si128((__m128i*)&abs_levels[8], E1); + } + for (; n < res->last; ++n) { + const int ctx = ctxs[n]; + const int level = levels[n]; + const int flevel = abs_levels[n]; // full level + cost += VP8LevelFixedCosts[flevel] + t[level]; // simplified VP8LevelCost() + t = costs[n + 1][ctx]; + } + // Last coefficient is always non-zero + { + const int level = levels[n]; + const int flevel = abs_levels[n]; + assert(flevel != 0); + cost += VP8LevelFixedCosts[flevel] + t[level]; + if (n < 15) { + const int b = VP8EncBands[n + 1]; + const int ctx = ctxs[n]; + const int last_p0 = res->prob[b][ctx][0]; + cost += VP8BitCost(0, last_p0); + } + } + return cost; +} + +//------------------------------------------------------------------------------ +// Entry point + +extern void VP8EncDspCostInitSSE2(void); + +WEBP_TSAN_IGNORE_FUNCTION void VP8EncDspCostInitSSE2(void) { + VP8SetResidualCoeffs = SetResidualCoeffs_SSE2; + VP8GetResidualCost = GetResidualCost_SSE2; +} + +#else // !WEBP_USE_SSE2 + +WEBP_DSP_INIT_STUB(VP8EncDspCostInitSSE2) + +#endif // WEBP_USE_SSE2 diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_cpu.c b/vendor/github.com/sizeofint/webpanimation/dsp_cpu.c new file mode 100644 index 00000000..952a5d59 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_cpu.c @@ -0,0 +1,252 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. 
+// -----------------------------------------------------------------------------
+//
+// CPU detection
+//
+// Author: Christian Duvivier (cduvivier@google.com)
+
+#include "dsp_dsp.h"
+
+#if defined(WEBP_HAVE_NEON_RTCD)
+#include <stdio.h>
+#include <string.h>
+#endif
+
+#if defined(WEBP_ANDROID_NEON)
+#include <cpu-features.h>
+#endif
+
+//------------------------------------------------------------------------------
+// SSE2 detection.
+//
+
+// apple/darwin gcc-4.0.1 defines __PIC__, but not __pic__ with -fPIC.
+#if (defined(__pic__) || defined(__PIC__)) && defined(__i386__)
+static WEBP_INLINE void GetCPUInfo(int cpu_info[4], int info_type) {
+  __asm__ volatile (
+    "mov %%ebx, %%edi\n"
+    "cpuid\n"
+    "xchg %%edi, %%ebx\n"
+    : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
+    : "a"(info_type), "c"(0));
+}
+#elif defined(__x86_64__) && \
+      (defined(__code_model_medium__) || defined(__code_model_large__)) && \
+      defined(__PIC__)
+static WEBP_INLINE void GetCPUInfo(int cpu_info[4], int info_type) {
+  __asm__ volatile (
+    "xchg{q}\t{%%rbx}, %q1\n"
+    "cpuid\n"
+    "xchg{q}\t{%%rbx}, %q1\n"
+    : "=a"(cpu_info[0]), "=&r"(cpu_info[1]), "=c"(cpu_info[2]),
+      "=d"(cpu_info[3])
+    : "a"(info_type), "c"(0));
+}
+#elif defined(__i386__) || defined(__x86_64__)
+static WEBP_INLINE void GetCPUInfo(int cpu_info[4], int info_type) {
+  __asm__ volatile (
+    "cpuid\n"
+    : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
+    : "a"(info_type), "c"(0));
+}
+#elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))
+
+#if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 150030729  // >= VS2008 SP1
+#include <intrin.h>
+#define GetCPUInfo(info, type) __cpuidex(info, type, 0)  // set ecx=0
+#define WEBP_HAVE_MSC_CPUID
+#elif _MSC_VER > 1310
+#include <intrin.h>
+#define GetCPUInfo __cpuid
+#define WEBP_HAVE_MSC_CPUID
+#endif
+
+#endif
+
+// NaCl has no support for xgetbv or the raw opcode.
+#if !defined(__native_client__) && (defined(__i386__) || defined(__x86_64__))
+static WEBP_INLINE uint64_t xgetbv(void) {
+  const uint32_t ecx = 0;
+  uint32_t eax, edx;
+  // Use the raw opcode for xgetbv for compatibility with older toolchains.
+  __asm__ volatile (
+    ".byte 0x0f, 0x01, 0xd0\n"
+    : "=a"(eax), "=d"(edx) : "c" (ecx));
+  return ((uint64_t)edx << 32) | eax;
+}
+#elif (defined(_M_X64) || defined(_M_IX86)) && \
+    defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 160040219  // >= VS2010 SP1
+#include <immintrin.h>
+#define xgetbv() _xgetbv(0)
+#elif defined(_MSC_VER) && defined(_M_IX86)
+static WEBP_INLINE uint64_t xgetbv(void) {
+  uint32_t eax_, edx_;
+  __asm {
+    xor ecx, ecx  // ecx = 0
+    // Use the raw opcode for xgetbv for compatibility with older toolchains.
+    __asm _emit 0x0f __asm _emit 0x01 __asm _emit 0xd0
+    mov eax_, eax
+    mov edx_, edx
+  }
+  return ((uint64_t)edx_ << 32) | eax_;
+}
+#else
+#define xgetbv() 0U  // no AVX for older x64 or unrecognized toolchains.
+#endif
+
+#if defined(__i386__) || defined(__x86_64__) || defined(WEBP_HAVE_MSC_CPUID)
+
+// helper function for run-time detection of slow SSSE3 platforms
+static int CheckSlowModel(int info) {
+  // Table listing display models with longer latencies for the bsr instruction
+  // (ie 2 cycles vs 10/16 cycles) and some SSSE3 instructions like pshufb.
+  // Refer to Intel 64 and IA-32 Architectures Optimization Reference Manual.
+ static const uint8_t kSlowModels[] = { + 0x37, 0x4a, 0x4d, // Silvermont Microarchitecture + 0x1c, 0x26, 0x27 // Atom Microarchitecture + }; + const uint32_t model = ((info & 0xf0000) >> 12) | ((info >> 4) & 0xf); + const uint32_t family = (info >> 8) & 0xf; + if (family == 0x06) { + size_t i; + for (i = 0; i < sizeof(kSlowModels) / sizeof(kSlowModels[0]); ++i) { + if (model == kSlowModels[i]) return 1; + } + } + return 0; +} + +static int x86CPUInfo(CPUFeature feature) { + int max_cpuid_value; + int cpu_info[4]; + int is_intel = 0; + + // get the highest feature value cpuid supports + GetCPUInfo(cpu_info, 0); + max_cpuid_value = cpu_info[0]; + if (max_cpuid_value < 1) { + return 0; + } else { + const int VENDOR_ID_INTEL_EBX = 0x756e6547; // uneG + const int VENDOR_ID_INTEL_EDX = 0x49656e69; // Ieni + const int VENDOR_ID_INTEL_ECX = 0x6c65746e; // letn + is_intel = (cpu_info[1] == VENDOR_ID_INTEL_EBX && + cpu_info[2] == VENDOR_ID_INTEL_ECX && + cpu_info[3] == VENDOR_ID_INTEL_EDX); // genuine Intel? + } + + GetCPUInfo(cpu_info, 1); + if (feature == kSSE2) { + return !!(cpu_info[3] & (1 << 26)); + } + if (feature == kSSE3) { + return !!(cpu_info[2] & (1 << 0)); + } + if (feature == kSlowSSSE3) { + if (is_intel && (cpu_info[2] & (1 << 9))) { // SSSE3? + return CheckSlowModel(cpu_info[0]); + } + return 0; + } + + if (feature == kSSE4_1) { + return !!(cpu_info[2] & (1 << 19)); + } + if (feature == kAVX) { + // bits 27 (OSXSAVE) & 28 (256-bit AVX) + if ((cpu_info[2] & 0x18000000) == 0x18000000) { + // XMM state and YMM state enabled by the OS. + return (xgetbv() & 0x6) == 0x6; + } + } + if (feature == kAVX2) { + if (x86CPUInfo(kAVX) && max_cpuid_value >= 7) { + GetCPUInfo(cpu_info, 7); + return !!(cpu_info[1] & (1 << 5)); + } + } + return 0; +} +VP8CPUInfo VP8GetCPUInfo = x86CPUInfo; +#elif defined(WEBP_ANDROID_NEON) // NB: needs to be before generic NEON test. +static int AndroidCPUInfo(CPUFeature feature) { + const AndroidCpuFamily cpu_family = android_getCpuFamily(); + const uint64_t cpu_features = android_getCpuFeatures(); + if (feature == kNEON) { + return cpu_family == ANDROID_CPU_FAMILY_ARM && + (cpu_features & ANDROID_CPU_ARM_FEATURE_NEON) != 0; + } + return 0; +} +VP8CPUInfo VP8GetCPUInfo = AndroidCPUInfo; +#elif defined(EMSCRIPTEN) // also needs to be before generic NEON test +// Use compile flags as an indicator of SIMD support instead of a runtime check. 
+static int wasmCPUInfo(CPUFeature feature) {
+  switch (feature) {
+#ifdef WEBP_USE_SSE2
+    case kSSE2:
+      return 1;
+#endif
+#ifdef WEBP_USE_SSE41
+    case kSSE3:
+    case kSlowSSSE3:
+    case kSSE4_1:
+      return 1;
+#endif
+#ifdef WEBP_USE_NEON
+    case kNEON:
+      return 1;
+#endif
+    default:
+      break;
+  }
+  return 0;
+}
+VP8CPUInfo VP8GetCPUInfo = wasmCPUInfo;
+#elif defined(WEBP_USE_NEON)
+// define a dummy function to enable turning off NEON at runtime by setting
+// VP8DecGetCPUInfo = NULL
+static int armCPUInfo(CPUFeature feature) {
+  if (feature != kNEON) return 0;
+#if defined(__linux__) && defined(WEBP_HAVE_NEON_RTCD)
+  {
+    int has_neon = 0;
+    char line[200];
+    FILE* const cpuinfo = fopen("/proc/cpuinfo", "r");
+    if (cpuinfo == NULL) return 0;
+    while (fgets(line, sizeof(line), cpuinfo)) {
+      if (!strncmp(line, "Features", 8)) {
+        if (strstr(line, " neon ") != NULL) {
+          has_neon = 1;
+          break;
+        }
+      }
+    }
+    fclose(cpuinfo);
+    return has_neon;
+  }
+#else
+  return 1;
+#endif
+}
+VP8CPUInfo VP8GetCPUInfo = armCPUInfo;
+#elif defined(WEBP_USE_MIPS32) || defined(WEBP_USE_MIPS_DSP_R2) || \
+      defined(WEBP_USE_MSA)
+static int mipsCPUInfo(CPUFeature feature) {
+  if ((feature == kMIPS32) || (feature == kMIPSdspR2) || (feature == kMSA)) {
+    return 1;
+  } else {
+    return 0;
+  }
+
+}
+VP8CPUInfo VP8GetCPUInfo = mipsCPUInfo;
+#else
+VP8CPUInfo VP8GetCPUInfo = NULL;
+#endif
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_dec.c b/vendor/github.com/sizeofint/webpanimation/dsp_dec.c
new file mode 100644
index 00000000..aab40bd3
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_dec.c
@@ -0,0 +1,887 @@
+// Copyright 2010 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Speed-critical decoding functions, default plain-C implementations.
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <assert.h>
+
+#include "dsp_dsp.h"
+#include "dec_vp8i_dec.h"
+#include "utils_utils.h"
+
+//------------------------------------------------------------------------------
+
+static WEBP_INLINE uint8_t clip_8b(int v) {
+  return (!(v & ~0xff)) ? v : (v < 0) ?
0 : 255; +} + +//------------------------------------------------------------------------------ +// Transforms (Paragraph 14.4) + +#define STORE(x, y, v) \ + dst[(x) + (y) * BPS] = clip_8b(dst[(x) + (y) * BPS] + ((v) >> 3)) + +#define STORE2(y, dc, d, c) do { \ + const int DC = (dc); \ + STORE(0, y, DC + (d)); \ + STORE(1, y, DC + (c)); \ + STORE(2, y, DC - (c)); \ + STORE(3, y, DC - (d)); \ +} while (0) + +#define MUL1(a) ((((a) * 20091) >> 16) + (a)) +#define MUL2(a) (((a) * 35468) >> 16) + +#if !WEBP_NEON_OMIT_C_CODE +static void TransformOne_C(const int16_t* in, uint8_t* dst) { + int C[4 * 4], *tmp; + int i; + tmp = C; + for (i = 0; i < 4; ++i) { // vertical pass + const int a = in[0] + in[8]; // [-4096, 4094] + const int b = in[0] - in[8]; // [-4095, 4095] + const int c = MUL2(in[4]) - MUL1(in[12]); // [-3783, 3783] + const int d = MUL1(in[4]) + MUL2(in[12]); // [-3785, 3781] + tmp[0] = a + d; // [-7881, 7875] + tmp[1] = b + c; // [-7878, 7878] + tmp[2] = b - c; // [-7878, 7878] + tmp[3] = a - d; // [-7877, 7879] + tmp += 4; + in++; + } + // Each pass is expanding the dynamic range by ~3.85 (upper bound). + // The exact value is (2. + (20091 + 35468) / 65536). + // After the second pass, maximum interval is [-3794, 3794], assuming + // an input in [-2048, 2047] interval. We then need to add a dst value + // in the [0, 255] range. + // In the worst case scenario, the input to clip_8b() can be as large as + // [-60713, 60968]. + tmp = C; + for (i = 0; i < 4; ++i) { // horizontal pass + const int dc = tmp[0] + 4; + const int a = dc + tmp[8]; + const int b = dc - tmp[8]; + const int c = MUL2(tmp[4]) - MUL1(tmp[12]); + const int d = MUL1(tmp[4]) + MUL2(tmp[12]); + STORE(0, 0, a + d); + STORE(1, 0, b + c); + STORE(2, 0, b - c); + STORE(3, 0, a - d); + tmp++; + dst += BPS; + } +} + +// Simplified transform when only in[0], in[1] and in[4] are non-zero +static void TransformAC3_C(const int16_t* in, uint8_t* dst) { + const int a = in[0] + 4; + const int c4 = MUL2(in[4]); + const int d4 = MUL1(in[4]); + const int c1 = MUL2(in[1]); + const int d1 = MUL1(in[1]); + STORE2(0, a + d4, d1, c1); + STORE2(1, a + c4, d1, c1); + STORE2(2, a - c4, d1, c1); + STORE2(3, a - d4, d1, c1); +} +#undef MUL1 +#undef MUL2 +#undef STORE2 + +static void TransformTwo_C(const int16_t* in, uint8_t* dst, int do_two) { + TransformOne_C(in, dst); + if (do_two) { + TransformOne_C(in + 16, dst + 4); + } +} +#endif // !WEBP_NEON_OMIT_C_CODE + +static void TransformUV_C(const int16_t* in, uint8_t* dst) { + VP8Transform(in + 0 * 16, dst, 1); + VP8Transform(in + 2 * 16, dst + 4 * BPS, 1); +} + +#if !WEBP_NEON_OMIT_C_CODE +static void TransformDC_C(const int16_t* in, uint8_t* dst) { + const int DC = in[0] + 4; + int i, j; + for (j = 0; j < 4; ++j) { + for (i = 0; i < 4; ++i) { + STORE(i, j, DC); + } + } +} +#endif // !WEBP_NEON_OMIT_C_CODE + +static void TransformDCUV_C(const int16_t* in, uint8_t* dst) { + if (in[0 * 16]) VP8TransformDC(in + 0 * 16, dst); + if (in[1 * 16]) VP8TransformDC(in + 1 * 16, dst + 4); + if (in[2 * 16]) VP8TransformDC(in + 2 * 16, dst + 4 * BPS); + if (in[3 * 16]) VP8TransformDC(in + 3 * 16, dst + 4 * BPS + 4); +} + +#undef STORE + +//------------------------------------------------------------------------------ +// Paragraph 14.3 + +#if !WEBP_NEON_OMIT_C_CODE +static void TransformWHT_C(const int16_t* in, int16_t* out) { + int tmp[16]; + int i; + for (i = 0; i < 4; ++i) { + const int a0 = in[0 + i] + in[12 + i]; + const int a1 = in[4 + i] + in[ 8 + i]; + const int a2 = in[4 + i] - in[ 8 + i]; + const 
int a3 = in[0 + i] - in[12 + i]; + tmp[0 + i] = a0 + a1; + tmp[8 + i] = a0 - a1; + tmp[4 + i] = a3 + a2; + tmp[12 + i] = a3 - a2; + } + for (i = 0; i < 4; ++i) { + const int dc = tmp[0 + i * 4] + 3; // w/ rounder + const int a0 = dc + tmp[3 + i * 4]; + const int a1 = tmp[1 + i * 4] + tmp[2 + i * 4]; + const int a2 = tmp[1 + i * 4] - tmp[2 + i * 4]; + const int a3 = dc - tmp[3 + i * 4]; + out[ 0] = (a0 + a1) >> 3; + out[16] = (a3 + a2) >> 3; + out[32] = (a0 - a1) >> 3; + out[48] = (a3 - a2) >> 3; + out += 64; + } +} +#endif // !WEBP_NEON_OMIT_C_CODE + +void (*VP8TransformWHT)(const int16_t* in, int16_t* out); + +//------------------------------------------------------------------------------ +// Intra predictions + +#define DST(x, y) dst[(x) + (y) * BPS] + +#if !WEBP_NEON_OMIT_C_CODE +static WEBP_INLINE void TrueMotion(uint8_t* dst, int size) { + const uint8_t* top = dst - BPS; + const uint8_t* const clip0 = VP8kclip1 - top[-1]; + int y; + for (y = 0; y < size; ++y) { + const uint8_t* const clip = clip0 + dst[-1]; + int x; + for (x = 0; x < size; ++x) { + dst[x] = clip[top[x]]; + } + dst += BPS; + } +} +static void TM4_C(uint8_t* dst) { TrueMotion(dst, 4); } +static void TM8uv_C(uint8_t* dst) { TrueMotion(dst, 8); } +static void TM16_C(uint8_t* dst) { TrueMotion(dst, 16); } + +//------------------------------------------------------------------------------ +// 16x16 + +static void VE16_C(uint8_t* dst) { // vertical + int j; + for (j = 0; j < 16; ++j) { + memcpy(dst + j * BPS, dst - BPS, 16); + } +} + +static void HE16_C(uint8_t* dst) { // horizontal + int j; + for (j = 16; j > 0; --j) { + memset(dst, dst[-1], 16); + dst += BPS; + } +} + +static WEBP_INLINE void Put16(int v, uint8_t* dst) { + int j; + for (j = 0; j < 16; ++j) { + memset(dst + j * BPS, v, 16); + } +} + +static void DC16_C(uint8_t* dst) { // DC + int DC = 16; + int j; + for (j = 0; j < 16; ++j) { + DC += dst[-1 + j * BPS] + dst[j - BPS]; + } + Put16(DC >> 5, dst); +} + +static void DC16NoTop_C(uint8_t* dst) { // DC with top samples not available + int DC = 8; + int j; + for (j = 0; j < 16; ++j) { + DC += dst[-1 + j * BPS]; + } + Put16(DC >> 4, dst); +} + +static void DC16NoLeft_C(uint8_t* dst) { // DC with left samples not available + int DC = 8; + int i; + for (i = 0; i < 16; ++i) { + DC += dst[i - BPS]; + } + Put16(DC >> 4, dst); +} + +static void DC16NoTopLeft_C(uint8_t* dst) { // DC with no top and left samples + Put16(0x80, dst); +} +#endif // !WEBP_NEON_OMIT_C_CODE + +VP8PredFunc VP8PredLuma16[NUM_B_DC_MODES]; + +//------------------------------------------------------------------------------ +// 4x4 + +#define AVG3(a, b, c) ((uint8_t)(((a) + 2 * (b) + (c) + 2) >> 2)) +#define AVG2(a, b) (((a) + (b) + 1) >> 1) + +#if !WEBP_NEON_OMIT_C_CODE +static void VE4_C(uint8_t* dst) { // vertical + const uint8_t* top = dst - BPS; + const uint8_t vals[4] = { + AVG3(top[-1], top[0], top[1]), + AVG3(top[ 0], top[1], top[2]), + AVG3(top[ 1], top[2], top[3]), + AVG3(top[ 2], top[3], top[4]) + }; + int i; + for (i = 0; i < 4; ++i) { + memcpy(dst + i * BPS, vals, sizeof(vals)); + } +} +#endif // !WEBP_NEON_OMIT_C_CODE + +static void HE4_C(uint8_t* dst) { // horizontal + const int A = dst[-1 - BPS]; + const int B = dst[-1]; + const int C = dst[-1 + BPS]; + const int D = dst[-1 + 2 * BPS]; + const int E = dst[-1 + 3 * BPS]; + WebPUint32ToMem(dst + 0 * BPS, 0x01010101U * AVG3(A, B, C)); + WebPUint32ToMem(dst + 1 * BPS, 0x01010101U * AVG3(B, C, D)); + WebPUint32ToMem(dst + 2 * BPS, 0x01010101U * AVG3(C, D, E)); + WebPUint32ToMem(dst + 3 * 
BPS, 0x01010101U * AVG3(D, E, E)); +} + +#if !WEBP_NEON_OMIT_C_CODE +static void DC4_C(uint8_t* dst) { // DC + uint32_t dc = 4; + int i; + for (i = 0; i < 4; ++i) dc += dst[i - BPS] + dst[-1 + i * BPS]; + dc >>= 3; + for (i = 0; i < 4; ++i) memset(dst + i * BPS, dc, 4); +} + +static void RD4_C(uint8_t* dst) { // Down-right + const int I = dst[-1 + 0 * BPS]; + const int J = dst[-1 + 1 * BPS]; + const int K = dst[-1 + 2 * BPS]; + const int L = dst[-1 + 3 * BPS]; + const int X = dst[-1 - BPS]; + const int A = dst[0 - BPS]; + const int B = dst[1 - BPS]; + const int C = dst[2 - BPS]; + const int D = dst[3 - BPS]; + DST(0, 3) = AVG3(J, K, L); + DST(1, 3) = DST(0, 2) = AVG3(I, J, K); + DST(2, 3) = DST(1, 2) = DST(0, 1) = AVG3(X, I, J); + DST(3, 3) = DST(2, 2) = DST(1, 1) = DST(0, 0) = AVG3(A, X, I); + DST(3, 2) = DST(2, 1) = DST(1, 0) = AVG3(B, A, X); + DST(3, 1) = DST(2, 0) = AVG3(C, B, A); + DST(3, 0) = AVG3(D, C, B); +} + +static void LD4_C(uint8_t* dst) { // Down-Left + const int A = dst[0 - BPS]; + const int B = dst[1 - BPS]; + const int C = dst[2 - BPS]; + const int D = dst[3 - BPS]; + const int E = dst[4 - BPS]; + const int F = dst[5 - BPS]; + const int G = dst[6 - BPS]; + const int H = dst[7 - BPS]; + DST(0, 0) = AVG3(A, B, C); + DST(1, 0) = DST(0, 1) = AVG3(B, C, D); + DST(2, 0) = DST(1, 1) = DST(0, 2) = AVG3(C, D, E); + DST(3, 0) = DST(2, 1) = DST(1, 2) = DST(0, 3) = AVG3(D, E, F); + DST(3, 1) = DST(2, 2) = DST(1, 3) = AVG3(E, F, G); + DST(3, 2) = DST(2, 3) = AVG3(F, G, H); + DST(3, 3) = AVG3(G, H, H); +} +#endif // !WEBP_NEON_OMIT_C_CODE + +static void VR4_C(uint8_t* dst) { // Vertical-Right + const int I = dst[-1 + 0 * BPS]; + const int J = dst[-1 + 1 * BPS]; + const int K = dst[-1 + 2 * BPS]; + const int X = dst[-1 - BPS]; + const int A = dst[0 - BPS]; + const int B = dst[1 - BPS]; + const int C = dst[2 - BPS]; + const int D = dst[3 - BPS]; + DST(0, 0) = DST(1, 2) = AVG2(X, A); + DST(1, 0) = DST(2, 2) = AVG2(A, B); + DST(2, 0) = DST(3, 2) = AVG2(B, C); + DST(3, 0) = AVG2(C, D); + + DST(0, 3) = AVG3(K, J, I); + DST(0, 2) = AVG3(J, I, X); + DST(0, 1) = DST(1, 3) = AVG3(I, X, A); + DST(1, 1) = DST(2, 3) = AVG3(X, A, B); + DST(2, 1) = DST(3, 3) = AVG3(A, B, C); + DST(3, 1) = AVG3(B, C, D); +} + +static void VL4_C(uint8_t* dst) { // Vertical-Left + const int A = dst[0 - BPS]; + const int B = dst[1 - BPS]; + const int C = dst[2 - BPS]; + const int D = dst[3 - BPS]; + const int E = dst[4 - BPS]; + const int F = dst[5 - BPS]; + const int G = dst[6 - BPS]; + const int H = dst[7 - BPS]; + DST(0, 0) = AVG2(A, B); + DST(1, 0) = DST(0, 2) = AVG2(B, C); + DST(2, 0) = DST(1, 2) = AVG2(C, D); + DST(3, 0) = DST(2, 2) = AVG2(D, E); + + DST(0, 1) = AVG3(A, B, C); + DST(1, 1) = DST(0, 3) = AVG3(B, C, D); + DST(2, 1) = DST(1, 3) = AVG3(C, D, E); + DST(3, 1) = DST(2, 3) = AVG3(D, E, F); + DST(3, 2) = AVG3(E, F, G); + DST(3, 3) = AVG3(F, G, H); +} + +static void HU4_C(uint8_t* dst) { // Horizontal-Up + const int I = dst[-1 + 0 * BPS]; + const int J = dst[-1 + 1 * BPS]; + const int K = dst[-1 + 2 * BPS]; + const int L = dst[-1 + 3 * BPS]; + DST(0, 0) = AVG2(I, J); + DST(2, 0) = DST(0, 1) = AVG2(J, K); + DST(2, 1) = DST(0, 2) = AVG2(K, L); + DST(1, 0) = AVG3(I, J, K); + DST(3, 0) = DST(1, 1) = AVG3(J, K, L); + DST(3, 1) = DST(1, 2) = AVG3(K, L, L); + DST(3, 2) = DST(2, 2) = + DST(0, 3) = DST(1, 3) = DST(2, 3) = DST(3, 3) = L; +} + +static void HD4_C(uint8_t* dst) { // Horizontal-Down + const int I = dst[-1 + 0 * BPS]; + const int J = dst[-1 + 1 * BPS]; + const int K = dst[-1 + 2 * BPS]; + const int L = 
dst[-1 + 3 * BPS]; + const int X = dst[-1 - BPS]; + const int A = dst[0 - BPS]; + const int B = dst[1 - BPS]; + const int C = dst[2 - BPS]; + + DST(0, 0) = DST(2, 1) = AVG2(I, X); + DST(0, 1) = DST(2, 2) = AVG2(J, I); + DST(0, 2) = DST(2, 3) = AVG2(K, J); + DST(0, 3) = AVG2(L, K); + + DST(3, 0) = AVG3(A, B, C); + DST(2, 0) = AVG3(X, A, B); + DST(1, 0) = DST(3, 1) = AVG3(I, X, A); + DST(1, 1) = DST(3, 2) = AVG3(J, I, X); + DST(1, 2) = DST(3, 3) = AVG3(K, J, I); + DST(1, 3) = AVG3(L, K, J); +} + +#undef DST +#undef AVG3 +#undef AVG2 + +VP8PredFunc VP8PredLuma4[NUM_BMODES]; + +//------------------------------------------------------------------------------ +// Chroma + +#if !WEBP_NEON_OMIT_C_CODE +static void VE8uv_C(uint8_t* dst) { // vertical + int j; + for (j = 0; j < 8; ++j) { + memcpy(dst + j * BPS, dst - BPS, 8); + } +} + +static void HE8uv_C(uint8_t* dst) { // horizontal + int j; + for (j = 0; j < 8; ++j) { + memset(dst, dst[-1], 8); + dst += BPS; + } +} + +// helper for chroma-DC predictions +static WEBP_INLINE void Put8x8uv(uint8_t value, uint8_t* dst) { + int j; + for (j = 0; j < 8; ++j) { + memset(dst + j * BPS, value, 8); + } +} + +static void DC8uv_C(uint8_t* dst) { // DC + int dc0 = 8; + int i; + for (i = 0; i < 8; ++i) { + dc0 += dst[i - BPS] + dst[-1 + i * BPS]; + } + Put8x8uv(dc0 >> 4, dst); +} + +static void DC8uvNoLeft_C(uint8_t* dst) { // DC with no left samples + int dc0 = 4; + int i; + for (i = 0; i < 8; ++i) { + dc0 += dst[i - BPS]; + } + Put8x8uv(dc0 >> 3, dst); +} + +static void DC8uvNoTop_C(uint8_t* dst) { // DC with no top samples + int dc0 = 4; + int i; + for (i = 0; i < 8; ++i) { + dc0 += dst[-1 + i * BPS]; + } + Put8x8uv(dc0 >> 3, dst); +} + +static void DC8uvNoTopLeft_C(uint8_t* dst) { // DC with nothing + Put8x8uv(0x80, dst); +} +#endif // !WEBP_NEON_OMIT_C_CODE + +VP8PredFunc VP8PredChroma8[NUM_B_DC_MODES]; + +//------------------------------------------------------------------------------ +// Edge filtering functions + +#if !WEBP_NEON_OMIT_C_CODE || WEBP_NEON_WORK_AROUND_GCC +// 4 pixels in, 2 pixels out +static WEBP_INLINE void DoFilter2_C(uint8_t* p, int step) { + const int p1 = p[-2*step], p0 = p[-step], q0 = p[0], q1 = p[step]; + const int a = 3 * (q0 - p0) + VP8ksclip1[p1 - q1]; // in [-893,892] + const int a1 = VP8ksclip2[(a + 4) >> 3]; // in [-16,15] + const int a2 = VP8ksclip2[(a + 3) >> 3]; + p[-step] = VP8kclip1[p0 + a2]; + p[ 0] = VP8kclip1[q0 - a1]; +} + +// 4 pixels in, 4 pixels out +static WEBP_INLINE void DoFilter4_C(uint8_t* p, int step) { + const int p1 = p[-2*step], p0 = p[-step], q0 = p[0], q1 = p[step]; + const int a = 3 * (q0 - p0); + const int a1 = VP8ksclip2[(a + 4) >> 3]; + const int a2 = VP8ksclip2[(a + 3) >> 3]; + const int a3 = (a1 + 1) >> 1; + p[-2*step] = VP8kclip1[p1 + a3]; + p[- step] = VP8kclip1[p0 + a2]; + p[ 0] = VP8kclip1[q0 - a1]; + p[ step] = VP8kclip1[q1 - a3]; +} + +// 6 pixels in, 6 pixels out +static WEBP_INLINE void DoFilter6_C(uint8_t* p, int step) { + const int p2 = p[-3*step], p1 = p[-2*step], p0 = p[-step]; + const int q0 = p[0], q1 = p[step], q2 = p[2*step]; + const int a = VP8ksclip1[3 * (q0 - p0) + VP8ksclip1[p1 - q1]]; + // a is in [-128,127], a1 in [-27,27], a2 in [-18,18] and a3 in [-9,9] + const int a1 = (27 * a + 63) >> 7; // eq. to ((3 * a + 7) * 9) >> 7 + const int a2 = (18 * a + 63) >> 7; // eq. to ((2 * a + 7) * 9) >> 7 + const int a3 = (9 * a + 63) >> 7; // eq. 
to ((1 * a + 7) * 9) >> 7 + p[-3*step] = VP8kclip1[p2 + a3]; + p[-2*step] = VP8kclip1[p1 + a2]; + p[- step] = VP8kclip1[p0 + a1]; + p[ 0] = VP8kclip1[q0 - a1]; + p[ step] = VP8kclip1[q1 - a2]; + p[ 2*step] = VP8kclip1[q2 - a3]; +} + +static WEBP_INLINE int Hev(const uint8_t* p, int step, int thresh) { + const int p1 = p[-2*step], p0 = p[-step], q0 = p[0], q1 = p[step]; + return (VP8kabs0[p1 - p0] > thresh) || (VP8kabs0[q1 - q0] > thresh); +} +#endif // !WEBP_NEON_OMIT_C_CODE || WEBP_NEON_WORK_AROUND_GCC + +#if !WEBP_NEON_OMIT_C_CODE +static WEBP_INLINE int NeedsFilter_C(const uint8_t* p, int step, int t) { + const int p1 = p[-2 * step], p0 = p[-step], q0 = p[0], q1 = p[step]; + return ((4 * VP8kabs0[p0 - q0] + VP8kabs0[p1 - q1]) <= t); +} +#endif // !WEBP_NEON_OMIT_C_CODE + +#if !WEBP_NEON_OMIT_C_CODE || WEBP_NEON_WORK_AROUND_GCC +static WEBP_INLINE int NeedsFilter2_C(const uint8_t* p, + int step, int t, int it) { + const int p3 = p[-4 * step], p2 = p[-3 * step], p1 = p[-2 * step]; + const int p0 = p[-step], q0 = p[0]; + const int q1 = p[step], q2 = p[2 * step], q3 = p[3 * step]; + if ((4 * VP8kabs0[p0 - q0] + VP8kabs0[p1 - q1]) > t) return 0; + return VP8kabs0[p3 - p2] <= it && VP8kabs0[p2 - p1] <= it && + VP8kabs0[p1 - p0] <= it && VP8kabs0[q3 - q2] <= it && + VP8kabs0[q2 - q1] <= it && VP8kabs0[q1 - q0] <= it; +} +#endif // !WEBP_NEON_OMIT_C_CODE || WEBP_NEON_WORK_AROUND_GCC + +//------------------------------------------------------------------------------ +// Simple In-loop filtering (Paragraph 15.2) + +#if !WEBP_NEON_OMIT_C_CODE +static void SimpleVFilter16_C(uint8_t* p, int stride, int thresh) { + int i; + const int thresh2 = 2 * thresh + 1; + for (i = 0; i < 16; ++i) { + if (NeedsFilter_C(p + i, stride, thresh2)) { + DoFilter2_C(p + i, stride); + } + } +} + +static void SimpleHFilter16_C(uint8_t* p, int stride, int thresh) { + int i; + const int thresh2 = 2 * thresh + 1; + for (i = 0; i < 16; ++i) { + if (NeedsFilter_C(p + i * stride, 1, thresh2)) { + DoFilter2_C(p + i * stride, 1); + } + } +} + +static void SimpleVFilter16i_C(uint8_t* p, int stride, int thresh) { + int k; + for (k = 3; k > 0; --k) { + p += 4 * stride; + SimpleVFilter16_C(p, stride, thresh); + } +} + +static void SimpleHFilter16i_C(uint8_t* p, int stride, int thresh) { + int k; + for (k = 3; k > 0; --k) { + p += 4; + SimpleHFilter16_C(p, stride, thresh); + } +} +#endif // !WEBP_NEON_OMIT_C_CODE + +//------------------------------------------------------------------------------ +// Complex In-loop filtering (Paragraph 15.3) + +#if !WEBP_NEON_OMIT_C_CODE || WEBP_NEON_WORK_AROUND_GCC +static WEBP_INLINE void FilterLoop26_C(uint8_t* p, + int hstride, int vstride, int size, + int thresh, int ithresh, + int hev_thresh) { + const int thresh2 = 2 * thresh + 1; + while (size-- > 0) { + if (NeedsFilter2_C(p, hstride, thresh2, ithresh)) { + if (Hev(p, hstride, hev_thresh)) { + DoFilter2_C(p, hstride); + } else { + DoFilter6_C(p, hstride); + } + } + p += vstride; + } +} + +static WEBP_INLINE void FilterLoop24_C(uint8_t* p, + int hstride, int vstride, int size, + int thresh, int ithresh, + int hev_thresh) { + const int thresh2 = 2 * thresh + 1; + while (size-- > 0) { + if (NeedsFilter2_C(p, hstride, thresh2, ithresh)) { + if (Hev(p, hstride, hev_thresh)) { + DoFilter2_C(p, hstride); + } else { + DoFilter4_C(p, hstride); + } + } + p += vstride; + } +} +#endif // !WEBP_NEON_OMIT_C_CODE || WEBP_NEON_WORK_AROUND_GCC + +#if !WEBP_NEON_OMIT_C_CODE +// on macroblock edges +static void VFilter16_C(uint8_t* p, int stride, + int thresh, 
int ithresh, int hev_thresh) { + FilterLoop26_C(p, stride, 1, 16, thresh, ithresh, hev_thresh); +} + +static void HFilter16_C(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + FilterLoop26_C(p, 1, stride, 16, thresh, ithresh, hev_thresh); +} + +// on three inner edges +static void VFilter16i_C(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + int k; + for (k = 3; k > 0; --k) { + p += 4 * stride; + FilterLoop24_C(p, stride, 1, 16, thresh, ithresh, hev_thresh); + } +} +#endif // !WEBP_NEON_OMIT_C_CODE + +#if !WEBP_NEON_OMIT_C_CODE || WEBP_NEON_WORK_AROUND_GCC +static void HFilter16i_C(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + int k; + for (k = 3; k > 0; --k) { + p += 4; + FilterLoop24_C(p, 1, stride, 16, thresh, ithresh, hev_thresh); + } +} +#endif // !WEBP_NEON_OMIT_C_CODE || WEBP_NEON_WORK_AROUND_GCC + +#if !WEBP_NEON_OMIT_C_CODE +// 8-pixels wide variant, for chroma filtering +static void VFilter8_C(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + FilterLoop26_C(u, stride, 1, 8, thresh, ithresh, hev_thresh); + FilterLoop26_C(v, stride, 1, 8, thresh, ithresh, hev_thresh); +} +#endif // !WEBP_NEON_OMIT_C_CODE + +#if !WEBP_NEON_OMIT_C_CODE || WEBP_NEON_WORK_AROUND_GCC +static void HFilter8_C(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + FilterLoop26_C(u, 1, stride, 8, thresh, ithresh, hev_thresh); + FilterLoop26_C(v, 1, stride, 8, thresh, ithresh, hev_thresh); +} +#endif // !WEBP_NEON_OMIT_C_CODE || WEBP_NEON_WORK_AROUND_GCC + +#if !WEBP_NEON_OMIT_C_CODE +static void VFilter8i_C(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + FilterLoop24_C(u + 4 * stride, stride, 1, 8, thresh, ithresh, hev_thresh); + FilterLoop24_C(v + 4 * stride, stride, 1, 8, thresh, ithresh, hev_thresh); +} +#endif // !WEBP_NEON_OMIT_C_CODE + +#if !WEBP_NEON_OMIT_C_CODE || WEBP_NEON_WORK_AROUND_GCC +static void HFilter8i_C(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + FilterLoop24_C(u + 4, 1, stride, 8, thresh, ithresh, hev_thresh); + FilterLoop24_C(v + 4, 1, stride, 8, thresh, ithresh, hev_thresh); +} +#endif // !WEBP_NEON_OMIT_C_CODE || WEBP_NEON_WORK_AROUND_GCC + +//------------------------------------------------------------------------------ + +static void DitherCombine8x8_C(const uint8_t* dither, uint8_t* dst, + int dst_stride) { + int i, j; + for (j = 0; j < 8; ++j) { + for (i = 0; i < 8; ++i) { + const int delta0 = dither[i] - VP8_DITHER_AMP_CENTER; + const int delta1 = + (delta0 + VP8_DITHER_DESCALE_ROUNDER) >> VP8_DITHER_DESCALE; + dst[i] = clip_8b((int)dst[i] + delta1); + } + dst += dst_stride; + dither += 8; + } +} + +//------------------------------------------------------------------------------ + +VP8DecIdct2 VP8Transform; +VP8DecIdct VP8TransformAC3; +VP8DecIdct VP8TransformUV; +VP8DecIdct VP8TransformDC; +VP8DecIdct VP8TransformDCUV; + +VP8LumaFilterFunc VP8VFilter16; +VP8LumaFilterFunc VP8HFilter16; +VP8ChromaFilterFunc VP8VFilter8; +VP8ChromaFilterFunc VP8HFilter8; +VP8LumaFilterFunc VP8VFilter16i; +VP8LumaFilterFunc VP8HFilter16i; +VP8ChromaFilterFunc VP8VFilter8i; +VP8ChromaFilterFunc VP8HFilter8i; +VP8SimpleFilterFunc VP8SimpleVFilter16; +VP8SimpleFilterFunc VP8SimpleHFilter16; +VP8SimpleFilterFunc VP8SimpleVFilter16i; +VP8SimpleFilterFunc VP8SimpleHFilter16i; + +void (*VP8DitherCombine8x8)(const uint8_t* dither, uint8_t* dst, + int dst_stride); + +extern void VP8DspInitSSE2(void); +extern 
void VP8DspInitSSE41(void); +extern void VP8DspInitNEON(void); +extern void VP8DspInitMIPS32(void); +extern void VP8DspInitMIPSdspR2(void); +extern void VP8DspInitMSA(void); + +WEBP_DSP_INIT_FUNC(VP8DspInit) { + VP8InitClipTables(); + +#if !WEBP_NEON_OMIT_C_CODE + VP8TransformWHT = TransformWHT_C; + VP8Transform = TransformTwo_C; + VP8TransformDC = TransformDC_C; + VP8TransformAC3 = TransformAC3_C; +#endif + VP8TransformUV = TransformUV_C; + VP8TransformDCUV = TransformDCUV_C; + +#if !WEBP_NEON_OMIT_C_CODE + VP8VFilter16 = VFilter16_C; + VP8VFilter16i = VFilter16i_C; + VP8HFilter16 = HFilter16_C; + VP8VFilter8 = VFilter8_C; + VP8VFilter8i = VFilter8i_C; + VP8SimpleVFilter16 = SimpleVFilter16_C; + VP8SimpleHFilter16 = SimpleHFilter16_C; + VP8SimpleVFilter16i = SimpleVFilter16i_C; + VP8SimpleHFilter16i = SimpleHFilter16i_C; +#endif + +#if !WEBP_NEON_OMIT_C_CODE || WEBP_NEON_WORK_AROUND_GCC + VP8HFilter16i = HFilter16i_C; + VP8HFilter8 = HFilter8_C; + VP8HFilter8i = HFilter8i_C; +#endif + +#if !WEBP_NEON_OMIT_C_CODE + VP8PredLuma4[0] = DC4_C; + VP8PredLuma4[1] = TM4_C; + VP8PredLuma4[2] = VE4_C; + VP8PredLuma4[4] = RD4_C; + VP8PredLuma4[6] = LD4_C; +#endif + + VP8PredLuma4[3] = HE4_C; + VP8PredLuma4[5] = VR4_C; + VP8PredLuma4[7] = VL4_C; + VP8PredLuma4[8] = HD4_C; + VP8PredLuma4[9] = HU4_C; + +#if !WEBP_NEON_OMIT_C_CODE + VP8PredLuma16[0] = DC16_C; + VP8PredLuma16[1] = TM16_C; + VP8PredLuma16[2] = VE16_C; + VP8PredLuma16[3] = HE16_C; + VP8PredLuma16[4] = DC16NoTop_C; + VP8PredLuma16[5] = DC16NoLeft_C; + VP8PredLuma16[6] = DC16NoTopLeft_C; + + VP8PredChroma8[0] = DC8uv_C; + VP8PredChroma8[1] = TM8uv_C; + VP8PredChroma8[2] = VE8uv_C; + VP8PredChroma8[3] = HE8uv_C; + VP8PredChroma8[4] = DC8uvNoTop_C; + VP8PredChroma8[5] = DC8uvNoLeft_C; + VP8PredChroma8[6] = DC8uvNoTopLeft_C; +#endif + + VP8DitherCombine8x8 = DitherCombine8x8_C; + + // If defined, use CPUInfo() to overwrite some pointers with faster versions. 
+ if (VP8GetCPUInfo != NULL) { +#if defined(WEBP_USE_SSE2) + if (VP8GetCPUInfo(kSSE2)) { + VP8DspInitSSE2(); +#if defined(WEBP_USE_SSE41) + if (VP8GetCPUInfo(kSSE4_1)) { + VP8DspInitSSE41(); + } +#endif + } +#endif +#if defined(WEBP_USE_MIPS32) + if (VP8GetCPUInfo(kMIPS32)) { + VP8DspInitMIPS32(); + } +#endif +#if defined(WEBP_USE_MIPS_DSP_R2) + if (VP8GetCPUInfo(kMIPSdspR2)) { + VP8DspInitMIPSdspR2(); + } +#endif +#if defined(WEBP_USE_MSA) + if (VP8GetCPUInfo(kMSA)) { + VP8DspInitMSA(); + } +#endif + } + +#if defined(WEBP_USE_NEON) + if (WEBP_NEON_OMIT_C_CODE || + (VP8GetCPUInfo != NULL && VP8GetCPUInfo(kNEON))) { + VP8DspInitNEON(); + } +#endif + + assert(VP8TransformWHT != NULL); + assert(VP8Transform != NULL); + assert(VP8TransformDC != NULL); + assert(VP8TransformAC3 != NULL); + assert(VP8TransformUV != NULL); + assert(VP8TransformDCUV != NULL); + assert(VP8VFilter16 != NULL); + assert(VP8HFilter16 != NULL); + assert(VP8VFilter8 != NULL); + assert(VP8HFilter8 != NULL); + assert(VP8VFilter16i != NULL); + assert(VP8HFilter16i != NULL); + assert(VP8VFilter8i != NULL); + assert(VP8HFilter8i != NULL); + assert(VP8SimpleVFilter16 != NULL); + assert(VP8SimpleHFilter16 != NULL); + assert(VP8SimpleVFilter16i != NULL); + assert(VP8SimpleHFilter16i != NULL); + assert(VP8PredLuma4[0] != NULL); + assert(VP8PredLuma4[1] != NULL); + assert(VP8PredLuma4[2] != NULL); + assert(VP8PredLuma4[3] != NULL); + assert(VP8PredLuma4[4] != NULL); + assert(VP8PredLuma4[5] != NULL); + assert(VP8PredLuma4[6] != NULL); + assert(VP8PredLuma4[7] != NULL); + assert(VP8PredLuma4[8] != NULL); + assert(VP8PredLuma4[9] != NULL); + assert(VP8PredLuma16[0] != NULL); + assert(VP8PredLuma16[1] != NULL); + assert(VP8PredLuma16[2] != NULL); + assert(VP8PredLuma16[3] != NULL); + assert(VP8PredLuma16[4] != NULL); + assert(VP8PredLuma16[5] != NULL); + assert(VP8PredLuma16[6] != NULL); + assert(VP8PredChroma8[0] != NULL); + assert(VP8PredChroma8[1] != NULL); + assert(VP8PredChroma8[2] != NULL); + assert(VP8PredChroma8[3] != NULL); + assert(VP8PredChroma8[4] != NULL); + assert(VP8PredChroma8[5] != NULL); + assert(VP8PredChroma8[6] != NULL); + assert(VP8DitherCombine8x8 != NULL); +} diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_dec_clip_tables.c b/vendor/github.com/sizeofint/webpanimation/dsp_dec_clip_tables.c new file mode 100644 index 00000000..6b091f41 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_dec_clip_tables.c @@ -0,0 +1,369 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. 
+// ----------------------------------------------------------------------------- +// +// Clipping tables for filtering +// +// Author: Skal (pascal.massimino@gmail.com) + +#include "dsp_dsp.h" + +// define to 0 to have run-time table initialization +#if !defined(USE_STATIC_TABLES) +#define USE_STATIC_TABLES 1 // ALTERNATE_CODE +#endif + +#if (USE_STATIC_TABLES == 1) + +static const uint8_t abs0[255 + 255 + 1] = { + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, + 0xf3, 0xf2, 0xf1, 0xf0, 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, 0xdf, 0xde, 0xdd, 0xdc, + 0xdb, 0xda, 0xd9, 0xd8, 0xd7, 0xd6, 0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xd0, + 0xcf, 0xce, 0xcd, 0xcc, 0xcb, 0xca, 0xc9, 0xc8, 0xc7, 0xc6, 0xc5, 0xc4, + 0xc3, 0xc2, 0xc1, 0xc0, 0xbf, 0xbe, 0xbd, 0xbc, 0xbb, 0xba, 0xb9, 0xb8, + 0xb7, 0xb6, 0xb5, 0xb4, 0xb3, 0xb2, 0xb1, 0xb0, 0xaf, 0xae, 0xad, 0xac, + 0xab, 0xaa, 0xa9, 0xa8, 0xa7, 0xa6, 0xa5, 0xa4, 0xa3, 0xa2, 0xa1, 0xa0, + 0x9f, 0x9e, 0x9d, 0x9c, 0x9b, 0x9a, 0x99, 0x98, 0x97, 0x96, 0x95, 0x94, + 0x93, 0x92, 0x91, 0x90, 0x8f, 0x8e, 0x8d, 0x8c, 0x8b, 0x8a, 0x89, 0x88, + 0x87, 0x86, 0x85, 0x84, 0x83, 0x82, 0x81, 0x80, 0x7f, 0x7e, 0x7d, 0x7c, + 0x7b, 0x7a, 0x79, 0x78, 0x77, 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x70, + 0x6f, 0x6e, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x67, 0x66, 0x65, 0x64, + 0x63, 0x62, 0x61, 0x60, 0x5f, 0x5e, 0x5d, 0x5c, 0x5b, 0x5a, 0x59, 0x58, + 0x57, 0x56, 0x55, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x4e, 0x4d, 0x4c, + 0x4b, 0x4a, 0x49, 0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41, 0x40, + 0x3f, 0x3e, 0x3d, 0x3c, 0x3b, 0x3a, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, + 0x33, 0x32, 0x31, 0x30, 0x2f, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x28, + 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20, 0x1f, 0x1e, 0x1d, 0x1c, + 0x1b, 0x1a, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, + 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, + 0x03, 0x02, 0x01, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, + 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, + 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, + 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, + 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, + 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, + 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, + 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, + 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, + 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, + 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, + 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, + 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, + 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, + 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, + 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, + 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, + 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, + 0xf9, 0xfa, 0xfb, 0xfc, 
0xfd, 0xfe, 0xff +}; + +static const uint8_t sclip1[1020 + 1020 + 1] = { + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, + 0x80, 0x80, 0x80, 0x80, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, + 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, + 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, + 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, + 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, + 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, + 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, + 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, + 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, + 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, + 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, + 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, + 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, + 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, + 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, + 
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 
0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, + 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f +}; + +static const uint8_t sclip2[112 + 112 + 1] = { + 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, + 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, + 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, + 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, + 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, + 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, + 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, + 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, + 0xfc, 0xfd, 0xfe, 0xff, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, + 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, + 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, + 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, + 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, + 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, + 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, + 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, + 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f +}; + +static const uint8_t clip1[255 + 511 + 1] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, + 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, + 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, + 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, + 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, + 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, + 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, + 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, + 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, + 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, + 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, + 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, + 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, + 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, + 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, + 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, + 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, + 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, + 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; + +#else + +// uninitialized tables +static uint8_t abs0[255 + 255 + 1]; +static int8_t sclip1[1020 + 1020 + 1]; +static int8_t sclip2[112 + 112 + 1]; +static uint8_t clip1[255 + 511 + 1]; + +// We declare this variable 'volatile' to prevent instruction reordering +// and make sure it's set to true _last_ (so as to be thread-safe) +static volatile int tables_ok = 0; + +#endif // USE_STATIC_TABLES + +const int8_t* const VP8ksclip1 = (const int8_t*)&sclip1[1020]; +const int8_t* const VP8ksclip2 = (const int8_t*)&sclip2[112]; +const uint8_t* const VP8kclip1 = &clip1[255]; +const uint8_t* const VP8kabs0 = &abs0[255]; + +WEBP_TSAN_IGNORE_FUNCTION void VP8InitClipTables(void) { +#if (USE_STATIC_TABLES == 0) + int i; + if (!tables_ok) { + for (i = -255; i <= 255; ++i) { + abs0[255 + i] = (i < 0) ? -i : i; + } + for (i = -1020; i <= 1020; ++i) { + sclip1[1020 + i] = (i < -128) ? -128 : (i > 127) ? 127 : i; + } + for (i = -112; i <= 112; ++i) { + sclip2[112 + i] = (i < -16) ? -16 : (i > 15) ? 15 : i; + } + for (i = -255; i <= 255 + 255; ++i) { + clip1[255 + i] = (i < 0) ? 0 : (i > 255) ? 255 : i; + } + tables_ok = 1; + } +#endif // USE_STATIC_TABLES +} diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_dec_mips32.c b/vendor/github.com/sizeofint/webpanimation/dsp_dec_mips32.c new file mode 100644 index 00000000..baddbc8e --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_dec_mips32.c @@ -0,0 +1,587 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. 
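For reference, the pointer definitions above (VP8ksclip1 = &sclip1[1020], and so on) centre each exported pointer on the middle of its table, so a signed expression can be used directly as an index and every clamp in the loop filter becomes a single load. Below is a minimal standalone sketch of the same arithmetic with explicit clamps; it is not part of the patch, and clamp() and do_filter2_ref() are illustrative names, not libwebp symbols:

#include <stdint.h>
#include <stdio.h>

/* Branchy reference for the table lookups:
 *   VP8ksclip1[i] == clamp(i, -128, 127)   for i in [-1020, 1020]
 *   VP8ksclip2[i] == clamp(i,  -16,  15)   for i in [ -112,  112]
 *   VP8kclip1[i]  == clamp(i,    0, 255)   for i in [ -255,  510] */
static int clamp(int v, int lo, int hi) {
  return (v < lo) ? lo : (v > hi) ? hi : v;
}

/* Scalar equivalent of the do_filter2() defined further below. */
static void do_filter2_ref(uint8_t* p, int step) {
  const int p1 = p[-2 * step], p0 = p[-step], q0 = p[0], q1 = p[step];
  const int a  = 3 * (q0 - p0) + clamp(p1 - q1, -128, 127);
  const int a1 = clamp((a + 4) >> 3, -16, 15);
  const int a2 = clamp((a + 3) >> 3, -16, 15);
  p[-step] = (uint8_t)clamp(p0 + a2, 0, 255);
  p[0]     = (uint8_t)clamp(q0 - a1, 0, 255);
}

int main(void) {
  /* One edge, pixels p1 p0 | q0 q1 = 60 50 | 80 70:
   * a = 3*(80-50) + (-10) = 80, a1 = 84>>3 = 10, a2 = 83>>3 = 10,
   * so the 30-level step across the edge shrinks to 10. */
  uint8_t row[4] = { 60, 50, 80, 70 };
  do_filter2_ref(&row[2], 1);
  printf("%d %d %d %d\n", row[0], row[1], row[2], row[3]);  /* 60 60 70 70 */
  return 0;
}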
+// ----------------------------------------------------------------------------- +// +// MIPS version of dsp functions +// +// Author(s): Djordje Pesut (djordje.pesut@imgtec.com) +// Jovan Zelincevic (jovan.zelincevic@imgtec.com) + +#include "dsp_dsp.h" + +#if defined(WEBP_USE_MIPS32) + +#include "dsp_mips_macro.h" + +static const int kC1 = 20091 + (1 << 16); +static const int kC2 = 35468; + +static WEBP_INLINE int abs_mips32(int x) { + const int sign = x >> 31; + return (x ^ sign) - sign; +} + +// 4 pixels in, 2 pixels out +static WEBP_INLINE void do_filter2(uint8_t* p, int step) { + const int p1 = p[-2 * step], p0 = p[-step], q0 = p[0], q1 = p[step]; + const int a = 3 * (q0 - p0) + VP8ksclip1[p1 - q1]; + const int a1 = VP8ksclip2[(a + 4) >> 3]; + const int a2 = VP8ksclip2[(a + 3) >> 3]; + p[-step] = VP8kclip1[p0 + a2]; + p[ 0] = VP8kclip1[q0 - a1]; +} + +// 4 pixels in, 4 pixels out +static WEBP_INLINE void do_filter4(uint8_t* p, int step) { + const int p1 = p[-2 * step], p0 = p[-step], q0 = p[0], q1 = p[step]; + const int a = 3 * (q0 - p0); + const int a1 = VP8ksclip2[(a + 4) >> 3]; + const int a2 = VP8ksclip2[(a + 3) >> 3]; + const int a3 = (a1 + 1) >> 1; + p[-2 * step] = VP8kclip1[p1 + a3]; + p[- step] = VP8kclip1[p0 + a2]; + p[ 0] = VP8kclip1[q0 - a1]; + p[ step] = VP8kclip1[q1 - a3]; +} + +// 6 pixels in, 6 pixels out +static WEBP_INLINE void do_filter6(uint8_t* p, int step) { + const int p2 = p[-3 * step], p1 = p[-2 * step], p0 = p[-step]; + const int q0 = p[0], q1 = p[step], q2 = p[2 * step]; + const int a = VP8ksclip1[3 * (q0 - p0) + VP8ksclip1[p1 - q1]]; + // a is in [-128,127], a1 in [-27,27], a2 in [-18,18] and a3 in [-9,9] + const int a1 = (27 * a + 63) >> 7; // eq. to ((3 * a + 7) * 9) >> 7 + const int a2 = (18 * a + 63) >> 7; // eq. to ((2 * a + 7) * 9) >> 7 + const int a3 = (9 * a + 63) >> 7; // eq. 
to ((1 * a + 7) * 9) >> 7 + p[-3 * step] = VP8kclip1[p2 + a3]; + p[-2 * step] = VP8kclip1[p1 + a2]; + p[- step] = VP8kclip1[p0 + a1]; + p[ 0] = VP8kclip1[q0 - a1]; + p[ step] = VP8kclip1[q1 - a2]; + p[ 2 * step] = VP8kclip1[q2 - a3]; +} + +static WEBP_INLINE int hev(const uint8_t* p, int step, int thresh) { + const int p1 = p[-2 * step], p0 = p[-step], q0 = p[0], q1 = p[step]; + return (abs_mips32(p1 - p0) > thresh) || (abs_mips32(q1 - q0) > thresh); +} + +static WEBP_INLINE int needs_filter(const uint8_t* p, int step, int t) { + const int p1 = p[-2 * step], p0 = p[-step], q0 = p[0], q1 = p[step]; + return ((4 * abs_mips32(p0 - q0) + abs_mips32(p1 - q1)) <= t); +} + +static WEBP_INLINE int needs_filter2(const uint8_t* p, + int step, int t, int it) { + const int p3 = p[-4 * step], p2 = p[-3 * step]; + const int p1 = p[-2 * step], p0 = p[-step]; + const int q0 = p[0], q1 = p[step], q2 = p[2 * step], q3 = p[3 * step]; + if ((4 * abs_mips32(p0 - q0) + abs_mips32(p1 - q1)) > t) { + return 0; + } + return abs_mips32(p3 - p2) <= it && abs_mips32(p2 - p1) <= it && + abs_mips32(p1 - p0) <= it && abs_mips32(q3 - q2) <= it && + abs_mips32(q2 - q1) <= it && abs_mips32(q1 - q0) <= it; +} + +static WEBP_INLINE void FilterLoop26(uint8_t* p, + int hstride, int vstride, int size, + int thresh, int ithresh, int hev_thresh) { + const int thresh2 = 2 * thresh + 1; + while (size-- > 0) { + if (needs_filter2(p, hstride, thresh2, ithresh)) { + if (hev(p, hstride, hev_thresh)) { + do_filter2(p, hstride); + } else { + do_filter6(p, hstride); + } + } + p += vstride; + } +} + +static WEBP_INLINE void FilterLoop24(uint8_t* p, + int hstride, int vstride, int size, + int thresh, int ithresh, int hev_thresh) { + const int thresh2 = 2 * thresh + 1; + while (size-- > 0) { + if (needs_filter2(p, hstride, thresh2, ithresh)) { + if (hev(p, hstride, hev_thresh)) { + do_filter2(p, hstride); + } else { + do_filter4(p, hstride); + } + } + p += vstride; + } +} + +// on macroblock edges +static void VFilter16(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + FilterLoop26(p, stride, 1, 16, thresh, ithresh, hev_thresh); +} + +static void HFilter16(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + FilterLoop26(p, 1, stride, 16, thresh, ithresh, hev_thresh); +} + +// 8-pixels wide variant, for chroma filtering +static void VFilter8(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + FilterLoop26(u, stride, 1, 8, thresh, ithresh, hev_thresh); + FilterLoop26(v, stride, 1, 8, thresh, ithresh, hev_thresh); +} + +static void HFilter8(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + FilterLoop26(u, 1, stride, 8, thresh, ithresh, hev_thresh); + FilterLoop26(v, 1, stride, 8, thresh, ithresh, hev_thresh); +} + +static void VFilter8i(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + FilterLoop24(u + 4 * stride, stride, 1, 8, thresh, ithresh, hev_thresh); + FilterLoop24(v + 4 * stride, stride, 1, 8, thresh, ithresh, hev_thresh); +} + +static void HFilter8i(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + FilterLoop24(u + 4, 1, stride, 8, thresh, ithresh, hev_thresh); + FilterLoop24(v + 4, 1, stride, 8, thresh, ithresh, hev_thresh); +} + +// on three inner edges +static void VFilter16i(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + int k; + for (k = 3; k > 0; --k) { + p += 4 * stride; + FilterLoop24(p, stride, 1, 16, thresh, ithresh, hev_thresh); + } 
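+    // (a 16x16 macroblock has three interior horizontal edges, at rows
+    // 4, 8 and 12 -- hence three passes, each advancing by 4 * stride)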
+} + +static void HFilter16i(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + int k; + for (k = 3; k > 0; --k) { + p += 4; + FilterLoop24(p, 1, stride, 16, thresh, ithresh, hev_thresh); + } +} + +//------------------------------------------------------------------------------ +// Simple In-loop filtering (Paragraph 15.2) + +static void SimpleVFilter16(uint8_t* p, int stride, int thresh) { + int i; + const int thresh2 = 2 * thresh + 1; + for (i = 0; i < 16; ++i) { + if (needs_filter(p + i, stride, thresh2)) { + do_filter2(p + i, stride); + } + } +} + +static void SimpleHFilter16(uint8_t* p, int stride, int thresh) { + int i; + const int thresh2 = 2 * thresh + 1; + for (i = 0; i < 16; ++i) { + if (needs_filter(p + i * stride, 1, thresh2)) { + do_filter2(p + i * stride, 1); + } + } +} + +static void SimpleVFilter16i(uint8_t* p, int stride, int thresh) { + int k; + for (k = 3; k > 0; --k) { + p += 4 * stride; + SimpleVFilter16(p, stride, thresh); + } +} + +static void SimpleHFilter16i(uint8_t* p, int stride, int thresh) { + int k; + for (k = 3; k > 0; --k) { + p += 4; + SimpleHFilter16(p, stride, thresh); + } +} + +static void TransformOne(const int16_t* in, uint8_t* dst) { + int temp0, temp1, temp2, temp3, temp4; + int temp5, temp6, temp7, temp8, temp9; + int temp10, temp11, temp12, temp13, temp14; + int temp15, temp16, temp17, temp18; + int16_t* p_in = (int16_t*)in; + + // loops unrolled and merged to avoid usage of tmp buffer + // and to reduce number of stalls. MUL macro is written + // in assembler and inlined + __asm__ volatile( + "lh %[temp0], 0(%[in]) \n\t" + "lh %[temp8], 16(%[in]) \n\t" + "lh %[temp4], 8(%[in]) \n\t" + "lh %[temp12], 24(%[in]) \n\t" + "addu %[temp16], %[temp0], %[temp8] \n\t" + "subu %[temp0], %[temp0], %[temp8] \n\t" + "mul %[temp8], %[temp4], %[kC2] \n\t" + "mul %[temp17], %[temp12], %[kC1] \n\t" + "mul %[temp4], %[temp4], %[kC1] \n\t" + "mul %[temp12], %[temp12], %[kC2] \n\t" + "lh %[temp1], 2(%[in]) \n\t" + "lh %[temp5], 10(%[in]) \n\t" + "lh %[temp9], 18(%[in]) \n\t" + "lh %[temp13], 26(%[in]) \n\t" + "sra %[temp8], %[temp8], 16 \n\t" + "sra %[temp17], %[temp17], 16 \n\t" + "sra %[temp4], %[temp4], 16 \n\t" + "sra %[temp12], %[temp12], 16 \n\t" + "lh %[temp2], 4(%[in]) \n\t" + "lh %[temp6], 12(%[in]) \n\t" + "lh %[temp10], 20(%[in]) \n\t" + "lh %[temp14], 28(%[in]) \n\t" + "subu %[temp17], %[temp8], %[temp17] \n\t" + "addu %[temp4], %[temp4], %[temp12] \n\t" + "addu %[temp8], %[temp16], %[temp4] \n\t" + "subu %[temp4], %[temp16], %[temp4] \n\t" + "addu %[temp16], %[temp1], %[temp9] \n\t" + "subu %[temp1], %[temp1], %[temp9] \n\t" + "lh %[temp3], 6(%[in]) \n\t" + "lh %[temp7], 14(%[in]) \n\t" + "lh %[temp11], 22(%[in]) \n\t" + "lh %[temp15], 30(%[in]) \n\t" + "addu %[temp12], %[temp0], %[temp17] \n\t" + "subu %[temp0], %[temp0], %[temp17] \n\t" + "mul %[temp9], %[temp5], %[kC2] \n\t" + "mul %[temp17], %[temp13], %[kC1] \n\t" + "mul %[temp5], %[temp5], %[kC1] \n\t" + "mul %[temp13], %[temp13], %[kC2] \n\t" + "sra %[temp9], %[temp9], 16 \n\t" + "sra %[temp17], %[temp17], 16 \n\t" + "subu %[temp17], %[temp9], %[temp17] \n\t" + "sra %[temp5], %[temp5], 16 \n\t" + "sra %[temp13], %[temp13], 16 \n\t" + "addu %[temp5], %[temp5], %[temp13] \n\t" + "addu %[temp13], %[temp1], %[temp17] \n\t" + "subu %[temp1], %[temp1], %[temp17] \n\t" + "mul %[temp17], %[temp14], %[kC1] \n\t" + "mul %[temp14], %[temp14], %[kC2] \n\t" + "addu %[temp9], %[temp16], %[temp5] \n\t" + "subu %[temp5], %[temp16], %[temp5] \n\t" + "addu %[temp16], %[temp2], %[temp10] \n\t" + 
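+    // kC1 = 20091 + (1 << 16) folds the "+x" term into the multiply:
+    // (x * kC1) >> 16 == x + ((x * 20091) >> 16), saving an add per mul.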
"subu %[temp2], %[temp2], %[temp10] \n\t" + "mul %[temp10], %[temp6], %[kC2] \n\t" + "mul %[temp6], %[temp6], %[kC1] \n\t" + "sra %[temp17], %[temp17], 16 \n\t" + "sra %[temp14], %[temp14], 16 \n\t" + "sra %[temp10], %[temp10], 16 \n\t" + "sra %[temp6], %[temp6], 16 \n\t" + "subu %[temp17], %[temp10], %[temp17] \n\t" + "addu %[temp6], %[temp6], %[temp14] \n\t" + "addu %[temp10], %[temp16], %[temp6] \n\t" + "subu %[temp6], %[temp16], %[temp6] \n\t" + "addu %[temp14], %[temp2], %[temp17] \n\t" + "subu %[temp2], %[temp2], %[temp17] \n\t" + "mul %[temp17], %[temp15], %[kC1] \n\t" + "mul %[temp15], %[temp15], %[kC2] \n\t" + "addu %[temp16], %[temp3], %[temp11] \n\t" + "subu %[temp3], %[temp3], %[temp11] \n\t" + "mul %[temp11], %[temp7], %[kC2] \n\t" + "mul %[temp7], %[temp7], %[kC1] \n\t" + "addiu %[temp8], %[temp8], 4 \n\t" + "addiu %[temp12], %[temp12], 4 \n\t" + "addiu %[temp0], %[temp0], 4 \n\t" + "addiu %[temp4], %[temp4], 4 \n\t" + "sra %[temp17], %[temp17], 16 \n\t" + "sra %[temp15], %[temp15], 16 \n\t" + "sra %[temp11], %[temp11], 16 \n\t" + "sra %[temp7], %[temp7], 16 \n\t" + "subu %[temp17], %[temp11], %[temp17] \n\t" + "addu %[temp7], %[temp7], %[temp15] \n\t" + "addu %[temp15], %[temp3], %[temp17] \n\t" + "subu %[temp3], %[temp3], %[temp17] \n\t" + "addu %[temp11], %[temp16], %[temp7] \n\t" + "subu %[temp7], %[temp16], %[temp7] \n\t" + "addu %[temp16], %[temp8], %[temp10] \n\t" + "subu %[temp8], %[temp8], %[temp10] \n\t" + "mul %[temp10], %[temp9], %[kC2] \n\t" + "mul %[temp17], %[temp11], %[kC1] \n\t" + "mul %[temp9], %[temp9], %[kC1] \n\t" + "mul %[temp11], %[temp11], %[kC2] \n\t" + "sra %[temp10], %[temp10], 16 \n\t" + "sra %[temp17], %[temp17], 16 \n\t" + "sra %[temp9], %[temp9], 16 \n\t" + "sra %[temp11], %[temp11], 16 \n\t" + "subu %[temp17], %[temp10], %[temp17] \n\t" + "addu %[temp11], %[temp9], %[temp11] \n\t" + "addu %[temp10], %[temp12], %[temp14] \n\t" + "subu %[temp12], %[temp12], %[temp14] \n\t" + "mul %[temp14], %[temp13], %[kC2] \n\t" + "mul %[temp9], %[temp15], %[kC1] \n\t" + "mul %[temp13], %[temp13], %[kC1] \n\t" + "mul %[temp15], %[temp15], %[kC2] \n\t" + "sra %[temp14], %[temp14], 16 \n\t" + "sra %[temp9], %[temp9], 16 \n\t" + "sra %[temp13], %[temp13], 16 \n\t" + "sra %[temp15], %[temp15], 16 \n\t" + "subu %[temp9], %[temp14], %[temp9] \n\t" + "addu %[temp15], %[temp13], %[temp15] \n\t" + "addu %[temp14], %[temp0], %[temp2] \n\t" + "subu %[temp0], %[temp0], %[temp2] \n\t" + "mul %[temp2], %[temp1], %[kC2] \n\t" + "mul %[temp13], %[temp3], %[kC1] \n\t" + "mul %[temp1], %[temp1], %[kC1] \n\t" + "mul %[temp3], %[temp3], %[kC2] \n\t" + "sra %[temp2], %[temp2], 16 \n\t" + "sra %[temp13], %[temp13], 16 \n\t" + "sra %[temp1], %[temp1], 16 \n\t" + "sra %[temp3], %[temp3], 16 \n\t" + "subu %[temp13], %[temp2], %[temp13] \n\t" + "addu %[temp3], %[temp1], %[temp3] \n\t" + "addu %[temp2], %[temp4], %[temp6] \n\t" + "subu %[temp4], %[temp4], %[temp6] \n\t" + "mul %[temp6], %[temp5], %[kC2] \n\t" + "mul %[temp1], %[temp7], %[kC1] \n\t" + "mul %[temp5], %[temp5], %[kC1] \n\t" + "mul %[temp7], %[temp7], %[kC2] \n\t" + "sra %[temp6], %[temp6], 16 \n\t" + "sra %[temp1], %[temp1], 16 \n\t" + "sra %[temp5], %[temp5], 16 \n\t" + "sra %[temp7], %[temp7], 16 \n\t" + "subu %[temp1], %[temp6], %[temp1] \n\t" + "addu %[temp7], %[temp5], %[temp7] \n\t" + "addu %[temp5], %[temp16], %[temp11] \n\t" + "subu %[temp16], %[temp16], %[temp11] \n\t" + "addu %[temp11], %[temp8], %[temp17] \n\t" + "subu %[temp8], %[temp8], %[temp17] \n\t" + "sra %[temp5], %[temp5], 3 \n\t" + "sra %[temp16], 
%[temp16], 3 \n\t" + "sra %[temp11], %[temp11], 3 \n\t" + "sra %[temp8], %[temp8], 3 \n\t" + "addu %[temp17], %[temp10], %[temp15] \n\t" + "subu %[temp10], %[temp10], %[temp15] \n\t" + "addu %[temp15], %[temp12], %[temp9] \n\t" + "subu %[temp12], %[temp12], %[temp9] \n\t" + "sra %[temp17], %[temp17], 3 \n\t" + "sra %[temp10], %[temp10], 3 \n\t" + "sra %[temp15], %[temp15], 3 \n\t" + "sra %[temp12], %[temp12], 3 \n\t" + "addu %[temp9], %[temp14], %[temp3] \n\t" + "subu %[temp14], %[temp14], %[temp3] \n\t" + "addu %[temp3], %[temp0], %[temp13] \n\t" + "subu %[temp0], %[temp0], %[temp13] \n\t" + "sra %[temp9], %[temp9], 3 \n\t" + "sra %[temp14], %[temp14], 3 \n\t" + "sra %[temp3], %[temp3], 3 \n\t" + "sra %[temp0], %[temp0], 3 \n\t" + "addu %[temp13], %[temp2], %[temp7] \n\t" + "subu %[temp2], %[temp2], %[temp7] \n\t" + "addu %[temp7], %[temp4], %[temp1] \n\t" + "subu %[temp4], %[temp4], %[temp1] \n\t" + "sra %[temp13], %[temp13], 3 \n\t" + "sra %[temp2], %[temp2], 3 \n\t" + "sra %[temp7], %[temp7], 3 \n\t" + "sra %[temp4], %[temp4], 3 \n\t" + "addiu %[temp6], $zero, 255 \n\t" + "lbu %[temp1], 0+0*" XSTR(BPS) "(%[dst]) \n\t" + "addu %[temp1], %[temp1], %[temp5] \n\t" + "sra %[temp5], %[temp1], 8 \n\t" + "sra %[temp18], %[temp1], 31 \n\t" + "beqz %[temp5], 1f \n\t" + "xor %[temp1], %[temp1], %[temp1] \n\t" + "movz %[temp1], %[temp6], %[temp18] \n\t" + "1: \n\t" + "lbu %[temp18], 1+0*" XSTR(BPS) "(%[dst]) \n\t" + "sb %[temp1], 0+0*" XSTR(BPS) "(%[dst]) \n\t" + "addu %[temp18], %[temp18], %[temp11] \n\t" + "sra %[temp11], %[temp18], 8 \n\t" + "sra %[temp1], %[temp18], 31 \n\t" + "beqz %[temp11], 2f \n\t" + "xor %[temp18], %[temp18], %[temp18] \n\t" + "movz %[temp18], %[temp6], %[temp1] \n\t" + "2: \n\t" + "lbu %[temp1], 2+0*" XSTR(BPS) "(%[dst]) \n\t" + "sb %[temp18], 1+0*" XSTR(BPS) "(%[dst]) \n\t" + "addu %[temp1], %[temp1], %[temp8] \n\t" + "sra %[temp8], %[temp1], 8 \n\t" + "sra %[temp18], %[temp1], 31 \n\t" + "beqz %[temp8], 3f \n\t" + "xor %[temp1], %[temp1], %[temp1] \n\t" + "movz %[temp1], %[temp6], %[temp18] \n\t" + "3: \n\t" + "lbu %[temp18], 3+0*" XSTR(BPS) "(%[dst]) \n\t" + "sb %[temp1], 2+0*" XSTR(BPS) "(%[dst]) \n\t" + "addu %[temp18], %[temp18], %[temp16] \n\t" + "sra %[temp16], %[temp18], 8 \n\t" + "sra %[temp1], %[temp18], 31 \n\t" + "beqz %[temp16], 4f \n\t" + "xor %[temp18], %[temp18], %[temp18] \n\t" + "movz %[temp18], %[temp6], %[temp1] \n\t" + "4: \n\t" + "sb %[temp18], 3+0*" XSTR(BPS) "(%[dst]) \n\t" + "lbu %[temp5], 0+1*" XSTR(BPS) "(%[dst]) \n\t" + "lbu %[temp8], 1+1*" XSTR(BPS) "(%[dst]) \n\t" + "lbu %[temp11], 2+1*" XSTR(BPS) "(%[dst]) \n\t" + "lbu %[temp16], 3+1*" XSTR(BPS) "(%[dst]) \n\t" + "addu %[temp5], %[temp5], %[temp17] \n\t" + "addu %[temp8], %[temp8], %[temp15] \n\t" + "addu %[temp11], %[temp11], %[temp12] \n\t" + "addu %[temp16], %[temp16], %[temp10] \n\t" + "sra %[temp18], %[temp5], 8 \n\t" + "sra %[temp1], %[temp5], 31 \n\t" + "beqz %[temp18], 5f \n\t" + "xor %[temp5], %[temp5], %[temp5] \n\t" + "movz %[temp5], %[temp6], %[temp1] \n\t" + "5: \n\t" + "sra %[temp18], %[temp8], 8 \n\t" + "sra %[temp1], %[temp8], 31 \n\t" + "beqz %[temp18], 6f \n\t" + "xor %[temp8], %[temp8], %[temp8] \n\t" + "movz %[temp8], %[temp6], %[temp1] \n\t" + "6: \n\t" + "sra %[temp18], %[temp11], 8 \n\t" + "sra %[temp1], %[temp11], 31 \n\t" + "sra %[temp17], %[temp16], 8 \n\t" + "sra %[temp15], %[temp16], 31 \n\t" + "beqz %[temp18], 7f \n\t" + "xor %[temp11], %[temp11], %[temp11] \n\t" + "movz %[temp11], %[temp6], %[temp1] \n\t" + "7: \n\t" + "beqz %[temp17], 8f \n\t" + "xor 
%[temp16], %[temp16], %[temp16] \n\t" + "movz %[temp16], %[temp6], %[temp15] \n\t" + "8: \n\t" + "sb %[temp5], 0+1*" XSTR(BPS) "(%[dst]) \n\t" + "sb %[temp8], 1+1*" XSTR(BPS) "(%[dst]) \n\t" + "sb %[temp11], 2+1*" XSTR(BPS) "(%[dst]) \n\t" + "sb %[temp16], 3+1*" XSTR(BPS) "(%[dst]) \n\t" + "lbu %[temp5], 0+2*" XSTR(BPS) "(%[dst]) \n\t" + "lbu %[temp8], 1+2*" XSTR(BPS) "(%[dst]) \n\t" + "lbu %[temp11], 2+2*" XSTR(BPS) "(%[dst]) \n\t" + "lbu %[temp16], 3+2*" XSTR(BPS) "(%[dst]) \n\t" + "addu %[temp5], %[temp5], %[temp9] \n\t" + "addu %[temp8], %[temp8], %[temp3] \n\t" + "addu %[temp11], %[temp11], %[temp0] \n\t" + "addu %[temp16], %[temp16], %[temp14] \n\t" + "sra %[temp18], %[temp5], 8 \n\t" + "sra %[temp1], %[temp5], 31 \n\t" + "sra %[temp17], %[temp8], 8 \n\t" + "sra %[temp15], %[temp8], 31 \n\t" + "sra %[temp12], %[temp11], 8 \n\t" + "sra %[temp10], %[temp11], 31 \n\t" + "sra %[temp9], %[temp16], 8 \n\t" + "sra %[temp3], %[temp16], 31 \n\t" + "beqz %[temp18], 9f \n\t" + "xor %[temp5], %[temp5], %[temp5] \n\t" + "movz %[temp5], %[temp6], %[temp1] \n\t" + "9: \n\t" + "beqz %[temp17], 10f \n\t" + "xor %[temp8], %[temp8], %[temp8] \n\t" + "movz %[temp8], %[temp6], %[temp15] \n\t" + "10: \n\t" + "beqz %[temp12], 11f \n\t" + "xor %[temp11], %[temp11], %[temp11] \n\t" + "movz %[temp11], %[temp6], %[temp10] \n\t" + "11: \n\t" + "beqz %[temp9], 12f \n\t" + "xor %[temp16], %[temp16], %[temp16] \n\t" + "movz %[temp16], %[temp6], %[temp3] \n\t" + "12: \n\t" + "sb %[temp5], 0+2*" XSTR(BPS) "(%[dst]) \n\t" + "sb %[temp8], 1+2*" XSTR(BPS) "(%[dst]) \n\t" + "sb %[temp11], 2+2*" XSTR(BPS) "(%[dst]) \n\t" + "sb %[temp16], 3+2*" XSTR(BPS) "(%[dst]) \n\t" + "lbu %[temp5], 0+3*" XSTR(BPS) "(%[dst]) \n\t" + "lbu %[temp8], 1+3*" XSTR(BPS) "(%[dst]) \n\t" + "lbu %[temp11], 2+3*" XSTR(BPS) "(%[dst]) \n\t" + "lbu %[temp16], 3+3*" XSTR(BPS) "(%[dst]) \n\t" + "addu %[temp5], %[temp5], %[temp13] \n\t" + "addu %[temp8], %[temp8], %[temp7] \n\t" + "addu %[temp11], %[temp11], %[temp4] \n\t" + "addu %[temp16], %[temp16], %[temp2] \n\t" + "sra %[temp18], %[temp5], 8 \n\t" + "sra %[temp1], %[temp5], 31 \n\t" + "sra %[temp17], %[temp8], 8 \n\t" + "sra %[temp15], %[temp8], 31 \n\t" + "sra %[temp12], %[temp11], 8 \n\t" + "sra %[temp10], %[temp11], 31 \n\t" + "sra %[temp9], %[temp16], 8 \n\t" + "sra %[temp3], %[temp16], 31 \n\t" + "beqz %[temp18], 13f \n\t" + "xor %[temp5], %[temp5], %[temp5] \n\t" + "movz %[temp5], %[temp6], %[temp1] \n\t" + "13: \n\t" + "beqz %[temp17], 14f \n\t" + "xor %[temp8], %[temp8], %[temp8] \n\t" + "movz %[temp8], %[temp6], %[temp15] \n\t" + "14: \n\t" + "beqz %[temp12], 15f \n\t" + "xor %[temp11], %[temp11], %[temp11] \n\t" + "movz %[temp11], %[temp6], %[temp10] \n\t" + "15: \n\t" + "beqz %[temp9], 16f \n\t" + "xor %[temp16], %[temp16], %[temp16] \n\t" + "movz %[temp16], %[temp6], %[temp3] \n\t" + "16: \n\t" + "sb %[temp5], 0+3*" XSTR(BPS) "(%[dst]) \n\t" + "sb %[temp8], 1+3*" XSTR(BPS) "(%[dst]) \n\t" + "sb %[temp11], 2+3*" XSTR(BPS) "(%[dst]) \n\t" + "sb %[temp16], 3+3*" XSTR(BPS) "(%[dst]) \n\t" + + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [temp8]"=&r"(temp8), + [temp9]"=&r"(temp9), [temp10]"=&r"(temp10), [temp11]"=&r"(temp11), + [temp12]"=&r"(temp12), [temp13]"=&r"(temp13), [temp14]"=&r"(temp14), + [temp15]"=&r"(temp15), [temp16]"=&r"(temp16), [temp17]"=&r"(temp17), + [temp18]"=&r"(temp18) + : [in]"r"(p_in), [kC1]"r"(kC1), [kC2]"r"(kC2), [dst]"r"(dst) + : 
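+      // the three-operand 'mul' leaves HI/LO unpredictable on MIPS32,
+      // so both are listed as clobbered, alongside "memory" for the stores: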
"memory", "hi", "lo" + ); +} + +static void TransformTwo(const int16_t* in, uint8_t* dst, int do_two) { + TransformOne(in, dst); + if (do_two) { + TransformOne(in + 16, dst + 4); + } +} + +//------------------------------------------------------------------------------ +// Entry point + +extern void VP8DspInitMIPS32(void); + +WEBP_TSAN_IGNORE_FUNCTION void VP8DspInitMIPS32(void) { + VP8InitClipTables(); + + VP8Transform = TransformTwo; + + VP8VFilter16 = VFilter16; + VP8HFilter16 = HFilter16; + VP8VFilter8 = VFilter8; + VP8HFilter8 = HFilter8; + VP8VFilter16i = VFilter16i; + VP8HFilter16i = HFilter16i; + VP8VFilter8i = VFilter8i; + VP8HFilter8i = HFilter8i; + + VP8SimpleVFilter16 = SimpleVFilter16; + VP8SimpleHFilter16 = SimpleHFilter16; + VP8SimpleVFilter16i = SimpleVFilter16i; + VP8SimpleHFilter16i = SimpleHFilter16i; +} + +#else // !WEBP_USE_MIPS32 + +WEBP_DSP_INIT_STUB(VP8DspInitMIPS32) + +#endif // WEBP_USE_MIPS32 diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_dec_mips_dsp_r2.c b/vendor/github.com/sizeofint/webpanimation/dsp_dec_mips_dsp_r2.c new file mode 100644 index 00000000..498b61de --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_dec_mips_dsp_r2.c @@ -0,0 +1,994 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// MIPS version of dsp functions +// +// Author(s): Djordje Pesut (djordje.pesut@imgtec.com) +// Jovan Zelincevic (jovan.zelincevic@imgtec.com) + +#include "dsp_dsp.h" + +#if defined(WEBP_USE_MIPS_DSP_R2) + +#include "dsp_mips_macro.h" + +static const int kC1 = 20091 + (1 << 16); +static const int kC2 = 35468; + +#define MUL(a, b) (((a) * (b)) >> 16) + +static void TransformDC(const int16_t* in, uint8_t* dst) { + int temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9, temp10; + + __asm__ volatile ( + LOAD_WITH_OFFSET_X4(temp1, temp2, temp3, temp4, dst, + 0, 0, 0, 0, + 0, 1, 2, 3, + BPS) + "lh %[temp5], 0(%[in]) \n\t" + "addiu %[temp5], %[temp5], 4 \n\t" + "ins %[temp5], %[temp5], 16, 16 \n\t" + "shra.ph %[temp5], %[temp5], 3 \n\t" + CONVERT_2_BYTES_TO_HALF(temp6, temp7, temp8, temp9, temp10, temp1, temp2, + temp3, temp1, temp2, temp3, temp4) + STORE_SAT_SUM_X2(temp6, temp7, temp8, temp9, temp10, temp1, temp2, temp3, + temp5, temp5, temp5, temp5, temp5, temp5, temp5, temp5, + dst, 0, 1, 2, 3, BPS) + + OUTPUT_EARLY_CLOBBER_REGS_10() + : [in]"r"(in), [dst]"r"(dst) + : "memory" + ); +} + +static void TransformAC3(const int16_t* in, uint8_t* dst) { + const int a = in[0] + 4; + int c4 = MUL(in[4], kC2); + const int d4 = MUL(in[4], kC1); + const int c1 = MUL(in[1], kC2); + const int d1 = MUL(in[1], kC1); + int temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9; + int temp10, temp11, temp12, temp13, temp14, temp15, temp16, temp17, temp18; + + __asm__ volatile ( + "ins %[c4], %[d4], 16, 16 \n\t" + "replv.ph %[temp1], %[a] \n\t" + "replv.ph %[temp4], %[d1] \n\t" + ADD_SUB_HALVES(temp2, temp3, temp1, c4) + "replv.ph %[temp5], %[c1] \n\t" + SHIFT_R_SUM_X2(temp1, temp6, temp7, temp8, temp2, temp9, temp10, temp4, + temp2, temp2, temp3, temp3, temp4, temp5, temp4, temp5) + LOAD_WITH_OFFSET_X4(temp3, temp5, temp11, 
temp12, dst, + 0, 0, 0, 0, + 0, 1, 2, 3, + BPS) + CONVERT_2_BYTES_TO_HALF(temp13, temp14, temp3, temp15, temp5, temp16, + temp11, temp17, temp3, temp5, temp11, temp12) + PACK_2_HALVES_TO_WORD(temp12, temp18, temp7, temp6, temp1, temp8, temp2, + temp4, temp7, temp6, temp10, temp9) + STORE_SAT_SUM_X2(temp13, temp14, temp3, temp15, temp5, temp16, temp11, + temp17, temp12, temp18, temp1, temp8, temp2, temp4, + temp7, temp6, dst, 0, 1, 2, 3, BPS) + + OUTPUT_EARLY_CLOBBER_REGS_18(), + [c4]"+&r"(c4) + : [dst]"r"(dst), [a]"r"(a), [d1]"r"(d1), [d4]"r"(d4), [c1]"r"(c1) + : "memory" + ); +} + +static void TransformOne(const int16_t* in, uint8_t* dst) { + int temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9; + int temp10, temp11, temp12, temp13, temp14, temp15, temp16, temp17, temp18; + + __asm__ volatile ( + "ulw %[temp1], 0(%[in]) \n\t" + "ulw %[temp2], 16(%[in]) \n\t" + LOAD_IN_X2(temp5, temp6, 24, 26) + ADD_SUB_HALVES(temp3, temp4, temp1, temp2) + LOAD_IN_X2(temp1, temp2, 8, 10) + MUL_SHIFT_SUM(temp7, temp8, temp9, temp10, temp11, temp12, temp13, temp14, + temp10, temp8, temp9, temp7, temp1, temp2, temp5, temp6, + temp13, temp11, temp14, temp12) + INSERT_HALF_X2(temp8, temp7, temp10, temp9) + "ulw %[temp17], 4(%[in]) \n\t" + "ulw %[temp18], 20(%[in]) \n\t" + ADD_SUB_HALVES(temp1, temp2, temp3, temp8) + ADD_SUB_HALVES(temp5, temp6, temp4, temp7) + ADD_SUB_HALVES(temp7, temp8, temp17, temp18) + LOAD_IN_X2(temp17, temp18, 12, 14) + LOAD_IN_X2(temp9, temp10, 28, 30) + MUL_SHIFT_SUM(temp11, temp12, temp13, temp14, temp15, temp16, temp4, temp17, + temp12, temp14, temp11, temp13, temp17, temp18, temp9, temp10, + temp15, temp4, temp16, temp17) + INSERT_HALF_X2(temp11, temp12, temp13, temp14) + ADD_SUB_HALVES(temp17, temp8, temp8, temp11) + ADD_SUB_HALVES(temp3, temp4, temp7, temp12) + + // horizontal + SRA_16(temp9, temp10, temp11, temp12, temp1, temp2, temp5, temp6) + INSERT_HALF_X2(temp1, temp6, temp5, temp2) + SRA_16(temp13, temp14, temp15, temp16, temp3, temp4, temp17, temp8) + "repl.ph %[temp2], 0x4 \n\t" + INSERT_HALF_X2(temp3, temp8, temp17, temp4) + "addq.ph %[temp1], %[temp1], %[temp2] \n\t" + "addq.ph %[temp6], %[temp6], %[temp2] \n\t" + ADD_SUB_HALVES(temp2, temp4, temp1, temp3) + ADD_SUB_HALVES(temp5, temp7, temp6, temp8) + MUL_SHIFT_SUM(temp1, temp3, temp6, temp8, temp9, temp13, temp17, temp18, + temp3, temp13, temp1, temp9, temp9, temp13, temp11, temp15, + temp6, temp17, temp8, temp18) + MUL_SHIFT_SUM(temp6, temp8, temp18, temp17, temp11, temp15, temp12, temp16, + temp8, temp15, temp6, temp11, temp12, temp16, temp10, temp14, + temp18, temp12, temp17, temp16) + INSERT_HALF_X2(temp1, temp3, temp9, temp13) + INSERT_HALF_X2(temp6, temp8, temp11, temp15) + SHIFT_R_SUM_X2(temp9, temp10, temp11, temp12, temp13, temp14, temp15, + temp16, temp2, temp4, temp5, temp7, temp3, temp1, temp8, + temp6) + PACK_2_HALVES_TO_WORD(temp1, temp2, temp3, temp4, temp9, temp12, temp13, + temp16, temp11, temp10, temp15, temp14) + LOAD_WITH_OFFSET_X4(temp10, temp11, temp14, temp15, dst, + 0, 0, 0, 0, + 0, 1, 2, 3, + BPS) + CONVERT_2_BYTES_TO_HALF(temp5, temp6, temp7, temp8, temp17, temp18, temp10, + temp11, temp10, temp11, temp14, temp15) + STORE_SAT_SUM_X2(temp5, temp6, temp7, temp8, temp17, temp18, temp10, temp11, + temp9, temp12, temp1, temp2, temp13, temp16, temp3, temp4, + dst, 0, 1, 2, 3, BPS) + + OUTPUT_EARLY_CLOBBER_REGS_18() + : [dst]"r"(dst), [in]"r"(in), [kC1]"r"(kC1), [kC2]"r"(kC2) + : "memory", "hi", "lo" + ); +} + +static void TransformTwo(const int16_t* in, uint8_t* dst, int do_two) { + 
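+  // the optional second 4x4 sub-block sits 16 coefficients further into
+  // 'in' and 4 pixels to the right in 'dst' (the do_two path below)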
TransformOne(in, dst); + if (do_two) { + TransformOne(in + 16, dst + 4); + } +} + +static WEBP_INLINE void FilterLoop26(uint8_t* p, + int hstride, int vstride, int size, + int thresh, int ithresh, int hev_thresh) { + const int thresh2 = 2 * thresh + 1; + int temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9; + int temp10, temp11, temp12, temp13, temp14, temp15; + + __asm__ volatile ( + ".set push \n\t" + ".set noreorder \n\t" + "1: \n\t" + "negu %[temp1], %[hstride] \n\t" + "addiu %[size], %[size], -1 \n\t" + "sll %[temp2], %[hstride], 1 \n\t" + "sll %[temp3], %[temp1], 1 \n\t" + "addu %[temp4], %[temp2], %[hstride] \n\t" + "addu %[temp5], %[temp3], %[temp1] \n\t" + "lbu %[temp7], 0(%[p]) \n\t" + "sll %[temp6], %[temp3], 1 \n\t" + "lbux %[temp8], %[temp5](%[p]) \n\t" + "lbux %[temp9], %[temp3](%[p]) \n\t" + "lbux %[temp10], %[temp1](%[p]) \n\t" + "lbux %[temp11], %[temp6](%[p]) \n\t" + "lbux %[temp12], %[hstride](%[p]) \n\t" + "lbux %[temp13], %[temp2](%[p]) \n\t" + "lbux %[temp14], %[temp4](%[p]) \n\t" + "subu %[temp1], %[temp10], %[temp7] \n\t" + "subu %[temp2], %[temp9], %[temp12] \n\t" + "absq_s.w %[temp3], %[temp1] \n\t" + "absq_s.w %[temp4], %[temp2] \n\t" + "negu %[temp1], %[temp1] \n\t" + "sll %[temp3], %[temp3], 2 \n\t" + "addu %[temp15], %[temp3], %[temp4] \n\t" + "subu %[temp3], %[temp15], %[thresh2] \n\t" + "sll %[temp6], %[temp1], 1 \n\t" + "bgtz %[temp3], 3f \n\t" + " subu %[temp4], %[temp11], %[temp8] \n\t" + "absq_s.w %[temp4], %[temp4] \n\t" + "shll_s.w %[temp2], %[temp2], 24 \n\t" + "subu %[temp4], %[temp4], %[ithresh] \n\t" + "bgtz %[temp4], 3f \n\t" + " subu %[temp3], %[temp8], %[temp9] \n\t" + "absq_s.w %[temp3], %[temp3] \n\t" + "subu %[temp3], %[temp3], %[ithresh] \n\t" + "bgtz %[temp3], 3f \n\t" + " subu %[temp5], %[temp9], %[temp10] \n\t" + "absq_s.w %[temp3], %[temp5] \n\t" + "absq_s.w %[temp5], %[temp5] \n\t" + "subu %[temp3], %[temp3], %[ithresh] \n\t" + "bgtz %[temp3], 3f \n\t" + " subu %[temp3], %[temp14], %[temp13] \n\t" + "absq_s.w %[temp3], %[temp3] \n\t" + "slt %[temp5], %[hev_thresh], %[temp5] \n\t" + "subu %[temp3], %[temp3], %[ithresh] \n\t" + "bgtz %[temp3], 3f \n\t" + " subu %[temp3], %[temp13], %[temp12] \n\t" + "absq_s.w %[temp3], %[temp3] \n\t" + "sra %[temp4], %[temp2], 24 \n\t" + "subu %[temp3], %[temp3], %[ithresh] \n\t" + "bgtz %[temp3], 3f \n\t" + " subu %[temp15], %[temp12], %[temp7] \n\t" + "absq_s.w %[temp3], %[temp15] \n\t" + "absq_s.w %[temp15], %[temp15] \n\t" + "subu %[temp3], %[temp3], %[ithresh] \n\t" + "bgtz %[temp3], 3f \n\t" + " slt %[temp15], %[hev_thresh], %[temp15] \n\t" + "addu %[temp3], %[temp6], %[temp1] \n\t" + "or %[temp2], %[temp5], %[temp15] \n\t" + "addu %[temp5], %[temp4], %[temp3] \n\t" + "beqz %[temp2], 4f \n\t" + " shra_r.w %[temp1], %[temp5], 3 \n\t" + "addiu %[temp2], %[temp5], 3 \n\t" + "sra %[temp2], %[temp2], 3 \n\t" + "shll_s.w %[temp1], %[temp1], 27 \n\t" + "shll_s.w %[temp2], %[temp2], 27 \n\t" + "subu %[temp3], %[p], %[hstride] \n\t" + "sra %[temp1], %[temp1], 27 \n\t" + "sra %[temp2], %[temp2], 27 \n\t" + "subu %[temp1], %[temp7], %[temp1] \n\t" + "addu %[temp2], %[temp10], %[temp2] \n\t" + "lbux %[temp2], %[temp2](%[VP8kclip1]) \n\t" + "lbux %[temp1], %[temp1](%[VP8kclip1]) \n\t" + "sb %[temp2], 0(%[temp3]) \n\t" + "j 3f \n\t" + " sb %[temp1], 0(%[p]) \n\t" + "4: \n\t" + "shll_s.w %[temp5], %[temp5], 24 \n\t" + "subu %[temp14], %[p], %[hstride] \n\t" + "subu %[temp11], %[temp14], %[hstride] \n\t" + "sra %[temp6], %[temp5], 24 \n\t" + "sll %[temp1], %[temp6], 3 \n\t" + "subu %[temp15], 
%[temp11], %[hstride] \n\t" + "addu %[temp2], %[temp6], %[temp1] \n\t" + "sll %[temp3], %[temp2], 1 \n\t" + "addu %[temp4], %[temp3], %[temp2] \n\t" + "addiu %[temp2], %[temp2], 63 \n\t" + "addiu %[temp3], %[temp3], 63 \n\t" + "addiu %[temp4], %[temp4], 63 \n\t" + "sra %[temp2], %[temp2], 7 \n\t" + "sra %[temp3], %[temp3], 7 \n\t" + "sra %[temp4], %[temp4], 7 \n\t" + "addu %[temp1], %[temp8], %[temp2] \n\t" + "addu %[temp5], %[temp9], %[temp3] \n\t" + "addu %[temp6], %[temp10], %[temp4] \n\t" + "subu %[temp8], %[temp7], %[temp4] \n\t" + "subu %[temp7], %[temp12], %[temp3] \n\t" + "addu %[temp10], %[p], %[hstride] \n\t" + "subu %[temp9], %[temp13], %[temp2] \n\t" + "addu %[temp12], %[temp10], %[hstride] \n\t" + "lbux %[temp2], %[temp1](%[VP8kclip1]) \n\t" + "lbux %[temp3], %[temp5](%[VP8kclip1]) \n\t" + "lbux %[temp4], %[temp6](%[VP8kclip1]) \n\t" + "lbux %[temp5], %[temp8](%[VP8kclip1]) \n\t" + "lbux %[temp6], %[temp7](%[VP8kclip1]) \n\t" + "lbux %[temp8], %[temp9](%[VP8kclip1]) \n\t" + "sb %[temp2], 0(%[temp15]) \n\t" + "sb %[temp3], 0(%[temp11]) \n\t" + "sb %[temp4], 0(%[temp14]) \n\t" + "sb %[temp5], 0(%[p]) \n\t" + "sb %[temp6], 0(%[temp10]) \n\t" + "sb %[temp8], 0(%[temp12]) \n\t" + "3: \n\t" + "bgtz %[size], 1b \n\t" + " addu %[p], %[p], %[vstride] \n\t" + ".set pop \n\t" + : [temp1]"=&r"(temp1), [temp2]"=&r"(temp2),[temp3]"=&r"(temp3), + [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), [temp6]"=&r"(temp6), + [temp7]"=&r"(temp7),[temp8]"=&r"(temp8),[temp9]"=&r"(temp9), + [temp10]"=&r"(temp10),[temp11]"=&r"(temp11),[temp12]"=&r"(temp12), + [temp13]"=&r"(temp13),[temp14]"=&r"(temp14),[temp15]"=&r"(temp15), + [size]"+&r"(size), [p]"+&r"(p) + : [hstride]"r"(hstride), [thresh2]"r"(thresh2), + [ithresh]"r"(ithresh),[vstride]"r"(vstride), [hev_thresh]"r"(hev_thresh), + [VP8kclip1]"r"(VP8kclip1) + : "memory" + ); +} + +static WEBP_INLINE void FilterLoop24(uint8_t* p, + int hstride, int vstride, int size, + int thresh, int ithresh, int hev_thresh) { + int p0, q0, p1, q1, p2, q2, p3, q3; + int step1, step2, temp1, temp2, temp3, temp4; + uint8_t* pTemp0; + uint8_t* pTemp1; + const int thresh2 = 2 * thresh + 1; + + __asm__ volatile ( + ".set push \n\t" + ".set noreorder \n\t" + "bltz %[size], 3f \n\t" + " nop \n\t" + "2: \n\t" + "negu %[step1], %[hstride] \n\t" + "lbu %[q0], 0(%[p]) \n\t" + "lbux %[p0], %[step1](%[p]) \n\t" + "subu %[step1], %[step1], %[hstride] \n\t" + "lbux %[q1], %[hstride](%[p]) \n\t" + "subu %[temp1], %[p0], %[q0] \n\t" + "lbux %[p1], %[step1](%[p]) \n\t" + "addu %[step2], %[hstride], %[hstride] \n\t" + "absq_s.w %[temp2], %[temp1] \n\t" + "subu %[temp3], %[p1], %[q1] \n\t" + "absq_s.w %[temp4], %[temp3] \n\t" + "sll %[temp2], %[temp2], 2 \n\t" + "addu %[temp2], %[temp2], %[temp4] \n\t" + "subu %[temp4], %[temp2], %[thresh2] \n\t" + "subu %[step1], %[step1], %[hstride] \n\t" + "bgtz %[temp4], 0f \n\t" + " lbux %[p2], %[step1](%[p]) \n\t" + "subu %[step1], %[step1], %[hstride] \n\t" + "lbux %[q2], %[step2](%[p]) \n\t" + "lbux %[p3], %[step1](%[p]) \n\t" + "subu %[temp4], %[p2], %[p1] \n\t" + "addu %[step2], %[step2], %[hstride] \n\t" + "subu %[temp2], %[p3], %[p2] \n\t" + "absq_s.w %[temp4], %[temp4] \n\t" + "absq_s.w %[temp2], %[temp2] \n\t" + "lbux %[q3], %[step2](%[p]) \n\t" + "subu %[temp4], %[temp4], %[ithresh] \n\t" + "negu %[temp1], %[temp1] \n\t" + "bgtz %[temp4], 0f \n\t" + " subu %[temp2], %[temp2], %[ithresh] \n\t" + "subu %[p3], %[p1], %[p0] \n\t" + "bgtz %[temp2], 0f \n\t" + " absq_s.w %[p3], %[p3] \n\t" + "subu %[temp4], %[q3], %[q2] \n\t" + "subu %[pTemp0], %[p], 
%[hstride] \n\t" + "absq_s.w %[temp4], %[temp4] \n\t" + "subu %[temp2], %[p3], %[ithresh] \n\t" + "sll %[step1], %[temp1], 1 \n\t" + "bgtz %[temp2], 0f \n\t" + " subu %[temp4], %[temp4], %[ithresh] \n\t" + "subu %[temp2], %[q2], %[q1] \n\t" + "bgtz %[temp4], 0f \n\t" + " absq_s.w %[temp2], %[temp2] \n\t" + "subu %[q3], %[q1], %[q0] \n\t" + "absq_s.w %[q3], %[q3] \n\t" + "subu %[temp2], %[temp2], %[ithresh] \n\t" + "addu %[temp1], %[temp1], %[step1] \n\t" + "bgtz %[temp2], 0f \n\t" + " subu %[temp4], %[q3], %[ithresh] \n\t" + "slt %[p3], %[hev_thresh], %[p3] \n\t" + "bgtz %[temp4], 0f \n\t" + " slt %[q3], %[hev_thresh], %[q3] \n\t" + "or %[q3], %[q3], %[p3] \n\t" + "bgtz %[q3], 1f \n\t" + " shra_r.w %[temp2], %[temp1], 3 \n\t" + "addiu %[temp1], %[temp1], 3 \n\t" + "sra %[temp1], %[temp1], 3 \n\t" + "shll_s.w %[temp2], %[temp2], 27 \n\t" + "shll_s.w %[temp1], %[temp1], 27 \n\t" + "addu %[pTemp1], %[p], %[hstride] \n\t" + "sra %[temp2], %[temp2], 27 \n\t" + "sra %[temp1], %[temp1], 27 \n\t" + "addiu %[step1], %[temp2], 1 \n\t" + "sra %[step1], %[step1], 1 \n\t" + "addu %[p0], %[p0], %[temp1] \n\t" + "addu %[p1], %[p1], %[step1] \n\t" + "subu %[q0], %[q0], %[temp2] \n\t" + "subu %[q1], %[q1], %[step1] \n\t" + "lbux %[temp2], %[p0](%[VP8kclip1]) \n\t" + "lbux %[temp3], %[q0](%[VP8kclip1]) \n\t" + "lbux %[temp4], %[q1](%[VP8kclip1]) \n\t" + "sb %[temp2], 0(%[pTemp0]) \n\t" + "lbux %[temp1], %[p1](%[VP8kclip1]) \n\t" + "subu %[pTemp0], %[pTemp0], %[hstride] \n\t" + "sb %[temp3], 0(%[p]) \n\t" + "sb %[temp4], 0(%[pTemp1]) \n\t" + "j 0f \n\t" + " sb %[temp1], 0(%[pTemp0]) \n\t" + "1: \n\t" + "shll_s.w %[temp3], %[temp3], 24 \n\t" + "sra %[temp3], %[temp3], 24 \n\t" + "addu %[temp1], %[temp1], %[temp3] \n\t" + "shra_r.w %[temp2], %[temp1], 3 \n\t" + "addiu %[temp1], %[temp1], 3 \n\t" + "shll_s.w %[temp2], %[temp2], 27 \n\t" + "sra %[temp1], %[temp1], 3 \n\t" + "shll_s.w %[temp1], %[temp1], 27 \n\t" + "sra %[temp2], %[temp2], 27 \n\t" + "sra %[temp1], %[temp1], 27 \n\t" + "addu %[p0], %[p0], %[temp1] \n\t" + "subu %[q0], %[q0], %[temp2] \n\t" + "lbux %[temp1], %[p0](%[VP8kclip1]) \n\t" + "lbux %[temp2], %[q0](%[VP8kclip1]) \n\t" + "sb %[temp2], 0(%[p]) \n\t" + "sb %[temp1], 0(%[pTemp0]) \n\t" + "0: \n\t" + "subu %[size], %[size], 1 \n\t" + "bgtz %[size], 2b \n\t" + " addu %[p], %[p], %[vstride] \n\t" + "3: \n\t" + ".set pop \n\t" + : [p0]"=&r"(p0), [q0]"=&r"(q0), [p1]"=&r"(p1), [q1]"=&r"(q1), + [p2]"=&r"(p2), [q2]"=&r"(q2), [p3]"=&r"(p3), [q3]"=&r"(q3), + [step2]"=&r"(step2), [step1]"=&r"(step1), [temp1]"=&r"(temp1), + [temp2]"=&r"(temp2), [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), + [pTemp0]"=&r"(pTemp0), [pTemp1]"=&r"(pTemp1), [p]"+&r"(p), + [size]"+&r"(size) + : [vstride]"r"(vstride), [ithresh]"r"(ithresh), + [hev_thresh]"r"(hev_thresh), [hstride]"r"(hstride), + [VP8kclip1]"r"(VP8kclip1), [thresh2]"r"(thresh2) + : "memory" + ); +} + +// on macroblock edges +static void VFilter16(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + FilterLoop26(p, stride, 1, 16, thresh, ithresh, hev_thresh); +} + +static void HFilter16(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + FilterLoop26(p, 1, stride, 16, thresh, ithresh, hev_thresh); +} + +// 8-pixels wide variant, for chroma filtering +static void VFilter8(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + FilterLoop26(u, stride, 1, 8, thresh, ithresh, hev_thresh); + FilterLoop26(v, stride, 1, 8, thresh, ithresh, hev_thresh); +} + +static void HFilter8(uint8_t* u, uint8_t* v, int 
stride, + int thresh, int ithresh, int hev_thresh) { + FilterLoop26(u, 1, stride, 8, thresh, ithresh, hev_thresh); + FilterLoop26(v, 1, stride, 8, thresh, ithresh, hev_thresh); +} + +// on three inner edges +static void VFilter16i(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + int k; + for (k = 3; k > 0; --k) { + p += 4 * stride; + FilterLoop24(p, stride, 1, 16, thresh, ithresh, hev_thresh); + } +} + +static void HFilter16i(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + int k; + for (k = 3; k > 0; --k) { + p += 4; + FilterLoop24(p, 1, stride, 16, thresh, ithresh, hev_thresh); + } +} + +static void VFilter8i(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + FilterLoop24(u + 4 * stride, stride, 1, 8, thresh, ithresh, hev_thresh); + FilterLoop24(v + 4 * stride, stride, 1, 8, thresh, ithresh, hev_thresh); +} + +static void HFilter8i(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + FilterLoop24(u + 4, 1, stride, 8, thresh, ithresh, hev_thresh); + FilterLoop24(v + 4, 1, stride, 8, thresh, ithresh, hev_thresh); +} + +#undef MUL + +//------------------------------------------------------------------------------ +// Simple In-loop filtering (Paragraph 15.2) + +static void SimpleVFilter16(uint8_t* p, int stride, int thresh) { + int i; + const int thresh2 = 2 * thresh + 1; + int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8; + uint8_t* p1 = p - stride; + __asm__ volatile ( + ".set push \n\t" + ".set noreorder \n\t" + "li %[i], 16 \n\t" + "0: \n\t" + "negu %[temp4], %[stride] \n\t" + "sll %[temp5], %[temp4], 1 \n\t" + "lbu %[temp2], 0(%[p]) \n\t" + "lbux %[temp3], %[stride](%[p]) \n\t" + "lbux %[temp1], %[temp4](%[p]) \n\t" + "lbux %[temp0], %[temp5](%[p]) \n\t" + "subu %[temp7], %[temp1], %[temp2] \n\t" + "subu %[temp6], %[temp0], %[temp3] \n\t" + "absq_s.w %[temp4], %[temp7] \n\t" + "absq_s.w %[temp5], %[temp6] \n\t" + "sll %[temp4], %[temp4], 2 \n\t" + "subu %[temp5], %[temp5], %[thresh2] \n\t" + "addu %[temp5], %[temp4], %[temp5] \n\t" + "negu %[temp8], %[temp7] \n\t" + "bgtz %[temp5], 1f \n\t" + " addiu %[i], %[i], -1 \n\t" + "sll %[temp4], %[temp8], 1 \n\t" + "shll_s.w %[temp5], %[temp6], 24 \n\t" + "addu %[temp3], %[temp4], %[temp8] \n\t" + "sra %[temp5], %[temp5], 24 \n\t" + "addu %[temp3], %[temp3], %[temp5] \n\t" + "addiu %[temp7], %[temp3], 3 \n\t" + "sra %[temp7], %[temp7], 3 \n\t" + "shra_r.w %[temp8], %[temp3], 3 \n\t" + "shll_s.w %[temp0], %[temp7], 27 \n\t" + "shll_s.w %[temp4], %[temp8], 27 \n\t" + "sra %[temp0], %[temp0], 27 \n\t" + "sra %[temp4], %[temp4], 27 \n\t" + "addu %[temp7], %[temp1], %[temp0] \n\t" + "subu %[temp2], %[temp2], %[temp4] \n\t" + "lbux %[temp3], %[temp7](%[VP8kclip1]) \n\t" + "lbux %[temp4], %[temp2](%[VP8kclip1]) \n\t" + "sb %[temp3], 0(%[p1]) \n\t" + "sb %[temp4], 0(%[p]) \n\t" + "1: \n\t" + "addiu %[p1], %[p1], 1 \n\t" + "bgtz %[i], 0b \n\t" + " addiu %[p], %[p], 1 \n\t" + " .set pop \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [temp8]"=&r"(temp8), + [p]"+&r"(p), [i]"=&r"(i), [p1]"+&r"(p1) + : [stride]"r"(stride), [VP8kclip1]"r"(VP8kclip1), [thresh2]"r"(thresh2) + : "memory" + ); +} + +// TEMP0 = SRC[A + A1 * BPS] +// TEMP1 = SRC[B + B1 * BPS] +// TEMP2 = SRC[C + C1 * BPS] +// TEMP3 = SRC[D + D1 * BPS] +#define LOAD_4_BYTES(TEMP0, TEMP1, TEMP2, TEMP3, \ + A, A1, B, B1, C, C1, D, D1, SRC) \ + 
"lbu %[" #TEMP0 "], " #A "+" #A1 "*" XSTR(BPS) "(%[" #SRC "]) \n\t" \ + "lbu %[" #TEMP1 "], " #B "+" #B1 "*" XSTR(BPS) "(%[" #SRC "]) \n\t" \ + "lbu %[" #TEMP2 "], " #C "+" #C1 "*" XSTR(BPS) "(%[" #SRC "]) \n\t" \ + "lbu %[" #TEMP3 "], " #D "+" #D1 "*" XSTR(BPS) "(%[" #SRC "]) \n\t" \ + +static void SimpleHFilter16(uint8_t* p, int stride, int thresh) { + int i; + const int thresh2 = 2 * thresh + 1; + int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8; + __asm__ volatile ( + ".set push \n\t" + ".set noreorder \n\t" + "li %[i], 16 \n\t" + "0: \n\t" + LOAD_4_BYTES(temp0, temp1, temp2, temp3, -2, 0, -1, 0, 0, 0, 1, 0, p) + "subu %[temp7], %[temp1], %[temp2] \n\t" + "subu %[temp6], %[temp0], %[temp3] \n\t" + "absq_s.w %[temp4], %[temp7] \n\t" + "absq_s.w %[temp5], %[temp6] \n\t" + "sll %[temp4], %[temp4], 2 \n\t" + "addu %[temp5], %[temp4], %[temp5] \n\t" + "subu %[temp5], %[temp5], %[thresh2] \n\t" + "negu %[temp8], %[temp7] \n\t" + "bgtz %[temp5], 1f \n\t" + " addiu %[i], %[i], -1 \n\t" + "sll %[temp4], %[temp8], 1 \n\t" + "shll_s.w %[temp5], %[temp6], 24 \n\t" + "addu %[temp3], %[temp4], %[temp8] \n\t" + "sra %[temp5], %[temp5], 24 \n\t" + "addu %[temp3], %[temp3], %[temp5] \n\t" + "addiu %[temp7], %[temp3], 3 \n\t" + "sra %[temp7], %[temp7], 3 \n\t" + "shra_r.w %[temp8], %[temp3], 3 \n\t" + "shll_s.w %[temp0], %[temp7], 27 \n\t" + "shll_s.w %[temp4], %[temp8], 27 \n\t" + "sra %[temp0], %[temp0], 27 \n\t" + "sra %[temp4], %[temp4], 27 \n\t" + "addu %[temp7], %[temp1], %[temp0] \n\t" + "subu %[temp2], %[temp2], %[temp4] \n\t" + "lbux %[temp3], %[temp7](%[VP8kclip1]) \n\t" + "lbux %[temp4], %[temp2](%[VP8kclip1]) \n\t" + "sb %[temp3], -1(%[p]) \n\t" + "sb %[temp4], 0(%[p]) \n\t" + "1: \n\t" + "bgtz %[i], 0b \n\t" + " addu %[p], %[p], %[stride] \n\t" + ".set pop \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [temp8]"=&r"(temp8), + [p]"+&r"(p), [i]"=&r"(i) + : [stride]"r"(stride), [VP8kclip1]"r"(VP8kclip1), [thresh2]"r"(thresh2) + : "memory" + ); +} + +static void SimpleVFilter16i(uint8_t* p, int stride, int thresh) { + int k; + for (k = 3; k > 0; --k) { + p += 4 * stride; + SimpleVFilter16(p, stride, thresh); + } +} + +static void SimpleHFilter16i(uint8_t* p, int stride, int thresh) { + int k; + for (k = 3; k > 0; --k) { + p += 4; + SimpleHFilter16(p, stride, thresh); + } +} + +// DST[A * BPS] = TEMP0 +// DST[B + C * BPS] = TEMP1 +#define STORE_8_BYTES(TEMP0, TEMP1, A, B, C, DST) \ + "usw %[" #TEMP0 "], " #A "*" XSTR(BPS) "(%[" #DST "]) \n\t" \ + "usw %[" #TEMP1 "], " #B "+" #C "*" XSTR(BPS) "(%[" #DST "]) \n\t" + +static void VE4(uint8_t* dst) { // vertical + const uint8_t* top = dst - BPS; + int temp0, temp1, temp2, temp3, temp4, temp5, temp6; + __asm__ volatile ( + "ulw %[temp0], -1(%[top]) \n\t" + "ulh %[temp1], 3(%[top]) \n\t" + "preceu.ph.qbr %[temp2], %[temp0] \n\t" + "preceu.ph.qbl %[temp3], %[temp0] \n\t" + "preceu.ph.qbr %[temp4], %[temp1] \n\t" + "packrl.ph %[temp5], %[temp3], %[temp2] \n\t" + "packrl.ph %[temp6], %[temp4], %[temp3] \n\t" + "shll.ph %[temp5], %[temp5], 1 \n\t" + "shll.ph %[temp6], %[temp6], 1 \n\t" + "addq.ph %[temp2], %[temp5], %[temp2] \n\t" + "addq.ph %[temp6], %[temp6], %[temp4] \n\t" + "addq.ph %[temp2], %[temp2], %[temp3] \n\t" + "addq.ph %[temp6], %[temp6], %[temp3] \n\t" + "shra_r.ph %[temp2], %[temp2], 2 \n\t" + "shra_r.ph %[temp6], %[temp6], 2 \n\t" + "precr.qb.ph %[temp4], %[temp6], %[temp2] \n\t" + 
STORE_8_BYTES(temp4, temp4, 0, 0, 1, dst) + STORE_8_BYTES(temp4, temp4, 2, 0, 3, dst) + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [temp6]"=&r"(temp6) + : [top]"r"(top), [dst]"r"(dst) + : "memory" + ); +} + +static void DC4(uint8_t* dst) { // DC + int temp0, temp1, temp2, temp3, temp4; + __asm__ volatile ( + "ulw %[temp0], -1*" XSTR(BPS) "(%[dst]) \n\t" + LOAD_4_BYTES(temp1, temp2, temp3, temp4, -1, 0, -1, 1, -1, 2, -1, 3, dst) + "ins %[temp1], %[temp2], 8, 8 \n\t" + "ins %[temp1], %[temp3], 16, 8 \n\t" + "ins %[temp1], %[temp4], 24, 8 \n\t" + "raddu.w.qb %[temp0], %[temp0] \n\t" + "raddu.w.qb %[temp1], %[temp1] \n\t" + "addu %[temp0], %[temp0], %[temp1] \n\t" + "shra_r.w %[temp0], %[temp0], 3 \n\t" + "replv.qb %[temp0], %[temp0] \n\t" + STORE_8_BYTES(temp0, temp0, 0, 0, 1, dst) + STORE_8_BYTES(temp0, temp0, 2, 0, 3, dst) + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4) + : [dst]"r"(dst) + : "memory" + ); +} + +static void RD4(uint8_t* dst) { // Down-right + int temp0, temp1, temp2, temp3, temp4; + int temp5, temp6, temp7, temp8; + __asm__ volatile ( + LOAD_4_BYTES(temp0, temp1, temp2, temp3, -1, 0, -1, 1, -1, 2, -1, 3, dst) + "ulw %[temp7], -1-" XSTR(BPS) "(%[dst]) \n\t" + "ins %[temp1], %[temp0], 16, 16 \n\t" + "preceu.ph.qbr %[temp5], %[temp7] \n\t" + "ins %[temp2], %[temp1], 16, 16 \n\t" + "preceu.ph.qbl %[temp4], %[temp7] \n\t" + "ins %[temp3], %[temp2], 16, 16 \n\t" + "shll.ph %[temp2], %[temp2], 1 \n\t" + "addq.ph %[temp3], %[temp3], %[temp1] \n\t" + "packrl.ph %[temp6], %[temp5], %[temp1] \n\t" + "addq.ph %[temp3], %[temp3], %[temp2] \n\t" + "addq.ph %[temp1], %[temp1], %[temp5] \n\t" + "shll.ph %[temp6], %[temp6], 1 \n\t" + "addq.ph %[temp1], %[temp1], %[temp6] \n\t" + "packrl.ph %[temp0], %[temp4], %[temp5] \n\t" + "addq.ph %[temp8], %[temp5], %[temp4] \n\t" + "shra_r.ph %[temp3], %[temp3], 2 \n\t" + "shll.ph %[temp0], %[temp0], 1 \n\t" + "shra_r.ph %[temp1], %[temp1], 2 \n\t" + "addq.ph %[temp8], %[temp0], %[temp8] \n\t" + "lbu %[temp5], 3-" XSTR(BPS) "(%[dst]) \n\t" + "precrq.ph.w %[temp7], %[temp7], %[temp7] \n\t" + "shra_r.ph %[temp8], %[temp8], 2 \n\t" + "ins %[temp7], %[temp5], 0, 8 \n\t" + "precr.qb.ph %[temp2], %[temp1], %[temp3] \n\t" + "raddu.w.qb %[temp4], %[temp7] \n\t" + "precr.qb.ph %[temp6], %[temp8], %[temp1] \n\t" + "shra_r.w %[temp4], %[temp4], 2 \n\t" + STORE_8_BYTES(temp2, temp6, 3, 0, 1, dst) + "prepend %[temp2], %[temp8], 8 \n\t" + "prepend %[temp6], %[temp4], 8 \n\t" + STORE_8_BYTES(temp2, temp6, 2, 0, 0, dst) + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [temp8]"=&r"(temp8) + : [dst]"r"(dst) + : "memory" + ); +} + +// TEMP0 = SRC[A * BPS] +// TEMP1 = SRC[B + C * BPS] +#define LOAD_8_BYTES(TEMP0, TEMP1, A, B, C, SRC) \ + "ulw %[" #TEMP0 "], " #A "*" XSTR(BPS) "(%[" #SRC "]) \n\t" \ + "ulw %[" #TEMP1 "], " #B "+" #C "*" XSTR(BPS) "(%[" #SRC "]) \n\t" + +static void LD4(uint8_t* dst) { // Down-Left + int temp0, temp1, temp2, temp3, temp4; + int temp5, temp6, temp7, temp8, temp9; + __asm__ volatile ( + LOAD_8_BYTES(temp0, temp1, -1, 4, -1, dst) + "preceu.ph.qbl %[temp2], %[temp0] \n\t" + "preceu.ph.qbr %[temp3], %[temp0] \n\t" + "preceu.ph.qbr %[temp4], %[temp1] \n\t" + "preceu.ph.qbl %[temp5], %[temp1] \n\t" + "packrl.ph %[temp6], %[temp2], %[temp3] \n\t" + "packrl.ph %[temp7], 
%[temp4], %[temp2] \n\t" + "packrl.ph %[temp8], %[temp5], %[temp4] \n\t" + "shll.ph %[temp6], %[temp6], 1 \n\t" + "addq.ph %[temp9], %[temp2], %[temp6] \n\t" + "shll.ph %[temp7], %[temp7], 1 \n\t" + "addq.ph %[temp9], %[temp9], %[temp3] \n\t" + "shll.ph %[temp8], %[temp8], 1 \n\t" + "shra_r.ph %[temp9], %[temp9], 2 \n\t" + "addq.ph %[temp3], %[temp4], %[temp7] \n\t" + "addq.ph %[temp0], %[temp5], %[temp8] \n\t" + "addq.ph %[temp3], %[temp3], %[temp2] \n\t" + "addq.ph %[temp0], %[temp0], %[temp4] \n\t" + "shra_r.ph %[temp3], %[temp3], 2 \n\t" + "shra_r.ph %[temp0], %[temp0], 2 \n\t" + "srl %[temp1], %[temp1], 24 \n\t" + "sll %[temp1], %[temp1], 1 \n\t" + "raddu.w.qb %[temp5], %[temp5] \n\t" + "precr.qb.ph %[temp9], %[temp3], %[temp9] \n\t" + "precr.qb.ph %[temp3], %[temp0], %[temp3] \n\t" + "addu %[temp1], %[temp1], %[temp5] \n\t" + "shra_r.w %[temp1], %[temp1], 2 \n\t" + STORE_8_BYTES(temp9, temp3, 0, 0, 2, dst) + "prepend %[temp9], %[temp0], 8 \n\t" + "prepend %[temp3], %[temp1], 8 \n\t" + STORE_8_BYTES(temp9, temp3, 1, 0, 3, dst) + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [temp8]"=&r"(temp8), + [temp9]"=&r"(temp9) + : [dst]"r"(dst) + : "memory" + ); +} + +//------------------------------------------------------------------------------ +// Chroma + +static void DC8uv(uint8_t* dst) { // DC + int temp0, temp1, temp2, temp3, temp4; + int temp5, temp6, temp7, temp8, temp9; + __asm__ volatile ( + LOAD_8_BYTES(temp0, temp1, -1, 4, -1, dst) + LOAD_4_BYTES(temp2, temp3, temp4, temp5, -1, 0, -1, 1, -1, 2, -1, 3, dst) + LOAD_4_BYTES(temp6, temp7, temp8, temp9, -1, 4, -1, 5, -1, 6, -1, 7, dst) + "raddu.w.qb %[temp0], %[temp0] \n\t" + "raddu.w.qb %[temp1], %[temp1] \n\t" + "addu %[temp2], %[temp2], %[temp3] \n\t" + "addu %[temp4], %[temp4], %[temp5] \n\t" + "addu %[temp6], %[temp6], %[temp7] \n\t" + "addu %[temp8], %[temp8], %[temp9] \n\t" + "addu %[temp0], %[temp0], %[temp1] \n\t" + "addu %[temp2], %[temp2], %[temp4] \n\t" + "addu %[temp6], %[temp6], %[temp8] \n\t" + "addu %[temp0], %[temp0], %[temp2] \n\t" + "addu %[temp0], %[temp0], %[temp6] \n\t" + "shra_r.w %[temp0], %[temp0], 4 \n\t" + "replv.qb %[temp0], %[temp0] \n\t" + STORE_8_BYTES(temp0, temp0, 0, 4, 0, dst) + STORE_8_BYTES(temp0, temp0, 1, 4, 1, dst) + STORE_8_BYTES(temp0, temp0, 2, 4, 2, dst) + STORE_8_BYTES(temp0, temp0, 3, 4, 3, dst) + STORE_8_BYTES(temp0, temp0, 4, 4, 4, dst) + STORE_8_BYTES(temp0, temp0, 5, 4, 5, dst) + STORE_8_BYTES(temp0, temp0, 6, 4, 6, dst) + STORE_8_BYTES(temp0, temp0, 7, 4, 7, dst) + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [temp8]"=&r"(temp8), + [temp9]"=&r"(temp9) + : [dst]"r"(dst) + : "memory" + ); +} + +static void DC8uvNoLeft(uint8_t* dst) { // DC with no left samples + int temp0, temp1; + __asm__ volatile ( + LOAD_8_BYTES(temp0, temp1, -1, 4, -1, dst) + "raddu.w.qb %[temp0], %[temp0] \n\t" + "raddu.w.qb %[temp1], %[temp1] \n\t" + "addu %[temp0], %[temp0], %[temp1] \n\t" + "shra_r.w %[temp0], %[temp0], 3 \n\t" + "replv.qb %[temp0], %[temp0] \n\t" + STORE_8_BYTES(temp0, temp0, 0, 4, 0, dst) + STORE_8_BYTES(temp0, temp0, 1, 4, 1, dst) + STORE_8_BYTES(temp0, temp0, 2, 4, 2, dst) + STORE_8_BYTES(temp0, temp0, 3, 4, 3, dst) + STORE_8_BYTES(temp0, temp0, 4, 4, 4, dst) + STORE_8_BYTES(temp0, temp0, 5, 4, 5, dst) + STORE_8_BYTES(temp0, temp0, 6, 4, 6, 
dst) + STORE_8_BYTES(temp0, temp0, 7, 4, 7, dst) + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1) + : [dst]"r"(dst) + : "memory" + ); +} + +static void DC8uvNoTop(uint8_t* dst) { // DC with no top samples + int temp0, temp1, temp2, temp3, temp4; + int temp5, temp6, temp7, temp8; + __asm__ volatile ( + LOAD_4_BYTES(temp2, temp3, temp4, temp5, -1, 0, -1, 1, -1, 2, -1, 3, dst) + LOAD_4_BYTES(temp6, temp7, temp8, temp1, -1, 4, -1, 5, -1, 6, -1, 7, dst) + "addu %[temp2], %[temp2], %[temp3] \n\t" + "addu %[temp4], %[temp4], %[temp5] \n\t" + "addu %[temp6], %[temp6], %[temp7] \n\t" + "addu %[temp8], %[temp8], %[temp1] \n\t" + "addu %[temp2], %[temp2], %[temp4] \n\t" + "addu %[temp6], %[temp6], %[temp8] \n\t" + "addu %[temp0], %[temp6], %[temp2] \n\t" + "shra_r.w %[temp0], %[temp0], 3 \n\t" + "replv.qb %[temp0], %[temp0] \n\t" + STORE_8_BYTES(temp0, temp0, 0, 4, 0, dst) + STORE_8_BYTES(temp0, temp0, 1, 4, 1, dst) + STORE_8_BYTES(temp0, temp0, 2, 4, 2, dst) + STORE_8_BYTES(temp0, temp0, 3, 4, 3, dst) + STORE_8_BYTES(temp0, temp0, 4, 4, 4, dst) + STORE_8_BYTES(temp0, temp0, 5, 4, 5, dst) + STORE_8_BYTES(temp0, temp0, 6, 4, 6, dst) + STORE_8_BYTES(temp0, temp0, 7, 4, 7, dst) + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [temp8]"=&r"(temp8) + : [dst]"r"(dst) + : "memory" + ); +} + +#undef LOAD_8_BYTES +#undef STORE_8_BYTES +#undef LOAD_4_BYTES + +#define CLIPPING(SIZE) \ + "preceu.ph.qbl %[temp2], %[temp0] \n\t" \ + "preceu.ph.qbr %[temp0], %[temp0] \n\t" \ +".if " #SIZE " == 8 \n\t" \ + "preceu.ph.qbl %[temp3], %[temp1] \n\t" \ + "preceu.ph.qbr %[temp1], %[temp1] \n\t" \ +".endif \n\t" \ + "addu.ph %[temp2], %[temp2], %[dst_1] \n\t" \ + "addu.ph %[temp0], %[temp0], %[dst_1] \n\t" \ +".if " #SIZE " == 8 \n\t" \ + "addu.ph %[temp3], %[temp3], %[dst_1] \n\t" \ + "addu.ph %[temp1], %[temp1], %[dst_1] \n\t" \ +".endif \n\t" \ + "shll_s.ph %[temp2], %[temp2], 7 \n\t" \ + "shll_s.ph %[temp0], %[temp0], 7 \n\t" \ +".if " #SIZE " == 8 \n\t" \ + "shll_s.ph %[temp3], %[temp3], 7 \n\t" \ + "shll_s.ph %[temp1], %[temp1], 7 \n\t" \ +".endif \n\t" \ + "precrqu_s.qb.ph %[temp0], %[temp2], %[temp0] \n\t" \ +".if " #SIZE " == 8 \n\t" \ + "precrqu_s.qb.ph %[temp1], %[temp3], %[temp1] \n\t" \ +".endif \n\t" + + +#define CLIP_8B_TO_DST(DST, TOP, SIZE) do { \ + int dst_1 = ((int)(DST)[-1] << 16) + (DST)[-1]; \ + int temp0, temp1, temp2, temp3; \ + __asm__ volatile ( \ + ".if " #SIZE " < 8 \n\t" \ + "ulw %[temp0], 0(%[top]) \n\t" \ + "subu.ph %[dst_1], %[dst_1], %[top_1] \n\t" \ + CLIPPING(4) \ + "usw %[temp0], 0(%[dst]) \n\t" \ + ".else \n\t" \ + "ulw %[temp0], 0(%[top]) \n\t" \ + "ulw %[temp1], 4(%[top]) \n\t" \ + "subu.ph %[dst_1], %[dst_1], %[top_1] \n\t" \ + CLIPPING(8) \ + "usw %[temp0], 0(%[dst]) \n\t" \ + "usw %[temp1], 4(%[dst]) \n\t" \ + ".if " #SIZE " == 16 \n\t" \ + "ulw %[temp0], 8(%[top]) \n\t" \ + "ulw %[temp1], 12(%[top]) \n\t" \ + CLIPPING(8) \ + "usw %[temp0], 8(%[dst]) \n\t" \ + "usw %[temp1], 12(%[dst]) \n\t" \ + ".endif \n\t" \ + ".endif \n\t" \ + : [dst_1]"+&r"(dst_1), [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), \ + [temp2]"=&r"(temp2), [temp3]"=&r"(temp3) \ + : [top_1]"r"(top_1), [top]"r"((TOP)), [dst]"r"((DST)) \ + : "memory" \ + ); \ +} while (0) + +#define CLIP_TO_DST(DST, SIZE) do { \ + int y; \ + const uint8_t* top = (DST) - BPS; \ + const int top_1 = ((int)top[-1] << 16) + top[-1]; \ + for (y = 0; y < (SIZE); ++y) { \ + CLIP_8B_TO_DST((DST), top, (SIZE)); \ + (DST) += BPS; 
\ + } \ +} while (0) + +#define TRUE_MOTION(DST, SIZE) \ +static void TrueMotion##SIZE(uint8_t* (DST)) { \ + CLIP_TO_DST((DST), (SIZE)); \ +} + +TRUE_MOTION(dst, 4) +TRUE_MOTION(dst, 8) +TRUE_MOTION(dst, 16) + +#undef TRUE_MOTION +#undef CLIP_TO_DST +#undef CLIP_8B_TO_DST +#undef CLIPPING + +//------------------------------------------------------------------------------ +// Entry point + +extern void VP8DspInitMIPSdspR2(void); + +WEBP_TSAN_IGNORE_FUNCTION void VP8DspInitMIPSdspR2(void) { + VP8TransformDC = TransformDC; + VP8TransformAC3 = TransformAC3; + VP8Transform = TransformTwo; + + VP8VFilter16 = VFilter16; + VP8HFilter16 = HFilter16; + VP8VFilter8 = VFilter8; + VP8HFilter8 = HFilter8; + VP8VFilter16i = VFilter16i; + VP8HFilter16i = HFilter16i; + VP8VFilter8i = VFilter8i; + VP8HFilter8i = HFilter8i; + VP8SimpleVFilter16 = SimpleVFilter16; + VP8SimpleHFilter16 = SimpleHFilter16; + VP8SimpleVFilter16i = SimpleVFilter16i; + VP8SimpleHFilter16i = SimpleHFilter16i; + + VP8PredLuma4[0] = DC4; + VP8PredLuma4[1] = TrueMotion4; + VP8PredLuma4[2] = VE4; + VP8PredLuma4[4] = RD4; + VP8PredLuma4[6] = LD4; + + VP8PredChroma8[0] = DC8uv; + VP8PredChroma8[1] = TrueMotion8; + VP8PredChroma8[4] = DC8uvNoTop; + VP8PredChroma8[5] = DC8uvNoLeft; + + VP8PredLuma16[1] = TrueMotion16; +} + +#else // !WEBP_USE_MIPS_DSP_R2 + +WEBP_DSP_INIT_STUB(VP8DspInitMIPSdspR2) + +#endif // WEBP_USE_MIPS_DSP_R2 diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_dec_msa.c b/vendor/github.com/sizeofint/webpanimation/dsp_dec_msa.c new file mode 100644 index 00000000..629ca33d --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_dec_msa.c @@ -0,0 +1,1020 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. 
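+// +// Note on the transforms below: VP8's inverse DCT uses the Q16 fixed-point +// constants 20091 ~= (sqrt(2) * cos(pi / 8) - 1) * 65536 and +// 35468 ~= sqrt(2) * sin(pi / 8) * 65536, so the MULT1() and MULT2() helpers +// defined below compute a * sqrt(2) * cos(pi / 8) and +// a * sqrt(2) * sin(pi / 8); the same constants appear in IDCT_1D_W as +// cospi8sqrt2minus1 and sinpi8sqrt2.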
+// ----------------------------------------------------------------------------- +// +// MSA version of dsp functions +// +// Author(s): Prashant Patil (prashant.patil@imgtec.com) + + +#include "dsp_dsp.h" + +#if defined(WEBP_USE_MSA) + +#include "dsp_msa_macro.h" + +//------------------------------------------------------------------------------ +// Transforms + +#define IDCT_1D_W(in0, in1, in2, in3, out0, out1, out2, out3) { \ + v4i32 a1_m, b1_m, c1_m, d1_m; \ + v4i32 c_tmp1_m, c_tmp2_m, d_tmp1_m, d_tmp2_m; \ + const v4i32 cospi8sqrt2minus1 = __msa_fill_w(20091); \ + const v4i32 sinpi8sqrt2 = __msa_fill_w(35468); \ + \ + a1_m = in0 + in2; \ + b1_m = in0 - in2; \ + c_tmp1_m = (in1 * sinpi8sqrt2) >> 16; \ + c_tmp2_m = in3 + ((in3 * cospi8sqrt2minus1) >> 16); \ + c1_m = c_tmp1_m - c_tmp2_m; \ + d_tmp1_m = in1 + ((in1 * cospi8sqrt2minus1) >> 16); \ + d_tmp2_m = (in3 * sinpi8sqrt2) >> 16; \ + d1_m = d_tmp1_m + d_tmp2_m; \ + BUTTERFLY_4(a1_m, b1_m, c1_m, d1_m, out0, out1, out2, out3); \ +} +#define MULT1(a) ((((a) * 20091) >> 16) + (a)) +#define MULT2(a) (((a) * 35468) >> 16) + +static void TransformOne(const int16_t* in, uint8_t* dst) { + v8i16 input0, input1; + v4i32 in0, in1, in2, in3, hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3; + v4i32 res0, res1, res2, res3; + const v16i8 zero = { 0 }; + v16i8 dest0, dest1, dest2, dest3; + + LD_SH2(in, 8, input0, input1); + UNPCK_SH_SW(input0, in0, in1); + UNPCK_SH_SW(input1, in2, in3); + IDCT_1D_W(in0, in1, in2, in3, hz0, hz1, hz2, hz3); + TRANSPOSE4x4_SW_SW(hz0, hz1, hz2, hz3, hz0, hz1, hz2, hz3); + IDCT_1D_W(hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3); + SRARI_W4_SW(vt0, vt1, vt2, vt3, 3); + TRANSPOSE4x4_SW_SW(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3); + LD_SB4(dst, BPS, dest0, dest1, dest2, dest3); + ILVR_B4_SW(zero, dest0, zero, dest1, zero, dest2, zero, dest3, + res0, res1, res2, res3); + ILVR_H4_SW(zero, res0, zero, res1, zero, res2, zero, res3, + res0, res1, res2, res3); + ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3); + CLIP_SW4_0_255(res0, res1, res2, res3); + PCKEV_B2_SW(res0, res1, res2, res3, vt0, vt1); + res0 = (v4i32)__msa_pckev_b((v16i8)vt0, (v16i8)vt1); + ST4x4_UB(res0, res0, 3, 2, 1, 0, dst, BPS); +} + +static void TransformTwo(const int16_t* in, uint8_t* dst, int do_two) { + TransformOne(in, dst); + if (do_two) { + TransformOne(in + 16, dst + 4); + } +} + +static void TransformWHT(const int16_t* in, int16_t* out) { + v8i16 input0, input1; + const v8i16 mask0 = { 0, 1, 2, 3, 8, 9, 10, 11 }; + const v8i16 mask1 = { 4, 5, 6, 7, 12, 13, 14, 15 }; + const v8i16 mask2 = { 0, 4, 8, 12, 1, 5, 9, 13 }; + const v8i16 mask3 = { 3, 7, 11, 15, 2, 6, 10, 14 }; + v8i16 tmp0, tmp1, tmp2, tmp3; + v8i16 out0, out1; + + LD_SH2(in, 8, input0, input1); + input1 = SLDI_SH(input1, input1, 8); + tmp0 = input0 + input1; + tmp1 = input0 - input1; + VSHF_H2_SH(tmp0, tmp1, tmp0, tmp1, mask0, mask1, tmp2, tmp3); + out0 = tmp2 + tmp3; + out1 = tmp2 - tmp3; + VSHF_H2_SH(out0, out1, out0, out1, mask2, mask3, input0, input1); + tmp0 = input0 + input1; + tmp1 = input0 - input1; + VSHF_H2_SH(tmp0, tmp1, tmp0, tmp1, mask0, mask1, tmp2, tmp3); + tmp0 = tmp2 + tmp3; + tmp1 = tmp2 - tmp3; + ADDVI_H2_SH(tmp0, 3, tmp1, 3, out0, out1); + SRAI_H2_SH(out0, out1, 3); + out[0] = __msa_copy_s_h(out0, 0); + out[16] = __msa_copy_s_h(out0, 4); + out[32] = __msa_copy_s_h(out1, 0); + out[48] = __msa_copy_s_h(out1, 4); + out[64] = __msa_copy_s_h(out0, 1); + out[80] = __msa_copy_s_h(out0, 5); + out[96] = __msa_copy_s_h(out1, 1); + out[112] = __msa_copy_s_h(out1, 5); + 
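// The outputs are scattered with a stride of 16 because 'out' is the base of + // 16 consecutive 4x4 coefficient blocks of 16 int16_t each: every + // re-transformed DC lands in slot 0 of its own block, roughly (sketch; + // 'wht[k]' stands for the k-th transform output, not a real variable): + //   for (k = 0; k < 16; ++k) out[16 * k] = wht[k]; +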
out[128] = __msa_copy_s_h(out0, 2); + out[144] = __msa_copy_s_h(out0, 6); + out[160] = __msa_copy_s_h(out1, 2); + out[176] = __msa_copy_s_h(out1, 6); + out[192] = __msa_copy_s_h(out0, 3); + out[208] = __msa_copy_s_h(out0, 7); + out[224] = __msa_copy_s_h(out1, 3); + out[240] = __msa_copy_s_h(out1, 7); +} + +static void TransformDC(const int16_t* in, uint8_t* dst) { + const int DC = (in[0] + 4) >> 3; + const v8i16 tmp0 = __msa_fill_h(DC); + ADDBLK_ST4x4_UB(tmp0, tmp0, tmp0, tmp0, dst, BPS); +} + +static void TransformAC3(const int16_t* in, uint8_t* dst) { + const int a = in[0] + 4; + const int c4 = MULT2(in[4]); + const int d4 = MULT1(in[4]); + const int in2 = MULT2(in[1]); + const int in3 = MULT1(in[1]); + v4i32 tmp0 = { 0 }; + v4i32 out0 = __msa_fill_w(a + d4); + v4i32 out1 = __msa_fill_w(a + c4); + v4i32 out2 = __msa_fill_w(a - c4); + v4i32 out3 = __msa_fill_w(a - d4); + v4i32 res0, res1, res2, res3; + const v4i32 zero = { 0 }; + v16u8 dest0, dest1, dest2, dest3; + + INSERT_W4_SW(in3, in2, -in2, -in3, tmp0); + ADD4(out0, tmp0, out1, tmp0, out2, tmp0, out3, tmp0, + out0, out1, out2, out3); + SRAI_W4_SW(out0, out1, out2, out3, 3); + LD_UB4(dst, BPS, dest0, dest1, dest2, dest3); + ILVR_B4_SW(zero, dest0, zero, dest1, zero, dest2, zero, dest3, + res0, res1, res2, res3); + ILVR_H4_SW(zero, res0, zero, res1, zero, res2, zero, res3, + res0, res1, res2, res3); + ADD4(res0, out0, res1, out1, res2, out2, res3, out3, res0, res1, res2, res3); + CLIP_SW4_0_255(res0, res1, res2, res3); + PCKEV_B2_SW(res0, res1, res2, res3, out0, out1); + res0 = (v4i32)__msa_pckev_b((v16i8)out0, (v16i8)out1); + ST4x4_UB(res0, res0, 3, 2, 1, 0, dst, BPS); +} + +//------------------------------------------------------------------------------ +// Edge filtering functions + +#define FLIP_SIGN2(in0, in1, out0, out1) { \ + out0 = (v16i8)__msa_xori_b(in0, 0x80); \ + out1 = (v16i8)__msa_xori_b(in1, 0x80); \ +} + +#define FLIP_SIGN4(in0, in1, in2, in3, out0, out1, out2, out3) { \ + FLIP_SIGN2(in0, in1, out0, out1); \ + FLIP_SIGN2(in2, in3, out2, out3); \ +} + +#define FILT_VAL(q0_m, p0_m, mask, filt) do { \ + v16i8 q0_sub_p0; \ + q0_sub_p0 = __msa_subs_s_b(q0_m, p0_m); \ + filt = __msa_adds_s_b(filt, q0_sub_p0); \ + filt = __msa_adds_s_b(filt, q0_sub_p0); \ + filt = __msa_adds_s_b(filt, q0_sub_p0); \ + filt = filt & mask; \ +} while (0) + +#define FILT2(q_m, p_m, q, p) do { \ + u_r = SRAI_H(temp1, 7); \ + u_r = __msa_sat_s_h(u_r, 7); \ + u_l = SRAI_H(temp3, 7); \ + u_l = __msa_sat_s_h(u_l, 7); \ + u = __msa_pckev_b((v16i8)u_l, (v16i8)u_r); \ + q_m = __msa_subs_s_b(q_m, u); \ + p_m = __msa_adds_s_b(p_m, u); \ + q = __msa_xori_b((v16u8)q_m, 0x80); \ + p = __msa_xori_b((v16u8)p_m, 0x80); \ +} while (0) + +#define LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev) do { \ + v16i8 p1_m, p0_m, q0_m, q1_m; \ + v16i8 filt, t1, t2; \ + const v16i8 cnst4b = __msa_ldi_b(4); \ + const v16i8 cnst3b = __msa_ldi_b(3); \ + \ + FLIP_SIGN4(p1, p0, q0, q1, p1_m, p0_m, q0_m, q1_m); \ + filt = __msa_subs_s_b(p1_m, q1_m); \ + filt = filt & hev; \ + FILT_VAL(q0_m, p0_m, mask, filt); \ + t1 = __msa_adds_s_b(filt, cnst4b); \ + t1 = SRAI_B(t1, 3); \ + t2 = __msa_adds_s_b(filt, cnst3b); \ + t2 = SRAI_B(t2, 3); \ + q0_m = __msa_subs_s_b(q0_m, t1); \ + q0 = __msa_xori_b((v16u8)q0_m, 0x80); \ + p0_m = __msa_adds_s_b(p0_m, t2); \ + p0 = __msa_xori_b((v16u8)p0_m, 0x80); \ + filt = __msa_srari_b(t1, 1); \ + hev = __msa_xori_b(hev, 0xff); \ + filt = filt & hev; \ + q1_m = __msa_subs_s_b(q1_m, filt); \ + q1 = __msa_xori_b((v16u8)q1_m, 0x80); \ + p1_m = __msa_adds_s_b(p1_m, 
filt); \ + p1 = __msa_xori_b((v16u8)p1_m, 0x80); \ +} while (0) + +#define LPF_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev) do { \ + v16i8 p2_m, p1_m, p0_m, q2_m, q1_m, q0_m; \ + v16i8 u, filt, t1, t2, filt_sign; \ + v8i16 filt_r, filt_l, u_r, u_l; \ + v8i16 temp0, temp1, temp2, temp3; \ + const v16i8 cnst4b = __msa_ldi_b(4); \ + const v16i8 cnst3b = __msa_ldi_b(3); \ + const v8i16 cnst9h = __msa_ldi_h(9); \ + const v8i16 cnst63h = __msa_ldi_h(63); \ + \ + FLIP_SIGN4(p1, p0, q0, q1, p1_m, p0_m, q0_m, q1_m); \ + filt = __msa_subs_s_b(p1_m, q1_m); \ + FILT_VAL(q0_m, p0_m, mask, filt); \ + FLIP_SIGN2(p2, q2, p2_m, q2_m); \ + t2 = filt & hev; \ + /* filt_val &= ~hev */ \ + hev = __msa_xori_b(hev, 0xff); \ + filt = filt & hev; \ + t1 = __msa_adds_s_b(t2, cnst4b); \ + t1 = SRAI_B(t1, 3); \ + t2 = __msa_adds_s_b(t2, cnst3b); \ + t2 = SRAI_B(t2, 3); \ + q0_m = __msa_subs_s_b(q0_m, t1); \ + p0_m = __msa_adds_s_b(p0_m, t2); \ + filt_sign = __msa_clti_s_b(filt, 0); \ + ILVRL_B2_SH(filt_sign, filt, filt_r, filt_l); \ + /* update q2/p2 */ \ + temp0 = filt_r * cnst9h; \ + temp1 = temp0 + cnst63h; \ + temp2 = filt_l * cnst9h; \ + temp3 = temp2 + cnst63h; \ + FILT2(q2_m, p2_m, q2, p2); \ + /* update q1/p1 */ \ + temp1 = temp1 + temp0; \ + temp3 = temp3 + temp2; \ + FILT2(q1_m, p1_m, q1, p1); \ + /* update q0/p0 */ \ + temp1 = temp1 + temp0; \ + temp3 = temp3 + temp2; \ + FILT2(q0_m, p0_m, q0, p0); \ +} while (0) + +#define LPF_MASK_HEV(p3_in, p2_in, p1_in, p0_in, \ + q0_in, q1_in, q2_in, q3_in, \ + limit_in, b_limit_in, thresh_in, \ + hev_out, mask_out) do { \ + v16u8 p3_asub_p2_m, p2_asub_p1_m, p1_asub_p0_m, q1_asub_q0_m; \ + v16u8 p1_asub_q1_m, p0_asub_q0_m, q3_asub_q2_m, q2_asub_q1_m; \ + v16u8 flat_out; \ + \ + /* absolute subtraction of pixel values */ \ + p3_asub_p2_m = __msa_asub_u_b(p3_in, p2_in); \ + p2_asub_p1_m = __msa_asub_u_b(p2_in, p1_in); \ + p1_asub_p0_m = __msa_asub_u_b(p1_in, p0_in); \ + q1_asub_q0_m = __msa_asub_u_b(q1_in, q0_in); \ + q2_asub_q1_m = __msa_asub_u_b(q2_in, q1_in); \ + q3_asub_q2_m = __msa_asub_u_b(q3_in, q2_in); \ + p0_asub_q0_m = __msa_asub_u_b(p0_in, q0_in); \ + p1_asub_q1_m = __msa_asub_u_b(p1_in, q1_in); \ + /* calculation of hev */ \ + flat_out = __msa_max_u_b(p1_asub_p0_m, q1_asub_q0_m); \ + hev_out = (thresh_in < flat_out); \ + /* calculation of mask */ \ + p0_asub_q0_m = __msa_adds_u_b(p0_asub_q0_m, p0_asub_q0_m); \ + p1_asub_q1_m = SRAI_B(p1_asub_q1_m, 1); \ + p0_asub_q0_m = __msa_adds_u_b(p0_asub_q0_m, p1_asub_q1_m); \ + mask_out = (b_limit_in < p0_asub_q0_m); \ + mask_out = __msa_max_u_b(flat_out, mask_out); \ + p3_asub_p2_m = __msa_max_u_b(p3_asub_p2_m, p2_asub_p1_m); \ + mask_out = __msa_max_u_b(p3_asub_p2_m, mask_out); \ + q2_asub_q1_m = __msa_max_u_b(q2_asub_q1_m, q3_asub_q2_m); \ + mask_out = __msa_max_u_b(q2_asub_q1_m, mask_out); \ + mask_out = (limit_in < mask_out); \ + mask_out = __msa_xori_b(mask_out, 0xff); \ +} while (0) + +#define ST6x1_UB(in0, in0_idx, in1, in1_idx, pdst, stride) do { \ + const uint16_t tmp0_h = __msa_copy_s_h((v8i16)in1, in1_idx); \ + const uint32_t tmp0_w = __msa_copy_s_w((v4i32)in0, in0_idx); \ + SW(tmp0_w, pdst); \ + SH(tmp0_h, pdst + stride); \ +} while (0) + +#define ST6x4_UB(in0, start_in0_idx, in1, start_in1_idx, pdst, stride) do { \ + uint8_t* ptmp1 = (uint8_t*)pdst; \ + ST6x1_UB(in0, start_in0_idx, in1, start_in1_idx, ptmp1, 4); \ + ptmp1 += stride; \ + ST6x1_UB(in0, start_in0_idx + 1, in1, start_in1_idx + 1, ptmp1, 4); \ + ptmp1 += stride; \ + ST6x1_UB(in0, start_in0_idx + 2, in1, start_in1_idx + 2, ptmp1, 4); \ + ptmp1 
+= stride; \ + ST6x1_UB(in0, start_in0_idx + 3, in1, start_in1_idx + 3, ptmp1, 4); \ +} while (0) + +#define LPF_SIMPLE_FILT(p1_in, p0_in, q0_in, q1_in, mask) do { \ + v16i8 p1_m, p0_m, q0_m, q1_m, filt, filt1, filt2; \ + const v16i8 cnst4b = __msa_ldi_b(4); \ + const v16i8 cnst3b = __msa_ldi_b(3); \ + \ + FLIP_SIGN4(p1_in, p0_in, q0_in, q1_in, p1_m, p0_m, q0_m, q1_m); \ + filt = __msa_subs_s_b(p1_m, q1_m); \ + FILT_VAL(q0_m, p0_m, mask, filt); \ + filt1 = __msa_adds_s_b(filt, cnst4b); \ + filt1 = SRAI_B(filt1, 3); \ + filt2 = __msa_adds_s_b(filt, cnst3b); \ + filt2 = SRAI_B(filt2, 3); \ + q0_m = __msa_subs_s_b(q0_m, filt1); \ + p0_m = __msa_adds_s_b(p0_m, filt2); \ + q0_in = __msa_xori_b((v16u8)q0_m, 0x80); \ + p0_in = __msa_xori_b((v16u8)p0_m, 0x80); \ +} while (0) + +#define LPF_SIMPLE_MASK(p1, p0, q0, q1, b_limit, mask) do { \ + v16u8 p1_a_sub_q1, p0_a_sub_q0; \ + \ + p0_a_sub_q0 = __msa_asub_u_b(p0, q0); \ + p1_a_sub_q1 = __msa_asub_u_b(p1, q1); \ + p1_a_sub_q1 = (v16u8)__msa_srli_b((v16i8)p1_a_sub_q1, 1); \ + p0_a_sub_q0 = __msa_adds_u_b(p0_a_sub_q0, p0_a_sub_q0); \ + mask = __msa_adds_u_b(p0_a_sub_q0, p1_a_sub_q1); \ + mask = (mask <= b_limit); \ +} while (0) + +static void VFilter16(uint8_t* src, int stride, + int b_limit_in, int limit_in, int thresh_in) { + uint8_t* ptemp = src - 4 * stride; + v16u8 p3, p2, p1, p0, q3, q2, q1, q0; + v16u8 mask, hev; + const v16u8 thresh = (v16u8)__msa_fill_b(thresh_in); + const v16u8 limit = (v16u8)__msa_fill_b(limit_in); + const v16u8 b_limit = (v16u8)__msa_fill_b(b_limit_in); + + LD_UB8(ptemp, stride, p3, p2, p1, p0, q0, q1, q2, q3); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, + hev, mask); + LPF_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev); + ptemp = src - 3 * stride; + ST_UB4(p2, p1, p0, q0, ptemp, stride); + ptemp += (4 * stride); + ST_UB2(q1, q2, ptemp, stride); +} + +static void HFilter16(uint8_t* src, int stride, + int b_limit_in, int limit_in, int thresh_in) { + uint8_t* ptmp = src - 4; + v16u8 p3, p2, p1, p0, q3, q2, q1, q0; + v16u8 mask, hev; + v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8; + v16u8 row9, row10, row11, row12, row13, row14, row15; + v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + const v16u8 b_limit = (v16u8)__msa_fill_b(b_limit_in); + const v16u8 limit = (v16u8)__msa_fill_b(limit_in); + const v16u8 thresh = (v16u8)__msa_fill_b(thresh_in); + + LD_UB8(ptmp, stride, row0, row1, row2, row3, row4, row5, row6, row7); + ptmp += (8 * stride); + LD_UB8(ptmp, stride, row8, row9, row10, row11, row12, row13, row14, row15); + TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, + row8, row9, row10, row11, row12, row13, row14, row15, + p3, p2, p1, p0, q0, q1, q2, q3); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, + hev, mask); + LPF_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev); + ILVR_B2_SH(p1, p2, q0, p0, tmp0, tmp1); + ILVRL_H2_SH(tmp1, tmp0, tmp3, tmp4); + ILVL_B2_SH(p1, p2, q0, p0, tmp0, tmp1); + ILVRL_H2_SH(tmp1, tmp0, tmp6, tmp7); + ILVRL_B2_SH(q2, q1, tmp2, tmp5); + ptmp = src - 3; + ST6x1_UB(tmp3, 0, tmp2, 0, ptmp, 4); + ptmp += stride; + ST6x1_UB(tmp3, 1, tmp2, 1, ptmp, 4); + ptmp += stride; + ST6x1_UB(tmp3, 2, tmp2, 2, ptmp, 4); + ptmp += stride; + ST6x1_UB(tmp3, 3, tmp2, 3, ptmp, 4); + ptmp += stride; + ST6x1_UB(tmp4, 0, tmp2, 4, ptmp, 4); + ptmp += stride; + ST6x1_UB(tmp4, 1, tmp2, 5, ptmp, 4); + ptmp += stride; + ST6x1_UB(tmp4, 2, tmp2, 6, ptmp, 4); + ptmp += stride; + ST6x1_UB(tmp4, 3, tmp2, 7, ptmp, 4); + ptmp += stride; + 
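// Each ST6x1_UB() writes the six filtered pixels p2..q2 that straddle one + // row of the vertical edge, as an unaligned word plus a half-word (the + // SW/SH pair in the macro, 4 bytes apart); per row this is in effect + //   memcpy(src + row * stride - 3, filtered_row, 6);   // sketch only +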
ST6x1_UB(tmp6, 0, tmp5, 0, ptmp, 4); + ptmp += stride; + ST6x1_UB(tmp6, 1, tmp5, 1, ptmp, 4); + ptmp += stride; + ST6x1_UB(tmp6, 2, tmp5, 2, ptmp, 4); + ptmp += stride; + ST6x1_UB(tmp6, 3, tmp5, 3, ptmp, 4); + ptmp += stride; + ST6x1_UB(tmp7, 0, tmp5, 4, ptmp, 4); + ptmp += stride; + ST6x1_UB(tmp7, 1, tmp5, 5, ptmp, 4); + ptmp += stride; + ST6x1_UB(tmp7, 2, tmp5, 6, ptmp, 4); + ptmp += stride; + ST6x1_UB(tmp7, 3, tmp5, 7, ptmp, 4); +} + +// on three inner edges +static void VFilterHorEdge16i(uint8_t* src, int stride, + int b_limit, int limit, int thresh) { + v16u8 mask, hev; + v16u8 p3, p2, p1, p0, q3, q2, q1, q0; + const v16u8 thresh0 = (v16u8)__msa_fill_b(thresh); + const v16u8 b_limit0 = (v16u8)__msa_fill_b(b_limit); + const v16u8 limit0 = (v16u8)__msa_fill_b(limit); + + LD_UB8((src - 4 * stride), stride, p3, p2, p1, p0, q0, q1, q2, q3); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, + hev, mask); + LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev); + ST_UB4(p1, p0, q0, q1, (src - 2 * stride), stride); +} + +static void VFilter16i(uint8_t* src_y, int stride, + int b_limit, int limit, int thresh) { + VFilterHorEdge16i(src_y + 4 * stride, stride, b_limit, limit, thresh); + VFilterHorEdge16i(src_y + 8 * stride, stride, b_limit, limit, thresh); + VFilterHorEdge16i(src_y + 12 * stride, stride, b_limit, limit, thresh); +} + +static void HFilterVertEdge16i(uint8_t* src, int stride, + int b_limit, int limit, int thresh) { + v16u8 mask, hev; + v16u8 p3, p2, p1, p0, q3, q2, q1, q0; + v16u8 row0, row1, row2, row3, row4, row5, row6, row7; + v16u8 row8, row9, row10, row11, row12, row13, row14, row15; + v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5; + const v16u8 thresh0 = (v16u8)__msa_fill_b(thresh); + const v16u8 b_limit0 = (v16u8)__msa_fill_b(b_limit); + const v16u8 limit0 = (v16u8)__msa_fill_b(limit); + + LD_UB8(src - 4, stride, row0, row1, row2, row3, row4, row5, row6, row7); + LD_UB8(src - 4 + (8 * stride), stride, + row8, row9, row10, row11, row12, row13, row14, row15); + TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, + row8, row9, row10, row11, row12, row13, row14, row15, + p3, p2, p1, p0, q0, q1, q2, q3); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, + hev, mask); + LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev); + ILVR_B2_SH(p0, p1, q1, q0, tmp0, tmp1); + ILVRL_H2_SH(tmp1, tmp0, tmp2, tmp3); + ILVL_B2_SH(p0, p1, q1, q0, tmp0, tmp1); + ILVRL_H2_SH(tmp1, tmp0, tmp4, tmp5); + src -= 2; + ST4x8_UB(tmp2, tmp3, src, stride); + src += (8 * stride); + ST4x8_UB(tmp4, tmp5, src, stride); +} + +static void HFilter16i(uint8_t* src_y, int stride, + int b_limit, int limit, int thresh) { + HFilterVertEdge16i(src_y + 4, stride, b_limit, limit, thresh); + HFilterVertEdge16i(src_y + 8, stride, b_limit, limit, thresh); + HFilterVertEdge16i(src_y + 12, stride, b_limit, limit, thresh); +} + +// 8-pixels wide variants, for chroma filtering +static void VFilter8(uint8_t* src_u, uint8_t* src_v, int stride, + int b_limit_in, int limit_in, int thresh_in) { + uint8_t* ptmp_src_u = src_u - 4 * stride; + uint8_t* ptmp_src_v = src_v - 4 * stride; + uint64_t p2_d, p1_d, p0_d, q0_d, q1_d, q2_d; + v16u8 p3, p2, p1, p0, q3, q2, q1, q0, mask, hev; + v16u8 p3_u, p2_u, p1_u, p0_u, q3_u, q2_u, q1_u, q0_u; + v16u8 p3_v, p2_v, p1_v, p0_v, q3_v, q2_v, q1_v, q0_v; + const v16u8 b_limit = (v16u8)__msa_fill_b(b_limit_in); + const v16u8 limit = (v16u8)__msa_fill_b(limit_in); + const v16u8 thresh = (v16u8)__msa_fill_b(thresh_in); + + LD_UB8(ptmp_src_u, stride, p3_u, p2_u, p1_u, 
p0_u, q0_u, q1_u, q2_u, q3_u); + LD_UB8(ptmp_src_v, stride, p3_v, p2_v, p1_v, p0_v, q0_v, q1_v, q2_v, q3_v); + ILVR_D4_UB(p3_v, p3_u, p2_v, p2_u, p1_v, p1_u, p0_v, p0_u, p3, p2, p1, p0); + ILVR_D4_UB(q0_v, q0_u, q1_v, q1_u, q2_v, q2_u, q3_v, q3_u, q0, q1, q2, q3); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, + hev, mask); + LPF_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev); + p2_d = __msa_copy_s_d((v2i64)p2, 0); + p1_d = __msa_copy_s_d((v2i64)p1, 0); + p0_d = __msa_copy_s_d((v2i64)p0, 0); + q0_d = __msa_copy_s_d((v2i64)q0, 0); + q1_d = __msa_copy_s_d((v2i64)q1, 0); + q2_d = __msa_copy_s_d((v2i64)q2, 0); + ptmp_src_u += stride; + SD4(p2_d, p1_d, p0_d, q0_d, ptmp_src_u, stride); + ptmp_src_u += (4 * stride); + SD(q1_d, ptmp_src_u); + ptmp_src_u += stride; + SD(q2_d, ptmp_src_u); + p2_d = __msa_copy_s_d((v2i64)p2, 1); + p1_d = __msa_copy_s_d((v2i64)p1, 1); + p0_d = __msa_copy_s_d((v2i64)p0, 1); + q0_d = __msa_copy_s_d((v2i64)q0, 1); + q1_d = __msa_copy_s_d((v2i64)q1, 1); + q2_d = __msa_copy_s_d((v2i64)q2, 1); + ptmp_src_v += stride; + SD4(p2_d, p1_d, p0_d, q0_d, ptmp_src_v, stride); + ptmp_src_v += (4 * stride); + SD(q1_d, ptmp_src_v); + ptmp_src_v += stride; + SD(q2_d, ptmp_src_v); +} + +static void HFilter8(uint8_t* src_u, uint8_t* src_v, int stride, + int b_limit_in, int limit_in, int thresh_in) { + uint8_t* ptmp_src_u = src_u - 4; + uint8_t* ptmp_src_v = src_v - 4; + v16u8 p3, p2, p1, p0, q3, q2, q1, q0, mask, hev; + v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8; + v16u8 row9, row10, row11, row12, row13, row14, row15; + v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + const v16u8 b_limit = (v16u8)__msa_fill_b(b_limit_in); + const v16u8 limit = (v16u8)__msa_fill_b(limit_in); + const v16u8 thresh = (v16u8)__msa_fill_b(thresh_in); + + LD_UB8(ptmp_src_u, stride, row0, row1, row2, row3, row4, row5, row6, row7); + LD_UB8(ptmp_src_v, stride, + row8, row9, row10, row11, row12, row13, row14, row15); + TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, + row8, row9, row10, row11, row12, row13, row14, row15, + p3, p2, p1, p0, q0, q1, q2, q3); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, + hev, mask); + LPF_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev); + ILVR_B2_SH(p1, p2, q0, p0, tmp0, tmp1); + ILVRL_H2_SH(tmp1, tmp0, tmp3, tmp4); + ILVL_B2_SH(p1, p2, q0, p0, tmp0, tmp1); + ILVRL_H2_SH(tmp1, tmp0, tmp6, tmp7); + ILVRL_B2_SH(q2, q1, tmp2, tmp5); + ptmp_src_u += 1; + ST6x4_UB(tmp3, 0, tmp2, 0, ptmp_src_u, stride); + ptmp_src_u += 4 * stride; + ST6x4_UB(tmp4, 0, tmp2, 4, ptmp_src_u, stride); + ptmp_src_v += 1; + ST6x4_UB(tmp6, 0, tmp5, 0, ptmp_src_v, stride); + ptmp_src_v += 4 * stride; + ST6x4_UB(tmp7, 0, tmp5, 4, ptmp_src_v, stride); +} + +static void VFilter8i(uint8_t* src_u, uint8_t* src_v, int stride, + int b_limit_in, int limit_in, int thresh_in) { + uint64_t p1_d, p0_d, q0_d, q1_d; + v16u8 p3, p2, p1, p0, q3, q2, q1, q0, mask, hev; + v16u8 p3_u, p2_u, p1_u, p0_u, q3_u, q2_u, q1_u, q0_u; + v16u8 p3_v, p2_v, p1_v, p0_v, q3_v, q2_v, q1_v, q0_v; + const v16u8 thresh = (v16u8)__msa_fill_b(thresh_in); + const v16u8 limit = (v16u8)__msa_fill_b(limit_in); + const v16u8 b_limit = (v16u8)__msa_fill_b(b_limit_in); + + LD_UB8(src_u, stride, p3_u, p2_u, p1_u, p0_u, q0_u, q1_u, q2_u, q3_u); + src_u += (5 * stride); + LD_UB8(src_v, stride, p3_v, p2_v, p1_v, p0_v, q0_v, q1_v, q2_v, q3_v); + src_v += (5 * stride); + ILVR_D4_UB(p3_v, p3_u, p2_v, p2_u, p1_v, p1_u, p0_v, p0_u, p3, p2, p1, p0); + ILVR_D4_UB(q0_v, q0_u, q1_v, q1_u, 
q2_v, q2_u, q3_v, q3_u, q0, q1, q2, q3); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, + hev, mask); + LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev); + p1_d = __msa_copy_s_d((v2i64)p1, 0); + p0_d = __msa_copy_s_d((v2i64)p0, 0); + q0_d = __msa_copy_s_d((v2i64)q0, 0); + q1_d = __msa_copy_s_d((v2i64)q1, 0); + SD4(q1_d, q0_d, p0_d, p1_d, src_u, -stride); + p1_d = __msa_copy_s_d((v2i64)p1, 1); + p0_d = __msa_copy_s_d((v2i64)p0, 1); + q0_d = __msa_copy_s_d((v2i64)q0, 1); + q1_d = __msa_copy_s_d((v2i64)q1, 1); + SD4(q1_d, q0_d, p0_d, p1_d, src_v, -stride); +} + +static void HFilter8i(uint8_t* src_u, uint8_t* src_v, int stride, + int b_limit_in, int limit_in, int thresh_in) { + v16u8 p3, p2, p1, p0, q3, q2, q1, q0, mask, hev; + v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8; + v16u8 row9, row10, row11, row12, row13, row14, row15; + v4i32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5; + const v16u8 thresh = (v16u8)__msa_fill_b(thresh_in); + const v16u8 limit = (v16u8)__msa_fill_b(limit_in); + const v16u8 b_limit = (v16u8)__msa_fill_b(b_limit_in); + + LD_UB8(src_u, stride, row0, row1, row2, row3, row4, row5, row6, row7); + LD_UB8(src_v, stride, + row8, row9, row10, row11, row12, row13, row14, row15); + TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, + row8, row9, row10, row11, row12, row13, row14, row15, + p3, p2, p1, p0, q0, q1, q2, q3); + LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, + hev, mask); + LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev); + ILVR_B2_SW(p0, p1, q1, q0, tmp0, tmp1); + ILVRL_H2_SW(tmp1, tmp0, tmp2, tmp3); + ILVL_B2_SW(p0, p1, q1, q0, tmp0, tmp1); + ILVRL_H2_SW(tmp1, tmp0, tmp4, tmp5); + src_u += 2; + ST4x4_UB(tmp2, tmp2, 0, 1, 2, 3, src_u, stride); + src_u += 4 * stride; + ST4x4_UB(tmp3, tmp3, 0, 1, 2, 3, src_u, stride); + src_v += 2; + ST4x4_UB(tmp4, tmp4, 0, 1, 2, 3, src_v, stride); + src_v += 4 * stride; + ST4x4_UB(tmp5, tmp5, 0, 1, 2, 3, src_v, stride); +} + +static void SimpleVFilter16(uint8_t* src, int stride, int b_limit_in) { + v16u8 p1, p0, q1, q0, mask; + const v16u8 b_limit = (v16u8)__msa_fill_b(b_limit_in); + + LD_UB4(src - 2 * stride, stride, p1, p0, q0, q1); + LPF_SIMPLE_MASK(p1, p0, q0, q1, b_limit, mask); + LPF_SIMPLE_FILT(p1, p0, q0, q1, mask); + ST_UB2(p0, q0, src - stride, stride); +} + +static void SimpleHFilter16(uint8_t* src, int stride, int b_limit_in) { + v16u8 p1, p0, q1, q0, mask, row0, row1, row2, row3, row4, row5, row6, row7; + v16u8 row8, row9, row10, row11, row12, row13, row14, row15; + v8i16 tmp0, tmp1; + const v16u8 b_limit = (v16u8)__msa_fill_b(b_limit_in); + uint8_t* ptemp_src = src - 2; + + LD_UB8(ptemp_src, stride, row0, row1, row2, row3, row4, row5, row6, row7); + LD_UB8(ptemp_src + 8 * stride, stride, + row8, row9, row10, row11, row12, row13, row14, row15); + TRANSPOSE16x4_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7, + row8, row9, row10, row11, row12, row13, row14, row15, + p1, p0, q0, q1); + LPF_SIMPLE_MASK(p1, p0, q0, q1, b_limit, mask); + LPF_SIMPLE_FILT(p1, p0, q0, q1, mask); + ILVRL_B2_SH(q0, p0, tmp1, tmp0); + ptemp_src += 1; + ST2x4_UB(tmp1, 0, ptemp_src, stride); + ptemp_src += 4 * stride; + ST2x4_UB(tmp1, 4, ptemp_src, stride); + ptemp_src += 4 * stride; + ST2x4_UB(tmp0, 0, ptemp_src, stride); + ptemp_src += 4 * stride; + ST2x4_UB(tmp0, 4, ptemp_src, stride); + ptemp_src += 4 * stride; +} + +static void SimpleVFilter16i(uint8_t* src_y, int stride, int b_limit_in) { + SimpleVFilter16(src_y + 4 * stride, stride, b_limit_in); + SimpleVFilter16(src_y + 8 * stride, 
stride, b_limit_in); + SimpleVFilter16(src_y + 12 * stride, stride, b_limit_in); +} + +static void SimpleHFilter16i(uint8_t* src_y, int stride, int b_limit_in) { + SimpleHFilter16(src_y + 4, stride, b_limit_in); + SimpleHFilter16(src_y + 8, stride, b_limit_in); + SimpleHFilter16(src_y + 12, stride, b_limit_in); +} + +//------------------------------------------------------------------------------ +// Intra predictions +//------------------------------------------------------------------------------ + +// 4x4 + +static void DC4(uint8_t* dst) { // DC + uint32_t dc = 4; + int i; + for (i = 0; i < 4; ++i) dc += dst[i - BPS] + dst[-1 + i * BPS]; + dc >>= 3; + dc = dc | (dc << 8) | (dc << 16) | (dc << 24); + SW4(dc, dc, dc, dc, dst, BPS); +} + +static void TM4(uint8_t* dst) { + const uint8_t* const ptemp = dst - BPS - 1; + v8i16 T, d, r0, r1, r2, r3; + const v16i8 zero = { 0 }; + const v8i16 TL = (v8i16)__msa_fill_h(ptemp[0 * BPS]); + const v8i16 L0 = (v8i16)__msa_fill_h(ptemp[1 * BPS]); + const v8i16 L1 = (v8i16)__msa_fill_h(ptemp[2 * BPS]); + const v8i16 L2 = (v8i16)__msa_fill_h(ptemp[3 * BPS]); + const v8i16 L3 = (v8i16)__msa_fill_h(ptemp[4 * BPS]); + const v16u8 T1 = LD_UB(ptemp + 1); + + T = (v8i16)__msa_ilvr_b(zero, (v16i8)T1); + d = T - TL; + ADD4(d, L0, d, L1, d, L2, d, L3, r0, r1, r2, r3); + CLIP_SH4_0_255(r0, r1, r2, r3); + PCKEV_ST4x4_UB(r0, r1, r2, r3, dst, BPS); +} + +static void VE4(uint8_t* dst) { // vertical + const uint8_t* const ptop = dst - BPS - 1; + const uint32_t val0 = LW(ptop + 0); + const uint32_t val1 = LW(ptop + 4); + uint32_t out; + v16u8 A = { 0 }, B, C, AC, B2, R; + + INSERT_W2_UB(val0, val1, A); + B = SLDI_UB(A, A, 1); + C = SLDI_UB(A, A, 2); + AC = __msa_ave_u_b(A, C); + B2 = __msa_ave_u_b(B, B); + R = __msa_aver_u_b(AC, B2); + out = __msa_copy_s_w((v4i32)R, 0); + SW4(out, out, out, out, dst, BPS); +} + +static void RD4(uint8_t* dst) { // Down-right + const uint8_t* const ptop = dst - 1 - BPS; + uint32_t val0 = LW(ptop + 0); + uint32_t val1 = LW(ptop + 4); + uint32_t val2, val3; + v16u8 A, B, C, AC, B2, R, A1 = { 0 }; + + INSERT_W2_UB(val0, val1, A1); + A = SLDI_UB(A1, A1, 12); + A = (v16u8)__msa_insert_b((v16i8)A, 3, ptop[1 * BPS]); + A = (v16u8)__msa_insert_b((v16i8)A, 2, ptop[2 * BPS]); + A = (v16u8)__msa_insert_b((v16i8)A, 1, ptop[3 * BPS]); + A = (v16u8)__msa_insert_b((v16i8)A, 0, ptop[4 * BPS]); + B = SLDI_UB(A, A, 1); + C = SLDI_UB(A, A, 2); + AC = __msa_ave_u_b(A, C); + B2 = __msa_ave_u_b(B, B); + R = __msa_aver_u_b(AC, B2); + val3 = __msa_copy_s_w((v4i32)R, 0); + R = SLDI_UB(R, R, 1); + val2 = __msa_copy_s_w((v4i32)R, 0); + R = SLDI_UB(R, R, 1); + val1 = __msa_copy_s_w((v4i32)R, 0); + R = SLDI_UB(R, R, 1); + val0 = __msa_copy_s_w((v4i32)R, 0); + SW4(val0, val1, val2, val3, dst, BPS); +} + +static void LD4(uint8_t* dst) { // Down-Left + const uint8_t* const ptop = dst - BPS; + uint32_t val0 = LW(ptop + 0); + uint32_t val1 = LW(ptop + 4); + uint32_t val2, val3; + v16u8 A = { 0 }, B, C, AC, B2, R; + + INSERT_W2_UB(val0, val1, A); + B = SLDI_UB(A, A, 1); + C = SLDI_UB(A, A, 2); + C = (v16u8)__msa_insert_b((v16i8)C, 6, ptop[7]); + AC = __msa_ave_u_b(A, C); + B2 = __msa_ave_u_b(B, B); + R = __msa_aver_u_b(AC, B2); + val0 = __msa_copy_s_w((v4i32)R, 0); + R = SLDI_UB(R, R, 1); + val1 = __msa_copy_s_w((v4i32)R, 0); + R = SLDI_UB(R, R, 1); + val2 = __msa_copy_s_w((v4i32)R, 0); + R = SLDI_UB(R, R, 1); + val3 = __msa_copy_s_w((v4i32)R, 0); + SW4(val0, val1, val2, val3, dst, BPS); +} + +// 16x16 + +static void DC16(uint8_t* dst) { // DC + uint32_t dc = 16; + int i; + 
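// 'dc' is seeded with 16 so that the final '>> 5' yields the rounded mean of + // the 32 boundary samples (16 top, 16 left); the whole predictor reduces to + //   dc = (sum_top + sum_left + 16) >> 5;   // then fill the 16x16 block +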
const v16u8 rtop = LD_UB(dst - BPS); + const v8u16 dctop = __msa_hadd_u_h(rtop, rtop); + v16u8 out; + + for (i = 0; i < 16; ++i) { + dc += dst[-1 + i * BPS]; + } + dc += HADD_UH_U32(dctop); + out = (v16u8)__msa_fill_b(dc >> 5); + ST_UB8(out, out, out, out, out, out, out, out, dst, BPS); + ST_UB8(out, out, out, out, out, out, out, out, dst + 8 * BPS, BPS); +} + +static void TM16(uint8_t* dst) { + int j; + v8i16 d1, d2; + const v16i8 zero = { 0 }; + const v8i16 TL = (v8i16)__msa_fill_h(dst[-1 - BPS]); + const v16i8 T = LD_SB(dst - BPS); + + ILVRL_B2_SH(zero, T, d1, d2); + SUB2(d1, TL, d2, TL, d1, d2); + for (j = 0; j < 16; j += 4) { + v16i8 t0, t1, t2, t3; + v8i16 r0, r1, r2, r3, r4, r5, r6, r7; + const v8i16 L0 = (v8i16)__msa_fill_h(dst[-1 + 0 * BPS]); + const v8i16 L1 = (v8i16)__msa_fill_h(dst[-1 + 1 * BPS]); + const v8i16 L2 = (v8i16)__msa_fill_h(dst[-1 + 2 * BPS]); + const v8i16 L3 = (v8i16)__msa_fill_h(dst[-1 + 3 * BPS]); + ADD4(d1, L0, d1, L1, d1, L2, d1, L3, r0, r1, r2, r3); + ADD4(d2, L0, d2, L1, d2, L2, d2, L3, r4, r5, r6, r7); + CLIP_SH4_0_255(r0, r1, r2, r3); + CLIP_SH4_0_255(r4, r5, r6, r7); + PCKEV_B4_SB(r4, r0, r5, r1, r6, r2, r7, r3, t0, t1, t2, t3); + ST_SB4(t0, t1, t2, t3, dst, BPS); + dst += 4 * BPS; + } +} + +static void VE16(uint8_t* dst) { // vertical + const v16u8 rtop = LD_UB(dst - BPS); + ST_UB8(rtop, rtop, rtop, rtop, rtop, rtop, rtop, rtop, dst, BPS); + ST_UB8(rtop, rtop, rtop, rtop, rtop, rtop, rtop, rtop, dst + 8 * BPS, BPS); +} + +static void HE16(uint8_t* dst) { // horizontal + int j; + for (j = 16; j > 0; j -= 4) { + const v16u8 L0 = (v16u8)__msa_fill_b(dst[-1 + 0 * BPS]); + const v16u8 L1 = (v16u8)__msa_fill_b(dst[-1 + 1 * BPS]); + const v16u8 L2 = (v16u8)__msa_fill_b(dst[-1 + 2 * BPS]); + const v16u8 L3 = (v16u8)__msa_fill_b(dst[-1 + 3 * BPS]); + ST_UB4(L0, L1, L2, L3, dst, BPS); + dst += 4 * BPS; + } +} + +static void DC16NoTop(uint8_t* dst) { // DC with top samples not available + int j; + uint32_t dc = 8; + v16u8 out; + + for (j = 0; j < 16; ++j) { + dc += dst[-1 + j * BPS]; + } + out = (v16u8)__msa_fill_b(dc >> 4); + ST_UB8(out, out, out, out, out, out, out, out, dst, BPS); + ST_UB8(out, out, out, out, out, out, out, out, dst + 8 * BPS, BPS); +} + +static void DC16NoLeft(uint8_t* dst) { // DC with left samples not available + uint32_t dc = 8; + const v16u8 rtop = LD_UB(dst - BPS); + const v8u16 dctop = __msa_hadd_u_h(rtop, rtop); + v16u8 out; + + dc += HADD_UH_U32(dctop); + out = (v16u8)__msa_fill_b(dc >> 4); + ST_UB8(out, out, out, out, out, out, out, out, dst, BPS); + ST_UB8(out, out, out, out, out, out, out, out, dst + 8 * BPS, BPS); +} + +static void DC16NoTopLeft(uint8_t* dst) { // DC with nothing + const v16u8 out = (v16u8)__msa_fill_b(0x80); + ST_UB8(out, out, out, out, out, out, out, out, dst, BPS); + ST_UB8(out, out, out, out, out, out, out, out, dst + 8 * BPS, BPS); +} + +// Chroma + +#define STORE8x8(out, dst) do { \ + SD4(out, out, out, out, dst + 0 * BPS, BPS); \ + SD4(out, out, out, out, dst + 4 * BPS, BPS); \ +} while (0) + +static void DC8uv(uint8_t* dst) { // DC + uint32_t dc = 8; + int i; + uint64_t out; + const v16u8 rtop = LD_UB(dst - BPS); + const v8u16 temp0 = __msa_hadd_u_h(rtop, rtop); + const v4u32 temp1 = __msa_hadd_u_w(temp0, temp0); + const v2u64 temp2 = __msa_hadd_u_d(temp1, temp1); + v16u8 dctemp; + + for (i = 0; i < 8; ++i) { + dc += dst[-1 + i * BPS]; + } + dc += __msa_copy_s_w((v4i32)temp2, 0); + dctemp = (v16u8)__msa_fill_b(dc >> 4); + out = __msa_copy_s_d((v2i64)dctemp, 0); + STORE8x8(out, dst); +} + +static void 
TM8uv(uint8_t* dst) { + int j; + const v16i8 T1 = LD_SB(dst - BPS); + const v16i8 zero = { 0 }; + const v8i16 T = (v8i16)__msa_ilvr_b(zero, T1); + const v8i16 TL = (v8i16)__msa_fill_h(dst[-1 - BPS]); + const v8i16 d = T - TL; + + for (j = 0; j < 8; j += 4) { + v16i8 t0, t1; + v8i16 r0 = (v8i16)__msa_fill_h(dst[-1 + 0 * BPS]); + v8i16 r1 = (v8i16)__msa_fill_h(dst[-1 + 1 * BPS]); + v8i16 r2 = (v8i16)__msa_fill_h(dst[-1 + 2 * BPS]); + v8i16 r3 = (v8i16)__msa_fill_h(dst[-1 + 3 * BPS]); + ADD4(d, r0, d, r1, d, r2, d, r3, r0, r1, r2, r3); + CLIP_SH4_0_255(r0, r1, r2, r3); + PCKEV_B2_SB(r1, r0, r3, r2, t0, t1); + ST4x4_UB(t0, t1, 0, 2, 0, 2, dst, BPS); + ST4x4_UB(t0, t1, 1, 3, 1, 3, dst + 4, BPS); + dst += 4 * BPS; + } +} + +static void VE8uv(uint8_t* dst) { // vertical + const v16u8 rtop = LD_UB(dst - BPS); + const uint64_t out = __msa_copy_s_d((v2i64)rtop, 0); + STORE8x8(out, dst); +} + +static void HE8uv(uint8_t* dst) { // horizontal + int j; + for (j = 0; j < 8; j += 4) { + const v16u8 L0 = (v16u8)__msa_fill_b(dst[-1 + 0 * BPS]); + const v16u8 L1 = (v16u8)__msa_fill_b(dst[-1 + 1 * BPS]); + const v16u8 L2 = (v16u8)__msa_fill_b(dst[-1 + 2 * BPS]); + const v16u8 L3 = (v16u8)__msa_fill_b(dst[-1 + 3 * BPS]); + const uint64_t out0 = __msa_copy_s_d((v2i64)L0, 0); + const uint64_t out1 = __msa_copy_s_d((v2i64)L1, 0); + const uint64_t out2 = __msa_copy_s_d((v2i64)L2, 0); + const uint64_t out3 = __msa_copy_s_d((v2i64)L3, 0); + SD4(out0, out1, out2, out3, dst, BPS); + dst += 4 * BPS; + } +} + +static void DC8uvNoLeft(uint8_t* dst) { // DC with no left samples + const uint32_t dc = 4; + const v16u8 rtop = LD_UB(dst - BPS); + const v8u16 temp0 = __msa_hadd_u_h(rtop, rtop); + const v4u32 temp1 = __msa_hadd_u_w(temp0, temp0); + const v2u64 temp2 = __msa_hadd_u_d(temp1, temp1); + const uint32_t sum_m = __msa_copy_s_w((v4i32)temp2, 0); + const v16u8 dcval = (v16u8)__msa_fill_b((dc + sum_m) >> 3); + const uint64_t out = __msa_copy_s_d((v2i64)dcval, 0); + STORE8x8(out, dst); +} + +static void DC8uvNoTop(uint8_t* dst) { // DC with no top samples + uint32_t dc = 4; + int i; + uint64_t out; + v16u8 dctemp; + + for (i = 0; i < 8; ++i) { + dc += dst[-1 + i * BPS]; + } + dctemp = (v16u8)__msa_fill_b(dc >> 3); + out = __msa_copy_s_d((v2i64)dctemp, 0); + STORE8x8(out, dst); +} + +static void DC8uvNoTopLeft(uint8_t* dst) { // DC with nothing + const uint64_t out = 0x8080808080808080ULL; + STORE8x8(out, dst); +} + +//------------------------------------------------------------------------------ +// Entry point + +extern void VP8DspInitMSA(void); + +WEBP_TSAN_IGNORE_FUNCTION void VP8DspInitMSA(void) { + VP8TransformWHT = TransformWHT; + VP8Transform = TransformTwo; + VP8TransformDC = TransformDC; + VP8TransformAC3 = TransformAC3; + + VP8VFilter16 = VFilter16; + VP8HFilter16 = HFilter16; + VP8VFilter16i = VFilter16i; + VP8HFilter16i = HFilter16i; + VP8VFilter8 = VFilter8; + VP8HFilter8 = HFilter8; + VP8VFilter8i = VFilter8i; + VP8HFilter8i = HFilter8i; + VP8SimpleVFilter16 = SimpleVFilter16; + VP8SimpleHFilter16 = SimpleHFilter16; + VP8SimpleVFilter16i = SimpleVFilter16i; + VP8SimpleHFilter16i = SimpleHFilter16i; + + VP8PredLuma4[0] = DC4; + VP8PredLuma4[1] = TM4; + VP8PredLuma4[2] = VE4; + VP8PredLuma4[4] = RD4; + VP8PredLuma4[6] = LD4; + VP8PredLuma16[0] = DC16; + VP8PredLuma16[1] = TM16; + VP8PredLuma16[2] = VE16; + VP8PredLuma16[3] = HE16; + VP8PredLuma16[4] = DC16NoTop; + VP8PredLuma16[5] = DC16NoLeft; + VP8PredLuma16[6] = DC16NoTopLeft; + VP8PredChroma8[0] = DC8uv; + VP8PredChroma8[1] = TM8uv; + VP8PredChroma8[2] = 
VE8uv; + VP8PredChroma8[3] = HE8uv; + VP8PredChroma8[4] = DC8uvNoTop; + VP8PredChroma8[5] = DC8uvNoLeft; + VP8PredChroma8[6] = DC8uvNoTopLeft; +} + +#else // !WEBP_USE_MSA + +WEBP_DSP_INIT_STUB(VP8DspInitMSA) + +#endif // WEBP_USE_MSA diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_dec_neon.c b/vendor/github.com/sizeofint/webpanimation/dsp_dec_neon.c new file mode 100644 index 00000000..60917210 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_dec_neon.c @@ -0,0 +1,1663 @@ +// Copyright 2012 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// ARM NEON version of dsp functions and loop filtering. +// +// Authors: Somnath Banerjee (somnath@google.com) +// Johann Koenig (johannkoenig@google.com) + +#include "dsp_dsp.h" + +#if defined(WEBP_USE_NEON) + +#include "dsp_neon.h" +#include "dec_vp8i_dec.h" + +//------------------------------------------------------------------------------ +// NxM Loading functions + +#if !defined(WORK_AROUND_GCC) + +// This intrinsics version makes gcc-4.6.3 crash during Load4x??() compilation +// (register alloc, probably). The variants somewhat mitigate the problem, but +// not quite. HFilter16i() remains problematic. +static WEBP_INLINE uint8x8x4_t Load4x8_NEON(const uint8_t* const src, + int stride) { + const uint8x8_t zero = vdup_n_u8(0); + uint8x8x4_t out; + INIT_VECTOR4(out, zero, zero, zero, zero); + out = vld4_lane_u8(src + 0 * stride, out, 0); + out = vld4_lane_u8(src + 1 * stride, out, 1); + out = vld4_lane_u8(src + 2 * stride, out, 2); + out = vld4_lane_u8(src + 3 * stride, out, 3); + out = vld4_lane_u8(src + 4 * stride, out, 4); + out = vld4_lane_u8(src + 5 * stride, out, 5); + out = vld4_lane_u8(src + 6 * stride, out, 6); + out = vld4_lane_u8(src + 7 * stride, out, 7); + return out; +} + +static WEBP_INLINE void Load4x16_NEON(const uint8_t* const src, int stride, + uint8x16_t* const p1, + uint8x16_t* const p0, + uint8x16_t* const q0, + uint8x16_t* const q1) { + // row0 = p1[0..7]|p0[0..7]|q0[0..7]|q1[0..7] + // row8 = p1[8..15]|p0[8..15]|q0[8..15]|q1[8..15] + const uint8x8x4_t row0 = Load4x8_NEON(src - 2 + 0 * stride, stride); + const uint8x8x4_t row8 = Load4x8_NEON(src - 2 + 8 * stride, stride); + *p1 = vcombine_u8(row0.val[0], row8.val[0]); + *p0 = vcombine_u8(row0.val[1], row8.val[1]); + *q0 = vcombine_u8(row0.val[2], row8.val[2]); + *q1 = vcombine_u8(row0.val[3], row8.val[3]); +} + +#else // WORK_AROUND_GCC + +#define LOADQ_LANE_32b(VALUE, LANE) do { \ + (VALUE) = vld1q_lane_u32((const uint32_t*)src, (VALUE), (LANE)); \ + src += stride; \ +} while (0) + +static WEBP_INLINE void Load4x16_NEON(const uint8_t* src, int stride, + uint8x16_t* const p1, + uint8x16_t* const p0, + uint8x16_t* const q0, + uint8x16_t* const q1) { + const uint32x4_t zero = vdupq_n_u32(0); + uint32x4x4_t in; + INIT_VECTOR4(in, zero, zero, zero, zero); + src -= 2; + LOADQ_LANE_32b(in.val[0], 0); + LOADQ_LANE_32b(in.val[1], 0); + LOADQ_LANE_32b(in.val[2], 0); + LOADQ_LANE_32b(in.val[3], 0); + LOADQ_LANE_32b(in.val[0], 1); + LOADQ_LANE_32b(in.val[1], 1); + LOADQ_LANE_32b(in.val[2], 1); + LOADQ_LANE_32b(in.val[3], 1); + LOADQ_LANE_32b(in.val[0], 2); + 
LOADQ_LANE_32b(in.val[1], 2); + LOADQ_LANE_32b(in.val[2], 2); + LOADQ_LANE_32b(in.val[3], 2); + LOADQ_LANE_32b(in.val[0], 3); + LOADQ_LANE_32b(in.val[1], 3); + LOADQ_LANE_32b(in.val[2], 3); + LOADQ_LANE_32b(in.val[3], 3); + // Transpose four 4x4 parts: + { + const uint8x16x2_t row01 = vtrnq_u8(vreinterpretq_u8_u32(in.val[0]), + vreinterpretq_u8_u32(in.val[1])); + const uint8x16x2_t row23 = vtrnq_u8(vreinterpretq_u8_u32(in.val[2]), + vreinterpretq_u8_u32(in.val[3])); + const uint16x8x2_t row02 = vtrnq_u16(vreinterpretq_u16_u8(row01.val[0]), + vreinterpretq_u16_u8(row23.val[0])); + const uint16x8x2_t row13 = vtrnq_u16(vreinterpretq_u16_u8(row01.val[1]), + vreinterpretq_u16_u8(row23.val[1])); + *p1 = vreinterpretq_u8_u16(row02.val[0]); + *p0 = vreinterpretq_u8_u16(row13.val[0]); + *q0 = vreinterpretq_u8_u16(row02.val[1]); + *q1 = vreinterpretq_u8_u16(row13.val[1]); + } +} +#undef LOADQ_LANE_32b + +#endif // !WORK_AROUND_GCC + +static WEBP_INLINE void Load8x16_NEON( + const uint8_t* const src, int stride, + uint8x16_t* const p3, uint8x16_t* const p2, uint8x16_t* const p1, + uint8x16_t* const p0, uint8x16_t* const q0, uint8x16_t* const q1, + uint8x16_t* const q2, uint8x16_t* const q3) { + Load4x16_NEON(src - 2, stride, p3, p2, p1, p0); + Load4x16_NEON(src + 2, stride, q0, q1, q2, q3); +} + +static WEBP_INLINE void Load16x4_NEON(const uint8_t* const src, int stride, + uint8x16_t* const p1, + uint8x16_t* const p0, + uint8x16_t* const q0, + uint8x16_t* const q1) { + *p1 = vld1q_u8(src - 2 * stride); + *p0 = vld1q_u8(src - 1 * stride); + *q0 = vld1q_u8(src + 0 * stride); + *q1 = vld1q_u8(src + 1 * stride); +} + +static WEBP_INLINE void Load16x8_NEON( + const uint8_t* const src, int stride, + uint8x16_t* const p3, uint8x16_t* const p2, uint8x16_t* const p1, + uint8x16_t* const p0, uint8x16_t* const q0, uint8x16_t* const q1, + uint8x16_t* const q2, uint8x16_t* const q3) { + Load16x4_NEON(src - 2 * stride, stride, p3, p2, p1, p0); + Load16x4_NEON(src + 2 * stride, stride, q0, q1, q2, q3); +} + +static WEBP_INLINE void Load8x8x2_NEON( + const uint8_t* const u, const uint8_t* const v, int stride, + uint8x16_t* const p3, uint8x16_t* const p2, uint8x16_t* const p1, + uint8x16_t* const p0, uint8x16_t* const q0, uint8x16_t* const q1, + uint8x16_t* const q2, uint8x16_t* const q3) { + // We pack the 8x8 u-samples in the lower half of the uint8x16_t destination + // and the v-samples on the higher half. + *p3 = vcombine_u8(vld1_u8(u - 4 * stride), vld1_u8(v - 4 * stride)); + *p2 = vcombine_u8(vld1_u8(u - 3 * stride), vld1_u8(v - 3 * stride)); + *p1 = vcombine_u8(vld1_u8(u - 2 * stride), vld1_u8(v - 2 * stride)); + *p0 = vcombine_u8(vld1_u8(u - 1 * stride), vld1_u8(v - 1 * stride)); + *q0 = vcombine_u8(vld1_u8(u + 0 * stride), vld1_u8(v + 0 * stride)); + *q1 = vcombine_u8(vld1_u8(u + 1 * stride), vld1_u8(v + 1 * stride)); + *q2 = vcombine_u8(vld1_u8(u + 2 * stride), vld1_u8(v + 2 * stride)); + *q3 = vcombine_u8(vld1_u8(u + 3 * stride), vld1_u8(v + 3 * stride)); +} + +#if !defined(WORK_AROUND_GCC) + +#define LOAD_UV_8(ROW) \ + vcombine_u8(vld1_u8(u - 4 + (ROW) * stride), vld1_u8(v - 4 + (ROW) * stride)) + +static WEBP_INLINE void Load8x8x2T_NEON( + const uint8_t* const u, const uint8_t* const v, int stride, + uint8x16_t* const p3, uint8x16_t* const p2, uint8x16_t* const p1, + uint8x16_t* const p0, uint8x16_t* const q0, uint8x16_t* const q1, + uint8x16_t* const q2, uint8x16_t* const q3) { + // We pack the 8x8 u-samples in the lower half of the uint8x16_t destination + // and the v-samples on the higher half. 
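+ // Keeping the u plane in lanes 0..7 and the v plane in lanes 8..15 lets a + // single 16-lane filter pass cover both chroma planes; each row below is + // gathered as + //   vcombine_u8(vld1_u8(u - 4 + r * stride),   /* lanes 0..7:  u */ + //               vld1_u8(v - 4 + r * stride));  /* lanes 8..15: v */ + // which is exactly what the LOAD_UV_8() macro above expands to.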
+ const uint8x16_t row0 = LOAD_UV_8(0); + const uint8x16_t row1 = LOAD_UV_8(1); + const uint8x16_t row2 = LOAD_UV_8(2); + const uint8x16_t row3 = LOAD_UV_8(3); + const uint8x16_t row4 = LOAD_UV_8(4); + const uint8x16_t row5 = LOAD_UV_8(5); + const uint8x16_t row6 = LOAD_UV_8(6); + const uint8x16_t row7 = LOAD_UV_8(7); + // Perform two side-by-side 8x8 transposes + // u00 u01 u02 u03 u04 u05 u06 u07 | v00 v01 v02 v03 v04 v05 v06 v07 + // u10 u11 u12 u13 u14 u15 u16 u17 | v10 v11 v12 ... + // u20 u21 u22 u23 u24 u25 u26 u27 | v20 v21 ... + // u30 u31 u32 u33 u34 u35 u36 u37 | ... + // u40 u41 u42 u43 u44 u45 u46 u47 | ... + // u50 u51 u52 u53 u54 u55 u56 u57 | ... + // u60 u61 u62 u63 u64 u65 u66 u67 | v60 ... + // u70 u71 u72 u73 u74 u75 u76 u77 | v70 v71 v72 ... + const uint8x16x2_t row01 = vtrnq_u8(row0, row1); // u00 u10 u02 u12 ... + // u01 u11 u03 u13 ... + const uint8x16x2_t row23 = vtrnq_u8(row2, row3); // u20 u30 u22 u32 ... + // u21 u31 u23 u33 ... + const uint8x16x2_t row45 = vtrnq_u8(row4, row5); // ... + const uint8x16x2_t row67 = vtrnq_u8(row6, row7); // ... + const uint16x8x2_t row02 = vtrnq_u16(vreinterpretq_u16_u8(row01.val[0]), + vreinterpretq_u16_u8(row23.val[0])); + const uint16x8x2_t row13 = vtrnq_u16(vreinterpretq_u16_u8(row01.val[1]), + vreinterpretq_u16_u8(row23.val[1])); + const uint16x8x2_t row46 = vtrnq_u16(vreinterpretq_u16_u8(row45.val[0]), + vreinterpretq_u16_u8(row67.val[0])); + const uint16x8x2_t row57 = vtrnq_u16(vreinterpretq_u16_u8(row45.val[1]), + vreinterpretq_u16_u8(row67.val[1])); + const uint32x4x2_t row04 = vtrnq_u32(vreinterpretq_u32_u16(row02.val[0]), + vreinterpretq_u32_u16(row46.val[0])); + const uint32x4x2_t row26 = vtrnq_u32(vreinterpretq_u32_u16(row02.val[1]), + vreinterpretq_u32_u16(row46.val[1])); + const uint32x4x2_t row15 = vtrnq_u32(vreinterpretq_u32_u16(row13.val[0]), + vreinterpretq_u32_u16(row57.val[0])); + const uint32x4x2_t row37 = vtrnq_u32(vreinterpretq_u32_u16(row13.val[1]), + vreinterpretq_u32_u16(row57.val[1])); + *p3 = vreinterpretq_u8_u32(row04.val[0]); + *p2 = vreinterpretq_u8_u32(row15.val[0]); + *p1 = vreinterpretq_u8_u32(row26.val[0]); + *p0 = vreinterpretq_u8_u32(row37.val[0]); + *q0 = vreinterpretq_u8_u32(row04.val[1]); + *q1 = vreinterpretq_u8_u32(row15.val[1]); + *q2 = vreinterpretq_u8_u32(row26.val[1]); + *q3 = vreinterpretq_u8_u32(row37.val[1]); +} +#undef LOAD_UV_8 + +#endif // !WORK_AROUND_GCC + +static WEBP_INLINE void Store2x8_NEON(const uint8x8x2_t v, + uint8_t* const dst, int stride) { + vst2_lane_u8(dst + 0 * stride, v, 0); + vst2_lane_u8(dst + 1 * stride, v, 1); + vst2_lane_u8(dst + 2 * stride, v, 2); + vst2_lane_u8(dst + 3 * stride, v, 3); + vst2_lane_u8(dst + 4 * stride, v, 4); + vst2_lane_u8(dst + 5 * stride, v, 5); + vst2_lane_u8(dst + 6 * stride, v, 6); + vst2_lane_u8(dst + 7 * stride, v, 7); +} + +static WEBP_INLINE void Store2x16_NEON(const uint8x16_t p0, const uint8x16_t q0, + uint8_t* const dst, int stride) { + uint8x8x2_t lo, hi; + lo.val[0] = vget_low_u8(p0); + lo.val[1] = vget_low_u8(q0); + hi.val[0] = vget_high_u8(p0); + hi.val[1] = vget_high_u8(q0); + Store2x8_NEON(lo, dst - 1 + 0 * stride, stride); + Store2x8_NEON(hi, dst - 1 + 8 * stride, stride); +} + +#if !defined(WORK_AROUND_GCC) +static WEBP_INLINE void Store4x8_NEON(const uint8x8x4_t v, + uint8_t* const dst, int stride) { + vst4_lane_u8(dst + 0 * stride, v, 0); + vst4_lane_u8(dst + 1 * stride, v, 1); + vst4_lane_u8(dst + 2 * stride, v, 2); + vst4_lane_u8(dst + 3 * stride, v, 3); + vst4_lane_u8(dst + 4 * stride, v, 4); + vst4_lane_u8(dst + 
5 * stride, v, 5); + vst4_lane_u8(dst + 6 * stride, v, 6); + vst4_lane_u8(dst + 7 * stride, v, 7); +} + +static WEBP_INLINE void Store4x16_NEON(const uint8x16_t p1, const uint8x16_t p0, + const uint8x16_t q0, const uint8x16_t q1, + uint8_t* const dst, int stride) { + uint8x8x4_t lo, hi; + INIT_VECTOR4(lo, + vget_low_u8(p1), vget_low_u8(p0), + vget_low_u8(q0), vget_low_u8(q1)); + INIT_VECTOR4(hi, + vget_high_u8(p1), vget_high_u8(p0), + vget_high_u8(q0), vget_high_u8(q1)); + Store4x8_NEON(lo, dst - 2 + 0 * stride, stride); + Store4x8_NEON(hi, dst - 2 + 8 * stride, stride); +} +#endif // !WORK_AROUND_GCC + +static WEBP_INLINE void Store16x2_NEON(const uint8x16_t p0, const uint8x16_t q0, + uint8_t* const dst, int stride) { + vst1q_u8(dst - stride, p0); + vst1q_u8(dst, q0); +} + +static WEBP_INLINE void Store16x4_NEON(const uint8x16_t p1, const uint8x16_t p0, + const uint8x16_t q0, const uint8x16_t q1, + uint8_t* const dst, int stride) { + Store16x2_NEON(p1, p0, dst - stride, stride); + Store16x2_NEON(q0, q1, dst + stride, stride); +} + +static WEBP_INLINE void Store8x2x2_NEON(const uint8x16_t p0, + const uint8x16_t q0, + uint8_t* const u, uint8_t* const v, + int stride) { + // p0 and q0 contain the u+v samples packed in low/high halves. + vst1_u8(u - stride, vget_low_u8(p0)); + vst1_u8(u, vget_low_u8(q0)); + vst1_u8(v - stride, vget_high_u8(p0)); + vst1_u8(v, vget_high_u8(q0)); +} + +static WEBP_INLINE void Store8x4x2_NEON(const uint8x16_t p1, + const uint8x16_t p0, + const uint8x16_t q0, + const uint8x16_t q1, + uint8_t* const u, uint8_t* const v, + int stride) { + // The p1...q1 registers contain the u+v samples packed in low/high halves. + Store8x2x2_NEON(p1, p0, u - stride, v - stride, stride); + Store8x2x2_NEON(q0, q1, u + stride, v + stride, stride); +} + +#if !defined(WORK_AROUND_GCC) + +#define STORE6_LANE(DST, VAL0, VAL1, LANE) do { \ + vst3_lane_u8((DST) - 3, (VAL0), (LANE)); \ + vst3_lane_u8((DST) + 0, (VAL1), (LANE)); \ + (DST) += stride; \ +} while (0) + +static WEBP_INLINE void Store6x8x2_NEON( + const uint8x16_t p2, const uint8x16_t p1, const uint8x16_t p0, + const uint8x16_t q0, const uint8x16_t q1, const uint8x16_t q2, + uint8_t* u, uint8_t* v, int stride) { + uint8x8x3_t u0, u1, v0, v1; + INIT_VECTOR3(u0, vget_low_u8(p2), vget_low_u8(p1), vget_low_u8(p0)); + INIT_VECTOR3(u1, vget_low_u8(q0), vget_low_u8(q1), vget_low_u8(q2)); + INIT_VECTOR3(v0, vget_high_u8(p2), vget_high_u8(p1), vget_high_u8(p0)); + INIT_VECTOR3(v1, vget_high_u8(q0), vget_high_u8(q1), vget_high_u8(q2)); + STORE6_LANE(u, u0, u1, 0); + STORE6_LANE(u, u0, u1, 1); + STORE6_LANE(u, u0, u1, 2); + STORE6_LANE(u, u0, u1, 3); + STORE6_LANE(u, u0, u1, 4); + STORE6_LANE(u, u0, u1, 5); + STORE6_LANE(u, u0, u1, 6); + STORE6_LANE(u, u0, u1, 7); + STORE6_LANE(v, v0, v1, 0); + STORE6_LANE(v, v0, v1, 1); + STORE6_LANE(v, v0, v1, 2); + STORE6_LANE(v, v0, v1, 3); + STORE6_LANE(v, v0, v1, 4); + STORE6_LANE(v, v0, v1, 5); + STORE6_LANE(v, v0, v1, 6); + STORE6_LANE(v, v0, v1, 7); +} +#undef STORE6_LANE + +static WEBP_INLINE void Store4x8x2_NEON(const uint8x16_t p1, + const uint8x16_t p0, + const uint8x16_t q0, + const uint8x16_t q1, + uint8_t* const u, uint8_t* const v, + int stride) { + uint8x8x4_t u0, v0; + INIT_VECTOR4(u0, + vget_low_u8(p1), vget_low_u8(p0), + vget_low_u8(q0), vget_low_u8(q1)); + INIT_VECTOR4(v0, + vget_high_u8(p1), vget_high_u8(p0), + vget_high_u8(q0), vget_high_u8(q1)); + vst4_lane_u8(u - 2 + 0 * stride, u0, 0); + vst4_lane_u8(u - 2 + 1 * stride, u0, 1); + vst4_lane_u8(u - 2 + 2 * stride, u0, 2); + 
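+  // Each vst4_lane_u8 in this sequence writes one row's 4-byte (p1, p0, q0, q1) group across the vertical edge; lane i selects row i.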
vst4_lane_u8(u - 2 + 3 * stride, u0, 3); + vst4_lane_u8(u - 2 + 4 * stride, u0, 4); + vst4_lane_u8(u - 2 + 5 * stride, u0, 5); + vst4_lane_u8(u - 2 + 6 * stride, u0, 6); + vst4_lane_u8(u - 2 + 7 * stride, u0, 7); + vst4_lane_u8(v - 2 + 0 * stride, v0, 0); + vst4_lane_u8(v - 2 + 1 * stride, v0, 1); + vst4_lane_u8(v - 2 + 2 * stride, v0, 2); + vst4_lane_u8(v - 2 + 3 * stride, v0, 3); + vst4_lane_u8(v - 2 + 4 * stride, v0, 4); + vst4_lane_u8(v - 2 + 5 * stride, v0, 5); + vst4_lane_u8(v - 2 + 6 * stride, v0, 6); + vst4_lane_u8(v - 2 + 7 * stride, v0, 7); +} + +#endif // !WORK_AROUND_GCC + +// Zero extend 'v' to an int16x8_t. +static WEBP_INLINE int16x8_t ConvertU8ToS16_NEON(uint8x8_t v) { + return vreinterpretq_s16_u16(vmovl_u8(v)); +} + +// Performs unsigned 8b saturation on 'dst01' and 'dst23' storing the result +// to the corresponding rows of 'dst'. +static WEBP_INLINE void SaturateAndStore4x4_NEON(uint8_t* const dst, + const int16x8_t dst01, + const int16x8_t dst23) { + // Unsigned saturate to 8b. + const uint8x8_t dst01_u8 = vqmovun_s16(dst01); + const uint8x8_t dst23_u8 = vqmovun_s16(dst23); + + // Store the results. + vst1_lane_u32((uint32_t*)(dst + 0 * BPS), vreinterpret_u32_u8(dst01_u8), 0); + vst1_lane_u32((uint32_t*)(dst + 1 * BPS), vreinterpret_u32_u8(dst01_u8), 1); + vst1_lane_u32((uint32_t*)(dst + 2 * BPS), vreinterpret_u32_u8(dst23_u8), 0); + vst1_lane_u32((uint32_t*)(dst + 3 * BPS), vreinterpret_u32_u8(dst23_u8), 1); +} + +static WEBP_INLINE void Add4x4_NEON(const int16x8_t row01, + const int16x8_t row23, + uint8_t* const dst) { + uint32x2_t dst01 = vdup_n_u32(0); + uint32x2_t dst23 = vdup_n_u32(0); + + // Load the source pixels. + dst01 = vld1_lane_u32((uint32_t*)(dst + 0 * BPS), dst01, 0); + dst23 = vld1_lane_u32((uint32_t*)(dst + 2 * BPS), dst23, 0); + dst01 = vld1_lane_u32((uint32_t*)(dst + 1 * BPS), dst01, 1); + dst23 = vld1_lane_u32((uint32_t*)(dst + 3 * BPS), dst23, 1); + + { + // Convert to 16b. + const int16x8_t dst01_s16 = ConvertU8ToS16_NEON(vreinterpret_u8_u32(dst01)); + const int16x8_t dst23_s16 = ConvertU8ToS16_NEON(vreinterpret_u8_u32(dst23)); + + // Descale with rounding. + const int16x8_t out01 = vrsraq_n_s16(dst01_s16, row01, 3); + const int16x8_t out23 = vrsraq_n_s16(dst23_s16, row23, 3); + // Add the inverse transform. 
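+    // (vrsraq_n_s16(dst, row, 3) above computes dst + ((row + 4) >> 3), i.e. the rounded descale of the residual before it is added back to the source pixels.)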
+ SaturateAndStore4x4_NEON(dst, out01, out23); + } +} + +//----------------------------------------------------------------------------- +// Simple In-loop filtering (Paragraph 15.2) + +static uint8x16_t NeedsFilter_NEON(const uint8x16_t p1, const uint8x16_t p0, + const uint8x16_t q0, const uint8x16_t q1, + int thresh) { + const uint8x16_t thresh_v = vdupq_n_u8((uint8_t)thresh); + const uint8x16_t a_p0_q0 = vabdq_u8(p0, q0); // abs(p0-q0) + const uint8x16_t a_p1_q1 = vabdq_u8(p1, q1); // abs(p1-q1) + const uint8x16_t a_p0_q0_2 = vqaddq_u8(a_p0_q0, a_p0_q0); // 2 * abs(p0-q0) + const uint8x16_t a_p1_q1_2 = vshrq_n_u8(a_p1_q1, 1); // abs(p1-q1) / 2 + const uint8x16_t sum = vqaddq_u8(a_p0_q0_2, a_p1_q1_2); + const uint8x16_t mask = vcgeq_u8(thresh_v, sum); + return mask; +} + +static int8x16_t FlipSign_NEON(const uint8x16_t v) { + const uint8x16_t sign_bit = vdupq_n_u8(0x80); + return vreinterpretq_s8_u8(veorq_u8(v, sign_bit)); +} + +static uint8x16_t FlipSignBack_NEON(const int8x16_t v) { + const int8x16_t sign_bit = vdupq_n_s8(0x80); + return vreinterpretq_u8_s8(veorq_s8(v, sign_bit)); +} + +static int8x16_t GetBaseDelta_NEON(const int8x16_t p1, const int8x16_t p0, + const int8x16_t q0, const int8x16_t q1) { + const int8x16_t q0_p0 = vqsubq_s8(q0, p0); // (q0-p0) + const int8x16_t p1_q1 = vqsubq_s8(p1, q1); // (p1-q1) + const int8x16_t s1 = vqaddq_s8(p1_q1, q0_p0); // (p1-q1) + 1 * (q0 - p0) + const int8x16_t s2 = vqaddq_s8(q0_p0, s1); // (p1-q1) + 2 * (q0 - p0) + const int8x16_t s3 = vqaddq_s8(q0_p0, s2); // (p1-q1) + 3 * (q0 - p0) + return s3; +} + +static int8x16_t GetBaseDelta0_NEON(const int8x16_t p0, const int8x16_t q0) { + const int8x16_t q0_p0 = vqsubq_s8(q0, p0); // (q0-p0) + const int8x16_t s1 = vqaddq_s8(q0_p0, q0_p0); // 2 * (q0 - p0) + const int8x16_t s2 = vqaddq_s8(q0_p0, s1); // 3 * (q0 - p0) + return s2; +} + +//------------------------------------------------------------------------------ + +static void ApplyFilter2NoFlip_NEON(const int8x16_t p0s, const int8x16_t q0s, + const int8x16_t delta, + int8x16_t* const op0, + int8x16_t* const oq0) { + const int8x16_t kCst3 = vdupq_n_s8(0x03); + const int8x16_t kCst4 = vdupq_n_s8(0x04); + const int8x16_t delta_p3 = vqaddq_s8(delta, kCst3); + const int8x16_t delta_p4 = vqaddq_s8(delta, kCst4); + const int8x16_t delta3 = vshrq_n_s8(delta_p3, 3); + const int8x16_t delta4 = vshrq_n_s8(delta_p4, 3); + *op0 = vqaddq_s8(p0s, delta3); + *oq0 = vqsubq_s8(q0s, delta4); +} + +#if defined(WEBP_USE_INTRINSICS) + +static void ApplyFilter2_NEON(const int8x16_t p0s, const int8x16_t q0s, + const int8x16_t delta, + uint8x16_t* const op0, uint8x16_t* const oq0) { + const int8x16_t kCst3 = vdupq_n_s8(0x03); + const int8x16_t kCst4 = vdupq_n_s8(0x04); + const int8x16_t delta_p3 = vqaddq_s8(delta, kCst3); + const int8x16_t delta_p4 = vqaddq_s8(delta, kCst4); + const int8x16_t delta3 = vshrq_n_s8(delta_p3, 3); + const int8x16_t delta4 = vshrq_n_s8(delta_p4, 3); + const int8x16_t sp0 = vqaddq_s8(p0s, delta3); + const int8x16_t sq0 = vqsubq_s8(q0s, delta4); + *op0 = FlipSignBack_NEON(sp0); + *oq0 = FlipSignBack_NEON(sq0); +} + +static void DoFilter2_NEON(const uint8x16_t p1, const uint8x16_t p0, + const uint8x16_t q0, const uint8x16_t q1, + const uint8x16_t mask, + uint8x16_t* const op0, uint8x16_t* const oq0) { + const int8x16_t p1s = FlipSign_NEON(p1); + const int8x16_t p0s = FlipSign_NEON(p0); + const int8x16_t q0s = FlipSign_NEON(q0); + const int8x16_t q1s = FlipSign_NEON(q1); + const int8x16_t delta0 = GetBaseDelta_NEON(p1s, p0s, q0s, q1s); + const 
int8x16_t delta1 = vandq_s8(delta0, vreinterpretq_s8_u8(mask)); + ApplyFilter2_NEON(p0s, q0s, delta1, op0, oq0); +} + +static void SimpleVFilter16_NEON(uint8_t* p, int stride, int thresh) { + uint8x16_t p1, p0, q0, q1, op0, oq0; + Load16x4_NEON(p, stride, &p1, &p0, &q0, &q1); + { + const uint8x16_t mask = NeedsFilter_NEON(p1, p0, q0, q1, thresh); + DoFilter2_NEON(p1, p0, q0, q1, mask, &op0, &oq0); + } + Store16x2_NEON(op0, oq0, p, stride); +} + +static void SimpleHFilter16_NEON(uint8_t* p, int stride, int thresh) { + uint8x16_t p1, p0, q0, q1, oq0, op0; + Load4x16_NEON(p, stride, &p1, &p0, &q0, &q1); + { + const uint8x16_t mask = NeedsFilter_NEON(p1, p0, q0, q1, thresh); + DoFilter2_NEON(p1, p0, q0, q1, mask, &op0, &oq0); + } + Store2x16_NEON(op0, oq0, p, stride); +} + +#else + +// Load/Store vertical edge +#define LOAD8x4(c1, c2, c3, c4, b1, b2, stride) \ + "vld4.8 {" #c1 "[0]," #c2 "[0]," #c3 "[0]," #c4 "[0]}," #b1 "," #stride "\n" \ + "vld4.8 {" #c1 "[1]," #c2 "[1]," #c3 "[1]," #c4 "[1]}," #b2 "," #stride "\n" \ + "vld4.8 {" #c1 "[2]," #c2 "[2]," #c3 "[2]," #c4 "[2]}," #b1 "," #stride "\n" \ + "vld4.8 {" #c1 "[3]," #c2 "[3]," #c3 "[3]," #c4 "[3]}," #b2 "," #stride "\n" \ + "vld4.8 {" #c1 "[4]," #c2 "[4]," #c3 "[4]," #c4 "[4]}," #b1 "," #stride "\n" \ + "vld4.8 {" #c1 "[5]," #c2 "[5]," #c3 "[5]," #c4 "[5]}," #b2 "," #stride "\n" \ + "vld4.8 {" #c1 "[6]," #c2 "[6]," #c3 "[6]," #c4 "[6]}," #b1 "," #stride "\n" \ + "vld4.8 {" #c1 "[7]," #c2 "[7]," #c3 "[7]," #c4 "[7]}," #b2 "," #stride "\n" + +#define STORE8x2(c1, c2, p, stride) \ + "vst2.8 {" #c1 "[0], " #c2 "[0]}," #p "," #stride " \n" \ + "vst2.8 {" #c1 "[1], " #c2 "[1]}," #p "," #stride " \n" \ + "vst2.8 {" #c1 "[2], " #c2 "[2]}," #p "," #stride " \n" \ + "vst2.8 {" #c1 "[3], " #c2 "[3]}," #p "," #stride " \n" \ + "vst2.8 {" #c1 "[4], " #c2 "[4]}," #p "," #stride " \n" \ + "vst2.8 {" #c1 "[5], " #c2 "[5]}," #p "," #stride " \n" \ + "vst2.8 {" #c1 "[6], " #c2 "[6]}," #p "," #stride " \n" \ + "vst2.8 {" #c1 "[7], " #c2 "[7]}," #p "," #stride " \n" + +#define QRegs "q0", "q1", "q2", "q3", \ + "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" + +#define FLIP_SIGN_BIT2(a, b, s) \ + "veor " #a "," #a "," #s " \n" \ + "veor " #b "," #b "," #s " \n" \ + +#define FLIP_SIGN_BIT4(a, b, c, d, s) \ + FLIP_SIGN_BIT2(a, b, s) \ + FLIP_SIGN_BIT2(c, d, s) \ + +#define NEEDS_FILTER(p1, p0, q0, q1, thresh, mask) \ + "vabd.u8 q15," #p0 "," #q0 " \n" /* abs(p0 - q0) */ \ + "vabd.u8 q14," #p1 "," #q1 " \n" /* abs(p1 - q1) */ \ + "vqadd.u8 q15, q15, q15 \n" /* abs(p0 - q0) * 2 */ \ + "vshr.u8 q14, q14, #1 \n" /* abs(p1 - q1) / 2 */ \ + "vqadd.u8 q15, q15, q14 \n" /* abs(p0 - q0) * 2 + abs(p1 - q1) / 2 */ \ + "vdup.8 q14, " #thresh " \n" \ + "vcge.u8 " #mask ", q14, q15 \n" /* mask = (sum <= thresh) */ + +#define GET_BASE_DELTA(p1, p0, q0, q1, o) \ + "vqsub.s8 q15," #q0 "," #p0 " \n" /* (q0 - p0) */ \ + "vqsub.s8 " #o "," #p1 "," #q1 " \n" /* (p1 - q1) */ \ + "vqadd.s8 " #o "," #o ", q15 \n" /* (p1 - q1) + 1 * (q0 - p0) */ \ + "vqadd.s8 " #o "," #o ", q15 \n" /* (p1 - q1) + 2 * (q0 - p0) */ \ + "vqadd.s8 " #o "," #o ", q15 \n" /* (p1 - q1) + 3 * (q0 - p0) */ + +#define DO_SIMPLE_FILTER(p0, q0, fl) \ + "vmov.i8 q15, #0x03 \n" \ + "vqadd.s8 q15, q15, " #fl " \n" /* filter1 = filter + 3 */ \ + "vshr.s8 q15, q15, #3 \n" /* filter1 >> 3 */ \ + "vqadd.s8 " #p0 "," #p0 ", q15 \n" /* p0 += filter1 */ \ + \ + "vmov.i8 q15, #0x04 \n" \ + "vqadd.s8 q15, q15, " #fl " \n" /* filter2 = filter + 4 */ \ + "vshr.s8 q15, q15, #3 \n" /* filter2 >> 3 */ \ + "vqsub.s8 " #q0 "," #q0 ", 
q15 \n" /* q0 -= filter2 */ + +// Applies filter on 2 pixels (p0 and q0) +#define DO_FILTER2(p1, p0, q0, q1, thresh) \ + NEEDS_FILTER(p1, p0, q0, q1, thresh, q9) /* filter mask in q9 */ \ + "vmov.i8 q10, #0x80 \n" /* sign bit */ \ + FLIP_SIGN_BIT4(p1, p0, q0, q1, q10) /* convert to signed value */ \ + GET_BASE_DELTA(p1, p0, q0, q1, q11) /* get filter level */ \ + "vand q9, q9, q11 \n" /* apply filter mask */ \ + DO_SIMPLE_FILTER(p0, q0, q9) /* apply filter */ \ + FLIP_SIGN_BIT2(p0, q0, q10) + +static void SimpleVFilter16_NEON(uint8_t* p, int stride, int thresh) { + __asm__ volatile ( + "sub %[p], %[p], %[stride], lsl #1 \n" // p -= 2 * stride + + "vld1.u8 {q1}, [%[p]], %[stride] \n" // p1 + "vld1.u8 {q2}, [%[p]], %[stride] \n" // p0 + "vld1.u8 {q3}, [%[p]], %[stride] \n" // q0 + "vld1.u8 {q12}, [%[p]] \n" // q1 + + DO_FILTER2(q1, q2, q3, q12, %[thresh]) + + "sub %[p], %[p], %[stride], lsl #1 \n" // p -= 2 * stride + + "vst1.u8 {q2}, [%[p]], %[stride] \n" // store op0 + "vst1.u8 {q3}, [%[p]] \n" // store oq0 + : [p] "+r"(p) + : [stride] "r"(stride), [thresh] "r"(thresh) + : "memory", QRegs + ); +} + +static void SimpleHFilter16_NEON(uint8_t* p, int stride, int thresh) { + __asm__ volatile ( + "sub r4, %[p], #2 \n" // base1 = p - 2 + "lsl r6, %[stride], #1 \n" // r6 = 2 * stride + "add r5, r4, %[stride] \n" // base2 = base1 + stride + + LOAD8x4(d2, d3, d4, d5, [r4], [r5], r6) + LOAD8x4(d24, d25, d26, d27, [r4], [r5], r6) + "vswp d3, d24 \n" // p1:q1 p0:q3 + "vswp d5, d26 \n" // q0:q2 q1:q4 + "vswp q2, q12 \n" // p1:q1 p0:q2 q0:q3 q1:q4 + + DO_FILTER2(q1, q2, q12, q13, %[thresh]) + + "sub %[p], %[p], #1 \n" // p - 1 + + "vswp d5, d24 \n" + STORE8x2(d4, d5, [%[p]], %[stride]) + STORE8x2(d24, d25, [%[p]], %[stride]) + + : [p] "+r"(p) + : [stride] "r"(stride), [thresh] "r"(thresh) + : "memory", "r4", "r5", "r6", QRegs + ); +} + +#undef LOAD8x4 +#undef STORE8x2 + +#endif // WEBP_USE_INTRINSICS + +static void SimpleVFilter16i_NEON(uint8_t* p, int stride, int thresh) { + uint32_t k; + for (k = 3; k != 0; --k) { + p += 4 * stride; + SimpleVFilter16_NEON(p, stride, thresh); + } +} + +static void SimpleHFilter16i_NEON(uint8_t* p, int stride, int thresh) { + uint32_t k; + for (k = 3; k != 0; --k) { + p += 4; + SimpleHFilter16_NEON(p, stride, thresh); + } +} + +//------------------------------------------------------------------------------ +// Complex In-loop filtering (Paragraph 15.3) + +static uint8x16_t NeedsHev_NEON(const uint8x16_t p1, const uint8x16_t p0, + const uint8x16_t q0, const uint8x16_t q1, + int hev_thresh) { + const uint8x16_t hev_thresh_v = vdupq_n_u8((uint8_t)hev_thresh); + const uint8x16_t a_p1_p0 = vabdq_u8(p1, p0); // abs(p1 - p0) + const uint8x16_t a_q1_q0 = vabdq_u8(q1, q0); // abs(q1 - q0) + const uint8x16_t a_max = vmaxq_u8(a_p1_p0, a_q1_q0); + const uint8x16_t mask = vcgtq_u8(a_max, hev_thresh_v); + return mask; +} + +static uint8x16_t NeedsFilter2_NEON(const uint8x16_t p3, const uint8x16_t p2, + const uint8x16_t p1, const uint8x16_t p0, + const uint8x16_t q0, const uint8x16_t q1, + const uint8x16_t q2, const uint8x16_t q3, + int ithresh, int thresh) { + const uint8x16_t ithresh_v = vdupq_n_u8((uint8_t)ithresh); + const uint8x16_t a_p3_p2 = vabdq_u8(p3, p2); // abs(p3 - p2) + const uint8x16_t a_p2_p1 = vabdq_u8(p2, p1); // abs(p2 - p1) + const uint8x16_t a_p1_p0 = vabdq_u8(p1, p0); // abs(p1 - p0) + const uint8x16_t a_q3_q2 = vabdq_u8(q3, q2); // abs(q3 - q2) + const uint8x16_t a_q2_q1 = vabdq_u8(q2, q1); // abs(q2 - q1) + const uint8x16_t a_q1_q0 = vabdq_u8(q1, q0); // abs(q1 
- q0) + const uint8x16_t max1 = vmaxq_u8(a_p3_p2, a_p2_p1); + const uint8x16_t max2 = vmaxq_u8(a_p1_p0, a_q3_q2); + const uint8x16_t max3 = vmaxq_u8(a_q2_q1, a_q1_q0); + const uint8x16_t max12 = vmaxq_u8(max1, max2); + const uint8x16_t max123 = vmaxq_u8(max12, max3); + const uint8x16_t mask2 = vcgeq_u8(ithresh_v, max123); + const uint8x16_t mask1 = NeedsFilter_NEON(p1, p0, q0, q1, thresh); + const uint8x16_t mask = vandq_u8(mask1, mask2); + return mask; +} + +// 4-points filter + +static void ApplyFilter4_NEON( + const int8x16_t p1, const int8x16_t p0, + const int8x16_t q0, const int8x16_t q1, + const int8x16_t delta0, + uint8x16_t* const op1, uint8x16_t* const op0, + uint8x16_t* const oq0, uint8x16_t* const oq1) { + const int8x16_t kCst3 = vdupq_n_s8(0x03); + const int8x16_t kCst4 = vdupq_n_s8(0x04); + const int8x16_t delta1 = vqaddq_s8(delta0, kCst4); + const int8x16_t delta2 = vqaddq_s8(delta0, kCst3); + const int8x16_t a1 = vshrq_n_s8(delta1, 3); + const int8x16_t a2 = vshrq_n_s8(delta2, 3); + const int8x16_t a3 = vrshrq_n_s8(a1, 1); // a3 = (a1 + 1) >> 1 + *op0 = FlipSignBack_NEON(vqaddq_s8(p0, a2)); // clip(p0 + a2) + *oq0 = FlipSignBack_NEON(vqsubq_s8(q0, a1)); // clip(q0 - a1) + *op1 = FlipSignBack_NEON(vqaddq_s8(p1, a3)); // clip(p1 + a3) + *oq1 = FlipSignBack_NEON(vqsubq_s8(q1, a3)); // clip(q1 - a3) +} + +static void DoFilter4_NEON( + const uint8x16_t p1, const uint8x16_t p0, + const uint8x16_t q0, const uint8x16_t q1, + const uint8x16_t mask, const uint8x16_t hev_mask, + uint8x16_t* const op1, uint8x16_t* const op0, + uint8x16_t* const oq0, uint8x16_t* const oq1) { + // This is a fused version of DoFilter2() calling ApplyFilter2 directly + const int8x16_t p1s = FlipSign_NEON(p1); + int8x16_t p0s = FlipSign_NEON(p0); + int8x16_t q0s = FlipSign_NEON(q0); + const int8x16_t q1s = FlipSign_NEON(q1); + const uint8x16_t simple_lf_mask = vandq_u8(mask, hev_mask); + + // do_filter2 part (simple loopfilter on pixels with hev) + { + const int8x16_t delta = GetBaseDelta_NEON(p1s, p0s, q0s, q1s); + const int8x16_t simple_lf_delta = + vandq_s8(delta, vreinterpretq_s8_u8(simple_lf_mask)); + ApplyFilter2NoFlip_NEON(p0s, q0s, simple_lf_delta, &p0s, &q0s); + } + + // do_filter4 part (complex loopfilter on pixels without hev) + { + const int8x16_t delta0 = GetBaseDelta0_NEON(p0s, q0s); + // we use: (mask & hev_mask) ^ mask = mask & !hev_mask + const uint8x16_t complex_lf_mask = veorq_u8(simple_lf_mask, mask); + const int8x16_t complex_lf_delta = + vandq_s8(delta0, vreinterpretq_s8_u8(complex_lf_mask)); + ApplyFilter4_NEON(p1s, p0s, q0s, q1s, complex_lf_delta, op1, op0, oq0, oq1); + } +} + +// 6-points filter + +static void ApplyFilter6_NEON( + const int8x16_t p2, const int8x16_t p1, const int8x16_t p0, + const int8x16_t q0, const int8x16_t q1, const int8x16_t q2, + const int8x16_t delta, + uint8x16_t* const op2, uint8x16_t* const op1, uint8x16_t* const op0, + uint8x16_t* const oq0, uint8x16_t* const oq1, uint8x16_t* const oq2) { + // We have to compute: X = (9*a+63) >> 7, Y = (18*a+63)>>7, Z = (27*a+63) >> 7 + // Turns out, there's a common sub-expression S=9 * a - 1 that can be used + // with the special vqrshrn_n_s16 rounding-shift-and-narrow instruction: + // X = (S + 64) >> 7, Y = (S + 32) >> 6, Z = (18 * a + S + 64) >> 7 + const int8x8_t delta_lo = vget_low_s8(delta); + const int8x8_t delta_hi = vget_high_s8(delta); + const int8x8_t kCst9 = vdup_n_s8(9); + const int16x8_t kCstm1 = vdupq_n_s16(-1); + const int8x8_t kCst18 = vdup_n_s8(18); + const int16x8_t S_lo = vmlal_s8(kCstm1, kCst9, 
delta_lo); // S = 9 * a - 1 + const int16x8_t S_hi = vmlal_s8(kCstm1, kCst9, delta_hi); + const int16x8_t Z_lo = vmlal_s8(S_lo, kCst18, delta_lo); // S + 18 * a + const int16x8_t Z_hi = vmlal_s8(S_hi, kCst18, delta_hi); + const int8x8_t a3_lo = vqrshrn_n_s16(S_lo, 7); // (9 * a + 63) >> 7 + const int8x8_t a3_hi = vqrshrn_n_s16(S_hi, 7); + const int8x8_t a2_lo = vqrshrn_n_s16(S_lo, 6); // (9 * a + 31) >> 6 + const int8x8_t a2_hi = vqrshrn_n_s16(S_hi, 6); + const int8x8_t a1_lo = vqrshrn_n_s16(Z_lo, 7); // (27 * a + 63) >> 7 + const int8x8_t a1_hi = vqrshrn_n_s16(Z_hi, 7); + const int8x16_t a1 = vcombine_s8(a1_lo, a1_hi); + const int8x16_t a2 = vcombine_s8(a2_lo, a2_hi); + const int8x16_t a3 = vcombine_s8(a3_lo, a3_hi); + + *op0 = FlipSignBack_NEON(vqaddq_s8(p0, a1)); // clip(p0 + a1) + *oq0 = FlipSignBack_NEON(vqsubq_s8(q0, a1)); // clip(q0 - a1) + *oq1 = FlipSignBack_NEON(vqsubq_s8(q1, a2)); // clip(q1 - a2) + *op1 = FlipSignBack_NEON(vqaddq_s8(p1, a2)); // clip(p1 + a2) + *oq2 = FlipSignBack_NEON(vqsubq_s8(q2, a3)); // clip(q2 - a3) + *op2 = FlipSignBack_NEON(vqaddq_s8(p2, a3)); // clip(p2 + a3) +} + +static void DoFilter6_NEON( + const uint8x16_t p2, const uint8x16_t p1, const uint8x16_t p0, + const uint8x16_t q0, const uint8x16_t q1, const uint8x16_t q2, + const uint8x16_t mask, const uint8x16_t hev_mask, + uint8x16_t* const op2, uint8x16_t* const op1, uint8x16_t* const op0, + uint8x16_t* const oq0, uint8x16_t* const oq1, uint8x16_t* const oq2) { + // This is a fused version of DoFilter2() calling ApplyFilter2 directly + const int8x16_t p2s = FlipSign_NEON(p2); + const int8x16_t p1s = FlipSign_NEON(p1); + int8x16_t p0s = FlipSign_NEON(p0); + int8x16_t q0s = FlipSign_NEON(q0); + const int8x16_t q1s = FlipSign_NEON(q1); + const int8x16_t q2s = FlipSign_NEON(q2); + const uint8x16_t simple_lf_mask = vandq_u8(mask, hev_mask); + const int8x16_t delta0 = GetBaseDelta_NEON(p1s, p0s, q0s, q1s); + + // do_filter2 part (simple loopfilter on pixels with hev) + { + const int8x16_t simple_lf_delta = + vandq_s8(delta0, vreinterpretq_s8_u8(simple_lf_mask)); + ApplyFilter2NoFlip_NEON(p0s, q0s, simple_lf_delta, &p0s, &q0s); + } + + // do_filter6 part (complex loopfilter on pixels without hev) + { + // we use: (mask & hev_mask) ^ mask = mask & !hev_mask + const uint8x16_t complex_lf_mask = veorq_u8(simple_lf_mask, mask); + const int8x16_t complex_lf_delta = + vandq_s8(delta0, vreinterpretq_s8_u8(complex_lf_mask)); + ApplyFilter6_NEON(p2s, p1s, p0s, q0s, q1s, q2s, complex_lf_delta, + op2, op1, op0, oq0, oq1, oq2); + } +} + +// on macroblock edges + +static void VFilter16_NEON(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3; + Load16x8_NEON(p, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3); + { + const uint8x16_t mask = NeedsFilter2_NEON(p3, p2, p1, p0, q0, q1, q2, q3, + ithresh, thresh); + const uint8x16_t hev_mask = NeedsHev_NEON(p1, p0, q0, q1, hev_thresh); + uint8x16_t op2, op1, op0, oq0, oq1, oq2; + DoFilter6_NEON(p2, p1, p0, q0, q1, q2, mask, hev_mask, + &op2, &op1, &op0, &oq0, &oq1, &oq2); + Store16x2_NEON(op2, op1, p - 2 * stride, stride); + Store16x2_NEON(op0, oq0, p + 0 * stride, stride); + Store16x2_NEON(oq1, oq2, p + 2 * stride, stride); + } +} + +static void HFilter16_NEON(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3; + Load8x16_NEON(p, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3); + { + const uint8x16_t mask = NeedsFilter2_NEON(p3, p2, p1, p0, q0, q1, q2, q3, 
+ ithresh, thresh); + const uint8x16_t hev_mask = NeedsHev_NEON(p1, p0, q0, q1, hev_thresh); + uint8x16_t op2, op1, op0, oq0, oq1, oq2; + DoFilter6_NEON(p2, p1, p0, q0, q1, q2, mask, hev_mask, + &op2, &op1, &op0, &oq0, &oq1, &oq2); + Store2x16_NEON(op2, op1, p - 2, stride); + Store2x16_NEON(op0, oq0, p + 0, stride); + Store2x16_NEON(oq1, oq2, p + 2, stride); + } +} + +// on three inner edges +static void VFilter16i_NEON(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + uint32_t k; + uint8x16_t p3, p2, p1, p0; + Load16x4_NEON(p + 2 * stride, stride, &p3, &p2, &p1, &p0); + for (k = 3; k != 0; --k) { + uint8x16_t q0, q1, q2, q3; + p += 4 * stride; + Load16x4_NEON(p + 2 * stride, stride, &q0, &q1, &q2, &q3); + { + const uint8x16_t mask = + NeedsFilter2_NEON(p3, p2, p1, p0, q0, q1, q2, q3, ithresh, thresh); + const uint8x16_t hev_mask = NeedsHev_NEON(p1, p0, q0, q1, hev_thresh); + // p3 and p2 are not just temporary variables here: they will be + // re-used for next span. And q2/q3 will become p1/p0 accordingly. + DoFilter4_NEON(p1, p0, q0, q1, mask, hev_mask, &p1, &p0, &p3, &p2); + Store16x4_NEON(p1, p0, p3, p2, p, stride); + p1 = q2; + p0 = q3; + } + } +} + +#if !defined(WORK_AROUND_GCC) +static void HFilter16i_NEON(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + uint32_t k; + uint8x16_t p3, p2, p1, p0; + Load4x16_NEON(p + 2, stride, &p3, &p2, &p1, &p0); + for (k = 3; k != 0; --k) { + uint8x16_t q0, q1, q2, q3; + p += 4; + Load4x16_NEON(p + 2, stride, &q0, &q1, &q2, &q3); + { + const uint8x16_t mask = + NeedsFilter2_NEON(p3, p2, p1, p0, q0, q1, q2, q3, ithresh, thresh); + const uint8x16_t hev_mask = NeedsHev_NEON(p1, p0, q0, q1, hev_thresh); + DoFilter4_NEON(p1, p0, q0, q1, mask, hev_mask, &p1, &p0, &p3, &p2); + Store4x16_NEON(p1, p0, p3, p2, p, stride); + p1 = q2; + p0 = q3; + } + } +} +#endif // !WORK_AROUND_GCC + +// 8-pixels wide variant, for chroma filtering +static void VFilter8_NEON(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3; + Load8x8x2_NEON(u, v, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3); + { + const uint8x16_t mask = NeedsFilter2_NEON(p3, p2, p1, p0, q0, q1, q2, q3, + ithresh, thresh); + const uint8x16_t hev_mask = NeedsHev_NEON(p1, p0, q0, q1, hev_thresh); + uint8x16_t op2, op1, op0, oq0, oq1, oq2; + DoFilter6_NEON(p2, p1, p0, q0, q1, q2, mask, hev_mask, + &op2, &op1, &op0, &oq0, &oq1, &oq2); + Store8x2x2_NEON(op2, op1, u - 2 * stride, v - 2 * stride, stride); + Store8x2x2_NEON(op0, oq0, u + 0 * stride, v + 0 * stride, stride); + Store8x2x2_NEON(oq1, oq2, u + 2 * stride, v + 2 * stride, stride); + } +} +static void VFilter8i_NEON(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3; + u += 4 * stride; + v += 4 * stride; + Load8x8x2_NEON(u, v, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3); + { + const uint8x16_t mask = NeedsFilter2_NEON(p3, p2, p1, p0, q0, q1, q2, q3, + ithresh, thresh); + const uint8x16_t hev_mask = NeedsHev_NEON(p1, p0, q0, q1, hev_thresh); + uint8x16_t op1, op0, oq0, oq1; + DoFilter4_NEON(p1, p0, q0, q1, mask, hev_mask, &op1, &op0, &oq0, &oq1); + Store8x4x2_NEON(op1, op0, oq0, oq1, u, v, stride); + } +} + +#if !defined(WORK_AROUND_GCC) +static void HFilter8_NEON(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3; + Load8x8x2T_NEON(u, v, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3); + { 
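+    // Load8x8x2T_NEON is the transposed counterpart of Load8x8x2_NEON: it gathers the u/v columns around the vertical edge as rows, so the same filter math as VFilter8 applies.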
+ const uint8x16_t mask = NeedsFilter2_NEON(p3, p2, p1, p0, q0, q1, q2, q3, + ithresh, thresh); + const uint8x16_t hev_mask = NeedsHev_NEON(p1, p0, q0, q1, hev_thresh); + uint8x16_t op2, op1, op0, oq0, oq1, oq2; + DoFilter6_NEON(p2, p1, p0, q0, q1, q2, mask, hev_mask, + &op2, &op1, &op0, &oq0, &oq1, &oq2); + Store6x8x2_NEON(op2, op1, op0, oq0, oq1, oq2, u, v, stride); + } +} + +static void HFilter8i_NEON(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + uint8x16_t p3, p2, p1, p0, q0, q1, q2, q3; + u += 4; + v += 4; + Load8x8x2T_NEON(u, v, stride, &p3, &p2, &p1, &p0, &q0, &q1, &q2, &q3); + { + const uint8x16_t mask = NeedsFilter2_NEON(p3, p2, p1, p0, q0, q1, q2, q3, + ithresh, thresh); + const uint8x16_t hev_mask = NeedsHev_NEON(p1, p0, q0, q1, hev_thresh); + uint8x16_t op1, op0, oq0, oq1; + DoFilter4_NEON(p1, p0, q0, q1, mask, hev_mask, &op1, &op0, &oq0, &oq1); + Store4x8x2_NEON(op1, op0, oq0, oq1, u, v, stride); + } +} +#endif // !WORK_AROUND_GCC + +//----------------------------------------------------------------------------- +// Inverse transforms (Paragraph 14.4) + +// Technically these are unsigned but vqdmulh is only available in signed. +// vqdmulh returns high half (effectively >> 16) but also doubles the value, +// changing the >> 16 to >> 15 and requiring an additional >> 1. +// We use this to our advantage with kC2. The canonical value is 35468. +// However, the high bit is set so treating it as signed will give incorrect +// results. We avoid this by down shifting by 1 here to clear the highest bit. +// Combined with the doubling effect of vqdmulh we get >> 16. +// This can not be applied to kC1 because the lowest bit is set. Down shifting +// the constant would reduce precision. + +// libwebp uses a trick to avoid some extra addition that libvpx does. +// Instead of: +// temp2 = ip[12] + ((ip[12] * cospi8sqrt2minus1) >> 16); +// libwebp adds 1 << 16 to cospi8sqrt2minus1 (kC1). However, this causes the +// same issue with kC1 and vqdmulh that we work around by down shifting kC2 + +static const int16_t kC1 = 20091; +static const int16_t kC2 = 17734; // half of kC2, actually. See comment above. + +#if defined(WEBP_USE_INTRINSICS) +static WEBP_INLINE void Transpose8x2_NEON(const int16x8_t in0, + const int16x8_t in1, + int16x8x2_t* const out) { + // a0 a1 a2 a3 | b0 b1 b2 b3 => a0 b0 c0 d0 | a1 b1 c1 d1 + // c0 c1 c2 c3 | d0 d1 d2 d3 a2 b2 c2 d2 | a3 b3 c3 d3 + const int16x8x2_t tmp0 = vzipq_s16(in0, in1); // a0 c0 a1 c1 a2 c2 ... + // b0 d0 b1 d1 b2 d2 ... 
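+  // A second 16-bit zip of the two interleaved vectors completes the transpose: a0 b0 c0 d0 | a1 b1 c1 d1 and a2 b2 c2 d2 | a3 b3 c3 d3.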
+ *out = vzipq_s16(tmp0.val[0], tmp0.val[1]); +} + +static WEBP_INLINE void TransformPass_NEON(int16x8x2_t* const rows) { + // {rows} = in0 | in4 + // in8 | in12 + // B1 = in4 | in12 + const int16x8_t B1 = + vcombine_s16(vget_high_s16(rows->val[0]), vget_high_s16(rows->val[1])); + // C0 = kC1 * in4 | kC1 * in12 + // C1 = kC2 * in4 | kC2 * in12 + const int16x8_t C0 = vsraq_n_s16(B1, vqdmulhq_n_s16(B1, kC1), 1); + const int16x8_t C1 = vqdmulhq_n_s16(B1, kC2); + const int16x4_t a = vqadd_s16(vget_low_s16(rows->val[0]), + vget_low_s16(rows->val[1])); // in0 + in8 + const int16x4_t b = vqsub_s16(vget_low_s16(rows->val[0]), + vget_low_s16(rows->val[1])); // in0 - in8 + // c = kC2 * in4 - kC1 * in12 + // d = kC1 * in4 + kC2 * in12 + const int16x4_t c = vqsub_s16(vget_low_s16(C1), vget_high_s16(C0)); + const int16x4_t d = vqadd_s16(vget_low_s16(C0), vget_high_s16(C1)); + const int16x8_t D0 = vcombine_s16(a, b); // D0 = a | b + const int16x8_t D1 = vcombine_s16(d, c); // D1 = d | c + const int16x8_t E0 = vqaddq_s16(D0, D1); // a+d | b+c + const int16x8_t E_tmp = vqsubq_s16(D0, D1); // a-d | b-c + const int16x8_t E1 = vcombine_s16(vget_high_s16(E_tmp), vget_low_s16(E_tmp)); + Transpose8x2_NEON(E0, E1, rows); +} + +static void TransformOne_NEON(const int16_t* in, uint8_t* dst) { + int16x8x2_t rows; + INIT_VECTOR2(rows, vld1q_s16(in + 0), vld1q_s16(in + 8)); + TransformPass_NEON(&rows); + TransformPass_NEON(&rows); + Add4x4_NEON(rows.val[0], rows.val[1], dst); +} + +#else + +static void TransformOne_NEON(const int16_t* in, uint8_t* dst) { + const int kBPS = BPS; + // kC1, kC2. Padded because vld1.16 loads 8 bytes + const int16_t constants[4] = { kC1, kC2, 0, 0 }; + /* Adapted from libvpx: vp8/common/arm/neon/shortidct4x4llm_neon.asm */ + __asm__ volatile ( + "vld1.16 {q1, q2}, [%[in]] \n" + "vld1.16 {d0}, [%[constants]] \n" + + /* d2: in[0] + * d3: in[8] + * d4: in[4] + * d5: in[12] + */ + "vswp d3, d4 \n" + + /* q8 = {in[4], in[12]} * kC1 * 2 >> 16 + * q9 = {in[4], in[12]} * kC2 >> 16 + */ + "vqdmulh.s16 q8, q2, d0[0] \n" + "vqdmulh.s16 q9, q2, d0[1] \n" + + /* d22 = a = in[0] + in[8] + * d23 = b = in[0] - in[8] + */ + "vqadd.s16 d22, d2, d3 \n" + "vqsub.s16 d23, d2, d3 \n" + + /* The multiplication should be x * kC1 >> 16 + * However, with vqdmulh we get x * kC1 * 2 >> 16 + * (multiply, double, return high half) + * We avoided this in kC2 by pre-shifting the constant. + * q8 = in[4]/[12] * kC1 >> 16 + */ + "vshr.s16 q8, q8, #1 \n" + + /* Add {in[4], in[12]} back after the multiplication. This is handled by + * adding 1 << 16 to kC1 in the libwebp C code. 
+ */ + "vqadd.s16 q8, q2, q8 \n" + + /* d20 = c = in[4]*kC2 - in[12]*kC1 + * d21 = d = in[4]*kC1 + in[12]*kC2 + */ + "vqsub.s16 d20, d18, d17 \n" + "vqadd.s16 d21, d19, d16 \n" + + /* d2 = tmp[0] = a + d + * d3 = tmp[1] = b + c + * d4 = tmp[2] = b - c + * d5 = tmp[3] = a - d + */ + "vqadd.s16 d2, d22, d21 \n" + "vqadd.s16 d3, d23, d20 \n" + "vqsub.s16 d4, d23, d20 \n" + "vqsub.s16 d5, d22, d21 \n" + + "vzip.16 q1, q2 \n" + "vzip.16 q1, q2 \n" + + "vswp d3, d4 \n" + + /* q8 = {tmp[4], tmp[12]} * kC1 * 2 >> 16 + * q9 = {tmp[4], tmp[12]} * kC2 >> 16 + */ + "vqdmulh.s16 q8, q2, d0[0] \n" + "vqdmulh.s16 q9, q2, d0[1] \n" + + /* d22 = a = tmp[0] + tmp[8] + * d23 = b = tmp[0] - tmp[8] + */ + "vqadd.s16 d22, d2, d3 \n" + "vqsub.s16 d23, d2, d3 \n" + + /* See long winded explanations prior */ + "vshr.s16 q8, q8, #1 \n" + "vqadd.s16 q8, q2, q8 \n" + + /* d20 = c = in[4]*kC2 - in[12]*kC1 + * d21 = d = in[4]*kC1 + in[12]*kC2 + */ + "vqsub.s16 d20, d18, d17 \n" + "vqadd.s16 d21, d19, d16 \n" + + /* d2 = tmp[0] = a + d + * d3 = tmp[1] = b + c + * d4 = tmp[2] = b - c + * d5 = tmp[3] = a - d + */ + "vqadd.s16 d2, d22, d21 \n" + "vqadd.s16 d3, d23, d20 \n" + "vqsub.s16 d4, d23, d20 \n" + "vqsub.s16 d5, d22, d21 \n" + + "vld1.32 d6[0], [%[dst]], %[kBPS] \n" + "vld1.32 d6[1], [%[dst]], %[kBPS] \n" + "vld1.32 d7[0], [%[dst]], %[kBPS] \n" + "vld1.32 d7[1], [%[dst]], %[kBPS] \n" + + "sub %[dst], %[dst], %[kBPS], lsl #2 \n" + + /* (val) + 4 >> 3 */ + "vrshr.s16 d2, d2, #3 \n" + "vrshr.s16 d3, d3, #3 \n" + "vrshr.s16 d4, d4, #3 \n" + "vrshr.s16 d5, d5, #3 \n" + + "vzip.16 q1, q2 \n" + "vzip.16 q1, q2 \n" + + /* Must accumulate before saturating */ + "vmovl.u8 q8, d6 \n" + "vmovl.u8 q9, d7 \n" + + "vqadd.s16 q1, q1, q8 \n" + "vqadd.s16 q2, q2, q9 \n" + + "vqmovun.s16 d0, q1 \n" + "vqmovun.s16 d1, q2 \n" + + "vst1.32 d0[0], [%[dst]], %[kBPS] \n" + "vst1.32 d0[1], [%[dst]], %[kBPS] \n" + "vst1.32 d1[0], [%[dst]], %[kBPS] \n" + "vst1.32 d1[1], [%[dst]] \n" + + : [in] "+r"(in), [dst] "+r"(dst) /* modified registers */ + : [kBPS] "r"(kBPS), [constants] "r"(constants) /* constants */ + : "memory", "q0", "q1", "q2", "q8", "q9", "q10", "q11" /* clobbered */ + ); +} + +#endif // WEBP_USE_INTRINSICS + +static void TransformTwo_NEON(const int16_t* in, uint8_t* dst, int do_two) { + TransformOne_NEON(in, dst); + if (do_two) { + TransformOne_NEON(in + 16, dst + 4); + } +} + +static void TransformDC_NEON(const int16_t* in, uint8_t* dst) { + const int16x8_t DC = vdupq_n_s16(in[0]); + Add4x4_NEON(DC, DC, dst); +} + +//------------------------------------------------------------------------------ + +#define STORE_WHT(dst, col, rows) do { \ + *dst = vgetq_lane_s32(rows.val[0], col); (dst) += 16; \ + *dst = vgetq_lane_s32(rows.val[1], col); (dst) += 16; \ + *dst = vgetq_lane_s32(rows.val[2], col); (dst) += 16; \ + *dst = vgetq_lane_s32(rows.val[3], col); (dst) += 16; \ +} while (0) + +static void TransformWHT_NEON(const int16_t* in, int16_t* out) { + int32x4x4_t tmp; + + { + // Load the source. 
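+    // The inverse Walsh-Hadamard transform below spreads the 16 DC levels across blocks; STORE_WHT then scatters each output into the DC slot (every 16th coefficient) of the 16 luma blocks.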
+ const int16x4_t in00_03 = vld1_s16(in + 0); + const int16x4_t in04_07 = vld1_s16(in + 4); + const int16x4_t in08_11 = vld1_s16(in + 8); + const int16x4_t in12_15 = vld1_s16(in + 12); + const int32x4_t a0 = vaddl_s16(in00_03, in12_15); // in[0..3] + in[12..15] + const int32x4_t a1 = vaddl_s16(in04_07, in08_11); // in[4..7] + in[8..11] + const int32x4_t a2 = vsubl_s16(in04_07, in08_11); // in[4..7] - in[8..11] + const int32x4_t a3 = vsubl_s16(in00_03, in12_15); // in[0..3] - in[12..15] + tmp.val[0] = vaddq_s32(a0, a1); + tmp.val[1] = vaddq_s32(a3, a2); + tmp.val[2] = vsubq_s32(a0, a1); + tmp.val[3] = vsubq_s32(a3, a2); + // Arrange the temporary results column-wise. + tmp = Transpose4x4_NEON(tmp); + } + + { + const int32x4_t kCst3 = vdupq_n_s32(3); + const int32x4_t dc = vaddq_s32(tmp.val[0], kCst3); // add rounder + const int32x4_t a0 = vaddq_s32(dc, tmp.val[3]); + const int32x4_t a1 = vaddq_s32(tmp.val[1], tmp.val[2]); + const int32x4_t a2 = vsubq_s32(tmp.val[1], tmp.val[2]); + const int32x4_t a3 = vsubq_s32(dc, tmp.val[3]); + + tmp.val[0] = vaddq_s32(a0, a1); + tmp.val[1] = vaddq_s32(a3, a2); + tmp.val[2] = vsubq_s32(a0, a1); + tmp.val[3] = vsubq_s32(a3, a2); + + // right shift the results by 3. + tmp.val[0] = vshrq_n_s32(tmp.val[0], 3); + tmp.val[1] = vshrq_n_s32(tmp.val[1], 3); + tmp.val[2] = vshrq_n_s32(tmp.val[2], 3); + tmp.val[3] = vshrq_n_s32(tmp.val[3], 3); + + STORE_WHT(out, 0, tmp); + STORE_WHT(out, 1, tmp); + STORE_WHT(out, 2, tmp); + STORE_WHT(out, 3, tmp); + } +} + +#undef STORE_WHT + +//------------------------------------------------------------------------------ + +#define MUL(a, b) (((a) * (b)) >> 16) +static void TransformAC3_NEON(const int16_t* in, uint8_t* dst) { + static const int kC1_full = 20091 + (1 << 16); + static const int kC2_full = 35468; + const int16x4_t A = vld1_dup_s16(in); + const int16x4_t c4 = vdup_n_s16(MUL(in[4], kC2_full)); + const int16x4_t d4 = vdup_n_s16(MUL(in[4], kC1_full)); + const int c1 = MUL(in[1], kC2_full); + const int d1 = MUL(in[1], kC1_full); + const uint64_t cd = (uint64_t)( d1 & 0xffff) << 0 | + (uint64_t)( c1 & 0xffff) << 16 | + (uint64_t)(-c1 & 0xffff) << 32 | + (uint64_t)(-d1 & 0xffff) << 48; + const int16x4_t CD = vcreate_s16(cd); + const int16x4_t B = vqadd_s16(A, CD); + const int16x8_t m0_m1 = vcombine_s16(vqadd_s16(B, d4), vqadd_s16(B, c4)); + const int16x8_t m2_m3 = vcombine_s16(vqsub_s16(B, c4), vqsub_s16(B, d4)); + Add4x4_NEON(m0_m1, m2_m3, dst); +} +#undef MUL + +//------------------------------------------------------------------------------ +// 4x4 + +static void DC4_NEON(uint8_t* dst) { // DC + const uint8x8_t A = vld1_u8(dst - BPS); // top row + const uint16x4_t p0 = vpaddl_u8(A); // cascading summation of the top + const uint16x4_t p1 = vpadd_u16(p0, p0); + const uint8x8_t L0 = vld1_u8(dst + 0 * BPS - 1); + const uint8x8_t L1 = vld1_u8(dst + 1 * BPS - 1); + const uint8x8_t L2 = vld1_u8(dst + 2 * BPS - 1); + const uint8x8_t L3 = vld1_u8(dst + 3 * BPS - 1); + const uint16x8_t s0 = vaddl_u8(L0, L1); + const uint16x8_t s1 = vaddl_u8(L2, L3); + const uint16x8_t s01 = vaddq_u16(s0, s1); + const uint16x8_t sum = vaddq_u16(s01, vcombine_u16(p1, p1)); + const uint8x8_t dc0 = vrshrn_n_u16(sum, 3); // (sum + 4) >> 3 + const uint8x8_t dc = vdup_lane_u8(dc0, 0); + int i; + for (i = 0; i < 4; ++i) { + vst1_lane_u32((uint32_t*)(dst + i * BPS), vreinterpret_u32_u8(dc), 0); + } +} + +// TrueMotion (4x4 + 8x8) +static WEBP_INLINE void TrueMotion_NEON(uint8_t* dst, int size) { + const uint8x8_t TL = vld1_dup_u8(dst - BPS - 1); // 
top-left pixel 'A[-1]' + const uint8x8_t T = vld1_u8(dst - BPS); // top row 'A[0..3]' + const int16x8_t d = vreinterpretq_s16_u16(vsubl_u8(T, TL)); // A[c] - A[-1] + int y; + for (y = 0; y < size; y += 4) { + // left edge + const int16x8_t L0 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 0 * BPS - 1)); + const int16x8_t L1 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 1 * BPS - 1)); + const int16x8_t L2 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 2 * BPS - 1)); + const int16x8_t L3 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 3 * BPS - 1)); + const int16x8_t r0 = vaddq_s16(L0, d); // L[r] + A[c] - A[-1] + const int16x8_t r1 = vaddq_s16(L1, d); + const int16x8_t r2 = vaddq_s16(L2, d); + const int16x8_t r3 = vaddq_s16(L3, d); + // Saturate and store the result. + const uint32x2_t r0_u32 = vreinterpret_u32_u8(vqmovun_s16(r0)); + const uint32x2_t r1_u32 = vreinterpret_u32_u8(vqmovun_s16(r1)); + const uint32x2_t r2_u32 = vreinterpret_u32_u8(vqmovun_s16(r2)); + const uint32x2_t r3_u32 = vreinterpret_u32_u8(vqmovun_s16(r3)); + if (size == 4) { + vst1_lane_u32((uint32_t*)(dst + 0 * BPS), r0_u32, 0); + vst1_lane_u32((uint32_t*)(dst + 1 * BPS), r1_u32, 0); + vst1_lane_u32((uint32_t*)(dst + 2 * BPS), r2_u32, 0); + vst1_lane_u32((uint32_t*)(dst + 3 * BPS), r3_u32, 0); + } else { + vst1_u32((uint32_t*)(dst + 0 * BPS), r0_u32); + vst1_u32((uint32_t*)(dst + 1 * BPS), r1_u32); + vst1_u32((uint32_t*)(dst + 2 * BPS), r2_u32); + vst1_u32((uint32_t*)(dst + 3 * BPS), r3_u32); + } + dst += 4 * BPS; + } +} + +static void TM4_NEON(uint8_t* dst) { TrueMotion_NEON(dst, 4); } + +static void VE4_NEON(uint8_t* dst) { // vertical + // NB: avoid vld1_u64 here as an alignment hint may be added -> SIGBUS. + const uint64x1_t A0 = vreinterpret_u64_u8(vld1_u8(dst - BPS - 1)); // top row + const uint64x1_t A1 = vshr_n_u64(A0, 8); + const uint64x1_t A2 = vshr_n_u64(A0, 16); + const uint8x8_t ABCDEFGH = vreinterpret_u8_u64(A0); + const uint8x8_t BCDEFGH0 = vreinterpret_u8_u64(A1); + const uint8x8_t CDEFGH00 = vreinterpret_u8_u64(A2); + const uint8x8_t b = vhadd_u8(ABCDEFGH, CDEFGH00); + const uint8x8_t avg = vrhadd_u8(b, BCDEFGH0); + int i; + for (i = 0; i < 4; ++i) { + vst1_lane_u32((uint32_t*)(dst + i * BPS), vreinterpret_u32_u8(avg), 0); + } +} + +static void RD4_NEON(uint8_t* dst) { // Down-right + const uint8x8_t XABCD_u8 = vld1_u8(dst - BPS - 1); + const uint64x1_t XABCD = vreinterpret_u64_u8(XABCD_u8); + const uint64x1_t ____XABC = vshl_n_u64(XABCD, 32); + const uint32_t I = dst[-1 + 0 * BPS]; + const uint32_t J = dst[-1 + 1 * BPS]; + const uint32_t K = dst[-1 + 2 * BPS]; + const uint32_t L = dst[-1 + 3 * BPS]; + const uint64x1_t LKJI____ = + vcreate_u64((uint64_t)L | (K << 8) | (J << 16) | (I << 24)); + const uint64x1_t LKJIXABC = vorr_u64(LKJI____, ____XABC); + const uint8x8_t KJIXABC_ = vreinterpret_u8_u64(vshr_n_u64(LKJIXABC, 8)); + const uint8x8_t JIXABC__ = vreinterpret_u8_u64(vshr_n_u64(LKJIXABC, 16)); + const uint8_t D = vget_lane_u8(XABCD_u8, 4); + const uint8x8_t JIXABCD_ = vset_lane_u8(D, JIXABC__, 6); + const uint8x8_t LKJIXABC_u8 = vreinterpret_u8_u64(LKJIXABC); + const uint8x8_t avg1 = vhadd_u8(JIXABCD_, LKJIXABC_u8); + const uint8x8_t avg2 = vrhadd_u8(avg1, KJIXABC_); + const uint64x1_t avg2_u64 = vreinterpret_u64_u8(avg2); + const uint32x2_t r3 = vreinterpret_u32_u8(avg2); + const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 8)); + const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16)); + const uint32x2_t r0 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24)); + vst1_lane_u32((uint32_t*)(dst + 0 * 
BPS), r0, 0); + vst1_lane_u32((uint32_t*)(dst + 1 * BPS), r1, 0); + vst1_lane_u32((uint32_t*)(dst + 2 * BPS), r2, 0); + vst1_lane_u32((uint32_t*)(dst + 3 * BPS), r3, 0); +} + +static void LD4_NEON(uint8_t* dst) { // Down-left + // Note using the same shift trick as VE4() is slower here. + const uint8x8_t ABCDEFGH = vld1_u8(dst - BPS + 0); + const uint8x8_t BCDEFGH0 = vld1_u8(dst - BPS + 1); + const uint8x8_t CDEFGH00 = vld1_u8(dst - BPS + 2); + const uint8x8_t CDEFGHH0 = vset_lane_u8(dst[-BPS + 7], CDEFGH00, 6); + const uint8x8_t avg1 = vhadd_u8(ABCDEFGH, CDEFGHH0); + const uint8x8_t avg2 = vrhadd_u8(avg1, BCDEFGH0); + const uint64x1_t avg2_u64 = vreinterpret_u64_u8(avg2); + const uint32x2_t r0 = vreinterpret_u32_u8(avg2); + const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 8)); + const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16)); + const uint32x2_t r3 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24)); + vst1_lane_u32((uint32_t*)(dst + 0 * BPS), r0, 0); + vst1_lane_u32((uint32_t*)(dst + 1 * BPS), r1, 0); + vst1_lane_u32((uint32_t*)(dst + 2 * BPS), r2, 0); + vst1_lane_u32((uint32_t*)(dst + 3 * BPS), r3, 0); +} + +//------------------------------------------------------------------------------ +// Chroma + +static void VE8uv_NEON(uint8_t* dst) { // vertical + const uint8x8_t top = vld1_u8(dst - BPS); + int j; + for (j = 0; j < 8; ++j) { + vst1_u8(dst + j * BPS, top); + } +} + +static void HE8uv_NEON(uint8_t* dst) { // horizontal + int j; + for (j = 0; j < 8; ++j) { + const uint8x8_t left = vld1_dup_u8(dst - 1); + vst1_u8(dst, left); + dst += BPS; + } +} + +static WEBP_INLINE void DC8_NEON(uint8_t* dst, int do_top, int do_left) { + uint16x8_t sum_top; + uint16x8_t sum_left; + uint8x8_t dc0; + + if (do_top) { + const uint8x8_t A = vld1_u8(dst - BPS); // top row +#if defined(__aarch64__) + const uint16_t p2 = vaddlv_u8(A); + sum_top = vdupq_n_u16(p2); +#else + const uint16x4_t p0 = vpaddl_u8(A); // cascading summation of the top + const uint16x4_t p1 = vpadd_u16(p0, p0); + const uint16x4_t p2 = vpadd_u16(p1, p1); + sum_top = vcombine_u16(p2, p2); +#endif + } + + if (do_left) { + const uint8x8_t L0 = vld1_u8(dst + 0 * BPS - 1); + const uint8x8_t L1 = vld1_u8(dst + 1 * BPS - 1); + const uint8x8_t L2 = vld1_u8(dst + 2 * BPS - 1); + const uint8x8_t L3 = vld1_u8(dst + 3 * BPS - 1); + const uint8x8_t L4 = vld1_u8(dst + 4 * BPS - 1); + const uint8x8_t L5 = vld1_u8(dst + 5 * BPS - 1); + const uint8x8_t L6 = vld1_u8(dst + 6 * BPS - 1); + const uint8x8_t L7 = vld1_u8(dst + 7 * BPS - 1); + const uint16x8_t s0 = vaddl_u8(L0, L1); + const uint16x8_t s1 = vaddl_u8(L2, L3); + const uint16x8_t s2 = vaddl_u8(L4, L5); + const uint16x8_t s3 = vaddl_u8(L6, L7); + const uint16x8_t s01 = vaddq_u16(s0, s1); + const uint16x8_t s23 = vaddq_u16(s2, s3); + sum_left = vaddq_u16(s01, s23); + } + + if (do_top && do_left) { + const uint16x8_t sum = vaddq_u16(sum_left, sum_top); + dc0 = vrshrn_n_u16(sum, 4); + } else if (do_top) { + dc0 = vrshrn_n_u16(sum_top, 3); + } else if (do_left) { + dc0 = vrshrn_n_u16(sum_left, 3); + } else { + dc0 = vdup_n_u8(0x80); + } + + { + const uint8x8_t dc = vdup_lane_u8(dc0, 0); + int i; + for (i = 0; i < 8; ++i) { + vst1_u32((uint32_t*)(dst + i * BPS), vreinterpret_u32_u8(dc)); + } + } +} + +static void DC8uv_NEON(uint8_t* dst) { DC8_NEON(dst, 1, 1); } +static void DC8uvNoTop_NEON(uint8_t* dst) { DC8_NEON(dst, 0, 1); } +static void DC8uvNoLeft_NEON(uint8_t* dst) { DC8_NEON(dst, 1, 0); } +static void DC8uvNoTopLeft_NEON(uint8_t* dst) { DC8_NEON(dst, 0, 0); } + 
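+// Note on the DC rounding above (illustrative scalar equivalent, not part of
+// the upstream source): vrshrn_n_u16(sum, n) narrows with rounding, i.e. it
+// computes (sum + (1 << (n - 1))) >> n. So with both edges available:
+//   dc = (sum_top + sum_left + 8) >> 4;  // mean of 16 samples
+// with a single edge:
+//   dc = (sum + 4) >> 3;                 // mean of 8 samples
+// and with neither, the predictor falls back to the constant 0x80.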
+static void TM8uv_NEON(uint8_t* dst) { TrueMotion_NEON(dst, 8); } + +//------------------------------------------------------------------------------ +// 16x16 + +static void VE16_NEON(uint8_t* dst) { // vertical + const uint8x16_t top = vld1q_u8(dst - BPS); + int j; + for (j = 0; j < 16; ++j) { + vst1q_u8(dst + j * BPS, top); + } +} + +static void HE16_NEON(uint8_t* dst) { // horizontal + int j; + for (j = 0; j < 16; ++j) { + const uint8x16_t left = vld1q_dup_u8(dst - 1); + vst1q_u8(dst, left); + dst += BPS; + } +} + +static WEBP_INLINE void DC16_NEON(uint8_t* dst, int do_top, int do_left) { + uint16x8_t sum_top; + uint16x8_t sum_left; + uint8x8_t dc0; + + if (do_top) { + const uint8x16_t A = vld1q_u8(dst - BPS); // top row +#if defined(__aarch64__) + const uint16_t p3 = vaddlvq_u8(A); + sum_top = vdupq_n_u16(p3); +#else + const uint16x8_t p0 = vpaddlq_u8(A); // cascading summation of the top + const uint16x4_t p1 = vadd_u16(vget_low_u16(p0), vget_high_u16(p0)); + const uint16x4_t p2 = vpadd_u16(p1, p1); + const uint16x4_t p3 = vpadd_u16(p2, p2); + sum_top = vcombine_u16(p3, p3); +#endif + } + + if (do_left) { + int i; + sum_left = vdupq_n_u16(0); + for (i = 0; i < 16; i += 8) { + const uint8x8_t L0 = vld1_u8(dst + (i + 0) * BPS - 1); + const uint8x8_t L1 = vld1_u8(dst + (i + 1) * BPS - 1); + const uint8x8_t L2 = vld1_u8(dst + (i + 2) * BPS - 1); + const uint8x8_t L3 = vld1_u8(dst + (i + 3) * BPS - 1); + const uint8x8_t L4 = vld1_u8(dst + (i + 4) * BPS - 1); + const uint8x8_t L5 = vld1_u8(dst + (i + 5) * BPS - 1); + const uint8x8_t L6 = vld1_u8(dst + (i + 6) * BPS - 1); + const uint8x8_t L7 = vld1_u8(dst + (i + 7) * BPS - 1); + const uint16x8_t s0 = vaddl_u8(L0, L1); + const uint16x8_t s1 = vaddl_u8(L2, L3); + const uint16x8_t s2 = vaddl_u8(L4, L5); + const uint16x8_t s3 = vaddl_u8(L6, L7); + const uint16x8_t s01 = vaddq_u16(s0, s1); + const uint16x8_t s23 = vaddq_u16(s2, s3); + const uint16x8_t sum = vaddq_u16(s01, s23); + sum_left = vaddq_u16(sum_left, sum); + } + } + + if (do_top && do_left) { + const uint16x8_t sum = vaddq_u16(sum_left, sum_top); + dc0 = vrshrn_n_u16(sum, 5); + } else if (do_top) { + dc0 = vrshrn_n_u16(sum_top, 4); + } else if (do_left) { + dc0 = vrshrn_n_u16(sum_left, 4); + } else { + dc0 = vdup_n_u8(0x80); + } + + { + const uint8x16_t dc = vdupq_lane_u8(dc0, 0); + int i; + for (i = 0; i < 16; ++i) { + vst1q_u8(dst + i * BPS, dc); + } + } +} + +static void DC16TopLeft_NEON(uint8_t* dst) { DC16_NEON(dst, 1, 1); } +static void DC16NoTop_NEON(uint8_t* dst) { DC16_NEON(dst, 0, 1); } +static void DC16NoLeft_NEON(uint8_t* dst) { DC16_NEON(dst, 1, 0); } +static void DC16NoTopLeft_NEON(uint8_t* dst) { DC16_NEON(dst, 0, 0); } + +static void TM16_NEON(uint8_t* dst) { + const uint8x8_t TL = vld1_dup_u8(dst - BPS - 1); // top-left pixel 'A[-1]' + const uint8x16_t T = vld1q_u8(dst - BPS); // top row 'A[0..15]' + // A[c] - A[-1] + const int16x8_t d_lo = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(T), TL)); + const int16x8_t d_hi = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(T), TL)); + int y; + for (y = 0; y < 16; y += 4) { + // left edge + const int16x8_t L0 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 0 * BPS - 1)); + const int16x8_t L1 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 1 * BPS - 1)); + const int16x8_t L2 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 2 * BPS - 1)); + const int16x8_t L3 = ConvertU8ToS16_NEON(vld1_dup_u8(dst + 3 * BPS - 1)); + const int16x8_t r0_lo = vaddq_s16(L0, d_lo); // L[r] + A[c] - A[-1] + const int16x8_t r1_lo = vaddq_s16(L1, d_lo); + const int16x8_t r2_lo = 
vaddq_s16(L2, d_lo); + const int16x8_t r3_lo = vaddq_s16(L3, d_lo); + const int16x8_t r0_hi = vaddq_s16(L0, d_hi); + const int16x8_t r1_hi = vaddq_s16(L1, d_hi); + const int16x8_t r2_hi = vaddq_s16(L2, d_hi); + const int16x8_t r3_hi = vaddq_s16(L3, d_hi); + // Saturate and store the result. + const uint8x16_t row0 = vcombine_u8(vqmovun_s16(r0_lo), vqmovun_s16(r0_hi)); + const uint8x16_t row1 = vcombine_u8(vqmovun_s16(r1_lo), vqmovun_s16(r1_hi)); + const uint8x16_t row2 = vcombine_u8(vqmovun_s16(r2_lo), vqmovun_s16(r2_hi)); + const uint8x16_t row3 = vcombine_u8(vqmovun_s16(r3_lo), vqmovun_s16(r3_hi)); + vst1q_u8(dst + 0 * BPS, row0); + vst1q_u8(dst + 1 * BPS, row1); + vst1q_u8(dst + 2 * BPS, row2); + vst1q_u8(dst + 3 * BPS, row3); + dst += 4 * BPS; + } +} + +//------------------------------------------------------------------------------ +// Entry point + +extern void VP8DspInitNEON(void); + +WEBP_TSAN_IGNORE_FUNCTION void VP8DspInitNEON(void) { + VP8Transform = TransformTwo_NEON; + VP8TransformAC3 = TransformAC3_NEON; + VP8TransformDC = TransformDC_NEON; + VP8TransformWHT = TransformWHT_NEON; + + VP8VFilter16 = VFilter16_NEON; + VP8VFilter16i = VFilter16i_NEON; + VP8HFilter16 = HFilter16_NEON; +#if !defined(WORK_AROUND_GCC) + VP8HFilter16i = HFilter16i_NEON; +#endif + VP8VFilter8 = VFilter8_NEON; + VP8VFilter8i = VFilter8i_NEON; +#if !defined(WORK_AROUND_GCC) + VP8HFilter8 = HFilter8_NEON; + VP8HFilter8i = HFilter8i_NEON; +#endif + VP8SimpleVFilter16 = SimpleVFilter16_NEON; + VP8SimpleHFilter16 = SimpleHFilter16_NEON; + VP8SimpleVFilter16i = SimpleVFilter16i_NEON; + VP8SimpleHFilter16i = SimpleHFilter16i_NEON; + + VP8PredLuma4[0] = DC4_NEON; + VP8PredLuma4[1] = TM4_NEON; + VP8PredLuma4[2] = VE4_NEON; + VP8PredLuma4[4] = RD4_NEON; + VP8PredLuma4[6] = LD4_NEON; + + VP8PredLuma16[0] = DC16TopLeft_NEON; + VP8PredLuma16[1] = TM16_NEON; + VP8PredLuma16[2] = VE16_NEON; + VP8PredLuma16[3] = HE16_NEON; + VP8PredLuma16[4] = DC16NoTop_NEON; + VP8PredLuma16[5] = DC16NoLeft_NEON; + VP8PredLuma16[6] = DC16NoTopLeft_NEON; + + VP8PredChroma8[0] = DC8uv_NEON; + VP8PredChroma8[1] = TM8uv_NEON; + VP8PredChroma8[2] = VE8uv_NEON; + VP8PredChroma8[3] = HE8uv_NEON; + VP8PredChroma8[4] = DC8uvNoTop_NEON; + VP8PredChroma8[5] = DC8uvNoLeft_NEON; + VP8PredChroma8[6] = DC8uvNoTopLeft_NEON; +} + +#else // !WEBP_USE_NEON + +WEBP_DSP_INIT_STUB(VP8DspInitNEON) + +#endif // WEBP_USE_NEON diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_dec_sse2.c b/vendor/github.com/sizeofint/webpanimation/dsp_dec_sse2.c new file mode 100644 index 00000000..7b41d467 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_dec_sse2.c @@ -0,0 +1,1227 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// SSE2 version of some decoding functions (idct, loop filtering). +// +// Author: somnath@google.com (Somnath Banerjee) +// cduvivier@google.com (Christian Duvivier) + +#include "dsp_dsp.h" + +#if defined(WEBP_USE_SSE2) + +// The 3-coeff sparse transform in SSE2 is not really faster than the plain-C +// one it seems => disable it by default. 
Uncomment the following to enable: +#if !defined(USE_TRANSFORM_AC3) +#define USE_TRANSFORM_AC3 0 // ALTERNATE_CODE +#endif + +#include <emmintrin.h> +#include "dsp_common_sse2.h" +#include "dec_vp8i_dec.h" +#include "utils_utils.h" + +//------------------------------------------------------------------------------ +// Transforms (Paragraph 14.4) + +static void Transform_SSE2(const int16_t* in, uint8_t* dst, int do_two) { + // This implementation makes use of 16-bit fixed point versions of two + // multiply constants: + // K1 = sqrt(2) * cos (pi/8) ~= 85627 / 2^16 + // K2 = sqrt(2) * sin (pi/8) ~= 35468 / 2^16 + // + // To be able to use signed 16-bit integers, we use the following trick to + // have constants within range: + // - Associated constants are obtained by subtracting the 16-bit fixed point + // version of one: + // k = K - (1 << 16) => K = k + (1 << 16) + // K1 = 85627 => k1 = 20091 + // K2 = 35468 => k2 = -30068 + // - The multiplication of a variable by a constant becomes the sum of the + // variable and the multiplication of that variable by the associated + // constant: + // (x * K) >> 16 = (x * (k + (1 << 16))) >> 16 = ((x * k ) >> 16) + x + // e.g. for K2: (x * 35468) >> 16 == ((x * -30068) >> 16) + x + const __m128i k1 = _mm_set1_epi16(20091); + const __m128i k2 = _mm_set1_epi16(-30068); + __m128i T0, T1, T2, T3; + + // Load and concatenate the transform coefficients (we'll do two transforms + // in parallel). In the case of only one transform, the second half of the + // vectors will just contain random values we'll never use nor store. + __m128i in0, in1, in2, in3; + { + in0 = _mm_loadl_epi64((const __m128i*)&in[0]); + in1 = _mm_loadl_epi64((const __m128i*)&in[4]); + in2 = _mm_loadl_epi64((const __m128i*)&in[8]); + in3 = _mm_loadl_epi64((const __m128i*)&in[12]); + // a00 a10 a20 a30 x x x x + // a01 a11 a21 a31 x x x x + // a02 a12 a22 a32 x x x x + // a03 a13 a23 a33 x x x x + if (do_two) { + const __m128i inB0 = _mm_loadl_epi64((const __m128i*)&in[16]); + const __m128i inB1 = _mm_loadl_epi64((const __m128i*)&in[20]); + const __m128i inB2 = _mm_loadl_epi64((const __m128i*)&in[24]); + const __m128i inB3 = _mm_loadl_epi64((const __m128i*)&in[28]); + in0 = _mm_unpacklo_epi64(in0, inB0); + in1 = _mm_unpacklo_epi64(in1, inB1); + in2 = _mm_unpacklo_epi64(in2, inB2); + in3 = _mm_unpacklo_epi64(in3, inB3); + // a00 a10 a20 a30 b00 b10 b20 b30 + // a01 a11 a21 a31 b01 b11 b21 b31 + // a02 a12 a22 a32 b02 b12 b22 b32 + // a03 a13 a23 a33 b03 b13 b23 b33 + } + } + + // Vertical pass and subsequent transpose. + { + // First pass, c and d calculations are longer because of the "trick" + // multiplications. + const __m128i a = _mm_add_epi16(in0, in2); + const __m128i b = _mm_sub_epi16(in0, in2); + // c = MUL(in1, K2) - MUL(in3, K1) = MUL(in1, k2) - MUL(in3, k1) + in1 - in3 + const __m128i c1 = _mm_mulhi_epi16(in1, k2); + const __m128i c2 = _mm_mulhi_epi16(in3, k1); + const __m128i c3 = _mm_sub_epi16(in1, in3); + const __m128i c4 = _mm_sub_epi16(c1, c2); + const __m128i c = _mm_add_epi16(c3, c4); + // d = MUL(in1, K1) + MUL(in3, K2) = MUL(in1, k1) + MUL(in3, k2) + in1 + in3 + const __m128i d1 = _mm_mulhi_epi16(in1, k1); + const __m128i d2 = _mm_mulhi_epi16(in3, k2); + const __m128i d3 = _mm_add_epi16(in1, in3); + const __m128i d4 = _mm_add_epi16(d1, d2); + const __m128i d = _mm_add_epi16(d3, d4); + + // Second pass. + const __m128i tmp0 = _mm_add_epi16(a, d); + const __m128i tmp1 = _mm_add_epi16(b, c); + const __m128i tmp2 = _mm_sub_epi16(b, c); + const __m128i tmp3 = _mm_sub_epi16(a, d); + + // Transpose the two 4x4. 
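+    // Transposing here lets the horizontal pass below reuse the same column-wise arithmetic on what were rows.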
+ VP8Transpose_2_4x4_16b(&tmp0, &tmp1, &tmp2, &tmp3, &T0, &T1, &T2, &T3); + } + + // Horizontal pass and subsequent transpose. + { + // First pass, c and d calculations are longer because of the "trick" + // multiplications. + const __m128i four = _mm_set1_epi16(4); + const __m128i dc = _mm_add_epi16(T0, four); + const __m128i a = _mm_add_epi16(dc, T2); + const __m128i b = _mm_sub_epi16(dc, T2); + // c = MUL(T1, K2) - MUL(T3, K1) = MUL(T1, k2) - MUL(T3, k1) + T1 - T3 + const __m128i c1 = _mm_mulhi_epi16(T1, k2); + const __m128i c2 = _mm_mulhi_epi16(T3, k1); + const __m128i c3 = _mm_sub_epi16(T1, T3); + const __m128i c4 = _mm_sub_epi16(c1, c2); + const __m128i c = _mm_add_epi16(c3, c4); + // d = MUL(T1, K1) + MUL(T3, K2) = MUL(T1, k1) + MUL(T3, k2) + T1 + T3 + const __m128i d1 = _mm_mulhi_epi16(T1, k1); + const __m128i d2 = _mm_mulhi_epi16(T3, k2); + const __m128i d3 = _mm_add_epi16(T1, T3); + const __m128i d4 = _mm_add_epi16(d1, d2); + const __m128i d = _mm_add_epi16(d3, d4); + + // Second pass. + const __m128i tmp0 = _mm_add_epi16(a, d); + const __m128i tmp1 = _mm_add_epi16(b, c); + const __m128i tmp2 = _mm_sub_epi16(b, c); + const __m128i tmp3 = _mm_sub_epi16(a, d); + const __m128i shifted0 = _mm_srai_epi16(tmp0, 3); + const __m128i shifted1 = _mm_srai_epi16(tmp1, 3); + const __m128i shifted2 = _mm_srai_epi16(tmp2, 3); + const __m128i shifted3 = _mm_srai_epi16(tmp3, 3); + + // Transpose the two 4x4. + VP8Transpose_2_4x4_16b(&shifted0, &shifted1, &shifted2, &shifted3, &T0, &T1, + &T2, &T3); + } + + // Add inverse transform to 'dst' and store. + { + const __m128i zero = _mm_setzero_si128(); + // Load the reference(s). + __m128i dst0, dst1, dst2, dst3; + if (do_two) { + // Load eight bytes/pixels per line. + dst0 = _mm_loadl_epi64((__m128i*)(dst + 0 * BPS)); + dst1 = _mm_loadl_epi64((__m128i*)(dst + 1 * BPS)); + dst2 = _mm_loadl_epi64((__m128i*)(dst + 2 * BPS)); + dst3 = _mm_loadl_epi64((__m128i*)(dst + 3 * BPS)); + } else { + // Load four bytes/pixels per line. + dst0 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 0 * BPS)); + dst1 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 1 * BPS)); + dst2 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 2 * BPS)); + dst3 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 3 * BPS)); + } + // Convert to 16b. + dst0 = _mm_unpacklo_epi8(dst0, zero); + dst1 = _mm_unpacklo_epi8(dst1, zero); + dst2 = _mm_unpacklo_epi8(dst2, zero); + dst3 = _mm_unpacklo_epi8(dst3, zero); + // Add the inverse transform(s). + dst0 = _mm_add_epi16(dst0, T0); + dst1 = _mm_add_epi16(dst1, T1); + dst2 = _mm_add_epi16(dst2, T2); + dst3 = _mm_add_epi16(dst3, T3); + // Unsigned saturate to 8b. + dst0 = _mm_packus_epi16(dst0, dst0); + dst1 = _mm_packus_epi16(dst1, dst1); + dst2 = _mm_packus_epi16(dst2, dst2); + dst3 = _mm_packus_epi16(dst3, dst3); + // Store the results. + if (do_two) { + // Store eight bytes/pixels per line. + _mm_storel_epi64((__m128i*)(dst + 0 * BPS), dst0); + _mm_storel_epi64((__m128i*)(dst + 1 * BPS), dst1); + _mm_storel_epi64((__m128i*)(dst + 2 * BPS), dst2); + _mm_storel_epi64((__m128i*)(dst + 3 * BPS), dst3); + } else { + // Store four bytes/pixels per line. 
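+      // (WebPMemToUint32 / WebPUint32ToMem, from utils_utils.h, are helpers for unaligned 32-bit loads and stores.)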
+ WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(dst0));
+ WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(dst1));
+ WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(dst2));
+ WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(dst3));
+ }
+ }
+}
+
+#if (USE_TRANSFORM_AC3 == 1)
+#define MUL(a, b) (((a) * (b)) >> 16)
+static void TransformAC3_SSE2(const int16_t* in, uint8_t* dst) {
+ static const int kC1 = 20091 + (1 << 16);
+ static const int kC2 = 35468;
+ const __m128i A = _mm_set1_epi16(in[0] + 4);
+ const __m128i c4 = _mm_set1_epi16(MUL(in[4], kC2));
+ const __m128i d4 = _mm_set1_epi16(MUL(in[4], kC1));
+ const int c1 = MUL(in[1], kC2);
+ const int d1 = MUL(in[1], kC1);
+ const __m128i CD = _mm_set_epi16(0, 0, 0, 0, -d1, -c1, c1, d1);
+ const __m128i B = _mm_adds_epi16(A, CD);
+ const __m128i m0 = _mm_adds_epi16(B, d4);
+ const __m128i m1 = _mm_adds_epi16(B, c4);
+ const __m128i m2 = _mm_subs_epi16(B, c4);
+ const __m128i m3 = _mm_subs_epi16(B, d4);
+ const __m128i zero = _mm_setzero_si128();
+ // Load the source pixels.
+ __m128i dst0 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 0 * BPS));
+ __m128i dst1 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 1 * BPS));
+ __m128i dst2 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 2 * BPS));
+ __m128i dst3 = _mm_cvtsi32_si128(WebPMemToUint32(dst + 3 * BPS));
+ // Convert to 16b.
+ dst0 = _mm_unpacklo_epi8(dst0, zero);
+ dst1 = _mm_unpacklo_epi8(dst1, zero);
+ dst2 = _mm_unpacklo_epi8(dst2, zero);
+ dst3 = _mm_unpacklo_epi8(dst3, zero);
+ // Add the inverse transform.
+ dst0 = _mm_adds_epi16(dst0, _mm_srai_epi16(m0, 3));
+ dst1 = _mm_adds_epi16(dst1, _mm_srai_epi16(m1, 3));
+ dst2 = _mm_adds_epi16(dst2, _mm_srai_epi16(m2, 3));
+ dst3 = _mm_adds_epi16(dst3, _mm_srai_epi16(m3, 3));
+ // Unsigned saturate to 8b.
+ dst0 = _mm_packus_epi16(dst0, dst0);
+ dst1 = _mm_packus_epi16(dst1, dst1);
+ dst2 = _mm_packus_epi16(dst2, dst2);
+ dst3 = _mm_packus_epi16(dst3, dst3);
+ // Store the results.
+ WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(dst0));
+ WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(dst1));
+ WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(dst2));
+ WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(dst3));
+}
+#undef MUL
+#endif // USE_TRANSFORM_AC3
+
+//------------------------------------------------------------------------------
+// Loop Filter (Paragraph 15)
+
+// Compute abs(p - q) = subs(p - q) OR subs(q - p)
+#define MM_ABS(p, q) _mm_or_si128( \
+ _mm_subs_epu8((q), (p)), \
+ _mm_subs_epu8((p), (q)))
+
+// Shift each byte of "x" by 3 bits while preserving the sign bit.
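+// SSE2 has no 8-bit arithmetic shift, so the bytes are widened into the high
+// halves of 16-bit lanes (unpack with zero), shifted by 3 + 8, and re-packed
+// with signed saturation.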
+static WEBP_INLINE void SignedShift8b_SSE2(__m128i* const x) { + const __m128i zero = _mm_setzero_si128(); + const __m128i lo_0 = _mm_unpacklo_epi8(zero, *x); + const __m128i hi_0 = _mm_unpackhi_epi8(zero, *x); + const __m128i lo_1 = _mm_srai_epi16(lo_0, 3 + 8); + const __m128i hi_1 = _mm_srai_epi16(hi_0, 3 + 8); + *x = _mm_packs_epi16(lo_1, hi_1); +} + +#define FLIP_SIGN_BIT2(a, b) { \ + (a) = _mm_xor_si128(a, sign_bit); \ + (b) = _mm_xor_si128(b, sign_bit); \ +} + +#define FLIP_SIGN_BIT4(a, b, c, d) { \ + FLIP_SIGN_BIT2(a, b); \ + FLIP_SIGN_BIT2(c, d); \ +} + +// input/output is uint8_t +static WEBP_INLINE void GetNotHEV_SSE2(const __m128i* const p1, + const __m128i* const p0, + const __m128i* const q0, + const __m128i* const q1, + int hev_thresh, __m128i* const not_hev) { + const __m128i zero = _mm_setzero_si128(); + const __m128i t_1 = MM_ABS(*p1, *p0); + const __m128i t_2 = MM_ABS(*q1, *q0); + + const __m128i h = _mm_set1_epi8(hev_thresh); + const __m128i t_max = _mm_max_epu8(t_1, t_2); + + const __m128i t_max_h = _mm_subs_epu8(t_max, h); + *not_hev = _mm_cmpeq_epi8(t_max_h, zero); // not_hev <= t1 && not_hev <= t2 +} + +// input pixels are int8_t +static WEBP_INLINE void GetBaseDelta_SSE2(const __m128i* const p1, + const __m128i* const p0, + const __m128i* const q0, + const __m128i* const q1, + __m128i* const delta) { + // beware of addition order, for saturation! + const __m128i p1_q1 = _mm_subs_epi8(*p1, *q1); // p1 - q1 + const __m128i q0_p0 = _mm_subs_epi8(*q0, *p0); // q0 - p0 + const __m128i s1 = _mm_adds_epi8(p1_q1, q0_p0); // p1 - q1 + 1 * (q0 - p0) + const __m128i s2 = _mm_adds_epi8(q0_p0, s1); // p1 - q1 + 2 * (q0 - p0) + const __m128i s3 = _mm_adds_epi8(q0_p0, s2); // p1 - q1 + 3 * (q0 - p0) + *delta = s3; +} + +// input and output are int8_t +static WEBP_INLINE void DoSimpleFilter_SSE2(__m128i* const p0, + __m128i* const q0, + const __m128i* const fl) { + const __m128i k3 = _mm_set1_epi8(3); + const __m128i k4 = _mm_set1_epi8(4); + __m128i v3 = _mm_adds_epi8(*fl, k3); + __m128i v4 = _mm_adds_epi8(*fl, k4); + + SignedShift8b_SSE2(&v4); // v4 >> 3 + SignedShift8b_SSE2(&v3); // v3 >> 3 + *q0 = _mm_subs_epi8(*q0, v4); // q0 -= v4 + *p0 = _mm_adds_epi8(*p0, v3); // p0 += v3 +} + +// Updates values of 2 pixels at MB edge during complex filtering. +// Update operations: +// q = q - delta and p = p + delta; where delta = [(a_hi >> 7), (a_lo >> 7)] +// Pixels 'pi' and 'qi' are int8_t on input, uint8_t on output (sign flip). 
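+// The arithmetic shift by 7 turns the 'f*9 + 63', 'f*18 + 63' and 'f*27 + 63'
+// accumulators built in DoFilter6_SSE2() below into rounded pixel deltas.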
+static WEBP_INLINE void Update2Pixels_SSE2(__m128i* const pi, __m128i* const qi,
+ const __m128i* const a0_lo,
+ const __m128i* const a0_hi) {
+ const __m128i a1_lo = _mm_srai_epi16(*a0_lo, 7);
+ const __m128i a1_hi = _mm_srai_epi16(*a0_hi, 7);
+ const __m128i delta = _mm_packs_epi16(a1_lo, a1_hi);
+ const __m128i sign_bit = _mm_set1_epi8((char)0x80);
+ *pi = _mm_adds_epi8(*pi, delta);
+ *qi = _mm_subs_epi8(*qi, delta);
+ FLIP_SIGN_BIT2(*pi, *qi);
+}
+
+// input pixels are uint8_t
+static WEBP_INLINE void NeedsFilter_SSE2(const __m128i* const p1,
+ const __m128i* const p0,
+ const __m128i* const q0,
+ const __m128i* const q1,
+ int thresh, __m128i* const mask) {
+ const __m128i m_thresh = _mm_set1_epi8((char)thresh);
+ const __m128i t1 = MM_ABS(*p1, *q1); // abs(p1 - q1)
+ const __m128i kFE = _mm_set1_epi8((char)0xFE);
+ const __m128i t2 = _mm_and_si128(t1, kFE); // set lsb of each byte to zero
+ const __m128i t3 = _mm_srli_epi16(t2, 1); // abs(p1 - q1) / 2
+
+ const __m128i t4 = MM_ABS(*p0, *q0); // abs(p0 - q0)
+ const __m128i t5 = _mm_adds_epu8(t4, t4); // abs(p0 - q0) * 2
+ const __m128i t6 = _mm_adds_epu8(t5, t3); // abs(p0-q0)*2 + abs(p1-q1)/2
+
+ const __m128i t7 = _mm_subs_epu8(t6, m_thresh); // mask <= m_thresh
+ *mask = _mm_cmpeq_epi8(t7, _mm_setzero_si128());
+}
+
+//------------------------------------------------------------------------------
+// Edge filtering functions
+
+// Applies filter on 2 pixels (p0 and q0)
+static WEBP_INLINE void DoFilter2_SSE2(__m128i* const p1, __m128i* const p0,
+ __m128i* const q0, __m128i* const q1,
+ int thresh) {
+ __m128i a, mask;
+ const __m128i sign_bit = _mm_set1_epi8((char)0x80);
+ // convert p1/q1 to int8_t (for GetBaseDelta_SSE2)
+ const __m128i p1s = _mm_xor_si128(*p1, sign_bit);
+ const __m128i q1s = _mm_xor_si128(*q1, sign_bit);
+
+ NeedsFilter_SSE2(p1, p0, q0, q1, thresh, &mask);
+
+ FLIP_SIGN_BIT2(*p0, *q0);
+ GetBaseDelta_SSE2(&p1s, p0, q0, &q1s, &a);
+ a = _mm_and_si128(a, mask); // mask filter values we don't care about
+ DoSimpleFilter_SSE2(p0, q0, &a);
+ FLIP_SIGN_BIT2(*p0, *q0);
+}
+
+// Applies filter on 4 pixels (p1, p0, q0 and q1)
+static WEBP_INLINE void DoFilter4_SSE2(__m128i* const p1, __m128i* const p0,
+ __m128i* const q0, __m128i* const q1,
+ const __m128i* const mask,
+ int hev_thresh) {
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i sign_bit = _mm_set1_epi8((char)0x80);
+ const __m128i k64 = _mm_set1_epi8(64);
+ const __m128i k3 = _mm_set1_epi8(3);
+ const __m128i k4 = _mm_set1_epi8(4);
+ __m128i not_hev;
+ __m128i t1, t2, t3;
+
+ // compute hev mask
+ GetNotHEV_SSE2(p1, p0, q0, q1, hev_thresh, &not_hev);
+
+ // convert to signed values
+ FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
+
+ t1 = _mm_subs_epi8(*p1, *q1); // p1 - q1
+ t1 = _mm_andnot_si128(not_hev, t1); // hev(p1 - q1)
+ t2 = _mm_subs_epi8(*q0, *p0); // q0 - p0
+ t1 = _mm_adds_epi8(t1, t2); // hev(p1 - q1) + 1 * (q0 - p0)
+ t1 = _mm_adds_epi8(t1, t2); // hev(p1 - q1) + 2 * (q0 - p0)
+ t1 = _mm_adds_epi8(t1, t2); // hev(p1 - q1) + 3 * (q0 - p0)
+ t1 = _mm_and_si128(t1, *mask); // mask filter values we don't care about
+
+ t2 = _mm_adds_epi8(t1, k3); // 3 * (q0 - p0) + hev(p1 - q1) + 3
+ t3 = _mm_adds_epi8(t1, k4); // 3 * (q0 - p0) + hev(p1 - q1) + 4
+ SignedShift8b_SSE2(&t2); // (3 * (q0 - p0) + hev(p1 - q1) + 3) >> 3
+ SignedShift8b_SSE2(&t3); // (3 * (q0 - p0) + hev(p1 - q1) + 4) >> 3
+ *p0 = _mm_adds_epi8(*p0, t2); // p0 += t2
+ *q0 = _mm_subs_epi8(*q0, t3); // q0 -= t3
+ FLIP_SIGN_BIT2(*p0, *q0);
+
+ // this is equivalent to signed (a + 1) >> 1
calculation
+ t2 = _mm_add_epi8(t3, sign_bit);
+ t3 = _mm_avg_epu8(t2, zero);
+ t3 = _mm_sub_epi8(t3, k64);
+
+ t3 = _mm_and_si128(not_hev, t3); // if !hev
+ *q1 = _mm_subs_epi8(*q1, t3); // q1 -= t3
+ *p1 = _mm_adds_epi8(*p1, t3); // p1 += t3
+ FLIP_SIGN_BIT2(*p1, *q1);
+}
+
+// Applies filter on 6 pixels (p2, p1, p0, q0, q1 and q2)
+static WEBP_INLINE void DoFilter6_SSE2(__m128i* const p2, __m128i* const p1,
+ __m128i* const p0, __m128i* const q0,
+ __m128i* const q1, __m128i* const q2,
+ const __m128i* const mask,
+ int hev_thresh) {
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i sign_bit = _mm_set1_epi8((char)0x80);
+ __m128i a, not_hev;
+
+ // compute hev mask
+ GetNotHEV_SSE2(p1, p0, q0, q1, hev_thresh, &not_hev);
+
+ FLIP_SIGN_BIT4(*p1, *p0, *q0, *q1);
+ FLIP_SIGN_BIT2(*p2, *q2);
+ GetBaseDelta_SSE2(p1, p0, q0, q1, &a);
+
+ { // do simple filter on pixels with hev
+ const __m128i m = _mm_andnot_si128(not_hev, *mask);
+ const __m128i f = _mm_and_si128(a, m);
+ DoSimpleFilter_SSE2(p0, q0, &f);
+ }
+
+ { // do strong filter on pixels with not hev
+ const __m128i k9 = _mm_set1_epi16(0x0900);
+ const __m128i k63 = _mm_set1_epi16(63);
+
+ const __m128i m = _mm_and_si128(not_hev, *mask);
+ const __m128i f = _mm_and_si128(a, m);
+
+ const __m128i f_lo = _mm_unpacklo_epi8(zero, f);
+ const __m128i f_hi = _mm_unpackhi_epi8(zero, f);
+
+ const __m128i f9_lo = _mm_mulhi_epi16(f_lo, k9); // Filter (lo) * 9
+ const __m128i f9_hi = _mm_mulhi_epi16(f_hi, k9); // Filter (hi) * 9
+
+ const __m128i a2_lo = _mm_add_epi16(f9_lo, k63); // Filter * 9 + 63
+ const __m128i a2_hi = _mm_add_epi16(f9_hi, k63); // Filter * 9 + 63
+
+ const __m128i a1_lo = _mm_add_epi16(a2_lo, f9_lo); // Filter * 18 + 63
+ const __m128i a1_hi = _mm_add_epi16(a2_hi, f9_hi); // Filter * 18 + 63
+
+ const __m128i a0_lo = _mm_add_epi16(a1_lo, f9_lo); // Filter * 27 + 63
+ const __m128i a0_hi = _mm_add_epi16(a1_hi, f9_hi); // Filter * 27 + 63
+
+ Update2Pixels_SSE2(p2, q2, &a2_lo, &a2_hi);
+ Update2Pixels_SSE2(p1, q1, &a1_lo, &a1_hi);
+ Update2Pixels_SSE2(p0, q0, &a0_lo, &a0_hi);
+ }
+}
+
+// reads 8 rows across a vertical edge.
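+// Eight rows of four pixels are gathered with 32-bit loads, then transposed
+// with three rounds of unpacking (bytes, words, dwords): *p receives columns
+// 0-1 and *q receives columns 2-3.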
+static WEBP_INLINE void Load8x4_SSE2(const uint8_t* const b, int stride, + __m128i* const p, __m128i* const q) { + // A0 = 63 62 61 60 23 22 21 20 43 42 41 40 03 02 01 00 + // A1 = 73 72 71 70 33 32 31 30 53 52 51 50 13 12 11 10 + const __m128i A0 = _mm_set_epi32( + WebPMemToUint32(&b[6 * stride]), WebPMemToUint32(&b[2 * stride]), + WebPMemToUint32(&b[4 * stride]), WebPMemToUint32(&b[0 * stride])); + const __m128i A1 = _mm_set_epi32( + WebPMemToUint32(&b[7 * stride]), WebPMemToUint32(&b[3 * stride]), + WebPMemToUint32(&b[5 * stride]), WebPMemToUint32(&b[1 * stride])); + + // B0 = 53 43 52 42 51 41 50 40 13 03 12 02 11 01 10 00 + // B1 = 73 63 72 62 71 61 70 60 33 23 32 22 31 21 30 20 + const __m128i B0 = _mm_unpacklo_epi8(A0, A1); + const __m128i B1 = _mm_unpackhi_epi8(A0, A1); + + // C0 = 33 23 13 03 32 22 12 02 31 21 11 01 30 20 10 00 + // C1 = 73 63 53 43 72 62 52 42 71 61 51 41 70 60 50 40 + const __m128i C0 = _mm_unpacklo_epi16(B0, B1); + const __m128i C1 = _mm_unpackhi_epi16(B0, B1); + + // *p = 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00 + // *q = 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02 + *p = _mm_unpacklo_epi32(C0, C1); + *q = _mm_unpackhi_epi32(C0, C1); +} + +static WEBP_INLINE void Load16x4_SSE2(const uint8_t* const r0, + const uint8_t* const r8, + int stride, + __m128i* const p1, __m128i* const p0, + __m128i* const q0, __m128i* const q1) { + // Assume the pixels around the edge (|) are numbered as follows + // 00 01 | 02 03 + // 10 11 | 12 13 + // ... | ... + // e0 e1 | e2 e3 + // f0 f1 | f2 f3 + // + // r0 is pointing to the 0th row (00) + // r8 is pointing to the 8th row (80) + + // Load + // p1 = 71 61 51 41 31 21 11 01 70 60 50 40 30 20 10 00 + // q0 = 73 63 53 43 33 23 13 03 72 62 52 42 32 22 12 02 + // p0 = f1 e1 d1 c1 b1 a1 91 81 f0 e0 d0 c0 b0 a0 90 80 + // q1 = f3 e3 d3 c3 b3 a3 93 83 f2 e2 d2 c2 b2 a2 92 82 + Load8x4_SSE2(r0, stride, p1, q0); + Load8x4_SSE2(r8, stride, p0, q1); + + { + // p1 = f0 e0 d0 c0 b0 a0 90 80 70 60 50 40 30 20 10 00 + // p0 = f1 e1 d1 c1 b1 a1 91 81 71 61 51 41 31 21 11 01 + // q0 = f2 e2 d2 c2 b2 a2 92 82 72 62 52 42 32 22 12 02 + // q1 = f3 e3 d3 c3 b3 a3 93 83 73 63 53 43 33 23 13 03 + const __m128i t1 = *p1; + const __m128i t2 = *q0; + *p1 = _mm_unpacklo_epi64(t1, *p0); + *p0 = _mm_unpackhi_epi64(t1, *p0); + *q0 = _mm_unpacklo_epi64(t2, *q1); + *q1 = _mm_unpackhi_epi64(t2, *q1); + } +} + +static WEBP_INLINE void Store4x4_SSE2(__m128i* const x, + uint8_t* dst, int stride) { + int i; + for (i = 0; i < 4; ++i, dst += stride) { + WebPUint32ToMem(dst, _mm_cvtsi128_si32(*x)); + *x = _mm_srli_si128(*x, 4); + } +} + +// Transpose back and store +static WEBP_INLINE void Store16x4_SSE2(const __m128i* const p1, + const __m128i* const p0, + const __m128i* const q0, + const __m128i* const q1, + uint8_t* r0, uint8_t* r8, + int stride) { + __m128i t1, p1_s, p0_s, q0_s, q1_s; + + // p0 = 71 70 61 60 51 50 41 40 31 30 21 20 11 10 01 00 + // p1 = f1 f0 e1 e0 d1 d0 c1 c0 b1 b0 a1 a0 91 90 81 80 + t1 = *p0; + p0_s = _mm_unpacklo_epi8(*p1, t1); + p1_s = _mm_unpackhi_epi8(*p1, t1); + + // q0 = 73 72 63 62 53 52 43 42 33 32 23 22 13 12 03 02 + // q1 = f3 f2 e3 e2 d3 d2 c3 c2 b3 b2 a3 a2 93 92 83 82 + t1 = *q0; + q0_s = _mm_unpacklo_epi8(t1, *q1); + q1_s = _mm_unpackhi_epi8(t1, *q1); + + // p0 = 33 32 31 30 23 22 21 20 13 12 11 10 03 02 01 00 + // q0 = 73 72 71 70 63 62 61 60 53 52 51 50 43 42 41 40 + t1 = p0_s; + p0_s = _mm_unpacklo_epi16(t1, q0_s); + q0_s = _mm_unpackhi_epi16(t1, q0_s); + + // p1 = b3 b2 b1 b0 a3 a2 a1 a0 93 92 91 90 83 82 81 80 + 
// q1 = f3 f2 f1 f0 e3 e2 e1 e0 d3 d2 d1 d0 c3 c2 c1 c0 + t1 = p1_s; + p1_s = _mm_unpacklo_epi16(t1, q1_s); + q1_s = _mm_unpackhi_epi16(t1, q1_s); + + Store4x4_SSE2(&p0_s, r0, stride); + r0 += 4 * stride; + Store4x4_SSE2(&q0_s, r0, stride); + + Store4x4_SSE2(&p1_s, r8, stride); + r8 += 4 * stride; + Store4x4_SSE2(&q1_s, r8, stride); +} + +//------------------------------------------------------------------------------ +// Simple In-loop filtering (Paragraph 15.2) + +static void SimpleVFilter16_SSE2(uint8_t* p, int stride, int thresh) { + // Load + __m128i p1 = _mm_loadu_si128((__m128i*)&p[-2 * stride]); + __m128i p0 = _mm_loadu_si128((__m128i*)&p[-stride]); + __m128i q0 = _mm_loadu_si128((__m128i*)&p[0]); + __m128i q1 = _mm_loadu_si128((__m128i*)&p[stride]); + + DoFilter2_SSE2(&p1, &p0, &q0, &q1, thresh); + + // Store + _mm_storeu_si128((__m128i*)&p[-stride], p0); + _mm_storeu_si128((__m128i*)&p[0], q0); +} + +static void SimpleHFilter16_SSE2(uint8_t* p, int stride, int thresh) { + __m128i p1, p0, q0, q1; + + p -= 2; // beginning of p1 + + Load16x4_SSE2(p, p + 8 * stride, stride, &p1, &p0, &q0, &q1); + DoFilter2_SSE2(&p1, &p0, &q0, &q1, thresh); + Store16x4_SSE2(&p1, &p0, &q0, &q1, p, p + 8 * stride, stride); +} + +static void SimpleVFilter16i_SSE2(uint8_t* p, int stride, int thresh) { + int k; + for (k = 3; k > 0; --k) { + p += 4 * stride; + SimpleVFilter16_SSE2(p, stride, thresh); + } +} + +static void SimpleHFilter16i_SSE2(uint8_t* p, int stride, int thresh) { + int k; + for (k = 3; k > 0; --k) { + p += 4; + SimpleHFilter16_SSE2(p, stride, thresh); + } +} + +//------------------------------------------------------------------------------ +// Complex In-loop filtering (Paragraph 15.3) + +#define MAX_DIFF1(p3, p2, p1, p0, m) do { \ + (m) = MM_ABS(p1, p0); \ + (m) = _mm_max_epu8(m, MM_ABS(p3, p2)); \ + (m) = _mm_max_epu8(m, MM_ABS(p2, p1)); \ +} while (0) + +#define MAX_DIFF2(p3, p2, p1, p0, m) do { \ + (m) = _mm_max_epu8(m, MM_ABS(p1, p0)); \ + (m) = _mm_max_epu8(m, MM_ABS(p3, p2)); \ + (m) = _mm_max_epu8(m, MM_ABS(p2, p1)); \ +} while (0) + +#define LOAD_H_EDGES4(p, stride, e1, e2, e3, e4) { \ + (e1) = _mm_loadu_si128((__m128i*)&(p)[0 * (stride)]); \ + (e2) = _mm_loadu_si128((__m128i*)&(p)[1 * (stride)]); \ + (e3) = _mm_loadu_si128((__m128i*)&(p)[2 * (stride)]); \ + (e4) = _mm_loadu_si128((__m128i*)&(p)[3 * (stride)]); \ +} + +#define LOADUV_H_EDGE(p, u, v, stride) do { \ + const __m128i U = _mm_loadl_epi64((__m128i*)&(u)[(stride)]); \ + const __m128i V = _mm_loadl_epi64((__m128i*)&(v)[(stride)]); \ + (p) = _mm_unpacklo_epi64(U, V); \ +} while (0) + +#define LOADUV_H_EDGES4(u, v, stride, e1, e2, e3, e4) { \ + LOADUV_H_EDGE(e1, u, v, 0 * (stride)); \ + LOADUV_H_EDGE(e2, u, v, 1 * (stride)); \ + LOADUV_H_EDGE(e3, u, v, 2 * (stride)); \ + LOADUV_H_EDGE(e4, u, v, 3 * (stride)); \ +} + +#define STOREUV(p, u, v, stride) { \ + _mm_storel_epi64((__m128i*)&(u)[(stride)], p); \ + (p) = _mm_srli_si128(p, 8); \ + _mm_storel_epi64((__m128i*)&(v)[(stride)], p); \ +} + +static WEBP_INLINE void ComplexMask_SSE2(const __m128i* const p1, + const __m128i* const p0, + const __m128i* const q0, + const __m128i* const q1, + int thresh, int ithresh, + __m128i* const mask) { + const __m128i it = _mm_set1_epi8(ithresh); + const __m128i diff = _mm_subs_epu8(*mask, it); + const __m128i thresh_mask = _mm_cmpeq_epi8(diff, _mm_setzero_si128()); + __m128i filter_mask; + NeedsFilter_SSE2(p1, p0, q0, q1, thresh, &filter_mask); + *mask = _mm_and_si128(thresh_mask, filter_mask); +} + +// on macroblock edges +static void 
VFilter16_SSE2(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + __m128i t1; + __m128i mask; + __m128i p2, p1, p0, q0, q1, q2; + + // Load p3, p2, p1, p0 + LOAD_H_EDGES4(p - 4 * stride, stride, t1, p2, p1, p0); + MAX_DIFF1(t1, p2, p1, p0, mask); + + // Load q0, q1, q2, q3 + LOAD_H_EDGES4(p, stride, q0, q1, q2, t1); + MAX_DIFF2(t1, q2, q1, q0, mask); + + ComplexMask_SSE2(&p1, &p0, &q0, &q1, thresh, ithresh, &mask); + DoFilter6_SSE2(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh); + + // Store + _mm_storeu_si128((__m128i*)&p[-3 * stride], p2); + _mm_storeu_si128((__m128i*)&p[-2 * stride], p1); + _mm_storeu_si128((__m128i*)&p[-1 * stride], p0); + _mm_storeu_si128((__m128i*)&p[+0 * stride], q0); + _mm_storeu_si128((__m128i*)&p[+1 * stride], q1); + _mm_storeu_si128((__m128i*)&p[+2 * stride], q2); +} + +static void HFilter16_SSE2(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + __m128i mask; + __m128i p3, p2, p1, p0, q0, q1, q2, q3; + + uint8_t* const b = p - 4; + Load16x4_SSE2(b, b + 8 * stride, stride, &p3, &p2, &p1, &p0); + MAX_DIFF1(p3, p2, p1, p0, mask); + + Load16x4_SSE2(p, p + 8 * stride, stride, &q0, &q1, &q2, &q3); + MAX_DIFF2(q3, q2, q1, q0, mask); + + ComplexMask_SSE2(&p1, &p0, &q0, &q1, thresh, ithresh, &mask); + DoFilter6_SSE2(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh); + + Store16x4_SSE2(&p3, &p2, &p1, &p0, b, b + 8 * stride, stride); + Store16x4_SSE2(&q0, &q1, &q2, &q3, p, p + 8 * stride, stride); +} + +// on three inner edges +static void VFilter16i_SSE2(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + int k; + __m128i p3, p2, p1, p0; // loop invariants + + LOAD_H_EDGES4(p, stride, p3, p2, p1, p0); // prologue + + for (k = 3; k > 0; --k) { + __m128i mask, tmp1, tmp2; + uint8_t* const b = p + 2 * stride; // beginning of p1 + p += 4 * stride; + + MAX_DIFF1(p3, p2, p1, p0, mask); // compute partial mask + LOAD_H_EDGES4(p, stride, p3, p2, tmp1, tmp2); + MAX_DIFF2(p3, p2, tmp1, tmp2, mask); + + // p3 and p2 are not just temporary variables here: they will be + // re-used for next span. And q2/q3 will become p1/p0 accordingly. 
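+ // (at this point this span's q0/q1 live in p3/p2 and q2/q3 in tmp1/tmp2,
+ // so p3/p2 are passed below where the filter expects q0/q1.)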
+ ComplexMask_SSE2(&p1, &p0, &p3, &p2, thresh, ithresh, &mask); + DoFilter4_SSE2(&p1, &p0, &p3, &p2, &mask, hev_thresh); + + // Store + _mm_storeu_si128((__m128i*)&b[0 * stride], p1); + _mm_storeu_si128((__m128i*)&b[1 * stride], p0); + _mm_storeu_si128((__m128i*)&b[2 * stride], p3); + _mm_storeu_si128((__m128i*)&b[3 * stride], p2); + + // rotate samples + p1 = tmp1; + p0 = tmp2; + } +} + +static void HFilter16i_SSE2(uint8_t* p, int stride, + int thresh, int ithresh, int hev_thresh) { + int k; + __m128i p3, p2, p1, p0; // loop invariants + + Load16x4_SSE2(p, p + 8 * stride, stride, &p3, &p2, &p1, &p0); // prologue + + for (k = 3; k > 0; --k) { + __m128i mask, tmp1, tmp2; + uint8_t* const b = p + 2; // beginning of p1 + + p += 4; // beginning of q0 (and next span) + + MAX_DIFF1(p3, p2, p1, p0, mask); // compute partial mask + Load16x4_SSE2(p, p + 8 * stride, stride, &p3, &p2, &tmp1, &tmp2); + MAX_DIFF2(p3, p2, tmp1, tmp2, mask); + + ComplexMask_SSE2(&p1, &p0, &p3, &p2, thresh, ithresh, &mask); + DoFilter4_SSE2(&p1, &p0, &p3, &p2, &mask, hev_thresh); + + Store16x4_SSE2(&p1, &p0, &p3, &p2, b, b + 8 * stride, stride); + + // rotate samples + p1 = tmp1; + p0 = tmp2; + } +} + +// 8-pixels wide variant, for chroma filtering +static void VFilter8_SSE2(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + __m128i mask; + __m128i t1, p2, p1, p0, q0, q1, q2; + + // Load p3, p2, p1, p0 + LOADUV_H_EDGES4(u - 4 * stride, v - 4 * stride, stride, t1, p2, p1, p0); + MAX_DIFF1(t1, p2, p1, p0, mask); + + // Load q0, q1, q2, q3 + LOADUV_H_EDGES4(u, v, stride, q0, q1, q2, t1); + MAX_DIFF2(t1, q2, q1, q0, mask); + + ComplexMask_SSE2(&p1, &p0, &q0, &q1, thresh, ithresh, &mask); + DoFilter6_SSE2(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh); + + // Store + STOREUV(p2, u, v, -3 * stride); + STOREUV(p1, u, v, -2 * stride); + STOREUV(p0, u, v, -1 * stride); + STOREUV(q0, u, v, 0 * stride); + STOREUV(q1, u, v, 1 * stride); + STOREUV(q2, u, v, 2 * stride); +} + +static void HFilter8_SSE2(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + __m128i mask; + __m128i p3, p2, p1, p0, q0, q1, q2, q3; + + uint8_t* const tu = u - 4; + uint8_t* const tv = v - 4; + Load16x4_SSE2(tu, tv, stride, &p3, &p2, &p1, &p0); + MAX_DIFF1(p3, p2, p1, p0, mask); + + Load16x4_SSE2(u, v, stride, &q0, &q1, &q2, &q3); + MAX_DIFF2(q3, q2, q1, q0, mask); + + ComplexMask_SSE2(&p1, &p0, &q0, &q1, thresh, ithresh, &mask); + DoFilter6_SSE2(&p2, &p1, &p0, &q0, &q1, &q2, &mask, hev_thresh); + + Store16x4_SSE2(&p3, &p2, &p1, &p0, tu, tv, stride); + Store16x4_SSE2(&q0, &q1, &q2, &q3, u, v, stride); +} + +static void VFilter8i_SSE2(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + __m128i mask; + __m128i t1, t2, p1, p0, q0, q1; + + // Load p3, p2, p1, p0 + LOADUV_H_EDGES4(u, v, stride, t2, t1, p1, p0); + MAX_DIFF1(t2, t1, p1, p0, mask); + + u += 4 * stride; + v += 4 * stride; + + // Load q0, q1, q2, q3 + LOADUV_H_EDGES4(u, v, stride, q0, q1, t1, t2); + MAX_DIFF2(t2, t1, q1, q0, mask); + + ComplexMask_SSE2(&p1, &p0, &q0, &q1, thresh, ithresh, &mask); + DoFilter4_SSE2(&p1, &p0, &q0, &q1, &mask, hev_thresh); + + // Store + STOREUV(p1, u, v, -2 * stride); + STOREUV(p0, u, v, -1 * stride); + STOREUV(q0, u, v, 0 * stride); + STOREUV(q1, u, v, 1 * stride); +} + +static void HFilter8i_SSE2(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_thresh) { + __m128i mask; + __m128i t1, t2, p1, p0, q0, q1; + Load16x4_SSE2(u, v, stride, &t2, &t1, &p1, &p0); // 
p3, p2, p1, p0
+ MAX_DIFF1(t2, t1, p1, p0, mask);
+
+ u += 4; // beginning of q0
+ v += 4;
+ Load16x4_SSE2(u, v, stride, &q0, &q1, &t1, &t2); // q0, q1, q2, q3
+ MAX_DIFF2(t2, t1, q1, q0, mask);
+
+ ComplexMask_SSE2(&p1, &p0, &q0, &q1, thresh, ithresh, &mask);
+ DoFilter4_SSE2(&p1, &p0, &q0, &q1, &mask, hev_thresh);
+
+ u -= 2; // beginning of p1
+ v -= 2;
+ Store16x4_SSE2(&p1, &p0, &q0, &q1, u, v, stride);
+}
+
+//------------------------------------------------------------------------------
+// 4x4 predictions
+
+#define DST(x, y) dst[(x) + (y) * BPS]
+#define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2)
+
+// We use the following 8b-arithmetic tricks:
+// (a + 2 * b + c + 2) >> 2 = (AC + b + 1) >> 1
+// where: AC = (a + c) >> 1 = [(a + c + 1) >> 1] - [(a^c) & 1]
+// and:
+// (a + 2 * b + c + 2) >> 2 = (AB + BC + 1) >> 1 - (ab|bc)&lsb
+// where: AB = (a + b + 1) >> 1, BC = (b + c + 1) >> 1
+// and ab = a ^ b, bc = b ^ c, lsb = (AB^BC)&1
+
+static void VE4_SSE2(uint8_t* dst) { // vertical
+ const __m128i one = _mm_set1_epi8(1);
+ const __m128i ABCDEFGH = _mm_loadl_epi64((__m128i*)(dst - BPS - 1));
+ const __m128i BCDEFGH0 = _mm_srli_si128(ABCDEFGH, 1);
+ const __m128i CDEFGH00 = _mm_srli_si128(ABCDEFGH, 2);
+ const __m128i a = _mm_avg_epu8(ABCDEFGH, CDEFGH00);
+ const __m128i lsb = _mm_and_si128(_mm_xor_si128(ABCDEFGH, CDEFGH00), one);
+ const __m128i b = _mm_subs_epu8(a, lsb);
+ const __m128i avg = _mm_avg_epu8(b, BCDEFGH0);
+ const uint32_t vals = _mm_cvtsi128_si32(avg);
+ int i;
+ for (i = 0; i < 4; ++i) {
+ WebPUint32ToMem(dst + i * BPS, vals);
+ }
+}
+
+static void LD4_SSE2(uint8_t* dst) { // Down-Left
+ const __m128i one = _mm_set1_epi8(1);
+ const __m128i ABCDEFGH = _mm_loadl_epi64((__m128i*)(dst - BPS));
+ const __m128i BCDEFGH0 = _mm_srli_si128(ABCDEFGH, 1);
+ const __m128i CDEFGH00 = _mm_srli_si128(ABCDEFGH, 2);
+ const __m128i CDEFGHH0 = _mm_insert_epi16(CDEFGH00, dst[-BPS + 7], 3);
+ const __m128i avg1 = _mm_avg_epu8(ABCDEFGH, CDEFGHH0);
+ const __m128i lsb = _mm_and_si128(_mm_xor_si128(ABCDEFGH, CDEFGHH0), one);
+ const __m128i avg2 = _mm_subs_epu8(avg1, lsb);
+ const __m128i abcdefg = _mm_avg_epu8(avg2, BCDEFGH0);
+ WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32( abcdefg ));
+ WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 1)));
+ WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 2)));
+ WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 3)));
+}
+
+static void VR4_SSE2(uint8_t* dst) { // Vertical-Right
+ const __m128i one = _mm_set1_epi8(1);
+ const int I = dst[-1 + 0 * BPS];
+ const int J = dst[-1 + 1 * BPS];
+ const int K = dst[-1 + 2 * BPS];
+ const int X = dst[-1 - BPS];
+ const __m128i XABCD = _mm_loadl_epi64((__m128i*)(dst - BPS - 1));
+ const __m128i ABCD0 = _mm_srli_si128(XABCD, 1);
+ const __m128i abcd = _mm_avg_epu8(XABCD, ABCD0);
+ const __m128i _XABCD = _mm_slli_si128(XABCD, 1);
+ const __m128i IXABCD = _mm_insert_epi16(_XABCD, (short)(I | (X << 8)), 0);
+ const __m128i avg1 = _mm_avg_epu8(IXABCD, ABCD0);
+ const __m128i lsb = _mm_and_si128(_mm_xor_si128(IXABCD, ABCD0), one);
+ const __m128i avg2 = _mm_subs_epu8(avg1, lsb);
+ const __m128i efgh = _mm_avg_epu8(avg2, XABCD);
+ WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32( abcd ));
+ WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32( efgh ));
+ WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_slli_si128(abcd, 1)));
+ WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(_mm_slli_si128(efgh, 1)));
+
+ // these two are hard to implement in
SSE2, so we keep the C-version: + DST(0, 2) = AVG3(J, I, X); + DST(0, 3) = AVG3(K, J, I); +} + +static void VL4_SSE2(uint8_t* dst) { // Vertical-Left + const __m128i one = _mm_set1_epi8(1); + const __m128i ABCDEFGH = _mm_loadl_epi64((__m128i*)(dst - BPS)); + const __m128i BCDEFGH_ = _mm_srli_si128(ABCDEFGH, 1); + const __m128i CDEFGH__ = _mm_srli_si128(ABCDEFGH, 2); + const __m128i avg1 = _mm_avg_epu8(ABCDEFGH, BCDEFGH_); + const __m128i avg2 = _mm_avg_epu8(CDEFGH__, BCDEFGH_); + const __m128i avg3 = _mm_avg_epu8(avg1, avg2); + const __m128i lsb1 = _mm_and_si128(_mm_xor_si128(avg1, avg2), one); + const __m128i ab = _mm_xor_si128(ABCDEFGH, BCDEFGH_); + const __m128i bc = _mm_xor_si128(CDEFGH__, BCDEFGH_); + const __m128i abbc = _mm_or_si128(ab, bc); + const __m128i lsb2 = _mm_and_si128(abbc, lsb1); + const __m128i avg4 = _mm_subs_epu8(avg3, lsb2); + const uint32_t extra_out = _mm_cvtsi128_si32(_mm_srli_si128(avg4, 4)); + WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32( avg1 )); + WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32( avg4 )); + WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(avg1, 1))); + WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(avg4, 1))); + + // these two are hard to get and irregular + DST(3, 2) = (extra_out >> 0) & 0xff; + DST(3, 3) = (extra_out >> 8) & 0xff; +} + +static void RD4_SSE2(uint8_t* dst) { // Down-right + const __m128i one = _mm_set1_epi8(1); + const __m128i XABCD = _mm_loadl_epi64((__m128i*)(dst - BPS - 1)); + const __m128i ____XABCD = _mm_slli_si128(XABCD, 4); + const uint32_t I = dst[-1 + 0 * BPS]; + const uint32_t J = dst[-1 + 1 * BPS]; + const uint32_t K = dst[-1 + 2 * BPS]; + const uint32_t L = dst[-1 + 3 * BPS]; + const __m128i LKJI_____ = + _mm_cvtsi32_si128(L | (K << 8) | (J << 16) | (I << 24)); + const __m128i LKJIXABCD = _mm_or_si128(LKJI_____, ____XABCD); + const __m128i KJIXABCD_ = _mm_srli_si128(LKJIXABCD, 1); + const __m128i JIXABCD__ = _mm_srli_si128(LKJIXABCD, 2); + const __m128i avg1 = _mm_avg_epu8(JIXABCD__, LKJIXABCD); + const __m128i lsb = _mm_and_si128(_mm_xor_si128(JIXABCD__, LKJIXABCD), one); + const __m128i avg2 = _mm_subs_epu8(avg1, lsb); + const __m128i abcdefg = _mm_avg_epu8(avg2, KJIXABCD_); + WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32( abcdefg )); + WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 1))); + WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 2))); + WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 3))); +} + +#undef DST +#undef AVG3 + +//------------------------------------------------------------------------------ +// Luma 16x16 + +static WEBP_INLINE void TrueMotion_SSE2(uint8_t* dst, int size) { + const uint8_t* top = dst - BPS; + const __m128i zero = _mm_setzero_si128(); + int y; + if (size == 4) { + const __m128i top_values = _mm_cvtsi32_si128(WebPMemToUint32(top)); + const __m128i top_base = _mm_unpacklo_epi8(top_values, zero); + for (y = 0; y < 4; ++y, dst += BPS) { + const int val = dst[-1] - top[-1]; + const __m128i base = _mm_set1_epi16(val); + const __m128i out = _mm_packus_epi16(_mm_add_epi16(base, top_base), zero); + WebPUint32ToMem(dst, _mm_cvtsi128_si32(out)); + } + } else if (size == 8) { + const __m128i top_values = _mm_loadl_epi64((const __m128i*)top); + const __m128i top_base = _mm_unpacklo_epi8(top_values, zero); + for (y = 0; y < 8; ++y, dst += BPS) { + const int val = dst[-1] - top[-1]; + const __m128i base = _mm_set1_epi16(val); + const __m128i out = 
_mm_packus_epi16(_mm_add_epi16(base, top_base), zero); + _mm_storel_epi64((__m128i*)dst, out); + } + } else { + const __m128i top_values = _mm_loadu_si128((const __m128i*)top); + const __m128i top_base_0 = _mm_unpacklo_epi8(top_values, zero); + const __m128i top_base_1 = _mm_unpackhi_epi8(top_values, zero); + for (y = 0; y < 16; ++y, dst += BPS) { + const int val = dst[-1] - top[-1]; + const __m128i base = _mm_set1_epi16(val); + const __m128i out_0 = _mm_add_epi16(base, top_base_0); + const __m128i out_1 = _mm_add_epi16(base, top_base_1); + const __m128i out = _mm_packus_epi16(out_0, out_1); + _mm_storeu_si128((__m128i*)dst, out); + } + } +} + +static void TM4_SSE2(uint8_t* dst) { TrueMotion_SSE2(dst, 4); } +static void TM8uv_SSE2(uint8_t* dst) { TrueMotion_SSE2(dst, 8); } +static void TM16_SSE2(uint8_t* dst) { TrueMotion_SSE2(dst, 16); } + +static void VE16_SSE2(uint8_t* dst) { + const __m128i top = _mm_loadu_si128((const __m128i*)(dst - BPS)); + int j; + for (j = 0; j < 16; ++j) { + _mm_storeu_si128((__m128i*)(dst + j * BPS), top); + } +} + +static void HE16_SSE2(uint8_t* dst) { // horizontal + int j; + for (j = 16; j > 0; --j) { + const __m128i values = _mm_set1_epi8(dst[-1]); + _mm_storeu_si128((__m128i*)dst, values); + dst += BPS; + } +} + +static WEBP_INLINE void Put16_SSE2(uint8_t v, uint8_t* dst) { + int j; + const __m128i values = _mm_set1_epi8(v); + for (j = 0; j < 16; ++j) { + _mm_storeu_si128((__m128i*)(dst + j * BPS), values); + } +} + +static void DC16_SSE2(uint8_t* dst) { // DC + const __m128i zero = _mm_setzero_si128(); + const __m128i top = _mm_loadu_si128((const __m128i*)(dst - BPS)); + const __m128i sad8x2 = _mm_sad_epu8(top, zero); + // sum the two sads: sad8x2[0:1] + sad8x2[8:9] + const __m128i sum = _mm_add_epi16(sad8x2, _mm_shuffle_epi32(sad8x2, 2)); + int left = 0; + int j; + for (j = 0; j < 16; ++j) { + left += dst[-1 + j * BPS]; + } + { + const int DC = _mm_cvtsi128_si32(sum) + left + 16; + Put16_SSE2(DC >> 5, dst); + } +} + +static void DC16NoTop_SSE2(uint8_t* dst) { // DC with top samples unavailable + int DC = 8; + int j; + for (j = 0; j < 16; ++j) { + DC += dst[-1 + j * BPS]; + } + Put16_SSE2(DC >> 4, dst); +} + +static void DC16NoLeft_SSE2(uint8_t* dst) { // DC with left samples unavailable + const __m128i zero = _mm_setzero_si128(); + const __m128i top = _mm_loadu_si128((const __m128i*)(dst - BPS)); + const __m128i sad8x2 = _mm_sad_epu8(top, zero); + // sum the two sads: sad8x2[0:1] + sad8x2[8:9] + const __m128i sum = _mm_add_epi16(sad8x2, _mm_shuffle_epi32(sad8x2, 2)); + const int DC = _mm_cvtsi128_si32(sum) + 8; + Put16_SSE2(DC >> 4, dst); +} + +static void DC16NoTopLeft_SSE2(uint8_t* dst) { // DC with no top & left samples + Put16_SSE2(0x80, dst); +} + +//------------------------------------------------------------------------------ +// Chroma + +static void VE8uv_SSE2(uint8_t* dst) { // vertical + int j; + const __m128i top = _mm_loadl_epi64((const __m128i*)(dst - BPS)); + for (j = 0; j < 8; ++j) { + _mm_storel_epi64((__m128i*)(dst + j * BPS), top); + } +} + +// helper for chroma-DC predictions +static WEBP_INLINE void Put8x8uv_SSE2(uint8_t v, uint8_t* dst) { + int j; + const __m128i values = _mm_set1_epi8(v); + for (j = 0; j < 8; ++j) { + _mm_storel_epi64((__m128i*)(dst + j * BPS), values); + } +} + +static void DC8uv_SSE2(uint8_t* dst) { // DC + const __m128i zero = _mm_setzero_si128(); + const __m128i top = _mm_loadl_epi64((const __m128i*)(dst - BPS)); + const __m128i sum = _mm_sad_epu8(top, zero); + int left = 0; + int j; + for (j = 0; j < 8; ++j) { 
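+ // sum the 8 samples of the left column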
+ left += dst[-1 + j * BPS]; + } + { + const int DC = _mm_cvtsi128_si32(sum) + left + 8; + Put8x8uv_SSE2(DC >> 4, dst); + } +} + +static void DC8uvNoLeft_SSE2(uint8_t* dst) { // DC with no left samples + const __m128i zero = _mm_setzero_si128(); + const __m128i top = _mm_loadl_epi64((const __m128i*)(dst - BPS)); + const __m128i sum = _mm_sad_epu8(top, zero); + const int DC = _mm_cvtsi128_si32(sum) + 4; + Put8x8uv_SSE2(DC >> 3, dst); +} + +static void DC8uvNoTop_SSE2(uint8_t* dst) { // DC with no top samples + int dc0 = 4; + int i; + for (i = 0; i < 8; ++i) { + dc0 += dst[-1 + i * BPS]; + } + Put8x8uv_SSE2(dc0 >> 3, dst); +} + +static void DC8uvNoTopLeft_SSE2(uint8_t* dst) { // DC with nothing + Put8x8uv_SSE2(0x80, dst); +} + +//------------------------------------------------------------------------------ +// Entry point + +extern void VP8DspInitSSE2(void); + +WEBP_TSAN_IGNORE_FUNCTION void VP8DspInitSSE2(void) { + VP8Transform = Transform_SSE2; +#if (USE_TRANSFORM_AC3 == 1) + VP8TransformAC3 = TransformAC3_SSE2; +#endif + + VP8VFilter16 = VFilter16_SSE2; + VP8HFilter16 = HFilter16_SSE2; + VP8VFilter8 = VFilter8_SSE2; + VP8HFilter8 = HFilter8_SSE2; + VP8VFilter16i = VFilter16i_SSE2; + VP8HFilter16i = HFilter16i_SSE2; + VP8VFilter8i = VFilter8i_SSE2; + VP8HFilter8i = HFilter8i_SSE2; + + VP8SimpleVFilter16 = SimpleVFilter16_SSE2; + VP8SimpleHFilter16 = SimpleHFilter16_SSE2; + VP8SimpleVFilter16i = SimpleVFilter16i_SSE2; + VP8SimpleHFilter16i = SimpleHFilter16i_SSE2; + + VP8PredLuma4[1] = TM4_SSE2; + VP8PredLuma4[2] = VE4_SSE2; + VP8PredLuma4[4] = RD4_SSE2; + VP8PredLuma4[5] = VR4_SSE2; + VP8PredLuma4[6] = LD4_SSE2; + VP8PredLuma4[7] = VL4_SSE2; + + VP8PredLuma16[0] = DC16_SSE2; + VP8PredLuma16[1] = TM16_SSE2; + VP8PredLuma16[2] = VE16_SSE2; + VP8PredLuma16[3] = HE16_SSE2; + VP8PredLuma16[4] = DC16NoTop_SSE2; + VP8PredLuma16[5] = DC16NoLeft_SSE2; + VP8PredLuma16[6] = DC16NoTopLeft_SSE2; + + VP8PredChroma8[0] = DC8uv_SSE2; + VP8PredChroma8[1] = TM8uv_SSE2; + VP8PredChroma8[2] = VE8uv_SSE2; + VP8PredChroma8[4] = DC8uvNoTop_SSE2; + VP8PredChroma8[5] = DC8uvNoLeft_SSE2; + VP8PredChroma8[6] = DC8uvNoTopLeft_SSE2; +} + +#else // !WEBP_USE_SSE2 + +WEBP_DSP_INIT_STUB(VP8DspInitSSE2) + +#endif // WEBP_USE_SSE2 diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_dec_sse41.c b/vendor/github.com/sizeofint/webpanimation/dsp_dec_sse41.c new file mode 100644 index 00000000..84e4b096 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_dec_sse41.c @@ -0,0 +1,46 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// SSE4 version of some decoding functions. 
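+// (Only HE16, the horizontal 16x16 predictor, is specialized here, using
+// _mm_shuffle_epi8 to broadcast the left-column sample to all 16 lanes.)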
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include "dsp_dsp.h"
+
+#if defined(WEBP_USE_SSE41)
+
+#include <smmintrin.h>
+#include "dec_vp8i_dec.h"
+#include "utils_utils.h"
+
+static void HE16_SSE41(uint8_t* dst) { // horizontal
+ int j;
+ const __m128i kShuffle3 = _mm_set1_epi8(3);
+ for (j = 16; j > 0; --j) {
+ const __m128i in = _mm_cvtsi32_si128(WebPMemToUint32(dst - 4));
+ const __m128i values = _mm_shuffle_epi8(in, kShuffle3);
+ _mm_storeu_si128((__m128i*)dst, values);
+ dst += BPS;
+ }
+}
+
+//------------------------------------------------------------------------------
+// Entry point
+
+extern void VP8DspInitSSE41(void);
+
+WEBP_TSAN_IGNORE_FUNCTION void VP8DspInitSSE41(void) {
+ VP8PredLuma16[3] = HE16_SSE41;
+}
+
+#else // !WEBP_USE_SSE41
+
+WEBP_DSP_INIT_STUB(VP8DspInitSSE41)
+
+#endif // WEBP_USE_SSE41
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_dsp.h b/vendor/github.com/sizeofint/webpanimation/dsp_dsp.h
new file mode 100644
index 00000000..33ec5971
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_dsp.h
@@ -0,0 +1,686 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Speed-critical functions.
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#ifndef WEBP_DSP_DSP_H_
+#define WEBP_DSP_DSP_H_
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "webp_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define BPS 32 // this is the common stride for enc/dec
+
+//------------------------------------------------------------------------------
+// CPU detection
+
+#if defined(__GNUC__)
+# define LOCAL_GCC_VERSION ((__GNUC__ << 8) | __GNUC_MINOR__)
+# define LOCAL_GCC_PREREQ(maj, min) \
+ (LOCAL_GCC_VERSION >= (((maj) << 8) | (min)))
+#else
+# define LOCAL_GCC_VERSION 0
+# define LOCAL_GCC_PREREQ(maj, min) 0
+#endif
+
+#if defined(__clang__)
+# define LOCAL_CLANG_VERSION ((__clang_major__ << 8) | __clang_minor__)
+# define LOCAL_CLANG_PREREQ(maj, min) \
+ (LOCAL_CLANG_VERSION >= (((maj) << 8) | (min)))
+#else
+# define LOCAL_CLANG_VERSION 0
+# define LOCAL_CLANG_PREREQ(maj, min) 0
+#endif
+
+#ifndef __has_builtin
+# define __has_builtin(x) 0
+#endif
+
+#if !defined(HAVE_CONFIG_H)
+#if defined(_MSC_VER) && _MSC_VER > 1310 && \
+ (defined(_M_X64) || defined(_M_IX86))
+#define WEBP_MSC_SSE2 // Visual C++ SSE2 targets
+#endif
+
+#if defined(_MSC_VER) && _MSC_VER >= 1500 && \
+ (defined(_M_X64) || defined(_M_IX86))
+#define WEBP_MSC_SSE41 // Visual C++ SSE4.1 targets
+#endif
+#endif
+
+// WEBP_HAVE_* are used to indicate the presence of the instruction set in dsp
+// files without intrinsics, allowing the corresponding Init() to be called.
+// Files containing intrinsics will need to be built targeting the instruction
+// set so should succeed on one of the earlier tests.
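+// (e.g. compiling dsp_dec_sse2.c with -msse2 defines __SSE2__ directly, while
+// WEBP_HAVE_SSE2 can be defined by the build system for files that only
+// dispatch to the SSE2 code.)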
+#if defined(__SSE2__) || defined(WEBP_MSC_SSE2) || defined(WEBP_HAVE_SSE2)
+#define WEBP_USE_SSE2
+#endif
+
+#if defined(__SSE4_1__) || defined(WEBP_MSC_SSE41) || defined(WEBP_HAVE_SSE41)
+#define WEBP_USE_SSE41
+#endif
+
+#undef WEBP_MSC_SSE41
+#undef WEBP_MSC_SSE2
+
+// The intrinsics currently cause compiler errors with arm-nacl-gcc and the
+// inline assembly would need to be modified for use with Native Client.
+#if (defined(__ARM_NEON__) || \
+ defined(__aarch64__) || defined(WEBP_HAVE_NEON)) && \
+ !defined(__native_client__)
+#define WEBP_USE_NEON
+#endif
+
+#if !defined(WEBP_USE_NEON) && defined(__ANDROID__) && \
+ defined(__ARM_ARCH_7A__) && defined(HAVE_CPU_FEATURES_H)
+#define WEBP_ANDROID_NEON // Android targets that may have NEON
+#define WEBP_USE_NEON
+#endif
+
+#if defined(_MSC_VER) && _MSC_VER >= 1700 && defined(_M_ARM)
+#define WEBP_USE_NEON
+#define WEBP_USE_INTRINSICS
+#endif
+
+#if defined(__mips__) && !defined(__mips64) && \
+ defined(__mips_isa_rev) && (__mips_isa_rev >= 1) && (__mips_isa_rev < 6)
+#define WEBP_USE_MIPS32
+#if (__mips_isa_rev >= 2)
+#define WEBP_USE_MIPS32_R2
+#if defined(__mips_dspr2) || (defined(__mips_dsp_rev) && __mips_dsp_rev >= 2)
+#define WEBP_USE_MIPS_DSP_R2
+#endif
+#endif
+#endif
+
+#if defined(__mips_msa) && defined(__mips_isa_rev) && (__mips_isa_rev >= 5)
+#define WEBP_USE_MSA
+#endif
+
+#ifndef WEBP_DSP_OMIT_C_CODE
+#define WEBP_DSP_OMIT_C_CODE 1
+#endif
+
+#if (defined(__aarch64__) || defined(__ARM_NEON__)) && WEBP_DSP_OMIT_C_CODE
+#define WEBP_NEON_OMIT_C_CODE 1
+#else
+#define WEBP_NEON_OMIT_C_CODE 0
+#endif
+
+#if !(LOCAL_CLANG_PREREQ(3,8) || LOCAL_GCC_PREREQ(4,8) || defined(__aarch64__))
+#define WEBP_NEON_WORK_AROUND_GCC 1
+#else
+#define WEBP_NEON_WORK_AROUND_GCC 0
+#endif
+
+// This macro prevents thread_sanitizer from reporting known concurrent writes.
+#define WEBP_TSAN_IGNORE_FUNCTION
+#if defined(__has_feature)
+#if __has_feature(thread_sanitizer)
+#undef WEBP_TSAN_IGNORE_FUNCTION
+#define WEBP_TSAN_IGNORE_FUNCTION __attribute__((no_sanitize_thread))
+#endif
+#endif
+
+#if defined(WEBP_USE_THREAD) && !defined(_WIN32)
+#include <pthread.h> // NOLINT
+
+#define WEBP_DSP_INIT(func) do { \
+ static volatile VP8CPUInfo func ## _last_cpuinfo_used = \
+ (VP8CPUInfo)&func ## _last_cpuinfo_used; \
+ static pthread_mutex_t func ## _lock = PTHREAD_MUTEX_INITIALIZER; \
+ if (pthread_mutex_lock(&func ## _lock)) break; \
+ if (func ## _last_cpuinfo_used != VP8GetCPUInfo) func(); \
+ func ## _last_cpuinfo_used = VP8GetCPUInfo; \
+ (void)pthread_mutex_unlock(&func ## _lock); \
+} while (0)
+#else // !(defined(WEBP_USE_THREAD) && !defined(_WIN32))
+#define WEBP_DSP_INIT(func) do { \
+ static volatile VP8CPUInfo func ## _last_cpuinfo_used = \
+ (VP8CPUInfo)&func ## _last_cpuinfo_used; \
+ if (func ## _last_cpuinfo_used == VP8GetCPUInfo) break; \
+ func(); \
+ func ## _last_cpuinfo_used = VP8GetCPUInfo; \
+} while (0)
+#endif // defined(WEBP_USE_THREAD) && !defined(_WIN32)
+
+// Defines an Init + helper function that controls multiple initialization of
+// function pointers / tables.
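+// The guard remembers the last VP8GetCPUInfo pointer seen and re-runs the
+// body only when that hook changes; with WEBP_USE_THREAD a pthread mutex
+// makes concurrent first calls safe.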
+/* Usage: + WEBP_DSP_INIT_FUNC(InitFunc) { + ...function body + } +*/ +#define WEBP_DSP_INIT_FUNC(name) \ + static WEBP_TSAN_IGNORE_FUNCTION void name ## _body(void); \ + WEBP_TSAN_IGNORE_FUNCTION void name(void) { \ + WEBP_DSP_INIT(name ## _body); \ + } \ + static WEBP_TSAN_IGNORE_FUNCTION void name ## _body(void) + +#define WEBP_UBSAN_IGNORE_UNDEF +#define WEBP_UBSAN_IGNORE_UNSIGNED_OVERFLOW +#if defined(__clang__) && defined(__has_attribute) +#if __has_attribute(no_sanitize) +// This macro prevents the undefined behavior sanitizer from reporting +// failures. This is only meant to silence unaligned loads on platforms that +// are known to support them. +#undef WEBP_UBSAN_IGNORE_UNDEF +#define WEBP_UBSAN_IGNORE_UNDEF \ + __attribute__((no_sanitize("undefined"))) + +// This macro prevents the undefined behavior sanitizer from reporting +// failures related to unsigned integer overflows. This is only meant to +// silence cases where this well defined behavior is expected. +#undef WEBP_UBSAN_IGNORE_UNSIGNED_OVERFLOW +#define WEBP_UBSAN_IGNORE_UNSIGNED_OVERFLOW \ + __attribute__((no_sanitize("unsigned-integer-overflow"))) +#endif +#endif + +// If 'ptr' is NULL, returns NULL. Otherwise returns 'ptr + off'. +// Prevents undefined behavior sanitizer nullptr-with-nonzero-offset warning. +#if !defined(WEBP_OFFSET_PTR) +#define WEBP_OFFSET_PTR(ptr, off) (((ptr) == NULL) ? NULL : ((ptr) + (off))) +#endif + +// Regularize the definition of WEBP_SWAP_16BIT_CSP (backward compatibility) +#if !defined(WEBP_SWAP_16BIT_CSP) +#define WEBP_SWAP_16BIT_CSP 0 +#endif + +// some endian fix (e.g.: mips-gcc doesn't define __BIG_ENDIAN__) +#if !defined(WORDS_BIGENDIAN) && \ + (defined(__BIG_ENDIAN__) || defined(_M_PPC) || \ + (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))) +#define WORDS_BIGENDIAN +#endif + +typedef enum { + kSSE2, + kSSE3, + kSlowSSSE3, // special feature for slow SSSE3 architectures + kSSE4_1, + kAVX, + kAVX2, + kNEON, + kMIPS32, + kMIPSdspR2, + kMSA +} CPUFeature; +// returns true if the CPU supports the feature. +typedef int (*VP8CPUInfo)(CPUFeature feature); +WEBP_EXTERN VP8CPUInfo VP8GetCPUInfo; + +//------------------------------------------------------------------------------ +// Init stub generator + +// Defines an init function stub to ensure each module exposes a symbol, +// avoiding a compiler warning. +#define WEBP_DSP_INIT_STUB(func) \ + extern void func(void); \ + void func(void) {} + +//------------------------------------------------------------------------------ +// Encoding + +// Transforms +// VP8Idct: Does one of two inverse transforms. If do_two is set, the transforms +// will be done for (ref, in, dst) and (ref + 4, in + 16, dst + 4). +typedef void (*VP8Idct)(const uint8_t* ref, const int16_t* in, uint8_t* dst, + int do_two); +typedef void (*VP8Fdct)(const uint8_t* src, const uint8_t* ref, int16_t* out); +typedef void (*VP8WHT)(const int16_t* in, int16_t* out); +extern VP8Idct VP8ITransform; +extern VP8Fdct VP8FTransform; +extern VP8Fdct VP8FTransform2; // performs two transforms at a time +extern VP8WHT VP8FTransformWHT; +// Predictions +// *dst is the destination block. *top and *left can be NULL. 
+typedef void (*VP8IntraPreds)(uint8_t* dst, const uint8_t* left, + const uint8_t* top); +typedef void (*VP8Intra4Preds)(uint8_t* dst, const uint8_t* top); +extern VP8Intra4Preds VP8EncPredLuma4; +extern VP8IntraPreds VP8EncPredLuma16; +extern VP8IntraPreds VP8EncPredChroma8; + +typedef int (*VP8Metric)(const uint8_t* pix, const uint8_t* ref); +extern VP8Metric VP8SSE16x16, VP8SSE16x8, VP8SSE8x8, VP8SSE4x4; +typedef int (*VP8WMetric)(const uint8_t* pix, const uint8_t* ref, + const uint16_t* const weights); +// The weights for VP8TDisto4x4 and VP8TDisto16x16 contain a row-major +// 4 by 4 symmetric matrix. +extern VP8WMetric VP8TDisto4x4, VP8TDisto16x16; + +// Compute the average (DC) of four 4x4 blocks. +// Each sub-4x4 block #i sum is stored in dc[i]. +typedef void (*VP8MeanMetric)(const uint8_t* ref, uint32_t dc[4]); +extern VP8MeanMetric VP8Mean16x4; + +typedef void (*VP8BlockCopy)(const uint8_t* src, uint8_t* dst); +extern VP8BlockCopy VP8Copy4x4; +extern VP8BlockCopy VP8Copy16x8; +// Quantization +struct VP8Matrix; // forward declaration +typedef int (*VP8QuantizeBlock)(int16_t in[16], int16_t out[16], + const struct VP8Matrix* const mtx); +// Same as VP8QuantizeBlock, but quantizes two consecutive blocks. +typedef int (*VP8Quantize2Blocks)(int16_t in[32], int16_t out[32], + const struct VP8Matrix* const mtx); + +extern VP8QuantizeBlock VP8EncQuantizeBlock; +extern VP8Quantize2Blocks VP8EncQuantize2Blocks; + +// specific to 2nd transform: +typedef int (*VP8QuantizeBlockWHT)(int16_t in[16], int16_t out[16], + const struct VP8Matrix* const mtx); +extern VP8QuantizeBlockWHT VP8EncQuantizeBlockWHT; + +extern const int VP8DspScan[16 + 4 + 4]; + +// Collect histogram for susceptibility calculation. +#define MAX_COEFF_THRESH 31 // size of histogram used by CollectHistogram. +typedef struct { + // We only need to store max_value and last_non_zero, not the distribution. + int max_value; + int last_non_zero; +} VP8Histogram; +typedef void (*VP8CHisto)(const uint8_t* ref, const uint8_t* pred, + int start_block, int end_block, + VP8Histogram* const histo); +extern VP8CHisto VP8CollectHistogram; +// General-purpose util function to help VP8CollectHistogram(). +void VP8SetHistogramData(const int distribution[MAX_COEFF_THRESH + 1], + VP8Histogram* const histo); + +// must be called before using any of the above +void VP8EncDspInit(void); + +//------------------------------------------------------------------------------ +// cost functions (encoding) + +extern const uint16_t VP8EntropyCost[256]; // 8bit fixed-point log(p) +// approximate cost per level: +extern const uint16_t VP8LevelFixedCosts[2047 /*MAX_LEVEL*/ + 1]; +extern const uint8_t VP8EncBands[16 + 1]; + +struct VP8Residual; +typedef void (*VP8SetResidualCoeffsFunc)(const int16_t* const coeffs, + struct VP8Residual* const res); +extern VP8SetResidualCoeffsFunc VP8SetResidualCoeffs; + +// Cost calculation function. +typedef int (*VP8GetResidualCostFunc)(int ctx0, + const struct VP8Residual* const res); +extern VP8GetResidualCostFunc VP8GetResidualCost; + +// must be called before anything using the above +void VP8EncDspCostInit(void); + +//------------------------------------------------------------------------------ +// SSIM / PSNR utils + +// struct for accumulating statistical moments +typedef struct { + uint32_t w; // sum(w_i) : sum of weights + uint32_t xm, ym; // sum(w_i * x_i), sum(w_i * y_i) + uint32_t xxm, xym, yym; // sum(w_i * x_i * x_i), etc. 
+} VP8DistoStats; + +// Compute the final SSIM value +// The non-clipped version assumes stats->w = (2 * VP8_SSIM_KERNEL + 1)^2. +double VP8SSIMFromStats(const VP8DistoStats* const stats); +double VP8SSIMFromStatsClipped(const VP8DistoStats* const stats); + +#define VP8_SSIM_KERNEL 3 // total size of the kernel: 2 * VP8_SSIM_KERNEL + 1 +typedef double (*VP8SSIMGetClippedFunc)(const uint8_t* src1, int stride1, + const uint8_t* src2, int stride2, + int xo, int yo, // center position + int W, int H); // plane dimension + +#if !defined(WEBP_REDUCE_SIZE) +// This version is called with the guarantee that you can load 8 bytes and +// 8 rows at offset src1 and src2 +typedef double (*VP8SSIMGetFunc)(const uint8_t* src1, int stride1, + const uint8_t* src2, int stride2); + +extern VP8SSIMGetFunc VP8SSIMGet; // unclipped / unchecked +extern VP8SSIMGetClippedFunc VP8SSIMGetClipped; // with clipping +#endif + +#if !defined(WEBP_DISABLE_STATS) +typedef uint32_t (*VP8AccumulateSSEFunc)(const uint8_t* src1, + const uint8_t* src2, int len); +extern VP8AccumulateSSEFunc VP8AccumulateSSE; +#endif + +// must be called before using any of the above directly +void VP8SSIMDspInit(void); + +//------------------------------------------------------------------------------ +// Decoding + +typedef void (*VP8DecIdct)(const int16_t* coeffs, uint8_t* dst); +// when doing two transforms, coeffs is actually int16_t[2][16]. +typedef void (*VP8DecIdct2)(const int16_t* coeffs, uint8_t* dst, int do_two); +extern VP8DecIdct2 VP8Transform; +extern VP8DecIdct VP8TransformAC3; +extern VP8DecIdct VP8TransformUV; +extern VP8DecIdct VP8TransformDC; +extern VP8DecIdct VP8TransformDCUV; +extern VP8WHT VP8TransformWHT; + +// *dst is the destination block, with stride BPS. Boundary samples are +// assumed accessible when needed. +typedef void (*VP8PredFunc)(uint8_t* dst); +extern VP8PredFunc VP8PredLuma16[/* NUM_B_DC_MODES */]; +extern VP8PredFunc VP8PredChroma8[/* NUM_B_DC_MODES */]; +extern VP8PredFunc VP8PredLuma4[/* NUM_BMODES */]; + +// clipping tables (for filtering) +extern const int8_t* const VP8ksclip1; // clips [-1020, 1020] to [-128, 127] +extern const int8_t* const VP8ksclip2; // clips [-112, 112] to [-16, 15] +extern const uint8_t* const VP8kclip1; // clips [-255,511] to [0,255] +extern const uint8_t* const VP8kabs0; // abs(x) for x in [-255,255] +// must be called first +void VP8InitClipTables(void); + +// simple filter (only for luma) +typedef void (*VP8SimpleFilterFunc)(uint8_t* p, int stride, int thresh); +extern VP8SimpleFilterFunc VP8SimpleVFilter16; +extern VP8SimpleFilterFunc VP8SimpleHFilter16; +extern VP8SimpleFilterFunc VP8SimpleVFilter16i; // filter 3 inner edges +extern VP8SimpleFilterFunc VP8SimpleHFilter16i; + +// regular filter (on both macroblock edges and inner edges) +typedef void (*VP8LumaFilterFunc)(uint8_t* luma, int stride, + int thresh, int ithresh, int hev_t); +typedef void (*VP8ChromaFilterFunc)(uint8_t* u, uint8_t* v, int stride, + int thresh, int ithresh, int hev_t); +// on outer edge +extern VP8LumaFilterFunc VP8VFilter16; +extern VP8LumaFilterFunc VP8HFilter16; +extern VP8ChromaFilterFunc VP8VFilter8; +extern VP8ChromaFilterFunc VP8HFilter8; + +// on inner edge +extern VP8LumaFilterFunc VP8VFilter16i; // filtering 3 inner edges altogether +extern VP8LumaFilterFunc VP8HFilter16i; +extern VP8ChromaFilterFunc VP8VFilter8i; // filtering u and v altogether +extern VP8ChromaFilterFunc VP8HFilter8i; + +// Dithering. 
Combines dithering values (centered around 128) with dst[],
+// according to: dst[] = clip(dst[] + (((dither[]-128) + 8) >> 4), 0, 255)
+#define VP8_DITHER_DESCALE 4
+#define VP8_DITHER_DESCALE_ROUNDER (1 << (VP8_DITHER_DESCALE - 1))
+#define VP8_DITHER_AMP_BITS 7
+#define VP8_DITHER_AMP_CENTER (1 << VP8_DITHER_AMP_BITS)
+extern void (*VP8DitherCombine8x8)(const uint8_t* dither, uint8_t* dst,
+ int dst_stride);
+
+// must be called before anything using the above
+void VP8DspInit(void);
+
+//------------------------------------------------------------------------------
+// WebP I/O
+
+#define FANCY_UPSAMPLING // undefined to remove fancy upsampling support
+
+// Convert a pair of y/u/v lines together to the output rgb/a colorspace.
+// bottom_y can be NULL if only one line of output is needed (at top/bottom).
+typedef void (*WebPUpsampleLinePairFunc)(
+ const uint8_t* top_y, const uint8_t* bottom_y,
+ const uint8_t* top_u, const uint8_t* top_v,
+ const uint8_t* cur_u, const uint8_t* cur_v,
+ uint8_t* top_dst, uint8_t* bottom_dst, int len);
+
+#ifdef FANCY_UPSAMPLING
+
+// Fancy upsampling functions to convert YUV to RGB(A) modes
+extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];
+
+#endif // FANCY_UPSAMPLING
+
+// Per-row point-sampling methods.
+typedef void (*WebPSamplerRowFunc)(const uint8_t* y,
+ const uint8_t* u, const uint8_t* v,
+ uint8_t* dst, int len);
+// Generic function to apply 'WebPSamplerRowFunc' to the whole plane:
+void WebPSamplerProcessPlane(const uint8_t* y, int y_stride,
+ const uint8_t* u, const uint8_t* v, int uv_stride,
+ uint8_t* dst, int dst_stride,
+ int width, int height, WebPSamplerRowFunc func);
+
+// Sampling functions to convert rows of YUV to RGB(A)
+extern WebPSamplerRowFunc WebPSamplers[/* MODE_LAST */];
+
+// General function for converting two lines of ARGB or RGBA.
+// 'alpha_is_last' should be true if 0xff000000 is stored in memory
+// as 0x00, 0x00, 0x00, 0xff (little endian).
+WebPUpsampleLinePairFunc WebPGetLinePairConverter(int alpha_is_last);
+
+// YUV444->RGB converters
+typedef void (*WebPYUV444Converter)(const uint8_t* y,
+ const uint8_t* u, const uint8_t* v,
+ uint8_t* dst, int len);
+
+extern WebPYUV444Converter WebPYUV444Converters[/* MODE_LAST */];
+
+// Must be called before using the WebPUpsamplers[] (and for premultiplied
+// colorspaces like rgbA, rgbA4444, etc)
+void WebPInitUpsamplers(void);
+// Must be called before using WebPSamplers[]
+void WebPInitSamplers(void);
+// Must be called before using WebPYUV444Converters[]
+void WebPInitYUV444Converters(void);
+
+//------------------------------------------------------------------------------
+// ARGB -> YUV converters
+
+// Convert ARGB samples to luma Y.
+extern void (*WebPConvertARGBToY)(const uint32_t* argb, uint8_t* y, int width);
+// Convert ARGB samples to U/V with downsampling. do_store should be '1' for
+// even lines and '0' for odd ones. 'src_width' is the original width, not
+// the U/V one.
+extern void (*WebPConvertARGBToUV)(const uint32_t* argb, uint8_t* u, uint8_t* v,
+ int src_width, int do_store);
+
+// Convert a row of accumulated (four-values) of rgba32 toward U/V
+extern void (*WebPConvertRGBA32ToUV)(const uint16_t* rgb,
+ uint8_t* u, uint8_t* v, int width);
+
+// Convert RGB or BGR to Y
+extern void (*WebPConvertRGB24ToY)(const uint8_t* rgb, uint8_t* y, int width);
+extern void (*WebPConvertBGR24ToY)(const uint8_t* bgr, uint8_t* y, int width);
+
+// used for plain-C fallback.
+extern void WebPConvertARGBToUV_C(const uint32_t* argb, uint8_t* u, uint8_t* v,
+                                  int src_width, int do_store);
+extern void WebPConvertRGBA32ToUV_C(const uint16_t* rgb,
+                                    uint8_t* u, uint8_t* v, int width);
+
+// utilities for accurate RGB->YUV conversion
+extern uint64_t (*WebPSharpYUVUpdateY)(const uint16_t* src, const uint16_t* ref,
+                                       uint16_t* dst, int len);
+extern void (*WebPSharpYUVUpdateRGB)(const int16_t* src, const int16_t* ref,
+                                     int16_t* dst, int len);
+extern void (*WebPSharpYUVFilterRow)(const int16_t* A, const int16_t* B,
+                                     int len,
+                                     const uint16_t* best_y, uint16_t* out);
+
+// Must be called before using the above.
+void WebPInitConvertARGBToYUV(void);
+
+//------------------------------------------------------------------------------
+// Rescaler
+
+struct WebPRescaler;
+
+// Import a row of data and save its contribution in the rescaler.
+// 'channel' denotes the channel number to be imported. 'Expand' corresponds to
+// the wrk->x_expand case. Otherwise, 'Shrink' is to be used.
+typedef void (*WebPRescalerImportRowFunc)(struct WebPRescaler* const wrk,
+                                          const uint8_t* src);
+
+extern WebPRescalerImportRowFunc WebPRescalerImportRowExpand;
+extern WebPRescalerImportRowFunc WebPRescalerImportRowShrink;
+
+// Export one row (starting at x_out position) from rescaler.
+// 'Expand' corresponds to the wrk->y_expand case.
+// Otherwise 'Shrink' is to be used.
+typedef void (*WebPRescalerExportRowFunc)(struct WebPRescaler* const wrk);
+extern WebPRescalerExportRowFunc WebPRescalerExportRowExpand;
+extern WebPRescalerExportRowFunc WebPRescalerExportRowShrink;
+
+// Plain-C implementation, as fall-back.
+extern void WebPRescalerImportRowExpand_C(struct WebPRescaler* const wrk,
+                                          const uint8_t* src);
+extern void WebPRescalerImportRowShrink_C(struct WebPRescaler* const wrk,
+                                          const uint8_t* src);
+extern void WebPRescalerExportRowExpand_C(struct WebPRescaler* const wrk);
+extern void WebPRescalerExportRowShrink_C(struct WebPRescaler* const wrk);
+
+// Main entry calls:
+extern void WebPRescalerImportRow(struct WebPRescaler* const wrk,
+                                  const uint8_t* src);
+// Export one row (starting at x_out position) from rescaler.
+extern void WebPRescalerExportRow(struct WebPRescaler* const wrk);
+
+// Must be called first before using the above.
+void WebPRescalerDspInit(void);
+
+//------------------------------------------------------------------------------
+// Utilities for processing transparent channel.
+
+// Apply alpha pre-multiply on an rgba, bgra or argb plane of size w * h.
+// alpha_first should be 0 for argb, 1 for rgba or bgra (where alpha is last).
+extern void (*WebPApplyAlphaMultiply)(
+    uint8_t* rgba, int alpha_first, int w, int h, int stride);
+
+// Same, but specifically for the RGBA4444 format.
+extern void (*WebPApplyAlphaMultiply4444)(
+    uint8_t* rgba4444, int w, int h, int stride);
+
+// Dispatch the values from alpha[] plane to the ARGB destination 'dst'.
+// Returns true if alpha[] plane has non-trivial values different from 0xff.
+extern int (*WebPDispatchAlpha)(const uint8_t* alpha, int alpha_stride,
+                                int width, int height,
+                                uint8_t* dst, int dst_stride);
+
+// Transfer packed 8b alpha[] values to the green channel in dst[], zero'ing
+// the A/R/B values. 'dst_stride' is the stride for dst[] in uint32_t units.
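+// (WebP can code the alpha plane as a green-only lossless image, which is
+// why the alpha values are routed into the green channel here.)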
+extern void (*WebPDispatchAlphaToGreen)(const uint8_t* alpha, int alpha_stride,
+                                        int width, int height,
+                                        uint32_t* dst, int dst_stride);
+
+// Extract the alpha values from 32b values in argb[] and pack them into alpha[]
+// (this is the opposite of WebPDispatchAlpha).
+// Returns true if there are only trivial 0xff alpha values.
+extern int (*WebPExtractAlpha)(const uint8_t* argb, int argb_stride,
+                               int width, int height,
+                               uint8_t* alpha, int alpha_stride);
+
+// Extract the green values from 32b values in argb[] and pack them into alpha[]
+// (this is the opposite of WebPDispatchAlphaToGreen).
+extern void (*WebPExtractGreen)(const uint32_t* argb, uint8_t* alpha, int size);
+
+// Pre-Multiply operation transforms x into x * A / 255 (where x=Y,R,G or B).
+// Un-Multiply operation transforms x into x * 255 / A.
+
+// Pre-Multiply or Un-Multiply (if 'inverse' is true) argb values in a row.
+extern void (*WebPMultARGBRow)(uint32_t* const ptr, int width, int inverse);
+
+// Same as WebPMultARGBRow(), but for several rows.
+void WebPMultARGBRows(uint8_t* ptr, int stride, int width, int num_rows,
+                      int inverse);
+
+// Same for a row of single values, with side alpha values.
+extern void (*WebPMultRow)(uint8_t* const ptr, const uint8_t* const alpha,
+                           int width, int inverse);
+
+// Same as WebPMultRow(), but for several 'num_rows' rows.
+void WebPMultRows(uint8_t* ptr, int stride,
+                  const uint8_t* alpha, int alpha_stride,
+                  int width, int num_rows, int inverse);
+
+// Plain-C versions, used as fallback by some implementations.
+void WebPMultRow_C(uint8_t* const ptr, const uint8_t* const alpha,
+                   int width, int inverse);
+void WebPMultARGBRow_C(uint32_t* const ptr, int width, int inverse);
+
+#ifdef WORDS_BIGENDIAN
+// ARGB packing function: a/r/g/b input is rgba or bgra order.
+extern void (*WebPPackARGB)(const uint8_t* a, const uint8_t* r,
+                            const uint8_t* g, const uint8_t* b, int len,
+                            uint32_t* out);
+#endif
+
+// RGB packing function. 'step' can be 3 or 4. r/g/b input is rgb or bgr order.
+extern void (*WebPPackRGB)(const uint8_t* r, const uint8_t* g, const uint8_t* b,
+                           int len, int step, uint32_t* out);
+
+// This function returns true if src[i] contains a value different from 0xff.
+extern int (*WebPHasAlpha8b)(const uint8_t* src, int length);
+// This function returns true if src[4*i] contains a value different from 0xff.
+extern int (*WebPHasAlpha32b)(const uint8_t* src, int length);
+// Replaces transparent values in src[] by 'color'.
+extern void (*WebPAlphaReplace)(uint32_t* src, int length, uint32_t color);
+
+// To be called first before using the above.
+void WebPInitAlphaProcessing(void);
+
+//------------------------------------------------------------------------------
+// Filter functions
+
+typedef enum {     // Filter types.
+  WEBP_FILTER_NONE = 0,
+  WEBP_FILTER_HORIZONTAL,
+  WEBP_FILTER_VERTICAL,
+  WEBP_FILTER_GRADIENT,
+  WEBP_FILTER_LAST = WEBP_FILTER_GRADIENT + 1,  // end marker
+  WEBP_FILTER_BEST,    // meta-types
+  WEBP_FILTER_FAST
+} WEBP_FILTER_TYPE;
+
+typedef void (*WebPFilterFunc)(const uint8_t* in, int width, int height,
+                               int stride, uint8_t* out);
+// In-place un-filtering.
+// Warning! 'prev_line' pointer can be equal to 'cur_line' or 'preds'.
+typedef void (*WebPUnfilterFunc)(const uint8_t* prev_line, const uint8_t* preds,
+                                 uint8_t* cur_line, int width);
+
+// Filter the given data using the given predictor.
+// 'in' corresponds to a 2-dimensional pixel array of size (stride * height)
+// in raster order.
+// 'stride' is the number of bytes per scan line (with possible padding).
+// 'out' should be pre-allocated.
+extern WebPFilterFunc WebPFilters[WEBP_FILTER_LAST];
+
+// In-place reconstruct the original data from the given filtered data.
+// The reconstruction will be done for 'num_rows' rows starting from 'row'
+// (assuming rows up to 'row - 1' are already reconstructed).
+extern WebPUnfilterFunc WebPUnfilters[WEBP_FILTER_LAST];
+
+// To be called first before using the above.
+void VP8FiltersInit(void);
+
+#ifdef __cplusplus
+}    // extern "C"
+#endif
+
+#endif  // WEBP_DSP_DSP_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_enc.c b/vendor/github.com/sizeofint/webpanimation/dsp_enc.c
new file mode 100644
index 00000000..bdc4da99
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_enc.c
@@ -0,0 +1,830 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Speed-critical encoding functions.
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <assert.h>
+#include <stdlib.h>  // for abs()
+
+#include "dsp_dsp.h"
+#include "enc_vp8i_enc.h"
+
+static WEBP_INLINE uint8_t clip_8b(int v) {
+  return (!(v & ~0xff)) ? v : (v < 0) ? 0 : 255;
+}
+
+#if !WEBP_NEON_OMIT_C_CODE
+static WEBP_INLINE int clip_max(int v, int max) {
+  return (v > max) ? max : v;
+}
+#endif  // !WEBP_NEON_OMIT_C_CODE
+
+//------------------------------------------------------------------------------
+// Compute susceptibility based on DCT-coeff histograms:
+// the higher, the "easier" the macroblock is to compress.
+
+const int VP8DspScan[16 + 4 + 4] = {
+  // Luma
+  0 +  0 * BPS,  4 +  0 * BPS, 8 +  0 * BPS, 12 +  0 * BPS,
+  0 +  4 * BPS,  4 +  4 * BPS, 8 +  4 * BPS, 12 +  4 * BPS,
+  0 +  8 * BPS,  4 +  8 * BPS, 8 +  8 * BPS, 12 +  8 * BPS,
+  0 + 12 * BPS,  4 + 12 * BPS, 8 + 12 * BPS, 12 + 12 * BPS,
+
+  0 + 0 * BPS,   4 + 0 * BPS, 0 + 4 * BPS,  4 + 4 * BPS,    // U
+  8 + 0 * BPS,  12 + 0 * BPS, 8 + 4 * BPS, 12 + 4 * BPS     // V
+};
+
+// general-purpose util function
+void VP8SetHistogramData(const int distribution[MAX_COEFF_THRESH + 1],
+                         VP8Histogram* const histo) {
+  int max_value = 0, last_non_zero = 1;
+  int k;
+  for (k = 0; k <= MAX_COEFF_THRESH; ++k) {
+    const int value = distribution[k];
+    if (value > 0) {
+      if (value > max_value) max_value = value;
+      last_non_zero = k;
+    }
+  }
+  histo->max_value = max_value;
+  histo->last_non_zero = last_non_zero;
+}
+
+#if !WEBP_NEON_OMIT_C_CODE
+static void CollectHistogram_C(const uint8_t* ref, const uint8_t* pred,
+                               int start_block, int end_block,
+                               VP8Histogram* const histo) {
+  int j;
+  int distribution[MAX_COEFF_THRESH + 1] = { 0 };
+  for (j = start_block; j < end_block; ++j) {
+    int k;
+    int16_t out[16];
+
+    VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out);
+
+    // Convert coefficients to bin.
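+    // Each bin is the coefficient magnitude divided by 8 (>> 3) and clipped
+    // to MAX_COEFF_THRESH: e.g. a coefficient of -100 gives
+    // abs(-100) >> 3 = 12, so distribution[12] is incremented.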
+ for (k = 0; k < 16; ++k) { + const int v = abs(out[k]) >> 3; + const int clipped_value = clip_max(v, MAX_COEFF_THRESH); + ++distribution[clipped_value]; + } + } + VP8SetHistogramData(distribution, histo); +} +#endif // !WEBP_NEON_OMIT_C_CODE + +//------------------------------------------------------------------------------ +// run-time tables (~4k) + +static uint8_t clip1[255 + 510 + 1]; // clips [-255,510] to [0,255] + +// We declare this variable 'volatile' to prevent instruction reordering +// and make sure it's set to true _last_ (so as to be thread-safe) +static volatile int tables_ok = 0; + +static WEBP_TSAN_IGNORE_FUNCTION void InitTables(void) { + if (!tables_ok) { + int i; + for (i = -255; i <= 255 + 255; ++i) { + clip1[255 + i] = clip_8b(i); + } + tables_ok = 1; + } +} + + +//------------------------------------------------------------------------------ +// Transforms (Paragraph 14.4) + +#if !WEBP_NEON_OMIT_C_CODE + +#define STORE(x, y, v) \ + dst[(x) + (y) * BPS] = clip_8b(ref[(x) + (y) * BPS] + ((v) >> 3)) + +static const int kC1 = 20091 + (1 << 16); +static const int kC2 = 35468; +#define MUL(a, b) (((a) * (b)) >> 16) + +static WEBP_INLINE void ITransformOne(const uint8_t* ref, const int16_t* in, + uint8_t* dst) { + int C[4 * 4], *tmp; + int i; + tmp = C; + for (i = 0; i < 4; ++i) { // vertical pass + const int a = in[0] + in[8]; + const int b = in[0] - in[8]; + const int c = MUL(in[4], kC2) - MUL(in[12], kC1); + const int d = MUL(in[4], kC1) + MUL(in[12], kC2); + tmp[0] = a + d; + tmp[1] = b + c; + tmp[2] = b - c; + tmp[3] = a - d; + tmp += 4; + in++; + } + + tmp = C; + for (i = 0; i < 4; ++i) { // horizontal pass + const int dc = tmp[0] + 4; + const int a = dc + tmp[8]; + const int b = dc - tmp[8]; + const int c = MUL(tmp[4], kC2) - MUL(tmp[12], kC1); + const int d = MUL(tmp[4], kC1) + MUL(tmp[12], kC2); + STORE(0, i, a + d); + STORE(1, i, b + c); + STORE(2, i, b - c); + STORE(3, i, a - d); + tmp++; + } +} + +static void ITransform_C(const uint8_t* ref, const int16_t* in, uint8_t* dst, + int do_two) { + ITransformOne(ref, in, dst); + if (do_two) { + ITransformOne(ref + 4, in + 16, dst + 4); + } +} + +static void FTransform_C(const uint8_t* src, const uint8_t* ref, int16_t* out) { + int i; + int tmp[16]; + for (i = 0; i < 4; ++i, src += BPS, ref += BPS) { + const int d0 = src[0] - ref[0]; // 9bit dynamic range ([-255,255]) + const int d1 = src[1] - ref[1]; + const int d2 = src[2] - ref[2]; + const int d3 = src[3] - ref[3]; + const int a0 = (d0 + d3); // 10b [-510,510] + const int a1 = (d1 + d2); + const int a2 = (d1 - d2); + const int a3 = (d0 - d3); + tmp[0 + i * 4] = (a0 + a1) * 8; // 14b [-8160,8160] + tmp[1 + i * 4] = (a2 * 2217 + a3 * 5352 + 1812) >> 9; // [-7536,7542] + tmp[2 + i * 4] = (a0 - a1) * 8; + tmp[3 + i * 4] = (a3 * 2217 - a2 * 5352 + 937) >> 9; + } + for (i = 0; i < 4; ++i) { + const int a0 = (tmp[0 + i] + tmp[12 + i]); // 15b + const int a1 = (tmp[4 + i] + tmp[ 8 + i]); + const int a2 = (tmp[4 + i] - tmp[ 8 + i]); + const int a3 = (tmp[0 + i] - tmp[12 + i]); + out[0 + i] = (a0 + a1 + 7) >> 4; // 12b + out[4 + i] = ((a2 * 2217 + a3 * 5352 + 12000) >> 16) + (a3 != 0); + out[8 + i] = (a0 - a1 + 7) >> 4; + out[12+ i] = ((a3 * 2217 - a2 * 5352 + 51000) >> 16); + } +} +#endif // !WEBP_NEON_OMIT_C_CODE + +static void FTransform2_C(const uint8_t* src, const uint8_t* ref, + int16_t* out) { + VP8FTransform(src, ref, out); + VP8FTransform(src + 4, ref + 4, out + 16); +} + +#if !WEBP_NEON_OMIT_C_CODE +static void FTransformWHT_C(const int16_t* in, int16_t* out) { + 
// input is 12b signed + int32_t tmp[16]; + int i; + for (i = 0; i < 4; ++i, in += 64) { + const int a0 = (in[0 * 16] + in[2 * 16]); // 13b + const int a1 = (in[1 * 16] + in[3 * 16]); + const int a2 = (in[1 * 16] - in[3 * 16]); + const int a3 = (in[0 * 16] - in[2 * 16]); + tmp[0 + i * 4] = a0 + a1; // 14b + tmp[1 + i * 4] = a3 + a2; + tmp[2 + i * 4] = a3 - a2; + tmp[3 + i * 4] = a0 - a1; + } + for (i = 0; i < 4; ++i) { + const int a0 = (tmp[0 + i] + tmp[8 + i]); // 15b + const int a1 = (tmp[4 + i] + tmp[12+ i]); + const int a2 = (tmp[4 + i] - tmp[12+ i]); + const int a3 = (tmp[0 + i] - tmp[8 + i]); + const int b0 = a0 + a1; // 16b + const int b1 = a3 + a2; + const int b2 = a3 - a2; + const int b3 = a0 - a1; + out[ 0 + i] = b0 >> 1; // 15b + out[ 4 + i] = b1 >> 1; + out[ 8 + i] = b2 >> 1; + out[12 + i] = b3 >> 1; + } +} +#endif // !WEBP_NEON_OMIT_C_CODE + +#undef MUL +#undef STORE + +//------------------------------------------------------------------------------ +// Intra predictions + +static WEBP_INLINE void Fill(uint8_t* dst, int value, int size) { + int j; + for (j = 0; j < size; ++j) { + memset(dst + j * BPS, value, size); + } +} + +static WEBP_INLINE void VerticalPred(uint8_t* dst, + const uint8_t* top, int size) { + int j; + if (top != NULL) { + for (j = 0; j < size; ++j) memcpy(dst + j * BPS, top, size); + } else { + Fill(dst, 127, size); + } +} + +static WEBP_INLINE void HorizontalPred(uint8_t* dst, + const uint8_t* left, int size) { + if (left != NULL) { + int j; + for (j = 0; j < size; ++j) { + memset(dst + j * BPS, left[j], size); + } + } else { + Fill(dst, 129, size); + } +} + +static WEBP_INLINE void TrueMotion(uint8_t* dst, const uint8_t* left, + const uint8_t* top, int size) { + int y; + if (left != NULL) { + if (top != NULL) { + const uint8_t* const clip = clip1 + 255 - left[-1]; + for (y = 0; y < size; ++y) { + const uint8_t* const clip_table = clip + left[y]; + int x; + for (x = 0; x < size; ++x) { + dst[x] = clip_table[top[x]]; + } + dst += BPS; + } + } else { + HorizontalPred(dst, left, size); + } + } else { + // true motion without left samples (hence: with default 129 value) + // is equivalent to VE prediction where you just copy the top samples. + // Note that if top samples are not available, the default value is + // then 129, and not 127 as in the VerticalPred case. + if (top != NULL) { + VerticalPred(dst, top, size); + } else { + Fill(dst, 129, size); + } + } +} + +static WEBP_INLINE void DCMode(uint8_t* dst, const uint8_t* left, + const uint8_t* top, + int size, int round, int shift) { + int DC = 0; + int j; + if (top != NULL) { + for (j = 0; j < size; ++j) DC += top[j]; + if (left != NULL) { // top and left present + for (j = 0; j < size; ++j) DC += left[j]; + } else { // top, but no left + DC += DC; + } + DC = (DC + round) >> shift; + } else if (left != NULL) { // left but no top + for (j = 0; j < size; ++j) DC += left[j]; + DC += DC; + DC = (DC + round) >> shift; + } else { // no top, no left, nothing. 
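+    // Neither top nor left samples are available: fall back to the
+    // mid-range default value 128 (0x80).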
+ DC = 0x80; + } + Fill(dst, DC, size); +} + +//------------------------------------------------------------------------------ +// Chroma 8x8 prediction (paragraph 12.2) + +static void IntraChromaPreds_C(uint8_t* dst, const uint8_t* left, + const uint8_t* top) { + // U block + DCMode(C8DC8 + dst, left, top, 8, 8, 4); + VerticalPred(C8VE8 + dst, top, 8); + HorizontalPred(C8HE8 + dst, left, 8); + TrueMotion(C8TM8 + dst, left, top, 8); + // V block + dst += 8; + if (top != NULL) top += 8; + if (left != NULL) left += 16; + DCMode(C8DC8 + dst, left, top, 8, 8, 4); + VerticalPred(C8VE8 + dst, top, 8); + HorizontalPred(C8HE8 + dst, left, 8); + TrueMotion(C8TM8 + dst, left, top, 8); +} + +//------------------------------------------------------------------------------ +// luma 16x16 prediction (paragraph 12.3) + +static void Intra16Preds_C(uint8_t* dst, + const uint8_t* left, const uint8_t* top) { + DCMode(I16DC16 + dst, left, top, 16, 16, 5); + VerticalPred(I16VE16 + dst, top, 16); + HorizontalPred(I16HE16 + dst, left, 16); + TrueMotion(I16TM16 + dst, left, top, 16); +} + +//------------------------------------------------------------------------------ +// luma 4x4 prediction + +#define DST(x, y) dst[(x) + (y) * BPS] +#define AVG3(a, b, c) ((uint8_t)(((a) + 2 * (b) + (c) + 2) >> 2)) +#define AVG2(a, b) (((a) + (b) + 1) >> 1) + +static void VE4(uint8_t* dst, const uint8_t* top) { // vertical + const uint8_t vals[4] = { + AVG3(top[-1], top[0], top[1]), + AVG3(top[ 0], top[1], top[2]), + AVG3(top[ 1], top[2], top[3]), + AVG3(top[ 2], top[3], top[4]) + }; + int i; + for (i = 0; i < 4; ++i) { + memcpy(dst + i * BPS, vals, 4); + } +} + +static void HE4(uint8_t* dst, const uint8_t* top) { // horizontal + const int X = top[-1]; + const int I = top[-2]; + const int J = top[-3]; + const int K = top[-4]; + const int L = top[-5]; + WebPUint32ToMem(dst + 0 * BPS, 0x01010101U * AVG3(X, I, J)); + WebPUint32ToMem(dst + 1 * BPS, 0x01010101U * AVG3(I, J, K)); + WebPUint32ToMem(dst + 2 * BPS, 0x01010101U * AVG3(J, K, L)); + WebPUint32ToMem(dst + 3 * BPS, 0x01010101U * AVG3(K, L, L)); +} + +static void DC4(uint8_t* dst, const uint8_t* top) { + uint32_t dc = 4; + int i; + for (i = 0; i < 4; ++i) dc += top[i] + top[-5 + i]; + Fill(dst, dc >> 3, 4); +} + +static void RD4(uint8_t* dst, const uint8_t* top) { + const int X = top[-1]; + const int I = top[-2]; + const int J = top[-3]; + const int K = top[-4]; + const int L = top[-5]; + const int A = top[0]; + const int B = top[1]; + const int C = top[2]; + const int D = top[3]; + DST(0, 3) = AVG3(J, K, L); + DST(0, 2) = DST(1, 3) = AVG3(I, J, K); + DST(0, 1) = DST(1, 2) = DST(2, 3) = AVG3(X, I, J); + DST(0, 0) = DST(1, 1) = DST(2, 2) = DST(3, 3) = AVG3(A, X, I); + DST(1, 0) = DST(2, 1) = DST(3, 2) = AVG3(B, A, X); + DST(2, 0) = DST(3, 1) = AVG3(C, B, A); + DST(3, 0) = AVG3(D, C, B); +} + +static void LD4(uint8_t* dst, const uint8_t* top) { + const int A = top[0]; + const int B = top[1]; + const int C = top[2]; + const int D = top[3]; + const int E = top[4]; + const int F = top[5]; + const int G = top[6]; + const int H = top[7]; + DST(0, 0) = AVG3(A, B, C); + DST(1, 0) = DST(0, 1) = AVG3(B, C, D); + DST(2, 0) = DST(1, 1) = DST(0, 2) = AVG3(C, D, E); + DST(3, 0) = DST(2, 1) = DST(1, 2) = DST(0, 3) = AVG3(D, E, F); + DST(3, 1) = DST(2, 2) = DST(1, 3) = AVG3(E, F, G); + DST(3, 2) = DST(2, 3) = AVG3(F, G, H); + DST(3, 3) = AVG3(G, H, H); +} + +static void VR4(uint8_t* dst, const uint8_t* top) { + const int X = top[-1]; + const int I = top[-2]; + const int J = top[-3]; + const 
int K = top[-4]; + const int A = top[0]; + const int B = top[1]; + const int C = top[2]; + const int D = top[3]; + DST(0, 0) = DST(1, 2) = AVG2(X, A); + DST(1, 0) = DST(2, 2) = AVG2(A, B); + DST(2, 0) = DST(3, 2) = AVG2(B, C); + DST(3, 0) = AVG2(C, D); + + DST(0, 3) = AVG3(K, J, I); + DST(0, 2) = AVG3(J, I, X); + DST(0, 1) = DST(1, 3) = AVG3(I, X, A); + DST(1, 1) = DST(2, 3) = AVG3(X, A, B); + DST(2, 1) = DST(3, 3) = AVG3(A, B, C); + DST(3, 1) = AVG3(B, C, D); +} + +static void VL4(uint8_t* dst, const uint8_t* top) { + const int A = top[0]; + const int B = top[1]; + const int C = top[2]; + const int D = top[3]; + const int E = top[4]; + const int F = top[5]; + const int G = top[6]; + const int H = top[7]; + DST(0, 0) = AVG2(A, B); + DST(1, 0) = DST(0, 2) = AVG2(B, C); + DST(2, 0) = DST(1, 2) = AVG2(C, D); + DST(3, 0) = DST(2, 2) = AVG2(D, E); + + DST(0, 1) = AVG3(A, B, C); + DST(1, 1) = DST(0, 3) = AVG3(B, C, D); + DST(2, 1) = DST(1, 3) = AVG3(C, D, E); + DST(3, 1) = DST(2, 3) = AVG3(D, E, F); + DST(3, 2) = AVG3(E, F, G); + DST(3, 3) = AVG3(F, G, H); +} + +static void HU4(uint8_t* dst, const uint8_t* top) { + const int I = top[-2]; + const int J = top[-3]; + const int K = top[-4]; + const int L = top[-5]; + DST(0, 0) = AVG2(I, J); + DST(2, 0) = DST(0, 1) = AVG2(J, K); + DST(2, 1) = DST(0, 2) = AVG2(K, L); + DST(1, 0) = AVG3(I, J, K); + DST(3, 0) = DST(1, 1) = AVG3(J, K, L); + DST(3, 1) = DST(1, 2) = AVG3(K, L, L); + DST(3, 2) = DST(2, 2) = + DST(0, 3) = DST(1, 3) = DST(2, 3) = DST(3, 3) = L; +} + +static void HD4(uint8_t* dst, const uint8_t* top) { + const int X = top[-1]; + const int I = top[-2]; + const int J = top[-3]; + const int K = top[-4]; + const int L = top[-5]; + const int A = top[0]; + const int B = top[1]; + const int C = top[2]; + + DST(0, 0) = DST(2, 1) = AVG2(I, X); + DST(0, 1) = DST(2, 2) = AVG2(J, I); + DST(0, 2) = DST(2, 3) = AVG2(K, J); + DST(0, 3) = AVG2(L, K); + + DST(3, 0) = AVG3(A, B, C); + DST(2, 0) = AVG3(X, A, B); + DST(1, 0) = DST(3, 1) = AVG3(I, X, A); + DST(1, 1) = DST(3, 2) = AVG3(J, I, X); + DST(1, 2) = DST(3, 3) = AVG3(K, J, I); + DST(1, 3) = AVG3(L, K, J); +} + +static void TM4(uint8_t* dst, const uint8_t* top) { + int x, y; + const uint8_t* const clip = clip1 + 255 - top[-1]; + for (y = 0; y < 4; ++y) { + const uint8_t* const clip_table = clip + top[-2 - y]; + for (x = 0; x < 4; ++x) { + dst[x] = clip_table[top[x]]; + } + dst += BPS; + } +} + +#undef DST +#undef AVG3 +#undef AVG2 + +// Left samples are top[-5 .. 
-2], top_left is top[-1], top are +// located at top[0..3], and top right is top[4..7] +static void Intra4Preds_C(uint8_t* dst, const uint8_t* top) { + DC4(I4DC4 + dst, top); + TM4(I4TM4 + dst, top); + VE4(I4VE4 + dst, top); + HE4(I4HE4 + dst, top); + RD4(I4RD4 + dst, top); + VR4(I4VR4 + dst, top); + LD4(I4LD4 + dst, top); + VL4(I4VL4 + dst, top); + HD4(I4HD4 + dst, top); + HU4(I4HU4 + dst, top); +} + +//------------------------------------------------------------------------------ +// Metric + +#if !WEBP_NEON_OMIT_C_CODE +static WEBP_INLINE int GetSSE(const uint8_t* a, const uint8_t* b, + int w, int h) { + int count = 0; + int y, x; + for (y = 0; y < h; ++y) { + for (x = 0; x < w; ++x) { + const int diff = (int)a[x] - b[x]; + count += diff * diff; + } + a += BPS; + b += BPS; + } + return count; +} + +static int SSE16x16_C(const uint8_t* a, const uint8_t* b) { + return GetSSE(a, b, 16, 16); +} +static int SSE16x8_C(const uint8_t* a, const uint8_t* b) { + return GetSSE(a, b, 16, 8); +} +static int SSE8x8_C(const uint8_t* a, const uint8_t* b) { + return GetSSE(a, b, 8, 8); +} +static int SSE4x4_C(const uint8_t* a, const uint8_t* b) { + return GetSSE(a, b, 4, 4); +} +#endif // !WEBP_NEON_OMIT_C_CODE + +static void Mean16x4_C(const uint8_t* ref, uint32_t dc[4]) { + int k, x, y; + for (k = 0; k < 4; ++k) { + uint32_t avg = 0; + for (y = 0; y < 4; ++y) { + for (x = 0; x < 4; ++x) { + avg += ref[x + y * BPS]; + } + } + dc[k] = avg; + ref += 4; // go to next 4x4 block. + } +} + +//------------------------------------------------------------------------------ +// Texture distortion +// +// We try to match the spectral content (weighted) between source and +// reconstructed samples. + +#if !WEBP_NEON_OMIT_C_CODE +// Hadamard transform +// Returns the weighted sum of the absolute value of transformed coefficients. +// w[] contains a row-major 4 by 4 symmetric matrix. 
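+// Disto4x4() below applies this transform to both the source and the
+// reconstructed block and returns abs(sum2 - sum1) >> 5, i.e. a difference
+// of weighted spectral energy rather than a per-sample distance.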
+static int TTransform(const uint8_t* in, const uint16_t* w) { + int sum = 0; + int tmp[16]; + int i; + // horizontal pass + for (i = 0; i < 4; ++i, in += BPS) { + const int a0 = in[0] + in[2]; + const int a1 = in[1] + in[3]; + const int a2 = in[1] - in[3]; + const int a3 = in[0] - in[2]; + tmp[0 + i * 4] = a0 + a1; + tmp[1 + i * 4] = a3 + a2; + tmp[2 + i * 4] = a3 - a2; + tmp[3 + i * 4] = a0 - a1; + } + // vertical pass + for (i = 0; i < 4; ++i, ++w) { + const int a0 = tmp[0 + i] + tmp[8 + i]; + const int a1 = tmp[4 + i] + tmp[12+ i]; + const int a2 = tmp[4 + i] - tmp[12+ i]; + const int a3 = tmp[0 + i] - tmp[8 + i]; + const int b0 = a0 + a1; + const int b1 = a3 + a2; + const int b2 = a3 - a2; + const int b3 = a0 - a1; + + sum += w[ 0] * abs(b0); + sum += w[ 4] * abs(b1); + sum += w[ 8] * abs(b2); + sum += w[12] * abs(b3); + } + return sum; +} + +static int Disto4x4_C(const uint8_t* const a, const uint8_t* const b, + const uint16_t* const w) { + const int sum1 = TTransform(a, w); + const int sum2 = TTransform(b, w); + return abs(sum2 - sum1) >> 5; +} + +static int Disto16x16_C(const uint8_t* const a, const uint8_t* const b, + const uint16_t* const w) { + int D = 0; + int x, y; + for (y = 0; y < 16 * BPS; y += 4 * BPS) { + for (x = 0; x < 16; x += 4) { + D += Disto4x4_C(a + x + y, b + x + y, w); + } + } + return D; +} +#endif // !WEBP_NEON_OMIT_C_CODE + +//------------------------------------------------------------------------------ +// Quantization +// + +static const uint8_t kZigzag[16] = { + 0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15 +}; + +// Simple quantization +static int QuantizeBlock_C(int16_t in[16], int16_t out[16], + const VP8Matrix* const mtx) { + int last = -1; + int n; + for (n = 0; n < 16; ++n) { + const int j = kZigzag[n]; + const int sign = (in[j] < 0); + const uint32_t coeff = (sign ? -in[j] : in[j]) + mtx->sharpen_[j]; + if (coeff > mtx->zthresh_[j]) { + const uint32_t Q = mtx->q_[j]; + const uint32_t iQ = mtx->iq_[j]; + const uint32_t B = mtx->bias_[j]; + int level = QUANTDIV(coeff, iQ, B); + if (level > MAX_LEVEL) level = MAX_LEVEL; + if (sign) level = -level; + in[j] = level * (int)Q; + out[n] = level; + if (level) last = n; + } else { + out[n] = 0; + in[j] = 0; + } + } + return (last >= 0); +} + +#if !WEBP_NEON_OMIT_C_CODE || WEBP_NEON_WORK_AROUND_GCC +static int Quantize2Blocks_C(int16_t in[32], int16_t out[32], + const VP8Matrix* const mtx) { + int nz; + nz = VP8EncQuantizeBlock(in + 0 * 16, out + 0 * 16, mtx) << 0; + nz |= VP8EncQuantizeBlock(in + 1 * 16, out + 1 * 16, mtx) << 1; + return nz; +} +#endif // !WEBP_NEON_OMIT_C_CODE || WEBP_NEON_WORK_AROUND_GCC + +//------------------------------------------------------------------------------ +// Block copy + +static WEBP_INLINE void Copy(const uint8_t* src, uint8_t* dst, int w, int h) { + int y; + for (y = 0; y < h; ++y) { + memcpy(dst, src, w); + src += BPS; + dst += BPS; + } +} + +static void Copy4x4_C(const uint8_t* src, uint8_t* dst) { + Copy(src, dst, 4, 4); +} + +static void Copy16x8_C(const uint8_t* src, uint8_t* dst) { + Copy(src, dst, 16, 8); +} + +//------------------------------------------------------------------------------ +// Initialization + +// Speed-critical function pointers. We have to initialize them to the default +// implementations within VP8EncDspInit(). 
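+// Every pointer below is first bound to its plain-C implementation and may
+// then be overridden by an SSE2/SSE4.1/NEON/MIPS32/MIPSdspR2/MSA version,
+// depending on what VP8GetCPUInfo() reports (see VP8EncDspInit() below).
+// Usage sketch (hypothetical caller buffers 'src', 'ref', 'coeffs', 'levels'
+// and quantization matrix 'mtx'; these names are not part of this file):
+//
+//   VP8EncDspInit();                   // idempotent one-time setup
+//   VP8FTransform(src, ref, coeffs);   // forward 4x4 transform
+//   (void)VP8EncQuantizeBlock(coeffs, levels, &mtx);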
+VP8CHisto VP8CollectHistogram; +VP8Idct VP8ITransform; +VP8Fdct VP8FTransform; +VP8Fdct VP8FTransform2; +VP8WHT VP8FTransformWHT; +VP8Intra4Preds VP8EncPredLuma4; +VP8IntraPreds VP8EncPredLuma16; +VP8IntraPreds VP8EncPredChroma8; +VP8Metric VP8SSE16x16; +VP8Metric VP8SSE8x8; +VP8Metric VP8SSE16x8; +VP8Metric VP8SSE4x4; +VP8WMetric VP8TDisto4x4; +VP8WMetric VP8TDisto16x16; +VP8MeanMetric VP8Mean16x4; +VP8QuantizeBlock VP8EncQuantizeBlock; +VP8Quantize2Blocks VP8EncQuantize2Blocks; +VP8QuantizeBlockWHT VP8EncQuantizeBlockWHT; +VP8BlockCopy VP8Copy4x4; +VP8BlockCopy VP8Copy16x8; + +extern void VP8EncDspInitSSE2(void); +extern void VP8EncDspInitSSE41(void); +extern void VP8EncDspInitNEON(void); +extern void VP8EncDspInitMIPS32(void); +extern void VP8EncDspInitMIPSdspR2(void); +extern void VP8EncDspInitMSA(void); + +WEBP_DSP_INIT_FUNC(VP8EncDspInit) { + VP8DspInit(); // common inverse transforms + InitTables(); + + // default C implementations +#if !WEBP_NEON_OMIT_C_CODE + VP8ITransform = ITransform_C; + VP8FTransform = FTransform_C; + VP8FTransformWHT = FTransformWHT_C; + VP8TDisto4x4 = Disto4x4_C; + VP8TDisto16x16 = Disto16x16_C; + VP8CollectHistogram = CollectHistogram_C; + VP8SSE16x16 = SSE16x16_C; + VP8SSE16x8 = SSE16x8_C; + VP8SSE8x8 = SSE8x8_C; + VP8SSE4x4 = SSE4x4_C; +#endif + +#if !WEBP_NEON_OMIT_C_CODE || WEBP_NEON_WORK_AROUND_GCC + VP8EncQuantizeBlock = QuantizeBlock_C; + VP8EncQuantize2Blocks = Quantize2Blocks_C; +#endif + + VP8FTransform2 = FTransform2_C; + VP8EncPredLuma4 = Intra4Preds_C; + VP8EncPredLuma16 = Intra16Preds_C; + VP8EncPredChroma8 = IntraChromaPreds_C; + VP8Mean16x4 = Mean16x4_C; + VP8EncQuantizeBlockWHT = QuantizeBlock_C; + VP8Copy4x4 = Copy4x4_C; + VP8Copy16x8 = Copy16x8_C; + + // If defined, use CPUInfo() to overwrite some pointers with faster versions. + if (VP8GetCPUInfo != NULL) { +#if defined(WEBP_USE_SSE2) + if (VP8GetCPUInfo(kSSE2)) { + VP8EncDspInitSSE2(); +#if defined(WEBP_USE_SSE41) + if (VP8GetCPUInfo(kSSE4_1)) { + VP8EncDspInitSSE41(); + } +#endif + } +#endif +#if defined(WEBP_USE_MIPS32) + if (VP8GetCPUInfo(kMIPS32)) { + VP8EncDspInitMIPS32(); + } +#endif +#if defined(WEBP_USE_MIPS_DSP_R2) + if (VP8GetCPUInfo(kMIPSdspR2)) { + VP8EncDspInitMIPSdspR2(); + } +#endif +#if defined(WEBP_USE_MSA) + if (VP8GetCPUInfo(kMSA)) { + VP8EncDspInitMSA(); + } +#endif + } + +#if defined(WEBP_USE_NEON) + if (WEBP_NEON_OMIT_C_CODE || + (VP8GetCPUInfo != NULL && VP8GetCPUInfo(kNEON))) { + VP8EncDspInitNEON(); + } +#endif + + assert(VP8ITransform != NULL); + assert(VP8FTransform != NULL); + assert(VP8FTransformWHT != NULL); + assert(VP8TDisto4x4 != NULL); + assert(VP8TDisto16x16 != NULL); + assert(VP8CollectHistogram != NULL); + assert(VP8SSE16x16 != NULL); + assert(VP8SSE16x8 != NULL); + assert(VP8SSE8x8 != NULL); + assert(VP8SSE4x4 != NULL); + assert(VP8EncQuantizeBlock != NULL); + assert(VP8EncQuantize2Blocks != NULL); + assert(VP8FTransform2 != NULL); + assert(VP8EncPredLuma4 != NULL); + assert(VP8EncPredLuma16 != NULL); + assert(VP8EncPredChroma8 != NULL); + assert(VP8Mean16x4 != NULL); + assert(VP8EncQuantizeBlockWHT != NULL); + assert(VP8Copy4x4 != NULL); + assert(VP8Copy16x8 != NULL); +} diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_enc_mips32.c b/vendor/github.com/sizeofint/webpanimation/dsp_enc_mips32.c new file mode 100644 index 00000000..80135b09 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_enc_mips32.c @@ -0,0 +1,677 @@ +// Copyright 2014 Google Inc. All Rights Reserved. 
+// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// MIPS version of speed-critical encoding functions. +// +// Author(s): Djordje Pesut (djordje.pesut@imgtec.com) +// Jovan Zelincevic (jovan.zelincevic@imgtec.com) +// Slobodan Prijic (slobodan.prijic@imgtec.com) + +#include "dsp_dsp.h" + +#if defined(WEBP_USE_MIPS32) + +#include "dsp_mips_macro.h" +#include "enc_vp8i_enc.h" +#include "enc_cost_enc.h" + +static const int kC1 = 20091 + (1 << 16); +static const int kC2 = 35468; + +// macro for one vertical pass in ITransformOne +// MUL macro inlined +// temp0..temp15 holds tmp[0]..tmp[15] +// A..D - offsets in bytes to load from in buffer +// TEMP0..TEMP3 - registers for corresponding tmp elements +// TEMP4..TEMP5 - temporary registers +#define VERTICAL_PASS(A, B, C, D, TEMP4, TEMP0, TEMP1, TEMP2, TEMP3) \ + "lh %[temp16], " #A "(%[temp20]) \n\t" \ + "lh %[temp18], " #B "(%[temp20]) \n\t" \ + "lh %[temp17], " #C "(%[temp20]) \n\t" \ + "lh %[temp19], " #D "(%[temp20]) \n\t" \ + "addu %[" #TEMP4 "], %[temp16], %[temp18] \n\t" \ + "subu %[temp16], %[temp16], %[temp18] \n\t" \ + "mul %[" #TEMP0 "], %[temp17], %[kC2] \n\t" \ + "mul %[temp18], %[temp19], %[kC1] \n\t" \ + "mul %[temp17], %[temp17], %[kC1] \n\t" \ + "mul %[temp19], %[temp19], %[kC2] \n\t" \ + "sra %[" #TEMP0 "], %[" #TEMP0 "], 16 \n\n" \ + "sra %[temp18], %[temp18], 16 \n\n" \ + "sra %[temp17], %[temp17], 16 \n\n" \ + "sra %[temp19], %[temp19], 16 \n\n" \ + "subu %[" #TEMP2 "], %[" #TEMP0 "], %[temp18] \n\t" \ + "addu %[" #TEMP3 "], %[temp17], %[temp19] \n\t" \ + "addu %[" #TEMP0 "], %[" #TEMP4 "], %[" #TEMP3 "] \n\t" \ + "addu %[" #TEMP1 "], %[temp16], %[" #TEMP2 "] \n\t" \ + "subu %[" #TEMP2 "], %[temp16], %[" #TEMP2 "] \n\t" \ + "subu %[" #TEMP3 "], %[" #TEMP4 "], %[" #TEMP3 "] \n\t" + +// macro for one horizontal pass in ITransformOne +// MUL and STORE macros inlined +// a = clip_8b(a) is replaced with: a = max(a, 0); a = min(a, 255) +// temp0..temp15 holds tmp[0]..tmp[15] +// A - offset in bytes to load from ref and store to dst buffer +// TEMP0, TEMP4, TEMP8 and TEMP12 - registers for corresponding tmp elements +#define HORIZONTAL_PASS(A, TEMP0, TEMP4, TEMP8, TEMP12) \ + "addiu %[" #TEMP0 "], %[" #TEMP0 "], 4 \n\t" \ + "addu %[temp16], %[" #TEMP0 "], %[" #TEMP8 "] \n\t" \ + "subu %[temp17], %[" #TEMP0 "], %[" #TEMP8 "] \n\t" \ + "mul %[" #TEMP0 "], %[" #TEMP4 "], %[kC2] \n\t" \ + "mul %[" #TEMP8 "], %[" #TEMP12 "], %[kC1] \n\t" \ + "mul %[" #TEMP4 "], %[" #TEMP4 "], %[kC1] \n\t" \ + "mul %[" #TEMP12 "], %[" #TEMP12 "], %[kC2] \n\t" \ + "sra %[" #TEMP0 "], %[" #TEMP0 "], 16 \n\t" \ + "sra %[" #TEMP8 "], %[" #TEMP8 "], 16 \n\t" \ + "sra %[" #TEMP4 "], %[" #TEMP4 "], 16 \n\t" \ + "sra %[" #TEMP12 "], %[" #TEMP12 "], 16 \n\t" \ + "subu %[temp18], %[" #TEMP0 "], %[" #TEMP8 "] \n\t" \ + "addu %[temp19], %[" #TEMP4 "], %[" #TEMP12 "] \n\t" \ + "addu %[" #TEMP0 "], %[temp16], %[temp19] \n\t" \ + "addu %[" #TEMP4 "], %[temp17], %[temp18] \n\t" \ + "subu %[" #TEMP8 "], %[temp17], %[temp18] \n\t" \ + "subu %[" #TEMP12 "], %[temp16], %[temp19] \n\t" \ + "lw %[temp20], 0(%[args]) \n\t" \ + "sra %[" #TEMP0 "], %[" #TEMP0 "], 3 \n\t" \ + "sra %[" #TEMP4 "], %[" #TEMP4 "], 3 
\n\t" \ + "sra %[" #TEMP8 "], %[" #TEMP8 "], 3 \n\t" \ + "sra %[" #TEMP12 "], %[" #TEMP12 "], 3 \n\t" \ + "lbu %[temp16], 0+" XSTR(BPS) "*" #A "(%[temp20]) \n\t" \ + "lbu %[temp17], 1+" XSTR(BPS) "*" #A "(%[temp20]) \n\t" \ + "lbu %[temp18], 2+" XSTR(BPS) "*" #A "(%[temp20]) \n\t" \ + "lbu %[temp19], 3+" XSTR(BPS) "*" #A "(%[temp20]) \n\t" \ + "addu %[" #TEMP0 "], %[temp16], %[" #TEMP0 "] \n\t" \ + "addu %[" #TEMP4 "], %[temp17], %[" #TEMP4 "] \n\t" \ + "addu %[" #TEMP8 "], %[temp18], %[" #TEMP8 "] \n\t" \ + "addu %[" #TEMP12 "], %[temp19], %[" #TEMP12 "] \n\t" \ + "slt %[temp16], %[" #TEMP0 "], $zero \n\t" \ + "slt %[temp17], %[" #TEMP4 "], $zero \n\t" \ + "slt %[temp18], %[" #TEMP8 "], $zero \n\t" \ + "slt %[temp19], %[" #TEMP12 "], $zero \n\t" \ + "movn %[" #TEMP0 "], $zero, %[temp16] \n\t" \ + "movn %[" #TEMP4 "], $zero, %[temp17] \n\t" \ + "movn %[" #TEMP8 "], $zero, %[temp18] \n\t" \ + "movn %[" #TEMP12 "], $zero, %[temp19] \n\t" \ + "addiu %[temp20], $zero, 255 \n\t" \ + "slt %[temp16], %[" #TEMP0 "], %[temp20] \n\t" \ + "slt %[temp17], %[" #TEMP4 "], %[temp20] \n\t" \ + "slt %[temp18], %[" #TEMP8 "], %[temp20] \n\t" \ + "slt %[temp19], %[" #TEMP12 "], %[temp20] \n\t" \ + "movz %[" #TEMP0 "], %[temp20], %[temp16] \n\t" \ + "movz %[" #TEMP4 "], %[temp20], %[temp17] \n\t" \ + "lw %[temp16], 8(%[args]) \n\t" \ + "movz %[" #TEMP8 "], %[temp20], %[temp18] \n\t" \ + "movz %[" #TEMP12 "], %[temp20], %[temp19] \n\t" \ + "sb %[" #TEMP0 "], 0+" XSTR(BPS) "*" #A "(%[temp16]) \n\t" \ + "sb %[" #TEMP4 "], 1+" XSTR(BPS) "*" #A "(%[temp16]) \n\t" \ + "sb %[" #TEMP8 "], 2+" XSTR(BPS) "*" #A "(%[temp16]) \n\t" \ + "sb %[" #TEMP12 "], 3+" XSTR(BPS) "*" #A "(%[temp16]) \n\t" + +// Does one or two inverse transforms. +static WEBP_INLINE void ITransformOne_MIPS32(const uint8_t* ref, + const int16_t* in, + uint8_t* dst) { + int temp0, temp1, temp2, temp3, temp4, temp5, temp6; + int temp7, temp8, temp9, temp10, temp11, temp12, temp13; + int temp14, temp15, temp16, temp17, temp18, temp19, temp20; + const int* args[3] = {(const int*)ref, (const int*)in, (const int*)dst}; + + __asm__ volatile( + "lw %[temp20], 4(%[args]) \n\t" + VERTICAL_PASS(0, 16, 8, 24, temp4, temp0, temp1, temp2, temp3) + VERTICAL_PASS(2, 18, 10, 26, temp8, temp4, temp5, temp6, temp7) + VERTICAL_PASS(4, 20, 12, 28, temp12, temp8, temp9, temp10, temp11) + VERTICAL_PASS(6, 22, 14, 30, temp20, temp12, temp13, temp14, temp15) + + HORIZONTAL_PASS(0, temp0, temp4, temp8, temp12) + HORIZONTAL_PASS(1, temp1, temp5, temp9, temp13) + HORIZONTAL_PASS(2, temp2, temp6, temp10, temp14) + HORIZONTAL_PASS(3, temp3, temp7, temp11, temp15) + + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [temp8]"=&r"(temp8), + [temp9]"=&r"(temp9), [temp10]"=&r"(temp10), [temp11]"=&r"(temp11), + [temp12]"=&r"(temp12), [temp13]"=&r"(temp13), [temp14]"=&r"(temp14), + [temp15]"=&r"(temp15), [temp16]"=&r"(temp16), [temp17]"=&r"(temp17), + [temp18]"=&r"(temp18), [temp19]"=&r"(temp19), [temp20]"=&r"(temp20) + : [args]"r"(args), [kC1]"r"(kC1), [kC2]"r"(kC2) + : "memory", "hi", "lo" + ); +} + +static void ITransform_MIPS32(const uint8_t* ref, const int16_t* in, + uint8_t* dst, int do_two) { + ITransformOne_MIPS32(ref, in, dst); + if (do_two) { + ITransformOne_MIPS32(ref + 4, in + 16, dst + 4); + } +} + +#undef VERTICAL_PASS +#undef HORIZONTAL_PASS + +// macro for one pass through for loop in QuantizeBlock +// QUANTDIV macro inlined +// J - offset in 
bytes (kZigzag[n] * 2) +// K - offset in bytes (kZigzag[n] * 4) +// N - offset in bytes (n * 2) +#define QUANTIZE_ONE(J, K, N) \ + "lh %[temp0], " #J "(%[ppin]) \n\t" \ + "lhu %[temp1], " #J "(%[ppsharpen]) \n\t" \ + "lw %[temp2], " #K "(%[ppzthresh]) \n\t" \ + "sra %[sign], %[temp0], 15 \n\t" \ + "xor %[coeff], %[temp0], %[sign] \n\t" \ + "subu %[coeff], %[coeff], %[sign] \n\t" \ + "addu %[coeff], %[coeff], %[temp1] \n\t" \ + "slt %[temp4], %[temp2], %[coeff] \n\t" \ + "addiu %[temp5], $zero, 0 \n\t" \ + "addiu %[level], $zero, 0 \n\t" \ + "beqz %[temp4], 2f \n\t" \ + "lhu %[temp1], " #J "(%[ppiq]) \n\t" \ + "lw %[temp2], " #K "(%[ppbias]) \n\t" \ + "lhu %[temp3], " #J "(%[ppq]) \n\t" \ + "mul %[level], %[coeff], %[temp1] \n\t" \ + "addu %[level], %[level], %[temp2] \n\t" \ + "sra %[level], %[level], 17 \n\t" \ + "slt %[temp4], %[max_level], %[level] \n\t" \ + "movn %[level], %[max_level], %[temp4] \n\t" \ + "xor %[level], %[level], %[sign] \n\t" \ + "subu %[level], %[level], %[sign] \n\t" \ + "mul %[temp5], %[level], %[temp3] \n\t" \ +"2: \n\t" \ + "sh %[temp5], " #J "(%[ppin]) \n\t" \ + "sh %[level], " #N "(%[pout]) \n\t" + +static int QuantizeBlock_MIPS32(int16_t in[16], int16_t out[16], + const VP8Matrix* const mtx) { + int temp0, temp1, temp2, temp3, temp4, temp5; + int sign, coeff, level, i; + int max_level = MAX_LEVEL; + + int16_t* ppin = &in[0]; + int16_t* pout = &out[0]; + const uint16_t* ppsharpen = &mtx->sharpen_[0]; + const uint32_t* ppzthresh = &mtx->zthresh_[0]; + const uint16_t* ppq = &mtx->q_[0]; + const uint16_t* ppiq = &mtx->iq_[0]; + const uint32_t* ppbias = &mtx->bias_[0]; + + __asm__ volatile( + QUANTIZE_ONE( 0, 0, 0) + QUANTIZE_ONE( 2, 4, 2) + QUANTIZE_ONE( 8, 16, 4) + QUANTIZE_ONE(16, 32, 6) + QUANTIZE_ONE(10, 20, 8) + QUANTIZE_ONE( 4, 8, 10) + QUANTIZE_ONE( 6, 12, 12) + QUANTIZE_ONE(12, 24, 14) + QUANTIZE_ONE(18, 36, 16) + QUANTIZE_ONE(24, 48, 18) + QUANTIZE_ONE(26, 52, 20) + QUANTIZE_ONE(20, 40, 22) + QUANTIZE_ONE(14, 28, 24) + QUANTIZE_ONE(22, 44, 26) + QUANTIZE_ONE(28, 56, 28) + QUANTIZE_ONE(30, 60, 30) + + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), + [temp2]"=&r"(temp2), [temp3]"=&r"(temp3), + [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [sign]"=&r"(sign), [coeff]"=&r"(coeff), + [level]"=&r"(level) + : [pout]"r"(pout), [ppin]"r"(ppin), + [ppiq]"r"(ppiq), [max_level]"r"(max_level), + [ppbias]"r"(ppbias), [ppzthresh]"r"(ppzthresh), + [ppsharpen]"r"(ppsharpen), [ppq]"r"(ppq) + : "memory", "hi", "lo" + ); + + // moved out from macro to increase possibility for earlier breaking + for (i = 15; i >= 0; i--) { + if (out[i]) return 1; + } + return 0; +} + +static int Quantize2Blocks_MIPS32(int16_t in[32], int16_t out[32], + const VP8Matrix* const mtx) { + int nz; + nz = QuantizeBlock_MIPS32(in + 0 * 16, out + 0 * 16, mtx) << 0; + nz |= QuantizeBlock_MIPS32(in + 1 * 16, out + 1 * 16, mtx) << 1; + return nz; +} + +#undef QUANTIZE_ONE + +// macro for one horizontal pass in Disto4x4 (TTransform) +// two calls of function TTransform are merged into single one +// A - offset in bytes to load from a and b buffers +// E..H - offsets in bytes to store first results to tmp buffer +// E1..H1 - offsets in bytes to store second results to tmp buffer +#define HORIZONTAL_PASS(A, E, F, G, H, E1, F1, G1, H1) \ + "lbu %[temp0], 0+" XSTR(BPS) "*" #A "(%[a]) \n\t" \ + "lbu %[temp1], 1+" XSTR(BPS) "*" #A "(%[a]) \n\t" \ + "lbu %[temp2], 2+" XSTR(BPS) "*" #A "(%[a]) \n\t" \ + "lbu %[temp3], 3+" XSTR(BPS) "*" #A "(%[a]) \n\t" \ + "lbu %[temp4], 0+" XSTR(BPS) "*" #A "(%[b]) \n\t" \ + "lbu 
%[temp5], 1+" XSTR(BPS) "*" #A "(%[b]) \n\t" \ + "lbu %[temp6], 2+" XSTR(BPS) "*" #A "(%[b]) \n\t" \ + "lbu %[temp7], 3+" XSTR(BPS) "*" #A "(%[b]) \n\t" \ + "addu %[temp8], %[temp0], %[temp2] \n\t" \ + "subu %[temp0], %[temp0], %[temp2] \n\t" \ + "addu %[temp2], %[temp1], %[temp3] \n\t" \ + "subu %[temp1], %[temp1], %[temp3] \n\t" \ + "addu %[temp3], %[temp4], %[temp6] \n\t" \ + "subu %[temp4], %[temp4], %[temp6] \n\t" \ + "addu %[temp6], %[temp5], %[temp7] \n\t" \ + "subu %[temp5], %[temp5], %[temp7] \n\t" \ + "addu %[temp7], %[temp8], %[temp2] \n\t" \ + "subu %[temp2], %[temp8], %[temp2] \n\t" \ + "addu %[temp8], %[temp0], %[temp1] \n\t" \ + "subu %[temp0], %[temp0], %[temp1] \n\t" \ + "addu %[temp1], %[temp3], %[temp6] \n\t" \ + "subu %[temp3], %[temp3], %[temp6] \n\t" \ + "addu %[temp6], %[temp4], %[temp5] \n\t" \ + "subu %[temp4], %[temp4], %[temp5] \n\t" \ + "sw %[temp7], " #E "(%[tmp]) \n\t" \ + "sw %[temp2], " #H "(%[tmp]) \n\t" \ + "sw %[temp8], " #F "(%[tmp]) \n\t" \ + "sw %[temp0], " #G "(%[tmp]) \n\t" \ + "sw %[temp1], " #E1 "(%[tmp]) \n\t" \ + "sw %[temp3], " #H1 "(%[tmp]) \n\t" \ + "sw %[temp6], " #F1 "(%[tmp]) \n\t" \ + "sw %[temp4], " #G1 "(%[tmp]) \n\t" + +// macro for one vertical pass in Disto4x4 (TTransform) +// two calls of function TTransform are merged into single one +// since only one accu is available in mips32r1 instruction set +// first is done second call of function TTransform and after +// that first one. +// const int sum1 = TTransform(a, w); +// const int sum2 = TTransform(b, w); +// return abs(sum2 - sum1) >> 5; +// (sum2 - sum1) is calculated with madds (sub2) and msubs (sub1) +// A..D - offsets in bytes to load first results from tmp buffer +// A1..D1 - offsets in bytes to load second results from tmp buffer +// E..H - offsets in bytes to load from w buffer +#define VERTICAL_PASS(A, B, C, D, A1, B1, C1, D1, E, F, G, H) \ + "lw %[temp0], " #A1 "(%[tmp]) \n\t" \ + "lw %[temp1], " #C1 "(%[tmp]) \n\t" \ + "lw %[temp2], " #B1 "(%[tmp]) \n\t" \ + "lw %[temp3], " #D1 "(%[tmp]) \n\t" \ + "addu %[temp8], %[temp0], %[temp1] \n\t" \ + "subu %[temp0], %[temp0], %[temp1] \n\t" \ + "addu %[temp1], %[temp2], %[temp3] \n\t" \ + "subu %[temp2], %[temp2], %[temp3] \n\t" \ + "addu %[temp3], %[temp8], %[temp1] \n\t" \ + "subu %[temp8], %[temp8], %[temp1] \n\t" \ + "addu %[temp1], %[temp0], %[temp2] \n\t" \ + "subu %[temp0], %[temp0], %[temp2] \n\t" \ + "sra %[temp4], %[temp3], 31 \n\t" \ + "sra %[temp5], %[temp1], 31 \n\t" \ + "sra %[temp6], %[temp0], 31 \n\t" \ + "sra %[temp7], %[temp8], 31 \n\t" \ + "xor %[temp3], %[temp3], %[temp4] \n\t" \ + "xor %[temp1], %[temp1], %[temp5] \n\t" \ + "xor %[temp0], %[temp0], %[temp6] \n\t" \ + "xor %[temp8], %[temp8], %[temp7] \n\t" \ + "subu %[temp3], %[temp3], %[temp4] \n\t" \ + "subu %[temp1], %[temp1], %[temp5] \n\t" \ + "subu %[temp0], %[temp0], %[temp6] \n\t" \ + "subu %[temp8], %[temp8], %[temp7] \n\t" \ + "lhu %[temp4], " #E "(%[w]) \n\t" \ + "lhu %[temp5], " #F "(%[w]) \n\t" \ + "lhu %[temp6], " #G "(%[w]) \n\t" \ + "lhu %[temp7], " #H "(%[w]) \n\t" \ + "madd %[temp4], %[temp3] \n\t" \ + "madd %[temp5], %[temp1] \n\t" \ + "madd %[temp6], %[temp0] \n\t" \ + "madd %[temp7], %[temp8] \n\t" \ + "lw %[temp0], " #A "(%[tmp]) \n\t" \ + "lw %[temp1], " #C "(%[tmp]) \n\t" \ + "lw %[temp2], " #B "(%[tmp]) \n\t" \ + "lw %[temp3], " #D "(%[tmp]) \n\t" \ + "addu %[temp8], %[temp0], %[temp1] \n\t" \ + "subu %[temp0], %[temp0], %[temp1] \n\t" \ + "addu %[temp1], %[temp2], %[temp3] \n\t" \ + "subu %[temp2], %[temp2], %[temp3] \n\t" \ + "addu 
%[temp3], %[temp8], %[temp1] \n\t" \ + "subu %[temp1], %[temp8], %[temp1] \n\t" \ + "addu %[temp8], %[temp0], %[temp2] \n\t" \ + "subu %[temp0], %[temp0], %[temp2] \n\t" \ + "sra %[temp2], %[temp3], 31 \n\t" \ + "xor %[temp3], %[temp3], %[temp2] \n\t" \ + "subu %[temp3], %[temp3], %[temp2] \n\t" \ + "msub %[temp4], %[temp3] \n\t" \ + "sra %[temp2], %[temp8], 31 \n\t" \ + "sra %[temp3], %[temp0], 31 \n\t" \ + "sra %[temp4], %[temp1], 31 \n\t" \ + "xor %[temp8], %[temp8], %[temp2] \n\t" \ + "xor %[temp0], %[temp0], %[temp3] \n\t" \ + "xor %[temp1], %[temp1], %[temp4] \n\t" \ + "subu %[temp8], %[temp8], %[temp2] \n\t" \ + "subu %[temp0], %[temp0], %[temp3] \n\t" \ + "subu %[temp1], %[temp1], %[temp4] \n\t" \ + "msub %[temp5], %[temp8] \n\t" \ + "msub %[temp6], %[temp0] \n\t" \ + "msub %[temp7], %[temp1] \n\t" + +static int Disto4x4_MIPS32(const uint8_t* const a, const uint8_t* const b, + const uint16_t* const w) { + int tmp[32]; + int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8; + + __asm__ volatile( + HORIZONTAL_PASS(0, 0, 4, 8, 12, 64, 68, 72, 76) + HORIZONTAL_PASS(1, 16, 20, 24, 28, 80, 84, 88, 92) + HORIZONTAL_PASS(2, 32, 36, 40, 44, 96, 100, 104, 108) + HORIZONTAL_PASS(3, 48, 52, 56, 60, 112, 116, 120, 124) + "mthi $zero \n\t" + "mtlo $zero \n\t" + VERTICAL_PASS( 0, 16, 32, 48, 64, 80, 96, 112, 0, 8, 16, 24) + VERTICAL_PASS( 4, 20, 36, 52, 68, 84, 100, 116, 2, 10, 18, 26) + VERTICAL_PASS( 8, 24, 40, 56, 72, 88, 104, 120, 4, 12, 20, 28) + VERTICAL_PASS(12, 28, 44, 60, 76, 92, 108, 124, 6, 14, 22, 30) + "mflo %[temp0] \n\t" + "sra %[temp1], %[temp0], 31 \n\t" + "xor %[temp0], %[temp0], %[temp1] \n\t" + "subu %[temp0], %[temp0], %[temp1] \n\t" + "sra %[temp0], %[temp0], 5 \n\t" + + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [temp8]"=&r"(temp8) + : [a]"r"(a), [b]"r"(b), [w]"r"(w), [tmp]"r"(tmp) + : "memory", "hi", "lo" + ); + + return temp0; +} + +#undef VERTICAL_PASS +#undef HORIZONTAL_PASS + +static int Disto16x16_MIPS32(const uint8_t* const a, const uint8_t* const b, + const uint16_t* const w) { + int D = 0; + int x, y; + for (y = 0; y < 16 * BPS; y += 4 * BPS) { + for (x = 0; x < 16; x += 4) { + D += Disto4x4_MIPS32(a + x + y, b + x + y, w); + } + } + return D; +} + +// macro for one horizontal pass in FTransform +// temp0..temp15 holds tmp[0]..tmp[15] +// A - offset in bytes to load from src and ref buffers +// TEMP0..TEMP3 - registers for corresponding tmp elements +#define HORIZONTAL_PASS(A, TEMP0, TEMP1, TEMP2, TEMP3) \ + "lw %[" #TEMP1 "], 0(%[args]) \n\t" \ + "lw %[" #TEMP2 "], 4(%[args]) \n\t" \ + "lbu %[temp16], 0+" XSTR(BPS) "*" #A "(%[" #TEMP1 "]) \n\t" \ + "lbu %[temp17], 0+" XSTR(BPS) "*" #A "(%[" #TEMP2 "]) \n\t" \ + "lbu %[temp18], 1+" XSTR(BPS) "*" #A "(%[" #TEMP1 "]) \n\t" \ + "lbu %[temp19], 1+" XSTR(BPS) "*" #A "(%[" #TEMP2 "]) \n\t" \ + "subu %[temp20], %[temp16], %[temp17] \n\t" \ + "lbu %[temp16], 2+" XSTR(BPS) "*" #A "(%[" #TEMP1 "]) \n\t" \ + "lbu %[temp17], 2+" XSTR(BPS) "*" #A "(%[" #TEMP2 "]) \n\t" \ + "subu %[" #TEMP0 "], %[temp18], %[temp19] \n\t" \ + "lbu %[temp18], 3+" XSTR(BPS) "*" #A "(%[" #TEMP1 "]) \n\t" \ + "lbu %[temp19], 3+" XSTR(BPS) "*" #A "(%[" #TEMP2 "]) \n\t" \ + "subu %[" #TEMP1 "], %[temp16], %[temp17] \n\t" \ + "subu %[" #TEMP2 "], %[temp18], %[temp19] \n\t" \ + "addu %[" #TEMP3 "], %[temp20], %[" #TEMP2 "] \n\t" \ + "subu %[" #TEMP2 "], %[temp20], %[" #TEMP2 "] \n\t" \ + "addu %[temp20], %[" 
#TEMP0 "], %[" #TEMP1 "] \n\t" \ + "subu %[" #TEMP0 "], %[" #TEMP0 "], %[" #TEMP1 "] \n\t" \ + "mul %[temp16], %[" #TEMP2 "], %[c5352] \n\t" \ + "mul %[temp17], %[" #TEMP2 "], %[c2217] \n\t" \ + "mul %[temp18], %[" #TEMP0 "], %[c5352] \n\t" \ + "mul %[temp19], %[" #TEMP0 "], %[c2217] \n\t" \ + "addu %[" #TEMP1 "], %[" #TEMP3 "], %[temp20] \n\t" \ + "subu %[temp20], %[" #TEMP3 "], %[temp20] \n\t" \ + "sll %[" #TEMP0 "], %[" #TEMP1 "], 3 \n\t" \ + "sll %[" #TEMP2 "], %[temp20], 3 \n\t" \ + "addiu %[temp16], %[temp16], 1812 \n\t" \ + "addiu %[temp17], %[temp17], 937 \n\t" \ + "addu %[temp16], %[temp16], %[temp19] \n\t" \ + "subu %[temp17], %[temp17], %[temp18] \n\t" \ + "sra %[" #TEMP1 "], %[temp16], 9 \n\t" \ + "sra %[" #TEMP3 "], %[temp17], 9 \n\t" + +// macro for one vertical pass in FTransform +// temp0..temp15 holds tmp[0]..tmp[15] +// A..D - offsets in bytes to store to out buffer +// TEMP0, TEMP4, TEMP8 and TEMP12 - registers for corresponding tmp elements +#define VERTICAL_PASS(A, B, C, D, TEMP0, TEMP4, TEMP8, TEMP12) \ + "addu %[temp16], %[" #TEMP0 "], %[" #TEMP12 "] \n\t" \ + "subu %[temp19], %[" #TEMP0 "], %[" #TEMP12 "] \n\t" \ + "addu %[temp17], %[" #TEMP4 "], %[" #TEMP8 "] \n\t" \ + "subu %[temp18], %[" #TEMP4 "], %[" #TEMP8 "] \n\t" \ + "mul %[" #TEMP8 "], %[temp19], %[c2217] \n\t" \ + "mul %[" #TEMP12 "], %[temp18], %[c2217] \n\t" \ + "mul %[" #TEMP4 "], %[temp19], %[c5352] \n\t" \ + "mul %[temp18], %[temp18], %[c5352] \n\t" \ + "addiu %[temp16], %[temp16], 7 \n\t" \ + "addu %[" #TEMP0 "], %[temp16], %[temp17] \n\t" \ + "sra %[" #TEMP0 "], %[" #TEMP0 "], 4 \n\t" \ + "addu %[" #TEMP12 "], %[" #TEMP12 "], %[" #TEMP4 "] \n\t" \ + "subu %[" #TEMP4 "], %[temp16], %[temp17] \n\t" \ + "sra %[" #TEMP4 "], %[" #TEMP4 "], 4 \n\t" \ + "addiu %[" #TEMP8 "], %[" #TEMP8 "], 30000 \n\t" \ + "addiu %[" #TEMP12 "], %[" #TEMP12 "], 12000 \n\t" \ + "addiu %[" #TEMP8 "], %[" #TEMP8 "], 21000 \n\t" \ + "subu %[" #TEMP8 "], %[" #TEMP8 "], %[temp18] \n\t" \ + "sra %[" #TEMP12 "], %[" #TEMP12 "], 16 \n\t" \ + "sra %[" #TEMP8 "], %[" #TEMP8 "], 16 \n\t" \ + "addiu %[temp16], %[" #TEMP12 "], 1 \n\t" \ + "movn %[" #TEMP12 "], %[temp16], %[temp19] \n\t" \ + "sh %[" #TEMP0 "], " #A "(%[temp20]) \n\t" \ + "sh %[" #TEMP4 "], " #C "(%[temp20]) \n\t" \ + "sh %[" #TEMP8 "], " #D "(%[temp20]) \n\t" \ + "sh %[" #TEMP12 "], " #B "(%[temp20]) \n\t" + +static void FTransform_MIPS32(const uint8_t* src, const uint8_t* ref, + int16_t* out) { + int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8; + int temp9, temp10, temp11, temp12, temp13, temp14, temp15, temp16; + int temp17, temp18, temp19, temp20; + const int c2217 = 2217; + const int c5352 = 5352; + const int* const args[3] = + { (const int*)src, (const int*)ref, (const int*)out }; + + __asm__ volatile( + HORIZONTAL_PASS(0, temp0, temp1, temp2, temp3) + HORIZONTAL_PASS(1, temp4, temp5, temp6, temp7) + HORIZONTAL_PASS(2, temp8, temp9, temp10, temp11) + HORIZONTAL_PASS(3, temp12, temp13, temp14, temp15) + "lw %[temp20], 8(%[args]) \n\t" + VERTICAL_PASS(0, 8, 16, 24, temp0, temp4, temp8, temp12) + VERTICAL_PASS(2, 10, 18, 26, temp1, temp5, temp9, temp13) + VERTICAL_PASS(4, 12, 20, 28, temp2, temp6, temp10, temp14) + VERTICAL_PASS(6, 14, 22, 30, temp3, temp7, temp11, temp15) + + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [temp8]"=&r"(temp8), + [temp9]"=&r"(temp9), [temp10]"=&r"(temp10), [temp11]"=&r"(temp11), + 
[temp12]"=&r"(temp12), [temp13]"=&r"(temp13), [temp14]"=&r"(temp14), + [temp15]"=&r"(temp15), [temp16]"=&r"(temp16), [temp17]"=&r"(temp17), + [temp18]"=&r"(temp18), [temp19]"=&r"(temp19), [temp20]"=&r"(temp20) + : [args]"r"(args), [c2217]"r"(c2217), [c5352]"r"(c5352) + : "memory", "hi", "lo" + ); +} + +#undef VERTICAL_PASS +#undef HORIZONTAL_PASS + +#if !defined(WORK_AROUND_GCC) + +#define GET_SSE_INNER(A, B, C, D) \ + "lbu %[temp0], " #A "(%[a]) \n\t" \ + "lbu %[temp1], " #A "(%[b]) \n\t" \ + "lbu %[temp2], " #B "(%[a]) \n\t" \ + "lbu %[temp3], " #B "(%[b]) \n\t" \ + "lbu %[temp4], " #C "(%[a]) \n\t" \ + "lbu %[temp5], " #C "(%[b]) \n\t" \ + "lbu %[temp6], " #D "(%[a]) \n\t" \ + "lbu %[temp7], " #D "(%[b]) \n\t" \ + "subu %[temp0], %[temp0], %[temp1] \n\t" \ + "subu %[temp2], %[temp2], %[temp3] \n\t" \ + "subu %[temp4], %[temp4], %[temp5] \n\t" \ + "subu %[temp6], %[temp6], %[temp7] \n\t" \ + "madd %[temp0], %[temp0] \n\t" \ + "madd %[temp2], %[temp2] \n\t" \ + "madd %[temp4], %[temp4] \n\t" \ + "madd %[temp6], %[temp6] \n\t" + +#define GET_SSE(A, B, C, D) \ + GET_SSE_INNER(A, A + 1, A + 2, A + 3) \ + GET_SSE_INNER(B, B + 1, B + 2, B + 3) \ + GET_SSE_INNER(C, C + 1, C + 2, C + 3) \ + GET_SSE_INNER(D, D + 1, D + 2, D + 3) + +static int SSE16x16_MIPS32(const uint8_t* a, const uint8_t* b) { + int count; + int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7; + + __asm__ volatile( + "mult $zero, $zero \n\t" + + GET_SSE( 0 * BPS, 4 + 0 * BPS, 8 + 0 * BPS, 12 + 0 * BPS) + GET_SSE( 1 * BPS, 4 + 1 * BPS, 8 + 1 * BPS, 12 + 1 * BPS) + GET_SSE( 2 * BPS, 4 + 2 * BPS, 8 + 2 * BPS, 12 + 2 * BPS) + GET_SSE( 3 * BPS, 4 + 3 * BPS, 8 + 3 * BPS, 12 + 3 * BPS) + GET_SSE( 4 * BPS, 4 + 4 * BPS, 8 + 4 * BPS, 12 + 4 * BPS) + GET_SSE( 5 * BPS, 4 + 5 * BPS, 8 + 5 * BPS, 12 + 5 * BPS) + GET_SSE( 6 * BPS, 4 + 6 * BPS, 8 + 6 * BPS, 12 + 6 * BPS) + GET_SSE( 7 * BPS, 4 + 7 * BPS, 8 + 7 * BPS, 12 + 7 * BPS) + GET_SSE( 8 * BPS, 4 + 8 * BPS, 8 + 8 * BPS, 12 + 8 * BPS) + GET_SSE( 9 * BPS, 4 + 9 * BPS, 8 + 9 * BPS, 12 + 9 * BPS) + GET_SSE(10 * BPS, 4 + 10 * BPS, 8 + 10 * BPS, 12 + 10 * BPS) + GET_SSE(11 * BPS, 4 + 11 * BPS, 8 + 11 * BPS, 12 + 11 * BPS) + GET_SSE(12 * BPS, 4 + 12 * BPS, 8 + 12 * BPS, 12 + 12 * BPS) + GET_SSE(13 * BPS, 4 + 13 * BPS, 8 + 13 * BPS, 12 + 13 * BPS) + GET_SSE(14 * BPS, 4 + 14 * BPS, 8 + 14 * BPS, 12 + 14 * BPS) + GET_SSE(15 * BPS, 4 + 15 * BPS, 8 + 15 * BPS, 12 + 15 * BPS) + + "mflo %[count] \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [count]"=&r"(count) + : [a]"r"(a), [b]"r"(b) + : "memory", "hi", "lo" + ); + return count; +} + +static int SSE16x8_MIPS32(const uint8_t* a, const uint8_t* b) { + int count; + int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7; + + __asm__ volatile( + "mult $zero, $zero \n\t" + + GET_SSE( 0 * BPS, 4 + 0 * BPS, 8 + 0 * BPS, 12 + 0 * BPS) + GET_SSE( 1 * BPS, 4 + 1 * BPS, 8 + 1 * BPS, 12 + 1 * BPS) + GET_SSE( 2 * BPS, 4 + 2 * BPS, 8 + 2 * BPS, 12 + 2 * BPS) + GET_SSE( 3 * BPS, 4 + 3 * BPS, 8 + 3 * BPS, 12 + 3 * BPS) + GET_SSE( 4 * BPS, 4 + 4 * BPS, 8 + 4 * BPS, 12 + 4 * BPS) + GET_SSE( 5 * BPS, 4 + 5 * BPS, 8 + 5 * BPS, 12 + 5 * BPS) + GET_SSE( 6 * BPS, 4 + 6 * BPS, 8 + 6 * BPS, 12 + 6 * BPS) + GET_SSE( 7 * BPS, 4 + 7 * BPS, 8 + 7 * BPS, 12 + 7 * BPS) + + "mflo %[count] \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + 
[temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [count]"=&r"(count) + : [a]"r"(a), [b]"r"(b) + : "memory", "hi", "lo" + ); + return count; +} + +static int SSE8x8_MIPS32(const uint8_t* a, const uint8_t* b) { + int count; + int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7; + + __asm__ volatile( + "mult $zero, $zero \n\t" + + GET_SSE(0 * BPS, 4 + 0 * BPS, 1 * BPS, 4 + 1 * BPS) + GET_SSE(2 * BPS, 4 + 2 * BPS, 3 * BPS, 4 + 3 * BPS) + GET_SSE(4 * BPS, 4 + 4 * BPS, 5 * BPS, 4 + 5 * BPS) + GET_SSE(6 * BPS, 4 + 6 * BPS, 7 * BPS, 4 + 7 * BPS) + + "mflo %[count] \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [count]"=&r"(count) + : [a]"r"(a), [b]"r"(b) + : "memory", "hi", "lo" + ); + return count; +} + +static int SSE4x4_MIPS32(const uint8_t* a, const uint8_t* b) { + int count; + int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7; + + __asm__ volatile( + "mult $zero, $zero \n\t" + + GET_SSE(0 * BPS, 1 * BPS, 2 * BPS, 3 * BPS) + + "mflo %[count] \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [count]"=&r"(count) + : [a]"r"(a), [b]"r"(b) + : "memory", "hi", "lo" + ); + return count; +} + +#undef GET_SSE +#undef GET_SSE_INNER + +#endif // !WORK_AROUND_GCC + +//------------------------------------------------------------------------------ +// Entry point + +extern void VP8EncDspInitMIPS32(void); + +WEBP_TSAN_IGNORE_FUNCTION void VP8EncDspInitMIPS32(void) { + VP8ITransform = ITransform_MIPS32; + VP8FTransform = FTransform_MIPS32; + + VP8EncQuantizeBlock = QuantizeBlock_MIPS32; + VP8EncQuantize2Blocks = Quantize2Blocks_MIPS32; + + VP8TDisto4x4 = Disto4x4_MIPS32; + VP8TDisto16x16 = Disto16x16_MIPS32; + +#if !defined(WORK_AROUND_GCC) + VP8SSE16x16 = SSE16x16_MIPS32; + VP8SSE8x8 = SSE8x8_MIPS32; + VP8SSE16x8 = SSE16x8_MIPS32; + VP8SSE4x4 = SSE4x4_MIPS32; +#endif +} + +#else // !WEBP_USE_MIPS32 + +WEBP_DSP_INIT_STUB(VP8EncDspInitMIPS32) + +#endif // WEBP_USE_MIPS32 diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_enc_mips_dsp_r2.c b/vendor/github.com/sizeofint/webpanimation/dsp_enc_mips_dsp_r2.c new file mode 100644 index 00000000..04bbbf08 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_enc_mips_dsp_r2.c @@ -0,0 +1,1517 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// MIPS version of speed-critical encoding functions. 
+// +// Author(s): Darko Laus (darko.laus@imgtec.com) +// Mirko Raus (mirko.raus@imgtec.com) + +#include "dsp_dsp.h" + +#if defined(WEBP_USE_MIPS_DSP_R2) + +#include "dsp_mips_macro.h" +#include "enc_cost_enc.h" +#include "enc_vp8i_enc.h" + +static const int kC1 = 20091 + (1 << 16); +static const int kC2 = 35468; + +// O - output +// I - input (macro doesn't change it) +#define ADD_SUB_HALVES_X4(O0, O1, O2, O3, O4, O5, O6, O7, \ + I0, I1, I2, I3, I4, I5, I6, I7) \ + "addq.ph %[" #O0 "], %[" #I0 "], %[" #I1 "] \n\t" \ + "subq.ph %[" #O1 "], %[" #I0 "], %[" #I1 "] \n\t" \ + "addq.ph %[" #O2 "], %[" #I2 "], %[" #I3 "] \n\t" \ + "subq.ph %[" #O3 "], %[" #I2 "], %[" #I3 "] \n\t" \ + "addq.ph %[" #O4 "], %[" #I4 "], %[" #I5 "] \n\t" \ + "subq.ph %[" #O5 "], %[" #I4 "], %[" #I5 "] \n\t" \ + "addq.ph %[" #O6 "], %[" #I6 "], %[" #I7 "] \n\t" \ + "subq.ph %[" #O7 "], %[" #I6 "], %[" #I7 "] \n\t" + +// IO - input/output +#define ABS_X8(IO0, IO1, IO2, IO3, IO4, IO5, IO6, IO7) \ + "absq_s.ph %[" #IO0 "], %[" #IO0 "] \n\t" \ + "absq_s.ph %[" #IO1 "], %[" #IO1 "] \n\t" \ + "absq_s.ph %[" #IO2 "], %[" #IO2 "] \n\t" \ + "absq_s.ph %[" #IO3 "], %[" #IO3 "] \n\t" \ + "absq_s.ph %[" #IO4 "], %[" #IO4 "] \n\t" \ + "absq_s.ph %[" #IO5 "], %[" #IO5 "] \n\t" \ + "absq_s.ph %[" #IO6 "], %[" #IO6 "] \n\t" \ + "absq_s.ph %[" #IO7 "], %[" #IO7 "] \n\t" + +// dpa.w.ph $ac0 temp0 ,temp1 +// $ac += temp0[31..16] * temp1[31..16] + temp0[15..0] * temp1[15..0] +// dpax.w.ph $ac0 temp0 ,temp1 +// $ac += temp0[31..16] * temp1[15..0] + temp0[15..0] * temp1[31..16] +// O - output +// I - input (macro doesn't change it) +#define MUL_HALF(O0, I0, I1, I2, I3, I4, I5, I6, I7, \ + I8, I9, I10, I11, I12, I13, I14, I15) \ + "mult $ac0, $zero, $zero \n\t" \ + "dpa.w.ph $ac0, %[" #I2 "], %[" #I0 "] \n\t" \ + "dpax.w.ph $ac0, %[" #I5 "], %[" #I6 "] \n\t" \ + "dpa.w.ph $ac0, %[" #I8 "], %[" #I9 "] \n\t" \ + "dpax.w.ph $ac0, %[" #I11 "], %[" #I4 "] \n\t" \ + "dpa.w.ph $ac0, %[" #I12 "], %[" #I7 "] \n\t" \ + "dpax.w.ph $ac0, %[" #I13 "], %[" #I1 "] \n\t" \ + "dpa.w.ph $ac0, %[" #I14 "], %[" #I3 "] \n\t" \ + "dpax.w.ph $ac0, %[" #I15 "], %[" #I10 "] \n\t" \ + "mflo %[" #O0 "], $ac0 \n\t" + +#define OUTPUT_EARLY_CLOBBER_REGS_17() \ + OUTPUT_EARLY_CLOBBER_REGS_10(), \ + [temp11]"=&r"(temp11), [temp12]"=&r"(temp12), [temp13]"=&r"(temp13), \ + [temp14]"=&r"(temp14), [temp15]"=&r"(temp15), [temp16]"=&r"(temp16), \ + [temp17]"=&r"(temp17) + +// macro for one horizontal pass in FTransform +// temp0..temp15 holds tmp[0]..tmp[15] +// A - offset in bytes to load from src and ref buffers +// TEMP0..TEMP3 - registers for corresponding tmp elements +#define HORIZONTAL_PASS(A, TEMP0, TEMP1, TEMP2, TEMP3) \ + "lw %[" #TEMP0 "], 0(%[args]) \n\t" \ + "lw %[" #TEMP1 "], 4(%[args]) \n\t" \ + "lw %[" #TEMP2 "], " XSTR(BPS) "*" #A "(%[" #TEMP0 "]) \n\t" \ + "lw %[" #TEMP3 "], " XSTR(BPS) "*" #A "(%[" #TEMP1 "]) \n\t" \ + "preceu.ph.qbl %[" #TEMP0 "], %[" #TEMP2 "] \n\t" \ + "preceu.ph.qbl %[" #TEMP1 "], %[" #TEMP3 "] \n\t" \ + "preceu.ph.qbr %[" #TEMP2 "], %[" #TEMP2 "] \n\t" \ + "preceu.ph.qbr %[" #TEMP3 "], %[" #TEMP3 "] \n\t" \ + "subq.ph %[" #TEMP0 "], %[" #TEMP0 "], %[" #TEMP1 "] \n\t" \ + "subq.ph %[" #TEMP2 "], %[" #TEMP2 "], %[" #TEMP3 "] \n\t" \ + "rotr %[" #TEMP0 "], %[" #TEMP0 "], 16 \n\t" \ + "addq.ph %[" #TEMP1 "], %[" #TEMP2 "], %[" #TEMP0 "] \n\t" \ + "subq.ph %[" #TEMP3 "], %[" #TEMP2 "], %[" #TEMP0 "] \n\t" \ + "seh %[" #TEMP0 "], %[" #TEMP1 "] \n\t" \ + "sra %[temp16], %[" #TEMP1 "], 16 \n\t" \ + "seh %[temp19], %[" #TEMP3 "] \n\t" \ + "sra 
%[" #TEMP3 "], %[" #TEMP3 "], 16 \n\t" \ + "subu %[" #TEMP2 "], %[" #TEMP0 "], %[temp16] \n\t" \ + "addu %[" #TEMP0 "], %[" #TEMP0 "], %[temp16] \n\t" \ + "mul %[temp17], %[temp19], %[c2217] \n\t" \ + "mul %[temp18], %[" #TEMP3 "], %[c5352] \n\t" \ + "mul %[" #TEMP1 "], %[temp19], %[c5352] \n\t" \ + "mul %[temp16], %[" #TEMP3 "], %[c2217] \n\t" \ + "sll %[" #TEMP2 "], %[" #TEMP2 "], 3 \n\t" \ + "sll %[" #TEMP0 "], %[" #TEMP0 "], 3 \n\t" \ + "subu %[" #TEMP3 "], %[temp17], %[temp18] \n\t" \ + "addu %[" #TEMP1 "], %[temp16], %[" #TEMP1 "] \n\t" \ + "addiu %[" #TEMP3 "], %[" #TEMP3 "], 937 \n\t" \ + "addiu %[" #TEMP1 "], %[" #TEMP1 "], 1812 \n\t" \ + "sra %[" #TEMP3 "], %[" #TEMP3 "], 9 \n\t" \ + "sra %[" #TEMP1 "], %[" #TEMP1 "], 9 \n\t" + +// macro for one vertical pass in FTransform +// temp0..temp15 holds tmp[0]..tmp[15] +// A..D - offsets in bytes to store to out buffer +// TEMP0, TEMP4, TEMP8 and TEMP12 - registers for corresponding tmp elements +#define VERTICAL_PASS(A, B, C, D, TEMP0, TEMP4, TEMP8, TEMP12) \ + "addu %[temp16], %[" #TEMP0 "], %[" #TEMP12 "] \n\t" \ + "subu %[temp19], %[" #TEMP0 "], %[" #TEMP12 "] \n\t" \ + "addu %[temp17], %[" #TEMP4 "], %[" #TEMP8 "] \n\t" \ + "subu %[temp18], %[" #TEMP4 "], %[" #TEMP8 "] \n\t" \ + "mul %[" #TEMP8 "], %[temp19], %[c2217] \n\t" \ + "mul %[" #TEMP12 "], %[temp18], %[c2217] \n\t" \ + "mul %[" #TEMP4 "], %[temp19], %[c5352] \n\t" \ + "mul %[temp18], %[temp18], %[c5352] \n\t" \ + "addiu %[temp16], %[temp16], 7 \n\t" \ + "addu %[" #TEMP0 "], %[temp16], %[temp17] \n\t" \ + "sra %[" #TEMP0 "], %[" #TEMP0 "], 4 \n\t" \ + "addu %[" #TEMP12 "], %[" #TEMP12 "], %[" #TEMP4 "] \n\t" \ + "subu %[" #TEMP4 "], %[temp16], %[temp17] \n\t" \ + "sra %[" #TEMP4 "], %[" #TEMP4 "], 4 \n\t" \ + "addiu %[" #TEMP8 "], %[" #TEMP8 "], 30000 \n\t" \ + "addiu %[" #TEMP12 "], %[" #TEMP12 "], 12000 \n\t" \ + "addiu %[" #TEMP8 "], %[" #TEMP8 "], 21000 \n\t" \ + "subu %[" #TEMP8 "], %[" #TEMP8 "], %[temp18] \n\t" \ + "sra %[" #TEMP12 "], %[" #TEMP12 "], 16 \n\t" \ + "sra %[" #TEMP8 "], %[" #TEMP8 "], 16 \n\t" \ + "addiu %[temp16], %[" #TEMP12 "], 1 \n\t" \ + "movn %[" #TEMP12 "], %[temp16], %[temp19] \n\t" \ + "sh %[" #TEMP0 "], " #A "(%[temp20]) \n\t" \ + "sh %[" #TEMP4 "], " #C "(%[temp20]) \n\t" \ + "sh %[" #TEMP8 "], " #D "(%[temp20]) \n\t" \ + "sh %[" #TEMP12 "], " #B "(%[temp20]) \n\t" + +static void FTransform_MIPSdspR2(const uint8_t* src, const uint8_t* ref, + int16_t* out) { + const int c2217 = 2217; + const int c5352 = 5352; + int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8; + int temp9, temp10, temp11, temp12, temp13, temp14, temp15, temp16; + int temp17, temp18, temp19, temp20; + const int* const args[3] = + { (const int*)src, (const int*)ref, (const int*)out }; + + __asm__ volatile ( + HORIZONTAL_PASS(0, temp0, temp1, temp2, temp3) + HORIZONTAL_PASS(1, temp4, temp5, temp6, temp7) + HORIZONTAL_PASS(2, temp8, temp9, temp10, temp11) + HORIZONTAL_PASS(3, temp12, temp13, temp14, temp15) + "lw %[temp20], 8(%[args]) \n\t" + VERTICAL_PASS(0, 8, 16, 24, temp0, temp4, temp8, temp12) + VERTICAL_PASS(2, 10, 18, 26, temp1, temp5, temp9, temp13) + VERTICAL_PASS(4, 12, 20, 28, temp2, temp6, temp10, temp14) + VERTICAL_PASS(6, 14, 22, 30, temp3, temp7, temp11, temp15) + OUTPUT_EARLY_CLOBBER_REGS_18(), + [temp0]"=&r"(temp0), [temp19]"=&r"(temp19), [temp20]"=&r"(temp20) + : [args]"r"(args), [c2217]"r"(c2217), [c5352]"r"(c5352) + : "memory", "hi", "lo" + ); +} + +#undef VERTICAL_PASS +#undef HORIZONTAL_PASS + +static WEBP_INLINE void ITransformOne(const 
uint8_t* ref, const int16_t* in, + uint8_t* dst) { + int temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9; + int temp10, temp11, temp12, temp13, temp14, temp15, temp16, temp17, temp18; + + __asm__ volatile ( + "ulw %[temp1], 0(%[in]) \n\t" + "ulw %[temp2], 16(%[in]) \n\t" + LOAD_IN_X2(temp5, temp6, 24, 26) + ADD_SUB_HALVES(temp3, temp4, temp1, temp2) + LOAD_IN_X2(temp1, temp2, 8, 10) + MUL_SHIFT_SUM(temp7, temp8, temp9, temp10, temp11, temp12, temp13, temp14, + temp10, temp8, temp9, temp7, temp1, temp2, temp5, temp6, + temp13, temp11, temp14, temp12) + INSERT_HALF_X2(temp8, temp7, temp10, temp9) + "ulw %[temp17], 4(%[in]) \n\t" + "ulw %[temp18], 20(%[in]) \n\t" + ADD_SUB_HALVES(temp1, temp2, temp3, temp8) + ADD_SUB_HALVES(temp5, temp6, temp4, temp7) + ADD_SUB_HALVES(temp7, temp8, temp17, temp18) + LOAD_IN_X2(temp17, temp18, 12, 14) + LOAD_IN_X2(temp9, temp10, 28, 30) + MUL_SHIFT_SUM(temp11, temp12, temp13, temp14, temp15, temp16, temp4, temp17, + temp12, temp14, temp11, temp13, temp17, temp18, temp9, temp10, + temp15, temp4, temp16, temp17) + INSERT_HALF_X2(temp11, temp12, temp13, temp14) + ADD_SUB_HALVES(temp17, temp8, temp8, temp11) + ADD_SUB_HALVES(temp3, temp4, temp7, temp12) + + // horizontal + SRA_16(temp9, temp10, temp11, temp12, temp1, temp2, temp5, temp6) + INSERT_HALF_X2(temp1, temp6, temp5, temp2) + SRA_16(temp13, temp14, temp15, temp16, temp3, temp4, temp17, temp8) + "repl.ph %[temp2], 0x4 \n\t" + INSERT_HALF_X2(temp3, temp8, temp17, temp4) + "addq.ph %[temp1], %[temp1], %[temp2] \n\t" + "addq.ph %[temp6], %[temp6], %[temp2] \n\t" + ADD_SUB_HALVES(temp2, temp4, temp1, temp3) + ADD_SUB_HALVES(temp5, temp7, temp6, temp8) + MUL_SHIFT_SUM(temp1, temp3, temp6, temp8, temp9, temp13, temp17, temp18, + temp3, temp13, temp1, temp9, temp9, temp13, temp11, temp15, + temp6, temp17, temp8, temp18) + MUL_SHIFT_SUM(temp6, temp8, temp18, temp17, temp11, temp15, temp12, temp16, + temp8, temp15, temp6, temp11, temp12, temp16, temp10, temp14, + temp18, temp12, temp17, temp16) + INSERT_HALF_X2(temp1, temp3, temp9, temp13) + INSERT_HALF_X2(temp6, temp8, temp11, temp15) + SHIFT_R_SUM_X2(temp9, temp10, temp11, temp12, temp13, temp14, temp15, + temp16, temp2, temp4, temp5, temp7, temp3, temp1, temp8, + temp6) + PACK_2_HALVES_TO_WORD(temp1, temp2, temp3, temp4, temp9, temp12, temp13, + temp16, temp11, temp10, temp15, temp14) + LOAD_WITH_OFFSET_X4(temp10, temp11, temp14, temp15, ref, + 0, 0, 0, 0, + 0, 1, 2, 3, + BPS) + CONVERT_2_BYTES_TO_HALF(temp5, temp6, temp7, temp8, temp17, temp18, temp10, + temp11, temp10, temp11, temp14, temp15) + STORE_SAT_SUM_X2(temp5, temp6, temp7, temp8, temp17, temp18, temp10, temp11, + temp9, temp12, temp1, temp2, temp13, temp16, temp3, temp4, + dst, 0, 1, 2, 3, BPS) + + OUTPUT_EARLY_CLOBBER_REGS_18() + : [dst]"r"(dst), [in]"r"(in), [kC1]"r"(kC1), [kC2]"r"(kC2), [ref]"r"(ref) + : "memory", "hi", "lo" + ); +} + +static void ITransform_MIPSdspR2(const uint8_t* ref, const int16_t* in, + uint8_t* dst, int do_two) { + ITransformOne(ref, in, dst); + if (do_two) { + ITransformOne(ref + 4, in + 16, dst + 4); + } +} + +static int Disto4x4_MIPSdspR2(const uint8_t* const a, const uint8_t* const b, + const uint16_t* const w) { + int temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9; + int temp10, temp11, temp12, temp13, temp14, temp15, temp16, temp17; + + __asm__ volatile ( + LOAD_WITH_OFFSET_X4(temp1, temp2, temp3, temp4, a, + 0, 0, 0, 0, + 0, 1, 2, 3, + BPS) + CONVERT_2_BYTES_TO_HALF(temp5, temp6, temp7, temp8, temp9,temp10, temp11, + temp12, temp1, temp2, 
temp3, temp4) + ADD_SUB_HALVES_X4(temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, + temp5, temp6, temp7, temp8, temp9, temp10, temp11, temp12) + PACK_2_HALVES_TO_WORD(temp9, temp10, temp11, temp12, temp1, temp3, temp5, + temp7, temp2, temp4, temp6, temp8) + ADD_SUB_HALVES_X4(temp2, temp4, temp6, temp8, temp9, temp1, temp3, temp10, + temp1, temp9, temp3, temp10, temp5, temp11, temp7, temp12) + ADD_SUB_HALVES_X4(temp5, temp11, temp7, temp2, temp9, temp3, temp6, temp12, + temp2, temp9, temp6, temp3, temp4, temp1, temp8, temp10) + ADD_SUB_HALVES_X4(temp1, temp4, temp10, temp8, temp7, temp11, temp5, temp2, + temp5, temp7, temp11, temp2, temp9, temp6, temp3, temp12) + ABS_X8(temp1, temp4, temp10, temp8, temp7, temp11, temp5, temp2) + LOAD_WITH_OFFSET_X4(temp3, temp6, temp9, temp12, w, + 0, 4, 8, 12, + 0, 0, 0, 0, + 0) + LOAD_WITH_OFFSET_X4(temp13, temp14, temp15, temp16, w, + 0, 4, 8, 12, + 1, 1, 1, 1, + 16) + MUL_HALF(temp17, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, + temp9, temp10, temp11, temp12, temp13, temp14, temp15, temp16) + LOAD_WITH_OFFSET_X4(temp1, temp2, temp3, temp4, b, + 0, 0, 0, 0, + 0, 1, 2, 3, + BPS) + CONVERT_2_BYTES_TO_HALF(temp5,temp6, temp7, temp8, temp9,temp10, temp11, + temp12, temp1, temp2, temp3, temp4) + ADD_SUB_HALVES_X4(temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, + temp5, temp6, temp7, temp8, temp9, temp10, temp11, temp12) + PACK_2_HALVES_TO_WORD(temp9, temp10, temp11, temp12, temp1, temp3, temp5, + temp7, temp2, temp4, temp6, temp8) + ADD_SUB_HALVES_X4(temp2, temp4, temp6, temp8, temp9, temp1, temp3, temp10, + temp1, temp9, temp3, temp10, temp5, temp11, temp7, temp12) + ADD_SUB_HALVES_X4(temp5, temp11, temp7, temp2, temp9, temp3, temp6, temp12, + temp2, temp9, temp6, temp3, temp4, temp1, temp8, temp10) + ADD_SUB_HALVES_X4(temp1, temp4, temp10, temp8, temp7, temp11, temp5, temp2, + temp5, temp7, temp11, temp2, temp9, temp6, temp3, temp12) + ABS_X8(temp1, temp4, temp10, temp8, temp7, temp11, temp5, temp2) + LOAD_WITH_OFFSET_X4(temp3, temp6, temp9, temp12, w, + 0, 4, 8, 12, + 0, 0, 0, 0, + 0) + LOAD_WITH_OFFSET_X4(temp13, temp14, temp15, temp16, w, + 0, 4, 8, 12, + 1, 1, 1, 1, + 16) + MUL_HALF(temp3, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, + temp9, temp10, temp11, temp12, temp13, temp14, temp15, temp16) + OUTPUT_EARLY_CLOBBER_REGS_17() + : [a]"r"(a), [b]"r"(b), [w]"r"(w) + : "memory", "hi", "lo" + ); + return abs(temp3 - temp17) >> 5; +} + +static int Disto16x16_MIPSdspR2(const uint8_t* const a, + const uint8_t* const b, + const uint16_t* const w) { + int D = 0; + int x, y; + for (y = 0; y < 16 * BPS; y += 4 * BPS) { + for (x = 0; x < 16; x += 4) { + D += Disto4x4_MIPSdspR2(a + x + y, b + x + y, w); + } + } + return D; +} + +//------------------------------------------------------------------------------ +// Intra predictions + +#define FILL_PART(J, SIZE) \ + "usw %[value], 0+" #J "*" XSTR(BPS) "(%[dst]) \n\t" \ + "usw %[value], 4+" #J "*" XSTR(BPS) "(%[dst]) \n\t" \ + ".if " #SIZE " == 16 \n\t" \ + "usw %[value], 8+" #J "*" XSTR(BPS) "(%[dst]) \n\t" \ + "usw %[value], 12+" #J "*" XSTR(BPS) "(%[dst]) \n\t" \ + ".endif \n\t" + +#define FILL_8_OR_16(DST, VALUE, SIZE) do { \ + int value = (VALUE); \ + __asm__ volatile ( \ + "replv.qb %[value], %[value] \n\t" \ + FILL_PART( 0, SIZE) \ + FILL_PART( 1, SIZE) \ + FILL_PART( 2, SIZE) \ + FILL_PART( 3, SIZE) \ + FILL_PART( 4, SIZE) \ + FILL_PART( 5, SIZE) \ + FILL_PART( 6, SIZE) \ + FILL_PART( 7, SIZE) \ + ".if " #SIZE " == 16 \n\t" \ + FILL_PART( 8, 16) \ + FILL_PART( 9, 16) \ 
+ FILL_PART(10, 16) \ + FILL_PART(11, 16) \ + FILL_PART(12, 16) \ + FILL_PART(13, 16) \ + FILL_PART(14, 16) \ + FILL_PART(15, 16) \ + ".endif \n\t" \ + : [value]"+&r"(value) \ + : [dst]"r"((DST)) \ + : "memory" \ + ); \ +} while (0) + +#define VERTICAL_PRED(DST, TOP, SIZE) \ +static WEBP_INLINE void VerticalPred##SIZE(uint8_t* (DST), \ + const uint8_t* (TOP)) { \ + int j; \ + if ((TOP)) { \ + for (j = 0; j < (SIZE); ++j) memcpy((DST) + j * BPS, (TOP), (SIZE)); \ + } else { \ + FILL_8_OR_16((DST), 127, (SIZE)); \ + } \ +} + +VERTICAL_PRED(dst, top, 8) +VERTICAL_PRED(dst, top, 16) + +#undef VERTICAL_PRED + +#define HORIZONTAL_PRED(DST, LEFT, SIZE) \ +static WEBP_INLINE void HorizontalPred##SIZE(uint8_t* (DST), \ + const uint8_t* (LEFT)) { \ + if (LEFT) { \ + int j; \ + for (j = 0; j < (SIZE); ++j) { \ + memset((DST) + j * BPS, (LEFT)[j], (SIZE)); \ + } \ + } else { \ + FILL_8_OR_16((DST), 129, (SIZE)); \ + } \ +} + +HORIZONTAL_PRED(dst, left, 8) +HORIZONTAL_PRED(dst, left, 16) + +#undef HORIZONTAL_PRED + +#define CLIPPING() \ + "preceu.ph.qbl %[temp2], %[temp0] \n\t" \ + "preceu.ph.qbr %[temp0], %[temp0] \n\t" \ + "preceu.ph.qbl %[temp3], %[temp1] \n\t" \ + "preceu.ph.qbr %[temp1], %[temp1] \n\t" \ + "addu.ph %[temp2], %[temp2], %[leftY_1] \n\t" \ + "addu.ph %[temp0], %[temp0], %[leftY_1] \n\t" \ + "addu.ph %[temp3], %[temp3], %[leftY_1] \n\t" \ + "addu.ph %[temp1], %[temp1], %[leftY_1] \n\t" \ + "shll_s.ph %[temp2], %[temp2], 7 \n\t" \ + "shll_s.ph %[temp0], %[temp0], 7 \n\t" \ + "shll_s.ph %[temp3], %[temp3], 7 \n\t" \ + "shll_s.ph %[temp1], %[temp1], 7 \n\t" \ + "precrqu_s.qb.ph %[temp0], %[temp2], %[temp0] \n\t" \ + "precrqu_s.qb.ph %[temp1], %[temp3], %[temp1] \n\t" + +#define CLIP_8B_TO_DST(DST, LEFT, TOP, SIZE) do { \ + int leftY_1 = ((int)(LEFT)[y] << 16) + (LEFT)[y]; \ + int temp0, temp1, temp2, temp3; \ + __asm__ volatile ( \ + "replv.ph %[leftY_1], %[leftY_1] \n\t" \ + "ulw %[temp0], 0(%[top]) \n\t" \ + "ulw %[temp1], 4(%[top]) \n\t" \ + "subu.ph %[leftY_1], %[leftY_1], %[left_1] \n\t" \ + CLIPPING() \ + "usw %[temp0], 0(%[dst]) \n\t" \ + "usw %[temp1], 4(%[dst]) \n\t" \ + ".if " #SIZE " == 16 \n\t" \ + "ulw %[temp0], 8(%[top]) \n\t" \ + "ulw %[temp1], 12(%[top]) \n\t" \ + CLIPPING() \ + "usw %[temp0], 8(%[dst]) \n\t" \ + "usw %[temp1], 12(%[dst]) \n\t" \ + ".endif \n\t" \ + : [leftY_1]"+&r"(leftY_1), [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), \ + [temp2]"=&r"(temp2), [temp3]"=&r"(temp3) \ + : [left_1]"r"(left_1), [top]"r"((TOP)), [dst]"r"((DST)) \ + : "memory" \ + ); \ +} while (0) + +#define CLIP_TO_DST(DST, LEFT, TOP, SIZE) do { \ + int y; \ + const int left_1 = ((int)(LEFT)[-1] << 16) + (LEFT)[-1]; \ + for (y = 0; y < (SIZE); ++y) { \ + CLIP_8B_TO_DST((DST), (LEFT), (TOP), (SIZE)); \ + (DST) += BPS; \ + } \ +} while (0) + +#define TRUE_MOTION(DST, LEFT, TOP, SIZE) \ +static WEBP_INLINE void TrueMotion##SIZE(uint8_t* (DST), const uint8_t* (LEFT),\ + const uint8_t* (TOP)) { \ + if ((LEFT) != NULL) { \ + if ((TOP) != NULL) { \ + CLIP_TO_DST((DST), (LEFT), (TOP), (SIZE)); \ + } else { \ + HorizontalPred##SIZE((DST), (LEFT)); \ + } \ + } else { \ + /* true motion without left samples (hence: with default 129 value) */ \ + /* is equivalent to VE prediction where you just copy the top samples. */ \ + /* Note that if top samples are not available, the default value is */ \ + /* then 129, and not 127 as in the VerticalPred case. 
*/ \ + if ((TOP) != NULL) { \ + VerticalPred##SIZE((DST), (TOP)); \ + } else { \ + FILL_8_OR_16((DST), 129, (SIZE)); \ + } \ + } \ +} + +TRUE_MOTION(dst, left, top, 8) +TRUE_MOTION(dst, left, top, 16) + +#undef TRUE_MOTION +#undef CLIP_TO_DST +#undef CLIP_8B_TO_DST +#undef CLIPPING + +static WEBP_INLINE void DCMode16(uint8_t* dst, const uint8_t* left, + const uint8_t* top) { + int DC, DC1; + int temp0, temp1, temp2, temp3; + + __asm__ volatile( + "beqz %[top], 2f \n\t" + LOAD_WITH_OFFSET_X4(temp0, temp1, temp2, temp3, top, + 0, 4, 8, 12, + 0, 0, 0, 0, + 0) + "raddu.w.qb %[temp0], %[temp0] \n\t" + "raddu.w.qb %[temp1], %[temp1] \n\t" + "raddu.w.qb %[temp2], %[temp2] \n\t" + "raddu.w.qb %[temp3], %[temp3] \n\t" + "addu %[temp0], %[temp0], %[temp1] \n\t" + "addu %[temp2], %[temp2], %[temp3] \n\t" + "addu %[DC], %[temp0], %[temp2] \n\t" + "move %[DC1], %[DC] \n\t" + "beqz %[left], 1f \n\t" + LOAD_WITH_OFFSET_X4(temp0, temp1, temp2, temp3, left, + 0, 4, 8, 12, + 0, 0, 0, 0, + 0) + "raddu.w.qb %[temp0], %[temp0] \n\t" + "raddu.w.qb %[temp1], %[temp1] \n\t" + "raddu.w.qb %[temp2], %[temp2] \n\t" + "raddu.w.qb %[temp3], %[temp3] \n\t" + "addu %[temp0], %[temp0], %[temp1] \n\t" + "addu %[temp2], %[temp2], %[temp3] \n\t" + "addu %[DC1], %[temp0], %[temp2] \n\t" + "1: \n\t" + "addu %[DC], %[DC], %[DC1] \n\t" + "j 3f \n\t" + "2: \n\t" + "beqz %[left], 4f \n\t" + LOAD_WITH_OFFSET_X4(temp0, temp1, temp2, temp3, left, + 0, 4, 8, 12, + 0, 0, 0, 0, + 0) + "raddu.w.qb %[temp0], %[temp0] \n\t" + "raddu.w.qb %[temp1], %[temp1] \n\t" + "raddu.w.qb %[temp2], %[temp2] \n\t" + "raddu.w.qb %[temp3], %[temp3] \n\t" + "addu %[temp0], %[temp0], %[temp1] \n\t" + "addu %[temp2], %[temp2], %[temp3] \n\t" + "addu %[DC], %[temp0], %[temp2] \n\t" + "addu %[DC], %[DC], %[DC] \n\t" + "3: \n\t" + "shra_r.w %[DC], %[DC], 5 \n\t" + "j 5f \n\t" + "4: \n\t" + "li %[DC], 0x80 \n\t" + "5: \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [DC]"=&r"(DC), + [temp2]"=&r"(temp2), [temp3]"=&r"(temp3), [DC1]"=&r"(DC1) + : [left]"r"(left), [top]"r"(top) + : "memory" + ); + + FILL_8_OR_16(dst, DC, 16); +} + +static WEBP_INLINE void DCMode8(uint8_t* dst, const uint8_t* left, + const uint8_t* top) { + int DC, DC1; + int temp0, temp1, temp2, temp3; + + __asm__ volatile( + "beqz %[top], 2f \n\t" + "ulw %[temp0], 0(%[top]) \n\t" + "ulw %[temp1], 4(%[top]) \n\t" + "raddu.w.qb %[temp0], %[temp0] \n\t" + "raddu.w.qb %[temp1], %[temp1] \n\t" + "addu %[DC], %[temp0], %[temp1] \n\t" + "move %[DC1], %[DC] \n\t" + "beqz %[left], 1f \n\t" + "ulw %[temp2], 0(%[left]) \n\t" + "ulw %[temp3], 4(%[left]) \n\t" + "raddu.w.qb %[temp2], %[temp2] \n\t" + "raddu.w.qb %[temp3], %[temp3] \n\t" + "addu %[DC1], %[temp2], %[temp3] \n\t" + "1: \n\t" + "addu %[DC], %[DC], %[DC1] \n\t" + "j 3f \n\t" + "2: \n\t" + "beqz %[left], 4f \n\t" + "ulw %[temp2], 0(%[left]) \n\t" + "ulw %[temp3], 4(%[left]) \n\t" + "raddu.w.qb %[temp2], %[temp2] \n\t" + "raddu.w.qb %[temp3], %[temp3] \n\t" + "addu %[DC], %[temp2], %[temp3] \n\t" + "addu %[DC], %[DC], %[DC] \n\t" + "3: \n\t" + "shra_r.w %[DC], %[DC], 4 \n\t" + "j 5f \n\t" + "4: \n\t" + "li %[DC], 0x80 \n\t" + "5: \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [DC]"=&r"(DC), + [temp2]"=&r"(temp2), [temp3]"=&r"(temp3), [DC1]"=&r"(DC1) + : [left]"r"(left), [top]"r"(top) + : "memory" + ); + + FILL_8_OR_16(dst, DC, 8); +} + +static void DC4(uint8_t* dst, const uint8_t* top) { + int temp0, temp1; + __asm__ volatile( + "ulw %[temp0], 0(%[top]) \n\t" + "ulw %[temp1], -5(%[top]) \n\t" + "raddu.w.qb %[temp0], %[temp0] \n\t" + 
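This point falls inside the DC4 predictor, which averages the four top samples and the four left samples (stored at top[-5..-2], per the sample-layout comment later in this file) with rounding. For reference, the scalar form, which mirrors the plain-C DC4 that appears in the MSA file further down in this patch (BPS value is an assumption, as above):

```c
#include <stdint.h>
#include <string.h>

#define BPS 32  /* assumption: prediction-buffer stride */

/* Scalar DC4: dc = (4 top samples + 4 left samples + 4) >> 3,
 * replicated over the 4x4 block; left samples live at top[-5..-2]. */
static void DC4_C(uint8_t* dst, const uint8_t* top) {
  uint32_t dc = 4;  /* rounding bias for the final >> 3 */
  int i;
  for (i = 0; i < 4; ++i) dc += top[i] + top[-5 + i];
  dc >>= 3;
  for (i = 0; i < 4; ++i) memset(dst + i * BPS, (int)dc, 4);
}
```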
"raddu.w.qb %[temp1], %[temp1] \n\t" + "addu %[temp0], %[temp0], %[temp1] \n\t" + "addiu %[temp0], %[temp0], 4 \n\t" + "srl %[temp0], %[temp0], 3 \n\t" + "replv.qb %[temp0], %[temp0] \n\t" + "usw %[temp0], 0*" XSTR(BPS) "(%[dst]) \n\t" + "usw %[temp0], 1*" XSTR(BPS) "(%[dst]) \n\t" + "usw %[temp0], 2*" XSTR(BPS) "(%[dst]) \n\t" + "usw %[temp0], 3*" XSTR(BPS) "(%[dst]) \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1) + : [top]"r"(top), [dst]"r"(dst) + : "memory" + ); +} + +static void TM4(uint8_t* dst, const uint8_t* top) { + int a10, a32, temp0, temp1, temp2, temp3, temp4, temp5; + const int c35 = 0xff00ff; + __asm__ volatile ( + "lbu %[temp1], 0(%[top]) \n\t" + "lbu %[a10], 1(%[top]) \n\t" + "lbu %[temp2], 2(%[top]) \n\t" + "lbu %[a32], 3(%[top]) \n\t" + "ulw %[temp0], -5(%[top]) \n\t" + "lbu %[temp4], -1(%[top]) \n\t" + "append %[a10], %[temp1], 16 \n\t" + "append %[a32], %[temp2], 16 \n\t" + "replv.ph %[temp4], %[temp4] \n\t" + "shrl.ph %[temp1], %[temp0], 8 \n\t" + "and %[temp0], %[temp0], %[c35] \n\t" + "subu.ph %[temp1], %[temp1], %[temp4] \n\t" + "subu.ph %[temp0], %[temp0], %[temp4] \n\t" + "srl %[temp2], %[temp1], 16 \n\t" + "srl %[temp3], %[temp0], 16 \n\t" + "replv.ph %[temp2], %[temp2] \n\t" + "replv.ph %[temp3], %[temp3] \n\t" + "replv.ph %[temp4], %[temp1] \n\t" + "replv.ph %[temp5], %[temp0] \n\t" + "addu.ph %[temp0], %[temp3], %[a10] \n\t" + "addu.ph %[temp1], %[temp3], %[a32] \n\t" + "addu.ph %[temp3], %[temp2], %[a10] \n\t" + "addu.ph %[temp2], %[temp2], %[a32] \n\t" + "shll_s.ph %[temp0], %[temp0], 7 \n\t" + "shll_s.ph %[temp1], %[temp1], 7 \n\t" + "shll_s.ph %[temp3], %[temp3], 7 \n\t" + "shll_s.ph %[temp2], %[temp2], 7 \n\t" + "precrqu_s.qb.ph %[temp0], %[temp1], %[temp0] \n\t" + "precrqu_s.qb.ph %[temp1], %[temp2], %[temp3] \n\t" + "addu.ph %[temp2], %[temp5], %[a10] \n\t" + "addu.ph %[temp3], %[temp5], %[a32] \n\t" + "addu.ph %[temp5], %[temp4], %[a10] \n\t" + "addu.ph %[temp4], %[temp4], %[a32] \n\t" + "shll_s.ph %[temp2], %[temp2], 7 \n\t" + "shll_s.ph %[temp3], %[temp3], 7 \n\t" + "shll_s.ph %[temp4], %[temp4], 7 \n\t" + "shll_s.ph %[temp5], %[temp5], 7 \n\t" + "precrqu_s.qb.ph %[temp2], %[temp3], %[temp2] \n\t" + "precrqu_s.qb.ph %[temp3], %[temp4], %[temp5] \n\t" + "usw %[temp1], 0*" XSTR(BPS) "(%[dst]) \n\t" + "usw %[temp0], 1*" XSTR(BPS) "(%[dst]) \n\t" + "usw %[temp3], 2*" XSTR(BPS) "(%[dst]) \n\t" + "usw %[temp2], 3*" XSTR(BPS) "(%[dst]) \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [a10]"=&r"(a10), [a32]"=&r"(a32) + : [c35]"r"(c35), [top]"r"(top), [dst]"r"(dst) + : "memory" + ); +} + +static void VE4(uint8_t* dst, const uint8_t* top) { + int temp0, temp1, temp2, temp3, temp4, temp5, temp6; + __asm__ volatile( + "ulw %[temp0], -1(%[top]) \n\t" + "ulh %[temp1], 3(%[top]) \n\t" + "preceu.ph.qbr %[temp2], %[temp0] \n\t" + "preceu.ph.qbl %[temp3], %[temp0] \n\t" + "preceu.ph.qbr %[temp4], %[temp1] \n\t" + "packrl.ph %[temp5], %[temp3], %[temp2] \n\t" + "packrl.ph %[temp6], %[temp4], %[temp3] \n\t" + "shll.ph %[temp5], %[temp5], 1 \n\t" + "shll.ph %[temp6], %[temp6], 1 \n\t" + "addq.ph %[temp2], %[temp5], %[temp2] \n\t" + "addq.ph %[temp6], %[temp6], %[temp4] \n\t" + "addq.ph %[temp2], %[temp2], %[temp3] \n\t" + "addq.ph %[temp6], %[temp6], %[temp3] \n\t" + "shra_r.ph %[temp2], %[temp2], 2 \n\t" + "shra_r.ph %[temp6], %[temp6], 2 \n\t" + "precr.qb.ph %[temp4], %[temp6], %[temp2] \n\t" + "usw %[temp4], 0*" XSTR(BPS) "(%[dst]) \n\t" + "usw %[temp4], 1*" XSTR(BPS) 
"(%[dst]) \n\t" + "usw %[temp4], 2*" XSTR(BPS) "(%[dst]) \n\t" + "usw %[temp4], 3*" XSTR(BPS) "(%[dst]) \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [temp6]"=&r"(temp6) + : [top]"r"(top), [dst]"r"(dst) + : "memory" + ); +} + +static void HE4(uint8_t* dst, const uint8_t* top) { + int temp0, temp1, temp2, temp3, temp4, temp5, temp6; + __asm__ volatile( + "ulw %[temp0], -4(%[top]) \n\t" + "lbu %[temp1], -5(%[top]) \n\t" + "preceu.ph.qbr %[temp2], %[temp0] \n\t" + "preceu.ph.qbl %[temp3], %[temp0] \n\t" + "replv.ph %[temp4], %[temp1] \n\t" + "packrl.ph %[temp5], %[temp3], %[temp2] \n\t" + "packrl.ph %[temp6], %[temp2], %[temp4] \n\t" + "shll.ph %[temp5], %[temp5], 1 \n\t" + "shll.ph %[temp6], %[temp6], 1 \n\t" + "addq.ph %[temp3], %[temp3], %[temp5] \n\t" + "addq.ph %[temp3], %[temp3], %[temp2] \n\t" + "addq.ph %[temp2], %[temp2], %[temp6] \n\t" + "addq.ph %[temp2], %[temp2], %[temp4] \n\t" + "shra_r.ph %[temp3], %[temp3], 2 \n\t" + "shra_r.ph %[temp2], %[temp2], 2 \n\t" + "replv.qb %[temp0], %[temp3] \n\t" + "replv.qb %[temp1], %[temp2] \n\t" + "srl %[temp3], %[temp3], 16 \n\t" + "srl %[temp2], %[temp2], 16 \n\t" + "replv.qb %[temp3], %[temp3] \n\t" + "replv.qb %[temp2], %[temp2] \n\t" + "usw %[temp3], 0*" XSTR(BPS) "(%[dst]) \n\t" + "usw %[temp0], 1*" XSTR(BPS) "(%[dst]) \n\t" + "usw %[temp2], 2*" XSTR(BPS) "(%[dst]) \n\t" + "usw %[temp1], 3*" XSTR(BPS) "(%[dst]) \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [temp6]"=&r"(temp6) + : [top]"r"(top), [dst]"r"(dst) + : "memory" + ); +} + +static void RD4(uint8_t* dst, const uint8_t* top) { + int temp0, temp1, temp2, temp3, temp4, temp5; + int temp6, temp7, temp8, temp9, temp10, temp11; + __asm__ volatile( + "ulw %[temp0], -5(%[top]) \n\t" + "ulw %[temp1], -1(%[top]) \n\t" + "preceu.ph.qbl %[temp2], %[temp0] \n\t" + "preceu.ph.qbr %[temp3], %[temp0] \n\t" + "preceu.ph.qbr %[temp4], %[temp1] \n\t" + "preceu.ph.qbl %[temp5], %[temp1] \n\t" + "packrl.ph %[temp6], %[temp2], %[temp3] \n\t" + "packrl.ph %[temp7], %[temp4], %[temp2] \n\t" + "packrl.ph %[temp8], %[temp5], %[temp4] \n\t" + "shll.ph %[temp6], %[temp6], 1 \n\t" + "addq.ph %[temp9], %[temp2], %[temp6] \n\t" + "shll.ph %[temp7], %[temp7], 1 \n\t" + "addq.ph %[temp9], %[temp9], %[temp3] \n\t" + "shll.ph %[temp8], %[temp8], 1 \n\t" + "shra_r.ph %[temp9], %[temp9], 2 \n\t" + "addq.ph %[temp10], %[temp4], %[temp7] \n\t" + "addq.ph %[temp11], %[temp5], %[temp8] \n\t" + "addq.ph %[temp10], %[temp10], %[temp2] \n\t" + "addq.ph %[temp11], %[temp11], %[temp4] \n\t" + "shra_r.ph %[temp10], %[temp10], 2 \n\t" + "shra_r.ph %[temp11], %[temp11], 2 \n\t" + "lbu %[temp0], 3(%[top]) \n\t" + "lbu %[temp1], 2(%[top]) \n\t" + "lbu %[temp2], 1(%[top]) \n\t" + "sll %[temp1], %[temp1], 1 \n\t" + "addu %[temp0], %[temp0], %[temp1] \n\t" + "addu %[temp0], %[temp0], %[temp2] \n\t" + "precr.qb.ph %[temp9], %[temp10], %[temp9] \n\t" + "shra_r.w %[temp0], %[temp0], 2 \n\t" + "precr.qb.ph %[temp10], %[temp11], %[temp10] \n\t" + "usw %[temp9], 3*" XSTR(BPS) "(%[dst]) \n\t" + "usw %[temp10], 1*" XSTR(BPS) "(%[dst]) \n\t" + "prepend %[temp9], %[temp11], 8 \n\t" + "prepend %[temp10], %[temp0], 8 \n\t" + "usw %[temp9], 2*" XSTR(BPS) "(%[dst]) \n\t" + "usw %[temp10], 0*" XSTR(BPS) "(%[dst]) \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + 
[temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [temp8]"=&r"(temp8), + [temp9]"=&r"(temp9), [temp10]"=&r"(temp10), [temp11]"=&r"(temp11) + : [top]"r"(top), [dst]"r"(dst) + : "memory" + ); +} + +static void VR4(uint8_t* dst, const uint8_t* top) { + int temp0, temp1, temp2, temp3, temp4; + int temp5, temp6, temp7, temp8, temp9; + __asm__ volatile ( + "ulw %[temp0], -4(%[top]) \n\t" + "ulw %[temp1], 0(%[top]) \n\t" + "preceu.ph.qbl %[temp2], %[temp0] \n\t" + "preceu.ph.qbr %[temp0], %[temp0] \n\t" + "preceu.ph.qbla %[temp3], %[temp1] \n\t" + "preceu.ph.qbra %[temp1], %[temp1] \n\t" + "packrl.ph %[temp7], %[temp3], %[temp2] \n\t" + "addqh_r.ph %[temp4], %[temp1], %[temp3] \n\t" + "move %[temp6], %[temp1] \n\t" + "append %[temp1], %[temp2], 16 \n\t" + "shll.ph %[temp9], %[temp6], 1 \n\t" + "addqh_r.ph %[temp5], %[temp7], %[temp6] \n\t" + "shll.ph %[temp8], %[temp7], 1 \n\t" + "addu.ph %[temp3], %[temp7], %[temp3] \n\t" + "addu.ph %[temp1], %[temp1], %[temp6] \n\t" + "packrl.ph %[temp7], %[temp2], %[temp0] \n\t" + "addu.ph %[temp6], %[temp0], %[temp2] \n\t" + "addu.ph %[temp3], %[temp3], %[temp9] \n\t" + "addu.ph %[temp1], %[temp1], %[temp8] \n\t" + "shll.ph %[temp7], %[temp7], 1 \n\t" + "shra_r.ph %[temp3], %[temp3], 2 \n\t" + "shra_r.ph %[temp1], %[temp1], 2 \n\t" + "addu.ph %[temp6], %[temp6], %[temp7] \n\t" + "shra_r.ph %[temp6], %[temp6], 2 \n\t" + "precrq.ph.w %[temp8], %[temp4], %[temp5] \n\t" + "append %[temp4], %[temp5], 16 \n\t" + "precrq.ph.w %[temp2], %[temp3], %[temp1] \n\t" + "append %[temp3], %[temp1], 16 \n\t" + "precr.qb.ph %[temp8], %[temp8], %[temp4] \n\t" + "precr.qb.ph %[temp3], %[temp2], %[temp3] \n\t" + "usw %[temp8], 0*" XSTR(BPS) "(%[dst]) \n\t" + "usw %[temp3], 1*" XSTR(BPS) "(%[dst]) \n\t" + "append %[temp3], %[temp6], 8 \n\t" + "srl %[temp6], %[temp6], 16 \n\t" + "append %[temp8], %[temp6], 8 \n\t" + "usw %[temp3], 3*" XSTR(BPS) "(%[dst]) \n\t" + "usw %[temp8], 2*" XSTR(BPS) "(%[dst]) \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [temp8]"=&r"(temp8), + [temp9]"=&r"(temp9) + : [top]"r"(top), [dst]"r"(dst) + : "memory" + ); +} + +static void LD4(uint8_t* dst, const uint8_t* top) { + int temp0, temp1, temp2, temp3, temp4, temp5; + int temp6, temp7, temp8, temp9, temp10, temp11; + __asm__ volatile( + "ulw %[temp0], 0(%[top]) \n\t" + "ulw %[temp1], 4(%[top]) \n\t" + "preceu.ph.qbl %[temp2], %[temp0] \n\t" + "preceu.ph.qbr %[temp3], %[temp0] \n\t" + "preceu.ph.qbr %[temp4], %[temp1] \n\t" + "preceu.ph.qbl %[temp5], %[temp1] \n\t" + "packrl.ph %[temp6], %[temp2], %[temp3] \n\t" + "packrl.ph %[temp7], %[temp4], %[temp2] \n\t" + "packrl.ph %[temp8], %[temp5], %[temp4] \n\t" + "shll.ph %[temp6], %[temp6], 1 \n\t" + "addq.ph %[temp9], %[temp2], %[temp6] \n\t" + "shll.ph %[temp7], %[temp7], 1 \n\t" + "addq.ph %[temp9], %[temp9], %[temp3] \n\t" + "shll.ph %[temp8], %[temp8], 1 \n\t" + "shra_r.ph %[temp9], %[temp9], 2 \n\t" + "addq.ph %[temp10], %[temp4], %[temp7] \n\t" + "addq.ph %[temp11], %[temp5], %[temp8] \n\t" + "addq.ph %[temp10], %[temp10], %[temp2] \n\t" + "addq.ph %[temp11], %[temp11], %[temp4] \n\t" + "shra_r.ph %[temp10], %[temp10], 2 \n\t" + "shra_r.ph %[temp11], %[temp11], 2 \n\t" + "srl %[temp1], %[temp1], 24 \n\t" + "sll %[temp1], %[temp1], 1 \n\t" + "raddu.w.qb %[temp5], %[temp5] \n\t" + "precr.qb.ph %[temp9], %[temp10], %[temp9] \n\t" + "precr.qb.ph %[temp10], %[temp11], %[temp10] \n\t" + "addu %[temp1], %[temp1], %[temp5] \n\t" + 
"shra_r.w %[temp1], %[temp1], 2 \n\t" + "usw %[temp9], 0*" XSTR(BPS) "(%[dst]) \n\t" + "usw %[temp10], 2*" XSTR(BPS) "(%[dst]) \n\t" + "prepend %[temp9], %[temp11], 8 \n\t" + "prepend %[temp10], %[temp1], 8 \n\t" + "usw %[temp9], 1*" XSTR(BPS) "(%[dst]) \n\t" + "usw %[temp10], 3*" XSTR(BPS) "(%[dst]) \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [temp8]"=&r"(temp8), + [temp9]"=&r"(temp9), [temp10]"=&r"(temp10), [temp11]"=&r"(temp11) + : [top]"r"(top), [dst]"r"(dst) + : "memory" + ); +} + +static void VL4(uint8_t* dst, const uint8_t* top) { + int temp0, temp1, temp2, temp3, temp4; + int temp5, temp6, temp7, temp8, temp9; + __asm__ volatile ( + "ulw %[temp0], 0(%[top]) \n\t" + "ulw %[temp1], 4(%[top]) \n\t" + "preceu.ph.qbla %[temp2], %[temp0] \n\t" + "preceu.ph.qbra %[temp0], %[temp0] \n\t" + "preceu.ph.qbl %[temp3], %[temp1] \n\t" + "preceu.ph.qbr %[temp1], %[temp1] \n\t" + "addqh_r.ph %[temp4], %[temp0], %[temp2] \n\t" + "packrl.ph %[temp7], %[temp1], %[temp0] \n\t" + "precrq.ph.w %[temp6], %[temp1], %[temp2] \n\t" + "shll.ph %[temp9], %[temp2], 1 \n\t" + "addqh_r.ph %[temp5], %[temp7], %[temp2] \n\t" + "shll.ph %[temp8], %[temp7], 1 \n\t" + "addu.ph %[temp2], %[temp2], %[temp6] \n\t" + "addu.ph %[temp0], %[temp0], %[temp7] \n\t" + "packrl.ph %[temp7], %[temp3], %[temp1] \n\t" + "addu.ph %[temp6], %[temp1], %[temp3] \n\t" + "addu.ph %[temp2], %[temp2], %[temp8] \n\t" + "addu.ph %[temp0], %[temp0], %[temp9] \n\t" + "shll.ph %[temp7], %[temp7], 1 \n\t" + "shra_r.ph %[temp2], %[temp2], 2 \n\t" + "shra_r.ph %[temp0], %[temp0], 2 \n\t" + "addu.ph %[temp6], %[temp6], %[temp7] \n\t" + "shra_r.ph %[temp6], %[temp6], 2 \n\t" + "precrq.ph.w %[temp8], %[temp5], %[temp4] \n\t" + "append %[temp5], %[temp4], 16 \n\t" + "precrq.ph.w %[temp3], %[temp2], %[temp0] \n\t" + "append %[temp2], %[temp0], 16 \n\t" + "precr.qb.ph %[temp8], %[temp8], %[temp5] \n\t" + "precr.qb.ph %[temp3], %[temp3], %[temp2] \n\t" + "usw %[temp8], 0*" XSTR(BPS) "(%[dst]) \n\t" + "prepend %[temp8], %[temp6], 8 \n\t" + "usw %[temp3], 1*" XSTR(BPS) "(%[dst]) \n\t" + "srl %[temp6], %[temp6], 16 \n\t" + "prepend %[temp3], %[temp6], 8 \n\t" + "usw %[temp8], 2*" XSTR(BPS) "(%[dst]) \n\t" + "usw %[temp3], 3*" XSTR(BPS) "(%[dst]) \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [temp8]"=&r"(temp8), + [temp9]"=&r"(temp9) + : [top]"r"(top), [dst]"r"(dst) + : "memory" + ); +} + +static void HD4(uint8_t* dst, const uint8_t* top) { + int temp0, temp1, temp2, temp3, temp4; + int temp5, temp6, temp7, temp8, temp9; + __asm__ volatile ( + "ulw %[temp0], -5(%[top]) \n\t" + "ulw %[temp1], -1(%[top]) \n\t" + "preceu.ph.qbla %[temp2], %[temp0] \n\t" + "preceu.ph.qbra %[temp0], %[temp0] \n\t" + "preceu.ph.qbl %[temp3], %[temp1] \n\t" + "preceu.ph.qbr %[temp1], %[temp1] \n\t" + "addqh_r.ph %[temp4], %[temp0], %[temp2] \n\t" + "packrl.ph %[temp7], %[temp1], %[temp0] \n\t" + "precrq.ph.w %[temp6], %[temp1], %[temp2] \n\t" + "shll.ph %[temp9], %[temp2], 1 \n\t" + "addqh_r.ph %[temp5], %[temp7], %[temp2] \n\t" + "shll.ph %[temp8], %[temp7], 1 \n\t" + "addu.ph %[temp2], %[temp2], %[temp6] \n\t" + "addu.ph %[temp0], %[temp0], %[temp7] \n\t" + "packrl.ph %[temp7], %[temp3], %[temp1] \n\t" + "addu.ph %[temp6], %[temp1], %[temp3] \n\t" + "addu.ph %[temp2], %[temp2], %[temp8] \n\t" + "addu.ph 
%[temp0], %[temp0], %[temp9] \n\t" + "shll.ph %[temp7], %[temp7], 1 \n\t" + "shra_r.ph %[temp2], %[temp2], 2 \n\t" + "shra_r.ph %[temp0], %[temp0], 2 \n\t" + "addu.ph %[temp6], %[temp6], %[temp7] \n\t" + "shra_r.ph %[temp6], %[temp6], 2 \n\t" + "precrq.ph.w %[temp1], %[temp2], %[temp5] \n\t" + "precrq.ph.w %[temp3], %[temp0], %[temp4] \n\t" + "precr.qb.ph %[temp7], %[temp6], %[temp1] \n\t" + "precr.qb.ph %[temp6], %[temp1], %[temp3] \n\t" + "usw %[temp7], 0*" XSTR(BPS) "(%[dst]) \n\t" + "usw %[temp6], 1*" XSTR(BPS) "(%[dst]) \n\t" + "append %[temp2], %[temp5], 16 \n\t" + "append %[temp0], %[temp4], 16 \n\t" + "precr.qb.ph %[temp5], %[temp3], %[temp2] \n\t" + "precr.qb.ph %[temp4], %[temp2], %[temp0] \n\t" + "usw %[temp5], 2*" XSTR(BPS) "(%[dst]) \n\t" + "usw %[temp4], 3*" XSTR(BPS) "(%[dst]) \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [temp8]"=&r"(temp8), + [temp9]"=&r"(temp9) + : [top]"r"(top), [dst]"r"(dst) + : "memory" + ); +} + +static void HU4(uint8_t* dst, const uint8_t* top) { + int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7; + __asm__ volatile ( + "ulw %[temp0], -5(%[top]) \n\t" + "preceu.ph.qbl %[temp1], %[temp0] \n\t" + "preceu.ph.qbr %[temp2], %[temp0] \n\t" + "packrl.ph %[temp3], %[temp1], %[temp2] \n\t" + "replv.qb %[temp7], %[temp2] \n\t" + "addqh_r.ph %[temp4], %[temp1], %[temp3] \n\t" + "addqh_r.ph %[temp5], %[temp3], %[temp2] \n\t" + "shll.ph %[temp6], %[temp3], 1 \n\t" + "addu.ph %[temp3], %[temp2], %[temp3] \n\t" + "addu.ph %[temp6], %[temp1], %[temp6] \n\t" + "shll.ph %[temp0], %[temp2], 1 \n\t" + "addu.ph %[temp6], %[temp6], %[temp2] \n\t" + "addu.ph %[temp0], %[temp3], %[temp0] \n\t" + "shra_r.ph %[temp6], %[temp6], 2 \n\t" + "shra_r.ph %[temp0], %[temp0], 2 \n\t" + "packrl.ph %[temp3], %[temp6], %[temp5] \n\t" + "precrq.ph.w %[temp2], %[temp6], %[temp4] \n\t" + "append %[temp0], %[temp5], 16 \n\t" + "precr.qb.ph %[temp3], %[temp3], %[temp2] \n\t" + "usw %[temp3], 0*" XSTR(BPS) "(%[dst]) \n\t" + "precr.qb.ph %[temp1], %[temp7], %[temp0] \n\t" + "usw %[temp7], 3*" XSTR(BPS) "(%[dst]) \n\t" + "packrl.ph %[temp2], %[temp1], %[temp3] \n\t" + "usw %[temp1], 2*" XSTR(BPS) "(%[dst]) \n\t" + "usw %[temp2], 1*" XSTR(BPS) "(%[dst]) \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [temp6]"=&r"(temp6), [temp7]"=&r"(temp7) + : [top]"r"(top), [dst]"r"(dst) + : "memory" + ); +} + +//------------------------------------------------------------------------------ +// Chroma 8x8 prediction (paragraph 12.2) + +static void IntraChromaPreds_MIPSdspR2(uint8_t* dst, const uint8_t* left, + const uint8_t* top) { + // U block + DCMode8(C8DC8 + dst, left, top); + VerticalPred8(C8VE8 + dst, top); + HorizontalPred8(C8HE8 + dst, left); + TrueMotion8(C8TM8 + dst, left, top); + // V block + dst += 8; + if (top) top += 8; + if (left) left += 16; + DCMode8(C8DC8 + dst, left, top); + VerticalPred8(C8VE8 + dst, top); + HorizontalPred8(C8HE8 + dst, left); + TrueMotion8(C8TM8 + dst, left, top); +} + +//------------------------------------------------------------------------------ +// luma 16x16 prediction (paragraph 12.3) + +static void Intra16Preds_MIPSdspR2(uint8_t* dst, + const uint8_t* left, const uint8_t* top) { + DCMode16(I16DC16 + dst, left, top); + VerticalPred16(I16VE16 + dst, top); + HorizontalPred16(I16HE16 + dst, left); + TrueMotion16(I16TM16 + dst, left, 
top); +} + +// Left samples are top[-5 .. -2], top_left is top[-1], top are +// located at top[0..3], and top right is top[4..7] +static void Intra4Preds_MIPSdspR2(uint8_t* dst, const uint8_t* top) { + DC4(I4DC4 + dst, top); + TM4(I4TM4 + dst, top); + VE4(I4VE4 + dst, top); + HE4(I4HE4 + dst, top); + RD4(I4RD4 + dst, top); + VR4(I4VR4 + dst, top); + LD4(I4LD4 + dst, top); + VL4(I4VL4 + dst, top); + HD4(I4HD4 + dst, top); + HU4(I4HU4 + dst, top); +} + +//------------------------------------------------------------------------------ +// Metric + +#if !defined(WORK_AROUND_GCC) + +#define GET_SSE_INNER(A) \ + "lw %[temp0], " #A "(%[a]) \n\t" \ + "lw %[temp1], " #A "(%[b]) \n\t" \ + "preceu.ph.qbr %[temp2], %[temp0] \n\t" \ + "preceu.ph.qbl %[temp0], %[temp0] \n\t" \ + "preceu.ph.qbr %[temp3], %[temp1] \n\t" \ + "preceu.ph.qbl %[temp1], %[temp1] \n\t" \ + "subq.ph %[temp2], %[temp2], %[temp3] \n\t" \ + "subq.ph %[temp0], %[temp0], %[temp1] \n\t" \ + "dpa.w.ph $ac0, %[temp2], %[temp2] \n\t" \ + "dpa.w.ph $ac0, %[temp0], %[temp0] \n\t" + +#define GET_SSE(A, B, C, D) \ + GET_SSE_INNER(A) \ + GET_SSE_INNER(B) \ + GET_SSE_INNER(C) \ + GET_SSE_INNER(D) + +static int SSE16x16_MIPSdspR2(const uint8_t* a, const uint8_t* b) { + int count; + int temp0, temp1, temp2, temp3; + __asm__ volatile ( + "mult $zero, $zero \n\t" + GET_SSE( 0 * BPS, 4 + 0 * BPS, 8 + 0 * BPS, 12 + 0 * BPS) + GET_SSE( 1 * BPS, 4 + 1 * BPS, 8 + 1 * BPS, 12 + 1 * BPS) + GET_SSE( 2 * BPS, 4 + 2 * BPS, 8 + 2 * BPS, 12 + 2 * BPS) + GET_SSE( 3 * BPS, 4 + 3 * BPS, 8 + 3 * BPS, 12 + 3 * BPS) + GET_SSE( 4 * BPS, 4 + 4 * BPS, 8 + 4 * BPS, 12 + 4 * BPS) + GET_SSE( 5 * BPS, 4 + 5 * BPS, 8 + 5 * BPS, 12 + 5 * BPS) + GET_SSE( 6 * BPS, 4 + 6 * BPS, 8 + 6 * BPS, 12 + 6 * BPS) + GET_SSE( 7 * BPS, 4 + 7 * BPS, 8 + 7 * BPS, 12 + 7 * BPS) + GET_SSE( 8 * BPS, 4 + 8 * BPS, 8 + 8 * BPS, 12 + 8 * BPS) + GET_SSE( 9 * BPS, 4 + 9 * BPS, 8 + 9 * BPS, 12 + 9 * BPS) + GET_SSE(10 * BPS, 4 + 10 * BPS, 8 + 10 * BPS, 12 + 10 * BPS) + GET_SSE(11 * BPS, 4 + 11 * BPS, 8 + 11 * BPS, 12 + 11 * BPS) + GET_SSE(12 * BPS, 4 + 12 * BPS, 8 + 12 * BPS, 12 + 12 * BPS) + GET_SSE(13 * BPS, 4 + 13 * BPS, 8 + 13 * BPS, 12 + 13 * BPS) + GET_SSE(14 * BPS, 4 + 14 * BPS, 8 + 14 * BPS, 12 + 14 * BPS) + GET_SSE(15 * BPS, 4 + 15 * BPS, 8 + 15 * BPS, 12 + 15 * BPS) + "mflo %[count] \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [count]"=&r"(count) + : [a]"r"(a), [b]"r"(b) + : "memory", "hi", "lo" + ); + return count; +} + +static int SSE16x8_MIPSdspR2(const uint8_t* a, const uint8_t* b) { + int count; + int temp0, temp1, temp2, temp3; + __asm__ volatile ( + "mult $zero, $zero \n\t" + GET_SSE( 0 * BPS, 4 + 0 * BPS, 8 + 0 * BPS, 12 + 0 * BPS) + GET_SSE( 1 * BPS, 4 + 1 * BPS, 8 + 1 * BPS, 12 + 1 * BPS) + GET_SSE( 2 * BPS, 4 + 2 * BPS, 8 + 2 * BPS, 12 + 2 * BPS) + GET_SSE( 3 * BPS, 4 + 3 * BPS, 8 + 3 * BPS, 12 + 3 * BPS) + GET_SSE( 4 * BPS, 4 + 4 * BPS, 8 + 4 * BPS, 12 + 4 * BPS) + GET_SSE( 5 * BPS, 4 + 5 * BPS, 8 + 5 * BPS, 12 + 5 * BPS) + GET_SSE( 6 * BPS, 4 + 6 * BPS, 8 + 6 * BPS, 12 + 6 * BPS) + GET_SSE( 7 * BPS, 4 + 7 * BPS, 8 + 7 * BPS, 12 + 7 * BPS) + "mflo %[count] \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [count]"=&r"(count) + : [a]"r"(a), [b]"r"(b) + : "memory", "hi", "lo" + ); + return count; +} + +static int SSE8x8_MIPSdspR2(const uint8_t* a, const uint8_t* b) { + int count; + int temp0, temp1, temp2, temp3; + __asm__ volatile ( + "mult $zero, $zero \n\t" + GET_SSE(0 * BPS, 4 + 0 * 
BPS, 1 * BPS, 4 + 1 * BPS) + GET_SSE(2 * BPS, 4 + 2 * BPS, 3 * BPS, 4 + 3 * BPS) + GET_SSE(4 * BPS, 4 + 4 * BPS, 5 * BPS, 4 + 5 * BPS) + GET_SSE(6 * BPS, 4 + 6 * BPS, 7 * BPS, 4 + 7 * BPS) + "mflo %[count] \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [count]"=&r"(count) + : [a]"r"(a), [b]"r"(b) + : "memory", "hi", "lo" + ); + return count; +} + +static int SSE4x4_MIPSdspR2(const uint8_t* a, const uint8_t* b) { + int count; + int temp0, temp1, temp2, temp3; + __asm__ volatile ( + "mult $zero, $zero \n\t" + GET_SSE(0 * BPS, 1 * BPS, 2 * BPS, 3 * BPS) + "mflo %[count] \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [count]"=&r"(count) + : [a]"r"(a), [b]"r"(b) + : "memory", "hi", "lo" + ); + return count; +} + +#undef GET_SSE +#undef GET_SSE_INNER + +#endif // !WORK_AROUND_GCC + +#undef FILL_8_OR_16 +#undef FILL_PART +#undef OUTPUT_EARLY_CLOBBER_REGS_17 +#undef MUL_HALF +#undef ABS_X8 +#undef ADD_SUB_HALVES_X4 + +//------------------------------------------------------------------------------ +// Quantization +// + +// macro for one pass through for loop in QuantizeBlock reading 2 values at time +// QUANTDIV macro inlined +// J - offset in bytes (kZigzag[n] * 2) +// K - offset in bytes (kZigzag[n] * 4) +// N - offset in bytes (n * 2) +// N1 - offset in bytes ((n + 1) * 2) +#define QUANTIZE_ONE(J, K, N, N1) \ + "ulw %[temp1], " #J "(%[ppin]) \n\t" \ + "ulw %[temp2], " #J "(%[ppsharpen]) \n\t" \ + "lhu %[temp3], " #K "(%[ppzthresh]) \n\t" \ + "lhu %[temp6], " #K "+4(%[ppzthresh]) \n\t" \ + "absq_s.ph %[temp4], %[temp1] \n\t" \ + "ins %[temp3], %[temp6], 16, 16 \n\t" \ + "addu.ph %[coeff], %[temp4], %[temp2] \n\t" \ + "shra.ph %[sign], %[temp1], 15 \n\t" \ + "li %[level], 0x10001 \n\t" \ + "cmp.lt.ph %[temp3], %[coeff] \n\t" \ + "lhu %[temp1], " #J "(%[ppiq]) \n\t" \ + "pick.ph %[temp5], %[level], $0 \n\t" \ + "lw %[temp2], " #K "(%[ppbias]) \n\t" \ + "beqz %[temp5], 0f \n\t" \ + "lhu %[temp3], " #J "(%[ppq]) \n\t" \ + "beq %[temp5], %[level], 1f \n\t" \ + "andi %[temp5], %[temp5], 0x1 \n\t" \ + "andi %[temp4], %[coeff], 0xffff \n\t" \ + "beqz %[temp5], 2f \n\t" \ + "mul %[level], %[temp4], %[temp1] \n\t" \ + "sh $0, " #J "+2(%[ppin]) \n\t" \ + "sh $0, " #N1 "(%[pout]) \n\t" \ + "addu %[level], %[level], %[temp2] \n\t" \ + "sra %[level], %[level], 17 \n\t" \ + "slt %[temp4], %[max_level], %[level] \n\t" \ + "movn %[level], %[max_level], %[temp4] \n\t" \ + "andi %[temp6], %[sign], 0xffff \n\t" \ + "xor %[level], %[level], %[temp6] \n\t" \ + "subu %[level], %[level], %[temp6] \n\t" \ + "mul %[temp5], %[level], %[temp3] \n\t" \ + "or %[ret], %[ret], %[level] \n\t" \ + "sh %[level], " #N "(%[pout]) \n\t" \ + "sh %[temp5], " #J "(%[ppin]) \n\t" \ + "j 3f \n\t" \ +"2: \n\t" \ + "lhu %[temp1], " #J "+2(%[ppiq]) \n\t" \ + "srl %[temp5], %[coeff], 16 \n\t" \ + "mul %[level], %[temp5], %[temp1] \n\t" \ + "lw %[temp2], " #K "+4(%[ppbias]) \n\t" \ + "lhu %[temp3], " #J "+2(%[ppq]) \n\t" \ + "addu %[level], %[level], %[temp2] \n\t" \ + "sra %[level], %[level], 17 \n\t" \ + "srl %[temp6], %[sign], 16 \n\t" \ + "slt %[temp4], %[max_level], %[level] \n\t" \ + "movn %[level], %[max_level], %[temp4] \n\t" \ + "xor %[level], %[level], %[temp6] \n\t" \ + "subu %[level], %[level], %[temp6] \n\t" \ + "mul %[temp5], %[level], %[temp3] \n\t" \ + "sh $0, " #J "(%[ppin]) \n\t" \ + "sh $0, " #N "(%[pout]) \n\t" \ + "or %[ret], %[ret], %[level] \n\t" \ + "sh %[temp5], " #J "+2(%[ppin]) \n\t" \ + "sh %[level], " #N1 "(%[pout]) 
\n\t" \ + "j 3f \n\t" \ +"1: \n\t" \ + "lhu %[temp1], " #J "(%[ppiq]) \n\t" \ + "lw %[temp2], " #K "(%[ppbias]) \n\t" \ + "ulw %[temp3], " #J "(%[ppq]) \n\t" \ + "andi %[temp5], %[coeff], 0xffff \n\t" \ + "srl %[temp0], %[coeff], 16 \n\t" \ + "lhu %[temp6], " #J "+2(%[ppiq]) \n\t" \ + "lw %[coeff], " #K "+4(%[ppbias]) \n\t" \ + "mul %[level], %[temp5], %[temp1] \n\t" \ + "mul %[temp4], %[temp0], %[temp6] \n\t" \ + "addu %[level], %[level], %[temp2] \n\t" \ + "addu %[temp4], %[temp4], %[coeff] \n\t" \ + "precrq.ph.w %[level], %[temp4], %[level] \n\t" \ + "shra.ph %[level], %[level], 1 \n\t" \ + "cmp.lt.ph %[max_level1],%[level] \n\t" \ + "pick.ph %[level], %[max_level], %[level] \n\t" \ + "xor %[level], %[level], %[sign] \n\t" \ + "subu.ph %[level], %[level], %[sign] \n\t" \ + "mul.ph %[temp3], %[level], %[temp3] \n\t" \ + "or %[ret], %[ret], %[level] \n\t" \ + "sh %[level], " #N "(%[pout]) \n\t" \ + "srl %[level], %[level], 16 \n\t" \ + "sh %[level], " #N1 "(%[pout]) \n\t" \ + "usw %[temp3], " #J "(%[ppin]) \n\t" \ + "j 3f \n\t" \ +"0: \n\t" \ + "sh $0, " #N "(%[pout]) \n\t" \ + "sh $0, " #N1 "(%[pout]) \n\t" \ + "usw $0, " #J "(%[ppin]) \n\t" \ +"3: \n\t" + +static int QuantizeBlock_MIPSdspR2(int16_t in[16], int16_t out[16], + const VP8Matrix* const mtx) { + int temp0, temp1, temp2, temp3, temp4, temp5,temp6; + int sign, coeff, level; + int max_level = MAX_LEVEL; + int max_level1 = max_level << 16 | max_level; + int ret = 0; + + int16_t* ppin = &in[0]; + int16_t* pout = &out[0]; + const uint16_t* ppsharpen = &mtx->sharpen_[0]; + const uint32_t* ppzthresh = &mtx->zthresh_[0]; + const uint16_t* ppq = &mtx->q_[0]; + const uint16_t* ppiq = &mtx->iq_[0]; + const uint32_t* ppbias = &mtx->bias_[0]; + + __asm__ volatile ( + QUANTIZE_ONE( 0, 0, 0, 2) + QUANTIZE_ONE( 4, 8, 10, 12) + QUANTIZE_ONE( 8, 16, 4, 8) + QUANTIZE_ONE(12, 24, 14, 24) + QUANTIZE_ONE(16, 32, 6, 16) + QUANTIZE_ONE(20, 40, 22, 26) + QUANTIZE_ONE(24, 48, 18, 20) + QUANTIZE_ONE(28, 56, 28, 30) + + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), + [temp2]"=&r"(temp2), [temp3]"=&r"(temp3), + [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [sign]"=&r"(sign), [coeff]"=&r"(coeff), + [level]"=&r"(level), [temp6]"=&r"(temp6), [ret]"+&r"(ret) + : [ppin]"r"(ppin), [pout]"r"(pout), [max_level1]"r"(max_level1), + [ppiq]"r"(ppiq), [max_level]"r"(max_level), + [ppbias]"r"(ppbias), [ppzthresh]"r"(ppzthresh), + [ppsharpen]"r"(ppsharpen), [ppq]"r"(ppq) + : "memory", "hi", "lo" + ); + + return (ret != 0); +} + +static int Quantize2Blocks_MIPSdspR2(int16_t in[32], int16_t out[32], + const VP8Matrix* const mtx) { + int nz; + nz = QuantizeBlock_MIPSdspR2(in + 0 * 16, out + 0 * 16, mtx) << 0; + nz |= QuantizeBlock_MIPSdspR2(in + 1 * 16, out + 1 * 16, mtx) << 1; + return nz; +} + +#undef QUANTIZE_ONE + +// macro for one horizontal pass in FTransformWHT +// temp0..temp7 holds tmp[0]..tmp[15] +// A, B, C, D - offset in bytes to load from in buffer +// TEMP0, TEMP1 - registers for corresponding tmp elements +#define HORIZONTAL_PASS_WHT(A, B, C, D, TEMP0, TEMP1) \ + "lh %[" #TEMP0 "], " #A "(%[in]) \n\t" \ + "lh %[" #TEMP1 "], " #B "(%[in]) \n\t" \ + "lh %[temp8], " #C "(%[in]) \n\t" \ + "lh %[temp9], " #D "(%[in]) \n\t" \ + "ins %[" #TEMP1 "], %[" #TEMP0 "], 16, 16 \n\t" \ + "ins %[temp9], %[temp8], 16, 16 \n\t" \ + "subq.ph %[temp8], %[" #TEMP1 "], %[temp9] \n\t" \ + "addq.ph %[temp9], %[" #TEMP1 "], %[temp9] \n\t" \ + "precrq.ph.w %[" #TEMP0 "], %[temp8], %[temp9] \n\t" \ + "append %[temp8], %[temp9], 16 \n\t" \ + "subq.ph %[" #TEMP1 "], %[" #TEMP0 "], %[temp8] 
\n\t" \ + "addq.ph %[" #TEMP0 "], %[" #TEMP0 "], %[temp8] \n\t" \ + "rotr %[" #TEMP1 "], %[" #TEMP1 "], 16 \n\t" + +// macro for one vertical pass in FTransformWHT +// temp0..temp7 holds tmp[0]..tmp[15] +// A, B, C, D - offsets in bytes to store to out buffer +// TEMP0, TEMP2, TEMP4 and TEMP6 - registers for corresponding tmp elements +#define VERTICAL_PASS_WHT(A, B, C, D, TEMP0, TEMP2, TEMP4, TEMP6) \ + "addq.ph %[temp8], %[" #TEMP0 "], %[" #TEMP4 "] \n\t" \ + "addq.ph %[temp9], %[" #TEMP2 "], %[" #TEMP6 "] \n\t" \ + "subq.ph %[" #TEMP2 "], %[" #TEMP2 "], %[" #TEMP6 "] \n\t" \ + "subq.ph %[" #TEMP6 "], %[" #TEMP0 "], %[" #TEMP4 "] \n\t" \ + "addqh.ph %[" #TEMP0 "], %[temp8], %[temp9] \n\t" \ + "subqh.ph %[" #TEMP4 "], %[" #TEMP6 "], %[" #TEMP2 "] \n\t" \ + "addqh.ph %[" #TEMP2 "], %[" #TEMP2 "], %[" #TEMP6 "] \n\t" \ + "subqh.ph %[" #TEMP6 "], %[temp8], %[temp9] \n\t" \ + "usw %[" #TEMP0 "], " #A "(%[out]) \n\t" \ + "usw %[" #TEMP2 "], " #B "(%[out]) \n\t" \ + "usw %[" #TEMP4 "], " #C "(%[out]) \n\t" \ + "usw %[" #TEMP6 "], " #D "(%[out]) \n\t" + +static void FTransformWHT_MIPSdspR2(const int16_t* in, int16_t* out) { + int temp0, temp1, temp2, temp3, temp4; + int temp5, temp6, temp7, temp8, temp9; + + __asm__ volatile ( + HORIZONTAL_PASS_WHT( 0, 32, 64, 96, temp0, temp1) + HORIZONTAL_PASS_WHT(128, 160, 192, 224, temp2, temp3) + HORIZONTAL_PASS_WHT(256, 288, 320, 352, temp4, temp5) + HORIZONTAL_PASS_WHT(384, 416, 448, 480, temp6, temp7) + VERTICAL_PASS_WHT(0, 8, 16, 24, temp0, temp2, temp4, temp6) + VERTICAL_PASS_WHT(4, 12, 20, 28, temp1, temp3, temp5, temp7) + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [temp8]"=&r"(temp8), + [temp9]"=&r"(temp9) + : [in]"r"(in), [out]"r"(out) + : "memory" + ); +} + +#undef VERTICAL_PASS_WHT +#undef HORIZONTAL_PASS_WHT + +// macro for converting coefficients to bin +// convert 8 coeffs at time +// A, B, C, D - offsets in bytes to load from out buffer +#define CONVERT_COEFFS_TO_BIN(A, B, C, D) \ + "ulw %[temp0], " #A "(%[out]) \n\t" \ + "ulw %[temp1], " #B "(%[out]) \n\t" \ + "ulw %[temp2], " #C "(%[out]) \n\t" \ + "ulw %[temp3], " #D "(%[out]) \n\t" \ + "absq_s.ph %[temp0], %[temp0] \n\t" \ + "absq_s.ph %[temp1], %[temp1] \n\t" \ + "absq_s.ph %[temp2], %[temp2] \n\t" \ + "absq_s.ph %[temp3], %[temp3] \n\t" \ + "shra.ph %[temp0], %[temp0], 3 \n\t" \ + "shra.ph %[temp1], %[temp1], 3 \n\t" \ + "shra.ph %[temp2], %[temp2], 3 \n\t" \ + "shra.ph %[temp3], %[temp3], 3 \n\t" \ + "shll_s.ph %[temp0], %[temp0], 10 \n\t" \ + "shll_s.ph %[temp1], %[temp1], 10 \n\t" \ + "shll_s.ph %[temp2], %[temp2], 10 \n\t" \ + "shll_s.ph %[temp3], %[temp3], 10 \n\t" \ + "shrl.ph %[temp0], %[temp0], 10 \n\t" \ + "shrl.ph %[temp1], %[temp1], 10 \n\t" \ + "shrl.ph %[temp2], %[temp2], 10 \n\t" \ + "shrl.ph %[temp3], %[temp3], 10 \n\t" \ + "shll.ph %[temp0], %[temp0], 2 \n\t" \ + "shll.ph %[temp1], %[temp1], 2 \n\t" \ + "shll.ph %[temp2], %[temp2], 2 \n\t" \ + "shll.ph %[temp3], %[temp3], 2 \n\t" \ + "ext %[temp4], %[temp0], 0, 16 \n\t" \ + "ext %[temp0], %[temp0], 16, 16 \n\t" \ + "addu %[temp4], %[temp4], %[dist] \n\t" \ + "addu %[temp0], %[temp0], %[dist] \n\t" \ + "ext %[temp5], %[temp1], 0, 16 \n\t" \ + "lw %[temp8], 0(%[temp4]) \n\t" \ + "ext %[temp1], %[temp1], 16, 16 \n\t" \ + "addu %[temp5], %[temp5], %[dist] \n\t" \ + "addiu %[temp8], %[temp8], 1 \n\t" \ + "sw %[temp8], 0(%[temp4]) \n\t" \ + "lw %[temp8], 0(%[temp0]) \n\t" \ + "addu %[temp1], 
%[temp1], %[dist] \n\t" \ + "ext %[temp6], %[temp2], 0, 16 \n\t" \ + "addiu %[temp8], %[temp8], 1 \n\t" \ + "sw %[temp8], 0(%[temp0]) \n\t" \ + "lw %[temp8], 0(%[temp5]) \n\t" \ + "ext %[temp2], %[temp2], 16, 16 \n\t" \ + "addu %[temp6], %[temp6], %[dist] \n\t" \ + "addiu %[temp8], %[temp8], 1 \n\t" \ + "sw %[temp8], 0(%[temp5]) \n\t" \ + "lw %[temp8], 0(%[temp1]) \n\t" \ + "addu %[temp2], %[temp2], %[dist] \n\t" \ + "ext %[temp7], %[temp3], 0, 16 \n\t" \ + "addiu %[temp8], %[temp8], 1 \n\t" \ + "sw %[temp8], 0(%[temp1]) \n\t" \ + "lw %[temp8], 0(%[temp6]) \n\t" \ + "ext %[temp3], %[temp3], 16, 16 \n\t" \ + "addu %[temp7], %[temp7], %[dist] \n\t" \ + "addiu %[temp8], %[temp8], 1 \n\t" \ + "sw %[temp8], 0(%[temp6]) \n\t" \ + "lw %[temp8], 0(%[temp2]) \n\t" \ + "addu %[temp3], %[temp3], %[dist] \n\t" \ + "addiu %[temp8], %[temp8], 1 \n\t" \ + "sw %[temp8], 0(%[temp2]) \n\t" \ + "lw %[temp8], 0(%[temp7]) \n\t" \ + "addiu %[temp8], %[temp8], 1 \n\t" \ + "sw %[temp8], 0(%[temp7]) \n\t" \ + "lw %[temp8], 0(%[temp3]) \n\t" \ + "addiu %[temp8], %[temp8], 1 \n\t" \ + "sw %[temp8], 0(%[temp3]) \n\t" + +static void CollectHistogram_MIPSdspR2(const uint8_t* ref, const uint8_t* pred, + int start_block, int end_block, + VP8Histogram* const histo) { + int j; + int distribution[MAX_COEFF_THRESH + 1] = { 0 }; + const int max_coeff = (MAX_COEFF_THRESH << 16) + MAX_COEFF_THRESH; + for (j = start_block; j < end_block; ++j) { + int16_t out[16]; + int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8; + + VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out); + + // Convert coefficients to bin. + __asm__ volatile ( + CONVERT_COEFFS_TO_BIN( 0, 4, 8, 12) + CONVERT_COEFFS_TO_BIN(16, 20, 24, 28) + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [temp8]"=&r"(temp8) + : [dist]"r"(distribution), [out]"r"(out), [max_coeff]"r"(max_coeff) + : "memory" + ); + } + VP8SetHistogramData(distribution, histo); +} + +#undef CONVERT_COEFFS_TO_BIN + +//------------------------------------------------------------------------------ +// Entry point + +extern void VP8EncDspInitMIPSdspR2(void); + +WEBP_TSAN_IGNORE_FUNCTION void VP8EncDspInitMIPSdspR2(void) { + VP8FTransform = FTransform_MIPSdspR2; + VP8FTransformWHT = FTransformWHT_MIPSdspR2; + VP8ITransform = ITransform_MIPSdspR2; + + VP8TDisto4x4 = Disto4x4_MIPSdspR2; + VP8TDisto16x16 = Disto16x16_MIPSdspR2; + + VP8EncPredLuma16 = Intra16Preds_MIPSdspR2; + VP8EncPredChroma8 = IntraChromaPreds_MIPSdspR2; + VP8EncPredLuma4 = Intra4Preds_MIPSdspR2; + +#if !defined(WORK_AROUND_GCC) + VP8SSE16x16 = SSE16x16_MIPSdspR2; + VP8SSE8x8 = SSE8x8_MIPSdspR2; + VP8SSE16x8 = SSE16x8_MIPSdspR2; + VP8SSE4x4 = SSE4x4_MIPSdspR2; +#endif + + VP8EncQuantizeBlock = QuantizeBlock_MIPSdspR2; + VP8EncQuantize2Blocks = Quantize2Blocks_MIPSdspR2; + + VP8CollectHistogram = CollectHistogram_MIPSdspR2; +} + +#else // !WEBP_USE_MIPS_DSP_R2 + +WEBP_DSP_INIT_STUB(VP8EncDspInitMIPSdspR2) + +#endif // WEBP_USE_MIPS_DSP_R2 diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_enc_msa.c b/vendor/github.com/sizeofint/webpanimation/dsp_enc_msa.c new file mode 100644 index 00000000..f7465557 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_enc_msa.c @@ -0,0 +1,896 @@ +// Copyright 2016 Google Inc. All Rights Reserved. 
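The CONVERT_COEFFS_TO_BIN macro above bins each transform coefficient before histogramming; the saturating left shift by 10 followed by a logical right shift by 10 is how the asm clips to the maximum bin. A scalar sketch of the binning (MAX_COEFF_THRESH assumed to be 31, libwebp's usual value; the asm additionally scales the bin by 4 to index a word array):

```c
#include <stdint.h>
#include <stdlib.h>

#define MAX_COEFF_THRESH 31  /* assumption: libwebp's histogram clip value */

/* Scalar view of CONVERT_COEFFS_TO_BIN: for each coefficient,
 * bin = min(|coeff| >> 3, MAX_COEFF_THRESH), then count it. */
static void CollectCoeffs_C(const int16_t out[16],
                            int distribution[MAX_COEFF_THRESH + 1]) {
  int k;
  for (k = 0; k < 16; ++k) {
    int v = abs(out[k]) >> 3;
    if (v > MAX_COEFF_THRESH) v = MAX_COEFF_THRESH;
    ++distribution[v];
  }
}
```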
+// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// MSA version of encoder dsp functions. +// +// Author: Prashant Patil (prashant.patil@imgtec.com) + +#include "dsp_dsp.h" + +#if defined(WEBP_USE_MSA) + +#include <stdlib.h>  // for abs() +#include "dsp_msa_macro.h" +#include "enc_vp8i_enc.h" + +//------------------------------------------------------------------------------ +// Transforms + +#define IDCT_1D_W(in0, in1, in2, in3, out0, out1, out2, out3) do { \ + v4i32 a1_m, b1_m, c1_m, d1_m; \ + const v4i32 cospi8sqrt2minus1 = __msa_fill_w(20091); \ + const v4i32 sinpi8sqrt2 = __msa_fill_w(35468); \ + v4i32 c_tmp1_m = in1 * sinpi8sqrt2; \ + v4i32 c_tmp2_m = in3 * cospi8sqrt2minus1; \ + v4i32 d_tmp1_m = in1 * cospi8sqrt2minus1; \ + v4i32 d_tmp2_m = in3 * sinpi8sqrt2; \ + \ + ADDSUB2(in0, in2, a1_m, b1_m); \ + SRAI_W2_SW(c_tmp1_m, c_tmp2_m, 16); \ + c_tmp2_m = c_tmp2_m + in3; \ + c1_m = c_tmp1_m - c_tmp2_m; \ + SRAI_W2_SW(d_tmp1_m, d_tmp2_m, 16); \ + d_tmp1_m = d_tmp1_m + in1; \ + d1_m = d_tmp1_m + d_tmp2_m; \ + BUTTERFLY_4(a1_m, b1_m, c1_m, d1_m, out0, out1, out2, out3); \ +} while (0) + +static WEBP_INLINE void ITransformOne(const uint8_t* ref, const int16_t* in, + uint8_t* dst) { + v8i16 input0, input1; + v4i32 in0, in1, in2, in3, hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3; + v4i32 res0, res1, res2, res3; + v16i8 dest0, dest1, dest2, dest3; + const v16i8 zero = { 0 }; + + LD_SH2(in, 8, input0, input1); + UNPCK_SH_SW(input0, in0, in1); + UNPCK_SH_SW(input1, in2, in3); + IDCT_1D_W(in0, in1, in2, in3, hz0, hz1, hz2, hz3); + TRANSPOSE4x4_SW_SW(hz0, hz1, hz2, hz3, hz0, hz1, hz2, hz3); + IDCT_1D_W(hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3); + SRARI_W4_SW(vt0, vt1, vt2, vt3, 3); + TRANSPOSE4x4_SW_SW(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3); + LD_SB4(ref, BPS, dest0, dest1, dest2, dest3); + ILVR_B4_SW(zero, dest0, zero, dest1, zero, dest2, zero, dest3, + res0, res1, res2, res3); + ILVR_H4_SW(zero, res0, zero, res1, zero, res2, zero, res3, + res0, res1, res2, res3); + ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3); + CLIP_SW4_0_255(res0, res1, res2, res3); + PCKEV_B2_SW(res0, res1, res2, res3, vt0, vt1); + res0 = (v4i32)__msa_pckev_b((v16i8)vt0, (v16i8)vt1); + ST4x4_UB(res0, res0, 3, 2, 1, 0, dst, BPS); +} + +static void ITransform_MSA(const uint8_t* ref, const int16_t* in, uint8_t* dst, + int do_two) { + ITransformOne(ref, in, dst); + if (do_two) { + ITransformOne(ref + 4, in + 16, dst + 4); + } +} + +static void FTransform_MSA(const uint8_t* src, const uint8_t* ref, + int16_t* out) { + uint64_t out0, out1, out2, out3; + uint32_t in0, in1, in2, in3; + v4i32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5; + v8i16 t0, t1, t2, t3; + v16u8 srcl0, srcl1, src0 = { 0 }, src1 = { 0 }; + const v8i16 mask0 = { 0, 4, 8, 12, 1, 5, 9, 13 }; + const v8i16 mask1 = { 3, 7, 11, 15, 2, 6, 10, 14 }; + const v8i16 mask2 = { 4, 0, 5, 1, 6, 2, 7, 3 }; + const v8i16 mask3 = { 0, 4, 1, 5, 2, 6, 3, 7 }; + const v8i16 cnst0 = { 2217, -5352, 2217, -5352, 2217, -5352, 2217, -5352 }; + const v8i16 cnst1 = { 5352, 2217, 5352, 2217, 5352, 2217, 5352, 2217 }; + + LW4(src, BPS, in0, in1, in2, in3); + INSERT_W4_UB(in0, in1, in2, in3, src0); + LW4(ref, BPS, in0, in1, in2, in3); +
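The forward-transform kernels in this patch (the MIPS HORIZONTAL_PASS/VERTICAL_PASS macros earlier and this MSA FTransform) implement the same VP8 4x4 forward DCT. A scalar sketch reconstructed from the constants visible in the code (2217/5352 multipliers, 937/1812 and 12000/51000 rounding terms, shifts of 9 and 16); it should match libwebp's portable FTransform, but treat it as a reference sketch rather than the vendored code:

```c
#include <stdint.h>

#define BPS 32  /* assumption: prediction-buffer stride */

/* Scalar 4x4 forward transform: the horizontal pass scales by 8 and by
 * (2217, 5352) >> 9; the vertical pass rounds with +7 >> 4 and with
 * (12000, 51000) >> 16. The (a3 != 0) term matches the asm's movn. */
static void FTransform_C(const uint8_t* src, const uint8_t* ref,
                         int16_t* out) {
  int i;
  int tmp[16];
  for (i = 0; i < 4; ++i, src += BPS, ref += BPS) {
    const int d0 = src[0] - ref[0];
    const int d1 = src[1] - ref[1];
    const int d2 = src[2] - ref[2];
    const int d3 = src[3] - ref[3];
    const int a0 = d0 + d3;
    const int a1 = d1 + d2;
    const int a2 = d1 - d2;
    const int a3 = d0 - d3;
    tmp[0 + i * 4] = (a0 + a1) * 8;
    tmp[1 + i * 4] = (a2 * 2217 + a3 * 5352 + 1812) >> 9;
    tmp[2 + i * 4] = (a0 - a1) * 8;
    tmp[3 + i * 4] = (a3 * 2217 - a2 * 5352 + 937) >> 9;
  }
  for (i = 0; i < 4; ++i) {
    const int a0 = tmp[0 + i] + tmp[12 + i];
    const int a1 = tmp[4 + i] + tmp[8 + i];
    const int a2 = tmp[4 + i] - tmp[8 + i];
    const int a3 = tmp[0 + i] - tmp[12 + i];
    out[0 + i] = (a0 + a1 + 7) >> 4;
    out[4 + i] = ((a2 * 2217 + a3 * 5352 + 12000) >> 16) + (a3 != 0);
    out[8 + i] = (a0 - a1 + 7) >> 4;
    out[12 + i] = (a3 * 2217 - a2 * 5352 + 51000) >> 16;
  }
}
```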
INSERT_W4_UB(in0, in1, in2, in3, src1); + ILVRL_B2_UB(src0, src1, srcl0, srcl1); + HSUB_UB2_SH(srcl0, srcl1, t0, t1); + VSHF_H2_SH(t0, t1, t0, t1, mask0, mask1, t2, t3); + ADDSUB2(t2, t3, t0, t1); + t0 = SRLI_H(t0, 3); + VSHF_H2_SH(t0, t0, t1, t1, mask2, mask3, t3, t2); + tmp0 = __msa_hadd_s_w(t3, t3); + tmp2 = __msa_hsub_s_w(t3, t3); + FILL_W2_SW(1812, 937, tmp1, tmp3); + DPADD_SH2_SW(t2, t2, cnst0, cnst1, tmp3, tmp1); + SRAI_W2_SW(tmp1, tmp3, 9); + PCKEV_H2_SH(tmp1, tmp0, tmp3, tmp2, t0, t1); + VSHF_H2_SH(t0, t1, t0, t1, mask0, mask1, t2, t3); + ADDSUB2(t2, t3, t0, t1); + VSHF_H2_SH(t0, t0, t1, t1, mask2, mask3, t3, t2); + tmp0 = __msa_hadd_s_w(t3, t3); + tmp2 = __msa_hsub_s_w(t3, t3); + ADDVI_W2_SW(tmp0, 7, tmp2, 7, tmp0, tmp2); + SRAI_W2_SW(tmp0, tmp2, 4); + FILL_W2_SW(12000, 51000, tmp1, tmp3); + DPADD_SH2_SW(t2, t2, cnst0, cnst1, tmp3, tmp1); + SRAI_W2_SW(tmp1, tmp3, 16); + UNPCK_R_SH_SW(t1, tmp4); + tmp5 = __msa_ceqi_w(tmp4, 0); + tmp4 = (v4i32)__msa_nor_v((v16u8)tmp5, (v16u8)tmp5); + tmp5 = __msa_fill_w(1); + tmp5 = (v4i32)__msa_and_v((v16u8)tmp5, (v16u8)tmp4); + tmp1 += tmp5; + PCKEV_H2_SH(tmp1, tmp0, tmp3, tmp2, t0, t1); + out0 = __msa_copy_s_d((v2i64)t0, 0); + out1 = __msa_copy_s_d((v2i64)t0, 1); + out2 = __msa_copy_s_d((v2i64)t1, 0); + out3 = __msa_copy_s_d((v2i64)t1, 1); + SD4(out0, out1, out2, out3, out, 8); +} + +static void FTransformWHT_MSA(const int16_t* in, int16_t* out) { + v8i16 in0 = { 0 }; + v8i16 in1 = { 0 }; + v8i16 tmp0, tmp1, tmp2, tmp3; + v8i16 out0, out1; + const v8i16 mask0 = { 0, 1, 2, 3, 8, 9, 10, 11 }; + const v8i16 mask1 = { 4, 5, 6, 7, 12, 13, 14, 15 }; + const v8i16 mask2 = { 0, 4, 8, 12, 1, 5, 9, 13 }; + const v8i16 mask3 = { 3, 7, 11, 15, 2, 6, 10, 14 }; + + in0 = __msa_insert_h(in0, 0, in[ 0]); + in0 = __msa_insert_h(in0, 1, in[ 64]); + in0 = __msa_insert_h(in0, 2, in[128]); + in0 = __msa_insert_h(in0, 3, in[192]); + in0 = __msa_insert_h(in0, 4, in[ 16]); + in0 = __msa_insert_h(in0, 5, in[ 80]); + in0 = __msa_insert_h(in0, 6, in[144]); + in0 = __msa_insert_h(in0, 7, in[208]); + in1 = __msa_insert_h(in1, 0, in[ 48]); + in1 = __msa_insert_h(in1, 1, in[112]); + in1 = __msa_insert_h(in1, 2, in[176]); + in1 = __msa_insert_h(in1, 3, in[240]); + in1 = __msa_insert_h(in1, 4, in[ 32]); + in1 = __msa_insert_h(in1, 5, in[ 96]); + in1 = __msa_insert_h(in1, 6, in[160]); + in1 = __msa_insert_h(in1, 7, in[224]); + ADDSUB2(in0, in1, tmp0, tmp1); + VSHF_H2_SH(tmp0, tmp1, tmp0, tmp1, mask0, mask1, tmp2, tmp3); + ADDSUB2(tmp2, tmp3, tmp0, tmp1); + VSHF_H2_SH(tmp0, tmp1, tmp0, tmp1, mask2, mask3, in0, in1); + ADDSUB2(in0, in1, tmp0, tmp1); + VSHF_H2_SH(tmp0, tmp1, tmp0, tmp1, mask0, mask1, tmp2, tmp3); + ADDSUB2(tmp2, tmp3, out0, out1); + SRAI_H2_SH(out0, out1, 1); + ST_SH2(out0, out1, out, 8); +} + +static int TTransform_MSA(const uint8_t* in, const uint16_t* w) { + int sum; + uint32_t in0_m, in1_m, in2_m, in3_m; + v16i8 src0 = { 0 }; + v8i16 in0, in1, tmp0, tmp1, tmp2, tmp3; + v4i32 dst0, dst1; + const v16i8 zero = { 0 }; + const v8i16 mask0 = { 0, 1, 2, 3, 8, 9, 10, 11 }; + const v8i16 mask1 = { 4, 5, 6, 7, 12, 13, 14, 15 }; + const v8i16 mask2 = { 0, 4, 8, 12, 1, 5, 9, 13 }; + const v8i16 mask3 = { 3, 7, 11, 15, 2, 6, 10, 14 }; + + LW4(in, BPS, in0_m, in1_m, in2_m, in3_m); + INSERT_W4_SB(in0_m, in1_m, in2_m, in3_m, src0); + ILVRL_B2_SH(zero, src0, tmp0, tmp1); + VSHF_H2_SH(tmp0, tmp1, tmp0, tmp1, mask2, mask3, in0, in1); + ADDSUB2(in0, in1, tmp0, tmp1); + VSHF_H2_SH(tmp0, tmp1, tmp0, tmp1, mask0, mask1, tmp2, tmp3); + ADDSUB2(tmp2, tmp3, tmp0, tmp1); + 
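+  // second pass: repeat the shuffle/add-sub butterflies on the
+  // first-pass results before taking absolute values below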
VSHF_H2_SH(tmp0, tmp1, tmp0, tmp1, mask2, mask3, in0, in1); + ADDSUB2(in0, in1, tmp0, tmp1); + VSHF_H2_SH(tmp0, tmp1, tmp0, tmp1, mask0, mask1, tmp2, tmp3); + ADDSUB2(tmp2, tmp3, tmp0, tmp1); + tmp0 = __msa_add_a_h(tmp0, (v8i16)zero); + tmp1 = __msa_add_a_h(tmp1, (v8i16)zero); + LD_SH2(w, 8, tmp2, tmp3); + DOTP_SH2_SW(tmp0, tmp1, tmp2, tmp3, dst0, dst1); + dst0 = dst0 + dst1; + sum = HADD_SW_S32(dst0); + return sum; +} + +static int Disto4x4_MSA(const uint8_t* const a, const uint8_t* const b, + const uint16_t* const w) { + const int sum1 = TTransform_MSA(a, w); + const int sum2 = TTransform_MSA(b, w); + return abs(sum2 - sum1) >> 5; +} + +static int Disto16x16_MSA(const uint8_t* const a, const uint8_t* const b, + const uint16_t* const w) { + int D = 0; + int x, y; + for (y = 0; y < 16 * BPS; y += 4 * BPS) { + for (x = 0; x < 16; x += 4) { + D += Disto4x4_MSA(a + x + y, b + x + y, w); + } + } + return D; +} + +//------------------------------------------------------------------------------ +// Histogram + +static void CollectHistogram_MSA(const uint8_t* ref, const uint8_t* pred, + int start_block, int end_block, + VP8Histogram* const histo) { + int j; + int distribution[MAX_COEFF_THRESH + 1] = { 0 }; + for (j = start_block; j < end_block; ++j) { + int16_t out[16]; + VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out); + { + int k; + v8i16 coeff0, coeff1; + const v8i16 zero = { 0 }; + const v8i16 max_coeff_thr = __msa_ldi_h(MAX_COEFF_THRESH); + LD_SH2(&out[0], 8, coeff0, coeff1); + coeff0 = __msa_add_a_h(coeff0, zero); + coeff1 = __msa_add_a_h(coeff1, zero); + SRAI_H2_SH(coeff0, coeff1, 3); + coeff0 = __msa_min_s_h(coeff0, max_coeff_thr); + coeff1 = __msa_min_s_h(coeff1, max_coeff_thr); + ST_SH2(coeff0, coeff1, &out[0], 8); + for (k = 0; k < 16; ++k) { + ++distribution[out[k]]; + } + } + } + VP8SetHistogramData(distribution, histo); +} + +//------------------------------------------------------------------------------ +// Intra predictions + +// luma 4x4 prediction + +#define DST(x, y) dst[(x) + (y) * BPS] +#define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2) +#define AVG2(a, b) (((a) + (b) + 1) >> 1) + +static WEBP_INLINE void VE4(uint8_t* dst, const uint8_t* top) { // vertical + const v16u8 A1 = { 0 }; + const uint64_t val_m = LD(top - 1); + const v16u8 A = (v16u8)__msa_insert_d((v2i64)A1, 0, val_m); + const v16u8 B = SLDI_UB(A, A, 1); + const v16u8 C = SLDI_UB(A, A, 2); + const v16u8 AC = __msa_ave_u_b(A, C); + const v16u8 B2 = __msa_ave_u_b(B, B); + const v16u8 R = __msa_aver_u_b(AC, B2); + const uint32_t out = __msa_copy_s_w((v4i32)R, 0); + SW4(out, out, out, out, dst, BPS); +} + +static WEBP_INLINE void HE4(uint8_t* dst, const uint8_t* top) { // horizontal + const int X = top[-1]; + const int I = top[-2]; + const int J = top[-3]; + const int K = top[-4]; + const int L = top[-5]; + WebPUint32ToMem(dst + 0 * BPS, 0x01010101U * AVG3(X, I, J)); + WebPUint32ToMem(dst + 1 * BPS, 0x01010101U * AVG3(I, J, K)); + WebPUint32ToMem(dst + 2 * BPS, 0x01010101U * AVG3(J, K, L)); + WebPUint32ToMem(dst + 3 * BPS, 0x01010101U * AVG3(K, L, L)); +} + +static WEBP_INLINE void DC4(uint8_t* dst, const uint8_t* top) { + uint32_t dc = 4; + int i; + for (i = 0; i < 4; ++i) dc += top[i] + top[-5 + i]; + dc >>= 3; + dc = dc | (dc << 8) | (dc << 16) | (dc << 24); + SW4(dc, dc, dc, dc, dst, BPS); +} + +static WEBP_INLINE void RD4(uint8_t* dst, const uint8_t* top) { + const v16u8 A2 = { 0 }; + const uint64_t val_m = LD(top - 5); + const v16u8 A1 = (v16u8)__msa_insert_d((v2i64)A2, 0, val_m); + const v16u8 
A = (v16u8)__msa_insert_b((v16i8)A1, 8, top[3]); + const v16u8 B = SLDI_UB(A, A, 1); + const v16u8 C = SLDI_UB(A, A, 2); + const v16u8 AC = __msa_ave_u_b(A, C); + const v16u8 B2 = __msa_ave_u_b(B, B); + const v16u8 R0 = __msa_aver_u_b(AC, B2); + const v16u8 R1 = SLDI_UB(R0, R0, 1); + const v16u8 R2 = SLDI_UB(R1, R1, 1); + const v16u8 R3 = SLDI_UB(R2, R2, 1); + const uint32_t val0 = __msa_copy_s_w((v4i32)R0, 0); + const uint32_t val1 = __msa_copy_s_w((v4i32)R1, 0); + const uint32_t val2 = __msa_copy_s_w((v4i32)R2, 0); + const uint32_t val3 = __msa_copy_s_w((v4i32)R3, 0); + SW4(val3, val2, val1, val0, dst, BPS); +} + +static WEBP_INLINE void LD4(uint8_t* dst, const uint8_t* top) { + const v16u8 A1 = { 0 }; + const uint64_t val_m = LD(top); + const v16u8 A = (v16u8)__msa_insert_d((v2i64)A1, 0, val_m); + const v16u8 B = SLDI_UB(A, A, 1); + const v16u8 C1 = SLDI_UB(A, A, 2); + const v16u8 C = (v16u8)__msa_insert_b((v16i8)C1, 6, top[7]); + const v16u8 AC = __msa_ave_u_b(A, C); + const v16u8 B2 = __msa_ave_u_b(B, B); + const v16u8 R0 = __msa_aver_u_b(AC, B2); + const v16u8 R1 = SLDI_UB(R0, R0, 1); + const v16u8 R2 = SLDI_UB(R1, R1, 1); + const v16u8 R3 = SLDI_UB(R2, R2, 1); + const uint32_t val0 = __msa_copy_s_w((v4i32)R0, 0); + const uint32_t val1 = __msa_copy_s_w((v4i32)R1, 0); + const uint32_t val2 = __msa_copy_s_w((v4i32)R2, 0); + const uint32_t val3 = __msa_copy_s_w((v4i32)R3, 0); + SW4(val0, val1, val2, val3, dst, BPS); +} + +static WEBP_INLINE void VR4(uint8_t* dst, const uint8_t* top) { + const int X = top[-1]; + const int I = top[-2]; + const int J = top[-3]; + const int K = top[-4]; + const int A = top[0]; + const int B = top[1]; + const int C = top[2]; + const int D = top[3]; + DST(0, 0) = DST(1, 2) = AVG2(X, A); + DST(1, 0) = DST(2, 2) = AVG2(A, B); + DST(2, 0) = DST(3, 2) = AVG2(B, C); + DST(3, 0) = AVG2(C, D); + DST(0, 3) = AVG3(K, J, I); + DST(0, 2) = AVG3(J, I, X); + DST(0, 1) = DST(1, 3) = AVG3(I, X, A); + DST(1, 1) = DST(2, 3) = AVG3(X, A, B); + DST(2, 1) = DST(3, 3) = AVG3(A, B, C); + DST(3, 1) = AVG3(B, C, D); +} + +static WEBP_INLINE void VL4(uint8_t* dst, const uint8_t* top) { + const int A = top[0]; + const int B = top[1]; + const int C = top[2]; + const int D = top[3]; + const int E = top[4]; + const int F = top[5]; + const int G = top[6]; + const int H = top[7]; + DST(0, 0) = AVG2(A, B); + DST(1, 0) = DST(0, 2) = AVG2(B, C); + DST(2, 0) = DST(1, 2) = AVG2(C, D); + DST(3, 0) = DST(2, 2) = AVG2(D, E); + DST(0, 1) = AVG3(A, B, C); + DST(1, 1) = DST(0, 3) = AVG3(B, C, D); + DST(2, 1) = DST(1, 3) = AVG3(C, D, E); + DST(3, 1) = DST(2, 3) = AVG3(D, E, F); + DST(3, 2) = AVG3(E, F, G); + DST(3, 3) = AVG3(F, G, H); +} + +static WEBP_INLINE void HU4(uint8_t* dst, const uint8_t* top) { + const int I = top[-2]; + const int J = top[-3]; + const int K = top[-4]; + const int L = top[-5]; + DST(0, 0) = AVG2(I, J); + DST(2, 0) = DST(0, 1) = AVG2(J, K); + DST(2, 1) = DST(0, 2) = AVG2(K, L); + DST(1, 0) = AVG3(I, J, K); + DST(3, 0) = DST(1, 1) = AVG3(J, K, L); + DST(3, 1) = DST(1, 2) = AVG3(K, L, L); + DST(3, 2) = DST(2, 2) = + DST(0, 3) = DST(1, 3) = DST(2, 3) = DST(3, 3) = L; +} + +static WEBP_INLINE void HD4(uint8_t* dst, const uint8_t* top) { + const int X = top[-1]; + const int I = top[-2]; + const int J = top[-3]; + const int K = top[-4]; + const int L = top[-5]; + const int A = top[0]; + const int B = top[1]; + const int C = top[2]; + DST(0, 0) = DST(2, 1) = AVG2(I, X); + DST(0, 1) = DST(2, 2) = AVG2(J, I); + DST(0, 2) = DST(2, 3) = AVG2(K, J); + DST(0, 3) = AVG2(L, K); + DST(3, 0) = 
AVG3(A, B, C); + DST(2, 0) = AVG3(X, A, B); + DST(1, 0) = DST(3, 1) = AVG3(I, X, A); + DST(1, 1) = DST(3, 2) = AVG3(J, I, X); + DST(1, 2) = DST(3, 3) = AVG3(K, J, I); + DST(1, 3) = AVG3(L, K, J); +} + +static WEBP_INLINE void TM4(uint8_t* dst, const uint8_t* top) { + const v16i8 zero = { 0 }; + const v8i16 TL = (v8i16)__msa_fill_h(top[-1]); + const v8i16 L0 = (v8i16)__msa_fill_h(top[-2]); + const v8i16 L1 = (v8i16)__msa_fill_h(top[-3]); + const v8i16 L2 = (v8i16)__msa_fill_h(top[-4]); + const v8i16 L3 = (v8i16)__msa_fill_h(top[-5]); + const v16u8 T1 = LD_UB(top); + const v8i16 T = (v8i16)__msa_ilvr_b(zero, (v16i8)T1); + const v8i16 d = T - TL; + v8i16 r0, r1, r2, r3; + ADD4(d, L0, d, L1, d, L2, d, L3, r0, r1, r2, r3); + CLIP_SH4_0_255(r0, r1, r2, r3); + PCKEV_ST4x4_UB(r0, r1, r2, r3, dst, BPS); +} + +#undef DST +#undef AVG3 +#undef AVG2 + +static void Intra4Preds_MSA(uint8_t* dst, const uint8_t* top) { + DC4(I4DC4 + dst, top); + TM4(I4TM4 + dst, top); + VE4(I4VE4 + dst, top); + HE4(I4HE4 + dst, top); + RD4(I4RD4 + dst, top); + VR4(I4VR4 + dst, top); + LD4(I4LD4 + dst, top); + VL4(I4VL4 + dst, top); + HD4(I4HD4 + dst, top); + HU4(I4HU4 + dst, top); +} + +// luma 16x16 prediction + +#define STORE16x16(out, dst) do { \ + ST_UB8(out, out, out, out, out, out, out, out, dst + 0 * BPS, BPS); \ + ST_UB8(out, out, out, out, out, out, out, out, dst + 8 * BPS, BPS); \ +} while (0) + +static WEBP_INLINE void VerticalPred16x16(uint8_t* dst, const uint8_t* top) { + if (top != NULL) { + const v16u8 out = LD_UB(top); + STORE16x16(out, dst); + } else { + const v16u8 out = (v16u8)__msa_fill_b(0x7f); + STORE16x16(out, dst); + } +} + +static WEBP_INLINE void HorizontalPred16x16(uint8_t* dst, + const uint8_t* left) { + if (left != NULL) { + int j; + for (j = 0; j < 16; j += 4) { + const v16u8 L0 = (v16u8)__msa_fill_b(left[0]); + const v16u8 L1 = (v16u8)__msa_fill_b(left[1]); + const v16u8 L2 = (v16u8)__msa_fill_b(left[2]); + const v16u8 L3 = (v16u8)__msa_fill_b(left[3]); + ST_UB4(L0, L1, L2, L3, dst, BPS); + dst += 4 * BPS; + left += 4; + } + } else { + const v16u8 out = (v16u8)__msa_fill_b(0x81); + STORE16x16(out, dst); + } +} + +static WEBP_INLINE void TrueMotion16x16(uint8_t* dst, const uint8_t* left, + const uint8_t* top) { + if (left != NULL) { + if (top != NULL) { + int j; + v8i16 d1, d2; + const v16i8 zero = { 0 }; + const v8i16 TL = (v8i16)__msa_fill_h(left[-1]); + const v16u8 T = LD_UB(top); + ILVRL_B2_SH(zero, T, d1, d2); + SUB2(d1, TL, d2, TL, d1, d2); + for (j = 0; j < 16; j += 4) { + v16i8 t0, t1, t2, t3; + v8i16 r0, r1, r2, r3, r4, r5, r6, r7; + const v8i16 L0 = (v8i16)__msa_fill_h(left[j + 0]); + const v8i16 L1 = (v8i16)__msa_fill_h(left[j + 1]); + const v8i16 L2 = (v8i16)__msa_fill_h(left[j + 2]); + const v8i16 L3 = (v8i16)__msa_fill_h(left[j + 3]); + ADD4(d1, L0, d1, L1, d1, L2, d1, L3, r0, r1, r2, r3); + ADD4(d2, L0, d2, L1, d2, L2, d2, L3, r4, r5, r6, r7); + CLIP_SH4_0_255(r0, r1, r2, r3); + CLIP_SH4_0_255(r4, r5, r6, r7); + PCKEV_B4_SB(r4, r0, r5, r1, r6, r2, r7, r3, t0, t1, t2, t3); + ST_SB4(t0, t1, t2, t3, dst, BPS); + dst += 4 * BPS; + } + } else { + HorizontalPred16x16(dst, left); + } + } else { + if (top != NULL) { + VerticalPred16x16(dst, top); + } else { + const v16u8 out = (v16u8)__msa_fill_b(0x81); + STORE16x16(out, dst); + } + } +} + +static WEBP_INLINE void DCMode16x16(uint8_t* dst, const uint8_t* left, + const uint8_t* top) { + int DC; + v16u8 out; + if (top != NULL && left != NULL) { + const v16u8 rtop = LD_UB(top); + const v8u16 dctop = __msa_hadd_u_h(rtop, rtop); + const 
v16u8 rleft = LD_UB(left); + const v8u16 dcleft = __msa_hadd_u_h(rleft, rleft); + const v8u16 dctemp = dctop + dcleft; + DC = HADD_UH_U32(dctemp); + DC = (DC + 16) >> 5; + } else if (left != NULL) { // left but no top + const v16u8 rleft = LD_UB(left); + const v8u16 dcleft = __msa_hadd_u_h(rleft, rleft); + DC = HADD_UH_U32(dcleft); + DC = (DC + DC + 16) >> 5; + } else if (top != NULL) { // top but no left + const v16u8 rtop = LD_UB(top); + const v8u16 dctop = __msa_hadd_u_h(rtop, rtop); + DC = HADD_UH_U32(dctop); + DC = (DC + DC + 16) >> 5; + } else { // no top, no left, nothing. + DC = 0x80; + } + out = (v16u8)__msa_fill_b(DC); + STORE16x16(out, dst); +} + +static void Intra16Preds_MSA(uint8_t* dst, + const uint8_t* left, const uint8_t* top) { + DCMode16x16(I16DC16 + dst, left, top); + VerticalPred16x16(I16VE16 + dst, top); + HorizontalPred16x16(I16HE16 + dst, left); + TrueMotion16x16(I16TM16 + dst, left, top); +} + +// Chroma 8x8 prediction + +#define CALC_DC8(in, out) do { \ + const v8u16 temp0 = __msa_hadd_u_h(in, in); \ + const v4u32 temp1 = __msa_hadd_u_w(temp0, temp0); \ + const v2i64 temp2 = (v2i64)__msa_hadd_u_d(temp1, temp1); \ + const v2i64 temp3 = __msa_splati_d(temp2, 1); \ + const v2i64 temp4 = temp3 + temp2; \ + const v16i8 temp5 = (v16i8)__msa_srari_d(temp4, 4); \ + const v2i64 temp6 = (v2i64)__msa_splati_b(temp5, 0); \ + out = __msa_copy_s_d(temp6, 0); \ +} while (0) + +#define STORE8x8(out, dst) do { \ + SD4(out, out, out, out, dst + 0 * BPS, BPS); \ + SD4(out, out, out, out, dst + 4 * BPS, BPS); \ +} while (0) + +static WEBP_INLINE void VerticalPred8x8(uint8_t* dst, const uint8_t* top) { + if (top != NULL) { + const uint64_t out = LD(top); + STORE8x8(out, dst); + } else { + const uint64_t out = 0x7f7f7f7f7f7f7f7fULL; + STORE8x8(out, dst); + } +} + +static WEBP_INLINE void HorizontalPred8x8(uint8_t* dst, const uint8_t* left) { + if (left != NULL) { + int j; + for (j = 0; j < 8; j += 4) { + const v16u8 L0 = (v16u8)__msa_fill_b(left[0]); + const v16u8 L1 = (v16u8)__msa_fill_b(left[1]); + const v16u8 L2 = (v16u8)__msa_fill_b(left[2]); + const v16u8 L3 = (v16u8)__msa_fill_b(left[3]); + const uint64_t out0 = __msa_copy_s_d((v2i64)L0, 0); + const uint64_t out1 = __msa_copy_s_d((v2i64)L1, 0); + const uint64_t out2 = __msa_copy_s_d((v2i64)L2, 0); + const uint64_t out3 = __msa_copy_s_d((v2i64)L3, 0); + SD4(out0, out1, out2, out3, dst, BPS); + dst += 4 * BPS; + left += 4; + } + } else { + const uint64_t out = 0x8181818181818181ULL; + STORE8x8(out, dst); + } +} + +static WEBP_INLINE void TrueMotion8x8(uint8_t* dst, const uint8_t* left, + const uint8_t* top) { + if (left != NULL) { + if (top != NULL) { + int j; + const v8i16 TL = (v8i16)__msa_fill_h(left[-1]); + const v16u8 T1 = LD_UB(top); + const v16i8 zero = { 0 }; + const v8i16 T = (v8i16)__msa_ilvr_b(zero, (v16i8)T1); + const v8i16 d = T - TL; + for (j = 0; j < 8; j += 4) { + uint64_t out0, out1, out2, out3; + v16i8 t0, t1; + v8i16 r0 = (v8i16)__msa_fill_h(left[j + 0]); + v8i16 r1 = (v8i16)__msa_fill_h(left[j + 1]); + v8i16 r2 = (v8i16)__msa_fill_h(left[j + 2]); + v8i16 r3 = (v8i16)__msa_fill_h(left[j + 3]); + ADD4(d, r0, d, r1, d, r2, d, r3, r0, r1, r2, r3); + CLIP_SH4_0_255(r0, r1, r2, r3); + PCKEV_B2_SB(r1, r0, r3, r2, t0, t1); + out0 = __msa_copy_s_d((v2i64)t0, 0); + out1 = __msa_copy_s_d((v2i64)t0, 1); + out2 = __msa_copy_s_d((v2i64)t1, 0); + out3 = __msa_copy_s_d((v2i64)t1, 1); + SD4(out0, out1, out2, out3, dst, BPS); + dst += 4 * BPS; + } + } else { + HorizontalPred8x8(dst, left); + } + } else { + if (top != NULL) { + 
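+      // no left column available: fall back to vertical prediction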
VerticalPred8x8(dst, top); + } else { + const uint64_t out = 0x8181818181818181ULL; + STORE8x8(out, dst); + } + } +} + +static WEBP_INLINE void DCMode8x8(uint8_t* dst, const uint8_t* left, + const uint8_t* top) { + uint64_t out; + v16u8 src = { 0 }; + if (top != NULL && left != NULL) { + const uint64_t left_m = LD(left); + const uint64_t top_m = LD(top); + INSERT_D2_UB(left_m, top_m, src); + CALC_DC8(src, out); + } else if (left != NULL) { // left but no top + const uint64_t left_m = LD(left); + INSERT_D2_UB(left_m, left_m, src); + CALC_DC8(src, out); + } else if (top != NULL) { // top but no left + const uint64_t top_m = LD(top); + INSERT_D2_UB(top_m, top_m, src); + CALC_DC8(src, out); + } else { // no top, no left, nothing. + src = (v16u8)__msa_fill_b(0x80); + out = __msa_copy_s_d((v2i64)src, 0); + } + STORE8x8(out, dst); +} + +static void IntraChromaPreds_MSA(uint8_t* dst, const uint8_t* left, + const uint8_t* top) { + // U block + DCMode8x8(C8DC8 + dst, left, top); + VerticalPred8x8(C8VE8 + dst, top); + HorizontalPred8x8(C8HE8 + dst, left); + TrueMotion8x8(C8TM8 + dst, left, top); + // V block + dst += 8; + if (top != NULL) top += 8; + if (left != NULL) left += 16; + DCMode8x8(C8DC8 + dst, left, top); + VerticalPred8x8(C8VE8 + dst, top); + HorizontalPred8x8(C8HE8 + dst, left); + TrueMotion8x8(C8TM8 + dst, left, top); +} + +//------------------------------------------------------------------------------ +// Metric + +#define PACK_DOTP_UB4_SW(in0, in1, in2, in3, out0, out1, out2, out3) do { \ + v16u8 tmp0, tmp1; \ + v8i16 tmp2, tmp3; \ + ILVRL_B2_UB(in0, in1, tmp0, tmp1); \ + HSUB_UB2_SH(tmp0, tmp1, tmp2, tmp3); \ + DOTP_SH2_SW(tmp2, tmp3, tmp2, tmp3, out0, out1); \ + ILVRL_B2_UB(in2, in3, tmp0, tmp1); \ + HSUB_UB2_SH(tmp0, tmp1, tmp2, tmp3); \ + DOTP_SH2_SW(tmp2, tmp3, tmp2, tmp3, out2, out3); \ +} while (0) + +#define PACK_DPADD_UB4_SW(in0, in1, in2, in3, out0, out1, out2, out3) do { \ + v16u8 tmp0, tmp1; \ + v8i16 tmp2, tmp3; \ + ILVRL_B2_UB(in0, in1, tmp0, tmp1); \ + HSUB_UB2_SH(tmp0, tmp1, tmp2, tmp3); \ + DPADD_SH2_SW(tmp2, tmp3, tmp2, tmp3, out0, out1); \ + ILVRL_B2_UB(in2, in3, tmp0, tmp1); \ + HSUB_UB2_SH(tmp0, tmp1, tmp2, tmp3); \ + DPADD_SH2_SW(tmp2, tmp3, tmp2, tmp3, out2, out3); \ +} while (0) + +static int SSE16x16_MSA(const uint8_t* a, const uint8_t* b) { + uint32_t sum; + v16u8 src0, src1, src2, src3, src4, src5, src6, src7; + v16u8 ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7; + v4i32 out0, out1, out2, out3; + + LD_UB8(a, BPS, src0, src1, src2, src3, src4, src5, src6, src7); + LD_UB8(b, BPS, ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7); + PACK_DOTP_UB4_SW(src0, ref0, src1, ref1, out0, out1, out2, out3); + PACK_DPADD_UB4_SW(src2, ref2, src3, ref3, out0, out1, out2, out3); + PACK_DPADD_UB4_SW(src4, ref4, src5, ref5, out0, out1, out2, out3); + PACK_DPADD_UB4_SW(src6, ref6, src7, ref7, out0, out1, out2, out3); + a += 8 * BPS; + b += 8 * BPS; + LD_UB8(a, BPS, src0, src1, src2, src3, src4, src5, src6, src7); + LD_UB8(b, BPS, ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7); + PACK_DPADD_UB4_SW(src0, ref0, src1, ref1, out0, out1, out2, out3); + PACK_DPADD_UB4_SW(src2, ref2, src3, ref3, out0, out1, out2, out3); + PACK_DPADD_UB4_SW(src4, ref4, src5, ref5, out0, out1, out2, out3); + PACK_DPADD_UB4_SW(src6, ref6, src7, ref7, out0, out1, out2, out3); + out0 += out1; + out2 += out3; + out0 += out2; + sum = HADD_SW_S32(out0); + return sum; +} + +static int SSE16x8_MSA(const uint8_t* a, const uint8_t* b) { + uint32_t sum; + v16u8 src0, src1, src2, src3, src4, src5, src6, src7; + 
v16u8 ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7; + v4i32 out0, out1, out2, out3; + + LD_UB8(a, BPS, src0, src1, src2, src3, src4, src5, src6, src7); + LD_UB8(b, BPS, ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7); + PACK_DOTP_UB4_SW(src0, ref0, src1, ref1, out0, out1, out2, out3); + PACK_DPADD_UB4_SW(src2, ref2, src3, ref3, out0, out1, out2, out3); + PACK_DPADD_UB4_SW(src4, ref4, src5, ref5, out0, out1, out2, out3); + PACK_DPADD_UB4_SW(src6, ref6, src7, ref7, out0, out1, out2, out3); + out0 += out1; + out2 += out3; + out0 += out2; + sum = HADD_SW_S32(out0); + return sum; +} + +static int SSE8x8_MSA(const uint8_t* a, const uint8_t* b) { + uint32_t sum; + v16u8 src0, src1, src2, src3, src4, src5, src6, src7; + v16u8 ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7; + v16u8 t0, t1, t2, t3; + v4i32 out0, out1, out2, out3; + + LD_UB8(a, BPS, src0, src1, src2, src3, src4, src5, src6, src7); + LD_UB8(b, BPS, ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7); + ILVR_B4_UB(src0, src1, src2, src3, ref0, ref1, ref2, ref3, t0, t1, t2, t3); + PACK_DOTP_UB4_SW(t0, t2, t1, t3, out0, out1, out2, out3); + ILVR_B4_UB(src4, src5, src6, src7, ref4, ref5, ref6, ref7, t0, t1, t2, t3); + PACK_DPADD_UB4_SW(t0, t2, t1, t3, out0, out1, out2, out3); + out0 += out1; + out2 += out3; + out0 += out2; + sum = HADD_SW_S32(out0); + return sum; +} + +static int SSE4x4_MSA(const uint8_t* a, const uint8_t* b) { + uint32_t sum = 0; + uint32_t src0, src1, src2, src3, ref0, ref1, ref2, ref3; + v16u8 src = { 0 }, ref = { 0 }, tmp0, tmp1; + v8i16 diff0, diff1; + v4i32 out0, out1; + + LW4(a, BPS, src0, src1, src2, src3); + LW4(b, BPS, ref0, ref1, ref2, ref3); + INSERT_W4_UB(src0, src1, src2, src3, src); + INSERT_W4_UB(ref0, ref1, ref2, ref3, ref); + ILVRL_B2_UB(src, ref, tmp0, tmp1); + HSUB_UB2_SH(tmp0, tmp1, diff0, diff1); + DOTP_SH2_SW(diff0, diff1, diff0, diff1, out0, out1); + out0 += out1; + sum = HADD_SW_S32(out0); + return sum; +} + +//------------------------------------------------------------------------------ +// Quantization + +static int QuantizeBlock_MSA(int16_t in[16], int16_t out[16], + const VP8Matrix* const mtx) { + int sum; + v8i16 in0, in1, sh0, sh1, out0, out1; + v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, sign0, sign1; + v4i32 s0, s1, s2, s3, b0, b1, b2, b3, t0, t1, t2, t3; + const v8i16 zero = { 0 }; + const v8i16 zigzag0 = { 0, 1, 4, 8, 5, 2, 3, 6 }; + const v8i16 zigzag1 = { 9, 12, 13, 10, 7, 11, 14, 15 }; + const v8i16 maxlevel = __msa_fill_h(MAX_LEVEL); + + LD_SH2(&in[0], 8, in0, in1); + LD_SH2(&mtx->sharpen_[0], 8, sh0, sh1); + tmp4 = __msa_add_a_h(in0, zero); + tmp5 = __msa_add_a_h(in1, zero); + ILVRL_H2_SH(sh0, tmp4, tmp0, tmp1); + ILVRL_H2_SH(sh1, tmp5, tmp2, tmp3); + HADD_SH4_SW(tmp0, tmp1, tmp2, tmp3, s0, s1, s2, s3); + sign0 = (in0 < zero); + sign1 = (in1 < zero); // sign + LD_SH2(&mtx->iq_[0], 8, tmp0, tmp1); // iq + ILVRL_H2_SW(zero, tmp0, t0, t1); + ILVRL_H2_SW(zero, tmp1, t2, t3); + LD_SW4(&mtx->bias_[0], 4, b0, b1, b2, b3); // bias + MUL4(t0, s0, t1, s1, t2, s2, t3, s3, t0, t1, t2, t3); + ADD4(b0, t0, b1, t1, b2, t2, b3, t3, b0, b1, b2, b3); + SRAI_W4_SW(b0, b1, b2, b3, 17); + PCKEV_H2_SH(b1, b0, b3, b2, tmp2, tmp3); + tmp0 = (tmp2 > maxlevel); + tmp1 = (tmp3 > maxlevel); + tmp2 = (v8i16)__msa_bmnz_v((v16u8)tmp2, (v16u8)maxlevel, (v16u8)tmp0); + tmp3 = (v8i16)__msa_bmnz_v((v16u8)tmp3, (v16u8)maxlevel, (v16u8)tmp1); + SUB2(zero, tmp2, zero, tmp3, tmp0, tmp1); + tmp2 = (v8i16)__msa_bmnz_v((v16u8)tmp2, (v16u8)tmp0, (v16u8)sign0); + tmp3 = (v8i16)__msa_bmnz_v((v16u8)tmp3, (v16u8)tmp1, 
(v16u8)sign1);
+  LD_SW4(&mtx->zthresh_[0], 4, t0, t1, t2, t3);   // zthresh
+  t0 = (s0 > t0);
+  t1 = (s1 > t1);
+  t2 = (s2 > t2);
+  t3 = (s3 > t3);
+  PCKEV_H2_SH(t1, t0, t3, t2, tmp0, tmp1);
+  tmp4 = (v8i16)__msa_bmnz_v((v16u8)zero, (v16u8)tmp2, (v16u8)tmp0);
+  tmp5 = (v8i16)__msa_bmnz_v((v16u8)zero, (v16u8)tmp3, (v16u8)tmp1);
+  LD_SH2(&mtx->q_[0], 8, tmp0, tmp1);
+  MUL2(tmp4, tmp0, tmp5, tmp1, in0, in1);
+  VSHF_H2_SH(tmp4, tmp5, tmp4, tmp5, zigzag0, zigzag1, out0, out1);
+  ST_SH2(in0, in1, &in[0], 8);
+  ST_SH2(out0, out1, &out[0], 8);
+  out0 = __msa_add_a_h(out0, out1);
+  sum = HADD_SH_S32(out0);
+  return (sum > 0);
+}
+
+static int Quantize2Blocks_MSA(int16_t in[32], int16_t out[32],
+                               const VP8Matrix* const mtx) {
+  int nz;
+  nz  = VP8EncQuantizeBlock(in + 0 * 16, out + 0 * 16, mtx) << 0;
+  nz |= VP8EncQuantizeBlock(in + 1 * 16, out + 1 * 16, mtx) << 1;
+  return nz;
+}
+
+//------------------------------------------------------------------------------
+// Entry point
+
+extern void VP8EncDspInitMSA(void);
+
+WEBP_TSAN_IGNORE_FUNCTION void VP8EncDspInitMSA(void) {
+  VP8ITransform = ITransform_MSA;
+  VP8FTransform = FTransform_MSA;
+  VP8FTransformWHT = FTransformWHT_MSA;
+
+  VP8TDisto4x4 = Disto4x4_MSA;
+  VP8TDisto16x16 = Disto16x16_MSA;
+  VP8CollectHistogram = CollectHistogram_MSA;
+
+  VP8EncPredLuma4 = Intra4Preds_MSA;
+  VP8EncPredLuma16 = Intra16Preds_MSA;
+  VP8EncPredChroma8 = IntraChromaPreds_MSA;
+
+  VP8SSE16x16 = SSE16x16_MSA;
+  VP8SSE16x8 = SSE16x8_MSA;
+  VP8SSE8x8 = SSE8x8_MSA;
+  VP8SSE4x4 = SSE4x4_MSA;
+
+  VP8EncQuantizeBlock = QuantizeBlock_MSA;
+  VP8EncQuantize2Blocks = Quantize2Blocks_MSA;
+  VP8EncQuantizeBlockWHT = QuantizeBlock_MSA;
+}
+
+#else  // !WEBP_USE_MSA
+
+WEBP_DSP_INIT_STUB(VP8EncDspInitMSA)
+
+#endif  // WEBP_USE_MSA
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_enc_neon.c b/vendor/github.com/sizeofint/webpanimation/dsp_enc_neon.c
new file mode 100644
index 00000000..b57697f6
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_enc_neon.c
@@ -0,0 +1,938 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// ARM NEON version of speed-critical encoding functions.
+//
+// adapted from libvpx (http://www.webmproject.org/code/)
+
+#include "dsp_dsp.h"
+
+#if defined(WEBP_USE_NEON)
+
+#include <assert.h>
+
+#include "dsp_neon.h"
+#include "enc_vp8i_enc.h"
+
+//------------------------------------------------------------------------------
+// Transforms (Paragraph 14.4)
+
+// Inverse transform.
+// This code is pretty much the same as TransformOne in dec_neon.c, except
+// for the subtraction to *ref. See the comments there for algorithmic
+// explanations.
+
+static const int16_t kC1 = 20091;
+static const int16_t kC2 = 17734;  // half of kC2, actually. See comment above.
+
+// This code works but is *slower* than the inlined-asm version below
+// (with gcc-4.6). So we disable it for now. Later, it'll be conditional on
+// the WEBP_USE_INTRINSICS define.
+// With gcc-4.8, it's a little faster than the inlined assembly.
+#if defined(WEBP_USE_INTRINSICS)
+
+// Treats 'v' as a uint8x8_t and zero extends to an int16x8_t.
+static WEBP_INLINE int16x8_t ConvertU8ToS16_NEON(uint32x2_t v) { + return vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(v))); +} + +// Performs unsigned 8b saturation on 'dst01' and 'dst23' storing the result +// to the corresponding rows of 'dst'. +static WEBP_INLINE void SaturateAndStore4x4_NEON(uint8_t* const dst, + const int16x8_t dst01, + const int16x8_t dst23) { + // Unsigned saturate to 8b. + const uint8x8_t dst01_u8 = vqmovun_s16(dst01); + const uint8x8_t dst23_u8 = vqmovun_s16(dst23); + + // Store the results. + vst1_lane_u32((uint32_t*)(dst + 0 * BPS), vreinterpret_u32_u8(dst01_u8), 0); + vst1_lane_u32((uint32_t*)(dst + 1 * BPS), vreinterpret_u32_u8(dst01_u8), 1); + vst1_lane_u32((uint32_t*)(dst + 2 * BPS), vreinterpret_u32_u8(dst23_u8), 0); + vst1_lane_u32((uint32_t*)(dst + 3 * BPS), vreinterpret_u32_u8(dst23_u8), 1); +} + +static WEBP_INLINE void Add4x4_NEON(const int16x8_t row01, + const int16x8_t row23, + const uint8_t* const ref, + uint8_t* const dst) { + uint32x2_t dst01 = vdup_n_u32(0); + uint32x2_t dst23 = vdup_n_u32(0); + + // Load the source pixels. + dst01 = vld1_lane_u32((uint32_t*)(ref + 0 * BPS), dst01, 0); + dst23 = vld1_lane_u32((uint32_t*)(ref + 2 * BPS), dst23, 0); + dst01 = vld1_lane_u32((uint32_t*)(ref + 1 * BPS), dst01, 1); + dst23 = vld1_lane_u32((uint32_t*)(ref + 3 * BPS), dst23, 1); + + { + // Convert to 16b. + const int16x8_t dst01_s16 = ConvertU8ToS16_NEON(dst01); + const int16x8_t dst23_s16 = ConvertU8ToS16_NEON(dst23); + + // Descale with rounding. + const int16x8_t out01 = vrsraq_n_s16(dst01_s16, row01, 3); + const int16x8_t out23 = vrsraq_n_s16(dst23_s16, row23, 3); + // Add the inverse transform. + SaturateAndStore4x4_NEON(dst, out01, out23); + } +} + +static WEBP_INLINE void Transpose8x2_NEON(const int16x8_t in0, + const int16x8_t in1, + int16x8x2_t* const out) { + // a0 a1 a2 a3 | b0 b1 b2 b3 => a0 b0 c0 d0 | a1 b1 c1 d1 + // c0 c1 c2 c3 | d0 d1 d2 d3 a2 b2 c2 d2 | a3 b3 c3 d3 + const int16x8x2_t tmp0 = vzipq_s16(in0, in1); // a0 c0 a1 c1 a2 c2 ... + // b0 d0 b1 d1 b2 d2 ... 
+ *out = vzipq_s16(tmp0.val[0], tmp0.val[1]); +} + +static WEBP_INLINE void TransformPass_NEON(int16x8x2_t* const rows) { + // {rows} = in0 | in4 + // in8 | in12 + // B1 = in4 | in12 + const int16x8_t B1 = + vcombine_s16(vget_high_s16(rows->val[0]), vget_high_s16(rows->val[1])); + // C0 = kC1 * in4 | kC1 * in12 + // C1 = kC2 * in4 | kC2 * in12 + const int16x8_t C0 = vsraq_n_s16(B1, vqdmulhq_n_s16(B1, kC1), 1); + const int16x8_t C1 = vqdmulhq_n_s16(B1, kC2); + const int16x4_t a = vqadd_s16(vget_low_s16(rows->val[0]), + vget_low_s16(rows->val[1])); // in0 + in8 + const int16x4_t b = vqsub_s16(vget_low_s16(rows->val[0]), + vget_low_s16(rows->val[1])); // in0 - in8 + // c = kC2 * in4 - kC1 * in12 + // d = kC1 * in4 + kC2 * in12 + const int16x4_t c = vqsub_s16(vget_low_s16(C1), vget_high_s16(C0)); + const int16x4_t d = vqadd_s16(vget_low_s16(C0), vget_high_s16(C1)); + const int16x8_t D0 = vcombine_s16(a, b); // D0 = a | b + const int16x8_t D1 = vcombine_s16(d, c); // D1 = d | c + const int16x8_t E0 = vqaddq_s16(D0, D1); // a+d | b+c + const int16x8_t E_tmp = vqsubq_s16(D0, D1); // a-d | b-c + const int16x8_t E1 = vcombine_s16(vget_high_s16(E_tmp), vget_low_s16(E_tmp)); + Transpose8x2_NEON(E0, E1, rows); +} + +static void ITransformOne_NEON(const uint8_t* ref, + const int16_t* in, uint8_t* dst) { + int16x8x2_t rows; + INIT_VECTOR2(rows, vld1q_s16(in + 0), vld1q_s16(in + 8)); + TransformPass_NEON(&rows); + TransformPass_NEON(&rows); + Add4x4_NEON(rows.val[0], rows.val[1], ref, dst); +} + +#else + +static void ITransformOne_NEON(const uint8_t* ref, + const int16_t* in, uint8_t* dst) { + const int kBPS = BPS; + const int16_t kC1C2[] = { kC1, kC2, 0, 0 }; + + __asm__ volatile ( + "vld1.16 {q1, q2}, [%[in]] \n" + "vld1.16 {d0}, [%[kC1C2]] \n" + + // d2: in[0] + // d3: in[8] + // d4: in[4] + // d5: in[12] + "vswp d3, d4 \n" + + // q8 = {in[4], in[12]} * kC1 * 2 >> 16 + // q9 = {in[4], in[12]} * kC2 >> 16 + "vqdmulh.s16 q8, q2, d0[0] \n" + "vqdmulh.s16 q9, q2, d0[1] \n" + + // d22 = a = in[0] + in[8] + // d23 = b = in[0] - in[8] + "vqadd.s16 d22, d2, d3 \n" + "vqsub.s16 d23, d2, d3 \n" + + // q8 = in[4]/[12] * kC1 >> 16 + "vshr.s16 q8, q8, #1 \n" + + // Add {in[4], in[12]} back after the multiplication. 
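+    // i.e. q8 = {in[4], in[12]} * K1 >> 16, with K1 = kC1 + (1 << 16)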
+ "vqadd.s16 q8, q2, q8 \n" + + // d20 = c = in[4]*kC2 - in[12]*kC1 + // d21 = d = in[4]*kC1 + in[12]*kC2 + "vqsub.s16 d20, d18, d17 \n" + "vqadd.s16 d21, d19, d16 \n" + + // d2 = tmp[0] = a + d + // d3 = tmp[1] = b + c + // d4 = tmp[2] = b - c + // d5 = tmp[3] = a - d + "vqadd.s16 d2, d22, d21 \n" + "vqadd.s16 d3, d23, d20 \n" + "vqsub.s16 d4, d23, d20 \n" + "vqsub.s16 d5, d22, d21 \n" + + "vzip.16 q1, q2 \n" + "vzip.16 q1, q2 \n" + + "vswp d3, d4 \n" + + // q8 = {tmp[4], tmp[12]} * kC1 * 2 >> 16 + // q9 = {tmp[4], tmp[12]} * kC2 >> 16 + "vqdmulh.s16 q8, q2, d0[0] \n" + "vqdmulh.s16 q9, q2, d0[1] \n" + + // d22 = a = tmp[0] + tmp[8] + // d23 = b = tmp[0] - tmp[8] + "vqadd.s16 d22, d2, d3 \n" + "vqsub.s16 d23, d2, d3 \n" + + "vshr.s16 q8, q8, #1 \n" + "vqadd.s16 q8, q2, q8 \n" + + // d20 = c = in[4]*kC2 - in[12]*kC1 + // d21 = d = in[4]*kC1 + in[12]*kC2 + "vqsub.s16 d20, d18, d17 \n" + "vqadd.s16 d21, d19, d16 \n" + + // d2 = tmp[0] = a + d + // d3 = tmp[1] = b + c + // d4 = tmp[2] = b - c + // d5 = tmp[3] = a - d + "vqadd.s16 d2, d22, d21 \n" + "vqadd.s16 d3, d23, d20 \n" + "vqsub.s16 d4, d23, d20 \n" + "vqsub.s16 d5, d22, d21 \n" + + "vld1.32 d6[0], [%[ref]], %[kBPS] \n" + "vld1.32 d6[1], [%[ref]], %[kBPS] \n" + "vld1.32 d7[0], [%[ref]], %[kBPS] \n" + "vld1.32 d7[1], [%[ref]], %[kBPS] \n" + + "sub %[ref], %[ref], %[kBPS], lsl #2 \n" + + // (val) + 4 >> 3 + "vrshr.s16 d2, d2, #3 \n" + "vrshr.s16 d3, d3, #3 \n" + "vrshr.s16 d4, d4, #3 \n" + "vrshr.s16 d5, d5, #3 \n" + + "vzip.16 q1, q2 \n" + "vzip.16 q1, q2 \n" + + // Must accumulate before saturating + "vmovl.u8 q8, d6 \n" + "vmovl.u8 q9, d7 \n" + + "vqadd.s16 q1, q1, q8 \n" + "vqadd.s16 q2, q2, q9 \n" + + "vqmovun.s16 d0, q1 \n" + "vqmovun.s16 d1, q2 \n" + + "vst1.32 d0[0], [%[dst]], %[kBPS] \n" + "vst1.32 d0[1], [%[dst]], %[kBPS] \n" + "vst1.32 d1[0], [%[dst]], %[kBPS] \n" + "vst1.32 d1[1], [%[dst]] \n" + + : [in] "+r"(in), [dst] "+r"(dst) // modified registers + : [kBPS] "r"(kBPS), [kC1C2] "r"(kC1C2), [ref] "r"(ref) // constants + : "memory", "q0", "q1", "q2", "q8", "q9", "q10", "q11" // clobbered + ); +} + +#endif // WEBP_USE_INTRINSICS + +static void ITransform_NEON(const uint8_t* ref, + const int16_t* in, uint8_t* dst, int do_two) { + ITransformOne_NEON(ref, in, dst); + if (do_two) { + ITransformOne_NEON(ref + 4, in + 16, dst + 4); + } +} + +// Load all 4x4 pixels into a single uint8x16_t variable. +static uint8x16_t Load4x4_NEON(const uint8_t* src) { + uint32x4_t out = vdupq_n_u32(0); + out = vld1q_lane_u32((const uint32_t*)(src + 0 * BPS), out, 0); + out = vld1q_lane_u32((const uint32_t*)(src + 1 * BPS), out, 1); + out = vld1q_lane_u32((const uint32_t*)(src + 2 * BPS), out, 2); + out = vld1q_lane_u32((const uint32_t*)(src + 3 * BPS), out, 3); + return vreinterpretq_u8_u32(out); +} + +// Forward transform. 
+
+#if defined(WEBP_USE_INTRINSICS)
+
+static WEBP_INLINE void Transpose4x4_S16_NEON(const int16x4_t A,
+                                              const int16x4_t B,
+                                              const int16x4_t C,
+                                              const int16x4_t D,
+                                              int16x8_t* const out01,
+                                              int16x8_t* const out32) {
+  const int16x4x2_t AB = vtrn_s16(A, B);
+  const int16x4x2_t CD = vtrn_s16(C, D);
+  const int32x2x2_t tmp02 = vtrn_s32(vreinterpret_s32_s16(AB.val[0]),
+                                     vreinterpret_s32_s16(CD.val[0]));
+  const int32x2x2_t tmp13 = vtrn_s32(vreinterpret_s32_s16(AB.val[1]),
+                                     vreinterpret_s32_s16(CD.val[1]));
+  *out01 = vreinterpretq_s16_s64(
+      vcombine_s64(vreinterpret_s64_s32(tmp02.val[0]),
+                   vreinterpret_s64_s32(tmp13.val[0])));
+  *out32 = vreinterpretq_s16_s64(
+      vcombine_s64(vreinterpret_s64_s32(tmp13.val[1]),
+                   vreinterpret_s64_s32(tmp02.val[1])));
+}
+
+static WEBP_INLINE int16x8_t DiffU8ToS16_NEON(const uint8x8_t a,
+                                              const uint8x8_t b) {
+  return vreinterpretq_s16_u16(vsubl_u8(a, b));
+}
+
+static void FTransform_NEON(const uint8_t* src, const uint8_t* ref,
+                            int16_t* out) {
+  int16x8_t d0d1, d3d2;   // working 4x4 int16 variables
+  {
+    const uint8x16_t S0 = Load4x4_NEON(src);
+    const uint8x16_t R0 = Load4x4_NEON(ref);
+    const int16x8_t D0D1 = DiffU8ToS16_NEON(vget_low_u8(S0), vget_low_u8(R0));
+    const int16x8_t D2D3 = DiffU8ToS16_NEON(vget_high_u8(S0), vget_high_u8(R0));
+    const int16x4_t D0 = vget_low_s16(D0D1);
+    const int16x4_t D1 = vget_high_s16(D0D1);
+    const int16x4_t D2 = vget_low_s16(D2D3);
+    const int16x4_t D3 = vget_high_s16(D2D3);
+    Transpose4x4_S16_NEON(D0, D1, D2, D3, &d0d1, &d3d2);
+  }
+  {  // 1st pass
+    const int32x4_t kCst937 = vdupq_n_s32(937);
+    const int32x4_t kCst1812 = vdupq_n_s32(1812);
+    const int16x8_t a0a1 = vaddq_s16(d0d1, d3d2);   // d0+d3 | d1+d2   (=a0|a1)
+    const int16x8_t a3a2 = vsubq_s16(d0d1, d3d2);   // d0-d3 | d1-d2   (=a3|a2)
+    const int16x8_t a0a1_2 = vshlq_n_s16(a0a1, 3);
+    const int16x4_t tmp0 = vadd_s16(vget_low_s16(a0a1_2),
+                                    vget_high_s16(a0a1_2));
+    const int16x4_t tmp2 = vsub_s16(vget_low_s16(a0a1_2),
+                                    vget_high_s16(a0a1_2));
+    const int32x4_t a3_2217 = vmull_n_s16(vget_low_s16(a3a2), 2217);
+    const int32x4_t a2_2217 = vmull_n_s16(vget_high_s16(a3a2), 2217);
+    const int32x4_t a2_p_a3 = vmlal_n_s16(a2_2217, vget_low_s16(a3a2), 5352);
+    const int32x4_t a3_m_a2 = vmlsl_n_s16(a3_2217, vget_high_s16(a3a2), 5352);
+    const int16x4_t tmp1 = vshrn_n_s32(vaddq_s32(a2_p_a3, kCst1812), 9);
+    const int16x4_t tmp3 = vshrn_n_s32(vaddq_s32(a3_m_a2, kCst937), 9);
+    Transpose4x4_S16_NEON(tmp0, tmp1, tmp2, tmp3, &d0d1, &d3d2);
+  }
+  {  // 2nd pass
+    // the (1<<16) addition is for the replacement: a3!=0  <->  1-(a3==0)
+    const int32x4_t kCst12000 = vdupq_n_s32(12000 + (1 << 16));
+    const int32x4_t kCst51000 = vdupq_n_s32(51000);
+    const int16x8_t a0a1 = vaddq_s16(d0d1, d3d2);   // d0+d3 | d1+d2   (=a0|a1)
+    const int16x8_t a3a2 = vsubq_s16(d0d1, d3d2);   // d0-d3 | d1-d2   (=a3|a2)
+    const int16x4_t a0_k7 = vadd_s16(vget_low_s16(a0a1), vdup_n_s16(7));
+    const int16x4_t out0 = vshr_n_s16(vadd_s16(a0_k7, vget_high_s16(a0a1)), 4);
+    const int16x4_t out2 = vshr_n_s16(vsub_s16(a0_k7, vget_high_s16(a0a1)), 4);
+    const int32x4_t a3_2217 = vmull_n_s16(vget_low_s16(a3a2), 2217);
+    const int32x4_t a2_2217 = vmull_n_s16(vget_high_s16(a3a2), 2217);
+    const int32x4_t a2_p_a3 = vmlal_n_s16(a2_2217, vget_low_s16(a3a2), 5352);
+    const int32x4_t a3_m_a2 = vmlsl_n_s16(a3_2217, vget_high_s16(a3a2), 5352);
+    const int16x4_t tmp1 = vaddhn_s32(a2_p_a3, kCst12000);
+    const int16x4_t out3 = vaddhn_s32(a3_m_a2, kCst51000);
+    const int16x4_t a3_eq_0 =
vreinterpret_s16_u16(vceq_s16(vget_low_s16(a3a2), vdup_n_s16(0))); + const int16x4_t out1 = vadd_s16(tmp1, a3_eq_0); + vst1_s16(out + 0, out0); + vst1_s16(out + 4, out1); + vst1_s16(out + 8, out2); + vst1_s16(out + 12, out3); + } +} + +#else + +// adapted from vp8/encoder/arm/neon/shortfdct_neon.asm +static const int16_t kCoeff16[] = { + 5352, 5352, 5352, 5352, 2217, 2217, 2217, 2217 +}; +static const int32_t kCoeff32[] = { + 1812, 1812, 1812, 1812, + 937, 937, 937, 937, + 12000, 12000, 12000, 12000, + 51000, 51000, 51000, 51000 +}; + +static void FTransform_NEON(const uint8_t* src, const uint8_t* ref, + int16_t* out) { + const int kBPS = BPS; + const uint8_t* src_ptr = src; + const uint8_t* ref_ptr = ref; + const int16_t* coeff16 = kCoeff16; + const int32_t* coeff32 = kCoeff32; + + __asm__ volatile ( + // load src into q4, q5 in high half + "vld1.8 {d8}, [%[src_ptr]], %[kBPS] \n" + "vld1.8 {d10}, [%[src_ptr]], %[kBPS] \n" + "vld1.8 {d9}, [%[src_ptr]], %[kBPS] \n" + "vld1.8 {d11}, [%[src_ptr]] \n" + + // load ref into q6, q7 in high half + "vld1.8 {d12}, [%[ref_ptr]], %[kBPS] \n" + "vld1.8 {d14}, [%[ref_ptr]], %[kBPS] \n" + "vld1.8 {d13}, [%[ref_ptr]], %[kBPS] \n" + "vld1.8 {d15}, [%[ref_ptr]] \n" + + // Pack the high values in to q4 and q6 + "vtrn.32 q4, q5 \n" + "vtrn.32 q6, q7 \n" + + // d[0-3] = src - ref + "vsubl.u8 q0, d8, d12 \n" + "vsubl.u8 q1, d9, d13 \n" + + // load coeff16 into q8(d16=5352, d17=2217) + "vld1.16 {q8}, [%[coeff16]] \n" + + // load coeff32 high half into q9 = 1812, q10 = 937 + "vld1.32 {q9, q10}, [%[coeff32]]! \n" + + // load coeff32 low half into q11=12000, q12=51000 + "vld1.32 {q11,q12}, [%[coeff32]] \n" + + // part 1 + // Transpose. Register dN is the same as dN in C + "vtrn.32 d0, d2 \n" + "vtrn.32 d1, d3 \n" + "vtrn.16 d0, d1 \n" + "vtrn.16 d2, d3 \n" + + "vadd.s16 d4, d0, d3 \n" // a0 = d0 + d3 + "vadd.s16 d5, d1, d2 \n" // a1 = d1 + d2 + "vsub.s16 d6, d1, d2 \n" // a2 = d1 - d2 + "vsub.s16 d7, d0, d3 \n" // a3 = d0 - d3 + + "vadd.s16 d0, d4, d5 \n" // a0 + a1 + "vshl.s16 d0, d0, #3 \n" // temp[0+i*4] = (a0+a1) << 3 + "vsub.s16 d2, d4, d5 \n" // a0 - a1 + "vshl.s16 d2, d2, #3 \n" // (temp[2+i*4] = (a0-a1) << 3 + + "vmlal.s16 q9, d7, d16 \n" // a3*5352 + 1812 + "vmlal.s16 q10, d7, d17 \n" // a3*2217 + 937 + "vmlal.s16 q9, d6, d17 \n" // a2*2217 + a3*5352 + 1812 + "vmlsl.s16 q10, d6, d16 \n" // a3*2217 + 937 - a2*5352 + + // temp[1+i*4] = (d2*2217 + d3*5352 + 1812) >> 9 + // temp[3+i*4] = (d3*2217 + 937 - d2*5352) >> 9 + "vshrn.s32 d1, q9, #9 \n" + "vshrn.s32 d3, q10, #9 \n" + + // part 2 + // transpose d0=ip[0], d1=ip[4], d2=ip[8], d3=ip[12] + "vtrn.32 d0, d2 \n" + "vtrn.32 d1, d3 \n" + "vtrn.16 d0, d1 \n" + "vtrn.16 d2, d3 \n" + + "vmov.s16 d26, #7 \n" + + "vadd.s16 d4, d0, d3 \n" // a1 = ip[0] + ip[12] + "vadd.s16 d5, d1, d2 \n" // b1 = ip[4] + ip[8] + "vsub.s16 d6, d1, d2 \n" // c1 = ip[4] - ip[8] + "vadd.s16 d4, d4, d26 \n" // a1 + 7 + "vsub.s16 d7, d0, d3 \n" // d1 = ip[0] - ip[12] + + "vadd.s16 d0, d4, d5 \n" // op[0] = a1 + b1 + 7 + "vsub.s16 d2, d4, d5 \n" // op[8] = a1 - b1 + 7 + + "vmlal.s16 q11, d7, d16 \n" // d1*5352 + 12000 + "vmlal.s16 q12, d7, d17 \n" // d1*2217 + 51000 + + "vceq.s16 d4, d7, #0 \n" + + "vshr.s16 d0, d0, #4 \n" + "vshr.s16 d2, d2, #4 \n" + + "vmlal.s16 q11, d6, d17 \n" // c1*2217 + d1*5352 + 12000 + "vmlsl.s16 q12, d6, d16 \n" // d1*2217 - c1*5352 + 51000 + + "vmvn d4, d4 \n" // !(d1 == 0) + // op[4] = (c1*2217 + d1*5352 + 12000)>>16 + "vshrn.s32 d1, q11, #16 \n" + // op[4] += (d1!=0) + "vsub.s16 d1, d1, d4 \n" + // op[12]= 
(d1*2217 - c1*5352 + 51000)>>16 + "vshrn.s32 d3, q12, #16 \n" + + // set result to out array + "vst1.16 {q0, q1}, [%[out]] \n" + : [src_ptr] "+r"(src_ptr), [ref_ptr] "+r"(ref_ptr), + [coeff32] "+r"(coeff32) // modified registers + : [kBPS] "r"(kBPS), [coeff16] "r"(coeff16), + [out] "r"(out) // constants + : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", + "q10", "q11", "q12", "q13" // clobbered + ); +} + +#endif + +#define LOAD_LANE_16b(VALUE, LANE) do { \ + (VALUE) = vld1_lane_s16(src, (VALUE), (LANE)); \ + src += stride; \ +} while (0) + +static void FTransformWHT_NEON(const int16_t* src, int16_t* out) { + const int stride = 16; + const int16x4_t zero = vdup_n_s16(0); + int32x4x4_t tmp0; + int16x4x4_t in; + INIT_VECTOR4(in, zero, zero, zero, zero); + LOAD_LANE_16b(in.val[0], 0); + LOAD_LANE_16b(in.val[1], 0); + LOAD_LANE_16b(in.val[2], 0); + LOAD_LANE_16b(in.val[3], 0); + LOAD_LANE_16b(in.val[0], 1); + LOAD_LANE_16b(in.val[1], 1); + LOAD_LANE_16b(in.val[2], 1); + LOAD_LANE_16b(in.val[3], 1); + LOAD_LANE_16b(in.val[0], 2); + LOAD_LANE_16b(in.val[1], 2); + LOAD_LANE_16b(in.val[2], 2); + LOAD_LANE_16b(in.val[3], 2); + LOAD_LANE_16b(in.val[0], 3); + LOAD_LANE_16b(in.val[1], 3); + LOAD_LANE_16b(in.val[2], 3); + LOAD_LANE_16b(in.val[3], 3); + + { + // a0 = in[0 * 16] + in[2 * 16] + // a1 = in[1 * 16] + in[3 * 16] + // a2 = in[1 * 16] - in[3 * 16] + // a3 = in[0 * 16] - in[2 * 16] + const int32x4_t a0 = vaddl_s16(in.val[0], in.val[2]); + const int32x4_t a1 = vaddl_s16(in.val[1], in.val[3]); + const int32x4_t a2 = vsubl_s16(in.val[1], in.val[3]); + const int32x4_t a3 = vsubl_s16(in.val[0], in.val[2]); + tmp0.val[0] = vaddq_s32(a0, a1); + tmp0.val[1] = vaddq_s32(a3, a2); + tmp0.val[2] = vsubq_s32(a3, a2); + tmp0.val[3] = vsubq_s32(a0, a1); + } + { + const int32x4x4_t tmp1 = Transpose4x4_NEON(tmp0); + // a0 = tmp[0 + i] + tmp[ 8 + i] + // a1 = tmp[4 + i] + tmp[12 + i] + // a2 = tmp[4 + i] - tmp[12 + i] + // a3 = tmp[0 + i] - tmp[ 8 + i] + const int32x4_t a0 = vaddq_s32(tmp1.val[0], tmp1.val[2]); + const int32x4_t a1 = vaddq_s32(tmp1.val[1], tmp1.val[3]); + const int32x4_t a2 = vsubq_s32(tmp1.val[1], tmp1.val[3]); + const int32x4_t a3 = vsubq_s32(tmp1.val[0], tmp1.val[2]); + const int32x4_t b0 = vhaddq_s32(a0, a1); // (a0 + a1) >> 1 + const int32x4_t b1 = vhaddq_s32(a3, a2); // (a3 + a2) >> 1 + const int32x4_t b2 = vhsubq_s32(a3, a2); // (a3 - a2) >> 1 + const int32x4_t b3 = vhsubq_s32(a0, a1); // (a0 - a1) >> 1 + const int16x4_t out0 = vmovn_s32(b0); + const int16x4_t out1 = vmovn_s32(b1); + const int16x4_t out2 = vmovn_s32(b2); + const int16x4_t out3 = vmovn_s32(b3); + + vst1_s16(out + 0, out0); + vst1_s16(out + 4, out1); + vst1_s16(out + 8, out2); + vst1_s16(out + 12, out3); + } +} +#undef LOAD_LANE_16b + +//------------------------------------------------------------------------------ +// Texture distortion +// +// We try to match the spectral content (weighted) between source and +// reconstructed samples. 
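+// Disto4x4_NEON below Hadamard-transforms both 4x4 blocks and returns the
+// w[]-weighted difference between their absolute transformed coefficients,
+// scaled by >> 5.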
+ +// a 0123, b 0123 +// a 4567, b 4567 +// a 89ab, b 89ab +// a cdef, b cdef +// +// transpose +// +// a 048c, b 048c +// a 159d, b 159d +// a 26ae, b 26ae +// a 37bf, b 37bf +// +static WEBP_INLINE int16x8x4_t DistoTranspose4x4S16_NEON(int16x8x4_t q4_in) { + const int16x8x2_t q2_tmp0 = vtrnq_s16(q4_in.val[0], q4_in.val[1]); + const int16x8x2_t q2_tmp1 = vtrnq_s16(q4_in.val[2], q4_in.val[3]); + const int32x4x2_t q2_tmp2 = vtrnq_s32(vreinterpretq_s32_s16(q2_tmp0.val[0]), + vreinterpretq_s32_s16(q2_tmp1.val[0])); + const int32x4x2_t q2_tmp3 = vtrnq_s32(vreinterpretq_s32_s16(q2_tmp0.val[1]), + vreinterpretq_s32_s16(q2_tmp1.val[1])); + q4_in.val[0] = vreinterpretq_s16_s32(q2_tmp2.val[0]); + q4_in.val[2] = vreinterpretq_s16_s32(q2_tmp2.val[1]); + q4_in.val[1] = vreinterpretq_s16_s32(q2_tmp3.val[0]); + q4_in.val[3] = vreinterpretq_s16_s32(q2_tmp3.val[1]); + return q4_in; +} + +static WEBP_INLINE int16x8x4_t DistoHorizontalPass_NEON( + const int16x8x4_t q4_in) { + // {a0, a1} = {in[0] + in[2], in[1] + in[3]} + // {a3, a2} = {in[0] - in[2], in[1] - in[3]} + const int16x8_t q_a0 = vaddq_s16(q4_in.val[0], q4_in.val[2]); + const int16x8_t q_a1 = vaddq_s16(q4_in.val[1], q4_in.val[3]); + const int16x8_t q_a3 = vsubq_s16(q4_in.val[0], q4_in.val[2]); + const int16x8_t q_a2 = vsubq_s16(q4_in.val[1], q4_in.val[3]); + int16x8x4_t q4_out; + // tmp[0] = a0 + a1 + // tmp[1] = a3 + a2 + // tmp[2] = a3 - a2 + // tmp[3] = a0 - a1 + INIT_VECTOR4(q4_out, + vabsq_s16(vaddq_s16(q_a0, q_a1)), + vabsq_s16(vaddq_s16(q_a3, q_a2)), + vabdq_s16(q_a3, q_a2), vabdq_s16(q_a0, q_a1)); + return q4_out; +} + +static WEBP_INLINE int16x8x4_t DistoVerticalPass_NEON(const uint8x8x4_t q4_in) { + const int16x8_t q_a0 = vreinterpretq_s16_u16(vaddl_u8(q4_in.val[0], + q4_in.val[2])); + const int16x8_t q_a1 = vreinterpretq_s16_u16(vaddl_u8(q4_in.val[1], + q4_in.val[3])); + const int16x8_t q_a2 = vreinterpretq_s16_u16(vsubl_u8(q4_in.val[1], + q4_in.val[3])); + const int16x8_t q_a3 = vreinterpretq_s16_u16(vsubl_u8(q4_in.val[0], + q4_in.val[2])); + int16x8x4_t q4_out; + + INIT_VECTOR4(q4_out, + vaddq_s16(q_a0, q_a1), vaddq_s16(q_a3, q_a2), + vsubq_s16(q_a3, q_a2), vsubq_s16(q_a0, q_a1)); + return q4_out; +} + +static WEBP_INLINE int16x4x4_t DistoLoadW_NEON(const uint16_t* w) { + const uint16x8_t q_w07 = vld1q_u16(&w[0]); + const uint16x8_t q_w8f = vld1q_u16(&w[8]); + int16x4x4_t d4_w; + INIT_VECTOR4(d4_w, + vget_low_s16(vreinterpretq_s16_u16(q_w07)), + vget_high_s16(vreinterpretq_s16_u16(q_w07)), + vget_low_s16(vreinterpretq_s16_u16(q_w8f)), + vget_high_s16(vreinterpretq_s16_u16(q_w8f))); + return d4_w; +} + +static WEBP_INLINE int32x2_t DistoSum_NEON(const int16x8x4_t q4_in, + const int16x4x4_t d4_w) { + int32x2_t d_sum; + // sum += w[ 0] * abs(b0); + // sum += w[ 4] * abs(b1); + // sum += w[ 8] * abs(b2); + // sum += w[12] * abs(b3); + int32x4_t q_sum0 = vmull_s16(d4_w.val[0], vget_low_s16(q4_in.val[0])); + int32x4_t q_sum1 = vmull_s16(d4_w.val[1], vget_low_s16(q4_in.val[1])); + int32x4_t q_sum2 = vmull_s16(d4_w.val[2], vget_low_s16(q4_in.val[2])); + int32x4_t q_sum3 = vmull_s16(d4_w.val[3], vget_low_s16(q4_in.val[3])); + q_sum0 = vmlsl_s16(q_sum0, d4_w.val[0], vget_high_s16(q4_in.val[0])); + q_sum1 = vmlsl_s16(q_sum1, d4_w.val[1], vget_high_s16(q4_in.val[1])); + q_sum2 = vmlsl_s16(q_sum2, d4_w.val[2], vget_high_s16(q4_in.val[2])); + q_sum3 = vmlsl_s16(q_sum3, d4_w.val[3], vget_high_s16(q4_in.val[3])); + + q_sum0 = vaddq_s32(q_sum0, q_sum1); + q_sum2 = vaddq_s32(q_sum2, q_sum3); + q_sum2 = vaddq_s32(q_sum0, q_sum2); + d_sum = 
vpadd_s32(vget_low_s32(q_sum2), vget_high_s32(q_sum2)); + d_sum = vpadd_s32(d_sum, d_sum); + return d_sum; +} + +#define LOAD_LANE_32b(src, VALUE, LANE) \ + (VALUE) = vld1_lane_u32((const uint32_t*)(src), (VALUE), (LANE)) + +// Hadamard transform +// Returns the weighted sum of the absolute value of transformed coefficients. +// w[] contains a row-major 4 by 4 symmetric matrix. +static int Disto4x4_NEON(const uint8_t* const a, const uint8_t* const b, + const uint16_t* const w) { + uint32x2_t d_in_ab_0123 = vdup_n_u32(0); + uint32x2_t d_in_ab_4567 = vdup_n_u32(0); + uint32x2_t d_in_ab_89ab = vdup_n_u32(0); + uint32x2_t d_in_ab_cdef = vdup_n_u32(0); + uint8x8x4_t d4_in; + + // load data a, b + LOAD_LANE_32b(a + 0 * BPS, d_in_ab_0123, 0); + LOAD_LANE_32b(a + 1 * BPS, d_in_ab_4567, 0); + LOAD_LANE_32b(a + 2 * BPS, d_in_ab_89ab, 0); + LOAD_LANE_32b(a + 3 * BPS, d_in_ab_cdef, 0); + LOAD_LANE_32b(b + 0 * BPS, d_in_ab_0123, 1); + LOAD_LANE_32b(b + 1 * BPS, d_in_ab_4567, 1); + LOAD_LANE_32b(b + 2 * BPS, d_in_ab_89ab, 1); + LOAD_LANE_32b(b + 3 * BPS, d_in_ab_cdef, 1); + INIT_VECTOR4(d4_in, + vreinterpret_u8_u32(d_in_ab_0123), + vreinterpret_u8_u32(d_in_ab_4567), + vreinterpret_u8_u32(d_in_ab_89ab), + vreinterpret_u8_u32(d_in_ab_cdef)); + + { + // Vertical pass first to avoid a transpose (vertical and horizontal passes + // are commutative because w/kWeightY is symmetric) and subsequent + // transpose. + const int16x8x4_t q4_v = DistoVerticalPass_NEON(d4_in); + const int16x4x4_t d4_w = DistoLoadW_NEON(w); + // horizontal pass + const int16x8x4_t q4_t = DistoTranspose4x4S16_NEON(q4_v); + const int16x8x4_t q4_h = DistoHorizontalPass_NEON(q4_t); + int32x2_t d_sum = DistoSum_NEON(q4_h, d4_w); + + // abs(sum2 - sum1) >> 5 + d_sum = vabs_s32(d_sum); + d_sum = vshr_n_s32(d_sum, 5); + return vget_lane_s32(d_sum, 0); + } +} +#undef LOAD_LANE_32b + +static int Disto16x16_NEON(const uint8_t* const a, const uint8_t* const b, + const uint16_t* const w) { + int D = 0; + int x, y; + for (y = 0; y < 16 * BPS; y += 4 * BPS) { + for (x = 0; x < 16; x += 4) { + D += Disto4x4_NEON(a + x + y, b + x + y, w); + } + } + return D; +} + +//------------------------------------------------------------------------------ + +static void CollectHistogram_NEON(const uint8_t* ref, const uint8_t* pred, + int start_block, int end_block, + VP8Histogram* const histo) { + const uint16x8_t max_coeff_thresh = vdupq_n_u16(MAX_COEFF_THRESH); + int j; + int distribution[MAX_COEFF_THRESH + 1] = { 0 }; + for (j = start_block; j < end_block; ++j) { + int16_t out[16]; + FTransform_NEON(ref + VP8DspScan[j], pred + VP8DspScan[j], out); + { + int k; + const int16x8_t a0 = vld1q_s16(out + 0); + const int16x8_t b0 = vld1q_s16(out + 8); + const uint16x8_t a1 = vreinterpretq_u16_s16(vabsq_s16(a0)); + const uint16x8_t b1 = vreinterpretq_u16_s16(vabsq_s16(b0)); + const uint16x8_t a2 = vshrq_n_u16(a1, 3); + const uint16x8_t b2 = vshrq_n_u16(b1, 3); + const uint16x8_t a3 = vminq_u16(a2, max_coeff_thresh); + const uint16x8_t b3 = vminq_u16(b2, max_coeff_thresh); + vst1q_s16(out + 0, vreinterpretq_s16_u16(a3)); + vst1q_s16(out + 8, vreinterpretq_s16_u16(b3)); + // Convert coefficients to bin. 
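+      // (each clipped |coeff| >> 3 value indexes directly into distribution[])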
+ for (k = 0; k < 16; ++k) { + ++distribution[out[k]]; + } + } + } + VP8SetHistogramData(distribution, histo); +} + +//------------------------------------------------------------------------------ + +static WEBP_INLINE void AccumulateSSE16_NEON(const uint8_t* const a, + const uint8_t* const b, + uint32x4_t* const sum) { + const uint8x16_t a0 = vld1q_u8(a); + const uint8x16_t b0 = vld1q_u8(b); + const uint8x16_t abs_diff = vabdq_u8(a0, b0); + const uint16x8_t prod1 = vmull_u8(vget_low_u8(abs_diff), + vget_low_u8(abs_diff)); + const uint16x8_t prod2 = vmull_u8(vget_high_u8(abs_diff), + vget_high_u8(abs_diff)); + /* pair-wise adds and widen */ + const uint32x4_t sum1 = vpaddlq_u16(prod1); + const uint32x4_t sum2 = vpaddlq_u16(prod2); + *sum = vaddq_u32(*sum, vaddq_u32(sum1, sum2)); +} + +// Horizontal sum of all four uint32_t values in 'sum'. +static int SumToInt_NEON(uint32x4_t sum) { + const uint64x2_t sum2 = vpaddlq_u32(sum); + const uint64_t sum3 = vgetq_lane_u64(sum2, 0) + vgetq_lane_u64(sum2, 1); + return (int)sum3; +} + +static int SSE16x16_NEON(const uint8_t* a, const uint8_t* b) { + uint32x4_t sum = vdupq_n_u32(0); + int y; + for (y = 0; y < 16; ++y) { + AccumulateSSE16_NEON(a + y * BPS, b + y * BPS, &sum); + } + return SumToInt_NEON(sum); +} + +static int SSE16x8_NEON(const uint8_t* a, const uint8_t* b) { + uint32x4_t sum = vdupq_n_u32(0); + int y; + for (y = 0; y < 8; ++y) { + AccumulateSSE16_NEON(a + y * BPS, b + y * BPS, &sum); + } + return SumToInt_NEON(sum); +} + +static int SSE8x8_NEON(const uint8_t* a, const uint8_t* b) { + uint32x4_t sum = vdupq_n_u32(0); + int y; + for (y = 0; y < 8; ++y) { + const uint8x8_t a0 = vld1_u8(a + y * BPS); + const uint8x8_t b0 = vld1_u8(b + y * BPS); + const uint8x8_t abs_diff = vabd_u8(a0, b0); + const uint16x8_t prod = vmull_u8(abs_diff, abs_diff); + sum = vpadalq_u16(sum, prod); + } + return SumToInt_NEON(sum); +} + +static int SSE4x4_NEON(const uint8_t* a, const uint8_t* b) { + const uint8x16_t a0 = Load4x4_NEON(a); + const uint8x16_t b0 = Load4x4_NEON(b); + const uint8x16_t abs_diff = vabdq_u8(a0, b0); + const uint16x8_t prod1 = vmull_u8(vget_low_u8(abs_diff), + vget_low_u8(abs_diff)); + const uint16x8_t prod2 = vmull_u8(vget_high_u8(abs_diff), + vget_high_u8(abs_diff)); + /* pair-wise adds and widen */ + const uint32x4_t sum1 = vpaddlq_u16(prod1); + const uint32x4_t sum2 = vpaddlq_u16(prod2); + return SumToInt_NEON(vaddq_u32(sum1, sum2)); +} + +//------------------------------------------------------------------------------ + +// Compilation with gcc-4.6.x is problematic for now. 
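+// (when WORK_AROUND_GCC is defined, the NEON quantizer below is not compiled
+// and the default implementation stays hooked up; see the init function.)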
+#if !defined(WORK_AROUND_GCC) + +static int16x8_t Quantize_NEON(int16_t* const in, + const VP8Matrix* const mtx, int offset) { + const uint16x8_t sharp = vld1q_u16(&mtx->sharpen_[offset]); + const uint16x8_t q = vld1q_u16(&mtx->q_[offset]); + const uint16x8_t iq = vld1q_u16(&mtx->iq_[offset]); + const uint32x4_t bias0 = vld1q_u32(&mtx->bias_[offset + 0]); + const uint32x4_t bias1 = vld1q_u32(&mtx->bias_[offset + 4]); + + const int16x8_t a = vld1q_s16(in + offset); // in + const uint16x8_t b = vreinterpretq_u16_s16(vabsq_s16(a)); // coeff = abs(in) + const int16x8_t sign = vshrq_n_s16(a, 15); // sign + const uint16x8_t c = vaddq_u16(b, sharp); // + sharpen + const uint32x4_t m0 = vmull_u16(vget_low_u16(c), vget_low_u16(iq)); + const uint32x4_t m1 = vmull_u16(vget_high_u16(c), vget_high_u16(iq)); + const uint32x4_t m2 = vhaddq_u32(m0, bias0); + const uint32x4_t m3 = vhaddq_u32(m1, bias1); // (coeff * iQ + bias) >> 1 + const uint16x8_t c0 = vcombine_u16(vshrn_n_u32(m2, 16), + vshrn_n_u32(m3, 16)); // QFIX=17 = 16+1 + const uint16x8_t c1 = vminq_u16(c0, vdupq_n_u16(MAX_LEVEL)); + const int16x8_t c2 = veorq_s16(vreinterpretq_s16_u16(c1), sign); + const int16x8_t c3 = vsubq_s16(c2, sign); // restore sign + const int16x8_t c4 = vmulq_s16(c3, vreinterpretq_s16_u16(q)); + vst1q_s16(in + offset, c4); + assert(QFIX == 17); // this function can't work as is if QFIX != 16+1 + return c3; +} + +static const uint8_t kShuffles[4][8] = { + { 0, 1, 2, 3, 8, 9, 16, 17 }, + { 10, 11, 4, 5, 6, 7, 12, 13 }, + { 18, 19, 24, 25, 26, 27, 20, 21 }, + { 14, 15, 22, 23, 28, 29, 30, 31 } +}; + +static int QuantizeBlock_NEON(int16_t in[16], int16_t out[16], + const VP8Matrix* const mtx) { + const int16x8_t out0 = Quantize_NEON(in, mtx, 0); + const int16x8_t out1 = Quantize_NEON(in, mtx, 8); + uint8x8x4_t shuffles; + // vtbl?_u8 are marked unavailable for iOS arm64 with Xcode < 6.3, use + // non-standard versions there. 
+#if defined(__APPLE__) && defined(__aarch64__) && \ + defined(__apple_build_version__) && (__apple_build_version__< 6020037) + uint8x16x2_t all_out; + INIT_VECTOR2(all_out, vreinterpretq_u8_s16(out0), vreinterpretq_u8_s16(out1)); + INIT_VECTOR4(shuffles, + vtbl2q_u8(all_out, vld1_u8(kShuffles[0])), + vtbl2q_u8(all_out, vld1_u8(kShuffles[1])), + vtbl2q_u8(all_out, vld1_u8(kShuffles[2])), + vtbl2q_u8(all_out, vld1_u8(kShuffles[3]))); +#else + uint8x8x4_t all_out; + INIT_VECTOR4(all_out, + vreinterpret_u8_s16(vget_low_s16(out0)), + vreinterpret_u8_s16(vget_high_s16(out0)), + vreinterpret_u8_s16(vget_low_s16(out1)), + vreinterpret_u8_s16(vget_high_s16(out1))); + INIT_VECTOR4(shuffles, + vtbl4_u8(all_out, vld1_u8(kShuffles[0])), + vtbl4_u8(all_out, vld1_u8(kShuffles[1])), + vtbl4_u8(all_out, vld1_u8(kShuffles[2])), + vtbl4_u8(all_out, vld1_u8(kShuffles[3]))); +#endif + // Zigzag reordering + vst1_u8((uint8_t*)(out + 0), shuffles.val[0]); + vst1_u8((uint8_t*)(out + 4), shuffles.val[1]); + vst1_u8((uint8_t*)(out + 8), shuffles.val[2]); + vst1_u8((uint8_t*)(out + 12), shuffles.val[3]); + // test zeros + if (*(uint64_t*)(out + 0) != 0) return 1; + if (*(uint64_t*)(out + 4) != 0) return 1; + if (*(uint64_t*)(out + 8) != 0) return 1; + if (*(uint64_t*)(out + 12) != 0) return 1; + return 0; +} + +static int Quantize2Blocks_NEON(int16_t in[32], int16_t out[32], + const VP8Matrix* const mtx) { + int nz; + nz = QuantizeBlock_NEON(in + 0 * 16, out + 0 * 16, mtx) << 0; + nz |= QuantizeBlock_NEON(in + 1 * 16, out + 1 * 16, mtx) << 1; + return nz; +} + +#endif // !WORK_AROUND_GCC + +//------------------------------------------------------------------------------ +// Entry point + +extern void VP8EncDspInitNEON(void); + +WEBP_TSAN_IGNORE_FUNCTION void VP8EncDspInitNEON(void) { + VP8ITransform = ITransform_NEON; + VP8FTransform = FTransform_NEON; + + VP8FTransformWHT = FTransformWHT_NEON; + + VP8TDisto4x4 = Disto4x4_NEON; + VP8TDisto16x16 = Disto16x16_NEON; + VP8CollectHistogram = CollectHistogram_NEON; + + VP8SSE16x16 = SSE16x16_NEON; + VP8SSE16x8 = SSE16x8_NEON; + VP8SSE8x8 = SSE8x8_NEON; + VP8SSE4x4 = SSE4x4_NEON; + +#if !defined(WORK_AROUND_GCC) + VP8EncQuantizeBlock = QuantizeBlock_NEON; + VP8EncQuantize2Blocks = Quantize2Blocks_NEON; +#endif +} + +#else // !WEBP_USE_NEON + +WEBP_DSP_INIT_STUB(VP8EncDspInitNEON) + +#endif // WEBP_USE_NEON diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_enc_sse2.c b/vendor/github.com/sizeofint/webpanimation/dsp_enc_sse2.c new file mode 100644 index 00000000..1275817c --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_enc_sse2.c @@ -0,0 +1,1381 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// SSE2 version of speed-critical encoding functions. 
+//
+// Author: Christian Duvivier (cduvivier@google.com)
+
+#include "dsp_dsp.h"
+
+#if defined(WEBP_USE_SSE2)
+#include <assert.h>
+#include <stdlib.h>  // for abs()
+#include <emmintrin.h>
+
+#include "dsp_common_sse2.h"
+#include "enc_cost_enc.h"
+#include "enc_vp8i_enc.h"
+
+//------------------------------------------------------------------------------
+// Transforms (Paragraph 14.4)
+
+// Does one or two inverse transforms.
+static void ITransform_SSE2(const uint8_t* ref, const int16_t* in, uint8_t* dst,
+                            int do_two) {
+  // This implementation makes use of 16-bit fixed point versions of two
+  // multiply constants:
+  //    K1 = sqrt(2) * cos (pi/8) ~= 85627 / 2^16
+  //    K2 = sqrt(2) * sin (pi/8) ~= 35468 / 2^16
+  //
+  // To be able to use signed 16-bit integers, we use the following trick to
+  // have constants within range:
+  // - Associated constants are obtained by subtracting the 16-bit fixed point
+  //   version of one:
+  //      k = K - (1 << 16)  =>  K = k + (1 << 16)
+  //      K1 = 85627  =>  k1 =  20091
+  //      K2 = 35468  =>  k2 = -30068
+  // - The multiplication of a variable by a constant becomes the sum of the
+  //   variable and the multiplication of that variable by the associated
+  //   constant:
+  //      (x * K) >> 16 = (x * (k + (1 << 16))) >> 16 = ((x * k) >> 16) + x
+  const __m128i k1 = _mm_set1_epi16(20091);
+  const __m128i k2 = _mm_set1_epi16(-30068);
+  __m128i T0, T1, T2, T3;
+
+  // Load and concatenate the transform coefficients (we'll do two inverse
+  // transforms in parallel). In the case of only one inverse transform, the
+  // second half of the vectors will just contain random values we'll never
+  // use nor store.
+  __m128i in0, in1, in2, in3;
+  {
+    in0 = _mm_loadl_epi64((const __m128i*)&in[0]);
+    in1 = _mm_loadl_epi64((const __m128i*)&in[4]);
+    in2 = _mm_loadl_epi64((const __m128i*)&in[8]);
+    in3 = _mm_loadl_epi64((const __m128i*)&in[12]);
+    // a00 a10 a20 a30   x x x x
+    // a01 a11 a21 a31   x x x x
+    // a02 a12 a22 a32   x x x x
+    // a03 a13 a23 a33   x x x x
+    if (do_two) {
+      const __m128i inB0 = _mm_loadl_epi64((const __m128i*)&in[16]);
+      const __m128i inB1 = _mm_loadl_epi64((const __m128i*)&in[20]);
+      const __m128i inB2 = _mm_loadl_epi64((const __m128i*)&in[24]);
+      const __m128i inB3 = _mm_loadl_epi64((const __m128i*)&in[28]);
+      in0 = _mm_unpacklo_epi64(in0, inB0);
+      in1 = _mm_unpacklo_epi64(in1, inB1);
+      in2 = _mm_unpacklo_epi64(in2, inB2);
+      in3 = _mm_unpacklo_epi64(in3, inB3);
+      // a00 a10 a20 a30   b00 b10 b20 b30
+      // a01 a11 a21 a31   b01 b11 b21 b31
+      // a02 a12 a22 a32   b02 b12 b22 b32
+      // a03 a13 a23 a33   b03 b13 b23 b33
+    }
+  }
+
+  // Vertical pass and subsequent transpose.
+  {
+    // First pass, c and d calculations are longer because of the "trick"
+    // multiplications.
+    const __m128i a = _mm_add_epi16(in0, in2);
+    const __m128i b = _mm_sub_epi16(in0, in2);
+    // c = MUL(in1, K2) - MUL(in3, K1) = MUL(in1, k2) - MUL(in3, k1) + in1 - in3
+    const __m128i c1 = _mm_mulhi_epi16(in1, k2);
+    const __m128i c2 = _mm_mulhi_epi16(in3, k1);
+    const __m128i c3 = _mm_sub_epi16(in1, in3);
+    const __m128i c4 = _mm_sub_epi16(c1, c2);
+    const __m128i c = _mm_add_epi16(c3, c4);
+    // d = MUL(in1, K1) + MUL(in3, K2) = MUL(in1, k1) + MUL(in3, k2) + in1 + in3
+    const __m128i d1 = _mm_mulhi_epi16(in1, k1);
+    const __m128i d2 = _mm_mulhi_epi16(in3, k2);
+    const __m128i d3 = _mm_add_epi16(in1, in3);
+    const __m128i d4 = _mm_add_epi16(d1, d2);
+    const __m128i d = _mm_add_epi16(d3, d4);
+
+    // Second pass.
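+    // (a butterfly: tmp0 = a + d, tmp1 = b + c, tmp2 = b - c, tmp3 = a - d,
+    // applied to whole rows at once.)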
+ const __m128i tmp0 = _mm_add_epi16(a, d); + const __m128i tmp1 = _mm_add_epi16(b, c); + const __m128i tmp2 = _mm_sub_epi16(b, c); + const __m128i tmp3 = _mm_sub_epi16(a, d); + + // Transpose the two 4x4. + VP8Transpose_2_4x4_16b(&tmp0, &tmp1, &tmp2, &tmp3, &T0, &T1, &T2, &T3); + } + + // Horizontal pass and subsequent transpose. + { + // First pass, c and d calculations are longer because of the "trick" + // multiplications. + const __m128i four = _mm_set1_epi16(4); + const __m128i dc = _mm_add_epi16(T0, four); + const __m128i a = _mm_add_epi16(dc, T2); + const __m128i b = _mm_sub_epi16(dc, T2); + // c = MUL(T1, K2) - MUL(T3, K1) = MUL(T1, k2) - MUL(T3, k1) + T1 - T3 + const __m128i c1 = _mm_mulhi_epi16(T1, k2); + const __m128i c2 = _mm_mulhi_epi16(T3, k1); + const __m128i c3 = _mm_sub_epi16(T1, T3); + const __m128i c4 = _mm_sub_epi16(c1, c2); + const __m128i c = _mm_add_epi16(c3, c4); + // d = MUL(T1, K1) + MUL(T3, K2) = MUL(T1, k1) + MUL(T3, k2) + T1 + T3 + const __m128i d1 = _mm_mulhi_epi16(T1, k1); + const __m128i d2 = _mm_mulhi_epi16(T3, k2); + const __m128i d3 = _mm_add_epi16(T1, T3); + const __m128i d4 = _mm_add_epi16(d1, d2); + const __m128i d = _mm_add_epi16(d3, d4); + + // Second pass. + const __m128i tmp0 = _mm_add_epi16(a, d); + const __m128i tmp1 = _mm_add_epi16(b, c); + const __m128i tmp2 = _mm_sub_epi16(b, c); + const __m128i tmp3 = _mm_sub_epi16(a, d); + const __m128i shifted0 = _mm_srai_epi16(tmp0, 3); + const __m128i shifted1 = _mm_srai_epi16(tmp1, 3); + const __m128i shifted2 = _mm_srai_epi16(tmp2, 3); + const __m128i shifted3 = _mm_srai_epi16(tmp3, 3); + + // Transpose the two 4x4. + VP8Transpose_2_4x4_16b(&shifted0, &shifted1, &shifted2, &shifted3, &T0, &T1, + &T2, &T3); + } + + // Add inverse transform to 'ref' and store. + { + const __m128i zero = _mm_setzero_si128(); + // Load the reference(s). + __m128i ref0, ref1, ref2, ref3; + if (do_two) { + // Load eight bytes/pixels per line. + ref0 = _mm_loadl_epi64((const __m128i*)&ref[0 * BPS]); + ref1 = _mm_loadl_epi64((const __m128i*)&ref[1 * BPS]); + ref2 = _mm_loadl_epi64((const __m128i*)&ref[2 * BPS]); + ref3 = _mm_loadl_epi64((const __m128i*)&ref[3 * BPS]); + } else { + // Load four bytes/pixels per line. + ref0 = _mm_cvtsi32_si128(WebPMemToUint32(&ref[0 * BPS])); + ref1 = _mm_cvtsi32_si128(WebPMemToUint32(&ref[1 * BPS])); + ref2 = _mm_cvtsi32_si128(WebPMemToUint32(&ref[2 * BPS])); + ref3 = _mm_cvtsi32_si128(WebPMemToUint32(&ref[3 * BPS])); + } + // Convert to 16b. + ref0 = _mm_unpacklo_epi8(ref0, zero); + ref1 = _mm_unpacklo_epi8(ref1, zero); + ref2 = _mm_unpacklo_epi8(ref2, zero); + ref3 = _mm_unpacklo_epi8(ref3, zero); + // Add the inverse transform(s). + ref0 = _mm_add_epi16(ref0, T0); + ref1 = _mm_add_epi16(ref1, T1); + ref2 = _mm_add_epi16(ref2, T2); + ref3 = _mm_add_epi16(ref3, T3); + // Unsigned saturate to 8b. + ref0 = _mm_packus_epi16(ref0, ref0); + ref1 = _mm_packus_epi16(ref1, ref1); + ref2 = _mm_packus_epi16(ref2, ref2); + ref3 = _mm_packus_epi16(ref3, ref3); + // Store the results. + if (do_two) { + // Store eight bytes/pixels per line. + _mm_storel_epi64((__m128i*)&dst[0 * BPS], ref0); + _mm_storel_epi64((__m128i*)&dst[1 * BPS], ref1); + _mm_storel_epi64((__m128i*)&dst[2 * BPS], ref2); + _mm_storel_epi64((__m128i*)&dst[3 * BPS], ref3); + } else { + // Store four bytes/pixels per line. 
+ WebPUint32ToMem(&dst[0 * BPS], _mm_cvtsi128_si32(ref0)); + WebPUint32ToMem(&dst[1 * BPS], _mm_cvtsi128_si32(ref1)); + WebPUint32ToMem(&dst[2 * BPS], _mm_cvtsi128_si32(ref2)); + WebPUint32ToMem(&dst[3 * BPS], _mm_cvtsi128_si32(ref3)); + } + } +} + +static void FTransformPass1_SSE2(const __m128i* const in01, + const __m128i* const in23, + __m128i* const out01, + __m128i* const out32) { + const __m128i k937 = _mm_set1_epi32(937); + const __m128i k1812 = _mm_set1_epi32(1812); + + const __m128i k88p = _mm_set_epi16(8, 8, 8, 8, 8, 8, 8, 8); + const __m128i k88m = _mm_set_epi16(-8, 8, -8, 8, -8, 8, -8, 8); + const __m128i k5352_2217p = _mm_set_epi16(2217, 5352, 2217, 5352, + 2217, 5352, 2217, 5352); + const __m128i k5352_2217m = _mm_set_epi16(-5352, 2217, -5352, 2217, + -5352, 2217, -5352, 2217); + + // *in01 = 00 01 10 11 02 03 12 13 + // *in23 = 20 21 30 31 22 23 32 33 + const __m128i shuf01_p = _mm_shufflehi_epi16(*in01, _MM_SHUFFLE(2, 3, 0, 1)); + const __m128i shuf23_p = _mm_shufflehi_epi16(*in23, _MM_SHUFFLE(2, 3, 0, 1)); + // 00 01 10 11 03 02 13 12 + // 20 21 30 31 23 22 33 32 + const __m128i s01 = _mm_unpacklo_epi64(shuf01_p, shuf23_p); + const __m128i s32 = _mm_unpackhi_epi64(shuf01_p, shuf23_p); + // 00 01 10 11 20 21 30 31 + // 03 02 13 12 23 22 33 32 + const __m128i a01 = _mm_add_epi16(s01, s32); + const __m128i a32 = _mm_sub_epi16(s01, s32); + // [d0 + d3 | d1 + d2 | ...] = [a0 a1 | a0' a1' | ... ] + // [d0 - d3 | d1 - d2 | ...] = [a3 a2 | a3' a2' | ... ] + + const __m128i tmp0 = _mm_madd_epi16(a01, k88p); // [ (a0 + a1) << 3, ... ] + const __m128i tmp2 = _mm_madd_epi16(a01, k88m); // [ (a0 - a1) << 3, ... ] + const __m128i tmp1_1 = _mm_madd_epi16(a32, k5352_2217p); + const __m128i tmp3_1 = _mm_madd_epi16(a32, k5352_2217m); + const __m128i tmp1_2 = _mm_add_epi32(tmp1_1, k1812); + const __m128i tmp3_2 = _mm_add_epi32(tmp3_1, k937); + const __m128i tmp1 = _mm_srai_epi32(tmp1_2, 9); + const __m128i tmp3 = _mm_srai_epi32(tmp3_2, 9); + const __m128i s03 = _mm_packs_epi32(tmp0, tmp2); + const __m128i s12 = _mm_packs_epi32(tmp1, tmp3); + const __m128i s_lo = _mm_unpacklo_epi16(s03, s12); // 0 1 0 1 0 1... + const __m128i s_hi = _mm_unpackhi_epi16(s03, s12); // 2 3 2 3 2 3 + const __m128i v23 = _mm_unpackhi_epi32(s_lo, s_hi); + *out01 = _mm_unpacklo_epi32(s_lo, s_hi); + *out32 = _mm_shuffle_epi32(v23, _MM_SHUFFLE(1, 0, 3, 2)); // 3 2 3 2 3 2.. +} + +static void FTransformPass2_SSE2(const __m128i* const v01, + const __m128i* const v32, + int16_t* out) { + const __m128i zero = _mm_setzero_si128(); + const __m128i seven = _mm_set1_epi16(7); + const __m128i k5352_2217 = _mm_set_epi16(5352, 2217, 5352, 2217, + 5352, 2217, 5352, 2217); + const __m128i k2217_5352 = _mm_set_epi16(2217, -5352, 2217, -5352, + 2217, -5352, 2217, -5352); + const __m128i k12000_plus_one = _mm_set1_epi32(12000 + (1 << 16)); + const __m128i k51000 = _mm_set1_epi32(51000); + + // Same operations are done on the (0,3) and (1,2) pairs. 
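+  // Scalar equivalent of this pass, per column i:
+  //   a0 = v0 + v3;  a1 = v1 + v2;  a2 = v1 - v2;  a3 = v0 - v3;
+  //   out[0 + i]  = (a0 + a1 + 7) >> 4;
+  //   out[4 + i]  = ((a3 * 5352 + a2 * 2217 + 12000) >> 16) + (a3 != 0);
+  //   out[8 + i]  = (a0 - a1 + 7) >> 4;
+  //   out[12 + i] = ((a3 * 2217 - a2 * 5352 + 51000) >> 16);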
+ // a3 = v0 - v3 + // a2 = v1 - v2 + const __m128i a32 = _mm_sub_epi16(*v01, *v32); + const __m128i a22 = _mm_unpackhi_epi64(a32, a32); + + const __m128i b23 = _mm_unpacklo_epi16(a22, a32); + const __m128i c1 = _mm_madd_epi16(b23, k5352_2217); + const __m128i c3 = _mm_madd_epi16(b23, k2217_5352); + const __m128i d1 = _mm_add_epi32(c1, k12000_plus_one); + const __m128i d3 = _mm_add_epi32(c3, k51000); + const __m128i e1 = _mm_srai_epi32(d1, 16); + const __m128i e3 = _mm_srai_epi32(d3, 16); + // f1 = ((b3 * 5352 + b2 * 2217 + 12000) >> 16) + // f3 = ((b3 * 2217 - b2 * 5352 + 51000) >> 16) + const __m128i f1 = _mm_packs_epi32(e1, e1); + const __m128i f3 = _mm_packs_epi32(e3, e3); + // g1 = f1 + (a3 != 0); + // The compare will return (0xffff, 0) for (==0, !=0). To turn that into the + // desired (0, 1), we add one earlier through k12000_plus_one. + // -> g1 = f1 + 1 - (a3 == 0) + const __m128i g1 = _mm_add_epi16(f1, _mm_cmpeq_epi16(a32, zero)); + + // a0 = v0 + v3 + // a1 = v1 + v2 + const __m128i a01 = _mm_add_epi16(*v01, *v32); + const __m128i a01_plus_7 = _mm_add_epi16(a01, seven); + const __m128i a11 = _mm_unpackhi_epi64(a01, a01); + const __m128i c0 = _mm_add_epi16(a01_plus_7, a11); + const __m128i c2 = _mm_sub_epi16(a01_plus_7, a11); + // d0 = (a0 + a1 + 7) >> 4; + // d2 = (a0 - a1 + 7) >> 4; + const __m128i d0 = _mm_srai_epi16(c0, 4); + const __m128i d2 = _mm_srai_epi16(c2, 4); + + const __m128i d0_g1 = _mm_unpacklo_epi64(d0, g1); + const __m128i d2_f3 = _mm_unpacklo_epi64(d2, f3); + _mm_storeu_si128((__m128i*)&out[0], d0_g1); + _mm_storeu_si128((__m128i*)&out[8], d2_f3); +} + +static void FTransform_SSE2(const uint8_t* src, const uint8_t* ref, + int16_t* out) { + const __m128i zero = _mm_setzero_si128(); + // Load src. + const __m128i src0 = _mm_loadl_epi64((const __m128i*)&src[0 * BPS]); + const __m128i src1 = _mm_loadl_epi64((const __m128i*)&src[1 * BPS]); + const __m128i src2 = _mm_loadl_epi64((const __m128i*)&src[2 * BPS]); + const __m128i src3 = _mm_loadl_epi64((const __m128i*)&src[3 * BPS]); + // 00 01 02 03 * + // 10 11 12 13 * + // 20 21 22 23 * + // 30 31 32 33 * + // Shuffle. + const __m128i src_0 = _mm_unpacklo_epi16(src0, src1); + const __m128i src_1 = _mm_unpacklo_epi16(src2, src3); + // 00 01 10 11 02 03 12 13 * * ... + // 20 21 30 31 22 22 32 33 * * ... + + // Load ref. + const __m128i ref0 = _mm_loadl_epi64((const __m128i*)&ref[0 * BPS]); + const __m128i ref1 = _mm_loadl_epi64((const __m128i*)&ref[1 * BPS]); + const __m128i ref2 = _mm_loadl_epi64((const __m128i*)&ref[2 * BPS]); + const __m128i ref3 = _mm_loadl_epi64((const __m128i*)&ref[3 * BPS]); + const __m128i ref_0 = _mm_unpacklo_epi16(ref0, ref1); + const __m128i ref_1 = _mm_unpacklo_epi16(ref2, ref3); + + // Convert both to 16 bit. + const __m128i src_0_16b = _mm_unpacklo_epi8(src_0, zero); + const __m128i src_1_16b = _mm_unpacklo_epi8(src_1, zero); + const __m128i ref_0_16b = _mm_unpacklo_epi8(ref_0, zero); + const __m128i ref_1_16b = _mm_unpacklo_epi8(ref_1, zero); + + // Compute the difference. + const __m128i row01 = _mm_sub_epi16(src_0_16b, ref_0_16b); + const __m128i row23 = _mm_sub_epi16(src_1_16b, ref_1_16b); + __m128i v01, v32; + + // First pass + FTransformPass1_SSE2(&row01, &row23, &v01, &v32); + + // Second pass + FTransformPass2_SSE2(&v01, &v32, out); +} + +static void FTransform2_SSE2(const uint8_t* src, const uint8_t* ref, + int16_t* out) { + const __m128i zero = _mm_setzero_si128(); + + // Load src and convert to 16b. 
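+  // This variant transforms two horizontally adjacent 4x4 blocks at once:
+  // the low half of each 8-pixel difference row feeds the first transform,
+  // the high half the second.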
+ const __m128i src0 = _mm_loadl_epi64((const __m128i*)&src[0 * BPS]); + const __m128i src1 = _mm_loadl_epi64((const __m128i*)&src[1 * BPS]); + const __m128i src2 = _mm_loadl_epi64((const __m128i*)&src[2 * BPS]); + const __m128i src3 = _mm_loadl_epi64((const __m128i*)&src[3 * BPS]); + const __m128i src_0 = _mm_unpacklo_epi8(src0, zero); + const __m128i src_1 = _mm_unpacklo_epi8(src1, zero); + const __m128i src_2 = _mm_unpacklo_epi8(src2, zero); + const __m128i src_3 = _mm_unpacklo_epi8(src3, zero); + // Load ref and convert to 16b. + const __m128i ref0 = _mm_loadl_epi64((const __m128i*)&ref[0 * BPS]); + const __m128i ref1 = _mm_loadl_epi64((const __m128i*)&ref[1 * BPS]); + const __m128i ref2 = _mm_loadl_epi64((const __m128i*)&ref[2 * BPS]); + const __m128i ref3 = _mm_loadl_epi64((const __m128i*)&ref[3 * BPS]); + const __m128i ref_0 = _mm_unpacklo_epi8(ref0, zero); + const __m128i ref_1 = _mm_unpacklo_epi8(ref1, zero); + const __m128i ref_2 = _mm_unpacklo_epi8(ref2, zero); + const __m128i ref_3 = _mm_unpacklo_epi8(ref3, zero); + // Compute difference. -> 00 01 02 03 00' 01' 02' 03' + const __m128i diff0 = _mm_sub_epi16(src_0, ref_0); + const __m128i diff1 = _mm_sub_epi16(src_1, ref_1); + const __m128i diff2 = _mm_sub_epi16(src_2, ref_2); + const __m128i diff3 = _mm_sub_epi16(src_3, ref_3); + + // Unpack and shuffle + // 00 01 02 03 0 0 0 0 + // 10 11 12 13 0 0 0 0 + // 20 21 22 23 0 0 0 0 + // 30 31 32 33 0 0 0 0 + const __m128i shuf01l = _mm_unpacklo_epi32(diff0, diff1); + const __m128i shuf23l = _mm_unpacklo_epi32(diff2, diff3); + const __m128i shuf01h = _mm_unpackhi_epi32(diff0, diff1); + const __m128i shuf23h = _mm_unpackhi_epi32(diff2, diff3); + __m128i v01l, v32l; + __m128i v01h, v32h; + + // First pass + FTransformPass1_SSE2(&shuf01l, &shuf23l, &v01l, &v32l); + FTransformPass1_SSE2(&shuf01h, &shuf23h, &v01h, &v32h); + + // Second pass + FTransformPass2_SSE2(&v01l, &v32l, out + 0); + FTransformPass2_SSE2(&v01h, &v32h, out + 16); +} + +static void FTransformWHTRow_SSE2(const int16_t* const in, __m128i* const out) { + const __m128i kMult = _mm_set_epi16(-1, 1, -1, 1, 1, 1, 1, 1); + const __m128i src0 = _mm_loadl_epi64((__m128i*)&in[0 * 16]); + const __m128i src1 = _mm_loadl_epi64((__m128i*)&in[1 * 16]); + const __m128i src2 = _mm_loadl_epi64((__m128i*)&in[2 * 16]); + const __m128i src3 = _mm_loadl_epi64((__m128i*)&in[3 * 16]); + const __m128i A01 = _mm_unpacklo_epi16(src0, src1); // A0 A1 | ... + const __m128i A23 = _mm_unpacklo_epi16(src2, src3); // A2 A3 | ... + const __m128i B0 = _mm_adds_epi16(A01, A23); // a0 | a1 | ... + const __m128i B1 = _mm_subs_epi16(A01, A23); // a3 | a2 | ... + const __m128i C0 = _mm_unpacklo_epi32(B0, B1); // a0 | a1 | a3 | a2 | ... + const __m128i C1 = _mm_unpacklo_epi32(B1, B0); // a3 | a2 | a0 | a1 | ... + const __m128i D = _mm_unpacklo_epi64(C0, C1); // a0 a1 a3 a2 a3 a2 a0 a1 + *out = _mm_madd_epi16(D, kMult); +} + +static void FTransformWHT_SSE2(const int16_t* in, int16_t* out) { + // Input is 12b signed. + __m128i row0, row1, row2, row3; + // Rows are 14b signed. + FTransformWHTRow_SSE2(in + 0 * 64, &row0); + FTransformWHTRow_SSE2(in + 1 * 64, &row1); + FTransformWHTRow_SSE2(in + 2 * 64, &row2); + FTransformWHTRow_SSE2(in + 3 * 64, &row3); + + { + // The a* are 15b signed. 
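+    // Scalar equivalent: a0 = row0 + row2, a1 = row1 + row3,
+    // a2 = row1 - row3, a3 = row0 - row2, with outputs
+    // (a0 + a1) >> 1, (a3 + a2) >> 1, (a3 - a2) >> 1 and (a0 - a1) >> 1.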
+ const __m128i a0 = _mm_add_epi32(row0, row2); + const __m128i a1 = _mm_add_epi32(row1, row3); + const __m128i a2 = _mm_sub_epi32(row1, row3); + const __m128i a3 = _mm_sub_epi32(row0, row2); + const __m128i a0a3 = _mm_packs_epi32(a0, a3); + const __m128i a1a2 = _mm_packs_epi32(a1, a2); + + // The b* are 16b signed. + const __m128i b0b1 = _mm_add_epi16(a0a3, a1a2); + const __m128i b3b2 = _mm_sub_epi16(a0a3, a1a2); + const __m128i tmp_b2b3 = _mm_unpackhi_epi64(b3b2, b3b2); + const __m128i b2b3 = _mm_unpacklo_epi64(tmp_b2b3, b3b2); + + _mm_storeu_si128((__m128i*)&out[0], _mm_srai_epi16(b0b1, 1)); + _mm_storeu_si128((__m128i*)&out[8], _mm_srai_epi16(b2b3, 1)); + } +} + +//------------------------------------------------------------------------------ +// Compute susceptibility based on DCT-coeff histograms: +// the higher, the "easier" the macroblock is to compress. + +static void CollectHistogram_SSE2(const uint8_t* ref, const uint8_t* pred, + int start_block, int end_block, + VP8Histogram* const histo) { + const __m128i zero = _mm_setzero_si128(); + const __m128i max_coeff_thresh = _mm_set1_epi16(MAX_COEFF_THRESH); + int j; + int distribution[MAX_COEFF_THRESH + 1] = { 0 }; + for (j = start_block; j < end_block; ++j) { + int16_t out[16]; + int k; + + FTransform_SSE2(ref + VP8DspScan[j], pred + VP8DspScan[j], out); + + // Convert coefficients to bin (within out[]). + { + // Load. + const __m128i out0 = _mm_loadu_si128((__m128i*)&out[0]); + const __m128i out1 = _mm_loadu_si128((__m128i*)&out[8]); + const __m128i d0 = _mm_sub_epi16(zero, out0); + const __m128i d1 = _mm_sub_epi16(zero, out1); + const __m128i abs0 = _mm_max_epi16(out0, d0); // abs(v), 16b + const __m128i abs1 = _mm_max_epi16(out1, d1); + // v = abs(out) >> 3 + const __m128i v0 = _mm_srai_epi16(abs0, 3); + const __m128i v1 = _mm_srai_epi16(abs1, 3); + // bin = min(v, MAX_COEFF_THRESH) + const __m128i bin0 = _mm_min_epi16(v0, max_coeff_thresh); + const __m128i bin1 = _mm_min_epi16(v1, max_coeff_thresh); + // Store. + _mm_storeu_si128((__m128i*)&out[0], bin0); + _mm_storeu_si128((__m128i*)&out[8], bin1); + } + + // Convert coefficients to bin. 
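+    // Each bin now holds min(|coeff| >> 3, MAX_COEFF_THRESH); the resulting
+    // distribution feeds VP8SetHistogramData() below.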
+ for (k = 0; k < 16; ++k) { + ++distribution[out[k]]; + } + } + VP8SetHistogramData(distribution, histo); +} + +//------------------------------------------------------------------------------ +// Intra predictions + +// helper for chroma-DC predictions +static WEBP_INLINE void Put8x8uv_SSE2(uint8_t v, uint8_t* dst) { + int j; + const __m128i values = _mm_set1_epi8(v); + for (j = 0; j < 8; ++j) { + _mm_storel_epi64((__m128i*)(dst + j * BPS), values); + } +} + +static WEBP_INLINE void Put16_SSE2(uint8_t v, uint8_t* dst) { + int j; + const __m128i values = _mm_set1_epi8(v); + for (j = 0; j < 16; ++j) { + _mm_store_si128((__m128i*)(dst + j * BPS), values); + } +} + +static WEBP_INLINE void Fill_SSE2(uint8_t* dst, int value, int size) { + if (size == 4) { + int j; + for (j = 0; j < 4; ++j) { + memset(dst + j * BPS, value, 4); + } + } else if (size == 8) { + Put8x8uv_SSE2(value, dst); + } else { + Put16_SSE2(value, dst); + } +} + +static WEBP_INLINE void VE8uv_SSE2(uint8_t* dst, const uint8_t* top) { + int j; + const __m128i top_values = _mm_loadl_epi64((const __m128i*)top); + for (j = 0; j < 8; ++j) { + _mm_storel_epi64((__m128i*)(dst + j * BPS), top_values); + } +} + +static WEBP_INLINE void VE16_SSE2(uint8_t* dst, const uint8_t* top) { + const __m128i top_values = _mm_load_si128((const __m128i*)top); + int j; + for (j = 0; j < 16; ++j) { + _mm_store_si128((__m128i*)(dst + j * BPS), top_values); + } +} + +static WEBP_INLINE void VerticalPred_SSE2(uint8_t* dst, + const uint8_t* top, int size) { + if (top != NULL) { + if (size == 8) { + VE8uv_SSE2(dst, top); + } else { + VE16_SSE2(dst, top); + } + } else { + Fill_SSE2(dst, 127, size); + } +} + +static WEBP_INLINE void HE8uv_SSE2(uint8_t* dst, const uint8_t* left) { + int j; + for (j = 0; j < 8; ++j) { + const __m128i values = _mm_set1_epi8(left[j]); + _mm_storel_epi64((__m128i*)dst, values); + dst += BPS; + } +} + +static WEBP_INLINE void HE16_SSE2(uint8_t* dst, const uint8_t* left) { + int j; + for (j = 0; j < 16; ++j) { + const __m128i values = _mm_set1_epi8(left[j]); + _mm_store_si128((__m128i*)dst, values); + dst += BPS; + } +} + +static WEBP_INLINE void HorizontalPred_SSE2(uint8_t* dst, + const uint8_t* left, int size) { + if (left != NULL) { + if (size == 8) { + HE8uv_SSE2(dst, left); + } else { + HE16_SSE2(dst, left); + } + } else { + Fill_SSE2(dst, 129, size); + } +} + +static WEBP_INLINE void TM_SSE2(uint8_t* dst, const uint8_t* left, + const uint8_t* top, int size) { + const __m128i zero = _mm_setzero_si128(); + int y; + if (size == 8) { + const __m128i top_values = _mm_loadl_epi64((const __m128i*)top); + const __m128i top_base = _mm_unpacklo_epi8(top_values, zero); + for (y = 0; y < 8; ++y, dst += BPS) { + const int val = left[y] - left[-1]; + const __m128i base = _mm_set1_epi16(val); + const __m128i out = _mm_packus_epi16(_mm_add_epi16(base, top_base), zero); + _mm_storel_epi64((__m128i*)dst, out); + } + } else { + const __m128i top_values = _mm_load_si128((const __m128i*)top); + const __m128i top_base_0 = _mm_unpacklo_epi8(top_values, zero); + const __m128i top_base_1 = _mm_unpackhi_epi8(top_values, zero); + for (y = 0; y < 16; ++y, dst += BPS) { + const int val = left[y] - left[-1]; + const __m128i base = _mm_set1_epi16(val); + const __m128i out_0 = _mm_add_epi16(base, top_base_0); + const __m128i out_1 = _mm_add_epi16(base, top_base_1); + const __m128i out = _mm_packus_epi16(out_0, out_1); + _mm_store_si128((__m128i*)dst, out); + } + } +} + +static WEBP_INLINE void TrueMotion_SSE2(uint8_t* dst, const uint8_t* left, + const 
uint8_t* top, int size) { + if (left != NULL) { + if (top != NULL) { + TM_SSE2(dst, left, top, size); + } else { + HorizontalPred_SSE2(dst, left, size); + } + } else { + // true motion without left samples (hence: with default 129 value) + // is equivalent to VE prediction where you just copy the top samples. + // Note that if top samples are not available, the default value is + // then 129, and not 127 as in the VerticalPred case. + if (top != NULL) { + VerticalPred_SSE2(dst, top, size); + } else { + Fill_SSE2(dst, 129, size); + } + } +} + +static WEBP_INLINE void DC8uv_SSE2(uint8_t* dst, const uint8_t* left, + const uint8_t* top) { + const __m128i top_values = _mm_loadl_epi64((const __m128i*)top); + const __m128i left_values = _mm_loadl_epi64((const __m128i*)left); + const __m128i combined = _mm_unpacklo_epi64(top_values, left_values); + const int DC = VP8HorizontalAdd8b(&combined) + 8; + Put8x8uv_SSE2(DC >> 4, dst); +} + +static WEBP_INLINE void DC8uvNoLeft_SSE2(uint8_t* dst, const uint8_t* top) { + const __m128i zero = _mm_setzero_si128(); + const __m128i top_values = _mm_loadl_epi64((const __m128i*)top); + const __m128i sum = _mm_sad_epu8(top_values, zero); + const int DC = _mm_cvtsi128_si32(sum) + 4; + Put8x8uv_SSE2(DC >> 3, dst); +} + +static WEBP_INLINE void DC8uvNoTop_SSE2(uint8_t* dst, const uint8_t* left) { + // 'left' is contiguous so we can reuse the top summation. + DC8uvNoLeft_SSE2(dst, left); +} + +static WEBP_INLINE void DC8uvNoTopLeft_SSE2(uint8_t* dst) { + Put8x8uv_SSE2(0x80, dst); +} + +static WEBP_INLINE void DC8uvMode_SSE2(uint8_t* dst, const uint8_t* left, + const uint8_t* top) { + if (top != NULL) { + if (left != NULL) { // top and left present + DC8uv_SSE2(dst, left, top); + } else { // top, but no left + DC8uvNoLeft_SSE2(dst, top); + } + } else if (left != NULL) { // left but no top + DC8uvNoTop_SSE2(dst, left); + } else { // no top, no left, nothing. + DC8uvNoTopLeft_SSE2(dst); + } +} + +static WEBP_INLINE void DC16_SSE2(uint8_t* dst, const uint8_t* left, + const uint8_t* top) { + const __m128i top_row = _mm_load_si128((const __m128i*)top); + const __m128i left_row = _mm_load_si128((const __m128i*)left); + const int DC = + VP8HorizontalAdd8b(&top_row) + VP8HorizontalAdd8b(&left_row) + 16; + Put16_SSE2(DC >> 5, dst); +} + +static WEBP_INLINE void DC16NoLeft_SSE2(uint8_t* dst, const uint8_t* top) { + const __m128i top_row = _mm_load_si128((const __m128i*)top); + const int DC = VP8HorizontalAdd8b(&top_row) + 8; + Put16_SSE2(DC >> 4, dst); +} + +static WEBP_INLINE void DC16NoTop_SSE2(uint8_t* dst, const uint8_t* left) { + // 'left' is contiguous so we can reuse the top summation. + DC16NoLeft_SSE2(dst, left); +} + +static WEBP_INLINE void DC16NoTopLeft_SSE2(uint8_t* dst) { + Put16_SSE2(0x80, dst); +} + +static WEBP_INLINE void DC16Mode_SSE2(uint8_t* dst, const uint8_t* left, + const uint8_t* top) { + if (top != NULL) { + if (left != NULL) { // top and left present + DC16_SSE2(dst, left, top); + } else { // top, but no left + DC16NoLeft_SSE2(dst, top); + } + } else if (left != NULL) { // left but no top + DC16NoTop_SSE2(dst, left); + } else { // no top, no left, nothing. 
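+    // Neither neighbor is available: fall back to the fixed 0x80 DC value.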
+ DC16NoTopLeft_SSE2(dst); + } +} + +//------------------------------------------------------------------------------ +// 4x4 predictions + +#define DST(x, y) dst[(x) + (y) * BPS] +#define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2) +#define AVG2(a, b) (((a) + (b) + 1) >> 1) + +// We use the following 8b-arithmetic tricks: +// (a + 2 * b + c + 2) >> 2 = (AC + b + 1) >> 1 +// where: AC = (a + c) >> 1 = [(a + c + 1) >> 1] - [(a^c) & 1] +// and: +// (a + 2 * b + c + 2) >> 2 = (AB + BC + 1) >> 1 - (ab|bc)&lsb +// where: AC = (a + b + 1) >> 1, BC = (b + c + 1) >> 1 +// and ab = a ^ b, bc = b ^ c, lsb = (AC^BC)&1 + +static WEBP_INLINE void VE4_SSE2(uint8_t* dst, + const uint8_t* top) { // vertical + const __m128i one = _mm_set1_epi8(1); + const __m128i ABCDEFGH = _mm_loadl_epi64((__m128i*)(top - 1)); + const __m128i BCDEFGH0 = _mm_srli_si128(ABCDEFGH, 1); + const __m128i CDEFGH00 = _mm_srli_si128(ABCDEFGH, 2); + const __m128i a = _mm_avg_epu8(ABCDEFGH, CDEFGH00); + const __m128i lsb = _mm_and_si128(_mm_xor_si128(ABCDEFGH, CDEFGH00), one); + const __m128i b = _mm_subs_epu8(a, lsb); + const __m128i avg = _mm_avg_epu8(b, BCDEFGH0); + const uint32_t vals = _mm_cvtsi128_si32(avg); + int i; + for (i = 0; i < 4; ++i) { + WebPUint32ToMem(dst + i * BPS, vals); + } +} + +static WEBP_INLINE void HE4_SSE2(uint8_t* dst, + const uint8_t* top) { // horizontal + const int X = top[-1]; + const int I = top[-2]; + const int J = top[-3]; + const int K = top[-4]; + const int L = top[-5]; + WebPUint32ToMem(dst + 0 * BPS, 0x01010101U * AVG3(X, I, J)); + WebPUint32ToMem(dst + 1 * BPS, 0x01010101U * AVG3(I, J, K)); + WebPUint32ToMem(dst + 2 * BPS, 0x01010101U * AVG3(J, K, L)); + WebPUint32ToMem(dst + 3 * BPS, 0x01010101U * AVG3(K, L, L)); +} + +static WEBP_INLINE void DC4_SSE2(uint8_t* dst, const uint8_t* top) { + uint32_t dc = 4; + int i; + for (i = 0; i < 4; ++i) dc += top[i] + top[-5 + i]; + Fill_SSE2(dst, dc >> 3, 4); +} + +static WEBP_INLINE void LD4_SSE2(uint8_t* dst, + const uint8_t* top) { // Down-Left + const __m128i one = _mm_set1_epi8(1); + const __m128i ABCDEFGH = _mm_loadl_epi64((const __m128i*)top); + const __m128i BCDEFGH0 = _mm_srli_si128(ABCDEFGH, 1); + const __m128i CDEFGH00 = _mm_srli_si128(ABCDEFGH, 2); + const __m128i CDEFGHH0 = _mm_insert_epi16(CDEFGH00, top[7], 3); + const __m128i avg1 = _mm_avg_epu8(ABCDEFGH, CDEFGHH0); + const __m128i lsb = _mm_and_si128(_mm_xor_si128(ABCDEFGH, CDEFGHH0), one); + const __m128i avg2 = _mm_subs_epu8(avg1, lsb); + const __m128i abcdefg = _mm_avg_epu8(avg2, BCDEFGH0); + WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32( abcdefg )); + WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 1))); + WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 2))); + WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 3))); +} + +static WEBP_INLINE void VR4_SSE2(uint8_t* dst, + const uint8_t* top) { // Vertical-Right + const __m128i one = _mm_set1_epi8(1); + const int I = top[-2]; + const int J = top[-3]; + const int K = top[-4]; + const int X = top[-1]; + const __m128i XABCD = _mm_loadl_epi64((const __m128i*)(top - 1)); + const __m128i ABCD0 = _mm_srli_si128(XABCD, 1); + const __m128i abcd = _mm_avg_epu8(XABCD, ABCD0); + const __m128i _XABCD = _mm_slli_si128(XABCD, 1); + const __m128i IXABCD = _mm_insert_epi16(_XABCD, (short)(I | (X << 8)), 0); + const __m128i avg1 = _mm_avg_epu8(IXABCD, ABCD0); + const __m128i lsb = _mm_and_si128(_mm_xor_si128(IXABCD, ABCD0), one); + const __m128i avg2 = _mm_subs_epu8(avg1, lsb); + 
const __m128i efgh = _mm_avg_epu8(avg2, XABCD); + WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32( abcd )); + WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32( efgh )); + WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_slli_si128(abcd, 1))); + WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(_mm_slli_si128(efgh, 1))); + + // these two are hard to implement in SSE2, so we keep the C-version: + DST(0, 2) = AVG3(J, I, X); + DST(0, 3) = AVG3(K, J, I); +} + +static WEBP_INLINE void VL4_SSE2(uint8_t* dst, + const uint8_t* top) { // Vertical-Left + const __m128i one = _mm_set1_epi8(1); + const __m128i ABCDEFGH = _mm_loadl_epi64((const __m128i*)top); + const __m128i BCDEFGH_ = _mm_srli_si128(ABCDEFGH, 1); + const __m128i CDEFGH__ = _mm_srli_si128(ABCDEFGH, 2); + const __m128i avg1 = _mm_avg_epu8(ABCDEFGH, BCDEFGH_); + const __m128i avg2 = _mm_avg_epu8(CDEFGH__, BCDEFGH_); + const __m128i avg3 = _mm_avg_epu8(avg1, avg2); + const __m128i lsb1 = _mm_and_si128(_mm_xor_si128(avg1, avg2), one); + const __m128i ab = _mm_xor_si128(ABCDEFGH, BCDEFGH_); + const __m128i bc = _mm_xor_si128(CDEFGH__, BCDEFGH_); + const __m128i abbc = _mm_or_si128(ab, bc); + const __m128i lsb2 = _mm_and_si128(abbc, lsb1); + const __m128i avg4 = _mm_subs_epu8(avg3, lsb2); + const uint32_t extra_out = _mm_cvtsi128_si32(_mm_srli_si128(avg4, 4)); + WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32( avg1 )); + WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32( avg4 )); + WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(avg1, 1))); + WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(avg4, 1))); + + // these two are hard to get and irregular + DST(3, 2) = (extra_out >> 0) & 0xff; + DST(3, 3) = (extra_out >> 8) & 0xff; +} + +static WEBP_INLINE void RD4_SSE2(uint8_t* dst, + const uint8_t* top) { // Down-right + const __m128i one = _mm_set1_epi8(1); + const __m128i LKJIXABC = _mm_loadl_epi64((const __m128i*)(top - 5)); + const __m128i LKJIXABCD = _mm_insert_epi16(LKJIXABC, top[3], 4); + const __m128i KJIXABCD_ = _mm_srli_si128(LKJIXABCD, 1); + const __m128i JIXABCD__ = _mm_srli_si128(LKJIXABCD, 2); + const __m128i avg1 = _mm_avg_epu8(JIXABCD__, LKJIXABCD); + const __m128i lsb = _mm_and_si128(_mm_xor_si128(JIXABCD__, LKJIXABCD), one); + const __m128i avg2 = _mm_subs_epu8(avg1, lsb); + const __m128i abcdefg = _mm_avg_epu8(avg2, KJIXABCD_); + WebPUint32ToMem(dst + 3 * BPS, _mm_cvtsi128_si32( abcdefg )); + WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 1))); + WebPUint32ToMem(dst + 1 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 2))); + WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(abcdefg, 3))); +} + +static WEBP_INLINE void HU4_SSE2(uint8_t* dst, const uint8_t* top) { + const int I = top[-2]; + const int J = top[-3]; + const int K = top[-4]; + const int L = top[-5]; + DST(0, 0) = AVG2(I, J); + DST(2, 0) = DST(0, 1) = AVG2(J, K); + DST(2, 1) = DST(0, 2) = AVG2(K, L); + DST(1, 0) = AVG3(I, J, K); + DST(3, 0) = DST(1, 1) = AVG3(J, K, L); + DST(3, 1) = DST(1, 2) = AVG3(K, L, L); + DST(3, 2) = DST(2, 2) = + DST(0, 3) = DST(1, 3) = DST(2, 3) = DST(3, 3) = L; +} + +static WEBP_INLINE void HD4_SSE2(uint8_t* dst, const uint8_t* top) { + const int X = top[-1]; + const int I = top[-2]; + const int J = top[-3]; + const int K = top[-4]; + const int L = top[-5]; + const int A = top[0]; + const int B = top[1]; + const int C = top[2]; + + DST(0, 0) = DST(2, 1) = AVG2(I, X); + DST(0, 1) = DST(2, 2) = AVG2(J, I); + DST(0, 2) = DST(2, 3) = AVG2(K, J); + DST(0, 3) = AVG2(L, K); + 
+ DST(3, 0) = AVG3(A, B, C); + DST(2, 0) = AVG3(X, A, B); + DST(1, 0) = DST(3, 1) = AVG3(I, X, A); + DST(1, 1) = DST(3, 2) = AVG3(J, I, X); + DST(1, 2) = DST(3, 3) = AVG3(K, J, I); + DST(1, 3) = AVG3(L, K, J); +} + +static WEBP_INLINE void TM4_SSE2(uint8_t* dst, const uint8_t* top) { + const __m128i zero = _mm_setzero_si128(); + const __m128i top_values = _mm_cvtsi32_si128(WebPMemToUint32(top)); + const __m128i top_base = _mm_unpacklo_epi8(top_values, zero); + int y; + for (y = 0; y < 4; ++y, dst += BPS) { + const int val = top[-2 - y] - top[-1]; + const __m128i base = _mm_set1_epi16(val); + const __m128i out = _mm_packus_epi16(_mm_add_epi16(base, top_base), zero); + WebPUint32ToMem(dst, _mm_cvtsi128_si32(out)); + } +} + +#undef DST +#undef AVG3 +#undef AVG2 + +//------------------------------------------------------------------------------ +// luma 4x4 prediction + +// Left samples are top[-5 .. -2], top_left is top[-1], top are +// located at top[0..3], and top right is top[4..7] +static void Intra4Preds_SSE2(uint8_t* dst, const uint8_t* top) { + DC4_SSE2(I4DC4 + dst, top); + TM4_SSE2(I4TM4 + dst, top); + VE4_SSE2(I4VE4 + dst, top); + HE4_SSE2(I4HE4 + dst, top); + RD4_SSE2(I4RD4 + dst, top); + VR4_SSE2(I4VR4 + dst, top); + LD4_SSE2(I4LD4 + dst, top); + VL4_SSE2(I4VL4 + dst, top); + HD4_SSE2(I4HD4 + dst, top); + HU4_SSE2(I4HU4 + dst, top); +} + +//------------------------------------------------------------------------------ +// Chroma 8x8 prediction (paragraph 12.2) + +static void IntraChromaPreds_SSE2(uint8_t* dst, const uint8_t* left, + const uint8_t* top) { + // U block + DC8uvMode_SSE2(C8DC8 + dst, left, top); + VerticalPred_SSE2(C8VE8 + dst, top, 8); + HorizontalPred_SSE2(C8HE8 + dst, left, 8); + TrueMotion_SSE2(C8TM8 + dst, left, top, 8); + // V block + dst += 8; + if (top != NULL) top += 8; + if (left != NULL) left += 16; + DC8uvMode_SSE2(C8DC8 + dst, left, top); + VerticalPred_SSE2(C8VE8 + dst, top, 8); + HorizontalPred_SSE2(C8HE8 + dst, left, 8); + TrueMotion_SSE2(C8TM8 + dst, left, top, 8); +} + +//------------------------------------------------------------------------------ +// luma 16x16 prediction (paragraph 12.3) + +static void Intra16Preds_SSE2(uint8_t* dst, + const uint8_t* left, const uint8_t* top) { + DC16Mode_SSE2(I16DC16 + dst, left, top); + VerticalPred_SSE2(I16VE16 + dst, top, 16); + HorizontalPred_SSE2(I16HE16 + dst, left, 16); + TrueMotion_SSE2(I16TM16 + dst, left, top, 16); +} + +//------------------------------------------------------------------------------ +// Metric + +static WEBP_INLINE void SubtractAndAccumulate_SSE2(const __m128i a, + const __m128i b, + __m128i* const sum) { + // take abs(a-b) in 8b + const __m128i a_b = _mm_subs_epu8(a, b); + const __m128i b_a = _mm_subs_epu8(b, a); + const __m128i abs_a_b = _mm_or_si128(a_b, b_a); + // zero-extend to 16b + const __m128i zero = _mm_setzero_si128(); + const __m128i C0 = _mm_unpacklo_epi8(abs_a_b, zero); + const __m128i C1 = _mm_unpackhi_epi8(abs_a_b, zero); + // multiply with self + const __m128i sum1 = _mm_madd_epi16(C0, C0); + const __m128i sum2 = _mm_madd_epi16(C1, C1); + *sum = _mm_add_epi32(sum1, sum2); +} + +static WEBP_INLINE int SSE_16xN_SSE2(const uint8_t* a, const uint8_t* b, + int num_pairs) { + __m128i sum = _mm_setzero_si128(); + int32_t tmp[4]; + int i; + + for (i = 0; i < num_pairs; ++i) { + const __m128i a0 = _mm_loadu_si128((const __m128i*)&a[BPS * 0]); + const __m128i b0 = _mm_loadu_si128((const __m128i*)&b[BPS * 0]); + const __m128i a1 = _mm_loadu_si128((const __m128i*)&a[BPS * 1]); + 
const __m128i b1 = _mm_loadu_si128((const __m128i*)&b[BPS * 1]); + __m128i sum1, sum2; + SubtractAndAccumulate_SSE2(a0, b0, &sum1); + SubtractAndAccumulate_SSE2(a1, b1, &sum2); + sum = _mm_add_epi32(sum, _mm_add_epi32(sum1, sum2)); + a += 2 * BPS; + b += 2 * BPS; + } + _mm_storeu_si128((__m128i*)tmp, sum); + return (tmp[3] + tmp[2] + tmp[1] + tmp[0]); +} + +static int SSE16x16_SSE2(const uint8_t* a, const uint8_t* b) { + return SSE_16xN_SSE2(a, b, 8); +} + +static int SSE16x8_SSE2(const uint8_t* a, const uint8_t* b) { + return SSE_16xN_SSE2(a, b, 4); +} + +#define LOAD_8x16b(ptr) \ + _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(ptr)), zero) + +static int SSE8x8_SSE2(const uint8_t* a, const uint8_t* b) { + const __m128i zero = _mm_setzero_si128(); + int num_pairs = 4; + __m128i sum = zero; + int32_t tmp[4]; + while (num_pairs-- > 0) { + const __m128i a0 = LOAD_8x16b(&a[BPS * 0]); + const __m128i a1 = LOAD_8x16b(&a[BPS * 1]); + const __m128i b0 = LOAD_8x16b(&b[BPS * 0]); + const __m128i b1 = LOAD_8x16b(&b[BPS * 1]); + // subtract + const __m128i c0 = _mm_subs_epi16(a0, b0); + const __m128i c1 = _mm_subs_epi16(a1, b1); + // multiply/accumulate with self + const __m128i d0 = _mm_madd_epi16(c0, c0); + const __m128i d1 = _mm_madd_epi16(c1, c1); + // collect + const __m128i sum01 = _mm_add_epi32(d0, d1); + sum = _mm_add_epi32(sum, sum01); + a += 2 * BPS; + b += 2 * BPS; + } + _mm_storeu_si128((__m128i*)tmp, sum); + return (tmp[3] + tmp[2] + tmp[1] + tmp[0]); +} +#undef LOAD_8x16b + +static int SSE4x4_SSE2(const uint8_t* a, const uint8_t* b) { + const __m128i zero = _mm_setzero_si128(); + + // Load values. Note that we read 8 pixels instead of 4, + // but the a/b buffers are over-allocated to that effect. + const __m128i a0 = _mm_loadl_epi64((const __m128i*)&a[BPS * 0]); + const __m128i a1 = _mm_loadl_epi64((const __m128i*)&a[BPS * 1]); + const __m128i a2 = _mm_loadl_epi64((const __m128i*)&a[BPS * 2]); + const __m128i a3 = _mm_loadl_epi64((const __m128i*)&a[BPS * 3]); + const __m128i b0 = _mm_loadl_epi64((const __m128i*)&b[BPS * 0]); + const __m128i b1 = _mm_loadl_epi64((const __m128i*)&b[BPS * 1]); + const __m128i b2 = _mm_loadl_epi64((const __m128i*)&b[BPS * 2]); + const __m128i b3 = _mm_loadl_epi64((const __m128i*)&b[BPS * 3]); + // Combine pair of lines. + const __m128i a01 = _mm_unpacklo_epi32(a0, a1); + const __m128i a23 = _mm_unpacklo_epi32(a2, a3); + const __m128i b01 = _mm_unpacklo_epi32(b0, b1); + const __m128i b23 = _mm_unpacklo_epi32(b2, b3); + // Convert to 16b. 
+ const __m128i a01s = _mm_unpacklo_epi8(a01, zero); + const __m128i a23s = _mm_unpacklo_epi8(a23, zero); + const __m128i b01s = _mm_unpacklo_epi8(b01, zero); + const __m128i b23s = _mm_unpacklo_epi8(b23, zero); + // subtract, square and accumulate + const __m128i d0 = _mm_subs_epi16(a01s, b01s); + const __m128i d1 = _mm_subs_epi16(a23s, b23s); + const __m128i e0 = _mm_madd_epi16(d0, d0); + const __m128i e1 = _mm_madd_epi16(d1, d1); + const __m128i sum = _mm_add_epi32(e0, e1); + + int32_t tmp[4]; + _mm_storeu_si128((__m128i*)tmp, sum); + return (tmp[3] + tmp[2] + tmp[1] + tmp[0]); +} + +//------------------------------------------------------------------------------ + +static void Mean16x4_SSE2(const uint8_t* ref, uint32_t dc[4]) { + const __m128i mask = _mm_set1_epi16(0x00ff); + const __m128i a0 = _mm_loadu_si128((const __m128i*)&ref[BPS * 0]); + const __m128i a1 = _mm_loadu_si128((const __m128i*)&ref[BPS * 1]); + const __m128i a2 = _mm_loadu_si128((const __m128i*)&ref[BPS * 2]); + const __m128i a3 = _mm_loadu_si128((const __m128i*)&ref[BPS * 3]); + const __m128i b0 = _mm_srli_epi16(a0, 8); // hi byte + const __m128i b1 = _mm_srli_epi16(a1, 8); + const __m128i b2 = _mm_srli_epi16(a2, 8); + const __m128i b3 = _mm_srli_epi16(a3, 8); + const __m128i c0 = _mm_and_si128(a0, mask); // lo byte + const __m128i c1 = _mm_and_si128(a1, mask); + const __m128i c2 = _mm_and_si128(a2, mask); + const __m128i c3 = _mm_and_si128(a3, mask); + const __m128i d0 = _mm_add_epi32(b0, c0); + const __m128i d1 = _mm_add_epi32(b1, c1); + const __m128i d2 = _mm_add_epi32(b2, c2); + const __m128i d3 = _mm_add_epi32(b3, c3); + const __m128i e0 = _mm_add_epi32(d0, d1); + const __m128i e1 = _mm_add_epi32(d2, d3); + const __m128i f0 = _mm_add_epi32(e0, e1); + uint16_t tmp[8]; + _mm_storeu_si128((__m128i*)tmp, f0); + dc[0] = tmp[0] + tmp[1]; + dc[1] = tmp[2] + tmp[3]; + dc[2] = tmp[4] + tmp[5]; + dc[3] = tmp[6] + tmp[7]; +} + +//------------------------------------------------------------------------------ +// Texture distortion +// +// We try to match the spectral content (weighted) between source and +// reconstructed samples. + +// Hadamard transform +// Returns the weighted sum of the absolute value of transformed coefficients. +// w[] contains a row-major 4 by 4 symmetric matrix. +static int TTransform_SSE2(const uint8_t* inA, const uint8_t* inB, + const uint16_t* const w) { + int32_t sum[4]; + __m128i tmp_0, tmp_1, tmp_2, tmp_3; + const __m128i zero = _mm_setzero_si128(); + + // Load and combine inputs. + { + const __m128i inA_0 = _mm_loadl_epi64((const __m128i*)&inA[BPS * 0]); + const __m128i inA_1 = _mm_loadl_epi64((const __m128i*)&inA[BPS * 1]); + const __m128i inA_2 = _mm_loadl_epi64((const __m128i*)&inA[BPS * 2]); + const __m128i inA_3 = _mm_loadl_epi64((const __m128i*)&inA[BPS * 3]); + const __m128i inB_0 = _mm_loadl_epi64((const __m128i*)&inB[BPS * 0]); + const __m128i inB_1 = _mm_loadl_epi64((const __m128i*)&inB[BPS * 1]); + const __m128i inB_2 = _mm_loadl_epi64((const __m128i*)&inB[BPS * 2]); + const __m128i inB_3 = _mm_loadl_epi64((const __m128i*)&inB[BPS * 3]); + + // Combine inA and inB (we'll do two transforms in parallel). 
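+    // Interleaving 32-bit lanes pairs each 4-pixel row of inA with the
+    // matching row of inB, so each 8x16-bit register below holds one row
+    // of both blocks.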
+ const __m128i inAB_0 = _mm_unpacklo_epi32(inA_0, inB_0); + const __m128i inAB_1 = _mm_unpacklo_epi32(inA_1, inB_1); + const __m128i inAB_2 = _mm_unpacklo_epi32(inA_2, inB_2); + const __m128i inAB_3 = _mm_unpacklo_epi32(inA_3, inB_3); + tmp_0 = _mm_unpacklo_epi8(inAB_0, zero); + tmp_1 = _mm_unpacklo_epi8(inAB_1, zero); + tmp_2 = _mm_unpacklo_epi8(inAB_2, zero); + tmp_3 = _mm_unpacklo_epi8(inAB_3, zero); + // a00 a01 a02 a03 b00 b01 b02 b03 + // a10 a11 a12 a13 b10 b11 b12 b13 + // a20 a21 a22 a23 b20 b21 b22 b23 + // a30 a31 a32 a33 b30 b31 b32 b33 + } + + // Vertical pass first to avoid a transpose (vertical and horizontal passes + // are commutative because w/kWeightY is symmetric) and subsequent transpose. + { + // Calculate a and b (two 4x4 at once). + const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2); + const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3); + const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3); + const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2); + const __m128i b0 = _mm_add_epi16(a0, a1); + const __m128i b1 = _mm_add_epi16(a3, a2); + const __m128i b2 = _mm_sub_epi16(a3, a2); + const __m128i b3 = _mm_sub_epi16(a0, a1); + // a00 a01 a02 a03 b00 b01 b02 b03 + // a10 a11 a12 a13 b10 b11 b12 b13 + // a20 a21 a22 a23 b20 b21 b22 b23 + // a30 a31 a32 a33 b30 b31 b32 b33 + + // Transpose the two 4x4. + VP8Transpose_2_4x4_16b(&b0, &b1, &b2, &b3, &tmp_0, &tmp_1, &tmp_2, &tmp_3); + } + + // Horizontal pass and difference of weighted sums. + { + // Load all inputs. + const __m128i w_0 = _mm_loadu_si128((const __m128i*)&w[0]); + const __m128i w_8 = _mm_loadu_si128((const __m128i*)&w[8]); + + // Calculate a and b (two 4x4 at once). + const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2); + const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3); + const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3); + const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2); + const __m128i b0 = _mm_add_epi16(a0, a1); + const __m128i b1 = _mm_add_epi16(a3, a2); + const __m128i b2 = _mm_sub_epi16(a3, a2); + const __m128i b3 = _mm_sub_epi16(a0, a1); + + // Separate the transforms of inA and inB. 
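+    // Each b-register holds the A-block result in its low 64 bits and the
+    // B-block result in its high 64 bits; the 64-bit unpacks pull them apart.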
+ __m128i A_b0 = _mm_unpacklo_epi64(b0, b1); + __m128i A_b2 = _mm_unpacklo_epi64(b2, b3); + __m128i B_b0 = _mm_unpackhi_epi64(b0, b1); + __m128i B_b2 = _mm_unpackhi_epi64(b2, b3); + + { + const __m128i d0 = _mm_sub_epi16(zero, A_b0); + const __m128i d1 = _mm_sub_epi16(zero, A_b2); + const __m128i d2 = _mm_sub_epi16(zero, B_b0); + const __m128i d3 = _mm_sub_epi16(zero, B_b2); + A_b0 = _mm_max_epi16(A_b0, d0); // abs(v), 16b + A_b2 = _mm_max_epi16(A_b2, d1); + B_b0 = _mm_max_epi16(B_b0, d2); + B_b2 = _mm_max_epi16(B_b2, d3); + } + + // weighted sums + A_b0 = _mm_madd_epi16(A_b0, w_0); + A_b2 = _mm_madd_epi16(A_b2, w_8); + B_b0 = _mm_madd_epi16(B_b0, w_0); + B_b2 = _mm_madd_epi16(B_b2, w_8); + A_b0 = _mm_add_epi32(A_b0, A_b2); + B_b0 = _mm_add_epi32(B_b0, B_b2); + + // difference of weighted sums + A_b0 = _mm_sub_epi32(A_b0, B_b0); + _mm_storeu_si128((__m128i*)&sum[0], A_b0); + } + return sum[0] + sum[1] + sum[2] + sum[3]; +} + +static int Disto4x4_SSE2(const uint8_t* const a, const uint8_t* const b, + const uint16_t* const w) { + const int diff_sum = TTransform_SSE2(a, b, w); + return abs(diff_sum) >> 5; +} + +static int Disto16x16_SSE2(const uint8_t* const a, const uint8_t* const b, + const uint16_t* const w) { + int D = 0; + int x, y; + for (y = 0; y < 16 * BPS; y += 4 * BPS) { + for (x = 0; x < 16; x += 4) { + D += Disto4x4_SSE2(a + x + y, b + x + y, w); + } + } + return D; +} + +//------------------------------------------------------------------------------ +// Quantization +// + +static WEBP_INLINE int DoQuantizeBlock_SSE2(int16_t in[16], int16_t out[16], + const uint16_t* const sharpen, + const VP8Matrix* const mtx) { + const __m128i max_coeff_2047 = _mm_set1_epi16(MAX_LEVEL); + const __m128i zero = _mm_setzero_si128(); + __m128i coeff0, coeff8; + __m128i out0, out8; + __m128i packed_out; + + // Load all inputs. 
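+  // Per coefficient j, the code below computes
+  //   level = min(((|in[j]| + sharpen[j]) * iq_[j] + bias_[j]) >> QFIX, 2047)
+  // (sharpen may be NULL and is then skipped), restores the sign, and
+  // writes the dequantized value level * q_[j] back into in[j].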
+ __m128i in0 = _mm_loadu_si128((__m128i*)&in[0]); + __m128i in8 = _mm_loadu_si128((__m128i*)&in[8]); + const __m128i iq0 = _mm_loadu_si128((const __m128i*)&mtx->iq_[0]); + const __m128i iq8 = _mm_loadu_si128((const __m128i*)&mtx->iq_[8]); + const __m128i q0 = _mm_loadu_si128((const __m128i*)&mtx->q_[0]); + const __m128i q8 = _mm_loadu_si128((const __m128i*)&mtx->q_[8]); + + // extract sign(in) (0x0000 if positive, 0xffff if negative) + const __m128i sign0 = _mm_cmpgt_epi16(zero, in0); + const __m128i sign8 = _mm_cmpgt_epi16(zero, in8); + + // coeff = abs(in) = (in ^ sign) - sign + coeff0 = _mm_xor_si128(in0, sign0); + coeff8 = _mm_xor_si128(in8, sign8); + coeff0 = _mm_sub_epi16(coeff0, sign0); + coeff8 = _mm_sub_epi16(coeff8, sign8); + + // coeff = abs(in) + sharpen + if (sharpen != NULL) { + const __m128i sharpen0 = _mm_loadu_si128((const __m128i*)&sharpen[0]); + const __m128i sharpen8 = _mm_loadu_si128((const __m128i*)&sharpen[8]); + coeff0 = _mm_add_epi16(coeff0, sharpen0); + coeff8 = _mm_add_epi16(coeff8, sharpen8); + } + + // out = (coeff * iQ + B) >> QFIX + { + // doing calculations with 32b precision (QFIX=17) + // out = (coeff * iQ) + const __m128i coeff_iQ0H = _mm_mulhi_epu16(coeff0, iq0); + const __m128i coeff_iQ0L = _mm_mullo_epi16(coeff0, iq0); + const __m128i coeff_iQ8H = _mm_mulhi_epu16(coeff8, iq8); + const __m128i coeff_iQ8L = _mm_mullo_epi16(coeff8, iq8); + __m128i out_00 = _mm_unpacklo_epi16(coeff_iQ0L, coeff_iQ0H); + __m128i out_04 = _mm_unpackhi_epi16(coeff_iQ0L, coeff_iQ0H); + __m128i out_08 = _mm_unpacklo_epi16(coeff_iQ8L, coeff_iQ8H); + __m128i out_12 = _mm_unpackhi_epi16(coeff_iQ8L, coeff_iQ8H); + // out = (coeff * iQ + B) + const __m128i bias_00 = _mm_loadu_si128((const __m128i*)&mtx->bias_[0]); + const __m128i bias_04 = _mm_loadu_si128((const __m128i*)&mtx->bias_[4]); + const __m128i bias_08 = _mm_loadu_si128((const __m128i*)&mtx->bias_[8]); + const __m128i bias_12 = _mm_loadu_si128((const __m128i*)&mtx->bias_[12]); + out_00 = _mm_add_epi32(out_00, bias_00); + out_04 = _mm_add_epi32(out_04, bias_04); + out_08 = _mm_add_epi32(out_08, bias_08); + out_12 = _mm_add_epi32(out_12, bias_12); + // out = QUANTDIV(coeff, iQ, B, QFIX) + out_00 = _mm_srai_epi32(out_00, QFIX); + out_04 = _mm_srai_epi32(out_04, QFIX); + out_08 = _mm_srai_epi32(out_08, QFIX); + out_12 = _mm_srai_epi32(out_12, QFIX); + + // pack result as 16b + out0 = _mm_packs_epi32(out_00, out_04); + out8 = _mm_packs_epi32(out_08, out_12); + + // if (coeff > 2047) coeff = 2047 + out0 = _mm_min_epi16(out0, max_coeff_2047); + out8 = _mm_min_epi16(out8, max_coeff_2047); + } + + // get sign back (if (sign[j]) out_n = -out_n) + out0 = _mm_xor_si128(out0, sign0); + out8 = _mm_xor_si128(out8, sign8); + out0 = _mm_sub_epi16(out0, sign0); + out8 = _mm_sub_epi16(out8, sign8); + + // in = out * Q + in0 = _mm_mullo_epi16(out0, q0); + in8 = _mm_mullo_epi16(out8, q8); + + _mm_storeu_si128((__m128i*)&in[0], in0); + _mm_storeu_si128((__m128i*)&in[8], in8); + + // zigzag the output before storing it. + // + // The zigzag pattern can almost be reproduced with a small sequence of + // shuffles. After it, we only need to swap the 7th (ending up in third + // position instead of twelfth) and 8th values. 
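+  // Target zigzag order: 0 1 4 8 5 2 3 6 9 12 13 10 7 11 14 15.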
+  {
+    __m128i outZ0, outZ8;
+    outZ0 = _mm_shufflehi_epi16(out0,  _MM_SHUFFLE(2, 1, 3, 0));
+    outZ0 = _mm_shuffle_epi32  (outZ0, _MM_SHUFFLE(3, 1, 2, 0));
+    outZ0 = _mm_shufflehi_epi16(outZ0, _MM_SHUFFLE(3, 1, 0, 2));
+    outZ8 = _mm_shufflelo_epi16(out8,  _MM_SHUFFLE(3, 0, 2, 1));
+    outZ8 = _mm_shuffle_epi32  (outZ8, _MM_SHUFFLE(3, 1, 2, 0));
+    outZ8 = _mm_shufflelo_epi16(outZ8, _MM_SHUFFLE(1, 3, 2, 0));
+    _mm_storeu_si128((__m128i*)&out[0], outZ0);
+    _mm_storeu_si128((__m128i*)&out[8], outZ8);
+    packed_out = _mm_packs_epi16(outZ0, outZ8);
+  }
+  {
+    const int16_t outZ_12 = out[12];
+    const int16_t outZ_3 = out[3];
+    out[3] = outZ_12;
+    out[12] = outZ_3;
+  }
+
+  // detect if all 'out' values are zeroes or not
+  return (_mm_movemask_epi8(_mm_cmpeq_epi8(packed_out, zero)) != 0xffff);
+}
+
+static int QuantizeBlock_SSE2(int16_t in[16], int16_t out[16],
+                              const VP8Matrix* const mtx) {
+  return DoQuantizeBlock_SSE2(in, out, &mtx->sharpen_[0], mtx);
+}
+
+static int QuantizeBlockWHT_SSE2(int16_t in[16], int16_t out[16],
+                                 const VP8Matrix* const mtx) {
+  return DoQuantizeBlock_SSE2(in, out, NULL, mtx);
+}
+
+static int Quantize2Blocks_SSE2(int16_t in[32], int16_t out[32],
+                                const VP8Matrix* const mtx) {
+  int nz;
+  const uint16_t* const sharpen = &mtx->sharpen_[0];
+  nz  = DoQuantizeBlock_SSE2(in + 0 * 16, out + 0 * 16, sharpen, mtx) << 0;
+  nz |= DoQuantizeBlock_SSE2(in + 1 * 16, out + 1 * 16, sharpen, mtx) << 1;
+  return nz;
+}
+
+//------------------------------------------------------------------------------
+// Entry point
+
+extern void VP8EncDspInitSSE2(void);
+
+WEBP_TSAN_IGNORE_FUNCTION void VP8EncDspInitSSE2(void) {
+  VP8CollectHistogram = CollectHistogram_SSE2;
+  VP8EncPredLuma16 = Intra16Preds_SSE2;
+  VP8EncPredChroma8 = IntraChromaPreds_SSE2;
+  VP8EncPredLuma4 = Intra4Preds_SSE2;
+  VP8EncQuantizeBlock = QuantizeBlock_SSE2;
+  VP8EncQuantize2Blocks = Quantize2Blocks_SSE2;
+  VP8EncQuantizeBlockWHT = QuantizeBlockWHT_SSE2;
+  VP8ITransform = ITransform_SSE2;
+  VP8FTransform = FTransform_SSE2;
+  VP8FTransform2 = FTransform2_SSE2;
+  VP8FTransformWHT = FTransformWHT_SSE2;
+  VP8SSE16x16 = SSE16x16_SSE2;
+  VP8SSE16x8 = SSE16x8_SSE2;
+  VP8SSE8x8 = SSE8x8_SSE2;
+  VP8SSE4x4 = SSE4x4_SSE2;
+  VP8TDisto4x4 = Disto4x4_SSE2;
+  VP8TDisto16x16 = Disto16x16_SSE2;
+  VP8Mean16x4 = Mean16x4_SSE2;
+}
+
+#else  // !WEBP_USE_SSE2
+
+WEBP_DSP_INIT_STUB(VP8EncDspInitSSE2)
+
+#endif  // WEBP_USE_SSE2
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_enc_sse41.c b/vendor/github.com/sizeofint/webpanimation/dsp_enc_sse41.c
new file mode 100644
index 00000000..f7243cdf
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_enc_sse41.c
@@ -0,0 +1,339 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// SSE4 version of some encoding functions.
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include "dsp_dsp.h"
+
+#if defined(WEBP_USE_SSE41)
+#include <smmintrin.h>
+#include <stdlib.h>  // for abs()
+
+#include "dsp_common_sse2.h"
+#include "enc_vp8i_enc.h"
+
+//------------------------------------------------------------------------------
+// Compute susceptibility based on DCT-coeff histograms.
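+// (Same binning as the SSE2 version, but _mm_abs_epi16 replaces the
+// subtract/max emulation of abs().)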
+ +static void CollectHistogram_SSE41(const uint8_t* ref, const uint8_t* pred, + int start_block, int end_block, + VP8Histogram* const histo) { + const __m128i max_coeff_thresh = _mm_set1_epi16(MAX_COEFF_THRESH); + int j; + int distribution[MAX_COEFF_THRESH + 1] = { 0 }; + for (j = start_block; j < end_block; ++j) { + int16_t out[16]; + int k; + + VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out); + + // Convert coefficients to bin (within out[]). + { + // Load. + const __m128i out0 = _mm_loadu_si128((__m128i*)&out[0]); + const __m128i out1 = _mm_loadu_si128((__m128i*)&out[8]); + // v = abs(out) >> 3 + const __m128i abs0 = _mm_abs_epi16(out0); + const __m128i abs1 = _mm_abs_epi16(out1); + const __m128i v0 = _mm_srai_epi16(abs0, 3); + const __m128i v1 = _mm_srai_epi16(abs1, 3); + // bin = min(v, MAX_COEFF_THRESH) + const __m128i bin0 = _mm_min_epi16(v0, max_coeff_thresh); + const __m128i bin1 = _mm_min_epi16(v1, max_coeff_thresh); + // Store. + _mm_storeu_si128((__m128i*)&out[0], bin0); + _mm_storeu_si128((__m128i*)&out[8], bin1); + } + + // Convert coefficients to bin. + for (k = 0; k < 16; ++k) { + ++distribution[out[k]]; + } + } + VP8SetHistogramData(distribution, histo); +} + +//------------------------------------------------------------------------------ +// Texture distortion +// +// We try to match the spectral content (weighted) between source and +// reconstructed samples. + +// Hadamard transform +// Returns the weighted sum of the absolute value of transformed coefficients. +// w[] contains a row-major 4 by 4 symmetric matrix. +static int TTransform_SSE41(const uint8_t* inA, const uint8_t* inB, + const uint16_t* const w) { + int32_t sum[4]; + __m128i tmp_0, tmp_1, tmp_2, tmp_3; + + // Load and combine inputs. + { + const __m128i inA_0 = _mm_loadu_si128((const __m128i*)&inA[BPS * 0]); + const __m128i inA_1 = _mm_loadu_si128((const __m128i*)&inA[BPS * 1]); + const __m128i inA_2 = _mm_loadu_si128((const __m128i*)&inA[BPS * 2]); + // In SSE4.1, with gcc 4.8 at least (maybe other versions), + // _mm_loadu_si128 is faster than _mm_loadl_epi64. But for the last lump + // of inA and inB, _mm_loadl_epi64 is still used not to have an out of + // bound read. + const __m128i inA_3 = _mm_loadl_epi64((const __m128i*)&inA[BPS * 3]); + const __m128i inB_0 = _mm_loadu_si128((const __m128i*)&inB[BPS * 0]); + const __m128i inB_1 = _mm_loadu_si128((const __m128i*)&inB[BPS * 1]); + const __m128i inB_2 = _mm_loadu_si128((const __m128i*)&inB[BPS * 2]); + const __m128i inB_3 = _mm_loadl_epi64((const __m128i*)&inB[BPS * 3]); + + // Combine inA and inB (we'll do two transforms in parallel). + const __m128i inAB_0 = _mm_unpacklo_epi32(inA_0, inB_0); + const __m128i inAB_1 = _mm_unpacklo_epi32(inA_1, inB_1); + const __m128i inAB_2 = _mm_unpacklo_epi32(inA_2, inB_2); + const __m128i inAB_3 = _mm_unpacklo_epi32(inA_3, inB_3); + tmp_0 = _mm_cvtepu8_epi16(inAB_0); + tmp_1 = _mm_cvtepu8_epi16(inAB_1); + tmp_2 = _mm_cvtepu8_epi16(inAB_2); + tmp_3 = _mm_cvtepu8_epi16(inAB_3); + // a00 a01 a02 a03 b00 b01 b02 b03 + // a10 a11 a12 a13 b10 b11 b12 b13 + // a20 a21 a22 a23 b20 b21 b22 b23 + // a30 a31 a32 a33 b30 b31 b32 b33 + } + + // Vertical pass first to avoid a transpose (vertical and horizontal passes + // are commutative because w/kWeightY is symmetric) and subsequent transpose. + { + // Calculate a and b (two 4x4 at once). 
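+    // Hadamard butterfly on rows: a0 = t0 + t2, a1 = t1 + t3, a2 = t1 - t3,
+    // a3 = t0 - t2, then b = (a0 + a1, a3 + a2, a3 - a2, a0 - a1).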
+ const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2); + const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3); + const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3); + const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2); + const __m128i b0 = _mm_add_epi16(a0, a1); + const __m128i b1 = _mm_add_epi16(a3, a2); + const __m128i b2 = _mm_sub_epi16(a3, a2); + const __m128i b3 = _mm_sub_epi16(a0, a1); + // a00 a01 a02 a03 b00 b01 b02 b03 + // a10 a11 a12 a13 b10 b11 b12 b13 + // a20 a21 a22 a23 b20 b21 b22 b23 + // a30 a31 a32 a33 b30 b31 b32 b33 + + // Transpose the two 4x4. + VP8Transpose_2_4x4_16b(&b0, &b1, &b2, &b3, &tmp_0, &tmp_1, &tmp_2, &tmp_3); + } + + // Horizontal pass and difference of weighted sums. + { + // Load all inputs. + const __m128i w_0 = _mm_loadu_si128((const __m128i*)&w[0]); + const __m128i w_8 = _mm_loadu_si128((const __m128i*)&w[8]); + + // Calculate a and b (two 4x4 at once). + const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2); + const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3); + const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3); + const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2); + const __m128i b0 = _mm_add_epi16(a0, a1); + const __m128i b1 = _mm_add_epi16(a3, a2); + const __m128i b2 = _mm_sub_epi16(a3, a2); + const __m128i b3 = _mm_sub_epi16(a0, a1); + + // Separate the transforms of inA and inB. + __m128i A_b0 = _mm_unpacklo_epi64(b0, b1); + __m128i A_b2 = _mm_unpacklo_epi64(b2, b3); + __m128i B_b0 = _mm_unpackhi_epi64(b0, b1); + __m128i B_b2 = _mm_unpackhi_epi64(b2, b3); + + A_b0 = _mm_abs_epi16(A_b0); + A_b2 = _mm_abs_epi16(A_b2); + B_b0 = _mm_abs_epi16(B_b0); + B_b2 = _mm_abs_epi16(B_b2); + + // weighted sums + A_b0 = _mm_madd_epi16(A_b0, w_0); + A_b2 = _mm_madd_epi16(A_b2, w_8); + B_b0 = _mm_madd_epi16(B_b0, w_0); + B_b2 = _mm_madd_epi16(B_b2, w_8); + A_b0 = _mm_add_epi32(A_b0, A_b2); + B_b0 = _mm_add_epi32(B_b0, B_b2); + + // difference of weighted sums + A_b2 = _mm_sub_epi32(A_b0, B_b0); + _mm_storeu_si128((__m128i*)&sum[0], A_b2); + } + return sum[0] + sum[1] + sum[2] + sum[3]; +} + +static int Disto4x4_SSE41(const uint8_t* const a, const uint8_t* const b, + const uint16_t* const w) { + const int diff_sum = TTransform_SSE41(a, b, w); + return abs(diff_sum) >> 5; +} + +static int Disto16x16_SSE41(const uint8_t* const a, const uint8_t* const b, + const uint16_t* const w) { + int D = 0; + int x, y; + for (y = 0; y < 16 * BPS; y += 4 * BPS) { + for (x = 0; x < 16; x += 4) { + D += Disto4x4_SSE41(a + x + y, b + x + y, w); + } + } + return D; +} + +//------------------------------------------------------------------------------ +// Quantization +// + +// Generates a pshufb constant for shuffling 16b words. +#define PSHUFB_CST(A,B,C,D,E,F,G,H) \ + _mm_set_epi8(2 * (H) + 1, 2 * (H) + 0, 2 * (G) + 1, 2 * (G) + 0, \ + 2 * (F) + 1, 2 * (F) + 0, 2 * (E) + 1, 2 * (E) + 0, \ + 2 * (D) + 1, 2 * (D) + 0, 2 * (C) + 1, 2 * (C) + 0, \ + 2 * (B) + 1, 2 * (B) + 0, 2 * (A) + 1, 2 * (A) + 0) + +static WEBP_INLINE int DoQuantizeBlock_SSE41(int16_t in[16], int16_t out[16], + const uint16_t* const sharpen, + const VP8Matrix* const mtx) { + const __m128i max_coeff_2047 = _mm_set1_epi16(MAX_LEVEL); + const __m128i zero = _mm_setzero_si128(); + __m128i out0, out8; + __m128i packed_out; + + // Load all inputs. 
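+  // Same arithmetic as the SSE2 quantizer, but _mm_abs_epi16 and
+  // _mm_sign_epi16 replace the manual sign handling, and a pshufb pair
+  // replaces the pshuflo/pshufhi zigzag shuffles.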
+ __m128i in0 = _mm_loadu_si128((__m128i*)&in[0]); + __m128i in8 = _mm_loadu_si128((__m128i*)&in[8]); + const __m128i iq0 = _mm_loadu_si128((const __m128i*)&mtx->iq_[0]); + const __m128i iq8 = _mm_loadu_si128((const __m128i*)&mtx->iq_[8]); + const __m128i q0 = _mm_loadu_si128((const __m128i*)&mtx->q_[0]); + const __m128i q8 = _mm_loadu_si128((const __m128i*)&mtx->q_[8]); + + // coeff = abs(in) + __m128i coeff0 = _mm_abs_epi16(in0); + __m128i coeff8 = _mm_abs_epi16(in8); + + // coeff = abs(in) + sharpen + if (sharpen != NULL) { + const __m128i sharpen0 = _mm_loadu_si128((const __m128i*)&sharpen[0]); + const __m128i sharpen8 = _mm_loadu_si128((const __m128i*)&sharpen[8]); + coeff0 = _mm_add_epi16(coeff0, sharpen0); + coeff8 = _mm_add_epi16(coeff8, sharpen8); + } + + // out = (coeff * iQ + B) >> QFIX + { + // doing calculations with 32b precision (QFIX=17) + // out = (coeff * iQ) + const __m128i coeff_iQ0H = _mm_mulhi_epu16(coeff0, iq0); + const __m128i coeff_iQ0L = _mm_mullo_epi16(coeff0, iq0); + const __m128i coeff_iQ8H = _mm_mulhi_epu16(coeff8, iq8); + const __m128i coeff_iQ8L = _mm_mullo_epi16(coeff8, iq8); + __m128i out_00 = _mm_unpacklo_epi16(coeff_iQ0L, coeff_iQ0H); + __m128i out_04 = _mm_unpackhi_epi16(coeff_iQ0L, coeff_iQ0H); + __m128i out_08 = _mm_unpacklo_epi16(coeff_iQ8L, coeff_iQ8H); + __m128i out_12 = _mm_unpackhi_epi16(coeff_iQ8L, coeff_iQ8H); + // out = (coeff * iQ + B) + const __m128i bias_00 = _mm_loadu_si128((const __m128i*)&mtx->bias_[0]); + const __m128i bias_04 = _mm_loadu_si128((const __m128i*)&mtx->bias_[4]); + const __m128i bias_08 = _mm_loadu_si128((const __m128i*)&mtx->bias_[8]); + const __m128i bias_12 = _mm_loadu_si128((const __m128i*)&mtx->bias_[12]); + out_00 = _mm_add_epi32(out_00, bias_00); + out_04 = _mm_add_epi32(out_04, bias_04); + out_08 = _mm_add_epi32(out_08, bias_08); + out_12 = _mm_add_epi32(out_12, bias_12); + // out = QUANTDIV(coeff, iQ, B, QFIX) + out_00 = _mm_srai_epi32(out_00, QFIX); + out_04 = _mm_srai_epi32(out_04, QFIX); + out_08 = _mm_srai_epi32(out_08, QFIX); + out_12 = _mm_srai_epi32(out_12, QFIX); + + // pack result as 16b + out0 = _mm_packs_epi32(out_00, out_04); + out8 = _mm_packs_epi32(out_08, out_12); + + // if (coeff > 2047) coeff = 2047 + out0 = _mm_min_epi16(out0, max_coeff_2047); + out8 = _mm_min_epi16(out8, max_coeff_2047); + } + + // put sign back + out0 = _mm_sign_epi16(out0, in0); + out8 = _mm_sign_epi16(out8, in8); + + // in = out * Q + in0 = _mm_mullo_epi16(out0, q0); + in8 = _mm_mullo_epi16(out8, q8); + + _mm_storeu_si128((__m128i*)&in[0], in0); + _mm_storeu_si128((__m128i*)&in[8], in8); + + // zigzag the output before storing it. The re-ordering is: + // 0 1 2 3 4 5 6 7 | 8 9 10 11 12 13 14 15 + // -> 0 1 4[8]5 2 3 6 | 9 12 13 10 [7]11 14 15 + // There's only two misplaced entries ([8] and [7]) that are crossing the + // reg's boundaries. + // We use pshufb instead of pshuflo/pshufhi. 
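+  // A single pshufb can only pick bytes from its own 128-bit register, so
+  // each half is shuffled in place and the two stray coefficients (#7 and
+  // #8) are extracted with dedicated constants and OR-ed into the other
+  // half's result.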
+  {
+    const __m128i kCst_lo = PSHUFB_CST(0, 1, 4, -1, 5, 2, 3, 6);
+    const __m128i kCst_7 = PSHUFB_CST(-1, -1, -1, -1, 7, -1, -1, -1);
+    const __m128i tmp_lo = _mm_shuffle_epi8(out0, kCst_lo);
+    const __m128i tmp_7 = _mm_shuffle_epi8(out0, kCst_7);   // extract #7
+    const __m128i kCst_hi = PSHUFB_CST(1, 4, 5, 2, -1, 3, 6, 7);
+    const __m128i kCst_8 = PSHUFB_CST(-1, -1, -1, 0, -1, -1, -1, -1);
+    const __m128i tmp_hi = _mm_shuffle_epi8(out8, kCst_hi);
+    const __m128i tmp_8 = _mm_shuffle_epi8(out8, kCst_8);   // extract #8
+    const __m128i out_z0 = _mm_or_si128(tmp_lo, tmp_8);
+    const __m128i out_z8 = _mm_or_si128(tmp_hi, tmp_7);
+    _mm_storeu_si128((__m128i*)&out[0], out_z0);
+    _mm_storeu_si128((__m128i*)&out[8], out_z8);
+    packed_out = _mm_packs_epi16(out_z0, out_z8);
+  }
+
+  // detect if all 'out' values are zeroes or not
+  return (_mm_movemask_epi8(_mm_cmpeq_epi8(packed_out, zero)) != 0xffff);
+}
+
+#undef PSHUFB_CST
+
+static int QuantizeBlock_SSE41(int16_t in[16], int16_t out[16],
+                               const VP8Matrix* const mtx) {
+  return DoQuantizeBlock_SSE41(in, out, &mtx->sharpen_[0], mtx);
+}
+
+static int QuantizeBlockWHT_SSE41(int16_t in[16], int16_t out[16],
+                                  const VP8Matrix* const mtx) {
+  return DoQuantizeBlock_SSE41(in, out, NULL, mtx);
+}
+
+static int Quantize2Blocks_SSE41(int16_t in[32], int16_t out[32],
+                                 const VP8Matrix* const mtx) {
+  int nz;
+  const uint16_t* const sharpen = &mtx->sharpen_[0];
+  nz = DoQuantizeBlock_SSE41(in + 0 * 16, out + 0 * 16, sharpen, mtx) << 0;
+  nz |= DoQuantizeBlock_SSE41(in + 1 * 16, out + 1 * 16, sharpen, mtx) << 1;
+  return nz;
+}
+
+//------------------------------------------------------------------------------
+// Entry point
+
+extern void VP8EncDspInitSSE41(void);
+WEBP_TSAN_IGNORE_FUNCTION void VP8EncDspInitSSE41(void) {
+  VP8CollectHistogram = CollectHistogram_SSE41;
+  VP8EncQuantizeBlock = QuantizeBlock_SSE41;
+  VP8EncQuantize2Blocks = Quantize2Blocks_SSE41;
+  VP8EncQuantizeBlockWHT = QuantizeBlockWHT_SSE41;
+  VP8TDisto4x4 = Disto4x4_SSE41;
+  VP8TDisto16x16 = Disto16x16_SSE41;
+}
+
+#else  // !WEBP_USE_SSE41
+
+WEBP_DSP_INIT_STUB(VP8EncDspInitSSE41)
+
+#endif  // WEBP_USE_SSE41
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_filters.c b/vendor/github.com/sizeofint/webpanimation/dsp_filters.c
new file mode 100644
index 00000000..391c6149
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_filters.c
@@ -0,0 +1,287 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Spatial prediction using various filters
+//
+// Author: Urvang (urvang@google.com)
+
+#include "dsp_dsp.h"
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+//------------------------------------------------------------------------------
+// Helpful macro.
+
+# define SANITY_CHECK(in, out) \
+  assert((in) != NULL); \
+  assert((out) != NULL); \
+  assert(width > 0); \
+  assert(height > 0); \
+  assert(stride >= width); \
+  assert(row >= 0 && num_rows > 0 && row + num_rows <= height); \
+  (void)height;  // Silence unused warning.
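As a reading aid before the reference implementations, here is a minimal, self-contained sketch (an illustration only, not part of the vendored file) of the left-prediction round trip this filter family is built on; the real code below applies it row by row and predicts each row's leftmost pixel from the row above:

#include <stdint.h>
#include <stdio.h>

/* Forward filter: store each pixel as a delta against its left neighbor. */
static void filter_row(const uint8_t* src, uint8_t* dst, int len) {
  int i;
  dst[0] = src[0];  /* leftmost pixel is copied verbatim */
  for (i = 1; i < len; ++i) dst[i] = (uint8_t)(src[i] - src[i - 1]);
}

/* Inverse filter: a running sum restores the original pixels exactly. */
static void unfilter_row(const uint8_t* in, uint8_t* out, int len) {
  int i;
  out[0] = in[0];
  for (i = 1; i < len; ++i) out[i] = (uint8_t)(in[i] + out[i - 1]);
}

int main(void) {
  const uint8_t row[8] = { 10, 12, 12, 15, 200, 201, 199, 0 };
  uint8_t tmp[8], back[8];
  int i;
  filter_row(row, tmp, 8);
  unfilter_row(tmp, back, 8);
  for (i = 0; i < 8; ++i) printf("%u ", back[i]);  /* prints the input row */
  printf("\n");
  return 0;
}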
+ +#if !WEBP_NEON_OMIT_C_CODE +static WEBP_INLINE void PredictLine_C(const uint8_t* src, const uint8_t* pred, + uint8_t* dst, int length, int inverse) { + int i; + if (inverse) { + for (i = 0; i < length; ++i) dst[i] = (uint8_t)(src[i] + pred[i]); + } else { + for (i = 0; i < length; ++i) dst[i] = (uint8_t)(src[i] - pred[i]); + } +} + +//------------------------------------------------------------------------------ +// Horizontal filter. + +static WEBP_INLINE void DoHorizontalFilter_C(const uint8_t* in, + int width, int height, int stride, + int row, int num_rows, + int inverse, uint8_t* out) { + const uint8_t* preds; + const size_t start_offset = row * stride; + const int last_row = row + num_rows; + SANITY_CHECK(in, out); + in += start_offset; + out += start_offset; + preds = inverse ? out : in; + + if (row == 0) { + // Leftmost pixel is the same as input for topmost scanline. + out[0] = in[0]; + PredictLine_C(in + 1, preds, out + 1, width - 1, inverse); + row = 1; + preds += stride; + in += stride; + out += stride; + } + + // Filter line-by-line. + while (row < last_row) { + // Leftmost pixel is predicted from above. + PredictLine_C(in, preds - stride, out, 1, inverse); + PredictLine_C(in + 1, preds, out + 1, width - 1, inverse); + ++row; + preds += stride; + in += stride; + out += stride; + } +} + +//------------------------------------------------------------------------------ +// Vertical filter. + +static WEBP_INLINE void DoVerticalFilter_C(const uint8_t* in, + int width, int height, int stride, + int row, int num_rows, + int inverse, uint8_t* out) { + const uint8_t* preds; + const size_t start_offset = row * stride; + const int last_row = row + num_rows; + SANITY_CHECK(in, out); + in += start_offset; + out += start_offset; + preds = inverse ? out : in; + + if (row == 0) { + // Very first top-left pixel is copied. + out[0] = in[0]; + // Rest of top scan-line is left-predicted. + PredictLine_C(in + 1, preds, out + 1, width - 1, inverse); + row = 1; + in += stride; + out += stride; + } else { + // We are starting from in-between. Make sure 'preds' points to prev row. + preds -= stride; + } + + // Filter line-by-line. + while (row < last_row) { + PredictLine_C(in, preds, out, width, inverse); + ++row; + preds += stride; + in += stride; + out += stride; + } +} +#endif // !WEBP_NEON_OMIT_C_CODE + +//------------------------------------------------------------------------------ +// Gradient filter. + +static WEBP_INLINE int GradientPredictor_C(uint8_t a, uint8_t b, uint8_t c) { + const int g = a + b - c; + return ((g & ~0xff) == 0) ? g : (g < 0) ? 0 : 255; // clip to 8bit +} + +#if !WEBP_NEON_OMIT_C_CODE +static WEBP_INLINE void DoGradientFilter_C(const uint8_t* in, + int width, int height, int stride, + int row, int num_rows, + int inverse, uint8_t* out) { + const uint8_t* preds; + const size_t start_offset = row * stride; + const int last_row = row + num_rows; + SANITY_CHECK(in, out); + in += start_offset; + out += start_offset; + preds = inverse ? out : in; + + // left prediction for top scan-line + if (row == 0) { + out[0] = in[0]; + PredictLine_C(in + 1, preds, out + 1, width - 1, inverse); + row = 1; + preds += stride; + in += stride; + out += stride; + } + + // Filter line-by-line. + while (row < last_row) { + int w; + // leftmost pixel: predict from above. + PredictLine_C(in, preds - stride, out, 1, inverse); + for (w = 1; w < width; ++w) { + const int pred = GradientPredictor_C(preds[w - 1], + preds[w - stride], + preds[w - stride - 1]); + out[w] = (uint8_t)(in[w] + (inverse ? 
pred : -pred)); + } + ++row; + preds += stride; + in += stride; + out += stride; + } +} +#endif // !WEBP_NEON_OMIT_C_CODE + +#undef SANITY_CHECK + +//------------------------------------------------------------------------------ + +#if !WEBP_NEON_OMIT_C_CODE +static void HorizontalFilter_C(const uint8_t* data, int width, int height, + int stride, uint8_t* filtered_data) { + DoHorizontalFilter_C(data, width, height, stride, 0, height, 0, + filtered_data); +} + +static void VerticalFilter_C(const uint8_t* data, int width, int height, + int stride, uint8_t* filtered_data) { + DoVerticalFilter_C(data, width, height, stride, 0, height, 0, filtered_data); +} + +static void GradientFilter_C(const uint8_t* data, int width, int height, + int stride, uint8_t* filtered_data) { + DoGradientFilter_C(data, width, height, stride, 0, height, 0, filtered_data); +} +#endif // !WEBP_NEON_OMIT_C_CODE + +//------------------------------------------------------------------------------ + +static void HorizontalUnfilter_C(const uint8_t* prev, const uint8_t* in, + uint8_t* out, int width) { + uint8_t pred = (prev == NULL) ? 0 : prev[0]; + int i; + for (i = 0; i < width; ++i) { + out[i] = (uint8_t)(pred + in[i]); + pred = out[i]; + } +} + +#if !WEBP_NEON_OMIT_C_CODE +static void VerticalUnfilter_C(const uint8_t* prev, const uint8_t* in, + uint8_t* out, int width) { + if (prev == NULL) { + HorizontalUnfilter_C(NULL, in, out, width); + } else { + int i; + for (i = 0; i < width; ++i) out[i] = (uint8_t)(prev[i] + in[i]); + } +} +#endif // !WEBP_NEON_OMIT_C_CODE + +static void GradientUnfilter_C(const uint8_t* prev, const uint8_t* in, + uint8_t* out, int width) { + if (prev == NULL) { + HorizontalUnfilter_C(NULL, in, out, width); + } else { + uint8_t top = prev[0], top_left = top, left = top; + int i; + for (i = 0; i < width; ++i) { + top = prev[i]; // need to read this first, in case prev==out + left = (uint8_t)(in[i] + GradientPredictor_C(left, top, top_left)); + top_left = top; + out[i] = left; + } + } +} + +//------------------------------------------------------------------------------ +// Init function + +WebPFilterFunc WebPFilters[WEBP_FILTER_LAST]; +WebPUnfilterFunc WebPUnfilters[WEBP_FILTER_LAST]; + +extern void VP8FiltersInitMIPSdspR2(void); +extern void VP8FiltersInitMSA(void); +extern void VP8FiltersInitNEON(void); +extern void VP8FiltersInitSSE2(void); + +WEBP_DSP_INIT_FUNC(VP8FiltersInit) { + WebPUnfilters[WEBP_FILTER_NONE] = NULL; +#if !WEBP_NEON_OMIT_C_CODE + WebPUnfilters[WEBP_FILTER_HORIZONTAL] = HorizontalUnfilter_C; + WebPUnfilters[WEBP_FILTER_VERTICAL] = VerticalUnfilter_C; +#endif + WebPUnfilters[WEBP_FILTER_GRADIENT] = GradientUnfilter_C; + + WebPFilters[WEBP_FILTER_NONE] = NULL; +#if !WEBP_NEON_OMIT_C_CODE + WebPFilters[WEBP_FILTER_HORIZONTAL] = HorizontalFilter_C; + WebPFilters[WEBP_FILTER_VERTICAL] = VerticalFilter_C; + WebPFilters[WEBP_FILTER_GRADIENT] = GradientFilter_C; +#endif + + if (VP8GetCPUInfo != NULL) { +#if defined(WEBP_USE_SSE2) + if (VP8GetCPUInfo(kSSE2)) { + VP8FiltersInitSSE2(); + } +#endif +#if defined(WEBP_USE_MIPS_DSP_R2) + if (VP8GetCPUInfo(kMIPSdspR2)) { + VP8FiltersInitMIPSdspR2(); + } +#endif +#if defined(WEBP_USE_MSA) + if (VP8GetCPUInfo(kMSA)) { + VP8FiltersInitMSA(); + } +#endif + } + +#if defined(WEBP_USE_NEON) + if (WEBP_NEON_OMIT_C_CODE || + (VP8GetCPUInfo != NULL && VP8GetCPUInfo(kNEON))) { + VP8FiltersInitNEON(); + } +#endif + + assert(WebPUnfilters[WEBP_FILTER_HORIZONTAL] != NULL); + assert(WebPUnfilters[WEBP_FILTER_VERTICAL] != NULL); + 
assert(WebPUnfilters[WEBP_FILTER_GRADIENT] != NULL);
+  assert(WebPFilters[WEBP_FILTER_HORIZONTAL] != NULL);
+  assert(WebPFilters[WEBP_FILTER_VERTICAL] != NULL);
+  assert(WebPFilters[WEBP_FILTER_GRADIENT] != NULL);
+}
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_filters_mips_dsp_r2.c b/vendor/github.com/sizeofint/webpanimation/dsp_filters_mips_dsp_r2.c
new file mode 100644
index 00000000..0f710c78
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_filters_mips_dsp_r2.c
@@ -0,0 +1,402 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Spatial prediction using various filters
+//
+// Author(s): Branimir Vasic (branimir.vasic@imgtec.com)
+//            Djordje Pesut (djordje.pesut@imgtec.com)
+
+#include "dsp_dsp.h"
+
+#if defined(WEBP_USE_MIPS_DSP_R2)
+
+#include "dsp_dsp.h"
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+//------------------------------------------------------------------------------
+// Helpful macro.
+
+# define SANITY_CHECK(in, out) \
+  assert(in != NULL); \
+  assert(out != NULL); \
+  assert(width > 0); \
+  assert(height > 0); \
+  assert(stride >= width); \
+  assert(row >= 0 && num_rows > 0 && row + num_rows <= height); \
+  (void)height;  // Silence unused warning.
+
+#define DO_PREDICT_LINE(SRC, DST, LENGTH, INVERSE) do { \
+    const uint8_t* psrc = (uint8_t*)(SRC); \
+    uint8_t* pdst = (uint8_t*)(DST); \
+    const int ilength = (int)(LENGTH); \
+    int temp0, temp1, temp2, temp3, temp4, temp5, temp6; \
+    __asm__ volatile ( \
+      ".set push \n\t" \
+      ".set noreorder \n\t" \
+      "srl %[temp0], %[length], 2 \n\t" \
+      "beqz %[temp0], 4f \n\t" \
+      " andi %[temp6], %[length], 3 \n\t" \
+      ".if " #INVERSE " \n\t" \
+      "1: \n\t" \
+      "lbu %[temp1], -1(%[dst]) \n\t" \
+      "lbu %[temp2], 0(%[src]) \n\t" \
+      "lbu %[temp3], 1(%[src]) \n\t" \
+      "lbu %[temp4], 2(%[src]) \n\t" \
+      "lbu %[temp5], 3(%[src]) \n\t" \
+      "addu %[temp1], %[temp1], %[temp2] \n\t" \
+      "addu %[temp2], %[temp1], %[temp3] \n\t" \
+      "addu %[temp3], %[temp2], %[temp4] \n\t" \
+      "addu %[temp4], %[temp3], %[temp5] \n\t" \
+      "sb %[temp1], 0(%[dst]) \n\t" \
+      "sb %[temp2], 1(%[dst]) \n\t" \
+      "sb %[temp3], 2(%[dst]) \n\t" \
+      "sb %[temp4], 3(%[dst]) \n\t" \
+      "addiu %[src], %[src], 4 \n\t" \
+      "addiu %[temp0], %[temp0], -1 \n\t" \
+      "bnez %[temp0], 1b \n\t" \
+      " addiu %[dst], %[dst], 4 \n\t" \
+      ".else \n\t" \
+      "1: \n\t" \
+      "ulw %[temp1], -1(%[src]) \n\t" \
+      "ulw %[temp2], 0(%[src]) \n\t" \
+      "addiu %[src], %[src], 4 \n\t" \
+      "addiu %[temp0], %[temp0], -1 \n\t" \
+      "subu.qb %[temp3], %[temp2], %[temp1] \n\t" \
+      "usw %[temp3], 0(%[dst]) \n\t" \
+      "bnez %[temp0], 1b \n\t" \
+      " addiu %[dst], %[dst], 4 \n\t" \
+      ".endif \n\t" \
+      "4: \n\t" \
+      "beqz %[temp6], 3f \n\t" \
+      " nop \n\t" \
+      "2: \n\t" \
+      "lbu %[temp2], 0(%[src]) \n\t" \
+      ".if " #INVERSE " \n\t" \
+      "lbu %[temp1], -1(%[dst]) \n\t" \
+      "addu %[temp3], %[temp1], %[temp2] \n\t" \
+      ".else \n\t" \
+      "lbu %[temp1], -1(%[src]) \n\t" \
+      "subu %[temp3], %[temp2], %[temp1] \n\t" \
+      ".endif \n\t" \
+      "addiu %[src], %[src], 1 \n\t" \
+      "sb %[temp3], 0(%[dst]) \n\t" \
+      "addiu %[temp6], %[temp6], -1 \n\t" \
+      "bnez %[temp6], 2b \n\t" \
+      " addiu %[dst],
%[dst], 1 \n\t" \ + "3: \n\t" \ + ".set pop \n\t" \ + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), \ + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), \ + [temp6]"=&r"(temp6), [dst]"+&r"(pdst), [src]"+&r"(psrc) \ + : [length]"r"(ilength) \ + : "memory" \ + ); \ + } while (0) + +static WEBP_INLINE void PredictLine_MIPSdspR2(const uint8_t* src, uint8_t* dst, + int length) { + DO_PREDICT_LINE(src, dst, length, 0); +} + +#define DO_PREDICT_LINE_VERTICAL(SRC, PRED, DST, LENGTH, INVERSE) do { \ + const uint8_t* psrc = (uint8_t*)(SRC); \ + const uint8_t* ppred = (uint8_t*)(PRED); \ + uint8_t* pdst = (uint8_t*)(DST); \ + const int ilength = (int)(LENGTH); \ + int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7; \ + __asm__ volatile ( \ + ".set push \n\t" \ + ".set noreorder \n\t" \ + "srl %[temp0], %[length], 0x3 \n\t" \ + "beqz %[temp0], 4f \n\t" \ + " andi %[temp7], %[length], 0x7 \n\t" \ + "1: \n\t" \ + "ulw %[temp1], 0(%[src]) \n\t" \ + "ulw %[temp2], 0(%[pred]) \n\t" \ + "ulw %[temp3], 4(%[src]) \n\t" \ + "ulw %[temp4], 4(%[pred]) \n\t" \ + "addiu %[src], %[src], 8 \n\t" \ + ".if " #INVERSE " \n\t" \ + "addu.qb %[temp5], %[temp1], %[temp2] \n\t" \ + "addu.qb %[temp6], %[temp3], %[temp4] \n\t" \ + ".else \n\t" \ + "subu.qb %[temp5], %[temp1], %[temp2] \n\t" \ + "subu.qb %[temp6], %[temp3], %[temp4] \n\t" \ + ".endif \n\t" \ + "addiu %[pred], %[pred], 8 \n\t" \ + "usw %[temp5], 0(%[dst]) \n\t" \ + "usw %[temp6], 4(%[dst]) \n\t" \ + "addiu %[temp0], %[temp0], -1 \n\t" \ + "bnez %[temp0], 1b \n\t" \ + " addiu %[dst], %[dst], 8 \n\t" \ + "4: \n\t" \ + "beqz %[temp7], 3f \n\t" \ + " nop \n\t" \ + "2: \n\t" \ + "lbu %[temp1], 0(%[src]) \n\t" \ + "lbu %[temp2], 0(%[pred]) \n\t" \ + "addiu %[src], %[src], 1 \n\t" \ + "addiu %[pred], %[pred], 1 \n\t" \ + ".if " #INVERSE " \n\t" \ + "addu %[temp3], %[temp1], %[temp2] \n\t" \ + ".else \n\t" \ + "subu %[temp3], %[temp1], %[temp2] \n\t" \ + ".endif \n\t" \ + "sb %[temp3], 0(%[dst]) \n\t" \ + "addiu %[temp7], %[temp7], -1 \n\t" \ + "bnez %[temp7], 2b \n\t" \ + " addiu %[dst], %[dst], 1 \n\t" \ + "3: \n\t" \ + ".set pop \n\t" \ + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), \ + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), \ + [temp6]"=&r"(temp6), [temp7]"=&r"(temp7), [pred]"+&r"(ppred), \ + [dst]"+&r"(pdst), [src]"+&r"(psrc) \ + : [length]"r"(ilength) \ + : "memory" \ + ); \ + } while (0) + +#define PREDICT_LINE_ONE_PASS(SRC, PRED, DST) do { \ + int temp1, temp2, temp3; \ + __asm__ volatile ( \ + "lbu %[temp1], 0(%[src]) \n\t" \ + "lbu %[temp2], 0(%[pred]) \n\t" \ + "subu %[temp3], %[temp1], %[temp2] \n\t" \ + "sb %[temp3], 0(%[dst]) \n\t" \ + : [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), [temp3]"=&r"(temp3) \ + : [pred]"r"((PRED)), [dst]"r"((DST)), [src]"r"((SRC)) \ + : "memory" \ + ); \ + } while (0) + +//------------------------------------------------------------------------------ +// Horizontal filter. 
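A plain-C rendering of what the DO_PREDICT_LINE macro above computes may help here (a sketch only; the assembly additionally processes four bytes per iteration with unaligned word accesses). Note that in the inverse case dst[-1] must already hold the previously reconstructed pixel:

#include <stdint.h>

/* Scalar equivalent of DO_PREDICT_LINE(SRC, DST, LENGTH, INVERSE). */
static void do_predict_line_ref(const uint8_t* src, uint8_t* dst,
                                int length, int inverse) {
  int i;
  for (i = 0; i < length; ++i) {
    dst[i] = (uint8_t)(inverse ? dst[i - 1] + src[i]    /* reconstruct */
                               : src[i] - src[i - 1]);  /* emit deltas */
  }
}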
+ +#define FILTER_LINE_BY_LINE do { \ + while (row < last_row) { \ + PREDICT_LINE_ONE_PASS(in, preds - stride, out); \ + DO_PREDICT_LINE(in + 1, out + 1, width - 1, 0); \ + ++row; \ + preds += stride; \ + in += stride; \ + out += stride; \ + } \ + } while (0) + +static WEBP_INLINE void DoHorizontalFilter_MIPSdspR2(const uint8_t* in, + int width, int height, + int stride, + int row, int num_rows, + uint8_t* out) { + const uint8_t* preds; + const size_t start_offset = row * stride; + const int last_row = row + num_rows; + SANITY_CHECK(in, out); + in += start_offset; + out += start_offset; + preds = in; + + if (row == 0) { + // Leftmost pixel is the same as input for topmost scanline. + out[0] = in[0]; + PredictLine_MIPSdspR2(in + 1, out + 1, width - 1); + row = 1; + preds += stride; + in += stride; + out += stride; + } + + // Filter line-by-line. + FILTER_LINE_BY_LINE; +} +#undef FILTER_LINE_BY_LINE + +static void HorizontalFilter_MIPSdspR2(const uint8_t* data, + int width, int height, + int stride, uint8_t* filtered_data) { + DoHorizontalFilter_MIPSdspR2(data, width, height, stride, 0, height, + filtered_data); +} + +//------------------------------------------------------------------------------ +// Vertical filter. + +#define FILTER_LINE_BY_LINE do { \ + while (row < last_row) { \ + DO_PREDICT_LINE_VERTICAL(in, preds, out, width, 0); \ + ++row; \ + preds += stride; \ + in += stride; \ + out += stride; \ + } \ + } while (0) + +static WEBP_INLINE void DoVerticalFilter_MIPSdspR2(const uint8_t* in, + int width, int height, + int stride, + int row, int num_rows, + uint8_t* out) { + const uint8_t* preds; + const size_t start_offset = row * stride; + const int last_row = row + num_rows; + SANITY_CHECK(in, out); + in += start_offset; + out += start_offset; + preds = in; + + if (row == 0) { + // Very first top-left pixel is copied. + out[0] = in[0]; + // Rest of top scan-line is left-predicted. + PredictLine_MIPSdspR2(in + 1, out + 1, width - 1); + row = 1; + in += stride; + out += stride; + } else { + // We are starting from in-between. Make sure 'preds' points to prev row. + preds -= stride; + } + + // Filter line-by-line. + FILTER_LINE_BY_LINE; +} +#undef FILTER_LINE_BY_LINE + +static void VerticalFilter_MIPSdspR2(const uint8_t* data, int width, int height, + int stride, uint8_t* filtered_data) { + DoVerticalFilter_MIPSdspR2(data, width, height, stride, 0, height, + filtered_data); +} + +//------------------------------------------------------------------------------ +// Gradient filter. 
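The gradient code below leans on the DSP's saturating shift (shll_s.w) and pack (precrqu_s.qb.ph) instructions to clip the prediction without branches; the scalar computation they replace is:

#include <stdint.h>

/* Reference for the predictor below: g = a + b - c clipped to [0, 255]. */
static int gradient_predictor_ref(uint8_t a, uint8_t b, uint8_t c) {
  const int g = a + b - c;  /* range is [-255, 510] */
  return (g < 0) ? 0 : (g > 255) ? 255 : g;
}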
+ +static int GradientPredictor_MIPSdspR2(uint8_t a, uint8_t b, uint8_t c) { + int temp0; + __asm__ volatile ( + "addu %[temp0], %[a], %[b] \n\t" + "subu %[temp0], %[temp0], %[c] \n\t" + "shll_s.w %[temp0], %[temp0], 23 \n\t" + "precrqu_s.qb.ph %[temp0], %[temp0], $zero \n\t" + "srl %[temp0], %[temp0], 24 \n\t" + : [temp0]"=&r"(temp0) + : [a]"r"(a),[b]"r"(b),[c]"r"(c) + ); + return temp0; +} + +#define FILTER_LINE_BY_LINE(PREDS, OPERATION) do { \ + while (row < last_row) { \ + int w; \ + PREDICT_LINE_ONE_PASS(in, PREDS - stride, out); \ + for (w = 1; w < width; ++w) { \ + const int pred = GradientPredictor_MIPSdspR2(PREDS[w - 1], \ + PREDS[w - stride], \ + PREDS[w - stride - 1]); \ + out[w] = in[w] OPERATION pred; \ + } \ + ++row; \ + in += stride; \ + out += stride; \ + } \ + } while (0) + +static void DoGradientFilter_MIPSdspR2(const uint8_t* in, + int width, int height, int stride, + int row, int num_rows, uint8_t* out) { + const uint8_t* preds; + const size_t start_offset = row * stride; + const int last_row = row + num_rows; + SANITY_CHECK(in, out); + in += start_offset; + out += start_offset; + preds = in; + + // left prediction for top scan-line + if (row == 0) { + out[0] = in[0]; + PredictLine_MIPSdspR2(in + 1, out + 1, width - 1); + row = 1; + preds += stride; + in += stride; + out += stride; + } + + // Filter line-by-line. + FILTER_LINE_BY_LINE(in, -); +} +#undef FILTER_LINE_BY_LINE + +static void GradientFilter_MIPSdspR2(const uint8_t* data, int width, int height, + int stride, uint8_t* filtered_data) { + DoGradientFilter_MIPSdspR2(data, width, height, stride, 0, height, + filtered_data); +} + +//------------------------------------------------------------------------------ + +static void HorizontalUnfilter_MIPSdspR2(const uint8_t* prev, const uint8_t* in, + uint8_t* out, int width) { + out[0] = in[0] + (prev == NULL ? 
0 : prev[0]);
+  DO_PREDICT_LINE(in + 1, out + 1, width - 1, 1);
+}
+
+static void VerticalUnfilter_MIPSdspR2(const uint8_t* prev, const uint8_t* in,
+                                       uint8_t* out, int width) {
+  if (prev == NULL) {
+    HorizontalUnfilter_MIPSdspR2(NULL, in, out, width);
+  } else {
+    DO_PREDICT_LINE_VERTICAL(in, prev, out, width, 1);
+  }
+}
+
+static void GradientUnfilter_MIPSdspR2(const uint8_t* prev, const uint8_t* in,
+                                       uint8_t* out, int width) {
+  if (prev == NULL) {
+    HorizontalUnfilter_MIPSdspR2(NULL, in, out, width);
+  } else {
+    uint8_t top = prev[0], top_left = top, left = top;
+    int i;
+    for (i = 0; i < width; ++i) {
+      top = prev[i];  // need to read this first, in case prev==dst
+      left = in[i] + GradientPredictor_MIPSdspR2(left, top, top_left);
+      top_left = top;
+      out[i] = left;
+    }
+  }
+}
+
+#undef DO_PREDICT_LINE_VERTICAL
+#undef PREDICT_LINE_ONE_PASS
+#undef DO_PREDICT_LINE
+#undef SANITY_CHECK
+
+//------------------------------------------------------------------------------
+// Entry point
+
+extern void VP8FiltersInitMIPSdspR2(void);
+
+WEBP_TSAN_IGNORE_FUNCTION void VP8FiltersInitMIPSdspR2(void) {
+  WebPUnfilters[WEBP_FILTER_HORIZONTAL] = HorizontalUnfilter_MIPSdspR2;
+  WebPUnfilters[WEBP_FILTER_VERTICAL] = VerticalUnfilter_MIPSdspR2;
+  WebPUnfilters[WEBP_FILTER_GRADIENT] = GradientUnfilter_MIPSdspR2;
+
+  WebPFilters[WEBP_FILTER_HORIZONTAL] = HorizontalFilter_MIPSdspR2;
+  WebPFilters[WEBP_FILTER_VERTICAL] = VerticalFilter_MIPSdspR2;
+  WebPFilters[WEBP_FILTER_GRADIENT] = GradientFilter_MIPSdspR2;
+}
+
+#else  // !WEBP_USE_MIPS_DSP_R2
+
+WEBP_DSP_INIT_STUB(VP8FiltersInitMIPSdspR2)
+
+#endif  // WEBP_USE_MIPS_DSP_R2
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_filters_msa.c b/vendor/github.com/sizeofint/webpanimation/dsp_filters_msa.c
new file mode 100644
index 00000000..e546e78c
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_filters_msa.c
@@ -0,0 +1,202 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// MSA variant of alpha filters
+//
+// Author: Prashant Patil (prashant.patil@imgtec.com)
+
+#include "dsp_dsp.h"
+
+#if defined(WEBP_USE_MSA)
+
+#include "dsp_msa_macro.h"
+
+#include <assert.h>
+
+static WEBP_INLINE void PredictLineInverse0(const uint8_t* src,
+                                            const uint8_t* pred,
+                                            uint8_t* dst, int length) {
+  v16u8 src0, pred0, dst0;
+  assert(length >= 0);
+  while (length >= 32) {
+    v16u8 src1, pred1, dst1;
+    LD_UB2(src, 16, src0, src1);
+    LD_UB2(pred, 16, pred0, pred1);
+    SUB2(src0, pred0, src1, pred1, dst0, dst1);
+    ST_UB2(dst0, dst1, dst, 16);
+    src += 32;
+    pred += 32;
+    dst += 32;
+    length -= 32;
+  }
+  if (length > 0) {
+    int i;
+    if (length >= 16) {
+      src0 = LD_UB(src);
+      pred0 = LD_UB(pred);
+      dst0 = src0 - pred0;
+      ST_UB(dst0, dst);
+      src += 16;
+      pred += 16;
+      dst += 16;
+      length -= 16;
+    }
+    for (i = 0; i < length; i++) {
+      dst[i] = src[i] - pred[i];
+    }
+  }
+}
+
+//------------------------------------------------------------------------------
+// Helpful macro.
+ +#define SANITY_CHECK(in, out) \ + assert(in != NULL); \ + assert(out != NULL); \ + assert(width > 0); \ + assert(height > 0); \ + assert(stride >= width); + +//------------------------------------------------------------------------------ +// Horrizontal filter + +static void HorizontalFilter_MSA(const uint8_t* data, int width, int height, + int stride, uint8_t* filtered_data) { + const uint8_t* preds = data; + const uint8_t* in = data; + uint8_t* out = filtered_data; + int row = 1; + SANITY_CHECK(in, out); + + // Leftmost pixel is the same as input for topmost scanline. + out[0] = in[0]; + PredictLineInverse0(in + 1, preds, out + 1, width - 1); + preds += stride; + in += stride; + out += stride; + // Filter line-by-line. + while (row < height) { + // Leftmost pixel is predicted from above. + PredictLineInverse0(in, preds - stride, out, 1); + PredictLineInverse0(in + 1, preds, out + 1, width - 1); + ++row; + preds += stride; + in += stride; + out += stride; + } +} + +//------------------------------------------------------------------------------ +// Gradient filter + +static WEBP_INLINE void PredictLineGradient(const uint8_t* pinput, + const uint8_t* ppred, + uint8_t* poutput, int stride, + int size) { + int w; + const v16i8 zero = { 0 }; + while (size >= 16) { + v16u8 pred0, dst0; + v8i16 a0, a1, b0, b1, c0, c1; + const v16u8 tmp0 = LD_UB(ppred - 1); + const v16u8 tmp1 = LD_UB(ppred - stride); + const v16u8 tmp2 = LD_UB(ppred - stride - 1); + const v16u8 src0 = LD_UB(pinput); + ILVRL_B2_SH(zero, tmp0, a0, a1); + ILVRL_B2_SH(zero, tmp1, b0, b1); + ILVRL_B2_SH(zero, tmp2, c0, c1); + ADD2(a0, b0, a1, b1, a0, a1); + SUB2(a0, c0, a1, c1, a0, a1); + CLIP_SH2_0_255(a0, a1); + pred0 = (v16u8)__msa_pckev_b((v16i8)a1, (v16i8)a0); + dst0 = src0 - pred0; + ST_UB(dst0, poutput); + ppred += 16; + pinput += 16; + poutput += 16; + size -= 16; + } + for (w = 0; w < size; ++w) { + const int pred = ppred[w - 1] + ppred[w - stride] - ppred[w - stride - 1]; + poutput[w] = pinput[w] - (pred < 0 ? 0 : pred > 255 ? 255 : pred); + } +} + + +static void GradientFilter_MSA(const uint8_t* data, int width, int height, + int stride, uint8_t* filtered_data) { + const uint8_t* in = data; + const uint8_t* preds = data; + uint8_t* out = filtered_data; + int row = 1; + SANITY_CHECK(in, out); + + // left prediction for top scan-line + out[0] = in[0]; + PredictLineInverse0(in + 1, preds, out + 1, width - 1); + preds += stride; + in += stride; + out += stride; + // Filter line-by-line. + while (row < height) { + out[0] = in[0] - preds[- stride]; + PredictLineGradient(preds + 1, in + 1, out + 1, stride, width - 1); + ++row; + preds += stride; + in += stride; + out += stride; + } +} + +//------------------------------------------------------------------------------ +// Vertical filter + +static void VerticalFilter_MSA(const uint8_t* data, int width, int height, + int stride, uint8_t* filtered_data) { + const uint8_t* in = data; + const uint8_t* preds = data; + uint8_t* out = filtered_data; + int row = 1; + SANITY_CHECK(in, out); + + // Very first top-left pixel is copied. + out[0] = in[0]; + // Rest of top scan-line is left-predicted. + PredictLineInverse0(in + 1, preds, out + 1, width - 1); + in += stride; + out += stride; + + // Filter line-by-line. 
+  while (row < height) {
+    PredictLineInverse0(in, preds, out, width);
+    ++row;
+    preds += stride;
+    in += stride;
+    out += stride;
+  }
+}
+
+#undef SANITY_CHECK
+
+//------------------------------------------------------------------------------
+// Entry point
+
+extern void VP8FiltersInitMSA(void);
+
+WEBP_TSAN_IGNORE_FUNCTION void VP8FiltersInitMSA(void) {
+  WebPFilters[WEBP_FILTER_HORIZONTAL] = HorizontalFilter_MSA;
+  WebPFilters[WEBP_FILTER_VERTICAL] = VerticalFilter_MSA;
+  WebPFilters[WEBP_FILTER_GRADIENT] = GradientFilter_MSA;
+}
+
+#else  // !WEBP_USE_MSA
+
+WEBP_DSP_INIT_STUB(VP8FiltersInitMSA)
+
+#endif  // WEBP_USE_MSA
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_filters_neon.c b/vendor/github.com/sizeofint/webpanimation/dsp_filters_neon.c
new file mode 100644
index 00000000..7bf8cae6
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_filters_neon.c
@@ -0,0 +1,329 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// NEON variant of alpha filters
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include "dsp_dsp.h"
+
+#if defined(WEBP_USE_NEON)
+
+#include <assert.h>
+#include "dsp_neon.h"
+
+//------------------------------------------------------------------------------
+// Helpful macros.
+
+# define SANITY_CHECK(in, out) \
+  assert(in != NULL); \
+  assert(out != NULL); \
+  assert(width > 0); \
+  assert(height > 0); \
+  assert(stride >= width); \
+  assert(row >= 0 && num_rows > 0 && row + num_rows <= height); \
+  (void)height;  // Silence unused warning.
+
+// load eight u8 and widen to s16
+#define U8_TO_S16(A) vreinterpretq_s16_u16(vmovl_u8(A))
+#define LOAD_U8_TO_S16(A) U8_TO_S16(vld1_u8(A))
+
+// shift left or right by N byte, inserting zeros
+#define SHIFT_RIGHT_N_Q(A, N) vextq_u8((A), zero, (N))
+#define SHIFT_LEFT_N_Q(A, N) vextq_u8(zero, (A), (16 - (N)) % 16)
+
+// rotate left by N bytes
+#define ROTATE_LEFT_N(A, N) vext_u8((A), (A), (N))
+// rotate right by N bytes
+#define ROTATE_RIGHT_N(A, N) vext_u8((A), (A), (8 - (N)) % 8)
+
+static void PredictLine_NEON(const uint8_t* src, const uint8_t* pred,
+                             uint8_t* dst, int length) {
+  int i;
+  assert(length >= 0);
+  for (i = 0; i + 16 <= length; i += 16) {
+    const uint8x16_t A = vld1q_u8(&src[i]);
+    const uint8x16_t B = vld1q_u8(&pred[i]);
+    const uint8x16_t C = vsubq_u8(A, B);
+    vst1q_u8(&dst[i], C);
+  }
+  for (; i < length; ++i) dst[i] = src[i] - pred[i];
+}
+
+// Special case for left-based prediction (when preds==dst-1 or preds==src-1).
+static void PredictLineLeft_NEON(const uint8_t* src, uint8_t* dst, int length) {
+  PredictLine_NEON(src, src - 1, dst, length);
+}
+
+//------------------------------------------------------------------------------
+// Horizontal filter.
+
+static WEBP_INLINE void DoHorizontalFilter_NEON(const uint8_t* in,
+                                                int width, int height,
+                                                int stride,
+                                                int row, int num_rows,
+                                                uint8_t* out) {
+  const size_t start_offset = row * stride;
+  const int last_row = row + num_rows;
+  SANITY_CHECK(in, out);
+  in += start_offset;
+  out += start_offset;
+
+  if (row == 0) {
+    // Leftmost pixel is the same as input for topmost scanline.
+ out[0] = in[0]; + PredictLineLeft_NEON(in + 1, out + 1, width - 1); + row = 1; + in += stride; + out += stride; + } + + // Filter line-by-line. + while (row < last_row) { + // Leftmost pixel is predicted from above. + out[0] = in[0] - in[-stride]; + PredictLineLeft_NEON(in + 1, out + 1, width - 1); + ++row; + in += stride; + out += stride; + } +} + +static void HorizontalFilter_NEON(const uint8_t* data, int width, int height, + int stride, uint8_t* filtered_data) { + DoHorizontalFilter_NEON(data, width, height, stride, 0, height, + filtered_data); +} + +//------------------------------------------------------------------------------ +// Vertical filter. + +static WEBP_INLINE void DoVerticalFilter_NEON(const uint8_t* in, + int width, int height, int stride, + int row, int num_rows, + uint8_t* out) { + const size_t start_offset = row * stride; + const int last_row = row + num_rows; + SANITY_CHECK(in, out); + in += start_offset; + out += start_offset; + + if (row == 0) { + // Very first top-left pixel is copied. + out[0] = in[0]; + // Rest of top scan-line is left-predicted. + PredictLineLeft_NEON(in + 1, out + 1, width - 1); + row = 1; + in += stride; + out += stride; + } + + // Filter line-by-line. + while (row < last_row) { + PredictLine_NEON(in, in - stride, out, width); + ++row; + in += stride; + out += stride; + } +} + +static void VerticalFilter_NEON(const uint8_t* data, int width, int height, + int stride, uint8_t* filtered_data) { + DoVerticalFilter_NEON(data, width, height, stride, 0, height, + filtered_data); +} + +//------------------------------------------------------------------------------ +// Gradient filter. + +static WEBP_INLINE int GradientPredictor_C(uint8_t a, uint8_t b, uint8_t c) { + const int g = a + b - c; + return ((g & ~0xff) == 0) ? g : (g < 0) ? 0 : 255; // clip to 8bit +} + +static void GradientPredictDirect_NEON(const uint8_t* const row, + const uint8_t* const top, + uint8_t* const out, int length) { + int i; + for (i = 0; i + 8 <= length; i += 8) { + const uint8x8_t A = vld1_u8(&row[i - 1]); + const uint8x8_t B = vld1_u8(&top[i + 0]); + const int16x8_t C = vreinterpretq_s16_u16(vaddl_u8(A, B)); + const int16x8_t D = LOAD_U8_TO_S16(&top[i - 1]); + const uint8x8_t E = vqmovun_s16(vsubq_s16(C, D)); + const uint8x8_t F = vld1_u8(&row[i + 0]); + vst1_u8(&out[i], vsub_u8(F, E)); + } + for (; i < length; ++i) { + out[i] = row[i] - GradientPredictor_C(row[i - 1], top[i], top[i - 1]); + } +} + +static WEBP_INLINE void DoGradientFilter_NEON(const uint8_t* in, + int width, int height, + int stride, + int row, int num_rows, + uint8_t* out) { + const size_t start_offset = row * stride; + const int last_row = row + num_rows; + SANITY_CHECK(in, out); + in += start_offset; + out += start_offset; + + // left prediction for top scan-line + if (row == 0) { + out[0] = in[0]; + PredictLineLeft_NEON(in + 1, out + 1, width - 1); + row = 1; + in += stride; + out += stride; + } + + // Filter line-by-line. 
+ while (row < last_row) { + out[0] = in[0] - in[-stride]; + GradientPredictDirect_NEON(in + 1, in + 1 - stride, out + 1, width - 1); + ++row; + in += stride; + out += stride; + } +} + +static void GradientFilter_NEON(const uint8_t* data, int width, int height, + int stride, uint8_t* filtered_data) { + DoGradientFilter_NEON(data, width, height, stride, 0, height, + filtered_data); +} + +#undef SANITY_CHECK + +//------------------------------------------------------------------------------ +// Inverse transforms + +static void HorizontalUnfilter_NEON(const uint8_t* prev, const uint8_t* in, + uint8_t* out, int width) { + int i; + const uint8x16_t zero = vdupq_n_u8(0); + uint8x16_t last; + out[0] = in[0] + (prev == NULL ? 0 : prev[0]); + if (width <= 1) return; + last = vsetq_lane_u8(out[0], zero, 0); + for (i = 1; i + 16 <= width; i += 16) { + const uint8x16_t A0 = vld1q_u8(&in[i]); + const uint8x16_t A1 = vaddq_u8(A0, last); + const uint8x16_t A2 = SHIFT_LEFT_N_Q(A1, 1); + const uint8x16_t A3 = vaddq_u8(A1, A2); + const uint8x16_t A4 = SHIFT_LEFT_N_Q(A3, 2); + const uint8x16_t A5 = vaddq_u8(A3, A4); + const uint8x16_t A6 = SHIFT_LEFT_N_Q(A5, 4); + const uint8x16_t A7 = vaddq_u8(A5, A6); + const uint8x16_t A8 = SHIFT_LEFT_N_Q(A7, 8); + const uint8x16_t A9 = vaddq_u8(A7, A8); + vst1q_u8(&out[i], A9); + last = SHIFT_RIGHT_N_Q(A9, 15); + } + for (; i < width; ++i) out[i] = in[i] + out[i - 1]; +} + +static void VerticalUnfilter_NEON(const uint8_t* prev, const uint8_t* in, + uint8_t* out, int width) { + if (prev == NULL) { + HorizontalUnfilter_NEON(NULL, in, out, width); + } else { + int i; + assert(width >= 0); + for (i = 0; i + 16 <= width; i += 16) { + const uint8x16_t A = vld1q_u8(&in[i]); + const uint8x16_t B = vld1q_u8(&prev[i]); + const uint8x16_t C = vaddq_u8(A, B); + vst1q_u8(&out[i], C); + } + for (; i < width; ++i) out[i] = in[i] + prev[i]; + } +} + +// GradientUnfilter_NEON is correct but slower than the C-version, +// at least on ARM64. For armv7, it's a wash. +// So best is to disable it for now, but keep the idea around... 
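One more note on HorizontalUnfilter_NEON above: the SHIFT_LEFT_N_Q/vaddq_u8 chain turns the serial carry of the C version into an inclusive prefix sum computed in log2(16) = 4 doubling steps. A scalar model of that doubling scheme (illustration only, not from the patch):

#include <stdint.h>
#include <string.h>

/* After the four passes, v[i] holds the sum of the original v[0..i]
 * (mod 256), i.e. the horizontal unfilter of a block of 16 deltas. */
static void prefix_sum_16_bytes(uint8_t v[16]) {
  int shift, i;
  for (shift = 1; shift < 16; shift <<= 1) {  /* 1, 2, 4, 8 */
    uint8_t shifted[16] = { 0 };
    memcpy(shifted + shift, v, 16 - shift);   /* like SHIFT_LEFT_N_Q(A, N) */
    for (i = 0; i < 16; ++i) v[i] = (uint8_t)(v[i] + shifted[i]);
  }
}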
+#if !defined(USE_GRADIENT_UNFILTER)
+#define USE_GRADIENT_UNFILTER 0   // ALTERNATE_CODE
+#endif
+
+#if (USE_GRADIENT_UNFILTER == 1)
+#define GRAD_PROCESS_LANE(L) do { \
+  const uint8x8_t tmp1 = ROTATE_RIGHT_N(pred, 1);  /* rotate predictor in */ \
+  const int16x8_t tmp2 = vaddq_s16(BC, U8_TO_S16(tmp1)); \
+  const uint8x8_t delta = vqmovun_s16(tmp2); \
+  pred = vadd_u8(D, delta); \
+  out = vext_u8(out, ROTATE_LEFT_N(pred, (L)), 1); \
+} while (0)
+
+static void GradientPredictInverse_NEON(const uint8_t* const in,
+                                        const uint8_t* const top,
+                                        uint8_t* const row, int length) {
+  if (length > 0) {
+    int i;
+    uint8x8_t pred = vdup_n_u8(row[-1]);  // left sample
+    uint8x8_t out = vdup_n_u8(0);
+    for (i = 0; i + 8 <= length; i += 8) {
+      const int16x8_t B = LOAD_U8_TO_S16(&top[i + 0]);
+      const int16x8_t C = LOAD_U8_TO_S16(&top[i - 1]);
+      const int16x8_t BC = vsubq_s16(B, C);  // unclipped gradient basis B - C
+      const uint8x8_t D = vld1_u8(&in[i]);   // base input
+      GRAD_PROCESS_LANE(0);
+      GRAD_PROCESS_LANE(1);
+      GRAD_PROCESS_LANE(2);
+      GRAD_PROCESS_LANE(3);
+      GRAD_PROCESS_LANE(4);
+      GRAD_PROCESS_LANE(5);
+      GRAD_PROCESS_LANE(6);
+      GRAD_PROCESS_LANE(7);
+      vst1_u8(&row[i], out);
+    }
+    for (; i < length; ++i) {
+      row[i] = in[i] + GradientPredictor_C(row[i - 1], top[i], top[i - 1]);
+    }
+  }
+}
+#undef GRAD_PROCESS_LANE
+
+static void GradientUnfilter_NEON(const uint8_t* prev, const uint8_t* in,
+                                  uint8_t* out, int width) {
+  if (prev == NULL) {
+    HorizontalUnfilter_NEON(NULL, in, out, width);
+  } else {
+    out[0] = in[0] + prev[0];  // predict from above
+    GradientPredictInverse_NEON(in + 1, prev + 1, out + 1, width - 1);
+  }
+}
+
+#endif  // USE_GRADIENT_UNFILTER
+
+//------------------------------------------------------------------------------
+// Entry point
+
+extern void VP8FiltersInitNEON(void);
+
+WEBP_TSAN_IGNORE_FUNCTION void VP8FiltersInitNEON(void) {
+  WebPUnfilters[WEBP_FILTER_HORIZONTAL] = HorizontalUnfilter_NEON;
+  WebPUnfilters[WEBP_FILTER_VERTICAL] = VerticalUnfilter_NEON;
+#if (USE_GRADIENT_UNFILTER == 1)
+  WebPUnfilters[WEBP_FILTER_GRADIENT] = GradientUnfilter_NEON;
+#endif
+
+  WebPFilters[WEBP_FILTER_HORIZONTAL] = HorizontalFilter_NEON;
+  WebPFilters[WEBP_FILTER_VERTICAL] = VerticalFilter_NEON;
+  WebPFilters[WEBP_FILTER_GRADIENT] = GradientFilter_NEON;
+}
+
+#else  // !WEBP_USE_NEON
+
+WEBP_DSP_INIT_STUB(VP8FiltersInitNEON)
+
+#endif  // WEBP_USE_NEON
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_filters_sse2.c b/vendor/github.com/sizeofint/webpanimation/dsp_filters_sse2.c
new file mode 100644
index 00000000..e701357e
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_filters_sse2.c
@@ -0,0 +1,335 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// SSE2 variant of alpha filters
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include "dsp_dsp.h"
+
+#if defined(WEBP_USE_SSE2)
+
+#include <assert.h>
+#include <emmintrin.h>
+#include <stddef.h>
+#include <string.h>
+
+//------------------------------------------------------------------------------
+// Helpful macro.
+ +# define SANITY_CHECK(in, out) \ + assert((in) != NULL); \ + assert((out) != NULL); \ + assert(width > 0); \ + assert(height > 0); \ + assert(stride >= width); \ + assert(row >= 0 && num_rows > 0 && row + num_rows <= height); \ + (void)height; // Silence unused warning. + +static void PredictLineTop_SSE2(const uint8_t* src, const uint8_t* pred, + uint8_t* dst, int length) { + int i; + const int max_pos = length & ~31; + assert(length >= 0); + for (i = 0; i < max_pos; i += 32) { + const __m128i A0 = _mm_loadu_si128((const __m128i*)&src[i + 0]); + const __m128i A1 = _mm_loadu_si128((const __m128i*)&src[i + 16]); + const __m128i B0 = _mm_loadu_si128((const __m128i*)&pred[i + 0]); + const __m128i B1 = _mm_loadu_si128((const __m128i*)&pred[i + 16]); + const __m128i C0 = _mm_sub_epi8(A0, B0); + const __m128i C1 = _mm_sub_epi8(A1, B1); + _mm_storeu_si128((__m128i*)&dst[i + 0], C0); + _mm_storeu_si128((__m128i*)&dst[i + 16], C1); + } + for (; i < length; ++i) dst[i] = src[i] - pred[i]; +} + +// Special case for left-based prediction (when preds==dst-1 or preds==src-1). +static void PredictLineLeft_SSE2(const uint8_t* src, uint8_t* dst, int length) { + int i; + const int max_pos = length & ~31; + assert(length >= 0); + for (i = 0; i < max_pos; i += 32) { + const __m128i A0 = _mm_loadu_si128((const __m128i*)(src + i + 0 )); + const __m128i B0 = _mm_loadu_si128((const __m128i*)(src + i + 0 - 1)); + const __m128i A1 = _mm_loadu_si128((const __m128i*)(src + i + 16 )); + const __m128i B1 = _mm_loadu_si128((const __m128i*)(src + i + 16 - 1)); + const __m128i C0 = _mm_sub_epi8(A0, B0); + const __m128i C1 = _mm_sub_epi8(A1, B1); + _mm_storeu_si128((__m128i*)(dst + i + 0), C0); + _mm_storeu_si128((__m128i*)(dst + i + 16), C1); + } + for (; i < length; ++i) dst[i] = src[i] - src[i - 1]; +} + +//------------------------------------------------------------------------------ +// Horizontal filter. + +static WEBP_INLINE void DoHorizontalFilter_SSE2(const uint8_t* in, + int width, int height, + int stride, + int row, int num_rows, + uint8_t* out) { + const size_t start_offset = row * stride; + const int last_row = row + num_rows; + SANITY_CHECK(in, out); + in += start_offset; + out += start_offset; + + if (row == 0) { + // Leftmost pixel is the same as input for topmost scanline. + out[0] = in[0]; + PredictLineLeft_SSE2(in + 1, out + 1, width - 1); + row = 1; + in += stride; + out += stride; + } + + // Filter line-by-line. + while (row < last_row) { + // Leftmost pixel is predicted from above. + out[0] = in[0] - in[-stride]; + PredictLineLeft_SSE2(in + 1, out + 1, width - 1); + ++row; + in += stride; + out += stride; + } +} + +//------------------------------------------------------------------------------ +// Vertical filter. + +static WEBP_INLINE void DoVerticalFilter_SSE2(const uint8_t* in, + int width, int height, int stride, + int row, int num_rows, + uint8_t* out) { + const size_t start_offset = row * stride; + const int last_row = row + num_rows; + SANITY_CHECK(in, out); + in += start_offset; + out += start_offset; + + if (row == 0) { + // Very first top-left pixel is copied. + out[0] = in[0]; + // Rest of top scan-line is left-predicted. + PredictLineLeft_SSE2(in + 1, out + 1, width - 1); + row = 1; + in += stride; + out += stride; + } + + // Filter line-by-line. + while (row < last_row) { + PredictLineTop_SSE2(in, in - stride, out, width); + ++row; + in += stride; + out += stride; + } +} + +//------------------------------------------------------------------------------ +// Gradient filter. 
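In the code below, the clip of the gradient a + b - c to [0, 255] comes for free from the unsigned saturation of _mm_packus_epi16, so no explicit min/max is needed. Per pixel, the forward filter computes (scalar sketch):

#include <stdint.h>

/* What GradientPredictDirect_SSE2 stores for each pixel. */
static uint8_t gradient_delta_ref(uint8_t left, uint8_t top,
                                  uint8_t top_left, uint8_t cur) {
  int g = left + top - top_left;
  if (g < 0) g = 0;    /* packus-style unsigned saturation */
  if (g > 255) g = 255;
  return (uint8_t)(cur - g);
}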
+ +static WEBP_INLINE int GradientPredictor_SSE2(uint8_t a, uint8_t b, uint8_t c) { + const int g = a + b - c; + return ((g & ~0xff) == 0) ? g : (g < 0) ? 0 : 255; // clip to 8bit +} + +static void GradientPredictDirect_SSE2(const uint8_t* const row, + const uint8_t* const top, + uint8_t* const out, int length) { + const int max_pos = length & ~7; + int i; + const __m128i zero = _mm_setzero_si128(); + for (i = 0; i < max_pos; i += 8) { + const __m128i A0 = _mm_loadl_epi64((const __m128i*)&row[i - 1]); + const __m128i B0 = _mm_loadl_epi64((const __m128i*)&top[i]); + const __m128i C0 = _mm_loadl_epi64((const __m128i*)&top[i - 1]); + const __m128i D = _mm_loadl_epi64((const __m128i*)&row[i]); + const __m128i A1 = _mm_unpacklo_epi8(A0, zero); + const __m128i B1 = _mm_unpacklo_epi8(B0, zero); + const __m128i C1 = _mm_unpacklo_epi8(C0, zero); + const __m128i E = _mm_add_epi16(A1, B1); + const __m128i F = _mm_sub_epi16(E, C1); + const __m128i G = _mm_packus_epi16(F, zero); + const __m128i H = _mm_sub_epi8(D, G); + _mm_storel_epi64((__m128i*)(out + i), H); + } + for (; i < length; ++i) { + const int delta = GradientPredictor_SSE2(row[i - 1], top[i], top[i - 1]); + out[i] = (uint8_t)(row[i] - delta); + } +} + +static WEBP_INLINE void DoGradientFilter_SSE2(const uint8_t* in, + int width, int height, int stride, + int row, int num_rows, + uint8_t* out) { + const size_t start_offset = row * stride; + const int last_row = row + num_rows; + SANITY_CHECK(in, out); + in += start_offset; + out += start_offset; + + // left prediction for top scan-line + if (row == 0) { + out[0] = in[0]; + PredictLineLeft_SSE2(in + 1, out + 1, width - 1); + row = 1; + in += stride; + out += stride; + } + + // Filter line-by-line. + while (row < last_row) { + out[0] = (uint8_t)(in[0] - in[-stride]); + GradientPredictDirect_SSE2(in + 1, in + 1 - stride, out + 1, width - 1); + ++row; + in += stride; + out += stride; + } +} + +#undef SANITY_CHECK + +//------------------------------------------------------------------------------ + +static void HorizontalFilter_SSE2(const uint8_t* data, int width, int height, + int stride, uint8_t* filtered_data) { + DoHorizontalFilter_SSE2(data, width, height, stride, 0, height, + filtered_data); +} + +static void VerticalFilter_SSE2(const uint8_t* data, int width, int height, + int stride, uint8_t* filtered_data) { + DoVerticalFilter_SSE2(data, width, height, stride, 0, height, filtered_data); +} + +static void GradientFilter_SSE2(const uint8_t* data, int width, int height, + int stride, uint8_t* filtered_data) { + DoGradientFilter_SSE2(data, width, height, stride, 0, height, filtered_data); +} + +//------------------------------------------------------------------------------ +// Inverse transforms + +static void HorizontalUnfilter_SSE2(const uint8_t* prev, const uint8_t* in, + uint8_t* out, int width) { + int i; + __m128i last; + out[0] = (uint8_t)(in[0] + (prev == NULL ? 
0 : prev[0])); + if (width <= 1) return; + last = _mm_set_epi32(0, 0, 0, out[0]); + for (i = 1; i + 8 <= width; i += 8) { + const __m128i A0 = _mm_loadl_epi64((const __m128i*)(in + i)); + const __m128i A1 = _mm_add_epi8(A0, last); + const __m128i A2 = _mm_slli_si128(A1, 1); + const __m128i A3 = _mm_add_epi8(A1, A2); + const __m128i A4 = _mm_slli_si128(A3, 2); + const __m128i A5 = _mm_add_epi8(A3, A4); + const __m128i A6 = _mm_slli_si128(A5, 4); + const __m128i A7 = _mm_add_epi8(A5, A6); + _mm_storel_epi64((__m128i*)(out + i), A7); + last = _mm_srli_epi64(A7, 56); + } + for (; i < width; ++i) out[i] = (uint8_t)(in[i] + out[i - 1]); +} + +static void VerticalUnfilter_SSE2(const uint8_t* prev, const uint8_t* in, + uint8_t* out, int width) { + if (prev == NULL) { + HorizontalUnfilter_SSE2(NULL, in, out, width); + } else { + int i; + const int max_pos = width & ~31; + assert(width >= 0); + for (i = 0; i < max_pos; i += 32) { + const __m128i A0 = _mm_loadu_si128((const __m128i*)&in[i + 0]); + const __m128i A1 = _mm_loadu_si128((const __m128i*)&in[i + 16]); + const __m128i B0 = _mm_loadu_si128((const __m128i*)&prev[i + 0]); + const __m128i B1 = _mm_loadu_si128((const __m128i*)&prev[i + 16]); + const __m128i C0 = _mm_add_epi8(A0, B0); + const __m128i C1 = _mm_add_epi8(A1, B1); + _mm_storeu_si128((__m128i*)&out[i + 0], C0); + _mm_storeu_si128((__m128i*)&out[i + 16], C1); + } + for (; i < width; ++i) out[i] = (uint8_t)(in[i] + prev[i]); + } +} + +static void GradientPredictInverse_SSE2(const uint8_t* const in, + const uint8_t* const top, + uint8_t* const row, int length) { + if (length > 0) { + int i; + const int max_pos = length & ~7; + const __m128i zero = _mm_setzero_si128(); + __m128i A = _mm_set_epi32(0, 0, 0, row[-1]); // left sample + for (i = 0; i < max_pos; i += 8) { + const __m128i tmp0 = _mm_loadl_epi64((const __m128i*)&top[i]); + const __m128i tmp1 = _mm_loadl_epi64((const __m128i*)&top[i - 1]); + const __m128i B = _mm_unpacklo_epi8(tmp0, zero); + const __m128i C = _mm_unpacklo_epi8(tmp1, zero); + const __m128i D = _mm_loadl_epi64((const __m128i*)&in[i]); // base input + const __m128i E = _mm_sub_epi16(B, C); // unclipped gradient basis B - C + __m128i out = zero; // accumulator for output + __m128i mask_hi = _mm_set_epi32(0, 0, 0, 0xff); + int k = 8; + while (1) { + const __m128i tmp3 = _mm_add_epi16(A, E); // delta = A + B - C + const __m128i tmp4 = _mm_packus_epi16(tmp3, zero); // saturate delta + const __m128i tmp5 = _mm_add_epi8(tmp4, D); // add to in[] + A = _mm_and_si128(tmp5, mask_hi); // 1-complement clip + out = _mm_or_si128(out, A); // accumulate output + if (--k == 0) break; + A = _mm_slli_si128(A, 1); // rotate left sample + mask_hi = _mm_slli_si128(mask_hi, 1); // rotate mask + A = _mm_unpacklo_epi8(A, zero); // convert 8b->16b + } + A = _mm_srli_si128(A, 7); // prepare left sample for next iteration + _mm_storel_epi64((__m128i*)&row[i], out); + } + for (; i < length; ++i) { + const int delta = GradientPredictor_SSE2(row[i - 1], top[i], top[i - 1]); + row[i] = (uint8_t)(in[i] + delta); + } + } +} + +static void GradientUnfilter_SSE2(const uint8_t* prev, const uint8_t* in, + uint8_t* out, int width) { + if (prev == NULL) { + HorizontalUnfilter_SSE2(NULL, in, out, width); + } else { + out[0] = (uint8_t)(in[0] + prev[0]); // predict from above + GradientPredictInverse_SSE2(in + 1, prev + 1, out + 1, width - 1); + } +} + +//------------------------------------------------------------------------------ +// Entry point + +extern void VP8FiltersInitSSE2(void); + 
+WEBP_TSAN_IGNORE_FUNCTION void VP8FiltersInitSSE2(void) {
+  WebPUnfilters[WEBP_FILTER_HORIZONTAL] = HorizontalUnfilter_SSE2;
+  WebPUnfilters[WEBP_FILTER_VERTICAL] = VerticalUnfilter_SSE2;
+  WebPUnfilters[WEBP_FILTER_GRADIENT] = GradientUnfilter_SSE2;
+
+  WebPFilters[WEBP_FILTER_HORIZONTAL] = HorizontalFilter_SSE2;
+  WebPFilters[WEBP_FILTER_VERTICAL] = VerticalFilter_SSE2;
+  WebPFilters[WEBP_FILTER_GRADIENT] = GradientFilter_SSE2;
+}
+
+#else  // !WEBP_USE_SSE2
+
+WEBP_DSP_INIT_STUB(VP8FiltersInitSSE2)
+
+#endif  // WEBP_USE_SSE2
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_lossless.c b/vendor/github.com/sizeofint/webpanimation/dsp_lossless.c
new file mode 100644
index 00000000..53d08c8e
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_lossless.c
@@ -0,0 +1,660 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Image transforms and color space conversion methods for lossless decoder.
+//
+// Authors: Vikas Arora (vikaas.arora@gmail.com)
+//          Jyrki Alakuijala (jyrki@google.com)
+//          Urvang Joshi (urvang@google.com)
+
+#include "dsp_dsp.h"
+
+#include <assert.h>
+#include <math.h>
+#include <stdlib.h>
+#include "dec_vp8li_dec.h"
+#include "utils_endian_inl_utils.h"
+#include "dsp_lossless.h"
+#include "dsp_lossless_common.h"
+
+//------------------------------------------------------------------------------
+// Image transforms.
+
+static WEBP_INLINE uint32_t Average2(uint32_t a0, uint32_t a1) {
+  return (((a0 ^ a1) & 0xfefefefeu) >> 1) + (a0 & a1);
+}
+
+static WEBP_INLINE uint32_t Average3(uint32_t a0, uint32_t a1, uint32_t a2) {
+  return Average2(Average2(a0, a2), a1);
+}
+
+static WEBP_INLINE uint32_t Average4(uint32_t a0, uint32_t a1,
+                                     uint32_t a2, uint32_t a3) {
+  return Average2(Average2(a0, a1), Average2(a2, a3));
+}
+
+static WEBP_INLINE uint32_t Clip255(uint32_t a) {
+  if (a < 256) {
+    return a;
+  }
+  // return 0, when a is a negative integer.
+  // return 255, when a is positive.
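+  // For a wrapped-around negative value the top byte of ~a is 0x00, so the
+  // shift returns 0; for an in-range positive value it is 0xff, giving 255.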
+ return ~a >> 24; +} + +static WEBP_INLINE int AddSubtractComponentFull(int a, int b, int c) { + return Clip255(a + b - c); +} + +static WEBP_INLINE uint32_t ClampedAddSubtractFull(uint32_t c0, uint32_t c1, + uint32_t c2) { + const int a = AddSubtractComponentFull(c0 >> 24, c1 >> 24, c2 >> 24); + const int r = AddSubtractComponentFull((c0 >> 16) & 0xff, + (c1 >> 16) & 0xff, + (c2 >> 16) & 0xff); + const int g = AddSubtractComponentFull((c0 >> 8) & 0xff, + (c1 >> 8) & 0xff, + (c2 >> 8) & 0xff); + const int b = AddSubtractComponentFull(c0 & 0xff, c1 & 0xff, c2 & 0xff); + return ((uint32_t)a << 24) | (r << 16) | (g << 8) | b; +} + +static WEBP_INLINE int AddSubtractComponentHalf(int a, int b) { + return Clip255(a + (a - b) / 2); +} + +static WEBP_INLINE uint32_t ClampedAddSubtractHalf(uint32_t c0, uint32_t c1, + uint32_t c2) { + const uint32_t ave = Average2(c0, c1); + const int a = AddSubtractComponentHalf(ave >> 24, c2 >> 24); + const int r = AddSubtractComponentHalf((ave >> 16) & 0xff, (c2 >> 16) & 0xff); + const int g = AddSubtractComponentHalf((ave >> 8) & 0xff, (c2 >> 8) & 0xff); + const int b = AddSubtractComponentHalf((ave >> 0) & 0xff, (c2 >> 0) & 0xff); + return ((uint32_t)a << 24) | (r << 16) | (g << 8) | b; +} + +// gcc <= 4.9 on ARM generates incorrect code in Select() when Sub3() is +// inlined. +#if defined(__arm__) && defined(__GNUC__) && LOCAL_GCC_VERSION <= 0x409 +# define LOCAL_INLINE __attribute__ ((noinline)) +#else +# define LOCAL_INLINE WEBP_INLINE +#endif + +static LOCAL_INLINE int Sub3(int a, int b, int c) { + const int pb = b - c; + const int pa = a - c; + return abs(pb) - abs(pa); +} + +#undef LOCAL_INLINE + +static WEBP_INLINE uint32_t Select(uint32_t a, uint32_t b, uint32_t c) { + const int pa_minus_pb = + Sub3((a >> 24) , (b >> 24) , (c >> 24) ) + + Sub3((a >> 16) & 0xff, (b >> 16) & 0xff, (c >> 16) & 0xff) + + Sub3((a >> 8) & 0xff, (b >> 8) & 0xff, (c >> 8) & 0xff) + + Sub3((a ) & 0xff, (b ) & 0xff, (c ) & 0xff); + return (pa_minus_pb <= 0) ? 
a : b; +} + +//------------------------------------------------------------------------------ +// Predictors + +uint32_t VP8LPredictor0_C(uint32_t left, const uint32_t* const top) { + (void)top; + (void)left; + return ARGB_BLACK; +} +uint32_t VP8LPredictor1_C(uint32_t left, const uint32_t* const top) { + (void)top; + return left; +} +uint32_t VP8LPredictor2_C(uint32_t left, const uint32_t* const top) { + (void)left; + return top[0]; +} +uint32_t VP8LPredictor3_C(uint32_t left, const uint32_t* const top) { + (void)left; + return top[1]; +} +uint32_t VP8LPredictor4_C(uint32_t left, const uint32_t* const top) { + (void)left; + return top[-1]; +} +uint32_t VP8LPredictor5_C(uint32_t left, const uint32_t* const top) { + const uint32_t pred = Average3(left, top[0], top[1]); + return pred; +} +uint32_t VP8LPredictor6_C(uint32_t left, const uint32_t* const top) { + const uint32_t pred = Average2(left, top[-1]); + return pred; +} +uint32_t VP8LPredictor7_C(uint32_t left, const uint32_t* const top) { + const uint32_t pred = Average2(left, top[0]); + return pred; +} +uint32_t VP8LPredictor8_C(uint32_t left, const uint32_t* const top) { + const uint32_t pred = Average2(top[-1], top[0]); + (void)left; + return pred; +} +uint32_t VP8LPredictor9_C(uint32_t left, const uint32_t* const top) { + const uint32_t pred = Average2(top[0], top[1]); + (void)left; + return pred; +} +uint32_t VP8LPredictor10_C(uint32_t left, const uint32_t* const top) { + const uint32_t pred = Average4(left, top[-1], top[0], top[1]); + return pred; +} +uint32_t VP8LPredictor11_C(uint32_t left, const uint32_t* const top) { + const uint32_t pred = Select(top[0], left, top[-1]); + return pred; +} +uint32_t VP8LPredictor12_C(uint32_t left, const uint32_t* const top) { + const uint32_t pred = ClampedAddSubtractFull(left, top[0], top[-1]); + return pred; +} +uint32_t VP8LPredictor13_C(uint32_t left, const uint32_t* const top) { + const uint32_t pred = ClampedAddSubtractHalf(left, top[0], top[-1]); + return pred; +} + +static void PredictorAdd0_C(const uint32_t* in, const uint32_t* upper, + int num_pixels, uint32_t* out) { + int x; + (void)upper; + for (x = 0; x < num_pixels; ++x) out[x] = VP8LAddPixels(in[x], ARGB_BLACK); +} +static void PredictorAdd1_C(const uint32_t* in, const uint32_t* upper, + int num_pixels, uint32_t* out) { + int i; + uint32_t left = out[-1]; + (void)upper; + for (i = 0; i < num_pixels; ++i) { + out[i] = left = VP8LAddPixels(in[i], left); + } +} +GENERATE_PREDICTOR_ADD(VP8LPredictor2_C, PredictorAdd2_C) +GENERATE_PREDICTOR_ADD(VP8LPredictor3_C, PredictorAdd3_C) +GENERATE_PREDICTOR_ADD(VP8LPredictor4_C, PredictorAdd4_C) +GENERATE_PREDICTOR_ADD(VP8LPredictor5_C, PredictorAdd5_C) +GENERATE_PREDICTOR_ADD(VP8LPredictor6_C, PredictorAdd6_C) +GENERATE_PREDICTOR_ADD(VP8LPredictor7_C, PredictorAdd7_C) +GENERATE_PREDICTOR_ADD(VP8LPredictor8_C, PredictorAdd8_C) +GENERATE_PREDICTOR_ADD(VP8LPredictor9_C, PredictorAdd9_C) +GENERATE_PREDICTOR_ADD(VP8LPredictor10_C, PredictorAdd10_C) +GENERATE_PREDICTOR_ADD(VP8LPredictor11_C, PredictorAdd11_C) +GENERATE_PREDICTOR_ADD(VP8LPredictor12_C, PredictorAdd12_C) +GENERATE_PREDICTOR_ADD(VP8LPredictor13_C, PredictorAdd13_C) + +//------------------------------------------------------------------------------ + +// Inverse prediction. +static void PredictorInverseTransform_C(const VP8LTransform* const transform, + int y_start, int y_end, + const uint32_t* in, uint32_t* out) { + const int width = transform->xsize_; + if (y_start == 0) { // First Row follows the L (mode=1) mode. 
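As a sanity check of the branch-free helpers the predictors above are built from, here is a minimal standalone sketch. It is not part of the vendored file; `Average2` and `Clip255` are local re-implementations of the static helpers shown earlier, verified against naive per-channel arithmetic:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local re-implementations of the helpers above, for illustration only. */
static uint32_t Average2(uint32_t a0, uint32_t a1) {
  return (((a0 ^ a1) & 0xfefefefeu) >> 1) + (a0 & a1);
}
static uint32_t Clip255(uint32_t a) {
  return (a < 256) ? a : (~a >> 24);  /* 0 for "negative" a, 255 for large a */
}

int main(void) {
  const uint32_t a = 0x80402010u, b = 0x10204080u;
  const uint32_t avg = Average2(a, b);
  int shift;
  /* Per byte lane, Average2 must equal floor((x + y) / 2): the 0xfe mask
   * drops each lane's low bit before the shift, so nothing leaks into the
   * lane below. */
  for (shift = 0; shift < 32; shift += 8) {
    const uint32_t x = (a >> shift) & 0xff, y = (b >> shift) & 0xff;
    assert(((avg >> shift) & 0xff) == (x + y) / 2);
  }
  assert(Clip255(300) == 255);         /* positive overflow clamps to 255 */
  assert(Clip255((uint32_t)-5) == 0);  /* negative values clamp to 0      */
  printf("average/clip helpers verified\n");
  return 0;
}
```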
+ PredictorAdd0_C(in, NULL, 1, out); + PredictorAdd1_C(in + 1, NULL, width - 1, out + 1); + in += width; + out += width; + ++y_start; + } + + { + int y = y_start; + const int tile_width = 1 << transform->bits_; + const int mask = tile_width - 1; + const int tiles_per_row = VP8LSubSampleSize(width, transform->bits_); + const uint32_t* pred_mode_base = + transform->data_ + (y >> transform->bits_) * tiles_per_row; + + while (y < y_end) { + const uint32_t* pred_mode_src = pred_mode_base; + int x = 1; + // First pixel follows the T (mode=2) mode. + PredictorAdd2_C(in, out - width, 1, out); + // .. the rest: + while (x < width) { + const VP8LPredictorAddSubFunc pred_func = + VP8LPredictorsAdd[((*pred_mode_src++) >> 8) & 0xf]; + int x_end = (x & ~mask) + tile_width; + if (x_end > width) x_end = width; + pred_func(in + x, out + x - width, x_end - x, out + x); + x = x_end; + } + in += width; + out += width; + ++y; + if ((y & mask) == 0) { // Use the same mask, since tiles are squares. + pred_mode_base += tiles_per_row; + } + } + } +} + +// Add green to blue and red channels (i.e. perform the inverse transform of +// 'subtract green'). +void VP8LAddGreenToBlueAndRed_C(const uint32_t* src, int num_pixels, + uint32_t* dst) { + int i; + for (i = 0; i < num_pixels; ++i) { + const uint32_t argb = src[i]; + const uint32_t green = ((argb >> 8) & 0xff); + uint32_t red_blue = (argb & 0x00ff00ffu); + red_blue += (green << 16) | green; + red_blue &= 0x00ff00ffu; + dst[i] = (argb & 0xff00ff00u) | red_blue; + } +} + +static WEBP_INLINE int ColorTransformDelta(int8_t color_pred, + int8_t color) { + return ((int)color_pred * color) >> 5; +} + +static WEBP_INLINE void ColorCodeToMultipliers(uint32_t color_code, + VP8LMultipliers* const m) { + m->green_to_red_ = (color_code >> 0) & 0xff; + m->green_to_blue_ = (color_code >> 8) & 0xff; + m->red_to_blue_ = (color_code >> 16) & 0xff; +} + +void VP8LTransformColorInverse_C(const VP8LMultipliers* const m, + const uint32_t* src, int num_pixels, + uint32_t* dst) { + int i; + for (i = 0; i < num_pixels; ++i) { + const uint32_t argb = src[i]; + const int8_t green = (int8_t)(argb >> 8); + const uint32_t red = argb >> 16; + int new_red = red & 0xff; + int new_blue = argb & 0xff; + new_red += ColorTransformDelta(m->green_to_red_, green); + new_red &= 0xff; + new_blue += ColorTransformDelta(m->green_to_blue_, green); + new_blue += ColorTransformDelta(m->red_to_blue_, (int8_t)new_red); + new_blue &= 0xff; + dst[i] = (argb & 0xff00ff00u) | (new_red << 16) | (new_blue); + } +} + +// Color space inverse transform. +static void ColorSpaceInverseTransform_C(const VP8LTransform* const transform, + int y_start, int y_end, + const uint32_t* src, uint32_t* dst) { + const int width = transform->xsize_; + const int tile_width = 1 << transform->bits_; + const int mask = tile_width - 1; + const int safe_width = width & ~mask; + const int remaining_width = width - safe_width; + const int tiles_per_row = VP8LSubSampleSize(width, transform->bits_); + int y = y_start; + const uint32_t* pred_row = + transform->data_ + (y >> transform->bits_) * tiles_per_row; + + while (y < y_end) { + const uint32_t* pred = pred_row; + VP8LMultipliers m = { 0, 0, 0 }; + const uint32_t* const src_safe_end = src + safe_width; + const uint32_t* const src_end = src + width; + while (src < src_safe_end) { + ColorCodeToMultipliers(*pred++, &m); + VP8LTransformColorInverse(&m, src, tile_width, dst); + src += tile_width; + dst += tile_width; + } + if (src < src_end) { // Left-overs using C-version. 
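The 'subtract green' transform and the inverse above are exact mod-256 inverses of each other. A minimal standalone round-trip check (illustration-only local re-implementations of the loop bodies of VP8LSubtractGreenFromBlueAndRed_C and VP8LAddGreenToBlueAndRed_C):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustration-only copies of the two directions of 'subtract green'. */
static uint32_t SubtractGreen(uint32_t argb) {
  const uint32_t green = (argb >> 8) & 0xff;
  const uint32_t r = (((argb >> 16) & 0xff) - green) & 0xff;
  const uint32_t b = ((argb & 0xff) - green) & 0xff;
  return (argb & 0xff00ff00u) | (r << 16) | b;
}
static uint32_t AddGreen(uint32_t argb) {
  const uint32_t green = (argb >> 8) & 0xff;
  uint32_t red_blue = argb & 0x00ff00ffu;
  red_blue += (green << 16) | green;  /* add green to both channels at once */
  red_blue &= 0x00ff00ffu;            /* drop the mod-256 carries           */
  return (argb & 0xff00ff00u) | red_blue;
}

int main(void) {
  const uint32_t px = 0xff64c832u;            /* A=ff R=64 G=c8 B=32 */
  assert(AddGreen(SubtractGreen(px)) == px);  /* lossless round trip */
  printf("subtract-green round trip ok: 0x%08x\n", px);
  return 0;
}
```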
+ ColorCodeToMultipliers(*pred++, &m); + VP8LTransformColorInverse(&m, src, remaining_width, dst); + src += remaining_width; + dst += remaining_width; + } + ++y; + if ((y & mask) == 0) pred_row += tiles_per_row; + } +} + +// Separate out pixels packed together using pixel-bundling. +// We define two methods for ARGB data (uint32_t) and alpha-only data (uint8_t). +#define COLOR_INDEX_INVERSE(FUNC_NAME, F_NAME, STATIC_DECL, TYPE, BIT_SUFFIX, \ + GET_INDEX, GET_VALUE) \ +static void F_NAME(const TYPE* src, const uint32_t* const color_map, \ + TYPE* dst, int y_start, int y_end, int width) { \ + int y; \ + for (y = y_start; y < y_end; ++y) { \ + int x; \ + for (x = 0; x < width; ++x) { \ + *dst++ = GET_VALUE(color_map[GET_INDEX(*src++)]); \ + } \ + } \ +} \ +STATIC_DECL void FUNC_NAME(const VP8LTransform* const transform, \ + int y_start, int y_end, const TYPE* src, \ + TYPE* dst) { \ + int y; \ + const int bits_per_pixel = 8 >> transform->bits_; \ + const int width = transform->xsize_; \ + const uint32_t* const color_map = transform->data_; \ + if (bits_per_pixel < 8) { \ + const int pixels_per_byte = 1 << transform->bits_; \ + const int count_mask = pixels_per_byte - 1; \ + const uint32_t bit_mask = (1 << bits_per_pixel) - 1; \ + for (y = y_start; y < y_end; ++y) { \ + uint32_t packed_pixels = 0; \ + int x; \ + for (x = 0; x < width; ++x) { \ + /* We need to load fresh 'packed_pixels' once every */ \ + /* 'pixels_per_byte' increments of x. Fortunately, pixels_per_byte */ \ + /* is a power of 2, so can just use a mask for that, instead of */ \ + /* decrementing a counter. */ \ + if ((x & count_mask) == 0) packed_pixels = GET_INDEX(*src++); \ + *dst++ = GET_VALUE(color_map[packed_pixels & bit_mask]); \ + packed_pixels >>= bits_per_pixel; \ + } \ + } \ + } else { \ + VP8LMapColor##BIT_SUFFIX(src, color_map, dst, y_start, y_end, width); \ + } \ +} + +COLOR_INDEX_INVERSE(ColorIndexInverseTransform_C, MapARGB_C, static, + uint32_t, 32b, VP8GetARGBIndex, VP8GetARGBValue) +COLOR_INDEX_INVERSE(VP8LColorIndexInverseTransformAlpha, MapAlpha_C, , + uint8_t, 8b, VP8GetAlphaIndex, VP8GetAlphaValue) + +#undef COLOR_INDEX_INVERSE + +void VP8LInverseTransform(const VP8LTransform* const transform, + int row_start, int row_end, + const uint32_t* const in, uint32_t* const out) { + const int width = transform->xsize_; + assert(row_start < row_end); + assert(row_end <= transform->ysize_); + switch (transform->type_) { + case SUBTRACT_GREEN: + VP8LAddGreenToBlueAndRed(in, (row_end - row_start) * width, out); + break; + case PREDICTOR_TRANSFORM: + PredictorInverseTransform_C(transform, row_start, row_end, in, out); + if (row_end != transform->ysize_) { + // The last predicted row in this iteration will be the top-pred row + // for the first row in next iteration. + memcpy(out - width, out + (row_end - row_start - 1) * width, + width * sizeof(*out)); + } + break; + case CROSS_COLOR_TRANSFORM: + ColorSpaceInverseTransform_C(transform, row_start, row_end, in, out); + break; + case COLOR_INDEXING_TRANSFORM: + if (in == out && transform->bits_ > 0) { + // Move packed pixels to the end of unpacked region, so that unpacking + // can occur seamlessly. + // Also, note that this is the only transform that applies on + // the effective width of VP8LSubSampleSize(xsize_, bits_). All other + // transforms work on effective width of xsize_. 
+ const int out_stride = (row_end - row_start) * width; + const int in_stride = (row_end - row_start) * + VP8LSubSampleSize(transform->xsize_, transform->bits_); + uint32_t* const src = out + out_stride - in_stride; + memmove(src, out, in_stride * sizeof(*src)); + ColorIndexInverseTransform_C(transform, row_start, row_end, src, out); + } else { + ColorIndexInverseTransform_C(transform, row_start, row_end, in, out); + } + break; + } +} + +//------------------------------------------------------------------------------ +// Color space conversion. + +static int is_big_endian(void) { + static const union { + uint16_t w; + uint8_t b[2]; + } tmp = { 1 }; + return (tmp.b[0] != 1); +} + +void VP8LConvertBGRAToRGB_C(const uint32_t* src, + int num_pixels, uint8_t* dst) { + const uint32_t* const src_end = src + num_pixels; + while (src < src_end) { + const uint32_t argb = *src++; + *dst++ = (argb >> 16) & 0xff; + *dst++ = (argb >> 8) & 0xff; + *dst++ = (argb >> 0) & 0xff; + } +} + +void VP8LConvertBGRAToRGBA_C(const uint32_t* src, + int num_pixels, uint8_t* dst) { + const uint32_t* const src_end = src + num_pixels; + while (src < src_end) { + const uint32_t argb = *src++; + *dst++ = (argb >> 16) & 0xff; + *dst++ = (argb >> 8) & 0xff; + *dst++ = (argb >> 0) & 0xff; + *dst++ = (argb >> 24) & 0xff; + } +} + +void VP8LConvertBGRAToRGBA4444_C(const uint32_t* src, + int num_pixels, uint8_t* dst) { + const uint32_t* const src_end = src + num_pixels; + while (src < src_end) { + const uint32_t argb = *src++; + const uint8_t rg = ((argb >> 16) & 0xf0) | ((argb >> 12) & 0xf); + const uint8_t ba = ((argb >> 0) & 0xf0) | ((argb >> 28) & 0xf); +#if (WEBP_SWAP_16BIT_CSP == 1) + *dst++ = ba; + *dst++ = rg; +#else + *dst++ = rg; + *dst++ = ba; +#endif + } +} + +void VP8LConvertBGRAToRGB565_C(const uint32_t* src, + int num_pixels, uint8_t* dst) { + const uint32_t* const src_end = src + num_pixels; + while (src < src_end) { + const uint32_t argb = *src++; + const uint8_t rg = ((argb >> 16) & 0xf8) | ((argb >> 13) & 0x7); + const uint8_t gb = ((argb >> 5) & 0xe0) | ((argb >> 3) & 0x1f); +#if (WEBP_SWAP_16BIT_CSP == 1) + *dst++ = gb; + *dst++ = rg; +#else + *dst++ = rg; + *dst++ = gb; +#endif + } +} + +void VP8LConvertBGRAToBGR_C(const uint32_t* src, + int num_pixels, uint8_t* dst) { + const uint32_t* const src_end = src + num_pixels; + while (src < src_end) { + const uint32_t argb = *src++; + *dst++ = (argb >> 0) & 0xff; + *dst++ = (argb >> 8) & 0xff; + *dst++ = (argb >> 16) & 0xff; + } +} + +static void CopyOrSwap(const uint32_t* src, int num_pixels, uint8_t* dst, + int swap_on_big_endian) { + if (is_big_endian() == swap_on_big_endian) { + const uint32_t* const src_end = src + num_pixels; + while (src < src_end) { + const uint32_t argb = *src++; + WebPUint32ToMem(dst, BSwap32(argb)); + dst += sizeof(argb); + } + } else { + memcpy(dst, src, num_pixels * sizeof(*src)); + } +} + +void VP8LConvertFromBGRA(const uint32_t* const in_data, int num_pixels, + WEBP_CSP_MODE out_colorspace, uint8_t* const rgba) { + switch (out_colorspace) { + case MODE_RGB: + VP8LConvertBGRAToRGB(in_data, num_pixels, rgba); + break; + case MODE_RGBA: + VP8LConvertBGRAToRGBA(in_data, num_pixels, rgba); + break; + case MODE_rgbA: + VP8LConvertBGRAToRGBA(in_data, num_pixels, rgba); + WebPApplyAlphaMultiply(rgba, 0, num_pixels, 1, 0); + break; + case MODE_BGR: + VP8LConvertBGRAToBGR(in_data, num_pixels, rgba); + break; + case MODE_BGRA: + CopyOrSwap(in_data, num_pixels, rgba, 1); + break; + case MODE_bgrA: + CopyOrSwap(in_data, num_pixels, rgba, 1); + 
WebPApplyAlphaMultiply(rgba, 0, num_pixels, 1, 0); + break; + case MODE_ARGB: + CopyOrSwap(in_data, num_pixels, rgba, 0); + break; + case MODE_Argb: + CopyOrSwap(in_data, num_pixels, rgba, 0); + WebPApplyAlphaMultiply(rgba, 1, num_pixels, 1, 0); + break; + case MODE_RGBA_4444: + VP8LConvertBGRAToRGBA4444(in_data, num_pixels, rgba); + break; + case MODE_rgbA_4444: + VP8LConvertBGRAToRGBA4444(in_data, num_pixels, rgba); + WebPApplyAlphaMultiply4444(rgba, num_pixels, 1, 0); + break; + case MODE_RGB_565: + VP8LConvertBGRAToRGB565(in_data, num_pixels, rgba); + break; + default: + assert(0); // Code flow should not reach here. + } +} + +//------------------------------------------------------------------------------ + +VP8LProcessDecBlueAndRedFunc VP8LAddGreenToBlueAndRed; +VP8LPredictorAddSubFunc VP8LPredictorsAdd[16]; +VP8LPredictorFunc VP8LPredictors[16]; + +// exposed plain-C implementations +VP8LPredictorAddSubFunc VP8LPredictorsAdd_C[16]; + +VP8LTransformColorInverseFunc VP8LTransformColorInverse; + +VP8LConvertFunc VP8LConvertBGRAToRGB; +VP8LConvertFunc VP8LConvertBGRAToRGBA; +VP8LConvertFunc VP8LConvertBGRAToRGBA4444; +VP8LConvertFunc VP8LConvertBGRAToRGB565; +VP8LConvertFunc VP8LConvertBGRAToBGR; + +VP8LMapARGBFunc VP8LMapColor32b; +VP8LMapAlphaFunc VP8LMapColor8b; + +extern void VP8LDspInitSSE2(void); +extern void VP8LDspInitNEON(void); +extern void VP8LDspInitMIPSdspR2(void); +extern void VP8LDspInitMSA(void); + +#define COPY_PREDICTOR_ARRAY(IN, OUT) do { \ + (OUT)[0] = IN##0_C; \ + (OUT)[1] = IN##1_C; \ + (OUT)[2] = IN##2_C; \ + (OUT)[3] = IN##3_C; \ + (OUT)[4] = IN##4_C; \ + (OUT)[5] = IN##5_C; \ + (OUT)[6] = IN##6_C; \ + (OUT)[7] = IN##7_C; \ + (OUT)[8] = IN##8_C; \ + (OUT)[9] = IN##9_C; \ + (OUT)[10] = IN##10_C; \ + (OUT)[11] = IN##11_C; \ + (OUT)[12] = IN##12_C; \ + (OUT)[13] = IN##13_C; \ + (OUT)[14] = IN##0_C; /* <- padding security sentinels*/ \ + (OUT)[15] = IN##0_C; \ +} while (0); + +WEBP_DSP_INIT_FUNC(VP8LDspInit) { + COPY_PREDICTOR_ARRAY(VP8LPredictor, VP8LPredictors) + COPY_PREDICTOR_ARRAY(PredictorAdd, VP8LPredictorsAdd) + COPY_PREDICTOR_ARRAY(PredictorAdd, VP8LPredictorsAdd_C) + +#if !WEBP_NEON_OMIT_C_CODE + VP8LAddGreenToBlueAndRed = VP8LAddGreenToBlueAndRed_C; + + VP8LTransformColorInverse = VP8LTransformColorInverse_C; + + VP8LConvertBGRAToRGBA = VP8LConvertBGRAToRGBA_C; + VP8LConvertBGRAToRGB = VP8LConvertBGRAToRGB_C; + VP8LConvertBGRAToBGR = VP8LConvertBGRAToBGR_C; +#endif + + VP8LConvertBGRAToRGBA4444 = VP8LConvertBGRAToRGBA4444_C; + VP8LConvertBGRAToRGB565 = VP8LConvertBGRAToRGB565_C; + + VP8LMapColor32b = MapARGB_C; + VP8LMapColor8b = MapAlpha_C; + + // If defined, use CPUInfo() to overwrite some pointers with faster versions. 
+  if (VP8GetCPUInfo != NULL) {
+#if defined(WEBP_USE_SSE2)
+    if (VP8GetCPUInfo(kSSE2)) {
+      VP8LDspInitSSE2();
+    }
+#endif
+#if defined(WEBP_USE_MIPS_DSP_R2)
+    if (VP8GetCPUInfo(kMIPSdspR2)) {
+      VP8LDspInitMIPSdspR2();
+    }
+#endif
+#if defined(WEBP_USE_MSA)
+    if (VP8GetCPUInfo(kMSA)) {
+      VP8LDspInitMSA();
+    }
+#endif
+  }
+
+#if defined(WEBP_USE_NEON)
+  if (WEBP_NEON_OMIT_C_CODE ||
+      (VP8GetCPUInfo != NULL && VP8GetCPUInfo(kNEON))) {
+    VP8LDspInitNEON();
+  }
+#endif
+
+  assert(VP8LAddGreenToBlueAndRed != NULL);
+  assert(VP8LTransformColorInverse != NULL);
+  assert(VP8LConvertBGRAToRGBA != NULL);
+  assert(VP8LConvertBGRAToRGB != NULL);
+  assert(VP8LConvertBGRAToBGR != NULL);
+  assert(VP8LConvertBGRAToRGBA4444 != NULL);
+  assert(VP8LConvertBGRAToRGB565 != NULL);
+  assert(VP8LMapColor32b != NULL);
+  assert(VP8LMapColor8b != NULL);
+}
+#undef COPY_PREDICTOR_ARRAY
+
+//------------------------------------------------------------------------------
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_lossless.h b/vendor/github.com/sizeofint/webpanimation/dsp_lossless.h
new file mode 100644
index 00000000..9b4fa4f6
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_lossless.h
@@ -0,0 +1,244 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Image transforms and color space conversion methods for lossless decoder.
+//
+// Authors: Vikas Arora (vikaas.arora@gmail.com)
+//          Jyrki Alakuijala (jyrki@google.com)
+
+#ifndef WEBP_DSP_LOSSLESS_H_
+#define WEBP_DSP_LOSSLESS_H_
+
+#include "webp_types.h"
+#include "webp_decode.h"
+
+#include "enc_histogram_enc.h"
+#include "utils_utils.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+//------------------------------------------------------------------------------
+// Decoding
+
+typedef uint32_t (*VP8LPredictorFunc)(uint32_t left, const uint32_t* const top);
+extern VP8LPredictorFunc VP8LPredictors[16];
+
+uint32_t VP8LPredictor0_C(uint32_t left, const uint32_t* const top);
+uint32_t VP8LPredictor1_C(uint32_t left, const uint32_t* const top);
+uint32_t VP8LPredictor2_C(uint32_t left, const uint32_t* const top);
+uint32_t VP8LPredictor3_C(uint32_t left, const uint32_t* const top);
+uint32_t VP8LPredictor4_C(uint32_t left, const uint32_t* const top);
+uint32_t VP8LPredictor5_C(uint32_t left, const uint32_t* const top);
+uint32_t VP8LPredictor6_C(uint32_t left, const uint32_t* const top);
+uint32_t VP8LPredictor7_C(uint32_t left, const uint32_t* const top);
+uint32_t VP8LPredictor8_C(uint32_t left, const uint32_t* const top);
+uint32_t VP8LPredictor9_C(uint32_t left, const uint32_t* const top);
+uint32_t VP8LPredictor10_C(uint32_t left, const uint32_t* const top);
+uint32_t VP8LPredictor11_C(uint32_t left, const uint32_t* const top);
+uint32_t VP8LPredictor12_C(uint32_t left, const uint32_t* const top);
+uint32_t VP8LPredictor13_C(uint32_t left, const uint32_t* const top);
+
+// These Add/Sub functions expect upper[-1] and out[-1] to be readable.
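To make that border requirement concrete, here is an illustration-only sketch of the left-predictor add loop (a local copy of the PredictorAdd1_C and VP8LAddPixels logic shown earlier, not the library's entry point), where out[-1] is made readable by reserving one leading pixel:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local copy of the mod-256 per-channel add used by the predictors. */
static uint32_t AddPixels(uint32_t a, uint32_t b) {
  const uint32_t ag = (a & 0xff00ff00u) + (b & 0xff00ff00u);
  const uint32_t rb = (a & 0x00ff00ffu) + (b & 0x00ff00ffu);
  return (ag & 0xff00ff00u) | (rb & 0x00ff00ffu);
}

/* Mirrors PredictorAdd1_C: reads out[-1] as the running 'left' pixel. */
static void PredictorAdd1(const uint32_t* in, int num_pixels, uint32_t* out) {
  int i;
  uint32_t left = out[-1];  /* the caller must make this readable */
  for (i = 0; i < num_pixels; ++i) out[i] = left = AddPixels(in[i], left);
}

int main(void) {
  /* One spare pixel in front serves as the out[-1] border. */
  uint32_t row[1 + 3] = { 0xff000000u /* seed 'left' */, 0, 0, 0 };
  const uint32_t residuals[3] = { 0x00010101u, 0x00010101u, 0x00010101u };
  PredictorAdd1(residuals, 3, row + 1);
  assert(row[3] == 0xff030303u);  /* three accumulated +1 steps per channel */
  printf("left-predictor reconstruction ok\n");
  return 0;
}
```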
+typedef void (*VP8LPredictorAddSubFunc)(const uint32_t* in, + const uint32_t* upper, int num_pixels, + uint32_t* out); +extern VP8LPredictorAddSubFunc VP8LPredictorsAdd[16]; +extern VP8LPredictorAddSubFunc VP8LPredictorsAdd_C[16]; + +typedef void (*VP8LProcessDecBlueAndRedFunc)(const uint32_t* src, + int num_pixels, uint32_t* dst); +extern VP8LProcessDecBlueAndRedFunc VP8LAddGreenToBlueAndRed; + +typedef struct { + // Note: the members are uint8_t, so that any negative values are + // automatically converted to "mod 256" values. + uint8_t green_to_red_; + uint8_t green_to_blue_; + uint8_t red_to_blue_; +} VP8LMultipliers; +typedef void (*VP8LTransformColorInverseFunc)(const VP8LMultipliers* const m, + const uint32_t* src, + int num_pixels, uint32_t* dst); +extern VP8LTransformColorInverseFunc VP8LTransformColorInverse; + +struct VP8LTransform; // Defined in dec/vp8li.h. + +// Performs inverse transform of data given transform information, start and end +// rows. Transform will be applied to rows [row_start, row_end[. +// The *in and *out pointers refer to source and destination data respectively +// corresponding to the intermediate row (row_start). +void VP8LInverseTransform(const struct VP8LTransform* const transform, + int row_start, int row_end, + const uint32_t* const in, uint32_t* const out); + +// Color space conversion. +typedef void (*VP8LConvertFunc)(const uint32_t* src, int num_pixels, + uint8_t* dst); +extern VP8LConvertFunc VP8LConvertBGRAToRGB; +extern VP8LConvertFunc VP8LConvertBGRAToRGBA; +extern VP8LConvertFunc VP8LConvertBGRAToRGBA4444; +extern VP8LConvertFunc VP8LConvertBGRAToRGB565; +extern VP8LConvertFunc VP8LConvertBGRAToBGR; + +// Converts from BGRA to other color spaces. +void VP8LConvertFromBGRA(const uint32_t* const in_data, int num_pixels, + WEBP_CSP_MODE out_colorspace, uint8_t* const rgba); + +typedef void (*VP8LMapARGBFunc)(const uint32_t* src, + const uint32_t* const color_map, + uint32_t* dst, int y_start, + int y_end, int width); +typedef void (*VP8LMapAlphaFunc)(const uint8_t* src, + const uint32_t* const color_map, + uint8_t* dst, int y_start, + int y_end, int width); + +extern VP8LMapARGBFunc VP8LMapColor32b; +extern VP8LMapAlphaFunc VP8LMapColor8b; + +// Similar to the static method ColorIndexInverseTransform() that is part of +// lossless.c, but used only for alpha decoding. It takes uint8_t (rather than +// uint32_t) arguments for 'src' and 'dst'. +void VP8LColorIndexInverseTransformAlpha( + const struct VP8LTransform* const transform, int y_start, int y_end, + const uint8_t* src, uint8_t* dst); + +// Expose some C-only fallback functions +void VP8LTransformColorInverse_C(const VP8LMultipliers* const m, + const uint32_t* src, int num_pixels, + uint32_t* dst); + +void VP8LConvertBGRAToRGB_C(const uint32_t* src, int num_pixels, uint8_t* dst); +void VP8LConvertBGRAToRGBA_C(const uint32_t* src, int num_pixels, uint8_t* dst); +void VP8LConvertBGRAToRGBA4444_C(const uint32_t* src, + int num_pixels, uint8_t* dst); +void VP8LConvertBGRAToRGB565_C(const uint32_t* src, + int num_pixels, uint8_t* dst); +void VP8LConvertBGRAToBGR_C(const uint32_t* src, int num_pixels, uint8_t* dst); +void VP8LAddGreenToBlueAndRed_C(const uint32_t* src, int num_pixels, + uint32_t* dst); + +// Must be called before calling any of the above methods. 
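A hedged usage sketch, not from the patch: the typical call order when driving these entry points directly, assuming the vendored flat include name dsp_lossless.h and linking against the vendored sources:

```c
#include <stdint.h>
#include "dsp_lossless.h"  /* vendored flat path; adjust to your layout */

/* Convert four pixels (ARGB packed in uint32_t, i.e. BGRA in memory on
 * little-endian) to RGBA bytes: initialize the dispatch tables once, then
 * call through the public conversion entry point. */
void ConvertExample(void) {
  const uint32_t in_data[4] = { 0xff0000ffu, 0xff00ff00u, 0xffff0000u,
                                0x80808080u };
  uint8_t rgba[4 * 4];
  VP8LDspInit();  /* must run before any of the conversion calls */
  VP8LConvertFromBGRA(in_data, 4, MODE_RGBA, rgba);
}
```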
+void VP8LDspInit(void); + +//------------------------------------------------------------------------------ +// Encoding + +typedef void (*VP8LProcessEncBlueAndRedFunc)(uint32_t* dst, int num_pixels); +extern VP8LProcessEncBlueAndRedFunc VP8LSubtractGreenFromBlueAndRed; +typedef void (*VP8LTransformColorFunc)(const VP8LMultipliers* const m, + uint32_t* dst, int num_pixels); +extern VP8LTransformColorFunc VP8LTransformColor; +typedef void (*VP8LCollectColorBlueTransformsFunc)( + const uint32_t* argb, int stride, + int tile_width, int tile_height, + int green_to_blue, int red_to_blue, int histo[]); +extern VP8LCollectColorBlueTransformsFunc VP8LCollectColorBlueTransforms; + +typedef void (*VP8LCollectColorRedTransformsFunc)( + const uint32_t* argb, int stride, + int tile_width, int tile_height, + int green_to_red, int histo[]); +extern VP8LCollectColorRedTransformsFunc VP8LCollectColorRedTransforms; + +// Expose some C-only fallback functions +void VP8LTransformColor_C(const VP8LMultipliers* const m, + uint32_t* data, int num_pixels); +void VP8LSubtractGreenFromBlueAndRed_C(uint32_t* argb_data, int num_pixels); +void VP8LCollectColorRedTransforms_C(const uint32_t* argb, int stride, + int tile_width, int tile_height, + int green_to_red, int histo[]); +void VP8LCollectColorBlueTransforms_C(const uint32_t* argb, int stride, + int tile_width, int tile_height, + int green_to_blue, int red_to_blue, + int histo[]); + +extern VP8LPredictorAddSubFunc VP8LPredictorsSub[16]; +extern VP8LPredictorAddSubFunc VP8LPredictorsSub_C[16]; + +// ----------------------------------------------------------------------------- +// Huffman-cost related functions. + +typedef double (*VP8LCostFunc)(const uint32_t* population, int length); +typedef double (*VP8LCostCombinedFunc)(const uint32_t* X, const uint32_t* Y, + int length); +typedef float (*VP8LCombinedShannonEntropyFunc)(const int X[256], + const int Y[256]); + +extern VP8LCostFunc VP8LExtraCost; +extern VP8LCostCombinedFunc VP8LExtraCostCombined; +extern VP8LCombinedShannonEntropyFunc VP8LCombinedShannonEntropy; + +typedef struct { // small struct to hold counters + int counts[2]; // index: 0=zero streak, 1=non-zero streak + int streaks[2][2]; // [zero/non-zero][streak<3 / streak>=3] +} VP8LStreaks; + +typedef struct { // small struct to hold bit entropy results + double entropy; // entropy + uint32_t sum; // sum of the population + int nonzeros; // number of non-zero elements in the population + uint32_t max_val; // maximum value in the population + uint32_t nonzero_code; // index of the last non-zero in the population +} VP8LBitEntropy; + +void VP8LBitEntropyInit(VP8LBitEntropy* const entropy); + +// Get the combined symbol bit entropy and Huffman cost stats for the +// distributions 'X' and 'Y'. Those results can then be refined according to +// codec specific heuristics. +typedef void (*VP8LGetCombinedEntropyUnrefinedFunc)( + const uint32_t X[], const uint32_t Y[], int length, + VP8LBitEntropy* const bit_entropy, VP8LStreaks* const stats); +extern VP8LGetCombinedEntropyUnrefinedFunc VP8LGetCombinedEntropyUnrefined; + +// Get the entropy for the distribution 'X'. 
+typedef void (*VP8LGetEntropyUnrefinedFunc)(const uint32_t X[], int length, + VP8LBitEntropy* const bit_entropy, + VP8LStreaks* const stats); +extern VP8LGetEntropyUnrefinedFunc VP8LGetEntropyUnrefined; + +void VP8LBitsEntropyUnrefined(const uint32_t* const array, int n, + VP8LBitEntropy* const entropy); + +typedef void (*VP8LAddVectorFunc)(const uint32_t* a, const uint32_t* b, + uint32_t* out, int size); +extern VP8LAddVectorFunc VP8LAddVector; +typedef void (*VP8LAddVectorEqFunc)(const uint32_t* a, uint32_t* out, int size); +extern VP8LAddVectorEqFunc VP8LAddVectorEq; +void VP8LHistogramAdd(const VP8LHistogram* const a, + const VP8LHistogram* const b, + VP8LHistogram* const out); + +// ----------------------------------------------------------------------------- +// PrefixEncode() + +typedef int (*VP8LVectorMismatchFunc)(const uint32_t* const array1, + const uint32_t* const array2, int length); +// Returns the first index where array1 and array2 are different. +extern VP8LVectorMismatchFunc VP8LVectorMismatch; + +typedef void (*VP8LBundleColorMapFunc)(const uint8_t* const row, int width, + int xbits, uint32_t* dst); +extern VP8LBundleColorMapFunc VP8LBundleColorMap; +void VP8LBundleColorMap_C(const uint8_t* const row, int width, int xbits, + uint32_t* dst); + +// Must be called before calling any of the above methods. +void VP8LEncDspInit(void); + +//------------------------------------------------------------------------------ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // WEBP_DSP_LOSSLESS_H_ diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_lossless_common.h b/vendor/github.com/sizeofint/webpanimation/dsp_lossless_common.h new file mode 100644 index 00000000..e7bea93f --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_lossless_common.h @@ -0,0 +1,191 @@ +// Copyright 2012 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Image transforms and color space conversion methods for lossless decoder. +// +// Authors: Vikas Arora (vikaas.arora@gmail.com) +// Jyrki Alakuijala (jyrki@google.com) +// Vincent Rabaud (vrabaud@google.com) + +#ifndef WEBP_DSP_LOSSLESS_COMMON_H_ +#define WEBP_DSP_LOSSLESS_COMMON_H_ + +#include "webp_types.h" + +#include "utils_utils.h" + +#ifdef __cplusplus +extern "C" { +#endif + +//------------------------------------------------------------------------------ +// Decoding + +// color mapping related functions. +static WEBP_INLINE uint32_t VP8GetARGBIndex(uint32_t idx) { + return (idx >> 8) & 0xff; +} + +static WEBP_INLINE uint8_t VP8GetAlphaIndex(uint8_t idx) { + return idx; +} + +static WEBP_INLINE uint32_t VP8GetARGBValue(uint32_t val) { + return val; +} + +static WEBP_INLINE uint8_t VP8GetAlphaValue(uint32_t val) { + return (val >> 8) & 0xff; +} + +//------------------------------------------------------------------------------ +// Misc methods. + +// Computes sampled size of 'size' when sampling using 'sampling bits'. 
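The sampled-size helper defined next is plain ceiling division by 2^sampling_bits; a tiny illustration-only check of that identity (local copy, not the library symbol):

```c
#include <assert.h>
#include <stdint.h>

/* Local copy of the helper below: ceil(size / 2^sampling_bits). */
static uint32_t SubSampleSize(uint32_t size, uint32_t sampling_bits) {
  return (size + (1 << sampling_bits) - 1) >> sampling_bits;
}

int main(void) {
  assert(SubSampleSize(100, 4) == 7);  /* 100 pixels in 16-pixel tiles -> 7 */
  assert(SubSampleSize(96, 4) == 6);   /* an exact multiple needs no round-up */
  assert(SubSampleSize(1, 3) == 1);
  return 0;
}
```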
+static WEBP_INLINE uint32_t VP8LSubSampleSize(uint32_t size,
+                                              uint32_t sampling_bits) {
+  return (size + (1 << sampling_bits) - 1) >> sampling_bits;
+}
+
+// Converts near lossless quality into max number of bits shaved off.
+static WEBP_INLINE int VP8LNearLosslessBits(int near_lossless_quality) {
+  //    100 -> 0
+  // 80..99 -> 1
+  // 60..79 -> 2
+  // 40..59 -> 3
+  // 20..39 -> 4
+  //  0..19 -> 5
+  return 5 - near_lossless_quality / 20;
+}
+
+// -----------------------------------------------------------------------------
+// Faster logarithm for integers. Small values use a look-up table.
+
+// The threshold up to which the approximate version of log_2 can be used.
+// Practically, we can get rid of the call to log() as the two values match to
+// a very high degree (the ratio of these two is 0.99999x).
+// Keeping a high threshold for now.
+#define APPROX_LOG_WITH_CORRECTION_MAX 65536
+#define APPROX_LOG_MAX 4096
+#define LOG_2_RECIPROCAL 1.44269504088896338700465094007086
+#define LOG_LOOKUP_IDX_MAX 256
+extern const float kLog2Table[LOG_LOOKUP_IDX_MAX];
+extern const float kSLog2Table[LOG_LOOKUP_IDX_MAX];
+typedef float (*VP8LFastLog2SlowFunc)(uint32_t v);
+
+extern VP8LFastLog2SlowFunc VP8LFastLog2Slow;
+extern VP8LFastLog2SlowFunc VP8LFastSLog2Slow;
+
+static WEBP_INLINE float VP8LFastLog2(uint32_t v) {
+  return (v < LOG_LOOKUP_IDX_MAX) ? kLog2Table[v] : VP8LFastLog2Slow(v);
+}
+// Fast calculation of v * log2(v) for integer input.
+static WEBP_INLINE float VP8LFastSLog2(uint32_t v) {
+  return (v < LOG_LOOKUP_IDX_MAX) ? kSLog2Table[v] : VP8LFastSLog2Slow(v);
+}
+
+// -----------------------------------------------------------------------------
+// PrefixEncode()
+
+// Splitting of distance and length codes into prefixes and
+// extra bits. The prefixes are encoded with an entropy code
+// while the extra bits are stored just as normal bits.
+static WEBP_INLINE void VP8LPrefixEncodeBitsNoLUT(int distance, int* const code,
+                                                  int* const extra_bits) {
+  const int highest_bit = BitsLog2Floor(--distance);
+  const int second_highest_bit = (distance >> (highest_bit - 1)) & 1;
+  *extra_bits = highest_bit - 1;
+  *code = 2 * highest_bit + second_highest_bit;
+}
+
+static WEBP_INLINE void VP8LPrefixEncodeNoLUT(int distance, int* const code,
+                                              int* const extra_bits,
+                                              int* const extra_bits_value) {
+  const int highest_bit = BitsLog2Floor(--distance);
+  const int second_highest_bit = (distance >> (highest_bit - 1)) & 1;
+  *extra_bits = highest_bit - 1;
+  *extra_bits_value = distance & ((1 << *extra_bits) - 1);
+  *code = 2 * highest_bit + second_highest_bit;
+}
+
+#define PREFIX_LOOKUP_IDX_MAX 512
+typedef struct {
+  int8_t code_;
+  int8_t extra_bits_;
+} VP8LPrefixCode;
+
+// These tables are derived using VP8LPrefixEncodeNoLUT.
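A worked example of the no-LUT coder above (illustration-only local copies; the real BitsLog2Floor comes from the utils headers): distance 100 maps to prefix code 13 with 5 extra bits, which is exactly what the look-up tables that follow store at index 100.

```c
#include <assert.h>
#include <stdio.h>

/* Position of the highest set bit; stand-in for the library's helper. */
static int BitsLog2Floor(unsigned n) {
  int log = 0;
  while (n >>= 1) ++log;
  return log;
}

/* Mirrors VP8LPrefixEncodeNoLUT above (valid for distance >= 5). */
static void PrefixEncodeNoLUT(int distance, int* code, int* extra_bits,
                              int* extra_bits_value) {
  const int highest_bit = BitsLog2Floor(--distance);
  const int second_highest_bit = (distance >> (highest_bit - 1)) & 1;
  *extra_bits = highest_bit - 1;
  *extra_bits_value = distance & ((1 << *extra_bits) - 1);
  *code = 2 * highest_bit + second_highest_bit;
}

int main(void) {
  int code, bits, value;
  PrefixEncodeNoLUT(100, &code, &bits, &value);
  /* 100 - 1 = 99 = 0b1100011: highest bit 6, next bit 1
   * -> code 2*6+1 = 13, 5 extra bits with value 99 & 31 = 3,
   * matching kPrefixEncodeCode[100] = {13, 5} below. */
  assert(code == 13 && bits == 5 && value == 3);
  printf("distance 100 -> code %d, %d extra bits (value %d)\n",
         code, bits, value);
  return 0;
}
```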
+extern const VP8LPrefixCode kPrefixEncodeCode[PREFIX_LOOKUP_IDX_MAX];
+extern const uint8_t kPrefixEncodeExtraBitsValue[PREFIX_LOOKUP_IDX_MAX];
+static WEBP_INLINE void VP8LPrefixEncodeBits(int distance, int* const code,
+                                             int* const extra_bits) {
+  if (distance < PREFIX_LOOKUP_IDX_MAX) {
+    const VP8LPrefixCode prefix_code = kPrefixEncodeCode[distance];
+    *code = prefix_code.code_;
+    *extra_bits = prefix_code.extra_bits_;
+  } else {
+    VP8LPrefixEncodeBitsNoLUT(distance, code, extra_bits);
+  }
+}
+
+static WEBP_INLINE void VP8LPrefixEncode(int distance, int* const code,
+                                         int* const extra_bits,
+                                         int* const extra_bits_value) {
+  if (distance < PREFIX_LOOKUP_IDX_MAX) {
+    const VP8LPrefixCode prefix_code = kPrefixEncodeCode[distance];
+    *code = prefix_code.code_;
+    *extra_bits = prefix_code.extra_bits_;
+    *extra_bits_value = kPrefixEncodeExtraBitsValue[distance];
+  } else {
+    VP8LPrefixEncodeNoLUT(distance, code, extra_bits, extra_bits_value);
+  }
+}
+
+// Sum of each component, mod 256.
+static WEBP_UBSAN_IGNORE_UNSIGNED_OVERFLOW WEBP_INLINE
+uint32_t VP8LAddPixels(uint32_t a, uint32_t b) {
+  const uint32_t alpha_and_green = (a & 0xff00ff00u) + (b & 0xff00ff00u);
+  const uint32_t red_and_blue = (a & 0x00ff00ffu) + (b & 0x00ff00ffu);
+  return (alpha_and_green & 0xff00ff00u) | (red_and_blue & 0x00ff00ffu);
+}
+
+// Difference of each component, mod 256.
+static WEBP_UBSAN_IGNORE_UNSIGNED_OVERFLOW WEBP_INLINE
+uint32_t VP8LSubPixels(uint32_t a, uint32_t b) {
+  const uint32_t alpha_and_green =
+      0x00ff00ffu + (a & 0xff00ff00u) - (b & 0xff00ff00u);
+  const uint32_t red_and_blue =
+      0xff00ff00u + (a & 0x00ff00ffu) - (b & 0x00ff00ffu);
+  return (alpha_and_green & 0xff00ff00u) | (red_and_blue & 0x00ff00ffu);
+}
+
+//------------------------------------------------------------------------------
+// Transform-related functions used in both encoding and decoding.
+
+// Macros used to create a batch predictor that iteratively uses a
+// one-pixel predictor.
+
+// The predictor is added to the output pixel (which
+// is therefore considered as a residual) to get the final prediction.
+#define GENERATE_PREDICTOR_ADD(PREDICTOR, PREDICTOR_ADD)              \
+static void PREDICTOR_ADD(const uint32_t* in, const uint32_t* upper,  \
+                          int num_pixels, uint32_t* out) {            \
+  int x;                                                              \
+  assert(upper != NULL);                                              \
+  for (x = 0; x < num_pixels; ++x) {                                  \
+    const uint32_t pred = (PREDICTOR)(out[x - 1], upper + x);         \
+    out[x] = VP8LAddPixels(in[x], pred);                              \
+  }                                                                   \
+}
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // WEBP_DSP_LOSSLESS_COMMON_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc.c b/vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc.c
new file mode 100644
index 00000000..ae1f824a
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc.c
@@ -0,0 +1,929 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Image transform methods for lossless encoder.
+//
+// Authors: Vikas Arora (vikaas.arora@gmail.com)
+//          Jyrki Alakuijala (jyrki@google.com)
+//          Urvang Joshi (urvang@google.com)
+
+#include "dsp_dsp.h"
+
+#include <assert.h>
+#include <math.h>
+#include <stdlib.h>
+#include "dec_vp8li_dec.h"
+#include "utils_endian_inl_utils.h"
+#include "dsp_lossless.h"
+#include "dsp_lossless_common.h"
+#include "dsp_yuv.h"
+
+// lookup table for small values of log2(int)
+const float kLog2Table[LOG_LOOKUP_IDX_MAX] = {
+  0.0000000000000000f, 0.0000000000000000f,
+  1.0000000000000000f, 1.5849625007211560f,
+  2.0000000000000000f, 2.3219280948873621f,
+  2.5849625007211560f, 2.8073549220576041f,
+  3.0000000000000000f, 3.1699250014423121f,
+  3.3219280948873621f, 3.4594316186372973f,
+  3.5849625007211560f, 3.7004397181410921f,
+  3.8073549220576041f, 3.9068905956085187f,
+  4.0000000000000000f, 4.0874628412503390f,
+  4.1699250014423121f, 4.2479275134435852f,
+  4.3219280948873626f, 4.3923174227787606f,
+  4.4594316186372973f, 4.5235619560570130f,
+  4.5849625007211560f, 4.6438561897747243f,
+  4.7004397181410917f, 4.7548875021634682f,
+  4.8073549220576037f, 4.8579809951275718f,
+  4.9068905956085187f, 4.9541963103868749f,
+  5.0000000000000000f, 5.0443941193584533f,
+  5.0874628412503390f, 5.1292830169449663f,
+  5.1699250014423121f, 5.2094533656289501f,
+  5.2479275134435852f, 5.2854022188622487f,
+  5.3219280948873626f, 5.3575520046180837f,
+  5.3923174227787606f, 5.4262647547020979f,
+  5.4594316186372973f, 5.4918530963296747f,
+  5.5235619560570130f, 5.5545888516776376f,
+  5.5849625007211560f, 5.6147098441152083f,
+  5.6438561897747243f, 5.6724253419714951f,
+  5.7004397181410917f, 5.7279204545631987f,
+  5.7548875021634682f, 5.7813597135246599f,
+  5.8073549220576037f, 5.8328900141647412f,
+  5.8579809951275718f, 5.8826430493618415f,
+  5.9068905956085187f, 5.9307373375628866f,
+  5.9541963103868749f, 5.9772799234999167f,
+  6.0000000000000000f, 6.0223678130284543f,
+  6.0443941193584533f, 6.0660891904577720f,
+  6.0874628412503390f, 6.1085244567781691f,
+  6.1292830169449663f, 6.1497471195046822f,
+  6.1699250014423121f, 6.1898245588800175f,
+  6.2094533656289501f, 6.2288186904958804f,
+  6.2479275134435852f, 6.2667865406949010f,
+  6.2854022188622487f, 6.3037807481771030f,
+  6.3219280948873626f, 6.3398500028846243f,
+  6.3575520046180837f, 6.3750394313469245f,
+  6.3923174227787606f, 6.4093909361377017f,
+  6.4262647547020979f, 6.4429434958487279f,
+  6.4594316186372973f, 6.4757334309663976f,
+  6.4918530963296747f, 6.5077946401986963f,
+  6.5235619560570130f, 6.5391588111080309f,
+  6.5545888516776376f, 6.5698556083309478f,
+  6.5849625007211560f, 6.5999128421871278f,
+  6.6147098441152083f, 6.6293566200796094f,
+  6.6438561897747243f, 6.6582114827517946f,
+  6.6724253419714951f, 6.6865005271832185f,
+  6.7004397181410917f, 6.7142455176661224f,
+  6.7279204545631987f, 6.7414669864011464f,
+  6.7548875021634682f, 6.7681843247769259f,
+  6.7813597135246599f, 6.7944158663501061f,
+  6.8073549220576037f, 6.8201789624151878f,
+  6.8328900141647412f, 6.8454900509443747f,
+  6.8579809951275718f, 6.8703647195834047f,
+  6.8826430493618415f, 6.8948177633079437f,
+  6.9068905956085187f, 6.9188632372745946f,
+  6.9307373375628866f, 6.9425145053392398f,
+  6.9541963103868749f, 6.9657842846620869f,
+  6.9772799234999167f, 6.9886846867721654f,
+  7.0000000000000000f, 7.0112272554232539f,
+  7.0223678130284543f, 7.0334230015374501f,
+  7.0443941193584533f, 7.0552824355011898f,
+  7.0660891904577720f, 7.0768155970508308f,
+  7.0874628412503390f, 7.0980320829605263f,
+  7.1085244567781691f, 7.1189410727235076f,
+  7.1292830169449663f,
7.1395513523987936f, + 7.1497471195046822f, 7.1598713367783890f, + 7.1699250014423121f, 7.1799090900149344f, + 7.1898245588800175f, 7.1996723448363644f, + 7.2094533656289501f, 7.2191685204621611f, + 7.2288186904958804f, 7.2384047393250785f, + 7.2479275134435852f, 7.2573878426926521f, + 7.2667865406949010f, 7.2761244052742375f, + 7.2854022188622487f, 7.2946207488916270f, + 7.3037807481771030f, 7.3128829552843557f, + 7.3219280948873626f, 7.3309168781146167f, + 7.3398500028846243f, 7.3487281542310771f, + 7.3575520046180837f, 7.3663222142458160f, + 7.3750394313469245f, 7.3837042924740519f, + 7.3923174227787606f, 7.4008794362821843f, + 7.4093909361377017f, 7.4178525148858982f, + 7.4262647547020979f, 7.4346282276367245f, + 7.4429434958487279f, 7.4512111118323289f, + 7.4594316186372973f, 7.4676055500829976f, + 7.4757334309663976f, 7.4838157772642563f, + 7.4918530963296747f, 7.4998458870832056f, + 7.5077946401986963f, 7.5156998382840427f, + 7.5235619560570130f, 7.5313814605163118f, + 7.5391588111080309f, 7.5468944598876364f, + 7.5545888516776376f, 7.5622424242210728f, + 7.5698556083309478f, 7.5774288280357486f, + 7.5849625007211560f, 7.5924570372680806f, + 7.5999128421871278f, 7.6073303137496104f, + 7.6147098441152083f, 7.6220518194563764f, + 7.6293566200796094f, 7.6366246205436487f, + 7.6438561897747243f, 7.6510516911789281f, + 7.6582114827517946f, 7.6653359171851764f, + 7.6724253419714951f, 7.6794800995054464f, + 7.6865005271832185f, 7.6934869574993252f, + 7.7004397181410917f, 7.7073591320808825f, + 7.7142455176661224f, 7.7210991887071855f, + 7.7279204545631987f, 7.7347096202258383f, + 7.7414669864011464f, 7.7481928495894605f, + 7.7548875021634682f, 7.7615512324444795f, + 7.7681843247769259f, 7.7747870596011736f, + 7.7813597135246599f, 7.7879025593914317f, + 7.7944158663501061f, 7.8008998999203047f, + 7.8073549220576037f, 7.8137811912170374f, + 7.8201789624151878f, 7.8265484872909150f, + 7.8328900141647412f, 7.8392037880969436f, + 7.8454900509443747f, 7.8517490414160571f, + 7.8579809951275718f, 7.8641861446542797f, + 7.8703647195834047f, 7.8765169465649993f, + 7.8826430493618415f, 7.8887432488982591f, + 7.8948177633079437f, 7.9008668079807486f, + 7.9068905956085187f, 7.9128893362299619f, + 7.9188632372745946f, 7.9248125036057812f, + 7.9307373375628866f, 7.9366379390025709f, + 7.9425145053392398f, 7.9483672315846778f, + 7.9541963103868749f, 7.9600019320680805f, + 7.9657842846620869f, 7.9715435539507719f, + 7.9772799234999167f, 7.9829935746943103f, + 7.9886846867721654f, 7.9943534368588577f +}; + +const float kSLog2Table[LOG_LOOKUP_IDX_MAX] = { + 0.00000000f, 0.00000000f, 2.00000000f, 4.75488750f, + 8.00000000f, 11.60964047f, 15.50977500f, 19.65148445f, + 24.00000000f, 28.52932501f, 33.21928095f, 38.05374781f, + 43.01955001f, 48.10571634f, 53.30296891f, 58.60335893f, + 64.00000000f, 69.48686830f, 75.05865003f, 80.71062276f, + 86.43856190f, 92.23866588f, 98.10749561f, 104.04192499f, + 110.03910002f, 116.09640474f, 122.21143267f, 128.38196256f, + 134.60593782f, 140.88144886f, 147.20671787f, 153.58008562f, + 160.00000000f, 166.46500594f, 172.97373660f, 179.52490559f, + 186.11730005f, 192.74977453f, 199.42124551f, 206.13068654f, + 212.87712380f, 219.65963219f, 226.47733176f, 233.32938445f, + 240.21499122f, 247.13338933f, 254.08384998f, 261.06567603f, + 268.07820003f, 275.12078236f, 282.19280949f, 289.29369244f, + 296.42286534f, 303.57978409f, 310.76392512f, 317.97478424f, + 325.21187564f, 332.47473081f, 339.76289772f, 347.07593991f, + 354.41343574f, 361.77497759f, 369.16017124f, 376.56863518f, + 
384.00000000f, 391.45390785f, 398.93001188f, 406.42797576f, + 413.94747321f, 421.48818752f, 429.04981119f, 436.63204548f, + 444.23460010f, 451.85719280f, 459.49954906f, 467.16140179f, + 474.84249102f, 482.54256363f, 490.26137307f, 497.99867911f, + 505.75424759f, 513.52785023f, 521.31926438f, 529.12827280f, + 536.95466351f, 544.79822957f, 552.65876890f, 560.53608414f, + 568.42998244f, 576.34027536f, 584.26677867f, 592.20931226f, + 600.16769996f, 608.14176943f, 616.13135206f, 624.13628279f, + 632.15640007f, 640.19154569f, 648.24156472f, 656.30630539f, + 664.38561898f, 672.47935976f, 680.58738488f, 688.70955430f, + 696.84573069f, 704.99577935f, 713.15956818f, 721.33696754f, + 729.52785023f, 737.73209140f, 745.94956849f, 754.18016116f, + 762.42375127f, 770.68022275f, 778.94946161f, 787.23135586f, + 795.52579543f, 803.83267219f, 812.15187982f, 820.48331383f, + 828.82687147f, 837.18245171f, 845.54995518f, 853.92928416f, + 862.32034249f, 870.72303558f, 879.13727036f, 887.56295522f, + 896.00000000f, 904.44831595f, 912.90781569f, 921.37841320f, + 929.86002376f, 938.35256392f, 946.85595152f, 955.37010560f, + 963.89494641f, 972.43039537f, 980.97637504f, 989.53280911f, + 998.09962237f, 1006.67674069f, 1015.26409097f, 1023.86160116f, + 1032.46920021f, 1041.08681805f, 1049.71438560f, 1058.35183469f, + 1066.99909811f, 1075.65610955f, 1084.32280357f, 1092.99911564f, + 1101.68498204f, 1110.38033993f, 1119.08512727f, 1127.79928282f, + 1136.52274614f, 1145.25545758f, 1153.99735821f, 1162.74838989f, + 1171.50849518f, 1180.27761738f, 1189.05570047f, 1197.84268914f, + 1206.63852876f, 1215.44316535f, 1224.25654560f, 1233.07861684f, + 1241.90932703f, 1250.74862473f, 1259.59645914f, 1268.45278005f, + 1277.31753781f, 1286.19068338f, 1295.07216828f, 1303.96194457f, + 1312.85996488f, 1321.76618236f, 1330.68055071f, 1339.60302413f, + 1348.53355734f, 1357.47210556f, 1366.41862452f, 1375.37307041f, + 1384.33539991f, 1393.30557020f, 1402.28353887f, 1411.26926400f, + 1420.26270412f, 1429.26381818f, 1438.27256558f, 1447.28890615f, + 1456.31280014f, 1465.34420819f, 1474.38309138f, 1483.42941118f, + 1492.48312945f, 1501.54420843f, 1510.61261078f, 1519.68829949f, + 1528.77123795f, 1537.86138993f, 1546.95871952f, 1556.06319119f, + 1565.17476976f, 1574.29342040f, 1583.41910860f, 1592.55180020f, + 1601.69146137f, 1610.83805860f, 1619.99155871f, 1629.15192882f, + 1638.31913637f, 1647.49314911f, 1656.67393509f, 1665.86146266f, + 1675.05570047f, 1684.25661744f, 1693.46418280f, 1702.67836605f, + 1711.89913698f, 1721.12646563f, 1730.36032233f, 1739.60067768f, + 1748.84750254f, 1758.10076802f, 1767.36044551f, 1776.62650662f, + 1785.89892323f, 1795.17766747f, 1804.46271172f, 1813.75402857f, + 1823.05159087f, 1832.35537170f, 1841.66534438f, 1850.98148244f, + 1860.30375965f, 1869.63214999f, 1878.96662767f, 1888.30716711f, + 1897.65374295f, 1907.00633003f, 1916.36490342f, 1925.72943838f, + 1935.09991037f, 1944.47629506f, 1953.85856831f, 1963.24670620f, + 1972.64068498f, 1982.04048108f, 1991.44607117f, 2000.85743204f, + 2010.27454072f, 2019.69737440f, 2029.12591044f, 2038.56012640f +}; + +const VP8LPrefixCode kPrefixEncodeCode[PREFIX_LOOKUP_IDX_MAX] = { + { 0, 0}, { 0, 0}, { 1, 0}, { 2, 0}, { 3, 0}, { 4, 1}, { 4, 1}, { 5, 1}, + { 5, 1}, { 6, 2}, { 6, 2}, { 6, 2}, { 6, 2}, { 7, 2}, { 7, 2}, { 7, 2}, + { 7, 2}, { 8, 3}, { 8, 3}, { 8, 3}, { 8, 3}, { 8, 3}, { 8, 3}, { 8, 3}, + { 8, 3}, { 9, 3}, { 9, 3}, { 9, 3}, { 9, 3}, { 9, 3}, { 9, 3}, { 9, 3}, + { 9, 3}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, + {10, 4}, {10, 4}, 
{10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4}, + {10, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, + {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, {11, 4}, + {11, 4}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, + {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, + {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, + {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, {12, 5}, + {12, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, + {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, + {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, + {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, {13, 5}, + {13, 5}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, + {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, + {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, + {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, + {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, + {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, + {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, + {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, {14, 6}, + {14, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, + {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, + {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, + {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, + {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, + {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, + {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, + {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, {15, 6}, + {15, 6}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, + {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, + {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, + {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, + {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, + {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, + {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, + {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, + {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, + {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, + {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, + {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, + {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, + {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, + {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, + {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, {16, 7}, + {16, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, + {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, + {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, + {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, + {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, + {17, 7}, {17, 7}, 
{17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, + {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, + {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, + {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, + {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, + {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, + {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, + {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, + {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, + {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, + {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, {17, 7}, +}; + +const uint8_t kPrefixEncodeExtraBitsValue[PREFIX_LOOKUP_IDX_MAX] = { + 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 2, 3, 0, 1, 2, 3, + 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, + 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, + 127, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, + 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126 +}; + +static float FastSLog2Slow_C(uint32_t v) { + assert(v >= LOG_LOOKUP_IDX_MAX); + if (v < APPROX_LOG_WITH_CORRECTION_MAX) { + int log_cnt = 0; + uint32_t y = 1; + int correction = 0; + const float v_f = (float)v; + const uint32_t orig_v = v; + do { + ++log_cnt; + v = v >> 1; + y = y << 1; + } while (v >= LOG_LOOKUP_IDX_MAX); + // vf = (2^log_cnt) * Xf; where y = 2^log_cnt and Xf < 256 + // Xf = floor(Xf) * (1 + (v % y) / v) + // log2(Xf) = log2(floor(Xf)) + log2(1 + (v % y) / v) + // The correction factor: log(1 + d) ~ d; for very small d values, so + // log2(1 + (v % y) / v) ~ LOG_2_RECIPROCAL * (v % y)/v + // 
LOG_2_RECIPROCAL ~ 23/16
+    correction = (23 * (orig_v & (y - 1))) >> 4;
+    return v_f * (kLog2Table[v] + log_cnt) + correction;
+  } else {
+    return (float)(LOG_2_RECIPROCAL * v * log((double)v));
+  }
+}
+
+static float FastLog2Slow_C(uint32_t v) {
+  assert(v >= LOG_LOOKUP_IDX_MAX);
+  if (v < APPROX_LOG_WITH_CORRECTION_MAX) {
+    int log_cnt = 0;
+    uint32_t y = 1;
+    const uint32_t orig_v = v;
+    double log_2;
+    do {
+      ++log_cnt;
+      v = v >> 1;
+      y = y << 1;
+    } while (v >= LOG_LOOKUP_IDX_MAX);
+    log_2 = kLog2Table[v] + log_cnt;
+    if (orig_v >= APPROX_LOG_MAX) {
+      // Since the division is still expensive, add this correction factor
+      // only for large values of 'v'.
+      const int correction = (23 * (orig_v & (y - 1))) >> 4;
+      log_2 += (double)correction / orig_v;
+    }
+    return (float)log_2;
+  } else {
+    return (float)(LOG_2_RECIPROCAL * log((double)v));
+  }
+}
+
+//------------------------------------------------------------------------------
+// Methods to calculate Entropy (Shannon).
+
+// Computes the combined Shannon entropy for the distributions {X} and {X+Y}.
+static float CombinedShannonEntropy_C(const int X[256], const int Y[256]) {
+  int i;
+  double retval = 0.;
+  int sumX = 0, sumXY = 0;
+  for (i = 0; i < 256; ++i) {
+    const int x = X[i];
+    if (x != 0) {
+      const int xy = x + Y[i];
+      sumX += x;
+      retval -= VP8LFastSLog2(x);
+      sumXY += xy;
+      retval -= VP8LFastSLog2(xy);
+    } else if (Y[i] != 0) {
+      sumXY += Y[i];
+      retval -= VP8LFastSLog2(Y[i]);
+    }
+  }
+  retval += VP8LFastSLog2(sumX) + VP8LFastSLog2(sumXY);
+  return (float)retval;
+}
+
+void VP8LBitEntropyInit(VP8LBitEntropy* const entropy) {
+  entropy->entropy = 0.;
+  entropy->sum = 0;
+  entropy->nonzeros = 0;
+  entropy->max_val = 0;
+  entropy->nonzero_code = VP8L_NON_TRIVIAL_SYM;
+}
+
+void VP8LBitsEntropyUnrefined(const uint32_t* const array, int n,
+                              VP8LBitEntropy* const entropy) {
+  int i;
+
+  VP8LBitEntropyInit(entropy);
+
+  for (i = 0; i < n; ++i) {
+    if (array[i] != 0) {
+      entropy->sum += array[i];
+      entropy->nonzero_code = i;
+      ++entropy->nonzeros;
+      entropy->entropy -= VP8LFastSLog2(array[i]);
+      if (entropy->max_val < array[i]) {
+        entropy->max_val = array[i];
+      }
+    }
+  }
+  entropy->entropy += VP8LFastSLog2(entropy->sum);
+}
+
+static WEBP_INLINE void GetEntropyUnrefinedHelper(
+    uint32_t val, int i, uint32_t* const val_prev, int* const i_prev,
+    VP8LBitEntropy* const bit_entropy, VP8LStreaks* const stats) {
+  const int streak = i - *i_prev;
+
+  // Gather info for the bit entropy.
+  if (*val_prev != 0) {
+    bit_entropy->sum += (*val_prev) * streak;
+    bit_entropy->nonzeros += streak;
+    bit_entropy->nonzero_code = *i_prev;
+    bit_entropy->entropy -= VP8LFastSLog2(*val_prev) * streak;
+    if (bit_entropy->max_val < *val_prev) {
+      bit_entropy->max_val = *val_prev;
+    }
+  }
+
+  // Gather info for the Huffman cost.
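An illustration-only sketch of what the streak bookkeeping below records (the helper itself is driven once per maximal run of equal histogram values; this local re-implementation tallies the runs directly): each run is classified as zero/non-zero and as short or long, mirroring the counts/streaks updates that follow.

```c
#include <assert.h>
#include <stdio.h>
#include <string.h>

typedef struct {
  int counts[2];      /* [zero, non-zero]: number of long streaks         */
  int streaks[2][2];  /* [zero/non-zero][short (<= 3) / long (> 3)] sums  */
} Streaks;

/* Classify one run of 'streak' equal values ('val'), as the code below
 * does for each maximal run in the histogram. */
static void Tally(Streaks* s, int val, int streak) {
  s->counts[val != 0] += (streak > 3);
  s->streaks[val != 0][streak > 3] += streak;
}

int main(void) {
  /* Histogram 5,5,5,5,5,0,0,7 -> a long non-zero run (five 5s),
   * a short zero run (two 0s) and a short non-zero run (one 7). */
  Streaks s;
  memset(&s, 0, sizeof(s));
  Tally(&s, 5, 5);
  Tally(&s, 0, 2);
  Tally(&s, 7, 1);
  assert(s.counts[1] == 1);      /* exactly one long non-zero streak */
  assert(s.streaks[1][1] == 5);  /* and it covers five entries       */
  assert(s.streaks[0][0] == 2 && s.streaks[1][0] == 1);
  printf("streak stats ok\n");
  return 0;
}
```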
+ stats->counts[*val_prev != 0] += (streak > 3); + stats->streaks[*val_prev != 0][(streak > 3)] += streak; + + *val_prev = val; + *i_prev = i; +} + +static void GetEntropyUnrefined_C(const uint32_t X[], int length, + VP8LBitEntropy* const bit_entropy, + VP8LStreaks* const stats) { + int i; + int i_prev = 0; + uint32_t x_prev = X[0]; + + memset(stats, 0, sizeof(*stats)); + VP8LBitEntropyInit(bit_entropy); + + for (i = 1; i < length; ++i) { + const uint32_t x = X[i]; + if (x != x_prev) { + GetEntropyUnrefinedHelper(x, i, &x_prev, &i_prev, bit_entropy, stats); + } + } + GetEntropyUnrefinedHelper(0, i, &x_prev, &i_prev, bit_entropy, stats); + + bit_entropy->entropy += VP8LFastSLog2(bit_entropy->sum); +} + +static void GetCombinedEntropyUnrefined_C(const uint32_t X[], + const uint32_t Y[], + int length, + VP8LBitEntropy* const bit_entropy, + VP8LStreaks* const stats) { + int i = 1; + int i_prev = 0; + uint32_t xy_prev = X[0] + Y[0]; + + memset(stats, 0, sizeof(*stats)); + VP8LBitEntropyInit(bit_entropy); + + for (i = 1; i < length; ++i) { + const uint32_t xy = X[i] + Y[i]; + if (xy != xy_prev) { + GetEntropyUnrefinedHelper(xy, i, &xy_prev, &i_prev, bit_entropy, stats); + } + } + GetEntropyUnrefinedHelper(0, i, &xy_prev, &i_prev, bit_entropy, stats); + + bit_entropy->entropy += VP8LFastSLog2(bit_entropy->sum); +} + +//------------------------------------------------------------------------------ + +void VP8LSubtractGreenFromBlueAndRed_C(uint32_t* argb_data, int num_pixels) { + int i; + for (i = 0; i < num_pixels; ++i) { + const int argb = argb_data[i]; + const int green = (argb >> 8) & 0xff; + const uint32_t new_r = (((argb >> 16) & 0xff) - green) & 0xff; + const uint32_t new_b = (((argb >> 0) & 0xff) - green) & 0xff; + argb_data[i] = (argb & 0xff00ff00u) | (new_r << 16) | new_b; + } +} + +static WEBP_INLINE int ColorTransformDelta(int8_t color_pred, int8_t color) { + return ((int)color_pred * color) >> 5; +} + +static WEBP_INLINE int8_t U32ToS8(uint32_t v) { + return (int8_t)(v & 0xff); +} + +void VP8LTransformColor_C(const VP8LMultipliers* const m, uint32_t* data, + int num_pixels) { + int i; + for (i = 0; i < num_pixels; ++i) { + const uint32_t argb = data[i]; + const int8_t green = U32ToS8(argb >> 8); + const int8_t red = U32ToS8(argb >> 16); + int new_red = red & 0xff; + int new_blue = argb & 0xff; + new_red -= ColorTransformDelta(m->green_to_red_, green); + new_red &= 0xff; + new_blue -= ColorTransformDelta(m->green_to_blue_, green); + new_blue -= ColorTransformDelta(m->red_to_blue_, red); + new_blue &= 0xff; + data[i] = (argb & 0xff00ff00u) | (new_red << 16) | (new_blue); + } +} + +static WEBP_INLINE uint8_t TransformColorRed(uint8_t green_to_red, + uint32_t argb) { + const int8_t green = U32ToS8(argb >> 8); + int new_red = argb >> 16; + new_red -= ColorTransformDelta(green_to_red, green); + return (new_red & 0xff); +} + +static WEBP_INLINE uint8_t TransformColorBlue(uint8_t green_to_blue, + uint8_t red_to_blue, + uint32_t argb) { + const int8_t green = U32ToS8(argb >> 8); + const int8_t red = U32ToS8(argb >> 16); + uint8_t new_blue = argb & 0xff; + new_blue -= ColorTransformDelta(green_to_blue, green); + new_blue -= ColorTransformDelta(red_to_blue, red); + return (new_blue & 0xff); +} + +void VP8LCollectColorRedTransforms_C(const uint32_t* argb, int stride, + int tile_width, int tile_height, + int green_to_red, int histo[]) { + while (tile_height-- > 0) { + int x; + for (x = 0; x < tile_width; ++x) { + ++histo[TransformColorRed((uint8_t)green_to_red, argb[x])]; + } + argb += stride; + } 
+} + +void VP8LCollectColorBlueTransforms_C(const uint32_t* argb, int stride, + int tile_width, int tile_height, + int green_to_blue, int red_to_blue, + int histo[]) { + while (tile_height-- > 0) { + int x; + for (x = 0; x < tile_width; ++x) { + ++histo[TransformColorBlue((uint8_t)green_to_blue, (uint8_t)red_to_blue, + argb[x])]; + } + argb += stride; + } +} + +//------------------------------------------------------------------------------ + +static int VectorMismatch_C(const uint32_t* const array1, + const uint32_t* const array2, int length) { + int match_len = 0; + + while (match_len < length && array1[match_len] == array2[match_len]) { + ++match_len; + } + return match_len; +} + +// Bundles multiple (1, 2, 4 or 8) pixels into a single pixel. +void VP8LBundleColorMap_C(const uint8_t* const row, int width, int xbits, + uint32_t* dst) { + int x; + if (xbits > 0) { + const int bit_depth = 1 << (3 - xbits); + const int mask = (1 << xbits) - 1; + uint32_t code = 0xff000000; + for (x = 0; x < width; ++x) { + const int xsub = x & mask; + if (xsub == 0) { + code = 0xff000000; + } + code |= row[x] << (8 + bit_depth * xsub); + dst[x >> xbits] = code; + } + } else { + for (x = 0; x < width; ++x) dst[x] = 0xff000000 | (row[x] << 8); + } +} + +//------------------------------------------------------------------------------ + +static double ExtraCost_C(const uint32_t* population, int length) { + int i; + double cost = 0.; + for (i = 2; i < length - 2; ++i) cost += (i >> 1) * population[i + 2]; + return cost; +} + +static double ExtraCostCombined_C(const uint32_t* X, const uint32_t* Y, + int length) { + int i; + double cost = 0.; + for (i = 2; i < length - 2; ++i) { + const int xy = X[i + 2] + Y[i + 2]; + cost += (i >> 1) * xy; + } + return cost; +} + +//------------------------------------------------------------------------------ + +static void AddVector_C(const uint32_t* a, const uint32_t* b, uint32_t* out, + int size) { + int i; + for (i = 0; i < size; ++i) out[i] = a[i] + b[i]; +} + +static void AddVectorEq_C(const uint32_t* a, uint32_t* out, int size) { + int i; + for (i = 0; i < size; ++i) out[i] += a[i]; +} + +#define ADD(X, ARG, LEN) do { \ + if (a->is_used_[X]) { \ + if (b->is_used_[X]) { \ + VP8LAddVector(a->ARG, b->ARG, out->ARG, (LEN)); \ + } else { \ + memcpy(&out->ARG[0], &a->ARG[0], (LEN) * sizeof(out->ARG[0])); \ + } \ + } else if (b->is_used_[X]) { \ + memcpy(&out->ARG[0], &b->ARG[0], (LEN) * sizeof(out->ARG[0])); \ + } else { \ + memset(&out->ARG[0], 0, (LEN) * sizeof(out->ARG[0])); \ + } \ +} while (0) + +#define ADD_EQ(X, ARG, LEN) do { \ + if (a->is_used_[X]) { \ + if (out->is_used_[X]) { \ + VP8LAddVectorEq(a->ARG, out->ARG, (LEN)); \ + } else { \ + memcpy(&out->ARG[0], &a->ARG[0], (LEN) * sizeof(out->ARG[0])); \ + } \ + } \ +} while (0) + +void VP8LHistogramAdd(const VP8LHistogram* const a, + const VP8LHistogram* const b, VP8LHistogram* const out) { + int i; + const int literal_size = VP8LHistogramNumCodes(a->palette_code_bits_); + assert(a->palette_code_bits_ == b->palette_code_bits_); + + if (b != out) { + ADD(0, literal_, literal_size); + ADD(1, red_, NUM_LITERAL_CODES); + ADD(2, blue_, NUM_LITERAL_CODES); + ADD(3, alpha_, NUM_LITERAL_CODES); + ADD(4, distance_, NUM_DISTANCE_CODES); + for (i = 0; i < 5; ++i) { + out->is_used_[i] = (a->is_used_[i] | b->is_used_[i]); + } + } else { + ADD_EQ(0, literal_, literal_size); + ADD_EQ(1, red_, NUM_LITERAL_CODES); + ADD_EQ(2, blue_, NUM_LITERAL_CODES); + ADD_EQ(3, alpha_, NUM_LITERAL_CODES); + ADD_EQ(4, distance_, NUM_DISTANCE_CODES); 
+ for (i = 0; i < 5; ++i) out->is_used_[i] |= a->is_used_[i]; + } +} +#undef ADD +#undef ADD_EQ + +//------------------------------------------------------------------------------ +// Image transforms. + +static void PredictorSub0_C(const uint32_t* in, const uint32_t* upper, + int num_pixels, uint32_t* out) { + int i; + for (i = 0; i < num_pixels; ++i) out[i] = VP8LSubPixels(in[i], ARGB_BLACK); + (void)upper; +} + +static void PredictorSub1_C(const uint32_t* in, const uint32_t* upper, + int num_pixels, uint32_t* out) { + int i; + for (i = 0; i < num_pixels; ++i) out[i] = VP8LSubPixels(in[i], in[i - 1]); + (void)upper; +} + +// It subtracts the prediction from the input pixel and stores the residual +// in the output pixel. +#define GENERATE_PREDICTOR_SUB(PREDICTOR_I) \ +static void PredictorSub##PREDICTOR_I##_C(const uint32_t* in, \ + const uint32_t* upper, \ + int num_pixels, uint32_t* out) { \ + int x; \ + assert(upper != NULL); \ + for (x = 0; x < num_pixels; ++x) { \ + const uint32_t pred = \ + VP8LPredictor##PREDICTOR_I##_C(in[x - 1], upper + x); \ + out[x] = VP8LSubPixels(in[x], pred); \ + } \ +} + +GENERATE_PREDICTOR_SUB(2) +GENERATE_PREDICTOR_SUB(3) +GENERATE_PREDICTOR_SUB(4) +GENERATE_PREDICTOR_SUB(5) +GENERATE_PREDICTOR_SUB(6) +GENERATE_PREDICTOR_SUB(7) +GENERATE_PREDICTOR_SUB(8) +GENERATE_PREDICTOR_SUB(9) +GENERATE_PREDICTOR_SUB(10) +GENERATE_PREDICTOR_SUB(11) +GENERATE_PREDICTOR_SUB(12) +GENERATE_PREDICTOR_SUB(13) + +//------------------------------------------------------------------------------ + +VP8LProcessEncBlueAndRedFunc VP8LSubtractGreenFromBlueAndRed; + +VP8LTransformColorFunc VP8LTransformColor; + +VP8LCollectColorBlueTransformsFunc VP8LCollectColorBlueTransforms; +VP8LCollectColorRedTransformsFunc VP8LCollectColorRedTransforms; + +VP8LFastLog2SlowFunc VP8LFastLog2Slow; +VP8LFastLog2SlowFunc VP8LFastSLog2Slow; + +VP8LCostFunc VP8LExtraCost; +VP8LCostCombinedFunc VP8LExtraCostCombined; +VP8LCombinedShannonEntropyFunc VP8LCombinedShannonEntropy; + +VP8LGetEntropyUnrefinedFunc VP8LGetEntropyUnrefined; +VP8LGetCombinedEntropyUnrefinedFunc VP8LGetCombinedEntropyUnrefined; + +VP8LAddVectorFunc VP8LAddVector; +VP8LAddVectorEqFunc VP8LAddVectorEq; + +VP8LVectorMismatchFunc VP8LVectorMismatch; +VP8LBundleColorMapFunc VP8LBundleColorMap; + +VP8LPredictorAddSubFunc VP8LPredictorsSub[16]; +VP8LPredictorAddSubFunc VP8LPredictorsSub_C[16]; + +extern void VP8LEncDspInitSSE2(void); +extern void VP8LEncDspInitSSE41(void); +extern void VP8LEncDspInitNEON(void); +extern void VP8LEncDspInitMIPS32(void); +extern void VP8LEncDspInitMIPSdspR2(void); +extern void VP8LEncDspInitMSA(void); + +WEBP_DSP_INIT_FUNC(VP8LEncDspInit) { + VP8LDspInit(); + +#if !WEBP_NEON_OMIT_C_CODE + VP8LSubtractGreenFromBlueAndRed = VP8LSubtractGreenFromBlueAndRed_C; + + VP8LTransformColor = VP8LTransformColor_C; +#endif + + VP8LCollectColorBlueTransforms = VP8LCollectColorBlueTransforms_C; + VP8LCollectColorRedTransforms = VP8LCollectColorRedTransforms_C; + + VP8LFastLog2Slow = FastLog2Slow_C; + VP8LFastSLog2Slow = FastSLog2Slow_C; + + VP8LExtraCost = ExtraCost_C; + VP8LExtraCostCombined = ExtraCostCombined_C; + VP8LCombinedShannonEntropy = CombinedShannonEntropy_C; + + VP8LGetEntropyUnrefined = GetEntropyUnrefined_C; + VP8LGetCombinedEntropyUnrefined = GetCombinedEntropyUnrefined_C; + + VP8LAddVector = AddVector_C; + VP8LAddVectorEq = AddVectorEq_C; + + VP8LVectorMismatch = VectorMismatch_C; + VP8LBundleColorMap = VP8LBundleColorMap_C; + + VP8LPredictorsSub[0] = PredictorSub0_C; + VP8LPredictorsSub[1] = 
PredictorSub1_C; + VP8LPredictorsSub[2] = PredictorSub2_C; + VP8LPredictorsSub[3] = PredictorSub3_C; + VP8LPredictorsSub[4] = PredictorSub4_C; + VP8LPredictorsSub[5] = PredictorSub5_C; + VP8LPredictorsSub[6] = PredictorSub6_C; + VP8LPredictorsSub[7] = PredictorSub7_C; + VP8LPredictorsSub[8] = PredictorSub8_C; + VP8LPredictorsSub[9] = PredictorSub9_C; + VP8LPredictorsSub[10] = PredictorSub10_C; + VP8LPredictorsSub[11] = PredictorSub11_C; + VP8LPredictorsSub[12] = PredictorSub12_C; + VP8LPredictorsSub[13] = PredictorSub13_C; + VP8LPredictorsSub[14] = PredictorSub0_C; // <- padding security sentinels + VP8LPredictorsSub[15] = PredictorSub0_C; + + VP8LPredictorsSub_C[0] = PredictorSub0_C; + VP8LPredictorsSub_C[1] = PredictorSub1_C; + VP8LPredictorsSub_C[2] = PredictorSub2_C; + VP8LPredictorsSub_C[3] = PredictorSub3_C; + VP8LPredictorsSub_C[4] = PredictorSub4_C; + VP8LPredictorsSub_C[5] = PredictorSub5_C; + VP8LPredictorsSub_C[6] = PredictorSub6_C; + VP8LPredictorsSub_C[7] = PredictorSub7_C; + VP8LPredictorsSub_C[8] = PredictorSub8_C; + VP8LPredictorsSub_C[9] = PredictorSub9_C; + VP8LPredictorsSub_C[10] = PredictorSub10_C; + VP8LPredictorsSub_C[11] = PredictorSub11_C; + VP8LPredictorsSub_C[12] = PredictorSub12_C; + VP8LPredictorsSub_C[13] = PredictorSub13_C; + VP8LPredictorsSub_C[14] = PredictorSub0_C; // <- padding security sentinels + VP8LPredictorsSub_C[15] = PredictorSub0_C; + + // If defined, use CPUInfo() to overwrite some pointers with faster versions. + if (VP8GetCPUInfo != NULL) { +#if defined(WEBP_USE_SSE2) + if (VP8GetCPUInfo(kSSE2)) { + VP8LEncDspInitSSE2(); +#if defined(WEBP_USE_SSE41) + if (VP8GetCPUInfo(kSSE4_1)) { + VP8LEncDspInitSSE41(); + } +#endif + } +#endif +#if defined(WEBP_USE_MIPS32) + if (VP8GetCPUInfo(kMIPS32)) { + VP8LEncDspInitMIPS32(); + } +#endif +#if defined(WEBP_USE_MIPS_DSP_R2) + if (VP8GetCPUInfo(kMIPSdspR2)) { + VP8LEncDspInitMIPSdspR2(); + } +#endif +#if defined(WEBP_USE_MSA) + if (VP8GetCPUInfo(kMSA)) { + VP8LEncDspInitMSA(); + } +#endif + } + +#if defined(WEBP_USE_NEON) + if (WEBP_NEON_OMIT_C_CODE || + (VP8GetCPUInfo != NULL && VP8GetCPUInfo(kNEON))) { + VP8LEncDspInitNEON(); + } +#endif + + assert(VP8LSubtractGreenFromBlueAndRed != NULL); + assert(VP8LTransformColor != NULL); + assert(VP8LCollectColorBlueTransforms != NULL); + assert(VP8LCollectColorRedTransforms != NULL); + assert(VP8LFastLog2Slow != NULL); + assert(VP8LFastSLog2Slow != NULL); + assert(VP8LExtraCost != NULL); + assert(VP8LExtraCostCombined != NULL); + assert(VP8LCombinedShannonEntropy != NULL); + assert(VP8LGetEntropyUnrefined != NULL); + assert(VP8LGetCombinedEntropyUnrefined != NULL); + assert(VP8LAddVector != NULL); + assert(VP8LAddVectorEq != NULL); + assert(VP8LVectorMismatch != NULL); + assert(VP8LBundleColorMap != NULL); + assert(VP8LPredictorsSub[0] != NULL); + assert(VP8LPredictorsSub[1] != NULL); + assert(VP8LPredictorsSub[2] != NULL); + assert(VP8LPredictorsSub[3] != NULL); + assert(VP8LPredictorsSub[4] != NULL); + assert(VP8LPredictorsSub[5] != NULL); + assert(VP8LPredictorsSub[6] != NULL); + assert(VP8LPredictorsSub[7] != NULL); + assert(VP8LPredictorsSub[8] != NULL); + assert(VP8LPredictorsSub[9] != NULL); + assert(VP8LPredictorsSub[10] != NULL); + assert(VP8LPredictorsSub[11] != NULL); + assert(VP8LPredictorsSub[12] != NULL); + assert(VP8LPredictorsSub[13] != NULL); + assert(VP8LPredictorsSub[14] != NULL); + assert(VP8LPredictorsSub[15] != NULL); + assert(VP8LPredictorsSub_C[0] != NULL); + assert(VP8LPredictorsSub_C[1] != NULL); + assert(VP8LPredictorsSub_C[2] != NULL); + 
assert(VP8LPredictorsSub_C[3] != NULL); + assert(VP8LPredictorsSub_C[4] != NULL); + assert(VP8LPredictorsSub_C[5] != NULL); + assert(VP8LPredictorsSub_C[6] != NULL); + assert(VP8LPredictorsSub_C[7] != NULL); + assert(VP8LPredictorsSub_C[8] != NULL); + assert(VP8LPredictorsSub_C[9] != NULL); + assert(VP8LPredictorsSub_C[10] != NULL); + assert(VP8LPredictorsSub_C[11] != NULL); + assert(VP8LPredictorsSub_C[12] != NULL); + assert(VP8LPredictorsSub_C[13] != NULL); + assert(VP8LPredictorsSub_C[14] != NULL); + assert(VP8LPredictorsSub_C[15] != NULL); +} + +//------------------------------------------------------------------------------ diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc_mips32.c b/vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc_mips32.c new file mode 100644 index 00000000..b39f8e10 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc_mips32.c @@ -0,0 +1,397 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// MIPS version of lossless functions +// +// Author(s): Djordje Pesut (djordje.pesut@imgtec.com) +// Jovan Zelincevic (jovan.zelincevic@imgtec.com) + +#include "dsp_dsp.h" +#include "dsp_lossless.h" +#include "dsp_lossless_common.h" + +#if defined(WEBP_USE_MIPS32) + +#include <assert.h> +#include <math.h> +#include <stdio.h> +#include <stdlib.h> + +static float FastSLog2Slow_MIPS32(uint32_t v) { + assert(v >= LOG_LOOKUP_IDX_MAX); + if (v < APPROX_LOG_WITH_CORRECTION_MAX) { + uint32_t log_cnt, y, correction; + const int c24 = 24; + const float v_f = (float)v; + uint32_t temp; + + // Xf = 256 = 2^8 + // log_cnt is index of leading one in upper 24 bits + __asm__ volatile( + "clz %[log_cnt], %[v] \n\t" + "addiu %[y], $zero, 1 \n\t" + "subu %[log_cnt], %[c24], %[log_cnt] \n\t" + "sllv %[y], %[y], %[log_cnt] \n\t" + "srlv %[temp], %[v], %[log_cnt] \n\t" + : [log_cnt]"=&r"(log_cnt), [y]"=&r"(y), + [temp]"=r"(temp) + : [c24]"r"(c24), [v]"r"(v) + ); + + // vf = (2^log_cnt) * Xf; where y = 2^log_cnt and Xf < 256 + // Xf = floor(Xf) * (1 + (v % y) / v) + // log2(Xf) = log2(floor(Xf)) + log2(1 + (v % y) / v) + // The correction factor: log(1 + d) ~ d; for very small d values, so + // log2(1 + (v % y) / v) ~ LOG_2_RECIPROCAL * (v % y)/v + // LOG_2_RECIPROCAL ~ 23/16 + + // (v % y) = (v % 2^log_cnt) = v & (2^log_cnt - 1) + correction = (23 * (v & (y - 1))) >> 4; + return v_f * (kLog2Table[temp] + log_cnt) + correction; + } else { + return (float)(LOG_2_RECIPROCAL * v * log((double)v)); + } +} + +static float FastLog2Slow_MIPS32(uint32_t v) { + assert(v >= LOG_LOOKUP_IDX_MAX); + if (v < APPROX_LOG_WITH_CORRECTION_MAX) { + uint32_t log_cnt, y; + const int c24 = 24; + double log_2; + uint32_t temp; + + __asm__ volatile( + "clz %[log_cnt], %[v] \n\t" + "addiu %[y], $zero, 1 \n\t" + "subu %[log_cnt], %[c24], %[log_cnt] \n\t" + "sllv %[y], %[y], %[log_cnt] \n\t" + "srlv %[temp], %[v], %[log_cnt] \n\t" + : [log_cnt]"=&r"(log_cnt), [y]"=&r"(y), + [temp]"=r"(temp) + : [c24]"r"(c24), [v]"r"(v) + ); + + log_2 = kLog2Table[temp] + log_cnt; + if (v >= APPROX_LOG_MAX) { + // Since the division is still expensive, add this correction factor only + // for large values of
'v'. + + const uint32_t correction = (23 * (v & (y - 1))) >> 4; + log_2 += (double)correction / v; + } + return (float)log_2; + } else { + return (float)(LOG_2_RECIPROCAL * log((double)v)); + } +} + +// C version of this function: +// int i = 0; +// int64_t cost = 0; +// const uint32_t* pop = &population[4]; +// const uint32_t* LoopEnd = &population[length]; +// while (pop != LoopEnd) { +// ++i; +// cost += i * *pop; +// cost += i * *(pop + 1); +// pop += 2; +// } +// return (double)cost; +static double ExtraCost_MIPS32(const uint32_t* const population, int length) { + int i, temp0, temp1; + const uint32_t* pop = &population[4]; + const uint32_t* const LoopEnd = &population[length]; + + __asm__ volatile( + "mult $zero, $zero \n\t" + "xor %[i], %[i], %[i] \n\t" + "beq %[pop], %[LoopEnd], 2f \n\t" + "1: \n\t" + "lw %[temp0], 0(%[pop]) \n\t" + "lw %[temp1], 4(%[pop]) \n\t" + "addiu %[i], %[i], 1 \n\t" + "addiu %[pop], %[pop], 8 \n\t" + "madd %[i], %[temp0] \n\t" + "madd %[i], %[temp1] \n\t" + "bne %[pop], %[LoopEnd], 1b \n\t" + "2: \n\t" + "mfhi %[temp0] \n\t" + "mflo %[temp1] \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), + [i]"=&r"(i), [pop]"+r"(pop) + : [LoopEnd]"r"(LoopEnd) + : "memory", "hi", "lo" + ); + + return (double)((int64_t)temp0 << 32 | temp1); +} + +// C version of this function: +// int i = 0; +// int64_t cost = 0; +// const uint32_t* pX = &X[4]; +// const uint32_t* pY = &Y[4]; +// const uint32_t* LoopEnd = &X[length]; +// while (pX != LoopEnd) { +// const uint32_t xy0 = *pX + *pY; +// const uint32_t xy1 = *(pX + 1) + *(pY + 1); +// ++i; +// cost += i * xy0; +// cost += i * xy1; +// pX += 2; +// pY += 2; +// } +// return (double)cost; +static double ExtraCostCombined_MIPS32(const uint32_t* const X, + const uint32_t* const Y, int length) { + int i, temp0, temp1, temp2, temp3; + const uint32_t* pX = &X[4]; + const uint32_t* pY = &Y[4]; + const uint32_t* const LoopEnd = &X[length]; + + __asm__ volatile( + "mult $zero, $zero \n\t" + "xor %[i], %[i], %[i] \n\t" + "beq %[pX], %[LoopEnd], 2f \n\t" + "1: \n\t" + "lw %[temp0], 0(%[pX]) \n\t" + "lw %[temp1], 0(%[pY]) \n\t" + "lw %[temp2], 4(%[pX]) \n\t" + "lw %[temp3], 4(%[pY]) \n\t" + "addiu %[i], %[i], 1 \n\t" + "addu %[temp0], %[temp0], %[temp1] \n\t" + "addu %[temp2], %[temp2], %[temp3] \n\t" + "addiu %[pX], %[pX], 8 \n\t" + "addiu %[pY], %[pY], 8 \n\t" + "madd %[i], %[temp0] \n\t" + "madd %[i], %[temp2] \n\t" + "bne %[pX], %[LoopEnd], 1b \n\t" + "2: \n\t" + "mfhi %[temp0] \n\t" + "mflo %[temp1] \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), + [temp2]"=&r"(temp2), [temp3]"=&r"(temp3), + [i]"=&r"(i), [pX]"+r"(pX), [pY]"+r"(pY) + : [LoopEnd]"r"(LoopEnd) + : "memory", "hi", "lo" + ); + + return (double)((int64_t)temp0 << 32 | temp1); +} + +#define HUFFMAN_COST_PASS \ + __asm__ volatile( \ + "sll %[temp1], %[temp0], 3 \n\t" \ + "addiu %[temp3], %[streak], -3 \n\t" \ + "addu %[temp2], %[pstreaks], %[temp1] \n\t" \ + "blez %[temp3], 1f \n\t" \ + "srl %[temp1], %[temp1], 1 \n\t" \ + "addu %[temp3], %[pcnts], %[temp1] \n\t" \ + "lw %[temp0], 4(%[temp2]) \n\t" \ + "lw %[temp1], 0(%[temp3]) \n\t" \ + "addu %[temp0], %[temp0], %[streak] \n\t" \ + "addiu %[temp1], %[temp1], 1 \n\t" \ + "sw %[temp0], 4(%[temp2]) \n\t" \ + "sw %[temp1], 0(%[temp3]) \n\t" \ + "b 2f \n\t" \ + "1: \n\t" \ + "lw %[temp0], 0(%[temp2]) \n\t" \ + "addu %[temp0], %[temp0], %[streak] \n\t" \ + "sw %[temp0], 0(%[temp2]) \n\t" \ + "2: \n\t" \ + : [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), \ + [temp3]"=&r"(temp3), [temp0]"+r"(temp0) \ + : [pstreaks]"r"(pstreaks), 
[pcnts]"r"(pcnts), \ + [streak]"r"(streak) \ + : "memory" \ + ); + +// Returns the various RLE counts +static WEBP_INLINE void GetEntropyUnrefinedHelper( + uint32_t val, int i, uint32_t* const val_prev, int* const i_prev, + VP8LBitEntropy* const bit_entropy, VP8LStreaks* const stats) { + int* const pstreaks = &stats->streaks[0][0]; + int* const pcnts = &stats->counts[0]; + int temp0, temp1, temp2, temp3; + const int streak = i - *i_prev; + + // Gather info for the bit entropy. + if (*val_prev != 0) { + bit_entropy->sum += (*val_prev) * streak; + bit_entropy->nonzeros += streak; + bit_entropy->nonzero_code = *i_prev; + bit_entropy->entropy -= VP8LFastSLog2(*val_prev) * streak; + if (bit_entropy->max_val < *val_prev) { + bit_entropy->max_val = *val_prev; + } + } + + // Gather info for the Huffman cost. + temp0 = (*val_prev != 0); + HUFFMAN_COST_PASS + + *val_prev = val; + *i_prev = i; +} + +static void GetEntropyUnrefined_MIPS32(const uint32_t X[], int length, + VP8LBitEntropy* const bit_entropy, + VP8LStreaks* const stats) { + int i; + int i_prev = 0; + uint32_t x_prev = X[0]; + + memset(stats, 0, sizeof(*stats)); + VP8LBitEntropyInit(bit_entropy); + + for (i = 1; i < length; ++i) { + const uint32_t x = X[i]; + if (x != x_prev) { + GetEntropyUnrefinedHelper(x, i, &x_prev, &i_prev, bit_entropy, stats); + } + } + GetEntropyUnrefinedHelper(0, i, &x_prev, &i_prev, bit_entropy, stats); + + bit_entropy->entropy += VP8LFastSLog2(bit_entropy->sum); +} + +static void GetCombinedEntropyUnrefined_MIPS32(const uint32_t X[], + const uint32_t Y[], + int length, + VP8LBitEntropy* const entropy, + VP8LStreaks* const stats) { + int i = 1; + int i_prev = 0; + uint32_t xy_prev = X[0] + Y[0]; + + memset(stats, 0, sizeof(*stats)); + VP8LBitEntropyInit(entropy); + + for (i = 1; i < length; ++i) { + const uint32_t xy = X[i] + Y[i]; + if (xy != xy_prev) { + GetEntropyUnrefinedHelper(xy, i, &xy_prev, &i_prev, entropy, stats); + } + } + GetEntropyUnrefinedHelper(0, i, &xy_prev, &i_prev, entropy, stats); + + entropy->entropy += VP8LFastSLog2(entropy->sum); +} + +#define ASM_START \ + __asm__ volatile( \ + ".set push \n\t" \ + ".set at \n\t" \ + ".set macro \n\t" \ + "1: \n\t" + +// P2 = P0 + P1 +// A..D - offsets +// E - temp variable to tell macro +// if pointer should be incremented +// literal_ and successive histograms could be unaligned +// so we must use ulw and usw +#define ADD_TO_OUT(A, B, C, D, E, P0, P1, P2) \ + "ulw %[temp0], " #A "(%[" #P0 "]) \n\t" \ + "ulw %[temp1], " #B "(%[" #P0 "]) \n\t" \ + "ulw %[temp2], " #C "(%[" #P0 "]) \n\t" \ + "ulw %[temp3], " #D "(%[" #P0 "]) \n\t" \ + "ulw %[temp4], " #A "(%[" #P1 "]) \n\t" \ + "ulw %[temp5], " #B "(%[" #P1 "]) \n\t" \ + "ulw %[temp6], " #C "(%[" #P1 "]) \n\t" \ + "ulw %[temp7], " #D "(%[" #P1 "]) \n\t" \ + "addu %[temp4], %[temp4], %[temp0] \n\t" \ + "addu %[temp5], %[temp5], %[temp1] \n\t" \ + "addu %[temp6], %[temp6], %[temp2] \n\t" \ + "addu %[temp7], %[temp7], %[temp3] \n\t" \ + "addiu %[" #P0 "], %[" #P0 "], 16 \n\t" \ + ".if " #E " == 1 \n\t" \ + "addiu %[" #P1 "], %[" #P1 "], 16 \n\t" \ + ".endif \n\t" \ + "usw %[temp4], " #A "(%[" #P2 "]) \n\t" \ + "usw %[temp5], " #B "(%[" #P2 "]) \n\t" \ + "usw %[temp6], " #C "(%[" #P2 "]) \n\t" \ + "usw %[temp7], " #D "(%[" #P2 "]) \n\t" \ + "addiu %[" #P2 "], %[" #P2 "], 16 \n\t" \ + "bne %[" #P0 "], %[LoopEnd], 1b \n\t" \ + ".set pop \n\t" \ + +#define ASM_END_COMMON_0 \ + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), \ + [temp2]"=&r"(temp2), [temp3]"=&r"(temp3), \ + [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), \ + 
[temp6]"=&r"(temp6), [temp7]"=&r"(temp7), \ + [pa]"+r"(pa), [pout]"+r"(pout) + +#define ASM_END_COMMON_1 \ + : [LoopEnd]"r"(LoopEnd) \ + : "memory", "at" \ + ); + +#define ASM_END_0 \ + ASM_END_COMMON_0 \ + , [pb]"+r"(pb) \ + ASM_END_COMMON_1 + +#define ASM_END_1 \ + ASM_END_COMMON_0 \ + ASM_END_COMMON_1 + +static void AddVector_MIPS32(const uint32_t* pa, const uint32_t* pb, + uint32_t* pout, int size) { + uint32_t temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7; + const uint32_t end = ((size) / 4) * 4; + const uint32_t* const LoopEnd = pa + end; + int i; + ASM_START + ADD_TO_OUT(0, 4, 8, 12, 1, pa, pb, pout) + ASM_END_0 + for (i = end; i < size; ++i) pout[i] = pa[i] + pb[i]; +} + +static void AddVectorEq_MIPS32(const uint32_t* pa, uint32_t* pout, int size) { + uint32_t temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7; + const uint32_t end = ((size) / 4) * 4; + const uint32_t* const LoopEnd = pa + end; + int i; + ASM_START + ADD_TO_OUT(0, 4, 8, 12, 0, pa, pout, pout) + ASM_END_1 + for (i = end; i < size; ++i) pout[i] += pa[i]; +} + +#undef ASM_END_1 +#undef ASM_END_0 +#undef ASM_END_COMMON_1 +#undef ASM_END_COMMON_0 +#undef ADD_TO_OUT +#undef ASM_START + +//------------------------------------------------------------------------------ +// Entry point + +extern void VP8LEncDspInitMIPS32(void); + +WEBP_TSAN_IGNORE_FUNCTION void VP8LEncDspInitMIPS32(void) { + VP8LFastSLog2Slow = FastSLog2Slow_MIPS32; + VP8LFastLog2Slow = FastLog2Slow_MIPS32; + VP8LExtraCost = ExtraCost_MIPS32; + VP8LExtraCostCombined = ExtraCostCombined_MIPS32; + VP8LGetEntropyUnrefined = GetEntropyUnrefined_MIPS32; + VP8LGetCombinedEntropyUnrefined = GetCombinedEntropyUnrefined_MIPS32; + VP8LAddVector = AddVector_MIPS32; + VP8LAddVectorEq = AddVectorEq_MIPS32; +} + +#else // !WEBP_USE_MIPS32 + +WEBP_DSP_INIT_STUB(VP8LEncDspInitMIPS32) + +#endif // WEBP_USE_MIPS32 diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc_mips_dsp_r2.c b/vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc_mips_dsp_r2.c new file mode 100644 index 00000000..fc0e4c34 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc_mips_dsp_r2.c @@ -0,0 +1,281 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Image transform methods for lossless encoder. 
+// +// Author(s): Djordje Pesut (djordje.pesut@imgtec.com) +// Jovan Zelincevic (jovan.zelincevic@imgtec.com) + +#include "dsp_dsp.h" + +#if defined(WEBP_USE_MIPS_DSP_R2) + +#include "dsp_lossless.h" + +static void SubtractGreenFromBlueAndRed_MIPSdspR2(uint32_t* argb_data, + int num_pixels) { + uint32_t temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7; + uint32_t* const p_loop1_end = argb_data + (num_pixels & ~3); + uint32_t* const p_loop2_end = p_loop1_end + (num_pixels & 3); + __asm__ volatile ( + ".set push \n\t" + ".set noreorder \n\t" + "beq %[argb_data], %[p_loop1_end], 3f \n\t" + " nop \n\t" + "0: \n\t" + "lw %[temp0], 0(%[argb_data]) \n\t" + "lw %[temp1], 4(%[argb_data]) \n\t" + "lw %[temp2], 8(%[argb_data]) \n\t" + "lw %[temp3], 12(%[argb_data]) \n\t" + "ext %[temp4], %[temp0], 8, 8 \n\t" + "ext %[temp5], %[temp1], 8, 8 \n\t" + "ext %[temp6], %[temp2], 8, 8 \n\t" + "ext %[temp7], %[temp3], 8, 8 \n\t" + "addiu %[argb_data], %[argb_data], 16 \n\t" + "replv.ph %[temp4], %[temp4] \n\t" + "replv.ph %[temp5], %[temp5] \n\t" + "replv.ph %[temp6], %[temp6] \n\t" + "replv.ph %[temp7], %[temp7] \n\t" + "subu.qb %[temp0], %[temp0], %[temp4] \n\t" + "subu.qb %[temp1], %[temp1], %[temp5] \n\t" + "subu.qb %[temp2], %[temp2], %[temp6] \n\t" + "subu.qb %[temp3], %[temp3], %[temp7] \n\t" + "sw %[temp0], -16(%[argb_data]) \n\t" + "sw %[temp1], -12(%[argb_data]) \n\t" + "sw %[temp2], -8(%[argb_data]) \n\t" + "bne %[argb_data], %[p_loop1_end], 0b \n\t" + " sw %[temp3], -4(%[argb_data]) \n\t" + "3: \n\t" + "beq %[argb_data], %[p_loop2_end], 2f \n\t" + " nop \n\t" + "1: \n\t" + "lw %[temp0], 0(%[argb_data]) \n\t" + "addiu %[argb_data], %[argb_data], 4 \n\t" + "ext %[temp4], %[temp0], 8, 8 \n\t" + "replv.ph %[temp4], %[temp4] \n\t" + "subu.qb %[temp0], %[temp0], %[temp4] \n\t" + "bne %[argb_data], %[p_loop2_end], 1b \n\t" + " sw %[temp0], -4(%[argb_data]) \n\t" + "2: \n\t" + ".set pop \n\t" + : [argb_data]"+&r"(argb_data), [temp0]"=&r"(temp0), + [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), [temp3]"=&r"(temp3), + [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), [temp6]"=&r"(temp6), + [temp7]"=&r"(temp7) + : [p_loop1_end]"r"(p_loop1_end), [p_loop2_end]"r"(p_loop2_end) + : "memory" + ); +} + +static WEBP_INLINE uint32_t ColorTransformDelta(int8_t color_pred, + int8_t color) { + return (uint32_t)((int)(color_pred) * color) >> 5; +} + +static void TransformColor_MIPSdspR2(const VP8LMultipliers* const m, + uint32_t* data, int num_pixels) { + int temp0, temp1, temp2, temp3, temp4, temp5; + uint32_t argb, argb1, new_red, new_red1; + const uint32_t G_to_R = m->green_to_red_; + const uint32_t G_to_B = m->green_to_blue_; + const uint32_t R_to_B = m->red_to_blue_; + uint32_t* const p_loop_end = data + (num_pixels & ~1); + __asm__ volatile ( + ".set push \n\t" + ".set noreorder \n\t" + "beq %[data], %[p_loop_end], 1f \n\t" + " nop \n\t" + "replv.ph %[temp0], %[G_to_R] \n\t" + "replv.ph %[temp1], %[G_to_B] \n\t" + "replv.ph %[temp2], %[R_to_B] \n\t" + "shll.ph %[temp0], %[temp0], 8 \n\t" + "shll.ph %[temp1], %[temp1], 8 \n\t" + "shll.ph %[temp2], %[temp2], 8 \n\t" + "shra.ph %[temp0], %[temp0], 8 \n\t" + "shra.ph %[temp1], %[temp1], 8 \n\t" + "shra.ph %[temp2], %[temp2], 8 \n\t" + "0: \n\t" + "lw %[argb], 0(%[data]) \n\t" + "lw %[argb1], 4(%[data]) \n\t" + "lhu %[new_red], 2(%[data]) \n\t" + "lhu %[new_red1], 6(%[data]) \n\t" + "precrq.qb.ph %[temp3], %[argb], %[argb1] \n\t" + "precr.qb.ph %[temp4], %[argb], %[argb1] \n\t" + "preceu.ph.qbra %[temp3], %[temp3] \n\t" + "preceu.ph.qbla %[temp4], %[temp4] \n\t" + "shll.ph 
%[temp3], %[temp3], 8 \n\t" + "shll.ph %[temp4], %[temp4], 8 \n\t" + "shra.ph %[temp3], %[temp3], 8 \n\t" + "shra.ph %[temp4], %[temp4], 8 \n\t" + "mul.ph %[temp5], %[temp3], %[temp0] \n\t" + "mul.ph %[temp3], %[temp3], %[temp1] \n\t" + "mul.ph %[temp4], %[temp4], %[temp2] \n\t" + "addiu %[data], %[data], 8 \n\t" + "ins %[new_red1], %[new_red], 16, 16 \n\t" + "ins %[argb1], %[argb], 16, 16 \n\t" + "shra.ph %[temp5], %[temp5], 5 \n\t" + "shra.ph %[temp3], %[temp3], 5 \n\t" + "shra.ph %[temp4], %[temp4], 5 \n\t" + "subu.ph %[new_red1], %[new_red1], %[temp5] \n\t" + "subu.ph %[argb1], %[argb1], %[temp3] \n\t" + "preceu.ph.qbra %[temp5], %[new_red1] \n\t" + "subu.ph %[argb1], %[argb1], %[temp4] \n\t" + "preceu.ph.qbra %[temp3], %[argb1] \n\t" + "sb %[temp5], -2(%[data]) \n\t" + "sb %[temp3], -4(%[data]) \n\t" + "sra %[temp5], %[temp5], 16 \n\t" + "sra %[temp3], %[temp3], 16 \n\t" + "sb %[temp5], -6(%[data]) \n\t" + "bne %[data], %[p_loop_end], 0b \n\t" + " sb %[temp3], -8(%[data]) \n\t" + "1: \n\t" + ".set pop \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [new_red1]"=&r"(new_red1), [new_red]"=&r"(new_red), + [argb]"=&r"(argb), [argb1]"=&r"(argb1), [data]"+&r"(data) + : [G_to_R]"r"(G_to_R), [R_to_B]"r"(R_to_B), + [G_to_B]"r"(G_to_B), [p_loop_end]"r"(p_loop_end) + : "memory", "hi", "lo" + ); + + if (num_pixels & 1) { + const uint32_t argb_ = data[0]; + const uint32_t green = argb_ >> 8; + const uint32_t red = argb_ >> 16; + uint32_t new_blue = argb_; + new_red = red; + new_red -= ColorTransformDelta(m->green_to_red_, green); + new_red &= 0xff; + new_blue -= ColorTransformDelta(m->green_to_blue_, green); + new_blue -= ColorTransformDelta(m->red_to_blue_, red); + new_blue &= 0xff; + data[0] = (argb_ & 0xff00ff00u) | (new_red << 16) | (new_blue); + } +} + +static WEBP_INLINE uint8_t TransformColorBlue(uint8_t green_to_blue, + uint8_t red_to_blue, + uint32_t argb) { + const uint32_t green = argb >> 8; + const uint32_t red = argb >> 16; + uint8_t new_blue = argb; + new_blue -= ColorTransformDelta(green_to_blue, green); + new_blue -= ColorTransformDelta(red_to_blue, red); + return (new_blue & 0xff); +} + +static void CollectColorBlueTransforms_MIPSdspR2(const uint32_t* argb, + int stride, + int tile_width, + int tile_height, + int green_to_blue, + int red_to_blue, + int histo[]) { + const int rtb = (red_to_blue << 16) | (red_to_blue & 0xffff); + const int gtb = (green_to_blue << 16) | (green_to_blue & 0xffff); + const uint32_t mask = 0xff00ffu; + while (tile_height-- > 0) { + int x; + const uint32_t* p_argb = argb; + argb += stride; + for (x = 0; x < (tile_width >> 1); ++x) { + int temp0, temp1, temp2, temp3, temp4, temp5, temp6; + __asm__ volatile ( + "lw %[temp0], 0(%[p_argb]) \n\t" + "lw %[temp1], 4(%[p_argb]) \n\t" + "precr.qb.ph %[temp2], %[temp0], %[temp1] \n\t" + "ins %[temp1], %[temp0], 16, 16 \n\t" + "shra.ph %[temp2], %[temp2], 8 \n\t" + "shra.ph %[temp3], %[temp1], 8 \n\t" + "mul.ph %[temp5], %[temp2], %[rtb] \n\t" + "mul.ph %[temp6], %[temp3], %[gtb] \n\t" + "and %[temp4], %[temp1], %[mask] \n\t" + "addiu %[p_argb], %[p_argb], 8 \n\t" + "shra.ph %[temp5], %[temp5], 5 \n\t" + "shra.ph %[temp6], %[temp6], 5 \n\t" + "subu.qb %[temp2], %[temp4], %[temp5] \n\t" + "subu.qb %[temp2], %[temp2], %[temp6] \n\t" + : [p_argb]"+&r"(p_argb), [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), + [temp2]"=&r"(temp2), [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), + [temp5]"=&r"(temp5), [temp6]"=&r"(temp6) + : [rtb]"r"(rtb), 
[gtb]"r"(gtb), [mask]"r"(mask) + : "memory", "hi", "lo" + ); + ++histo[(uint8_t)(temp2 >> 16)]; + ++histo[(uint8_t)temp2]; + } + if (tile_width & 1) { + ++histo[TransformColorBlue(green_to_blue, red_to_blue, *p_argb)]; + } + } +} + +static WEBP_INLINE uint8_t TransformColorRed(uint8_t green_to_red, + uint32_t argb) { + const uint32_t green = argb >> 8; + uint32_t new_red = argb >> 16; + new_red -= ColorTransformDelta(green_to_red, green); + return (new_red & 0xff); +} + +static void CollectColorRedTransforms_MIPSdspR2(const uint32_t* argb, + int stride, + int tile_width, + int tile_height, + int green_to_red, + int histo[]) { + const int gtr = (green_to_red << 16) | (green_to_red & 0xffff); + while (tile_height-- > 0) { + int x; + const uint32_t* p_argb = argb; + argb += stride; + for (x = 0; x < (tile_width >> 1); ++x) { + int temp0, temp1, temp2, temp3, temp4; + __asm__ volatile ( + "lw %[temp0], 0(%[p_argb]) \n\t" + "lw %[temp1], 4(%[p_argb]) \n\t" + "precrq.ph.w %[temp4], %[temp0], %[temp1] \n\t" + "ins %[temp1], %[temp0], 16, 16 \n\t" + "shra.ph %[temp3], %[temp1], 8 \n\t" + "mul.ph %[temp2], %[temp3], %[gtr] \n\t" + "addiu %[p_argb], %[p_argb], 8 \n\t" + "shra.ph %[temp2], %[temp2], 5 \n\t" + "subu.qb %[temp2], %[temp4], %[temp2] \n\t" + : [p_argb]"+&r"(p_argb), [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), + [temp2]"=&r"(temp2), [temp3]"=&r"(temp3), [temp4]"=&r"(temp4) + : [gtr]"r"(gtr) + : "memory", "hi", "lo" + ); + ++histo[(uint8_t)(temp2 >> 16)]; + ++histo[(uint8_t)temp2]; + } + if (tile_width & 1) { + ++histo[TransformColorRed(green_to_red, *p_argb)]; + } + } +} + +//------------------------------------------------------------------------------ +// Entry point + +extern void VP8LEncDspInitMIPSdspR2(void); + +WEBP_TSAN_IGNORE_FUNCTION void VP8LEncDspInitMIPSdspR2(void) { + VP8LSubtractGreenFromBlueAndRed = SubtractGreenFromBlueAndRed_MIPSdspR2; + VP8LTransformColor = TransformColor_MIPSdspR2; + VP8LCollectColorBlueTransforms = CollectColorBlueTransforms_MIPSdspR2; + VP8LCollectColorRedTransforms = CollectColorRedTransforms_MIPSdspR2; +} + +#else // !WEBP_USE_MIPS_DSP_R2 + +WEBP_DSP_INIT_STUB(VP8LEncDspInitMIPSdspR2) + +#endif // WEBP_USE_MIPS_DSP_R2 diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc_msa.c b/vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc_msa.c new file mode 100644 index 00000000..a1dd8705 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc_msa.c @@ -0,0 +1,148 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// MSA variant of Image transform methods for lossless encoder. 
+// +// Authors: Prashant Patil (Prashant.Patil@imgtec.com) + +#include "dsp_dsp.h" + +#if defined(WEBP_USE_MSA) + +#include "dsp_lossless.h" +#include "dsp_msa_macro.h" + +#define TRANSFORM_COLOR_8(src0, src1, dst0, dst1, c0, c1, mask0, mask1) do { \ + v8i16 g0, g1, t0, t1, t2, t3; \ + v4i32 t4, t5; \ + VSHF_B2_SH(src0, src0, src1, src1, mask0, mask0, g0, g1); \ + DOTP_SB2_SH(g0, g1, c0, c0, t0, t1); \ + SRAI_H2_SH(t0, t1, 5); \ + t0 = __msa_subv_h((v8i16)src0, t0); \ + t1 = __msa_subv_h((v8i16)src1, t1); \ + t4 = __msa_srli_w((v4i32)src0, 16); \ + t5 = __msa_srli_w((v4i32)src1, 16); \ + DOTP_SB2_SH(t4, t5, c1, c1, t2, t3); \ + SRAI_H2_SH(t2, t3, 5); \ + SUB2(t0, t2, t1, t3, t0, t1); \ + VSHF_B2_UB(src0, t0, src1, t1, mask1, mask1, dst0, dst1); \ +} while (0) + +#define TRANSFORM_COLOR_4(src, dst, c0, c1, mask0, mask1) do { \ + const v16i8 g0 = VSHF_SB(src, src, mask0); \ + v8i16 t0 = __msa_dotp_s_h(c0, g0); \ + v8i16 t1; \ + v4i32 t2; \ + t0 = SRAI_H(t0, 5); \ + t0 = __msa_subv_h((v8i16)src, t0); \ + t2 = __msa_srli_w((v4i32)src, 16); \ + t1 = __msa_dotp_s_h(c1, (v16i8)t2); \ + t1 = SRAI_H(t1, 5); \ + t0 = t0 - t1; \ + dst = VSHF_UB(src, t0, mask1); \ +} while (0) + +static void TransformColor_MSA(const VP8LMultipliers* const m, uint32_t* data, + int num_pixels) { + v16u8 src0, dst0; + const v16i8 g2br = (v16i8)__msa_fill_w(m->green_to_blue_ | + (m->green_to_red_ << 16)); + const v16i8 r2b = (v16i8)__msa_fill_w(m->red_to_blue_); + const v16u8 mask0 = { 1, 255, 1, 255, 5, 255, 5, 255, 9, 255, 9, 255, + 13, 255, 13, 255 }; + const v16u8 mask1 = { 16, 1, 18, 3, 20, 5, 22, 7, 24, 9, 26, 11, + 28, 13, 30, 15 }; + + while (num_pixels >= 8) { + v16u8 src1, dst1; + LD_UB2(data, 4, src0, src1); + TRANSFORM_COLOR_8(src0, src1, dst0, dst1, g2br, r2b, mask0, mask1); + ST_UB2(dst0, dst1, data, 4); + data += 8; + num_pixels -= 8; + } + if (num_pixels > 0) { + if (num_pixels >= 4) { + src0 = LD_UB(data); + TRANSFORM_COLOR_4(src0, dst0, g2br, r2b, mask0, mask1); + ST_UB(dst0, data); + data += 4; + num_pixels -= 4; + } + if (num_pixels > 0) { + src0 = LD_UB(data); + TRANSFORM_COLOR_4(src0, dst0, g2br, r2b, mask0, mask1); + if (num_pixels == 3) { + const uint64_t pix_d = __msa_copy_s_d((v2i64)dst0, 0); + const uint32_t pix_w = __msa_copy_s_w((v4i32)dst0, 2); + SD(pix_d, data + 0); + SW(pix_w, data + 2); + } else if (num_pixels == 2) { + const uint64_t pix_d = __msa_copy_s_d((v2i64)dst0, 0); + SD(pix_d, data); + } else { + const uint32_t pix_w = __msa_copy_s_w((v4i32)dst0, 0); + SW(pix_w, data); + } + } + } +} + +static void SubtractGreenFromBlueAndRed_MSA(uint32_t* argb_data, + int num_pixels) { + int i; + uint8_t* ptemp_data = (uint8_t*)argb_data; + v16u8 src0, dst0, tmp0; + const v16u8 mask = { 1, 255, 1, 255, 5, 255, 5, 255, 9, 255, 9, 255, + 13, 255, 13, 255 }; + + while (num_pixels >= 8) { + v16u8 src1, dst1, tmp1; + LD_UB2(ptemp_data, 16, src0, src1); + VSHF_B2_UB(src0, src1, src1, src0, mask, mask, tmp0, tmp1); + SUB2(src0, tmp0, src1, tmp1, dst0, dst1); + ST_UB2(dst0, dst1, ptemp_data, 16); + ptemp_data += 8 * 4; + num_pixels -= 8; + } + if (num_pixels > 0) { + if (num_pixels >= 4) { + src0 = LD_UB(ptemp_data); + tmp0 = VSHF_UB(src0, src0, mask); + dst0 = src0 - tmp0; + ST_UB(dst0, ptemp_data); + ptemp_data += 4 * 4; + num_pixels -= 4; + } + for (i = 0; i < num_pixels; i++) { + const uint8_t b = ptemp_data[0]; + const uint8_t g = ptemp_data[1]; + const uint8_t r = ptemp_data[2]; + ptemp_data[0] = (b - g) & 0xff; + ptemp_data[2] = (r - g) & 0xff; + ptemp_data += 4; + } + } +} + 
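+// (Editor's sketch, not part of the upstream sources: a scalar worked example of the subtract-green transform that the MSA path above vectorizes. For argb = 0x80FF4020, i.e. A=0x80 R=0xFF G=0x40 B=0x20: new_r = (0xFF - 0x40) & 0xff = 0xBF and new_b = (0x20 - 0x40) & 0xff = 0xE0, so the output pixel is 0x80BF40E0; alpha and green pass through unchanged.)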
+//------------------------------------------------------------------------------ +// Entry point + +extern void VP8LEncDspInitMSA(void); + +WEBP_TSAN_IGNORE_FUNCTION void VP8LEncDspInitMSA(void) { + VP8LSubtractGreenFromBlueAndRed = SubtractGreenFromBlueAndRed_MSA; + VP8LTransformColor = TransformColor_MSA; +} + +#else // !WEBP_USE_MSA + +WEBP_DSP_INIT_STUB(VP8LEncDspInitMSA) + +#endif // WEBP_USE_MSA diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc_neon.c b/vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc_neon.c new file mode 100644 index 00000000..23ebfaf7 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc_neon.c @@ -0,0 +1,144 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// NEON variant of methods for lossless encoder +// +// Author: Skal (pascal.massimino@gmail.com) + +#include "dsp_dsp.h" + +#if defined(WEBP_USE_NEON) + +#include <arm_neon.h> + +#include "dsp_lossless.h" +#include "dsp_neon.h" + +//------------------------------------------------------------------------------ +// Subtract-Green Transform + +// vtbl?_u8 are marked unavailable for iOS arm64 with Xcode < 6.3, use +// non-standard versions there. +#if defined(__APPLE__) && defined(__aarch64__) && \ + defined(__apple_build_version__) && (__apple_build_version__ < 6020037) +#define USE_VTBLQ +#endif + +#ifdef USE_VTBLQ +// 255 = byte will be zeroed +static const uint8_t kGreenShuffle[16] = { + 1, 255, 1, 255, 5, 255, 5, 255, 9, 255, 9, 255, 13, 255, 13, 255 +}; + +static WEBP_INLINE uint8x16_t DoGreenShuffle_NEON(const uint8x16_t argb, + const uint8x16_t shuffle) { + return vcombine_u8(vtbl1q_u8(argb, vget_low_u8(shuffle)), + vtbl1q_u8(argb, vget_high_u8(shuffle))); +} +#else // !USE_VTBLQ +// 255 = byte will be zeroed +static const uint8_t kGreenShuffle[8] = { 1, 255, 1, 255, 5, 255, 5, 255 }; + +static WEBP_INLINE uint8x16_t DoGreenShuffle_NEON(const uint8x16_t argb, + const uint8x8_t shuffle) { + return vcombine_u8(vtbl1_u8(vget_low_u8(argb), shuffle), + vtbl1_u8(vget_high_u8(argb), shuffle)); +} +#endif // USE_VTBLQ + +static void SubtractGreenFromBlueAndRed_NEON(uint32_t* argb_data, + int num_pixels) { + const uint32_t* const end = argb_data + (num_pixels & ~3); +#ifdef USE_VTBLQ + const uint8x16_t shuffle = vld1q_u8(kGreenShuffle); +#else + const uint8x8_t shuffle = vld1_u8(kGreenShuffle); +#endif + for (; argb_data < end; argb_data += 4) { + const uint8x16_t argb = vld1q_u8((uint8_t*)argb_data); + const uint8x16_t greens = DoGreenShuffle_NEON(argb, shuffle); + vst1q_u8((uint8_t*)argb_data, vsubq_u8(argb, greens)); + } + // fallthrough and finish off with plain-C + VP8LSubtractGreenFromBlueAndRed_C(argb_data, num_pixels & 3); +} + +//------------------------------------------------------------------------------ +// Color Transform + +static void TransformColor_NEON(const VP8LMultipliers* const m, + uint32_t* argb_data, int num_pixels) { + // sign-extended multiplying constants, pre-shifted by 6.
+#define CST(X) (((int16_t)(m->X << 8)) >> 6) + const int16_t rb[8] = { + CST(green_to_blue_), CST(green_to_red_), + CST(green_to_blue_), CST(green_to_red_), + CST(green_to_blue_), CST(green_to_red_), + CST(green_to_blue_), CST(green_to_red_) + }; + const int16x8_t mults_rb = vld1q_s16(rb); + const int16_t b2[8] = { + 0, CST(red_to_blue_), 0, CST(red_to_blue_), + 0, CST(red_to_blue_), 0, CST(red_to_blue_), + }; + const int16x8_t mults_b2 = vld1q_s16(b2); +#undef CST +#ifdef USE_VTBLQ + static const uint8_t kg0g0[16] = { + 255, 1, 255, 1, 255, 5, 255, 5, 255, 9, 255, 9, 255, 13, 255, 13 + }; + const uint8x16_t shuffle = vld1q_u8(kg0g0); +#else + static const uint8_t k0g0g[8] = { 255, 1, 255, 1, 255, 5, 255, 5 }; + const uint8x8_t shuffle = vld1_u8(k0g0g); +#endif + const uint32x4_t mask_rb = vdupq_n_u32(0x00ff00ffu); // red-blue masks + int i; + for (i = 0; i + 4 <= num_pixels; i += 4) { + const uint8x16_t in = vld1q_u8((uint8_t*)(argb_data + i)); + // 0 g 0 g + const uint8x16_t greens = DoGreenShuffle_NEON(in, shuffle); + // x dr x db1 + const int16x8_t A = vqdmulhq_s16(vreinterpretq_s16_u8(greens), mults_rb); + // r 0 b 0 + const int16x8_t B = vshlq_n_s16(vreinterpretq_s16_u8(in), 8); + // x db2 0 0 + const int16x8_t C = vqdmulhq_s16(B, mults_b2); + // 0 0 x db2 + const uint32x4_t D = vshrq_n_u32(vreinterpretq_u32_s16(C), 16); + // x dr x db + const int8x16_t E = vaddq_s8(vreinterpretq_s8_u32(D), + vreinterpretq_s8_s16(A)); + // 0 dr 0 db + const uint32x4_t F = vandq_u32(vreinterpretq_u32_s8(E), mask_rb); + const int8x16_t out = vsubq_s8(vreinterpretq_s8_u8(in), + vreinterpretq_s8_u32(F)); + vst1q_s8((int8_t*)(argb_data + i), out); + } + // fallthrough and finish off with plain-C + VP8LTransformColor_C(m, argb_data + i, num_pixels - i); +} + +#undef USE_VTBLQ + +//------------------------------------------------------------------------------ +// Entry point + +extern void VP8LEncDspInitNEON(void); + +WEBP_TSAN_IGNORE_FUNCTION void VP8LEncDspInitNEON(void) { + VP8LSubtractGreenFromBlueAndRed = SubtractGreenFromBlueAndRed_NEON; + VP8LTransformColor = TransformColor_NEON; +} + +#else // !WEBP_USE_NEON + +WEBP_DSP_INIT_STUB(VP8LEncDspInitNEON) + +#endif // WEBP_USE_NEON diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc_sse2.c b/vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc_sse2.c new file mode 100644 index 00000000..6fe681fa --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc_sse2.c @@ -0,0 +1,696 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. 
+// ----------------------------------------------------------------------------- +// +// SSE2 variant of methods for lossless encoder +// +// Author: Skal (pascal.massimino@gmail.com) + +#include "dsp_dsp.h" + +#if defined(WEBP_USE_SSE2) +#include <assert.h> +#include <emmintrin.h> +#include "dsp_lossless.h" +#include "dsp_common_sse2.h" +#include "dsp_lossless_common.h" + +// For sign-extended multiplying constants, pre-shifted by 5: +#define CST_5b(X) (((int16_t)((uint16_t)(X) << 8)) >> 5) + +//------------------------------------------------------------------------------ +// Subtract-Green Transform + +static void SubtractGreenFromBlueAndRed_SSE2(uint32_t* argb_data, + int num_pixels) { + int i; + for (i = 0; i + 4 <= num_pixels; i += 4) { + const __m128i in = _mm_loadu_si128((__m128i*)&argb_data[i]); // argb + const __m128i A = _mm_srli_epi16(in, 8); // 0 a 0 g + const __m128i B = _mm_shufflelo_epi16(A, _MM_SHUFFLE(2, 2, 0, 0)); + const __m128i C = _mm_shufflehi_epi16(B, _MM_SHUFFLE(2, 2, 0, 0)); // 0g0g + const __m128i out = _mm_sub_epi8(in, C); + _mm_storeu_si128((__m128i*)&argb_data[i], out); + } + // fallthrough and finish off with plain-C + if (i != num_pixels) { + VP8LSubtractGreenFromBlueAndRed_C(argb_data + i, num_pixels - i); + } +} + +//------------------------------------------------------------------------------ +// Color Transform + +#define MK_CST_16(HI, LO) \ + _mm_set1_epi32((int)(((uint32_t)(HI) << 16) | ((LO) & 0xffff))) + +static void TransformColor_SSE2(const VP8LMultipliers* const m, + uint32_t* argb_data, int num_pixels) { + const __m128i mults_rb = MK_CST_16(CST_5b(m->green_to_red_), + CST_5b(m->green_to_blue_)); + const __m128i mults_b2 = MK_CST_16(CST_5b(m->red_to_blue_), 0); + const __m128i mask_ag = _mm_set1_epi32(0xff00ff00); // alpha-green masks + const __m128i mask_rb = _mm_set1_epi32(0x00ff00ff); // red-blue masks + int i; + for (i = 0; i + 4 <= num_pixels; i += 4) { + const __m128i in = _mm_loadu_si128((__m128i*)&argb_data[i]); // argb + const __m128i A = _mm_and_si128(in, mask_ag); // a 0 g 0 + const __m128i B = _mm_shufflelo_epi16(A, _MM_SHUFFLE(2, 2, 0, 0)); + const __m128i C = _mm_shufflehi_epi16(B, _MM_SHUFFLE(2, 2, 0, 0)); // g0g0 + const __m128i D = _mm_mulhi_epi16(C, mults_rb); // x dr x db1 + const __m128i E = _mm_slli_epi16(in, 8); // r 0 b 0 + const __m128i F = _mm_mulhi_epi16(E, mults_b2); // x db2 0 0 + const __m128i G = _mm_srli_epi32(F, 16); // 0 0 x db2 + const __m128i H = _mm_add_epi8(G, D); // x dr x db + const __m128i I = _mm_and_si128(H, mask_rb); // 0 dr 0 db + const __m128i out = _mm_sub_epi8(in, I); + _mm_storeu_si128((__m128i*)&argb_data[i], out); + } + // fallthrough and finish off with plain-C + if (i != num_pixels) { + VP8LTransformColor_C(m, argb_data + i, num_pixels - i); + } +} + +//------------------------------------------------------------------------------ +#define SPAN 8 +static void CollectColorBlueTransforms_SSE2(const uint32_t* argb, int stride, + int tile_width, int tile_height, + int green_to_blue, int red_to_blue, + int histo[]) { + const __m128i mults_r = MK_CST_16(CST_5b(red_to_blue), 0); + const __m128i mults_g = MK_CST_16(0, CST_5b(green_to_blue)); + const __m128i mask_g = _mm_set1_epi32(0x00ff00); // green mask + const __m128i mask_b = _mm_set1_epi32(0x0000ff); // blue mask + int y; + for (y = 0; y < tile_height; ++y) { + const uint32_t* const src = argb + y * stride; + int i, x; + for (x = 0; x + SPAN <= tile_width; x += SPAN) { + uint16_t values[SPAN]; + const __m128i in0 = _mm_loadu_si128((__m128i*)&src[x + 0]); + const __m128i
in1 = _mm_loadu_si128((__m128i*)&src[x + SPAN / 2]); + const __m128i A0 = _mm_slli_epi16(in0, 8); // r 0 | b 0 + const __m128i A1 = _mm_slli_epi16(in1, 8); + const __m128i B0 = _mm_and_si128(in0, mask_g); // 0 0 | g 0 + const __m128i B1 = _mm_and_si128(in1, mask_g); + const __m128i C0 = _mm_mulhi_epi16(A0, mults_r); // x db | 0 0 + const __m128i C1 = _mm_mulhi_epi16(A1, mults_r); + const __m128i D0 = _mm_mulhi_epi16(B0, mults_g); // 0 0 | x db + const __m128i D1 = _mm_mulhi_epi16(B1, mults_g); + const __m128i E0 = _mm_sub_epi8(in0, D0); // x x | x b' + const __m128i E1 = _mm_sub_epi8(in1, D1); + const __m128i F0 = _mm_srli_epi32(C0, 16); // 0 0 | x db + const __m128i F1 = _mm_srli_epi32(C1, 16); + const __m128i G0 = _mm_sub_epi8(E0, F0); // 0 0 | x b' + const __m128i G1 = _mm_sub_epi8(E1, F1); + const __m128i H0 = _mm_and_si128(G0, mask_b); // 0 0 | 0 b + const __m128i H1 = _mm_and_si128(G1, mask_b); + const __m128i I = _mm_packs_epi32(H0, H1); // 0 b' | 0 b' + _mm_storeu_si128((__m128i*)values, I); + for (i = 0; i < SPAN; ++i) ++histo[values[i]]; + } + } + { + const int left_over = tile_width & (SPAN - 1); + if (left_over > 0) { + VP8LCollectColorBlueTransforms_C(argb + tile_width - left_over, stride, + left_over, tile_height, + green_to_blue, red_to_blue, histo); + } + } +} + +static void CollectColorRedTransforms_SSE2(const uint32_t* argb, int stride, + int tile_width, int tile_height, + int green_to_red, int histo[]) { + const __m128i mults_g = MK_CST_16(0, CST_5b(green_to_red)); + const __m128i mask_g = _mm_set1_epi32(0x00ff00); // green mask + const __m128i mask = _mm_set1_epi32(0xff); + + int y; + for (y = 0; y < tile_height; ++y) { + const uint32_t* const src = argb + y * stride; + int i, x; + for (x = 0; x + SPAN <= tile_width; x += SPAN) { + uint16_t values[SPAN]; + const __m128i in0 = _mm_loadu_si128((__m128i*)&src[x + 0]); + const __m128i in1 = _mm_loadu_si128((__m128i*)&src[x + SPAN / 2]); + const __m128i A0 = _mm_and_si128(in0, mask_g); // 0 0 | g 0 + const __m128i A1 = _mm_and_si128(in1, mask_g); + const __m128i B0 = _mm_srli_epi32(in0, 16); // 0 0 | x r + const __m128i B1 = _mm_srli_epi32(in1, 16); + const __m128i C0 = _mm_mulhi_epi16(A0, mults_g); // 0 0 | x dr + const __m128i C1 = _mm_mulhi_epi16(A1, mults_g); + const __m128i E0 = _mm_sub_epi8(B0, C0); // x x | x r' + const __m128i E1 = _mm_sub_epi8(B1, C1); + const __m128i F0 = _mm_and_si128(E0, mask); // 0 0 | 0 r' + const __m128i F1 = _mm_and_si128(E1, mask); + const __m128i I = _mm_packs_epi32(F0, F1); + _mm_storeu_si128((__m128i*)values, I); + for (i = 0; i < SPAN; ++i) ++histo[values[i]]; + } + } + { + const int left_over = tile_width & (SPAN - 1); + if (left_over > 0) { + VP8LCollectColorRedTransforms_C(argb + tile_width - left_over, stride, + left_over, tile_height, + green_to_red, histo); + } + } +} +#undef SPAN +#undef MK_CST_16 + +//------------------------------------------------------------------------------ + +// Note we are adding uint32_t's as *signed* int32's (using _mm_add_epi32). But +// that's ok since the histogram values are less than 1<<28 (max picture size). 
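+// (Editor's sketch of the bound: each histogram count is below 1 << 28, so the sum of two counts stays below 1 << 29, well inside the positive range of a signed 32-bit lane, and the _mm_add_epi32 additions cannot overflow.)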
+#define LINE_SIZE 16 // 8 or 16 +static void AddVector_SSE2(const uint32_t* a, const uint32_t* b, uint32_t* out, + int size) { + int i; + for (i = 0; i + LINE_SIZE <= size; i += LINE_SIZE) { + const __m128i a0 = _mm_loadu_si128((const __m128i*)&a[i + 0]); + const __m128i a1 = _mm_loadu_si128((const __m128i*)&a[i + 4]); +#if (LINE_SIZE == 16) + const __m128i a2 = _mm_loadu_si128((const __m128i*)&a[i + 8]); + const __m128i a3 = _mm_loadu_si128((const __m128i*)&a[i + 12]); +#endif + const __m128i b0 = _mm_loadu_si128((const __m128i*)&b[i + 0]); + const __m128i b1 = _mm_loadu_si128((const __m128i*)&b[i + 4]); +#if (LINE_SIZE == 16) + const __m128i b2 = _mm_loadu_si128((const __m128i*)&b[i + 8]); + const __m128i b3 = _mm_loadu_si128((const __m128i*)&b[i + 12]); +#endif + _mm_storeu_si128((__m128i*)&out[i + 0], _mm_add_epi32(a0, b0)); + _mm_storeu_si128((__m128i*)&out[i + 4], _mm_add_epi32(a1, b1)); +#if (LINE_SIZE == 16) + _mm_storeu_si128((__m128i*)&out[i + 8], _mm_add_epi32(a2, b2)); + _mm_storeu_si128((__m128i*)&out[i + 12], _mm_add_epi32(a3, b3)); +#endif + } + for (; i < size; ++i) { + out[i] = a[i] + b[i]; + } +} + +static void AddVectorEq_SSE2(const uint32_t* a, uint32_t* out, int size) { + int i; + for (i = 0; i + LINE_SIZE <= size; i += LINE_SIZE) { + const __m128i a0 = _mm_loadu_si128((const __m128i*)&a[i + 0]); + const __m128i a1 = _mm_loadu_si128((const __m128i*)&a[i + 4]); +#if (LINE_SIZE == 16) + const __m128i a2 = _mm_loadu_si128((const __m128i*)&a[i + 8]); + const __m128i a3 = _mm_loadu_si128((const __m128i*)&a[i + 12]); +#endif + const __m128i b0 = _mm_loadu_si128((const __m128i*)&out[i + 0]); + const __m128i b1 = _mm_loadu_si128((const __m128i*)&out[i + 4]); +#if (LINE_SIZE == 16) + const __m128i b2 = _mm_loadu_si128((const __m128i*)&out[i + 8]); + const __m128i b3 = _mm_loadu_si128((const __m128i*)&out[i + 12]); +#endif + _mm_storeu_si128((__m128i*)&out[i + 0], _mm_add_epi32(a0, b0)); + _mm_storeu_si128((__m128i*)&out[i + 4], _mm_add_epi32(a1, b1)); +#if (LINE_SIZE == 16) + _mm_storeu_si128((__m128i*)&out[i + 8], _mm_add_epi32(a2, b2)); + _mm_storeu_si128((__m128i*)&out[i + 12], _mm_add_epi32(a3, b3)); +#endif + } + for (; i < size; ++i) { + out[i] += a[i]; + } +} +#undef LINE_SIZE + +//------------------------------------------------------------------------------ +// Entropy + +// Checks whether the X or Y contribution is worth computing and adding. +// Used in loop unrolling. +#define ANALYZE_X_OR_Y(x_or_y, j) \ + do { \ + if ((x_or_y)[i + (j)] != 0) retval -= VP8LFastSLog2((x_or_y)[i + (j)]); \ + } while (0) + +// Checks whether the X + Y contribution is worth computing and adding. +// Used in loop unrolling. +#define ANALYZE_XY(j) \ + do { \ + if (tmp[j] != 0) { \ + retval -= VP8LFastSLog2(tmp[j]); \ + ANALYZE_X_OR_Y(X, j); \ + } \ + } while (0) + +#if !(defined(__i386__) || defined(_M_IX86)) +static float CombinedShannonEntropy_SSE2(const int X[256], const int Y[256]) { + int i; + double retval = 0.; + int sumX, sumXY; + int32_t tmp[4]; + __m128i zero = _mm_setzero_si128(); + // Sums up X + Y, 4 ints at a time (and will merge it at the end for sumXY). + __m128i sumXY_128 = zero; + __m128i sumX_128 = zero; + + for (i = 0; i < 256; i += 4) { + const __m128i x = _mm_loadu_si128((const __m128i*)(X + i)); + const __m128i y = _mm_loadu_si128((const __m128i*)(Y + i)); + + // Check if any X is non-zero: this actually provides a speedup as X is + // usually sparse. 
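+ // (Editor's note: when all four lanes of x are zero, _mm_cmpeq_epi32(x, zero) sets every byte, the movemask equals 0xFFFF, and the cheaper Y-only branch below runs instead.)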
+ if (_mm_movemask_epi8(_mm_cmpeq_epi32(x, zero)) != 0xFFFF) { + const __m128i xy_128 = _mm_add_epi32(x, y); + sumXY_128 = _mm_add_epi32(sumXY_128, xy_128); + + sumX_128 = _mm_add_epi32(sumX_128, x); + + // Analyze the different X + Y. + _mm_storeu_si128((__m128i*)tmp, xy_128); + + ANALYZE_XY(0); + ANALYZE_XY(1); + ANALYZE_XY(2); + ANALYZE_XY(3); + } else { + // X is fully 0, so only deal with Y. + sumXY_128 = _mm_add_epi32(sumXY_128, y); + + ANALYZE_X_OR_Y(Y, 0); + ANALYZE_X_OR_Y(Y, 1); + ANALYZE_X_OR_Y(Y, 2); + ANALYZE_X_OR_Y(Y, 3); + } + } + + // Sum up sumX_128 to get sumX. + _mm_storeu_si128((__m128i*)tmp, sumX_128); + sumX = tmp[3] + tmp[2] + tmp[1] + tmp[0]; + + // Sum up sumXY_128 to get sumXY. + _mm_storeu_si128((__m128i*)tmp, sumXY_128); + sumXY = tmp[3] + tmp[2] + tmp[1] + tmp[0]; + + retval += VP8LFastSLog2(sumX) + VP8LFastSLog2(sumXY); + return (float)retval; +} +#endif // !(defined(__i386__) || defined(_M_IX86)) + +#undef ANALYZE_X_OR_Y +#undef ANALYZE_XY + +//------------------------------------------------------------------------------ + +static int VectorMismatch_SSE2(const uint32_t* const array1, + const uint32_t* const array2, int length) { + int match_len; + + if (length >= 12) { + __m128i A0 = _mm_loadu_si128((const __m128i*)&array1[0]); + __m128i A1 = _mm_loadu_si128((const __m128i*)&array2[0]); + match_len = 0; + do { + // Loop unrolling and early load both provide a speedup of 10% for the + // current function. Also, max_limit can be MAX_LENGTH=4096 at most. + const __m128i cmpA = _mm_cmpeq_epi32(A0, A1); + const __m128i B0 = + _mm_loadu_si128((const __m128i*)&array1[match_len + 4]); + const __m128i B1 = + _mm_loadu_si128((const __m128i*)&array2[match_len + 4]); + if (_mm_movemask_epi8(cmpA) != 0xffff) break; + match_len += 4; + + { + const __m128i cmpB = _mm_cmpeq_epi32(B0, B1); + A0 = _mm_loadu_si128((const __m128i*)&array1[match_len + 4]); + A1 = _mm_loadu_si128((const __m128i*)&array2[match_len + 4]); + if (_mm_movemask_epi8(cmpB) != 0xffff) break; + match_len += 4; + } + } while (match_len + 12 < length); + } else { + match_len = 0; + // Unroll the potential first two loops. + if (length >= 4 && + _mm_movemask_epi8(_mm_cmpeq_epi32( + _mm_loadu_si128((const __m128i*)&array1[0]), + _mm_loadu_si128((const __m128i*)&array2[0]))) == 0xffff) { + match_len = 4; + if (length >= 8 && + _mm_movemask_epi8(_mm_cmpeq_epi32( + _mm_loadu_si128((const __m128i*)&array1[4]), + _mm_loadu_si128((const __m128i*)&array2[4]))) == 0xffff) { + match_len = 8; + } + } + } + + while (match_len < length && array1[match_len] == array2[match_len]) { + ++match_len; + } + return match_len; +} + +// Bundles multiple (1, 2, 4 or 8) pixels into a single pixel. +static void BundleColorMap_SSE2(const uint8_t* const row, int width, int xbits, + uint32_t* dst) { + int x; + assert(xbits >= 0); + assert(xbits <= 3); + switch (xbits) { + case 0: { + const __m128i ff = _mm_set1_epi16((short)0xff00); + const __m128i zero = _mm_setzero_si128(); + // Store 0xff000000 | (row[x] << 8). 
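+ // (Worked example, editor's sketch: row[x] = 0x7f yields the pixel 0xff007f00, i.e. opaque alpha with the palette index carried in the green byte.)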
+ for (x = 0; x + 16 <= width; x += 16, dst += 16) { + const __m128i in = _mm_loadu_si128((const __m128i*)&row[x]); + const __m128i in_lo = _mm_unpacklo_epi8(zero, in); + const __m128i dst0 = _mm_unpacklo_epi16(in_lo, ff); + const __m128i dst1 = _mm_unpackhi_epi16(in_lo, ff); + const __m128i in_hi = _mm_unpackhi_epi8(zero, in); + const __m128i dst2 = _mm_unpacklo_epi16(in_hi, ff); + const __m128i dst3 = _mm_unpackhi_epi16(in_hi, ff); + _mm_storeu_si128((__m128i*)&dst[0], dst0); + _mm_storeu_si128((__m128i*)&dst[4], dst1); + _mm_storeu_si128((__m128i*)&dst[8], dst2); + _mm_storeu_si128((__m128i*)&dst[12], dst3); + } + break; + } + case 1: { + const __m128i ff = _mm_set1_epi16((short)0xff00); + const __m128i mul = _mm_set1_epi16(0x110); + for (x = 0; x + 16 <= width; x += 16, dst += 8) { + // 0a0b | (where a/b are 4 bits). + const __m128i in = _mm_loadu_si128((const __m128i*)&row[x]); + const __m128i tmp = _mm_mullo_epi16(in, mul); // aba0 + const __m128i pack = _mm_and_si128(tmp, ff); // ab00 + const __m128i dst0 = _mm_unpacklo_epi16(pack, ff); + const __m128i dst1 = _mm_unpackhi_epi16(pack, ff); + _mm_storeu_si128((__m128i*)&dst[0], dst0); + _mm_storeu_si128((__m128i*)&dst[4], dst1); + } + break; + } + case 2: { + const __m128i mask_or = _mm_set1_epi32(0xff000000); + const __m128i mul_cst = _mm_set1_epi16(0x0104); + const __m128i mask_mul = _mm_set1_epi16(0x0f00); + for (x = 0; x + 16 <= width; x += 16, dst += 4) { + // 000a000b000c000d | (where a/b/c/d are 2 bits). + const __m128i in = _mm_loadu_si128((const __m128i*)&row[x]); + const __m128i mul = _mm_mullo_epi16(in, mul_cst); // 00ab00b000cd00d0 + const __m128i tmp = _mm_and_si128(mul, mask_mul); // 00ab000000cd0000 + const __m128i shift = _mm_srli_epi32(tmp, 12); // 00000000ab000000 + const __m128i pack = _mm_or_si128(shift, tmp); // 00000000abcd0000 + // Convert to 0xff00**00. + const __m128i res = _mm_or_si128(pack, mask_or); + _mm_storeu_si128((__m128i*)dst, res); + } + break; + } + default: { + assert(xbits == 3); + for (x = 0; x + 16 <= width; x += 16, dst += 2) { + // 0000000a00000000b... | (where a/b are 1 bit). + const __m128i in = _mm_loadu_si128((const __m128i*)&row[x]); + const __m128i shift = _mm_slli_epi64(in, 7); + const uint32_t move = _mm_movemask_epi8(shift); + dst[0] = 0xff000000 | ((move & 0xff) << 8); + dst[1] = 0xff000000 | (move & 0xff00); + } + break; + } + } + if (x != width) { + VP8LBundleColorMap_C(row + x, width - x, xbits, dst); + } +} + +//------------------------------------------------------------------------------ +// Batch version of Predictor Transform subtraction + +static WEBP_INLINE void Average2_m128i(const __m128i* const a0, + const __m128i* const a1, + __m128i* const avg) { + // (a + b) >> 1 = ((a + b + 1) >> 1) - ((a ^ b) & 1) + const __m128i ones = _mm_set1_epi8(1); + const __m128i avg1 = _mm_avg_epu8(*a0, *a1); + const __m128i one = _mm_and_si128(_mm_xor_si128(*a0, *a1), ones); + *avg = _mm_sub_epi8(avg1, one); +} + +// Predictor0: ARGB_BLACK. 
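+// ARGB_BLACK is 0xff000000, so the per-byte subtraction below only alters the
+// alpha channel of each pixel.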
+static void PredictorSub0_SSE2(const uint32_t* in, const uint32_t* upper, + int num_pixels, uint32_t* out) { + int i; + const __m128i black = _mm_set1_epi32(ARGB_BLACK); + for (i = 0; i + 4 <= num_pixels; i += 4) { + const __m128i src = _mm_loadu_si128((const __m128i*)&in[i]); + const __m128i res = _mm_sub_epi8(src, black); + _mm_storeu_si128((__m128i*)&out[i], res); + } + if (i != num_pixels) { + VP8LPredictorsSub_C[0](in + i, NULL, num_pixels - i, out + i); + } + (void)upper; +} + +#define GENERATE_PREDICTOR_1(X, IN) \ + static void PredictorSub##X##_SSE2(const uint32_t* const in, \ + const uint32_t* const upper, \ + int num_pixels, uint32_t* const out) { \ + int i; \ + for (i = 0; i + 4 <= num_pixels; i += 4) { \ + const __m128i src = _mm_loadu_si128((const __m128i*)&in[i]); \ + const __m128i pred = _mm_loadu_si128((const __m128i*)&(IN)); \ + const __m128i res = _mm_sub_epi8(src, pred); \ + _mm_storeu_si128((__m128i*)&out[i], res); \ + } \ + if (i != num_pixels) { \ + VP8LPredictorsSub_C[(X)](in + i, WEBP_OFFSET_PTR(upper, i), \ + num_pixels - i, out + i); \ + } \ + } + +GENERATE_PREDICTOR_1(1, in[i - 1]) // Predictor1: L +GENERATE_PREDICTOR_1(2, upper[i]) // Predictor2: T +GENERATE_PREDICTOR_1(3, upper[i + 1]) // Predictor3: TR +GENERATE_PREDICTOR_1(4, upper[i - 1]) // Predictor4: TL +#undef GENERATE_PREDICTOR_1 + +// Predictor5: avg2(avg2(L, TR), T) +static void PredictorSub5_SSE2(const uint32_t* in, const uint32_t* upper, + int num_pixels, uint32_t* out) { + int i; + for (i = 0; i + 4 <= num_pixels; i += 4) { + const __m128i L = _mm_loadu_si128((const __m128i*)&in[i - 1]); + const __m128i T = _mm_loadu_si128((const __m128i*)&upper[i]); + const __m128i TR = _mm_loadu_si128((const __m128i*)&upper[i + 1]); + const __m128i src = _mm_loadu_si128((const __m128i*)&in[i]); + __m128i avg, pred, res; + Average2_m128i(&L, &TR, &avg); + Average2_m128i(&avg, &T, &pred); + res = _mm_sub_epi8(src, pred); + _mm_storeu_si128((__m128i*)&out[i], res); + } + if (i != num_pixels) { + VP8LPredictorsSub_C[5](in + i, upper + i, num_pixels - i, out + i); + } +} + +#define GENERATE_PREDICTOR_2(X, A, B) \ +static void PredictorSub##X##_SSE2(const uint32_t* in, const uint32_t* upper, \ + int num_pixels, uint32_t* out) { \ + int i; \ + for (i = 0; i + 4 <= num_pixels; i += 4) { \ + const __m128i tA = _mm_loadu_si128((const __m128i*)&(A)); \ + const __m128i tB = _mm_loadu_si128((const __m128i*)&(B)); \ + const __m128i src = _mm_loadu_si128((const __m128i*)&in[i]); \ + __m128i pred, res; \ + Average2_m128i(&tA, &tB, &pred); \ + res = _mm_sub_epi8(src, pred); \ + _mm_storeu_si128((__m128i*)&out[i], res); \ + } \ + if (i != num_pixels) { \ + VP8LPredictorsSub_C[(X)](in + i, upper + i, num_pixels - i, out + i); \ + } \ +} + +GENERATE_PREDICTOR_2(6, in[i - 1], upper[i - 1]) // Predictor6: avg(L, TL) +GENERATE_PREDICTOR_2(7, in[i - 1], upper[i]) // Predictor7: avg(L, T) +GENERATE_PREDICTOR_2(8, upper[i - 1], upper[i]) // Predictor8: avg(TL, T) +GENERATE_PREDICTOR_2(9, upper[i], upper[i + 1]) // Predictor9: average(T, TR) +#undef GENERATE_PREDICTOR_2 + +// Predictor10: avg(avg(L,TL), avg(T, TR)). 
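+// Average2_m128i computes an exact truncating per-byte average, so chaining
+// it three times gives the same result as the scalar predictor.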
+static void PredictorSub10_SSE2(const uint32_t* in, const uint32_t* upper,
+                                int num_pixels, uint32_t* out) {
+  int i;
+  for (i = 0; i + 4 <= num_pixels; i += 4) {
+    const __m128i L = _mm_loadu_si128((const __m128i*)&in[i - 1]);
+    const __m128i src = _mm_loadu_si128((const __m128i*)&in[i]);
+    const __m128i TL = _mm_loadu_si128((const __m128i*)&upper[i - 1]);
+    const __m128i T = _mm_loadu_si128((const __m128i*)&upper[i]);
+    const __m128i TR = _mm_loadu_si128((const __m128i*)&upper[i + 1]);
+    __m128i avgTTR, avgLTL, avg, res;
+    Average2_m128i(&T, &TR, &avgTTR);
+    Average2_m128i(&L, &TL, &avgLTL);
+    Average2_m128i(&avgTTR, &avgLTL, &avg);
+    res = _mm_sub_epi8(src, avg);
+    _mm_storeu_si128((__m128i*)&out[i], res);
+  }
+  if (i != num_pixels) {
+    VP8LPredictorsSub_C[10](in + i, upper + i, num_pixels - i, out + i);
+  }
+}
+
+// Predictor11: select.
+static void GetSumAbsDiff32_SSE2(const __m128i* const A, const __m128i* const B,
+                                 __m128i* const out) {
+  // We can unpack with any value on the upper 32 bits, provided it's the same
+  // on both operands (so that their sum of abs diff is zero). Here we use *A.
+  const __m128i A_lo = _mm_unpacklo_epi32(*A, *A);
+  const __m128i B_lo = _mm_unpacklo_epi32(*B, *A);
+  const __m128i A_hi = _mm_unpackhi_epi32(*A, *A);
+  const __m128i B_hi = _mm_unpackhi_epi32(*B, *A);
+  const __m128i s_lo = _mm_sad_epu8(A_lo, B_lo);
+  const __m128i s_hi = _mm_sad_epu8(A_hi, B_hi);
+  *out = _mm_packs_epi32(s_lo, s_hi);
+}
+
+static void PredictorSub11_SSE2(const uint32_t* in, const uint32_t* upper,
+                                int num_pixels, uint32_t* out) {
+  int i;
+  for (i = 0; i + 4 <= num_pixels; i += 4) {
+    const __m128i L = _mm_loadu_si128((const __m128i*)&in[i - 1]);
+    const __m128i T = _mm_loadu_si128((const __m128i*)&upper[i]);
+    const __m128i TL = _mm_loadu_si128((const __m128i*)&upper[i - 1]);
+    const __m128i src = _mm_loadu_si128((const __m128i*)&in[i]);
+    __m128i pa, pb;
+    GetSumAbsDiff32_SSE2(&T, &TL, &pa);   // pa = sum |T-TL|
+    GetSumAbsDiff32_SSE2(&L, &TL, &pb);   // pb = sum |L-TL|
+    {
+      const __m128i mask = _mm_cmpgt_epi32(pb, pa);
+      const __m128i A = _mm_and_si128(mask, L);
+      const __m128i B = _mm_andnot_si128(mask, T);
+      const __m128i pred = _mm_or_si128(A, B);   // pred = (pa < pb) ? L : T
+      const __m128i res = _mm_sub_epi8(src, pred);
+      _mm_storeu_si128((__m128i*)&out[i], res);
+    }
+  }
+  if (i != num_pixels) {
+    VP8LPredictorsSub_C[11](in + i, upper + i, num_pixels - i, out + i);
+  }
+}
+
+// Predictor12: ClampedAddSubtractFull.
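+// Widening each channel to 16 bits lets L + (T - TL) be formed without
+// wrap-around; _mm_packus_epi16 then clamps the result back to [0, 255].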
+static void PredictorSub12_SSE2(const uint32_t* in, const uint32_t* upper,
+                                int num_pixels, uint32_t* out) {
+  int i;
+  const __m128i zero = _mm_setzero_si128();
+  for (i = 0; i + 4 <= num_pixels; i += 4) {
+    const __m128i src = _mm_loadu_si128((const __m128i*)&in[i]);
+    const __m128i L = _mm_loadu_si128((const __m128i*)&in[i - 1]);
+    const __m128i L_lo = _mm_unpacklo_epi8(L, zero);
+    const __m128i L_hi = _mm_unpackhi_epi8(L, zero);
+    const __m128i T = _mm_loadu_si128((const __m128i*)&upper[i]);
+    const __m128i T_lo = _mm_unpacklo_epi8(T, zero);
+    const __m128i T_hi = _mm_unpackhi_epi8(T, zero);
+    const __m128i TL = _mm_loadu_si128((const __m128i*)&upper[i - 1]);
+    const __m128i TL_lo = _mm_unpacklo_epi8(TL, zero);
+    const __m128i TL_hi = _mm_unpackhi_epi8(TL, zero);
+    const __m128i diff_lo = _mm_sub_epi16(T_lo, TL_lo);
+    const __m128i diff_hi = _mm_sub_epi16(T_hi, TL_hi);
+    const __m128i pred_lo = _mm_add_epi16(L_lo, diff_lo);
+    const __m128i pred_hi = _mm_add_epi16(L_hi, diff_hi);
+    const __m128i pred = _mm_packus_epi16(pred_lo, pred_hi);
+    const __m128i res = _mm_sub_epi8(src, pred);
+    _mm_storeu_si128((__m128i*)&out[i], res);
+  }
+  if (i != num_pixels) {
+    VP8LPredictorsSub_C[12](in + i, upper + i, num_pixels - i, out + i);
+  }
+}
+
+// Predictor13: ClampedAddSubtractHalf
+static void PredictorSub13_SSE2(const uint32_t* in, const uint32_t* upper,
+                                int num_pixels, uint32_t* out) {
+  int i;
+  const __m128i zero = _mm_setzero_si128();
+  for (i = 0; i + 2 <= num_pixels; i += 2) {
+    // We can only process two pixels at a time here.
+    const __m128i L = _mm_loadl_epi64((const __m128i*)&in[i - 1]);
+    const __m128i src = _mm_loadl_epi64((const __m128i*)&in[i]);
+    const __m128i T = _mm_loadl_epi64((const __m128i*)&upper[i]);
+    const __m128i TL = _mm_loadl_epi64((const __m128i*)&upper[i - 1]);
+    const __m128i L_lo = _mm_unpacklo_epi8(L, zero);
+    const __m128i T_lo = _mm_unpacklo_epi8(T, zero);
+    const __m128i TL_lo = _mm_unpacklo_epi8(TL, zero);
+    const __m128i sum = _mm_add_epi16(T_lo, L_lo);
+    const __m128i avg = _mm_srli_epi16(sum, 1);
+    const __m128i A1 = _mm_sub_epi16(avg, TL_lo);
+    const __m128i bit_fix = _mm_cmpgt_epi16(TL_lo, avg);
+    const __m128i A2 = _mm_sub_epi16(A1, bit_fix);
+    const __m128i A3 = _mm_srai_epi16(A2, 1);
+    const __m128i A4 = _mm_add_epi16(avg, A3);
+    const __m128i pred = _mm_packus_epi16(A4, A4);
+    const __m128i res = _mm_sub_epi8(src, pred);
+    _mm_storel_epi64((__m128i*)&out[i], res);
+  }
+  if (i != num_pixels) {
+    VP8LPredictorsSub_C[13](in + i, upper + i, num_pixels - i, out + i);
+  }
+}
+
+//------------------------------------------------------------------------------
+// Entry point
+
+extern void VP8LEncDspInitSSE2(void);
+
+WEBP_TSAN_IGNORE_FUNCTION void VP8LEncDspInitSSE2(void) {
+  VP8LSubtractGreenFromBlueAndRed = SubtractGreenFromBlueAndRed_SSE2;
+  VP8LTransformColor = TransformColor_SSE2;
+  VP8LCollectColorBlueTransforms = CollectColorBlueTransforms_SSE2;
+  VP8LCollectColorRedTransforms = CollectColorRedTransforms_SSE2;
+  VP8LAddVector = AddVector_SSE2;
+  VP8LAddVectorEq = AddVectorEq_SSE2;
+  // TODO(https://crbug.com/webp/499): this function produces different results
+  // from the C code due to use of double/float resulting in output differences
+  // when compared to -noasm.
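+  // The guard below matches the one around CombinedShannonEntropy_SSE2 above,
+  // keeping the SSE2 entropy path unused on 32-bit x86 builds.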
+#if !(defined(__i386__) || defined(_M_IX86))
+  VP8LCombinedShannonEntropy = CombinedShannonEntropy_SSE2;
+#endif
+  VP8LVectorMismatch = VectorMismatch_SSE2;
+  VP8LBundleColorMap = BundleColorMap_SSE2;
+
+  VP8LPredictorsSub[0] = PredictorSub0_SSE2;
+  VP8LPredictorsSub[1] = PredictorSub1_SSE2;
+  VP8LPredictorsSub[2] = PredictorSub2_SSE2;
+  VP8LPredictorsSub[3] = PredictorSub3_SSE2;
+  VP8LPredictorsSub[4] = PredictorSub4_SSE2;
+  VP8LPredictorsSub[5] = PredictorSub5_SSE2;
+  VP8LPredictorsSub[6] = PredictorSub6_SSE2;
+  VP8LPredictorsSub[7] = PredictorSub7_SSE2;
+  VP8LPredictorsSub[8] = PredictorSub8_SSE2;
+  VP8LPredictorsSub[9] = PredictorSub9_SSE2;
+  VP8LPredictorsSub[10] = PredictorSub10_SSE2;
+  VP8LPredictorsSub[11] = PredictorSub11_SSE2;
+  VP8LPredictorsSub[12] = PredictorSub12_SSE2;
+  VP8LPredictorsSub[13] = PredictorSub13_SSE2;
+  VP8LPredictorsSub[14] = PredictorSub0_SSE2;  // <- padding security sentinels
+  VP8LPredictorsSub[15] = PredictorSub0_SSE2;
+}
+
+#else  // !WEBP_USE_SSE2
+
+WEBP_DSP_INIT_STUB(VP8LEncDspInitSSE2)
+
+#endif  // WEBP_USE_SSE2
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc_sse41.c b/vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc_sse41.c
new file mode 100644
index 00000000..ffb515a6
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_lossless_enc_sse41.c
@@ -0,0 +1,148 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// SSE4.1 variant of methods for lossless encoder
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include "dsp_dsp.h"
+
+#if defined(WEBP_USE_SSE41)
+#include <assert.h>
+#include <smmintrin.h>
+#include "dsp_lossless.h"
+
+// For sign-extended multiplying constants, pre-shifted by 5:
+#define CST_5b(X) (((int16_t)((uint16_t)(X) << 8)) >> 5)
+
+//------------------------------------------------------------------------------
+// Subtract-Green Transform
+
+static void SubtractGreenFromBlueAndRed_SSE41(uint32_t* argb_data,
+                                              int num_pixels) {
+  int i;
+  const __m128i kCstShuffle = _mm_set_epi8(-1, 13, -1, 13, -1, 9, -1, 9,
+                                           -1, 5, -1, 5, -1, 1, -1, 1);
+  for (i = 0; i + 4 <= num_pixels; i += 4) {
+    const __m128i in = _mm_loadu_si128((__m128i*)&argb_data[i]);
+    const __m128i in_0g0g = _mm_shuffle_epi8(in, kCstShuffle);
+    const __m128i out = _mm_sub_epi8(in, in_0g0g);
+    _mm_storeu_si128((__m128i*)&argb_data[i], out);
+  }
+  // fallthrough and finish off with plain-C
+  if (i != num_pixels) {
+    VP8LSubtractGreenFromBlueAndRed_C(argb_data + i, num_pixels - i);
+  }
+}
+
+//------------------------------------------------------------------------------
+// Color Transform
+
+#define SPAN 8
+static void CollectColorBlueTransforms_SSE41(const uint32_t* argb, int stride,
+                                             int tile_width, int tile_height,
+                                             int green_to_blue, int red_to_blue,
+                                             int histo[]) {
+  const __m128i mults_r = _mm_set1_epi16(CST_5b(red_to_blue));
+  const __m128i mults_g = _mm_set1_epi16(CST_5b(green_to_blue));
+  const __m128i mask_g = _mm_set1_epi16((short)0xff00);  // green mask
+  const __m128i mask_gb = _mm_set1_epi32(0xffff);  // green/blue mask
+  const __m128i mask_b = _mm_set1_epi16(0x00ff);  // blue mask
+  const __m128i shuffler_lo =
_mm_setr_epi8(-1, 2, -1, 6, -1, 10, -1, 14, -1, + -1, -1, -1, -1, -1, -1, -1); + const __m128i shuffler_hi = _mm_setr_epi8(-1, -1, -1, -1, -1, -1, -1, -1, -1, + 2, -1, 6, -1, 10, -1, 14); + int y; + for (y = 0; y < tile_height; ++y) { + const uint32_t* const src = argb + y * stride; + int i, x; + for (x = 0; x + SPAN <= tile_width; x += SPAN) { + uint16_t values[SPAN]; + const __m128i in0 = _mm_loadu_si128((__m128i*)&src[x + 0]); + const __m128i in1 = _mm_loadu_si128((__m128i*)&src[x + SPAN / 2]); + const __m128i r0 = _mm_shuffle_epi8(in0, shuffler_lo); + const __m128i r1 = _mm_shuffle_epi8(in1, shuffler_hi); + const __m128i r = _mm_or_si128(r0, r1); // r 0 + const __m128i gb0 = _mm_and_si128(in0, mask_gb); + const __m128i gb1 = _mm_and_si128(in1, mask_gb); + const __m128i gb = _mm_packus_epi32(gb0, gb1); // g b + const __m128i g = _mm_and_si128(gb, mask_g); // g 0 + const __m128i A = _mm_mulhi_epi16(r, mults_r); // x dbr + const __m128i B = _mm_mulhi_epi16(g, mults_g); // x dbg + const __m128i C = _mm_sub_epi8(gb, B); // x b' + const __m128i D = _mm_sub_epi8(C, A); // x b'' + const __m128i E = _mm_and_si128(D, mask_b); // 0 b'' + _mm_storeu_si128((__m128i*)values, E); + for (i = 0; i < SPAN; ++i) ++histo[values[i]]; + } + } + { + const int left_over = tile_width & (SPAN - 1); + if (left_over > 0) { + VP8LCollectColorBlueTransforms_C(argb + tile_width - left_over, stride, + left_over, tile_height, + green_to_blue, red_to_blue, histo); + } + } +} + +static void CollectColorRedTransforms_SSE41(const uint32_t* argb, int stride, + int tile_width, int tile_height, + int green_to_red, int histo[]) { + const __m128i mults_g = _mm_set1_epi16(CST_5b(green_to_red)); + const __m128i mask_g = _mm_set1_epi32(0x00ff00); // green mask + const __m128i mask = _mm_set1_epi16(0xff); + + int y; + for (y = 0; y < tile_height; ++y) { + const uint32_t* const src = argb + y * stride; + int i, x; + for (x = 0; x + SPAN <= tile_width; x += SPAN) { + uint16_t values[SPAN]; + const __m128i in0 = _mm_loadu_si128((__m128i*)&src[x + 0]); + const __m128i in1 = _mm_loadu_si128((__m128i*)&src[x + SPAN / 2]); + const __m128i g0 = _mm_and_si128(in0, mask_g); // 0 0 | g 0 + const __m128i g1 = _mm_and_si128(in1, mask_g); + const __m128i g = _mm_packus_epi32(g0, g1); // g 0 + const __m128i A0 = _mm_srli_epi32(in0, 16); // 0 0 | x r + const __m128i A1 = _mm_srli_epi32(in1, 16); + const __m128i A = _mm_packus_epi32(A0, A1); // x r + const __m128i B = _mm_mulhi_epi16(g, mults_g); // x dr + const __m128i C = _mm_sub_epi8(A, B); // x r' + const __m128i D = _mm_and_si128(C, mask); // 0 r' + _mm_storeu_si128((__m128i*)values, D); + for (i = 0; i < SPAN; ++i) ++histo[values[i]]; + } + } + { + const int left_over = tile_width & (SPAN - 1); + if (left_over > 0) { + VP8LCollectColorRedTransforms_C(argb + tile_width - left_over, stride, + left_over, tile_height, green_to_red, + histo); + } + } +} + +//------------------------------------------------------------------------------ +// Entry point + +extern void VP8LEncDspInitSSE41(void); + +WEBP_TSAN_IGNORE_FUNCTION void VP8LEncDspInitSSE41(void) { + VP8LSubtractGreenFromBlueAndRed = SubtractGreenFromBlueAndRed_SSE41; + VP8LCollectColorBlueTransforms = CollectColorBlueTransforms_SSE41; + VP8LCollectColorRedTransforms = CollectColorRedTransforms_SSE41; +} + +#else // !WEBP_USE_SSE41 + +WEBP_DSP_INIT_STUB(VP8LEncDspInitSSE41) + +#endif // WEBP_USE_SSE41 diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_lossless_mips_dsp_r2.c 
b/vendor/github.com/sizeofint/webpanimation/dsp_lossless_mips_dsp_r2.c new file mode 100644 index 00000000..7fcfc60e --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_lossless_mips_dsp_r2.c @@ -0,0 +1,696 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Image transforms and color space conversion methods for lossless decoder. +// +// Author(s): Djordje Pesut (djordje.pesut@imgtec.com) +// Jovan Zelincevic (jovan.zelincevic@imgtec.com) + +#include "dsp_dsp.h" + +#if defined(WEBP_USE_MIPS_DSP_R2) + +#include "dsp_lossless.h" +#include "dsp_lossless_common.h" + +#define MAP_COLOR_FUNCS(FUNC_NAME, TYPE, GET_INDEX, GET_VALUE) \ +static void FUNC_NAME(const TYPE* src, \ + const uint32_t* const color_map, \ + TYPE* dst, int y_start, int y_end, \ + int width) { \ + int y; \ + for (y = y_start; y < y_end; ++y) { \ + int x; \ + for (x = 0; x < (width >> 2); ++x) { \ + int tmp1, tmp2, tmp3, tmp4; \ + __asm__ volatile ( \ + ".ifc " #TYPE ", uint8_t \n\t" \ + "lbu %[tmp1], 0(%[src]) \n\t" \ + "lbu %[tmp2], 1(%[src]) \n\t" \ + "lbu %[tmp3], 2(%[src]) \n\t" \ + "lbu %[tmp4], 3(%[src]) \n\t" \ + "addiu %[src], %[src], 4 \n\t" \ + ".endif \n\t" \ + ".ifc " #TYPE ", uint32_t \n\t" \ + "lw %[tmp1], 0(%[src]) \n\t" \ + "lw %[tmp2], 4(%[src]) \n\t" \ + "lw %[tmp3], 8(%[src]) \n\t" \ + "lw %[tmp4], 12(%[src]) \n\t" \ + "ext %[tmp1], %[tmp1], 8, 8 \n\t" \ + "ext %[tmp2], %[tmp2], 8, 8 \n\t" \ + "ext %[tmp3], %[tmp3], 8, 8 \n\t" \ + "ext %[tmp4], %[tmp4], 8, 8 \n\t" \ + "addiu %[src], %[src], 16 \n\t" \ + ".endif \n\t" \ + "sll %[tmp1], %[tmp1], 2 \n\t" \ + "sll %[tmp2], %[tmp2], 2 \n\t" \ + "sll %[tmp3], %[tmp3], 2 \n\t" \ + "sll %[tmp4], %[tmp4], 2 \n\t" \ + "lwx %[tmp1], %[tmp1](%[color_map]) \n\t" \ + "lwx %[tmp2], %[tmp2](%[color_map]) \n\t" \ + "lwx %[tmp3], %[tmp3](%[color_map]) \n\t" \ + "lwx %[tmp4], %[tmp4](%[color_map]) \n\t" \ + ".ifc " #TYPE ", uint8_t \n\t" \ + "ext %[tmp1], %[tmp1], 8, 8 \n\t" \ + "ext %[tmp2], %[tmp2], 8, 8 \n\t" \ + "ext %[tmp3], %[tmp3], 8, 8 \n\t" \ + "ext %[tmp4], %[tmp4], 8, 8 \n\t" \ + "sb %[tmp1], 0(%[dst]) \n\t" \ + "sb %[tmp2], 1(%[dst]) \n\t" \ + "sb %[tmp3], 2(%[dst]) \n\t" \ + "sb %[tmp4], 3(%[dst]) \n\t" \ + "addiu %[dst], %[dst], 4 \n\t" \ + ".endif \n\t" \ + ".ifc " #TYPE ", uint32_t \n\t" \ + "sw %[tmp1], 0(%[dst]) \n\t" \ + "sw %[tmp2], 4(%[dst]) \n\t" \ + "sw %[tmp3], 8(%[dst]) \n\t" \ + "sw %[tmp4], 12(%[dst]) \n\t" \ + "addiu %[dst], %[dst], 16 \n\t" \ + ".endif \n\t" \ + : [tmp1]"=&r"(tmp1), [tmp2]"=&r"(tmp2), [tmp3]"=&r"(tmp3), \ + [tmp4]"=&r"(tmp4), [src]"+&r"(src), [dst]"+r"(dst) \ + : [color_map]"r"(color_map) \ + : "memory" \ + ); \ + } \ + for (x = 0; x < (width & 3); ++x) { \ + *dst++ = GET_VALUE(color_map[GET_INDEX(*src++)]); \ + } \ + } \ +} + +MAP_COLOR_FUNCS(MapARGB_MIPSdspR2, uint32_t, VP8GetARGBIndex, VP8GetARGBValue) +MAP_COLOR_FUNCS(MapAlpha_MIPSdspR2, uint8_t, VP8GetAlphaIndex, VP8GetAlphaValue) + +#undef MAP_COLOR_FUNCS + +static WEBP_INLINE uint32_t ClampedAddSubtractFull(uint32_t c0, uint32_t c1, + uint32_t c2) { + int temp0, temp1, temp2, temp3, temp4, temp5; + __asm__ volatile ( + "preceu.ph.qbr 
%[temp1], %[c0] \n\t" + "preceu.ph.qbl %[temp2], %[c0] \n\t" + "preceu.ph.qbr %[temp3], %[c1] \n\t" + "preceu.ph.qbl %[temp4], %[c1] \n\t" + "preceu.ph.qbr %[temp5], %[c2] \n\t" + "preceu.ph.qbl %[temp0], %[c2] \n\t" + "subq.ph %[temp3], %[temp3], %[temp5] \n\t" + "subq.ph %[temp4], %[temp4], %[temp0] \n\t" + "addq.ph %[temp1], %[temp1], %[temp3] \n\t" + "addq.ph %[temp2], %[temp2], %[temp4] \n\t" + "shll_s.ph %[temp1], %[temp1], 7 \n\t" + "shll_s.ph %[temp2], %[temp2], 7 \n\t" + "precrqu_s.qb.ph %[temp2], %[temp2], %[temp1] \n\t" + : [temp0]"=r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5) + : [c0]"r"(c0), [c1]"r"(c1), [c2]"r"(c2) + : "memory" + ); + return temp2; +} + +static WEBP_INLINE uint32_t ClampedAddSubtractHalf(uint32_t c0, uint32_t c1, + uint32_t c2) { + int temp0, temp1, temp2, temp3, temp4, temp5; + __asm__ volatile ( + "adduh.qb %[temp5], %[c0], %[c1] \n\t" + "preceu.ph.qbr %[temp3], %[c2] \n\t" + "preceu.ph.qbr %[temp1], %[temp5] \n\t" + "preceu.ph.qbl %[temp2], %[temp5] \n\t" + "preceu.ph.qbl %[temp4], %[c2] \n\t" + "subq.ph %[temp3], %[temp1], %[temp3] \n\t" + "subq.ph %[temp4], %[temp2], %[temp4] \n\t" + "shrl.ph %[temp5], %[temp3], 15 \n\t" + "shrl.ph %[temp0], %[temp4], 15 \n\t" + "addq.ph %[temp3], %[temp3], %[temp5] \n\t" + "addq.ph %[temp4], %[temp0], %[temp4] \n\t" + "shra.ph %[temp3], %[temp3], 1 \n\t" + "shra.ph %[temp4], %[temp4], 1 \n\t" + "addq.ph %[temp1], %[temp1], %[temp3] \n\t" + "addq.ph %[temp2], %[temp2], %[temp4] \n\t" + "shll_s.ph %[temp1], %[temp1], 7 \n\t" + "shll_s.ph %[temp2], %[temp2], 7 \n\t" + "precrqu_s.qb.ph %[temp1], %[temp2], %[temp1] \n\t" + : [temp0]"=r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=r"(temp4), [temp5]"=&r"(temp5) + : [c0]"r"(c0), [c1]"r"(c1), [c2]"r"(c2) + : "memory" + ); + return temp1; +} + +static WEBP_INLINE uint32_t Select(uint32_t a, uint32_t b, uint32_t c) { + int temp0, temp1, temp2, temp3, temp4, temp5; + __asm__ volatile ( + "cmpgdu.lt.qb %[temp1], %[c], %[b] \n\t" + "pick.qb %[temp1], %[b], %[c] \n\t" + "pick.qb %[temp2], %[c], %[b] \n\t" + "cmpgdu.lt.qb %[temp4], %[c], %[a] \n\t" + "pick.qb %[temp4], %[a], %[c] \n\t" + "pick.qb %[temp5], %[c], %[a] \n\t" + "subu.qb %[temp3], %[temp1], %[temp2] \n\t" + "subu.qb %[temp0], %[temp4], %[temp5] \n\t" + "raddu.w.qb %[temp3], %[temp3] \n\t" + "raddu.w.qb %[temp0], %[temp0] \n\t" + "subu %[temp3], %[temp3], %[temp0] \n\t" + "slti %[temp0], %[temp3], 0x1 \n\t" + "movz %[a], %[b], %[temp0] \n\t" + : [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), [temp3]"=&r"(temp3), + [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), [temp0]"=&r"(temp0), + [a]"+&r"(a) + : [b]"r"(b), [c]"r"(c) + ); + return a; +} + +static WEBP_INLINE uint32_t Average2(uint32_t a0, uint32_t a1) { + __asm__ volatile ( + "adduh.qb %[a0], %[a0], %[a1] \n\t" + : [a0]"+r"(a0) + : [a1]"r"(a1) + ); + return a0; +} + +static WEBP_INLINE uint32_t Average3(uint32_t a0, uint32_t a1, uint32_t a2) { + return Average2(Average2(a0, a2), a1); +} + +static WEBP_INLINE uint32_t Average4(uint32_t a0, uint32_t a1, + uint32_t a2, uint32_t a3) { + return Average2(Average2(a0, a1), Average2(a2, a3)); +} + +static uint32_t Predictor5_MIPSdspR2(uint32_t left, const uint32_t* const top) { + return Average3(left, top[0], top[1]); +} + +static uint32_t Predictor6_MIPSdspR2(uint32_t left, const uint32_t* const top) { + return Average2(left, top[-1]); +} + +static uint32_t Predictor7_MIPSdspR2(uint32_t left, const uint32_t* const top) { + return 
Average2(left, top[0]); +} + +static uint32_t Predictor8_MIPSdspR2(uint32_t left, const uint32_t* const top) { + (void)left; + return Average2(top[-1], top[0]); +} + +static uint32_t Predictor9_MIPSdspR2(uint32_t left, const uint32_t* const top) { + (void)left; + return Average2(top[0], top[1]); +} + +static uint32_t Predictor10_MIPSdspR2(uint32_t left, + const uint32_t* const top) { + return Average4(left, top[-1], top[0], top[1]); +} + +static uint32_t Predictor11_MIPSdspR2(uint32_t left, + const uint32_t* const top) { + return Select(top[0], left, top[-1]); +} + +static uint32_t Predictor12_MIPSdspR2(uint32_t left, + const uint32_t* const top) { + return ClampedAddSubtractFull(left, top[0], top[-1]); +} + +static uint32_t Predictor13_MIPSdspR2(uint32_t left, + const uint32_t* const top) { + return ClampedAddSubtractHalf(left, top[0], top[-1]); +} + +// Add green to blue and red channels (i.e. perform the inverse transform of +// 'subtract green'). +static void AddGreenToBlueAndRed_MIPSdspR2(const uint32_t* src, int num_pixels, + uint32_t* dst) { + uint32_t temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7; + const uint32_t* const p_loop1_end = src + (num_pixels & ~3); + const uint32_t* const p_loop2_end = src + num_pixels; + __asm__ volatile ( + ".set push \n\t" + ".set noreorder \n\t" + "beq %[src], %[p_loop1_end], 3f \n\t" + " nop \n\t" + "0: \n\t" + "lw %[temp0], 0(%[src]) \n\t" + "lw %[temp1], 4(%[src]) \n\t" + "lw %[temp2], 8(%[src]) \n\t" + "lw %[temp3], 12(%[src]) \n\t" + "ext %[temp4], %[temp0], 8, 8 \n\t" + "ext %[temp5], %[temp1], 8, 8 \n\t" + "ext %[temp6], %[temp2], 8, 8 \n\t" + "ext %[temp7], %[temp3], 8, 8 \n\t" + "addiu %[src], %[src], 16 \n\t" + "addiu %[dst], %[dst], 16 \n\t" + "replv.ph %[temp4], %[temp4] \n\t" + "replv.ph %[temp5], %[temp5] \n\t" + "replv.ph %[temp6], %[temp6] \n\t" + "replv.ph %[temp7], %[temp7] \n\t" + "addu.qb %[temp0], %[temp0], %[temp4] \n\t" + "addu.qb %[temp1], %[temp1], %[temp5] \n\t" + "addu.qb %[temp2], %[temp2], %[temp6] \n\t" + "addu.qb %[temp3], %[temp3], %[temp7] \n\t" + "sw %[temp0], -16(%[dst]) \n\t" + "sw %[temp1], -12(%[dst]) \n\t" + "sw %[temp2], -8(%[dst]) \n\t" + "bne %[src], %[p_loop1_end], 0b \n\t" + " sw %[temp3], -4(%[dst]) \n\t" + "3: \n\t" + "beq %[src], %[p_loop2_end], 2f \n\t" + " nop \n\t" + "1: \n\t" + "lw %[temp0], 0(%[src]) \n\t" + "addiu %[src], %[src], 4 \n\t" + "addiu %[dst], %[dst], 4 \n\t" + "ext %[temp4], %[temp0], 8, 8 \n\t" + "replv.ph %[temp4], %[temp4] \n\t" + "addu.qb %[temp0], %[temp0], %[temp4] \n\t" + "bne %[src], %[p_loop2_end], 1b \n\t" + " sw %[temp0], -4(%[dst]) \n\t" + "2: \n\t" + ".set pop \n\t" + : [dst]"+&r"(dst), [src]"+&r"(src), [temp0]"=&r"(temp0), + [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), [temp3]"=&r"(temp3), + [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), [temp6]"=&r"(temp6), + [temp7]"=&r"(temp7) + : [p_loop1_end]"r"(p_loop1_end), [p_loop2_end]"r"(p_loop2_end) + : "memory" + ); +} + +static void TransformColorInverse_MIPSdspR2(const VP8LMultipliers* const m, + const uint32_t* src, int num_pixels, + uint32_t* dst) { + int temp0, temp1, temp2, temp3, temp4, temp5; + uint32_t argb, argb1, new_red; + const uint32_t G_to_R = m->green_to_red_; + const uint32_t G_to_B = m->green_to_blue_; + const uint32_t R_to_B = m->red_to_blue_; + const uint32_t* const p_loop_end = src + (num_pixels & ~1); + __asm__ volatile ( + ".set push \n\t" + ".set noreorder \n\t" + "beq %[src], %[p_loop_end], 1f \n\t" + " nop \n\t" + "replv.ph %[temp0], %[G_to_R] \n\t" + "replv.ph %[temp1], %[G_to_B] \n\t" + "replv.ph 
%[temp2], %[R_to_B] \n\t" + "shll.ph %[temp0], %[temp0], 8 \n\t" + "shll.ph %[temp1], %[temp1], 8 \n\t" + "shll.ph %[temp2], %[temp2], 8 \n\t" + "shra.ph %[temp0], %[temp0], 8 \n\t" + "shra.ph %[temp1], %[temp1], 8 \n\t" + "shra.ph %[temp2], %[temp2], 8 \n\t" + "0: \n\t" + "lw %[argb], 0(%[src]) \n\t" + "lw %[argb1], 4(%[src]) \n\t" + "sw %[argb], 0(%[dst]) \n\t" + "sw %[argb1], 4(%[dst]) \n\t" + "addiu %[src], %[src], 8 \n\t" + "addiu %[dst], %[dst], 8 \n\t" + "precrq.qb.ph %[temp3], %[argb], %[argb1] \n\t" + "preceu.ph.qbra %[temp3], %[temp3] \n\t" + "shll.ph %[temp3], %[temp3], 8 \n\t" + "shra.ph %[temp3], %[temp3], 8 \n\t" + "mul.ph %[temp5], %[temp3], %[temp0] \n\t" + "mul.ph %[temp3], %[temp3], %[temp1] \n\t" + "precrq.ph.w %[new_red], %[argb], %[argb1] \n\t" + "ins %[argb1], %[argb], 16, 16 \n\t" + "shra.ph %[temp5], %[temp5], 5 \n\t" + "shra.ph %[temp3], %[temp3], 5 \n\t" + "addu.ph %[new_red], %[new_red], %[temp5] \n\t" + "addu.ph %[argb1], %[argb1], %[temp3] \n\t" + "preceu.ph.qbra %[temp5], %[new_red] \n\t" + "shll.ph %[temp4], %[temp5], 8 \n\t" + "shra.ph %[temp4], %[temp4], 8 \n\t" + "mul.ph %[temp4], %[temp4], %[temp2] \n\t" + "sb %[temp5], -2(%[dst]) \n\t" + "sra %[temp5], %[temp5], 16 \n\t" + "shra.ph %[temp4], %[temp4], 5 \n\t" + "addu.ph %[argb1], %[argb1], %[temp4] \n\t" + "preceu.ph.qbra %[temp3], %[argb1] \n\t" + "sb %[temp5], -6(%[dst]) \n\t" + "sb %[temp3], -4(%[dst]) \n\t" + "sra %[temp3], %[temp3], 16 \n\t" + "bne %[src], %[p_loop_end], 0b \n\t" + " sb %[temp3], -8(%[dst]) \n\t" + "1: \n\t" + ".set pop \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [new_red]"=&r"(new_red), [argb]"=&r"(argb), + [argb1]"=&r"(argb1), [dst]"+&r"(dst), [src]"+&r"(src) + : [G_to_R]"r"(G_to_R), [R_to_B]"r"(R_to_B), + [G_to_B]"r"(G_to_B), [p_loop_end]"r"(p_loop_end) + : "memory", "hi", "lo" + ); + + // Fall-back to C-version for left-overs. 
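+  // (The inline asm advances 'src' and 'dst' in place via the "+&r"
+  // constraints, so both already point at the remaining odd pixel here.)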
+ if (num_pixels & 1) VP8LTransformColorInverse_C(m, src, 1, dst); +} + +static void ConvertBGRAToRGB_MIPSdspR2(const uint32_t* src, + int num_pixels, uint8_t* dst) { + int temp0, temp1, temp2, temp3; + const uint32_t* const p_loop1_end = src + (num_pixels & ~3); + const uint32_t* const p_loop2_end = src + num_pixels; + __asm__ volatile ( + ".set push \n\t" + ".set noreorder \n\t" + "beq %[src], %[p_loop1_end], 3f \n\t" + " nop \n\t" + "0: \n\t" + "lw %[temp3], 12(%[src]) \n\t" + "lw %[temp2], 8(%[src]) \n\t" + "lw %[temp1], 4(%[src]) \n\t" + "lw %[temp0], 0(%[src]) \n\t" + "ins %[temp3], %[temp2], 24, 8 \n\t" + "sll %[temp2], %[temp2], 8 \n\t" + "rotr %[temp3], %[temp3], 16 \n\t" + "ins %[temp2], %[temp1], 0, 16 \n\t" + "sll %[temp1], %[temp1], 8 \n\t" + "wsbh %[temp3], %[temp3] \n\t" + "balign %[temp0], %[temp1], 1 \n\t" + "wsbh %[temp2], %[temp2] \n\t" + "wsbh %[temp0], %[temp0] \n\t" + "usw %[temp3], 8(%[dst]) \n\t" + "rotr %[temp0], %[temp0], 16 \n\t" + "usw %[temp2], 4(%[dst]) \n\t" + "addiu %[src], %[src], 16 \n\t" + "usw %[temp0], 0(%[dst]) \n\t" + "bne %[src], %[p_loop1_end], 0b \n\t" + " addiu %[dst], %[dst], 12 \n\t" + "3: \n\t" + "beq %[src], %[p_loop2_end], 2f \n\t" + " nop \n\t" + "1: \n\t" + "lw %[temp0], 0(%[src]) \n\t" + "addiu %[src], %[src], 4 \n\t" + "wsbh %[temp1], %[temp0] \n\t" + "addiu %[dst], %[dst], 3 \n\t" + "ush %[temp1], -2(%[dst]) \n\t" + "sra %[temp0], %[temp0], 16 \n\t" + "bne %[src], %[p_loop2_end], 1b \n\t" + " sb %[temp0], -3(%[dst]) \n\t" + "2: \n\t" + ".set pop \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [dst]"+&r"(dst), [src]"+&r"(src) + : [p_loop1_end]"r"(p_loop1_end), [p_loop2_end]"r"(p_loop2_end) + : "memory" + ); +} + +static void ConvertBGRAToRGBA_MIPSdspR2(const uint32_t* src, + int num_pixels, uint8_t* dst) { + int temp0, temp1, temp2, temp3; + const uint32_t* const p_loop1_end = src + (num_pixels & ~3); + const uint32_t* const p_loop2_end = src + num_pixels; + __asm__ volatile ( + ".set push \n\t" + ".set noreorder \n\t" + "beq %[src], %[p_loop1_end], 3f \n\t" + " nop \n\t" + "0: \n\t" + "lw %[temp0], 0(%[src]) \n\t" + "lw %[temp1], 4(%[src]) \n\t" + "lw %[temp2], 8(%[src]) \n\t" + "lw %[temp3], 12(%[src]) \n\t" + "wsbh %[temp0], %[temp0] \n\t" + "wsbh %[temp1], %[temp1] \n\t" + "wsbh %[temp2], %[temp2] \n\t" + "wsbh %[temp3], %[temp3] \n\t" + "addiu %[src], %[src], 16 \n\t" + "balign %[temp0], %[temp0], 1 \n\t" + "balign %[temp1], %[temp1], 1 \n\t" + "balign %[temp2], %[temp2], 1 \n\t" + "balign %[temp3], %[temp3], 1 \n\t" + "usw %[temp0], 0(%[dst]) \n\t" + "usw %[temp1], 4(%[dst]) \n\t" + "usw %[temp2], 8(%[dst]) \n\t" + "usw %[temp3], 12(%[dst]) \n\t" + "bne %[src], %[p_loop1_end], 0b \n\t" + " addiu %[dst], %[dst], 16 \n\t" + "3: \n\t" + "beq %[src], %[p_loop2_end], 2f \n\t" + " nop \n\t" + "1: \n\t" + "lw %[temp0], 0(%[src]) \n\t" + "wsbh %[temp0], %[temp0] \n\t" + "addiu %[src], %[src], 4 \n\t" + "balign %[temp0], %[temp0], 1 \n\t" + "usw %[temp0], 0(%[dst]) \n\t" + "bne %[src], %[p_loop2_end], 1b \n\t" + " addiu %[dst], %[dst], 4 \n\t" + "2: \n\t" + ".set pop \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [dst]"+&r"(dst), [src]"+&r"(src) + : [p_loop1_end]"r"(p_loop1_end), [p_loop2_end]"r"(p_loop2_end) + : "memory" + ); +} + +static void ConvertBGRAToRGBA4444_MIPSdspR2(const uint32_t* src, + int num_pixels, uint8_t* dst) { + int temp0, temp1, temp2, temp3, temp4, temp5; + const uint32_t* const p_loop1_end = src + (num_pixels & ~3); + const 
uint32_t* const p_loop2_end = src + num_pixels; + __asm__ volatile ( + ".set push \n\t" + ".set noreorder \n\t" + "beq %[src], %[p_loop1_end], 3f \n\t" + " nop \n\t" + "0: \n\t" + "lw %[temp0], 0(%[src]) \n\t" + "lw %[temp1], 4(%[src]) \n\t" + "lw %[temp2], 8(%[src]) \n\t" + "lw %[temp3], 12(%[src]) \n\t" + "ext %[temp4], %[temp0], 28, 4 \n\t" + "ext %[temp5], %[temp0], 12, 4 \n\t" + "ins %[temp0], %[temp4], 0, 4 \n\t" + "ext %[temp4], %[temp1], 28, 4 \n\t" + "ins %[temp0], %[temp5], 16, 4 \n\t" + "ext %[temp5], %[temp1], 12, 4 \n\t" + "ins %[temp1], %[temp4], 0, 4 \n\t" + "ext %[temp4], %[temp2], 28, 4 \n\t" + "ins %[temp1], %[temp5], 16, 4 \n\t" + "ext %[temp5], %[temp2], 12, 4 \n\t" + "ins %[temp2], %[temp4], 0, 4 \n\t" + "ext %[temp4], %[temp3], 28, 4 \n\t" + "ins %[temp2], %[temp5], 16, 4 \n\t" + "ext %[temp5], %[temp3], 12, 4 \n\t" + "ins %[temp3], %[temp4], 0, 4 \n\t" + "precr.qb.ph %[temp1], %[temp1], %[temp0] \n\t" + "ins %[temp3], %[temp5], 16, 4 \n\t" + "addiu %[src], %[src], 16 \n\t" + "precr.qb.ph %[temp3], %[temp3], %[temp2] \n\t" +#if (WEBP_SWAP_16BIT_CSP == 1) + "usw %[temp1], 0(%[dst]) \n\t" + "usw %[temp3], 4(%[dst]) \n\t" +#else + "wsbh %[temp1], %[temp1] \n\t" + "wsbh %[temp3], %[temp3] \n\t" + "usw %[temp1], 0(%[dst]) \n\t" + "usw %[temp3], 4(%[dst]) \n\t" +#endif + "bne %[src], %[p_loop1_end], 0b \n\t" + " addiu %[dst], %[dst], 8 \n\t" + "3: \n\t" + "beq %[src], %[p_loop2_end], 2f \n\t" + " nop \n\t" + "1: \n\t" + "lw %[temp0], 0(%[src]) \n\t" + "ext %[temp4], %[temp0], 28, 4 \n\t" + "ext %[temp5], %[temp0], 12, 4 \n\t" + "ins %[temp0], %[temp4], 0, 4 \n\t" + "ins %[temp0], %[temp5], 16, 4 \n\t" + "addiu %[src], %[src], 4 \n\t" + "precr.qb.ph %[temp0], %[temp0], %[temp0] \n\t" +#if (WEBP_SWAP_16BIT_CSP == 1) + "ush %[temp0], 0(%[dst]) \n\t" +#else + "wsbh %[temp0], %[temp0] \n\t" + "ush %[temp0], 0(%[dst]) \n\t" +#endif + "bne %[src], %[p_loop2_end], 1b \n\t" + " addiu %[dst], %[dst], 2 \n\t" + "2: \n\t" + ".set pop \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [dst]"+&r"(dst), [src]"+&r"(src) + : [p_loop1_end]"r"(p_loop1_end), [p_loop2_end]"r"(p_loop2_end) + : "memory" + ); +} + +static void ConvertBGRAToRGB565_MIPSdspR2(const uint32_t* src, + int num_pixels, uint8_t* dst) { + int temp0, temp1, temp2, temp3, temp4, temp5; + const uint32_t* const p_loop1_end = src + (num_pixels & ~3); + const uint32_t* const p_loop2_end = src + num_pixels; + __asm__ volatile ( + ".set push \n\t" + ".set noreorder \n\t" + "beq %[src], %[p_loop1_end], 3f \n\t" + " nop \n\t" + "0: \n\t" + "lw %[temp0], 0(%[src]) \n\t" + "lw %[temp1], 4(%[src]) \n\t" + "lw %[temp2], 8(%[src]) \n\t" + "lw %[temp3], 12(%[src]) \n\t" + "ext %[temp4], %[temp0], 8, 16 \n\t" + "ext %[temp5], %[temp0], 5, 11 \n\t" + "ext %[temp0], %[temp0], 3, 5 \n\t" + "ins %[temp4], %[temp5], 0, 11 \n\t" + "ext %[temp5], %[temp1], 5, 11 \n\t" + "ins %[temp4], %[temp0], 0, 5 \n\t" + "ext %[temp0], %[temp1], 8, 16 \n\t" + "ext %[temp1], %[temp1], 3, 5 \n\t" + "ins %[temp0], %[temp5], 0, 11 \n\t" + "ext %[temp5], %[temp2], 5, 11 \n\t" + "ins %[temp0], %[temp1], 0, 5 \n\t" + "ext %[temp1], %[temp2], 8, 16 \n\t" + "ext %[temp2], %[temp2], 3, 5 \n\t" + "ins %[temp1], %[temp5], 0, 11 \n\t" + "ext %[temp5], %[temp3], 5, 11 \n\t" + "ins %[temp1], %[temp2], 0, 5 \n\t" + "ext %[temp2], %[temp3], 8, 16 \n\t" + "ext %[temp3], %[temp3], 3, 5 \n\t" + "ins %[temp2], %[temp5], 0, 11 \n\t" + "append %[temp0], %[temp4], 16 \n\t" + "ins %[temp2], 
%[temp3], 0, 5 \n\t" + "addiu %[src], %[src], 16 \n\t" + "append %[temp2], %[temp1], 16 \n\t" +#if (WEBP_SWAP_16BIT_CSP == 1) + "usw %[temp0], 0(%[dst]) \n\t" + "usw %[temp2], 4(%[dst]) \n\t" +#else + "wsbh %[temp0], %[temp0] \n\t" + "wsbh %[temp2], %[temp2] \n\t" + "usw %[temp0], 0(%[dst]) \n\t" + "usw %[temp2], 4(%[dst]) \n\t" +#endif + "bne %[src], %[p_loop1_end], 0b \n\t" + " addiu %[dst], %[dst], 8 \n\t" + "3: \n\t" + "beq %[src], %[p_loop2_end], 2f \n\t" + " nop \n\t" + "1: \n\t" + "lw %[temp0], 0(%[src]) \n\t" + "ext %[temp4], %[temp0], 8, 16 \n\t" + "ext %[temp5], %[temp0], 5, 11 \n\t" + "ext %[temp0], %[temp0], 3, 5 \n\t" + "ins %[temp4], %[temp5], 0, 11 \n\t" + "addiu %[src], %[src], 4 \n\t" + "ins %[temp4], %[temp0], 0, 5 \n\t" +#if (WEBP_SWAP_16BIT_CSP == 1) + "ush %[temp4], 0(%[dst]) \n\t" +#else + "wsbh %[temp4], %[temp4] \n\t" + "ush %[temp4], 0(%[dst]) \n\t" +#endif + "bne %[src], %[p_loop2_end], 1b \n\t" + " addiu %[dst], %[dst], 2 \n\t" + "2: \n\t" + ".set pop \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), + [dst]"+&r"(dst), [src]"+&r"(src) + : [p_loop1_end]"r"(p_loop1_end), [p_loop2_end]"r"(p_loop2_end) + : "memory" + ); +} + +static void ConvertBGRAToBGR_MIPSdspR2(const uint32_t* src, + int num_pixels, uint8_t* dst) { + int temp0, temp1, temp2, temp3; + const uint32_t* const p_loop1_end = src + (num_pixels & ~3); + const uint32_t* const p_loop2_end = src + num_pixels; + __asm__ volatile ( + ".set push \n\t" + ".set noreorder \n\t" + "beq %[src], %[p_loop1_end], 3f \n\t" + " nop \n\t" + "0: \n\t" + "lw %[temp0], 0(%[src]) \n\t" + "lw %[temp1], 4(%[src]) \n\t" + "lw %[temp2], 8(%[src]) \n\t" + "lw %[temp3], 12(%[src]) \n\t" + "ins %[temp0], %[temp1], 24, 8 \n\t" + "sra %[temp1], %[temp1], 8 \n\t" + "ins %[temp1], %[temp2], 16, 16 \n\t" + "sll %[temp2], %[temp2], 8 \n\t" + "balign %[temp3], %[temp2], 1 \n\t" + "addiu %[src], %[src], 16 \n\t" + "usw %[temp0], 0(%[dst]) \n\t" + "usw %[temp1], 4(%[dst]) \n\t" + "usw %[temp3], 8(%[dst]) \n\t" + "bne %[src], %[p_loop1_end], 0b \n\t" + " addiu %[dst], %[dst], 12 \n\t" + "3: \n\t" + "beq %[src], %[p_loop2_end], 2f \n\t" + " nop \n\t" + "1: \n\t" + "lw %[temp0], 0(%[src]) \n\t" + "addiu %[src], %[src], 4 \n\t" + "addiu %[dst], %[dst], 3 \n\t" + "ush %[temp0], -3(%[dst]) \n\t" + "sra %[temp0], %[temp0], 16 \n\t" + "bne %[src], %[p_loop2_end], 1b \n\t" + " sb %[temp0], -1(%[dst]) \n\t" + "2: \n\t" + ".set pop \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), + [temp3]"=&r"(temp3), [dst]"+&r"(dst), [src]"+&r"(src) + : [p_loop1_end]"r"(p_loop1_end), [p_loop2_end]"r"(p_loop2_end) + : "memory" + ); +} + +//------------------------------------------------------------------------------ +// Entry point + +extern void VP8LDspInitMIPSdspR2(void); + +WEBP_TSAN_IGNORE_FUNCTION void VP8LDspInitMIPSdspR2(void) { + VP8LMapColor32b = MapARGB_MIPSdspR2; + VP8LMapColor8b = MapAlpha_MIPSdspR2; + + VP8LPredictors[5] = Predictor5_MIPSdspR2; + VP8LPredictors[6] = Predictor6_MIPSdspR2; + VP8LPredictors[7] = Predictor7_MIPSdspR2; + VP8LPredictors[8] = Predictor8_MIPSdspR2; + VP8LPredictors[9] = Predictor9_MIPSdspR2; + VP8LPredictors[10] = Predictor10_MIPSdspR2; + VP8LPredictors[11] = Predictor11_MIPSdspR2; + VP8LPredictors[12] = Predictor12_MIPSdspR2; + VP8LPredictors[13] = Predictor13_MIPSdspR2; + + VP8LAddGreenToBlueAndRed = AddGreenToBlueAndRed_MIPSdspR2; + VP8LTransformColorInverse = TransformColorInverse_MIPSdspR2; + + 
VP8LConvertBGRAToRGB = ConvertBGRAToRGB_MIPSdspR2; + VP8LConvertBGRAToRGBA = ConvertBGRAToRGBA_MIPSdspR2; + VP8LConvertBGRAToRGBA4444 = ConvertBGRAToRGBA4444_MIPSdspR2; + VP8LConvertBGRAToRGB565 = ConvertBGRAToRGB565_MIPSdspR2; + VP8LConvertBGRAToBGR = ConvertBGRAToBGR_MIPSdspR2; +} + +#else // !WEBP_USE_MIPS_DSP_R2 + +WEBP_DSP_INIT_STUB(VP8LDspInitMIPSdspR2) + +#endif // WEBP_USE_MIPS_DSP_R2 diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_lossless_msa.c b/vendor/github.com/sizeofint/webpanimation/dsp_lossless_msa.c new file mode 100644 index 00000000..d16e7d28 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_lossless_msa.c @@ -0,0 +1,356 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// MSA variant of methods for lossless decoder +// +// Author: Prashant Patil (prashant.patil@imgtec.com) + +#include "dsp_dsp.h" + +#if defined(WEBP_USE_MSA) + +#include "dsp_lossless.h" +#include "dsp_msa_macro.h" + +//------------------------------------------------------------------------------ +// Colorspace conversion functions + +#define CONVERT16_BGRA_XXX(psrc, pdst, m0, m1, m2) do { \ + v16u8 src0, src1, src2, src3, dst0, dst1, dst2; \ + LD_UB4(psrc, 16, src0, src1, src2, src3); \ + VSHF_B2_UB(src0, src1, src1, src2, m0, m1, dst0, dst1); \ + dst2 = VSHF_UB(src2, src3, m2); \ + ST_UB2(dst0, dst1, pdst, 16); \ + ST_UB(dst2, pdst + 32); \ +} while (0) + +#define CONVERT12_BGRA_XXX(psrc, pdst, m0, m1, m2) do { \ + uint32_t pix_w; \ + v16u8 src0, src1, src2, dst0, dst1, dst2; \ + LD_UB3(psrc, 16, src0, src1, src2); \ + VSHF_B2_UB(src0, src1, src1, src2, m0, m1, dst0, dst1); \ + dst2 = VSHF_UB(src2, src2, m2); \ + ST_UB2(dst0, dst1, pdst, 16); \ + pix_w = __msa_copy_s_w((v4i32)dst2, 0); \ + SW(pix_w, pdst + 32); \ +} while (0) + +#define CONVERT8_BGRA_XXX(psrc, pdst, m0, m1) do { \ + uint64_t pix_d; \ + v16u8 src0, src1, src2 = { 0 }, dst0, dst1; \ + LD_UB2(psrc, 16, src0, src1); \ + VSHF_B2_UB(src0, src1, src1, src2, m0, m1, dst0, dst1); \ + ST_UB(dst0, pdst); \ + pix_d = __msa_copy_s_d((v2i64)dst1, 0); \ + SD(pix_d, pdst + 16); \ +} while (0) + +#define CONVERT4_BGRA_XXX(psrc, pdst, m) do { \ + const v16u8 src0 = LD_UB(psrc); \ + const v16u8 dst0 = VSHF_UB(src0, src0, m); \ + uint64_t pix_d = __msa_copy_s_d((v2i64)dst0, 0); \ + uint32_t pix_w = __msa_copy_s_w((v4i32)dst0, 2); \ + SD(pix_d, pdst + 0); \ + SW(pix_w, pdst + 8); \ +} while (0) + +#define CONVERT1_BGRA_BGR(psrc, pdst) do { \ + const int32_t b = (psrc)[0]; \ + const int32_t g = (psrc)[1]; \ + const int32_t r = (psrc)[2]; \ + (pdst)[0] = b; \ + (pdst)[1] = g; \ + (pdst)[2] = r; \ +} while (0) + +#define CONVERT1_BGRA_RGB(psrc, pdst) do { \ + const int32_t b = (psrc)[0]; \ + const int32_t g = (psrc)[1]; \ + const int32_t r = (psrc)[2]; \ + (pdst)[0] = r; \ + (pdst)[1] = g; \ + (pdst)[2] = b; \ +} while (0) + +#define TRANSFORM_COLOR_INVERSE_8(src0, src1, dst0, dst1, \ + c0, c1, mask0, mask1) do { \ + v8i16 g0, g1, t0, t1, t2, t3; \ + v4i32 t4, t5; \ + VSHF_B2_SH(src0, src0, src1, src1, mask0, mask0, g0, g1); \ + DOTP_SB2_SH(g0, g1, c0, c0, t0, t1); \ + SRAI_H2_SH(t0, t1, 5); \ + t0 = 
__msa_addv_h(t0, (v8i16)src0); \ + t1 = __msa_addv_h(t1, (v8i16)src1); \ + t4 = __msa_srli_w((v4i32)t0, 16); \ + t5 = __msa_srli_w((v4i32)t1, 16); \ + DOTP_SB2_SH(t4, t5, c1, c1, t2, t3); \ + SRAI_H2_SH(t2, t3, 5); \ + ADD2(t0, t2, t1, t3, t0, t1); \ + VSHF_B2_UB(src0, t0, src1, t1, mask1, mask1, dst0, dst1); \ +} while (0) + +#define TRANSFORM_COLOR_INVERSE_4(src, dst, c0, c1, mask0, mask1) do { \ + const v16i8 g0 = VSHF_SB(src, src, mask0); \ + v8i16 t0 = __msa_dotp_s_h(c0, g0); \ + v8i16 t1; \ + v4i32 t2; \ + t0 = SRAI_H(t0, 5); \ + t0 = __msa_addv_h(t0, (v8i16)src); \ + t2 = __msa_srli_w((v4i32)t0, 16); \ + t1 = __msa_dotp_s_h(c1, (v16i8)t2); \ + t1 = SRAI_H(t1, 5); \ + t0 = t0 + t1; \ + dst = VSHF_UB(src, t0, mask1); \ +} while (0) + +static void ConvertBGRAToRGBA_MSA(const uint32_t* src, + int num_pixels, uint8_t* dst) { + int i; + const uint8_t* ptemp_src = (const uint8_t*)src; + uint8_t* ptemp_dst = (uint8_t*)dst; + v16u8 src0, dst0; + const v16u8 mask = { 2, 1, 0, 3, 6, 5, 4, 7, 10, 9, 8, 11, 14, 13, 12, 15 }; + + while (num_pixels >= 8) { + v16u8 src1, dst1; + LD_UB2(ptemp_src, 16, src0, src1); + VSHF_B2_UB(src0, src0, src1, src1, mask, mask, dst0, dst1); + ST_UB2(dst0, dst1, ptemp_dst, 16); + ptemp_src += 32; + ptemp_dst += 32; + num_pixels -= 8; + } + if (num_pixels > 0) { + if (num_pixels >= 4) { + src0 = LD_UB(ptemp_src); + dst0 = VSHF_UB(src0, src0, mask); + ST_UB(dst0, ptemp_dst); + ptemp_src += 16; + ptemp_dst += 16; + num_pixels -= 4; + } + for (i = 0; i < num_pixels; i++) { + const uint8_t b = ptemp_src[2]; + const uint8_t g = ptemp_src[1]; + const uint8_t r = ptemp_src[0]; + const uint8_t a = ptemp_src[3]; + ptemp_dst[0] = b; + ptemp_dst[1] = g; + ptemp_dst[2] = r; + ptemp_dst[3] = a; + ptemp_src += 4; + ptemp_dst += 4; + } + } +} + +static void ConvertBGRAToBGR_MSA(const uint32_t* src, + int num_pixels, uint8_t* dst) { + const uint8_t* ptemp_src = (const uint8_t*)src; + uint8_t* ptemp_dst = (uint8_t*)dst; + const v16u8 mask0 = { 0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, + 16, 17, 18, 20 }; + const v16u8 mask1 = { 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, + 21, 22, 24, 25 }; + const v16u8 mask2 = { 10, 12, 13, 14, 16, 17, 18, 20, 21, 22, 24, 25, + 26, 28, 29, 30 }; + + while (num_pixels >= 16) { + CONVERT16_BGRA_XXX(ptemp_src, ptemp_dst, mask0, mask1, mask2); + ptemp_src += 64; + ptemp_dst += 48; + num_pixels -= 16; + } + if (num_pixels > 0) { + if (num_pixels >= 12) { + CONVERT12_BGRA_XXX(ptemp_src, ptemp_dst, mask0, mask1, mask2); + ptemp_src += 48; + ptemp_dst += 36; + num_pixels -= 12; + } else if (num_pixels >= 8) { + CONVERT8_BGRA_XXX(ptemp_src, ptemp_dst, mask0, mask1); + ptemp_src += 32; + ptemp_dst += 24; + num_pixels -= 8; + } else if (num_pixels >= 4) { + CONVERT4_BGRA_XXX(ptemp_src, ptemp_dst, mask0); + ptemp_src += 16; + ptemp_dst += 12; + num_pixels -= 4; + } + if (num_pixels == 3) { + CONVERT1_BGRA_BGR(ptemp_src + 0, ptemp_dst + 0); + CONVERT1_BGRA_BGR(ptemp_src + 4, ptemp_dst + 3); + CONVERT1_BGRA_BGR(ptemp_src + 8, ptemp_dst + 6); + } else if (num_pixels == 2) { + CONVERT1_BGRA_BGR(ptemp_src + 0, ptemp_dst + 0); + CONVERT1_BGRA_BGR(ptemp_src + 4, ptemp_dst + 3); + } else if (num_pixels == 1) { + CONVERT1_BGRA_BGR(ptemp_src, ptemp_dst); + } + } +} + +static void ConvertBGRAToRGB_MSA(const uint32_t* src, + int num_pixels, uint8_t* dst) { + const uint8_t* ptemp_src = (const uint8_t*)src; + uint8_t* ptemp_dst = (uint8_t*)dst; + const v16u8 mask0 = { 2, 1, 0, 6, 5, 4, 10, 9, 8, 14, 13, 12, + 18, 17, 16, 22 }; + const v16u8 mask1 = { 5, 4, 10, 9, 8, 14, 13, 
12, 18, 17, 16, 22,
+                        21, 20, 26, 25 };
+  const v16u8 mask2 = { 8, 14, 13, 12, 18, 17, 16, 22, 21, 20, 26, 25,
+                        24, 30, 29, 28 };
+
+  while (num_pixels >= 16) {
+    CONVERT16_BGRA_XXX(ptemp_src, ptemp_dst, mask0, mask1, mask2);
+    ptemp_src += 64;
+    ptemp_dst += 48;
+    num_pixels -= 16;
+  }
+  if (num_pixels) {
+    if (num_pixels >= 12) {
+      CONVERT12_BGRA_XXX(ptemp_src, ptemp_dst, mask0, mask1, mask2);
+      ptemp_src += 48;
+      ptemp_dst += 36;
+      num_pixels -= 12;
+    } else if (num_pixels >= 8) {
+      CONVERT8_BGRA_XXX(ptemp_src, ptemp_dst, mask0, mask1);
+      ptemp_src += 32;
+      ptemp_dst += 24;
+      num_pixels -= 8;
+    } else if (num_pixels >= 4) {
+      CONVERT4_BGRA_XXX(ptemp_src, ptemp_dst, mask0);
+      ptemp_src += 16;
+      ptemp_dst += 12;
+      num_pixels -= 4;
+    }
+    if (num_pixels == 3) {
+      CONVERT1_BGRA_RGB(ptemp_src + 0, ptemp_dst + 0);
+      CONVERT1_BGRA_RGB(ptemp_src + 4, ptemp_dst + 3);
+      CONVERT1_BGRA_RGB(ptemp_src + 8, ptemp_dst + 6);
+    } else if (num_pixels == 2) {
+      CONVERT1_BGRA_RGB(ptemp_src + 0, ptemp_dst + 0);
+      CONVERT1_BGRA_RGB(ptemp_src + 4, ptemp_dst + 3);
+    } else if (num_pixels == 1) {
+      CONVERT1_BGRA_RGB(ptemp_src, ptemp_dst);
+    }
+  }
+}
+
+static void AddGreenToBlueAndRed_MSA(const uint32_t* const src, int num_pixels,
+                                     uint32_t* dst) {
+  int i;
+  const uint8_t* in = (const uint8_t*)src;
+  uint8_t* out = (uint8_t*)dst;
+  v16u8 src0, dst0, tmp0;
+  const v16u8 mask = { 1, 255, 1, 255, 5, 255, 5, 255, 9, 255, 9, 255,
+                       13, 255, 13, 255 };
+
+  while (num_pixels >= 8) {
+    v16u8 src1, dst1, tmp1;
+    LD_UB2(in, 16, src0, src1);
+    VSHF_B2_UB(src0, src1, src1, src0, mask, mask, tmp0, tmp1);
+    ADD2(src0, tmp0, src1, tmp1, dst0, dst1);
+    ST_UB2(dst0, dst1, out, 16);
+    in += 32;
+    out += 32;
+    num_pixels -= 8;
+  }
+  if (num_pixels > 0) {
+    if (num_pixels >= 4) {
+      src0 = LD_UB(in);
+      tmp0 = VSHF_UB(src0, src0, mask);
+      dst0 = src0 + tmp0;
+      ST_UB(dst0, out);
+      in += 16;
+      out += 16;
+      num_pixels -= 4;
+    }
+    for (i = 0; i < num_pixels; i++) {
+      const uint8_t b = in[0];
+      const uint8_t g = in[1];
+      const uint8_t r = in[2];
+      out[0] = (b + g) & 0xff;
+      out[1] = g;
+      out[2] = (r + g) & 0xff;
+      out[3] = in[3];  // keep the alpha byte unchanged
+      in += 4;
+      out += 4;
+    }
+  }
+}
+
+static void TransformColorInverse_MSA(const VP8LMultipliers* const m,
+                                      const uint32_t* src, int num_pixels,
+                                      uint32_t* dst) {
+  v16u8 src0, dst0;
+  const v16i8 g2br = (v16i8)__msa_fill_w(m->green_to_blue_ |
+                                         (m->green_to_red_ << 16));
+  const v16i8 r2b = (v16i8)__msa_fill_w(m->red_to_blue_);
+  const v16u8 mask0 = { 1, 255, 1, 255, 5, 255, 5, 255, 9, 255, 9, 255,
+                        13, 255, 13, 255 };
+  const v16u8 mask1 = { 16, 1, 18, 3, 20, 5, 22, 7, 24, 9, 26, 11,
+                        28, 13, 30, 15 };
+
+  while (num_pixels >= 8) {
+    v16u8 src1, dst1;
+    LD_UB2(src, 4, src0, src1);
+    TRANSFORM_COLOR_INVERSE_8(src0, src1, dst0, dst1, g2br, r2b, mask0, mask1);
+    ST_UB2(dst0, dst1, dst, 4);
+    src += 8;
+    dst += 8;
+    num_pixels -= 8;
+  }
+  if (num_pixels > 0) {
+    if (num_pixels >= 4) {
+      src0 = LD_UB(src);
+      TRANSFORM_COLOR_INVERSE_4(src0, dst0, g2br, r2b, mask0, mask1);
+      ST_UB(dst0, dst);
+      src += 4;
+      dst += 4;
+      num_pixels -= 4;
+    }
+    if (num_pixels > 0) {
+      src0 = LD_UB(src);
+      TRANSFORM_COLOR_INVERSE_4(src0, dst0, g2br, r2b, mask0, mask1);
+      if (num_pixels == 3) {
+        const uint64_t pix_d = __msa_copy_s_d((v2i64)dst0, 0);
+        const uint32_t pix_w = __msa_copy_s_w((v4i32)dst0, 2);
+        SD(pix_d, dst + 0);
+        SW(pix_w, dst + 2);
+      } else if (num_pixels == 2) {
+        const uint64_t pix_d = __msa_copy_s_d((v2i64)dst0, 0);
+        SD(pix_d, dst);
+      } else {
+        const uint32_t pix_w = __msa_copy_s_w((v4i32)dst0, 0);
+        SW(pix_w, dst);
+      }
} + } +} + +//------------------------------------------------------------------------------ +// Entry point + +extern void VP8LDspInitMSA(void); + +WEBP_TSAN_IGNORE_FUNCTION void VP8LDspInitMSA(void) { + VP8LConvertBGRAToRGBA = ConvertBGRAToRGBA_MSA; + VP8LConvertBGRAToBGR = ConvertBGRAToBGR_MSA; + VP8LConvertBGRAToRGB = ConvertBGRAToRGB_MSA; + + VP8LAddGreenToBlueAndRed = AddGreenToBlueAndRed_MSA; + VP8LTransformColorInverse = TransformColorInverse_MSA; +} + +#else // !WEBP_USE_MSA + +WEBP_DSP_INIT_STUB(VP8LDspInitMSA) + +#endif // WEBP_USE_MSA diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_lossless_neon.c b/vendor/github.com/sizeofint/webpanimation/dsp_lossless_neon.c new file mode 100644 index 00000000..6d163c56 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_lossless_neon.c @@ -0,0 +1,641 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// NEON variant of methods for lossless decoder +// +// Author: Skal (pascal.massimino@gmail.com) + +#include "dsp_dsp.h" + +#if defined(WEBP_USE_NEON) + +#include + +#include "dsp_lossless.h" +#include "dsp_neon.h" + +//------------------------------------------------------------------------------ +// Colorspace conversion functions + +#if !defined(WORK_AROUND_GCC) +// gcc 4.6.0 had some trouble (NDK-r9) with this code. We only use it for +// gcc-4.8.x at least. +static void ConvertBGRAToRGBA_NEON(const uint32_t* src, + int num_pixels, uint8_t* dst) { + const uint32_t* const end = src + (num_pixels & ~15); + for (; src < end; src += 16) { + uint8x16x4_t pixel = vld4q_u8((uint8_t*)src); + // swap B and R. (VSWP d0,d2 has no intrinsics equivalent!) 
+ const uint8x16_t tmp = pixel.val[0]; + pixel.val[0] = pixel.val[2]; + pixel.val[2] = tmp; + vst4q_u8(dst, pixel); + dst += 64; + } + VP8LConvertBGRAToRGBA_C(src, num_pixels & 15, dst); // left-overs +} + +static void ConvertBGRAToBGR_NEON(const uint32_t* src, + int num_pixels, uint8_t* dst) { + const uint32_t* const end = src + (num_pixels & ~15); + for (; src < end; src += 16) { + const uint8x16x4_t pixel = vld4q_u8((uint8_t*)src); + const uint8x16x3_t tmp = { { pixel.val[0], pixel.val[1], pixel.val[2] } }; + vst3q_u8(dst, tmp); + dst += 48; + } + VP8LConvertBGRAToBGR_C(src, num_pixels & 15, dst); // left-overs +} + +static void ConvertBGRAToRGB_NEON(const uint32_t* src, + int num_pixels, uint8_t* dst) { + const uint32_t* const end = src + (num_pixels & ~15); + for (; src < end; src += 16) { + const uint8x16x4_t pixel = vld4q_u8((uint8_t*)src); + const uint8x16x3_t tmp = { { pixel.val[2], pixel.val[1], pixel.val[0] } }; + vst3q_u8(dst, tmp); + dst += 48; + } + VP8LConvertBGRAToRGB_C(src, num_pixels & 15, dst); // left-overs +} + +#else // WORK_AROUND_GCC + +// gcc-4.6.0 fallback + +static const uint8_t kRGBAShuffle[8] = { 2, 1, 0, 3, 6, 5, 4, 7 }; + +static void ConvertBGRAToRGBA_NEON(const uint32_t* src, + int num_pixels, uint8_t* dst) { + const uint32_t* const end = src + (num_pixels & ~1); + const uint8x8_t shuffle = vld1_u8(kRGBAShuffle); + for (; src < end; src += 2) { + const uint8x8_t pixels = vld1_u8((uint8_t*)src); + vst1_u8(dst, vtbl1_u8(pixels, shuffle)); + dst += 8; + } + VP8LConvertBGRAToRGBA_C(src, num_pixels & 1, dst); // left-overs +} + +static const uint8_t kBGRShuffle[3][8] = { + { 0, 1, 2, 4, 5, 6, 8, 9 }, + { 10, 12, 13, 14, 16, 17, 18, 20 }, + { 21, 22, 24, 25, 26, 28, 29, 30 } +}; + +static void ConvertBGRAToBGR_NEON(const uint32_t* src, + int num_pixels, uint8_t* dst) { + const uint32_t* const end = src + (num_pixels & ~7); + const uint8x8_t shuffle0 = vld1_u8(kBGRShuffle[0]); + const uint8x8_t shuffle1 = vld1_u8(kBGRShuffle[1]); + const uint8x8_t shuffle2 = vld1_u8(kBGRShuffle[2]); + for (; src < end; src += 8) { + uint8x8x4_t pixels; + INIT_VECTOR4(pixels, + vld1_u8((const uint8_t*)(src + 0)), + vld1_u8((const uint8_t*)(src + 2)), + vld1_u8((const uint8_t*)(src + 4)), + vld1_u8((const uint8_t*)(src + 6))); + vst1_u8(dst + 0, vtbl4_u8(pixels, shuffle0)); + vst1_u8(dst + 8, vtbl4_u8(pixels, shuffle1)); + vst1_u8(dst + 16, vtbl4_u8(pixels, shuffle2)); + dst += 8 * 3; + } + VP8LConvertBGRAToBGR_C(src, num_pixels & 7, dst); // left-overs +} + +static const uint8_t kRGBShuffle[3][8] = { + { 2, 1, 0, 6, 5, 4, 10, 9 }, + { 8, 14, 13, 12, 18, 17, 16, 22 }, + { 21, 20, 26, 25, 24, 30, 29, 28 } +}; + +static void ConvertBGRAToRGB_NEON(const uint32_t* src, + int num_pixels, uint8_t* dst) { + const uint32_t* const end = src + (num_pixels & ~7); + const uint8x8_t shuffle0 = vld1_u8(kRGBShuffle[0]); + const uint8x8_t shuffle1 = vld1_u8(kRGBShuffle[1]); + const uint8x8_t shuffle2 = vld1_u8(kRGBShuffle[2]); + for (; src < end; src += 8) { + uint8x8x4_t pixels; + INIT_VECTOR4(pixels, + vld1_u8((const uint8_t*)(src + 0)), + vld1_u8((const uint8_t*)(src + 2)), + vld1_u8((const uint8_t*)(src + 4)), + vld1_u8((const uint8_t*)(src + 6))); + vst1_u8(dst + 0, vtbl4_u8(pixels, shuffle0)); + vst1_u8(dst + 8, vtbl4_u8(pixels, shuffle1)); + vst1_u8(dst + 16, vtbl4_u8(pixels, shuffle2)); + dst += 8 * 3; + } + VP8LConvertBGRAToRGB_C(src, num_pixels & 7, dst); // left-overs +} + +#endif // !WORK_AROUND_GCC + +//------------------------------------------------------------------------------ +// 
Predictor Transform + +#define LOAD_U32_AS_U8(IN) vreinterpret_u8_u32(vdup_n_u32((IN))) +#define LOAD_U32P_AS_U8(IN) vreinterpret_u8_u32(vld1_u32((IN))) +#define LOADQ_U32_AS_U8(IN) vreinterpretq_u8_u32(vdupq_n_u32((IN))) +#define LOADQ_U32P_AS_U8(IN) vreinterpretq_u8_u32(vld1q_u32((IN))) +#define GET_U8_AS_U32(IN) vget_lane_u32(vreinterpret_u32_u8((IN)), 0); +#define GETQ_U8_AS_U32(IN) vgetq_lane_u32(vreinterpretq_u32_u8((IN)), 0); +#define STOREQ_U8_AS_U32P(OUT, IN) vst1q_u32((OUT), vreinterpretq_u32_u8((IN))); +#define ROTATE32_LEFT(L) vextq_u8((L), (L), 12) // D|C|B|A -> C|B|A|D + +static WEBP_INLINE uint8x8_t Average2_u8_NEON(uint32_t a0, uint32_t a1) { + const uint8x8_t A0 = LOAD_U32_AS_U8(a0); + const uint8x8_t A1 = LOAD_U32_AS_U8(a1); + return vhadd_u8(A0, A1); +} + +static WEBP_INLINE uint32_t ClampedAddSubtractHalf_NEON(uint32_t c0, + uint32_t c1, + uint32_t c2) { + const uint8x8_t avg = Average2_u8_NEON(c0, c1); + // Remove one to c2 when bigger than avg. + const uint8x8_t C2 = LOAD_U32_AS_U8(c2); + const uint8x8_t cmp = vcgt_u8(C2, avg); + const uint8x8_t C2_1 = vadd_u8(C2, cmp); + // Compute half of the difference between avg and c2. + const int8x8_t diff_avg = vreinterpret_s8_u8(vhsub_u8(avg, C2_1)); + // Compute the sum with avg and saturate. + const int16x8_t avg_16 = vreinterpretq_s16_u16(vmovl_u8(avg)); + const uint8x8_t res = vqmovun_s16(vaddw_s8(avg_16, diff_avg)); + const uint32_t output = GET_U8_AS_U32(res); + return output; +} + +static WEBP_INLINE uint32_t Average2_NEON(uint32_t a0, uint32_t a1) { + const uint8x8_t avg_u8x8 = Average2_u8_NEON(a0, a1); + const uint32_t avg = GET_U8_AS_U32(avg_u8x8); + return avg; +} + +static WEBP_INLINE uint32_t Average3_NEON(uint32_t a0, uint32_t a1, + uint32_t a2) { + const uint8x8_t avg0 = Average2_u8_NEON(a0, a2); + const uint8x8_t A1 = LOAD_U32_AS_U8(a1); + const uint32_t avg = GET_U8_AS_U32(vhadd_u8(avg0, A1)); + return avg; +} + +static uint32_t Predictor5_NEON(uint32_t left, const uint32_t* const top) { + return Average3_NEON(left, top[0], top[1]); +} +static uint32_t Predictor6_NEON(uint32_t left, const uint32_t* const top) { + return Average2_NEON(left, top[-1]); +} +static uint32_t Predictor7_NEON(uint32_t left, const uint32_t* const top) { + return Average2_NEON(left, top[0]); +} +static uint32_t Predictor13_NEON(uint32_t left, const uint32_t* const top) { + return ClampedAddSubtractHalf_NEON(left, top[0], top[-1]); +} + +// Batch versions of those functions. + +// Predictor0: ARGB_BLACK. +static void PredictorAdd0_NEON(const uint32_t* in, const uint32_t* upper, + int num_pixels, uint32_t* out) { + int i; + const uint8x16_t black = vreinterpretq_u8_u32(vdupq_n_u32(ARGB_BLACK)); + for (i = 0; i + 4 <= num_pixels; i += 4) { + const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]); + const uint8x16_t res = vaddq_u8(src, black); + STOREQ_U8_AS_U32P(&out[i], res); + } + VP8LPredictorsAdd_C[0](in + i, upper + i, num_pixels - i, out + i); +} + +// Predictor1: left. 
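+// The running left-to-right sum over four pixels is built with two
+// shift-and-add steps (a log2(4)-step inclusive prefix sum): vextq_u8
+// against a zero vector acts as a whole-register byte shift, and the
+// per-channel additions wrap mod 256 as the format requires.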
+static void PredictorAdd1_NEON(const uint32_t* in, const uint32_t* upper, + int num_pixels, uint32_t* out) { + int i; + const uint8x16_t zero = LOADQ_U32_AS_U8(0); + for (i = 0; i + 4 <= num_pixels; i += 4) { + // a | b | c | d + const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]); + // 0 | a | b | c + const uint8x16_t shift0 = vextq_u8(zero, src, 12); + // a | a + b | b + c | c + d + const uint8x16_t sum0 = vaddq_u8(src, shift0); + // 0 | 0 | a | a + b + const uint8x16_t shift1 = vextq_u8(zero, sum0, 8); + // a | a + b | a + b + c | a + b + c + d + const uint8x16_t sum1 = vaddq_u8(sum0, shift1); + const uint8x16_t prev = LOADQ_U32_AS_U8(out[i - 1]); + const uint8x16_t res = vaddq_u8(sum1, prev); + STOREQ_U8_AS_U32P(&out[i], res); + } + VP8LPredictorsAdd_C[1](in + i, upper + i, num_pixels - i, out + i); +} + +// Macro that adds 32-bit integers from IN using mod 256 arithmetic +// per 8 bit channel. +#define GENERATE_PREDICTOR_1(X, IN) \ +static void PredictorAdd##X##_NEON(const uint32_t* in, \ + const uint32_t* upper, int num_pixels, \ + uint32_t* out) { \ + int i; \ + for (i = 0; i + 4 <= num_pixels; i += 4) { \ + const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]); \ + const uint8x16_t other = LOADQ_U32P_AS_U8(&(IN)); \ + const uint8x16_t res = vaddq_u8(src, other); \ + STOREQ_U8_AS_U32P(&out[i], res); \ + } \ + VP8LPredictorsAdd_C[(X)](in + i, upper + i, num_pixels - i, out + i); \ +} +// Predictor2: Top. +GENERATE_PREDICTOR_1(2, upper[i]) +// Predictor3: Top-right. +GENERATE_PREDICTOR_1(3, upper[i + 1]) +// Predictor4: Top-left. +GENERATE_PREDICTOR_1(4, upper[i - 1]) +#undef GENERATE_PREDICTOR_1 + +// Predictor5: average(average(left, TR), T) +#define DO_PRED5(LANE) do { \ + const uint8x16_t avgLTR = vhaddq_u8(L, TR); \ + const uint8x16_t avg = vhaddq_u8(avgLTR, T); \ + const uint8x16_t res = vaddq_u8(avg, src); \ + vst1q_lane_u32(&out[i + (LANE)], vreinterpretq_u32_u8(res), (LANE)); \ + L = ROTATE32_LEFT(res); \ +} while (0) + +static void PredictorAdd5_NEON(const uint32_t* in, const uint32_t* upper, + int num_pixels, uint32_t* out) { + int i; + uint8x16_t L = LOADQ_U32_AS_U8(out[-1]); + for (i = 0; i + 4 <= num_pixels; i += 4) { + const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]); + const uint8x16_t T = LOADQ_U32P_AS_U8(&upper[i + 0]); + const uint8x16_t TR = LOADQ_U32P_AS_U8(&upper[i + 1]); + DO_PRED5(0); + DO_PRED5(1); + DO_PRED5(2); + DO_PRED5(3); + } + VP8LPredictorsAdd_C[5](in + i, upper + i, num_pixels - i, out + i); +} +#undef DO_PRED5 + +#define DO_PRED67(LANE) do { \ + const uint8x16_t avg = vhaddq_u8(L, top); \ + const uint8x16_t res = vaddq_u8(avg, src); \ + vst1q_lane_u32(&out[i + (LANE)], vreinterpretq_u32_u8(res), (LANE)); \ + L = ROTATE32_LEFT(res); \ +} while (0) + +// Predictor6: average(left, TL) +static void PredictorAdd6_NEON(const uint32_t* in, const uint32_t* upper, + int num_pixels, uint32_t* out) { + int i; + uint8x16_t L = LOADQ_U32_AS_U8(out[-1]); + for (i = 0; i + 4 <= num_pixels; i += 4) { + const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]); + const uint8x16_t top = LOADQ_U32P_AS_U8(&upper[i - 1]); + DO_PRED67(0); + DO_PRED67(1); + DO_PRED67(2); + DO_PRED67(3); + } + VP8LPredictorsAdd_C[6](in + i, upper + i, num_pixels - i, out + i); +} + +// Predictor7: average(left, T) +static void PredictorAdd7_NEON(const uint32_t* in, const uint32_t* upper, + int num_pixels, uint32_t* out) { + int i; + uint8x16_t L = LOADQ_U32_AS_U8(out[-1]); + for (i = 0; i + 4 <= num_pixels; i += 4) { + const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]); + const uint8x16_t top = 
LOADQ_U32P_AS_U8(&upper[i]); + DO_PRED67(0); + DO_PRED67(1); + DO_PRED67(2); + DO_PRED67(3); + } + VP8LPredictorsAdd_C[7](in + i, upper + i, num_pixels - i, out + i); +} +#undef DO_PRED67 + +#define GENERATE_PREDICTOR_2(X, IN) \ +static void PredictorAdd##X##_NEON(const uint32_t* in, \ + const uint32_t* upper, int num_pixels, \ + uint32_t* out) { \ + int i; \ + for (i = 0; i + 4 <= num_pixels; i += 4) { \ + const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]); \ + const uint8x16_t Tother = LOADQ_U32P_AS_U8(&(IN)); \ + const uint8x16_t T = LOADQ_U32P_AS_U8(&upper[i]); \ + const uint8x16_t avg = vhaddq_u8(T, Tother); \ + const uint8x16_t res = vaddq_u8(avg, src); \ + STOREQ_U8_AS_U32P(&out[i], res); \ + } \ + VP8LPredictorsAdd_C[(X)](in + i, upper + i, num_pixels - i, out + i); \ +} +// Predictor8: average TL T. +GENERATE_PREDICTOR_2(8, upper[i - 1]) +// Predictor9: average T TR. +GENERATE_PREDICTOR_2(9, upper[i + 1]) +#undef GENERATE_PREDICTOR_2 + +// Predictor10: average of (average of (L,TL), average of (T, TR)). +#define DO_PRED10(LANE) do { \ + const uint8x16_t avgLTL = vhaddq_u8(L, TL); \ + const uint8x16_t avg = vhaddq_u8(avgTTR, avgLTL); \ + const uint8x16_t res = vaddq_u8(avg, src); \ + vst1q_lane_u32(&out[i + (LANE)], vreinterpretq_u32_u8(res), (LANE)); \ + L = ROTATE32_LEFT(res); \ +} while (0) + +static void PredictorAdd10_NEON(const uint32_t* in, const uint32_t* upper, + int num_pixels, uint32_t* out) { + int i; + uint8x16_t L = LOADQ_U32_AS_U8(out[-1]); + for (i = 0; i + 4 <= num_pixels; i += 4) { + const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]); + const uint8x16_t TL = LOADQ_U32P_AS_U8(&upper[i - 1]); + const uint8x16_t T = LOADQ_U32P_AS_U8(&upper[i]); + const uint8x16_t TR = LOADQ_U32P_AS_U8(&upper[i + 1]); + const uint8x16_t avgTTR = vhaddq_u8(T, TR); + DO_PRED10(0); + DO_PRED10(1); + DO_PRED10(2); + DO_PRED10(3); + } + VP8LPredictorsAdd_C[10](in + i, upper + i, num_pixels - i, out + i); +} +#undef DO_PRED10 + +// Predictor11: select. +#define DO_PRED11(LANE) do { \ + const uint8x16_t sumLin = vaddq_u8(L, src); /* in + L */ \ + const uint8x16_t pLTL = vabdq_u8(L, TL); /* |L - TL| */ \ + const uint16x8_t sum_LTL = vpaddlq_u8(pLTL); \ + const uint32x4_t pa = vpaddlq_u16(sum_LTL); \ + const uint32x4_t mask = vcleq_u32(pa, pb); \ + const uint8x16_t res = vbslq_u8(vreinterpretq_u8_u32(mask), sumTin, sumLin); \ + vst1q_lane_u32(&out[i + (LANE)], vreinterpretq_u32_u8(res), (LANE)); \ + L = ROTATE32_LEFT(res); \ +} while (0) + +static void PredictorAdd11_NEON(const uint32_t* in, const uint32_t* upper, + int num_pixels, uint32_t* out) { + int i; + uint8x16_t L = LOADQ_U32_AS_U8(out[-1]); + for (i = 0; i + 4 <= num_pixels; i += 4) { + const uint8x16_t T = LOADQ_U32P_AS_U8(&upper[i]); + const uint8x16_t TL = LOADQ_U32P_AS_U8(&upper[i - 1]); + const uint8x16_t pTTL = vabdq_u8(T, TL); // |T - TL| + const uint16x8_t sum_TTL = vpaddlq_u8(pTTL); + const uint32x4_t pb = vpaddlq_u16(sum_TTL); + const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]); + const uint8x16_t sumTin = vaddq_u8(T, src); // in + T + DO_PRED11(0); + DO_PRED11(1); + DO_PRED11(2); + DO_PRED11(3); + } + VP8LPredictorsAdd_C[11](in + i, upper + i, num_pixels - i, out + i); +} +#undef DO_PRED11 + +// Predictor12: ClampedAddSubtractFull. +#define DO_PRED12(DIFF, LANE) do { \ + const uint8x8_t pred = \ + vqmovun_s16(vaddq_s16(vreinterpretq_s16_u16(L), (DIFF))); \ + const uint8x8_t res = \ + vadd_u8(pred, (LANE <= 1) ? 
vget_low_u8(src) : vget_high_u8(src)); \ + const uint16x8_t res16 = vmovl_u8(res); \ + vst1_lane_u32(&out[i + (LANE)], vreinterpret_u32_u8(res), (LANE) & 1); \ + /* rotate in the left predictor for next iteration */ \ + L = vextq_u16(res16, res16, 4); \ +} while (0) + +static void PredictorAdd12_NEON(const uint32_t* in, const uint32_t* upper, + int num_pixels, uint32_t* out) { + int i; + uint16x8_t L = vmovl_u8(LOAD_U32_AS_U8(out[-1])); + for (i = 0; i + 4 <= num_pixels; i += 4) { + // load four pixels of source + const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]); + // precompute the difference T - TL once for all, stored as s16 + const uint8x16_t TL = LOADQ_U32P_AS_U8(&upper[i - 1]); + const uint8x16_t T = LOADQ_U32P_AS_U8(&upper[i]); + const int16x8_t diff_lo = + vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(T), vget_low_u8(TL))); + const int16x8_t diff_hi = + vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(T), vget_high_u8(TL))); + // loop over the four reconstructed pixels + DO_PRED12(diff_lo, 0); + DO_PRED12(diff_lo, 1); + DO_PRED12(diff_hi, 2); + DO_PRED12(diff_hi, 3); + } + VP8LPredictorsAdd_C[12](in + i, upper + i, num_pixels - i, out + i); +} +#undef DO_PRED12 + +// Predictor13: ClampedAddSubtractHalf +#define DO_PRED13(LANE, LOW_OR_HI) do { \ + const uint8x16_t avg = vhaddq_u8(L, T); \ + const uint8x16_t cmp = vcgtq_u8(TL, avg); \ + const uint8x16_t TL_1 = vaddq_u8(TL, cmp); \ + /* Compute half of the difference between avg and TL'. */ \ + const int8x8_t diff_avg = \ + vreinterpret_s8_u8(LOW_OR_HI(vhsubq_u8(avg, TL_1))); \ + /* Compute the sum with avg and saturate. */ \ + const int16x8_t avg_16 = vreinterpretq_s16_u16(vmovl_u8(LOW_OR_HI(avg))); \ + const uint8x8_t delta = vqmovun_s16(vaddw_s8(avg_16, diff_avg)); \ + const uint8x8_t res = vadd_u8(LOW_OR_HI(src), delta); \ + const uint8x16_t res2 = vcombine_u8(res, res); \ + vst1_lane_u32(&out[i + (LANE)], vreinterpret_u32_u8(res), (LANE) & 1); \ + L = ROTATE32_LEFT(res2); \ +} while (0) + +static void PredictorAdd13_NEON(const uint32_t* in, const uint32_t* upper, + int num_pixels, uint32_t* out) { + int i; + uint8x16_t L = LOADQ_U32_AS_U8(out[-1]); + for (i = 0; i + 4 <= num_pixels; i += 4) { + const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]); + const uint8x16_t T = LOADQ_U32P_AS_U8(&upper[i]); + const uint8x16_t TL = LOADQ_U32P_AS_U8(&upper[i - 1]); + DO_PRED13(0, vget_low_u8); + DO_PRED13(1, vget_low_u8); + DO_PRED13(2, vget_high_u8); + DO_PRED13(3, vget_high_u8); + } + VP8LPredictorsAdd_C[13](in + i, upper + i, num_pixels - i, out + i); +} +#undef DO_PRED13 + +#undef LOAD_U32_AS_U8 +#undef LOAD_U32P_AS_U8 +#undef LOADQ_U32_AS_U8 +#undef LOADQ_U32P_AS_U8 +#undef GET_U8_AS_U32 +#undef GETQ_U8_AS_U32 +#undef STOREQ_U8_AS_U32P +#undef ROTATE32_LEFT + +//------------------------------------------------------------------------------ +// Subtract-Green Transform + +// vtbl?_u8 are marked unavailable for iOS arm64 with Xcode < 6.3, use +// non-standard versions there. 
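+// Both paths below compute the same byte shuffle; only the intrinsic
+// spelling differs (one vtbl1q_u8 versus a pair of vtbl1_u8 calls).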
+#if defined(__APPLE__) && defined(__aarch64__) && \
+ defined(__apple_build_version__) && (__apple_build_version__ < 6020037)
+#define USE_VTBLQ
+#endif
+
+#ifdef USE_VTBLQ
+// 255 = byte will be zeroed
+static const uint8_t kGreenShuffle[16] = {
+ 1, 255, 1, 255, 5, 255, 5, 255, 9, 255, 9, 255, 13, 255, 13, 255
+};
+
+static WEBP_INLINE uint8x16_t DoGreenShuffle_NEON(const uint8x16_t argb,
+ const uint8x16_t shuffle) {
+ return vcombine_u8(vtbl1q_u8(argb, vget_low_u8(shuffle)),
+ vtbl1q_u8(argb, vget_high_u8(shuffle)));
+}
+#else // !USE_VTBLQ
+// 255 = byte will be zeroed
+static const uint8_t kGreenShuffle[8] = { 1, 255, 1, 255, 5, 255, 5, 255 };
+
+static WEBP_INLINE uint8x16_t DoGreenShuffle_NEON(const uint8x16_t argb,
+ const uint8x8_t shuffle) {
+ return vcombine_u8(vtbl1_u8(vget_low_u8(argb), shuffle),
+ vtbl1_u8(vget_high_u8(argb), shuffle));
+}
+#endif // USE_VTBLQ
+
+static void AddGreenToBlueAndRed_NEON(const uint32_t* src, int num_pixels,
+ uint32_t* dst) {
+ const uint32_t* const end = src + (num_pixels & ~3);
+#ifdef USE_VTBLQ
+ const uint8x16_t shuffle = vld1q_u8(kGreenShuffle);
+#else
+ const uint8x8_t shuffle = vld1_u8(kGreenShuffle);
+#endif
+ for (; src < end; src += 4, dst += 4) {
+ const uint8x16_t argb = vld1q_u8((const uint8_t*)src);
+ const uint8x16_t greens = DoGreenShuffle_NEON(argb, shuffle);
+ vst1q_u8((uint8_t*)dst, vaddq_u8(argb, greens));
+ }
+ // fallthrough and finish off with plain-C
+ VP8LAddGreenToBlueAndRed_C(src, num_pixels & 3, dst);
+}
+
+//------------------------------------------------------------------------------
+// Color Transform
+
+static void TransformColorInverse_NEON(const VP8LMultipliers* const m,
+ const uint32_t* const src,
+ int num_pixels, uint32_t* dst) {
+// sign-extended multiplying constants, pre-shifted by 6.
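+// CST() sign-extends the 8-bit multiplier into an int16 and pre-shifts it
+// so that the doubling high-half multiply vqdmulhq_s16 (a net >> 15) lands
+// the (multiplier * green) product at the same >> 5 fixed-point position
+// as the scalar transform.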
+#define CST(X) (((int16_t)(m->X << 8)) >> 6) + const int16_t rb[8] = { + CST(green_to_blue_), CST(green_to_red_), + CST(green_to_blue_), CST(green_to_red_), + CST(green_to_blue_), CST(green_to_red_), + CST(green_to_blue_), CST(green_to_red_) + }; + const int16x8_t mults_rb = vld1q_s16(rb); + const int16_t b2[8] = { + 0, CST(red_to_blue_), 0, CST(red_to_blue_), + 0, CST(red_to_blue_), 0, CST(red_to_blue_), + }; + const int16x8_t mults_b2 = vld1q_s16(b2); +#undef CST +#ifdef USE_VTBLQ + static const uint8_t kg0g0[16] = { + 255, 1, 255, 1, 255, 5, 255, 5, 255, 9, 255, 9, 255, 13, 255, 13 + }; + const uint8x16_t shuffle = vld1q_u8(kg0g0); +#else + static const uint8_t k0g0g[8] = { 255, 1, 255, 1, 255, 5, 255, 5 }; + const uint8x8_t shuffle = vld1_u8(k0g0g); +#endif + const uint32x4_t mask_ag = vdupq_n_u32(0xff00ff00u); + int i; + for (i = 0; i + 4 <= num_pixels; i += 4) { + const uint8x16_t in = vld1q_u8((const uint8_t*)(src + i)); + const uint32x4_t a0g0 = vandq_u32(vreinterpretq_u32_u8(in), mask_ag); + // 0 g 0 g + const uint8x16_t greens = DoGreenShuffle_NEON(in, shuffle); + // x dr x db1 + const int16x8_t A = vqdmulhq_s16(vreinterpretq_s16_u8(greens), mults_rb); + // x r' x b' + const int8x16_t B = vaddq_s8(vreinterpretq_s8_u8(in), + vreinterpretq_s8_s16(A)); + // r' 0 b' 0 + const int16x8_t C = vshlq_n_s16(vreinterpretq_s16_s8(B), 8); + // x db2 0 0 + const int16x8_t D = vqdmulhq_s16(C, mults_b2); + // 0 x db2 0 + const uint32x4_t E = vshrq_n_u32(vreinterpretq_u32_s16(D), 8); + // r' x b'' 0 + const int8x16_t F = vaddq_s8(vreinterpretq_s8_u32(E), + vreinterpretq_s8_s16(C)); + // 0 r' 0 b'' + const uint16x8_t G = vshrq_n_u16(vreinterpretq_u16_s8(F), 8); + const uint32x4_t out = vorrq_u32(vreinterpretq_u32_u16(G), a0g0); + vst1q_u32(dst + i, out); + } + // Fall-back to C-version for left-overs. 
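+ // (At most three pixels remain here, since the main loop consumes four
+ // at a time.)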
+ VP8LTransformColorInverse_C(m, src + i, num_pixels - i, dst + i); +} + +#undef USE_VTBLQ + +//------------------------------------------------------------------------------ +// Entry point + +extern void VP8LDspInitNEON(void); + +WEBP_TSAN_IGNORE_FUNCTION void VP8LDspInitNEON(void) { + VP8LPredictors[5] = Predictor5_NEON; + VP8LPredictors[6] = Predictor6_NEON; + VP8LPredictors[7] = Predictor7_NEON; + VP8LPredictors[13] = Predictor13_NEON; + + VP8LPredictorsAdd[0] = PredictorAdd0_NEON; + VP8LPredictorsAdd[1] = PredictorAdd1_NEON; + VP8LPredictorsAdd[2] = PredictorAdd2_NEON; + VP8LPredictorsAdd[3] = PredictorAdd3_NEON; + VP8LPredictorsAdd[4] = PredictorAdd4_NEON; + VP8LPredictorsAdd[5] = PredictorAdd5_NEON; + VP8LPredictorsAdd[6] = PredictorAdd6_NEON; + VP8LPredictorsAdd[7] = PredictorAdd7_NEON; + VP8LPredictorsAdd[8] = PredictorAdd8_NEON; + VP8LPredictorsAdd[9] = PredictorAdd9_NEON; + VP8LPredictorsAdd[10] = PredictorAdd10_NEON; + VP8LPredictorsAdd[11] = PredictorAdd11_NEON; + VP8LPredictorsAdd[12] = PredictorAdd12_NEON; + VP8LPredictorsAdd[13] = PredictorAdd13_NEON; + + VP8LConvertBGRAToRGBA = ConvertBGRAToRGBA_NEON; + VP8LConvertBGRAToBGR = ConvertBGRAToBGR_NEON; + VP8LConvertBGRAToRGB = ConvertBGRAToRGB_NEON; + + VP8LAddGreenToBlueAndRed = AddGreenToBlueAndRed_NEON; + VP8LTransformColorInverse = TransformColorInverse_NEON; +} + +#else // !WEBP_USE_NEON + +WEBP_DSP_INIT_STUB(VP8LDspInitNEON) + +#endif // WEBP_USE_NEON diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_lossless_sse2.c b/vendor/github.com/sizeofint/webpanimation/dsp_lossless_sse2.c new file mode 100644 index 00000000..a99c37f8 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_lossless_sse2.c @@ -0,0 +1,708 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. 
+// -----------------------------------------------------------------------------
+//
+// SSE2 variant of methods for lossless decoder
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include "dsp_dsp.h"
+
+#if defined(WEBP_USE_SSE2)
+
+#include "dsp_common_sse2.h"
+#include "dsp_lossless.h"
+#include "dsp_lossless_common.h"
+#include <assert.h>
+#include <emmintrin.h>
+
+//------------------------------------------------------------------------------
+// Predictor Transform
+
+static WEBP_INLINE uint32_t ClampedAddSubtractFull_SSE2(uint32_t c0,
+ uint32_t c1,
+ uint32_t c2) {
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i C0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c0), zero);
+ const __m128i C1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c1), zero);
+ const __m128i C2 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c2), zero);
+ const __m128i V1 = _mm_add_epi16(C0, C1);
+ const __m128i V2 = _mm_sub_epi16(V1, C2);
+ const __m128i b = _mm_packus_epi16(V2, V2);
+ const uint32_t output = _mm_cvtsi128_si32(b);
+ return output;
+}
+
+static WEBP_INLINE uint32_t ClampedAddSubtractHalf_SSE2(uint32_t c0,
+ uint32_t c1,
+ uint32_t c2) {
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i C0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c0), zero);
+ const __m128i C1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c1), zero);
+ const __m128i B0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(c2), zero);
+ const __m128i avg = _mm_add_epi16(C1, C0);
+ const __m128i A0 = _mm_srli_epi16(avg, 1);
+ const __m128i A1 = _mm_sub_epi16(A0, B0);
+ const __m128i BgtA = _mm_cmpgt_epi16(B0, A0);
+ const __m128i A2 = _mm_sub_epi16(A1, BgtA);
+ const __m128i A3 = _mm_srai_epi16(A2, 1);
+ const __m128i A4 = _mm_add_epi16(A0, A3);
+ const __m128i A5 = _mm_packus_epi16(A4, A4);
+ const uint32_t output = _mm_cvtsi128_si32(A5);
+ return output;
+}
+
+static WEBP_INLINE uint32_t Select_SSE2(uint32_t a, uint32_t b, uint32_t c) {
+ int pa_minus_pb;
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i A0 = _mm_cvtsi32_si128(a);
+ const __m128i B0 = _mm_cvtsi32_si128(b);
+ const __m128i C0 = _mm_cvtsi32_si128(c);
+ const __m128i AC0 = _mm_subs_epu8(A0, C0);
+ const __m128i CA0 = _mm_subs_epu8(C0, A0);
+ const __m128i BC0 = _mm_subs_epu8(B0, C0);
+ const __m128i CB0 = _mm_subs_epu8(C0, B0);
+ const __m128i AC = _mm_or_si128(AC0, CA0);
+ const __m128i BC = _mm_or_si128(BC0, CB0);
+ const __m128i pa = _mm_unpacklo_epi8(AC, zero); // |a - c|
+ const __m128i pb = _mm_unpacklo_epi8(BC, zero); // |b - c|
+ const __m128i diff = _mm_sub_epi16(pb, pa);
+ {
+ int16_t out[8];
+ _mm_storeu_si128((__m128i*)out, diff);
+ pa_minus_pb = out[0] + out[1] + out[2] + out[3];
+ }
+ return (pa_minus_pb <= 0) ?
a : b; +} + +static WEBP_INLINE void Average2_m128i(const __m128i* const a0, + const __m128i* const a1, + __m128i* const avg) { + // (a + b) >> 1 = ((a + b + 1) >> 1) - ((a ^ b) & 1) + const __m128i ones = _mm_set1_epi8(1); + const __m128i avg1 = _mm_avg_epu8(*a0, *a1); + const __m128i one = _mm_and_si128(_mm_xor_si128(*a0, *a1), ones); + *avg = _mm_sub_epi8(avg1, one); +} + +static WEBP_INLINE void Average2_uint32_SSE2(const uint32_t a0, + const uint32_t a1, + __m128i* const avg) { + // (a + b) >> 1 = ((a + b + 1) >> 1) - ((a ^ b) & 1) + const __m128i ones = _mm_set1_epi8(1); + const __m128i A0 = _mm_cvtsi32_si128(a0); + const __m128i A1 = _mm_cvtsi32_si128(a1); + const __m128i avg1 = _mm_avg_epu8(A0, A1); + const __m128i one = _mm_and_si128(_mm_xor_si128(A0, A1), ones); + *avg = _mm_sub_epi8(avg1, one); +} + +static WEBP_INLINE __m128i Average2_uint32_16_SSE2(uint32_t a0, uint32_t a1) { + const __m128i zero = _mm_setzero_si128(); + const __m128i A0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(a0), zero); + const __m128i A1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(a1), zero); + const __m128i sum = _mm_add_epi16(A1, A0); + return _mm_srli_epi16(sum, 1); +} + +static WEBP_INLINE uint32_t Average2_SSE2(uint32_t a0, uint32_t a1) { + __m128i output; + Average2_uint32_SSE2(a0, a1, &output); + return _mm_cvtsi128_si32(output); +} + +static WEBP_INLINE uint32_t Average3_SSE2(uint32_t a0, uint32_t a1, + uint32_t a2) { + const __m128i zero = _mm_setzero_si128(); + const __m128i avg1 = Average2_uint32_16_SSE2(a0, a2); + const __m128i A1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(a1), zero); + const __m128i sum = _mm_add_epi16(avg1, A1); + const __m128i avg2 = _mm_srli_epi16(sum, 1); + const __m128i A2 = _mm_packus_epi16(avg2, avg2); + const uint32_t output = _mm_cvtsi128_si32(A2); + return output; +} + +static WEBP_INLINE uint32_t Average4_SSE2(uint32_t a0, uint32_t a1, + uint32_t a2, uint32_t a3) { + const __m128i avg1 = Average2_uint32_16_SSE2(a0, a1); + const __m128i avg2 = Average2_uint32_16_SSE2(a2, a3); + const __m128i sum = _mm_add_epi16(avg2, avg1); + const __m128i avg3 = _mm_srli_epi16(sum, 1); + const __m128i A0 = _mm_packus_epi16(avg3, avg3); + const uint32_t output = _mm_cvtsi128_si32(A0); + return output; +} + +static uint32_t Predictor5_SSE2(uint32_t left, const uint32_t* const top) { + const uint32_t pred = Average3_SSE2(left, top[0], top[1]); + return pred; +} +static uint32_t Predictor6_SSE2(uint32_t left, const uint32_t* const top) { + const uint32_t pred = Average2_SSE2(left, top[-1]); + return pred; +} +static uint32_t Predictor7_SSE2(uint32_t left, const uint32_t* const top) { + const uint32_t pred = Average2_SSE2(left, top[0]); + return pred; +} +static uint32_t Predictor8_SSE2(uint32_t left, const uint32_t* const top) { + const uint32_t pred = Average2_SSE2(top[-1], top[0]); + (void)left; + return pred; +} +static uint32_t Predictor9_SSE2(uint32_t left, const uint32_t* const top) { + const uint32_t pred = Average2_SSE2(top[0], top[1]); + (void)left; + return pred; +} +static uint32_t Predictor10_SSE2(uint32_t left, const uint32_t* const top) { + const uint32_t pred = Average4_SSE2(left, top[-1], top[0], top[1]); + return pred; +} +static uint32_t Predictor11_SSE2(uint32_t left, const uint32_t* const top) { + const uint32_t pred = Select_SSE2(top[0], left, top[-1]); + return pred; +} +static uint32_t Predictor12_SSE2(uint32_t left, const uint32_t* const top) { + const uint32_t pred = ClampedAddSubtractFull_SSE2(left, top[0], top[-1]); + return pred; +} +static uint32_t 
Predictor13_SSE2(uint32_t left, const uint32_t* const top) { + const uint32_t pred = ClampedAddSubtractHalf_SSE2(left, top[0], top[-1]); + return pred; +} + +// Batch versions of those functions. + +// Predictor0: ARGB_BLACK. +static void PredictorAdd0_SSE2(const uint32_t* in, const uint32_t* upper, + int num_pixels, uint32_t* out) { + int i; + const __m128i black = _mm_set1_epi32(ARGB_BLACK); + for (i = 0; i + 4 <= num_pixels; i += 4) { + const __m128i src = _mm_loadu_si128((const __m128i*)&in[i]); + const __m128i res = _mm_add_epi8(src, black); + _mm_storeu_si128((__m128i*)&out[i], res); + } + if (i != num_pixels) { + VP8LPredictorsAdd_C[0](in + i, NULL, num_pixels - i, out + i); + } + (void)upper; +} + +// Predictor1: left. +static void PredictorAdd1_SSE2(const uint32_t* in, const uint32_t* upper, + int num_pixels, uint32_t* out) { + int i; + __m128i prev = _mm_set1_epi32(out[-1]); + for (i = 0; i + 4 <= num_pixels; i += 4) { + // a | b | c | d + const __m128i src = _mm_loadu_si128((const __m128i*)&in[i]); + // 0 | a | b | c + const __m128i shift0 = _mm_slli_si128(src, 4); + // a | a + b | b + c | c + d + const __m128i sum0 = _mm_add_epi8(src, shift0); + // 0 | 0 | a | a + b + const __m128i shift1 = _mm_slli_si128(sum0, 8); + // a | a + b | a + b + c | a + b + c + d + const __m128i sum1 = _mm_add_epi8(sum0, shift1); + const __m128i res = _mm_add_epi8(sum1, prev); + _mm_storeu_si128((__m128i*)&out[i], res); + // replicate prev output on the four lanes + prev = _mm_shuffle_epi32(res, (3 << 0) | (3 << 2) | (3 << 4) | (3 << 6)); + } + if (i != num_pixels) { + VP8LPredictorsAdd_C[1](in + i, upper + i, num_pixels - i, out + i); + } +} + +// Macro that adds 32-bit integers from IN using mod 256 arithmetic +// per 8 bit channel. +#define GENERATE_PREDICTOR_1(X, IN) \ +static void PredictorAdd##X##_SSE2(const uint32_t* in, const uint32_t* upper, \ + int num_pixels, uint32_t* out) { \ + int i; \ + for (i = 0; i + 4 <= num_pixels; i += 4) { \ + const __m128i src = _mm_loadu_si128((const __m128i*)&in[i]); \ + const __m128i other = _mm_loadu_si128((const __m128i*)&(IN)); \ + const __m128i res = _mm_add_epi8(src, other); \ + _mm_storeu_si128((__m128i*)&out[i], res); \ + } \ + if (i != num_pixels) { \ + VP8LPredictorsAdd_C[(X)](in + i, upper + i, num_pixels - i, out + i); \ + } \ +} + +// Predictor2: Top. +GENERATE_PREDICTOR_1(2, upper[i]) +// Predictor3: Top-right. +GENERATE_PREDICTOR_1(3, upper[i + 1]) +// Predictor4: Top-left. +GENERATE_PREDICTOR_1(4, upper[i - 1]) +#undef GENERATE_PREDICTOR_1 + +// Due to averages with integers, values cannot be accumulated in parallel for +// predictors 5 to 7. +GENERATE_PREDICTOR_ADD(Predictor5_SSE2, PredictorAdd5_SSE2) +GENERATE_PREDICTOR_ADD(Predictor6_SSE2, PredictorAdd6_SSE2) +GENERATE_PREDICTOR_ADD(Predictor7_SSE2, PredictorAdd7_SSE2) + +#define GENERATE_PREDICTOR_2(X, IN) \ +static void PredictorAdd##X##_SSE2(const uint32_t* in, const uint32_t* upper, \ + int num_pixels, uint32_t* out) { \ + int i; \ + for (i = 0; i + 4 <= num_pixels; i += 4) { \ + const __m128i Tother = _mm_loadu_si128((const __m128i*)&(IN)); \ + const __m128i T = _mm_loadu_si128((const __m128i*)&upper[i]); \ + const __m128i src = _mm_loadu_si128((const __m128i*)&in[i]); \ + __m128i avg, res; \ + Average2_m128i(&T, &Tother, &avg); \ + res = _mm_add_epi8(avg, src); \ + _mm_storeu_si128((__m128i*)&out[i], res); \ + } \ + if (i != num_pixels) { \ + VP8LPredictorsAdd_C[(X)](in + i, upper + i, num_pixels - i, out + i); \ + } \ +} +// Predictor8: average TL T. 
+GENERATE_PREDICTOR_2(8, upper[i - 1])
+// Predictor9: average T TR.
+GENERATE_PREDICTOR_2(9, upper[i + 1])
+#undef GENERATE_PREDICTOR_2
+
+// Predictor10: average of (average of (L,TL), average of (T, TR)).
+#define DO_PRED10(OUT) do { \
+ __m128i avgLTL, avg; \
+ Average2_m128i(&L, &TL, &avgLTL); \
+ Average2_m128i(&avgTTR, &avgLTL, &avg); \
+ L = _mm_add_epi8(avg, src); \
+ out[i + (OUT)] = _mm_cvtsi128_si32(L); \
+} while (0)
+
+#define DO_PRED10_SHIFT do { \
+ /* Rotate the pre-computed values for the next iteration.*/ \
+ avgTTR = _mm_srli_si128(avgTTR, 4); \
+ TL = _mm_srli_si128(TL, 4); \
+ src = _mm_srli_si128(src, 4); \
+} while (0)
+
+static void PredictorAdd10_SSE2(const uint32_t* in, const uint32_t* upper,
+ int num_pixels, uint32_t* out) {
+ int i;
+ __m128i L = _mm_cvtsi32_si128(out[-1]);
+ for (i = 0; i + 4 <= num_pixels; i += 4) {
+ __m128i src = _mm_loadu_si128((const __m128i*)&in[i]);
+ __m128i TL = _mm_loadu_si128((const __m128i*)&upper[i - 1]);
+ const __m128i T = _mm_loadu_si128((const __m128i*)&upper[i]);
+ const __m128i TR = _mm_loadu_si128((const __m128i*)&upper[i + 1]);
+ __m128i avgTTR;
+ Average2_m128i(&T, &TR, &avgTTR);
+ DO_PRED10(0);
+ DO_PRED10_SHIFT;
+ DO_PRED10(1);
+ DO_PRED10_SHIFT;
+ DO_PRED10(2);
+ DO_PRED10_SHIFT;
+ DO_PRED10(3);
+ }
+ if (i != num_pixels) {
+ VP8LPredictorsAdd_C[10](in + i, upper + i, num_pixels - i, out + i);
+ }
+}
+#undef DO_PRED10
+#undef DO_PRED10_SHIFT
+
+// Predictor11: select.
+#define DO_PRED11(OUT) do { \
+ const __m128i L_lo = _mm_unpacklo_epi32(L, T); \
+ const __m128i TL_lo = _mm_unpacklo_epi32(TL, T); \
+ const __m128i pb = _mm_sad_epu8(L_lo, TL_lo); /* pb = sum |L-TL|*/ \
+ const __m128i mask = _mm_cmpgt_epi32(pb, pa); \
+ const __m128i A = _mm_and_si128(mask, L); \
+ const __m128i B = _mm_andnot_si128(mask, T); \
+ const __m128i pred = _mm_or_si128(A, B); /* pred = (pb > pa)? L : T*/ \
+ L = _mm_add_epi8(src, pred); \
+ out[i + (OUT)] = _mm_cvtsi128_si32(L); \
+} while (0)
+
+#define DO_PRED11_SHIFT do { \
+ /* Shift the pre-computed value for the next iteration.*/ \
+ T = _mm_srli_si128(T, 4); \
+ TL = _mm_srli_si128(TL, 4); \
+ src = _mm_srli_si128(src, 4); \
+ pa = _mm_srli_si128(pa, 4); \
+} while (0)
+
+static void PredictorAdd11_SSE2(const uint32_t* in, const uint32_t* upper,
+ int num_pixels, uint32_t* out) {
+ int i;
+ __m128i pa;
+ __m128i L = _mm_cvtsi32_si128(out[-1]);
+ for (i = 0; i + 4 <= num_pixels; i += 4) {
+ __m128i T = _mm_loadu_si128((const __m128i*)&upper[i]);
+ __m128i TL = _mm_loadu_si128((const __m128i*)&upper[i - 1]);
+ __m128i src = _mm_loadu_si128((const __m128i*)&in[i]);
+ {
+ // We can unpack with any value on the upper 32 bits, provided it's the
+ // same on both operands (so that their sum of abs diff is zero). Here we
+ // use T.
+ const __m128i T_lo = _mm_unpacklo_epi32(T, T);
+ const __m128i TL_lo = _mm_unpacklo_epi32(TL, T);
+ const __m128i T_hi = _mm_unpackhi_epi32(T, T);
+ const __m128i TL_hi = _mm_unpackhi_epi32(TL, T);
+ const __m128i s_lo = _mm_sad_epu8(T_lo, TL_lo);
+ const __m128i s_hi = _mm_sad_epu8(T_hi, TL_hi);
+ pa = _mm_packs_epi32(s_lo, s_hi); // pa = sum |T-TL|
+ }
+ DO_PRED11(0);
+ DO_PRED11_SHIFT;
+ DO_PRED11(1);
+ DO_PRED11_SHIFT;
+ DO_PRED11(2);
+ DO_PRED11_SHIFT;
+ DO_PRED11(3);
+ }
+ if (i != num_pixels) {
+ VP8LPredictorsAdd_C[11](in + i, upper + i, num_pixels - i, out + i);
+ }
+}
+#undef DO_PRED11
+#undef DO_PRED11_SHIFT
+
+// Predictor12: ClampedAddSubtractFull.
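+// ClampedAddSubtractFull computes clamp(L + T - TL) per 8-bit channel; the
+// T - TL differences below are precomputed in 16-bit lanes and the final
+// _mm_packus_epi16 supplies the clamping back to [0, 255].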
+#define DO_PRED12(DIFF, LANE, OUT) do { \
+ const __m128i all = _mm_add_epi16(L, (DIFF)); \
+ const __m128i alls = _mm_packus_epi16(all, all); \
+ const __m128i res = _mm_add_epi8(src, alls); \
+ out[i + (OUT)] = _mm_cvtsi128_si32(res); \
+ L = _mm_unpacklo_epi8(res, zero); \
+} while (0)
+
+#define DO_PRED12_SHIFT(DIFF, LANE) do { \
+ /* Shift the pre-computed value for the next iteration.*/ \
+ if ((LANE) == 0) (DIFF) = _mm_srli_si128((DIFF), 8); \
+ src = _mm_srli_si128(src, 4); \
+} while (0)
+
+static void PredictorAdd12_SSE2(const uint32_t* in, const uint32_t* upper,
+ int num_pixels, uint32_t* out) {
+ int i;
+ const __m128i zero = _mm_setzero_si128();
+ const __m128i L8 = _mm_cvtsi32_si128(out[-1]);
+ __m128i L = _mm_unpacklo_epi8(L8, zero);
+ for (i = 0; i + 4 <= num_pixels; i += 4) {
+ // Load 4 pixels at a time.
+ __m128i src = _mm_loadu_si128((const __m128i*)&in[i]);
+ const __m128i T = _mm_loadu_si128((const __m128i*)&upper[i]);
+ const __m128i T_lo = _mm_unpacklo_epi8(T, zero);
+ const __m128i T_hi = _mm_unpackhi_epi8(T, zero);
+ const __m128i TL = _mm_loadu_si128((const __m128i*)&upper[i - 1]);
+ const __m128i TL_lo = _mm_unpacklo_epi8(TL, zero);
+ const __m128i TL_hi = _mm_unpackhi_epi8(TL, zero);
+ __m128i diff_lo = _mm_sub_epi16(T_lo, TL_lo);
+ __m128i diff_hi = _mm_sub_epi16(T_hi, TL_hi);
+ DO_PRED12(diff_lo, 0, 0);
+ DO_PRED12_SHIFT(diff_lo, 0);
+ DO_PRED12(diff_lo, 1, 1);
+ DO_PRED12_SHIFT(diff_lo, 1);
+ DO_PRED12(diff_hi, 0, 2);
+ DO_PRED12_SHIFT(diff_hi, 0);
+ DO_PRED12(diff_hi, 1, 3);
+ }
+ if (i != num_pixels) {
+ VP8LPredictorsAdd_C[12](in + i, upper + i, num_pixels - i, out + i);
+ }
+}
+#undef DO_PRED12
+#undef DO_PRED12_SHIFT
+
+// Because the averages are computed on integers, values cannot be
+// accumulated in parallel for predictor 13.
+GENERATE_PREDICTOR_ADD(Predictor13_SSE2, PredictorAdd13_SSE2)
+
+//------------------------------------------------------------------------------
+// Subtract-Green Transform
+
+static void AddGreenToBlueAndRed_SSE2(const uint32_t* const src, int num_pixels,
+ uint32_t* dst) {
+ int i;
+ for (i = 0; i + 4 <= num_pixels; i += 4) {
+ const __m128i in = _mm_loadu_si128((const __m128i*)&src[i]); // argb
+ const __m128i A = _mm_srli_epi16(in, 8); // 0 a 0 g
+ const __m128i B = _mm_shufflelo_epi16(A, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i C = _mm_shufflehi_epi16(B, _MM_SHUFFLE(2, 2, 0, 0)); // 0g0g
+ const __m128i out = _mm_add_epi8(in, C);
+ _mm_storeu_si128((__m128i*)&dst[i], out);
+ }
+ // fallthrough and finish off with plain-C
+ if (i != num_pixels) {
+ VP8LAddGreenToBlueAndRed_C(src + i, num_pixels - i, dst + i);
+ }
+}
+
+//------------------------------------------------------------------------------
+// Color Transform
+
+static void TransformColorInverse_SSE2(const VP8LMultipliers* const m,
+ const uint32_t* const src,
+ int num_pixels, uint32_t* dst) {
+// sign-extended multiplying constants, pre-shifted by 5.
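+// CST() pre-shifts the sign-extended 8-bit multiplier so that
+// _mm_mulhi_epi16 (an implicit >> 16) leaves the per-channel product at the
+// >> 5 fixed-point position used by the scalar color transform.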
+#define CST(X) (((int16_t)(m->X << 8)) >> 5) // sign-extend
+#define MK_CST_16(HI, LO) \
+ _mm_set1_epi32((int)(((uint32_t)(HI) << 16) | ((LO) & 0xffff)))
+ const __m128i mults_rb = MK_CST_16(CST(green_to_red_), CST(green_to_blue_));
+ const __m128i mults_b2 = MK_CST_16(CST(red_to_blue_), 0);
+#undef MK_CST_16
+#undef CST
+ const __m128i mask_ag = _mm_set1_epi32(0xff00ff00); // alpha-green masks
+ int i;
+ for (i = 0; i + 4 <= num_pixels; i += 4) {
+ const __m128i in = _mm_loadu_si128((const __m128i*)&src[i]); // argb
+ const __m128i A = _mm_and_si128(in, mask_ag); // a 0 g 0
+ const __m128i B = _mm_shufflelo_epi16(A, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i C = _mm_shufflehi_epi16(B, _MM_SHUFFLE(2, 2, 0, 0)); // g0g0
+ const __m128i D = _mm_mulhi_epi16(C, mults_rb); // x dr x db1
+ const __m128i E = _mm_add_epi8(in, D); // x r' x b'
+ const __m128i F = _mm_slli_epi16(E, 8); // r' 0 b' 0
+ const __m128i G = _mm_mulhi_epi16(F, mults_b2); // x db2 0 0
+ const __m128i H = _mm_srli_epi32(G, 8); // 0 x db2 0
+ const __m128i I = _mm_add_epi8(H, F); // r' x b'' 0
+ const __m128i J = _mm_srli_epi16(I, 8); // 0 r' 0 b''
+ const __m128i out = _mm_or_si128(J, A);
+ _mm_storeu_si128((__m128i*)&dst[i], out);
+ }
+ // Fall-back to C-version for left-overs.
+ if (i != num_pixels) {
+ VP8LTransformColorInverse_C(m, src + i, num_pixels - i, dst + i);
+ }
+}
+
+//------------------------------------------------------------------------------
+// Color-space conversion functions
+
+static void ConvertBGRAToRGB_SSE2(const uint32_t* src, int num_pixels,
+ uint8_t* dst) {
+ const __m128i* in = (const __m128i*)src;
+ __m128i* out = (__m128i*)dst;
+
+ while (num_pixels >= 32) {
+ // Load the BGRA buffers.
+ __m128i in0 = _mm_loadu_si128(in + 0);
+ __m128i in1 = _mm_loadu_si128(in + 1);
+ __m128i in2 = _mm_loadu_si128(in + 2);
+ __m128i in3 = _mm_loadu_si128(in + 3);
+ __m128i in4 = _mm_loadu_si128(in + 4);
+ __m128i in5 = _mm_loadu_si128(in + 5);
+ __m128i in6 = _mm_loadu_si128(in + 6);
+ __m128i in7 = _mm_loadu_si128(in + 7);
+ VP8L32bToPlanar_SSE2(&in0, &in1, &in2, &in3);
+ VP8L32bToPlanar_SSE2(&in4, &in5, &in6, &in7);
+ // At this point, in1/in5 contain red only, in2/in6 green only ...
+ // Pack the colors in 24b RGB.
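+ // (Six 16-byte stores cover the 32 pixels * 3 bytes = 96 bytes of RGB.)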
+ VP8PlanarTo24b_SSE2(&in1, &in5, &in2, &in6, &in3, &in7); + _mm_storeu_si128(out + 0, in1); + _mm_storeu_si128(out + 1, in5); + _mm_storeu_si128(out + 2, in2); + _mm_storeu_si128(out + 3, in6); + _mm_storeu_si128(out + 4, in3); + _mm_storeu_si128(out + 5, in7); + in += 8; + out += 6; + num_pixels -= 32; + } + // left-overs + if (num_pixels > 0) { + VP8LConvertBGRAToRGB_C((const uint32_t*)in, num_pixels, (uint8_t*)out); + } +} + +static void ConvertBGRAToRGBA_SSE2(const uint32_t* src, + int num_pixels, uint8_t* dst) { + const __m128i red_blue_mask = _mm_set1_epi32(0x00ff00ffu); + const __m128i* in = (const __m128i*)src; + __m128i* out = (__m128i*)dst; + while (num_pixels >= 8) { + const __m128i A1 = _mm_loadu_si128(in++); + const __m128i A2 = _mm_loadu_si128(in++); + const __m128i B1 = _mm_and_si128(A1, red_blue_mask); // R 0 B 0 + const __m128i B2 = _mm_and_si128(A2, red_blue_mask); // R 0 B 0 + const __m128i C1 = _mm_andnot_si128(red_blue_mask, A1); // 0 G 0 A + const __m128i C2 = _mm_andnot_si128(red_blue_mask, A2); // 0 G 0 A + const __m128i D1 = _mm_shufflelo_epi16(B1, _MM_SHUFFLE(2, 3, 0, 1)); + const __m128i D2 = _mm_shufflelo_epi16(B2, _MM_SHUFFLE(2, 3, 0, 1)); + const __m128i E1 = _mm_shufflehi_epi16(D1, _MM_SHUFFLE(2, 3, 0, 1)); + const __m128i E2 = _mm_shufflehi_epi16(D2, _MM_SHUFFLE(2, 3, 0, 1)); + const __m128i F1 = _mm_or_si128(E1, C1); + const __m128i F2 = _mm_or_si128(E2, C2); + _mm_storeu_si128(out++, F1); + _mm_storeu_si128(out++, F2); + num_pixels -= 8; + } + // left-overs + if (num_pixels > 0) { + VP8LConvertBGRAToRGBA_C((const uint32_t*)in, num_pixels, (uint8_t*)out); + } +} + +static void ConvertBGRAToRGBA4444_SSE2(const uint32_t* src, + int num_pixels, uint8_t* dst) { + const __m128i mask_0x0f = _mm_set1_epi8(0x0f); + const __m128i mask_0xf0 = _mm_set1_epi8(0xf0); + const __m128i* in = (const __m128i*)src; + __m128i* out = (__m128i*)dst; + while (num_pixels >= 8) { + const __m128i bgra0 = _mm_loadu_si128(in++); // bgra0|bgra1|bgra2|bgra3 + const __m128i bgra4 = _mm_loadu_si128(in++); // bgra4|bgra5|bgra6|bgra7 + const __m128i v0l = _mm_unpacklo_epi8(bgra0, bgra4); // b0b4g0g4r0r4a0a4... + const __m128i v0h = _mm_unpackhi_epi8(bgra0, bgra4); // b2b6g2g6r2r6a2a6... + const __m128i v1l = _mm_unpacklo_epi8(v0l, v0h); // b0b2b4b6g0g2g4g6... + const __m128i v1h = _mm_unpackhi_epi8(v0l, v0h); // b1b3b5b7g1g3g5g7... 
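+ // A third unpack round completes the 8x4 byte transpose, leaving each
+ // channel's eight samples contiguous.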
+ const __m128i v2l = _mm_unpacklo_epi8(v1l, v1h); // b0...b7 | g0...g7 + const __m128i v2h = _mm_unpackhi_epi8(v1l, v1h); // r0...r7 | a0...a7 + const __m128i ga0 = _mm_unpackhi_epi64(v2l, v2h); // g0...g7 | a0...a7 + const __m128i rb0 = _mm_unpacklo_epi64(v2h, v2l); // r0...r7 | b0...b7 + const __m128i ga1 = _mm_srli_epi16(ga0, 4); // g0-|g1-|...|a6-|a7- + const __m128i rb1 = _mm_and_si128(rb0, mask_0xf0); // -r0|-r1|...|-b6|-a7 + const __m128i ga2 = _mm_and_si128(ga1, mask_0x0f); // g0-|g1-|...|a6-|a7- + const __m128i rgba0 = _mm_or_si128(ga2, rb1); // rg0..rg7 | ba0..ba7 + const __m128i rgba1 = _mm_srli_si128(rgba0, 8); // ba0..ba7 | 0 +#if (WEBP_SWAP_16BIT_CSP == 1) + const __m128i rgba = _mm_unpacklo_epi8(rgba1, rgba0); // barg0...barg7 +#else + const __m128i rgba = _mm_unpacklo_epi8(rgba0, rgba1); // rgba0...rgba7 +#endif + _mm_storeu_si128(out++, rgba); + num_pixels -= 8; + } + // left-overs + if (num_pixels > 0) { + VP8LConvertBGRAToRGBA4444_C((const uint32_t*)in, num_pixels, (uint8_t*)out); + } +} + +static void ConvertBGRAToRGB565_SSE2(const uint32_t* src, + int num_pixels, uint8_t* dst) { + const __m128i mask_0xe0 = _mm_set1_epi8(0xe0); + const __m128i mask_0xf8 = _mm_set1_epi8(0xf8); + const __m128i mask_0x07 = _mm_set1_epi8(0x07); + const __m128i* in = (const __m128i*)src; + __m128i* out = (__m128i*)dst; + while (num_pixels >= 8) { + const __m128i bgra0 = _mm_loadu_si128(in++); // bgra0|bgra1|bgra2|bgra3 + const __m128i bgra4 = _mm_loadu_si128(in++); // bgra4|bgra5|bgra6|bgra7 + const __m128i v0l = _mm_unpacklo_epi8(bgra0, bgra4); // b0b4g0g4r0r4a0a4... + const __m128i v0h = _mm_unpackhi_epi8(bgra0, bgra4); // b2b6g2g6r2r6a2a6... + const __m128i v1l = _mm_unpacklo_epi8(v0l, v0h); // b0b2b4b6g0g2g4g6... + const __m128i v1h = _mm_unpackhi_epi8(v0l, v0h); // b1b3b5b7g1g3g5g7... 
+ const __m128i v2l = _mm_unpacklo_epi8(v1l, v1h); // b0...b7 | g0...g7 + const __m128i v2h = _mm_unpackhi_epi8(v1l, v1h); // r0...r7 | a0...a7 + const __m128i ga0 = _mm_unpackhi_epi64(v2l, v2h); // g0...g7 | a0...a7 + const __m128i rb0 = _mm_unpacklo_epi64(v2h, v2l); // r0...r7 | b0...b7 + const __m128i rb1 = _mm_and_si128(rb0, mask_0xf8); // -r0..-r7|-b0..-b7 + const __m128i g_lo1 = _mm_srli_epi16(ga0, 5); + const __m128i g_lo2 = _mm_and_si128(g_lo1, mask_0x07); // g0-...g7-|xx (3b) + const __m128i g_hi1 = _mm_slli_epi16(ga0, 3); + const __m128i g_hi2 = _mm_and_si128(g_hi1, mask_0xe0); // -g0...-g7|xx (3b) + const __m128i b0 = _mm_srli_si128(rb1, 8); // -b0...-b7|0 + const __m128i rg1 = _mm_or_si128(rb1, g_lo2); // gr0...gr7|xx + const __m128i b1 = _mm_srli_epi16(b0, 3); + const __m128i gb1 = _mm_or_si128(b1, g_hi2); // bg0...bg7|xx +#if (WEBP_SWAP_16BIT_CSP == 1) + const __m128i rgba = _mm_unpacklo_epi8(gb1, rg1); // rggb0...rggb7 +#else + const __m128i rgba = _mm_unpacklo_epi8(rg1, gb1); // bgrb0...bgrb7 +#endif + _mm_storeu_si128(out++, rgba); + num_pixels -= 8; + } + // left-overs + if (num_pixels > 0) { + VP8LConvertBGRAToRGB565_C((const uint32_t*)in, num_pixels, (uint8_t*)out); + } +} + +static void ConvertBGRAToBGR_SSE2(const uint32_t* src, + int num_pixels, uint8_t* dst) { + const __m128i mask_l = _mm_set_epi32(0, 0x00ffffff, 0, 0x00ffffff); + const __m128i mask_h = _mm_set_epi32(0x00ffffff, 0, 0x00ffffff, 0); + const __m128i* in = (const __m128i*)src; + const uint8_t* const end = dst + num_pixels * 3; + // the last storel_epi64 below writes 8 bytes starting at offset 18 + while (dst + 26 <= end) { + const __m128i bgra0 = _mm_loadu_si128(in++); // bgra0|bgra1|bgra2|bgra3 + const __m128i bgra4 = _mm_loadu_si128(in++); // bgra4|bgra5|bgra6|bgra7 + const __m128i a0l = _mm_and_si128(bgra0, mask_l); // bgr0|0|bgr0|0 + const __m128i a4l = _mm_and_si128(bgra4, mask_l); // bgr0|0|bgr0|0 + const __m128i a0h = _mm_and_si128(bgra0, mask_h); // 0|bgr0|0|bgr0 + const __m128i a4h = _mm_and_si128(bgra4, mask_h); // 0|bgr0|0|bgr0 + const __m128i b0h = _mm_srli_epi64(a0h, 8); // 000b|gr00|000b|gr00 + const __m128i b4h = _mm_srli_epi64(a4h, 8); // 000b|gr00|000b|gr00 + const __m128i c0 = _mm_or_si128(a0l, b0h); // rgbrgb00|rgbrgb00 + const __m128i c4 = _mm_or_si128(a4l, b4h); // rgbrgb00|rgbrgb00 + const __m128i c2 = _mm_srli_si128(c0, 8); + const __m128i c6 = _mm_srli_si128(c4, 8); + _mm_storel_epi64((__m128i*)(dst + 0), c0); + _mm_storel_epi64((__m128i*)(dst + 6), c2); + _mm_storel_epi64((__m128i*)(dst + 12), c4); + _mm_storel_epi64((__m128i*)(dst + 18), c6); + dst += 24; + num_pixels -= 8; + } + // left-overs + if (num_pixels > 0) { + VP8LConvertBGRAToBGR_C((const uint32_t*)in, num_pixels, dst); + } +} + +//------------------------------------------------------------------------------ +// Entry point + +extern void VP8LDspInitSSE2(void); + +WEBP_TSAN_IGNORE_FUNCTION void VP8LDspInitSSE2(void) { + VP8LPredictors[5] = Predictor5_SSE2; + VP8LPredictors[6] = Predictor6_SSE2; + VP8LPredictors[7] = Predictor7_SSE2; + VP8LPredictors[8] = Predictor8_SSE2; + VP8LPredictors[9] = Predictor9_SSE2; + VP8LPredictors[10] = Predictor10_SSE2; + VP8LPredictors[11] = Predictor11_SSE2; + VP8LPredictors[12] = Predictor12_SSE2; + VP8LPredictors[13] = Predictor13_SSE2; + + VP8LPredictorsAdd[0] = PredictorAdd0_SSE2; + VP8LPredictorsAdd[1] = PredictorAdd1_SSE2; + VP8LPredictorsAdd[2] = PredictorAdd2_SSE2; + VP8LPredictorsAdd[3] = PredictorAdd3_SSE2; + VP8LPredictorsAdd[4] = PredictorAdd4_SSE2; + VP8LPredictorsAdd[5] = 
PredictorAdd5_SSE2; + VP8LPredictorsAdd[6] = PredictorAdd6_SSE2; + VP8LPredictorsAdd[7] = PredictorAdd7_SSE2; + VP8LPredictorsAdd[8] = PredictorAdd8_SSE2; + VP8LPredictorsAdd[9] = PredictorAdd9_SSE2; + VP8LPredictorsAdd[10] = PredictorAdd10_SSE2; + VP8LPredictorsAdd[11] = PredictorAdd11_SSE2; + VP8LPredictorsAdd[12] = PredictorAdd12_SSE2; + VP8LPredictorsAdd[13] = PredictorAdd13_SSE2; + + VP8LAddGreenToBlueAndRed = AddGreenToBlueAndRed_SSE2; + VP8LTransformColorInverse = TransformColorInverse_SSE2; + + VP8LConvertBGRAToRGB = ConvertBGRAToRGB_SSE2; + VP8LConvertBGRAToRGBA = ConvertBGRAToRGBA_SSE2; + VP8LConvertBGRAToRGBA4444 = ConvertBGRAToRGBA4444_SSE2; + VP8LConvertBGRAToRGB565 = ConvertBGRAToRGB565_SSE2; + VP8LConvertBGRAToBGR = ConvertBGRAToBGR_SSE2; +} + +#else // !WEBP_USE_SSE2 + +WEBP_DSP_INIT_STUB(VP8LDspInitSSE2) + +#endif // WEBP_USE_SSE2 diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_mips_macro.h b/vendor/github.com/sizeofint/webpanimation/dsp_mips_macro.h new file mode 100644 index 00000000..44aba9b7 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_mips_macro.h @@ -0,0 +1,200 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// MIPS common macros + +#ifndef WEBP_DSP_MIPS_MACRO_H_ +#define WEBP_DSP_MIPS_MACRO_H_ + +#if defined(__GNUC__) && defined(__ANDROID__) && LOCAL_GCC_VERSION == 0x409 +#define WORK_AROUND_GCC +#endif + +#define STR(s) #s +#define XSTR(s) STR(s) + +// O0[31..16 | 15..0] = I0[31..16 | 15..0] + I1[31..16 | 15..0] +// O1[31..16 | 15..0] = I0[31..16 | 15..0] - I1[31..16 | 15..0] +// O - output +// I - input (macro doesn't change it) +#define ADD_SUB_HALVES(O0, O1, \ + I0, I1) \ + "addq.ph %[" #O0 "], %[" #I0 "], %[" #I1 "] \n\t" \ + "subq.ph %[" #O1 "], %[" #I0 "], %[" #I1 "] \n\t" + +// O - output +// I - input (macro doesn't change it) +// I[0/1] - offset in bytes +#define LOAD_IN_X2(O0, O1, \ + I0, I1) \ + "lh %[" #O0 "], " #I0 "(%[in]) \n\t" \ + "lh %[" #O1 "], " #I1 "(%[in]) \n\t" + +// I0 - location +// I1..I9 - offsets in bytes +#define LOAD_WITH_OFFSET_X4(O0, O1, O2, O3, \ + I0, I1, I2, I3, I4, I5, I6, I7, I8, I9) \ + "ulw %[" #O0 "], " #I1 "+" XSTR(I9) "*" #I5 "(%[" #I0 "]) \n\t" \ + "ulw %[" #O1 "], " #I2 "+" XSTR(I9) "*" #I6 "(%[" #I0 "]) \n\t" \ + "ulw %[" #O2 "], " #I3 "+" XSTR(I9) "*" #I7 "(%[" #I0 "]) \n\t" \ + "ulw %[" #O3 "], " #I4 "+" XSTR(I9) "*" #I8 "(%[" #I0 "]) \n\t" + +// O - output +// IO - input/output +// I - input (macro doesn't change it) +#define MUL_SHIFT_SUM(O0, O1, O2, O3, O4, O5, O6, O7, \ + IO0, IO1, IO2, IO3, \ + I0, I1, I2, I3, I4, I5, I6, I7) \ + "mul %[" #O0 "], %[" #I0 "], %[kC2] \n\t" \ + "mul %[" #O1 "], %[" #I0 "], %[kC1] \n\t" \ + "mul %[" #O2 "], %[" #I1 "], %[kC2] \n\t" \ + "mul %[" #O3 "], %[" #I1 "], %[kC1] \n\t" \ + "mul %[" #O4 "], %[" #I2 "], %[kC2] \n\t" \ + "mul %[" #O5 "], %[" #I2 "], %[kC1] \n\t" \ + "mul %[" #O6 "], %[" #I3 "], %[kC2] \n\t" \ + "mul %[" #O7 "], %[" #I3 "], %[kC1] \n\t" \ + "sra %[" #O0 "], %[" #O0 "], 16 \n\t" \ + "sra %[" #O1 "], %[" #O1 "], 16 \n\t" \ + "sra %[" #O2 "], %[" #O2 "], 16 \n\t" \ + "sra %[" #O3 "], %[" #O3 "], 16 \n\t" \ + "sra %[" 
#O4 "], %[" #O4 "], 16 \n\t" \ + "sra %[" #O5 "], %[" #O5 "], 16 \n\t" \ + "sra %[" #O6 "], %[" #O6 "], 16 \n\t" \ + "sra %[" #O7 "], %[" #O7 "], 16 \n\t" \ + "addu %[" #IO0 "], %[" #IO0 "], %[" #I4 "] \n\t" \ + "addu %[" #IO1 "], %[" #IO1 "], %[" #I5 "] \n\t" \ + "subu %[" #IO2 "], %[" #IO2 "], %[" #I6 "] \n\t" \ + "subu %[" #IO3 "], %[" #IO3 "], %[" #I7 "] \n\t" + +// O - output +// I - input (macro doesn't change it) +#define INSERT_HALF_X2(O0, O1, \ + I0, I1) \ + "ins %[" #O0 "], %[" #I0 "], 16, 16 \n\t" \ + "ins %[" #O1 "], %[" #I1 "], 16, 16 \n\t" + +// O - output +// I - input (macro doesn't change it) +#define SRA_16(O0, O1, O2, O3, \ + I0, I1, I2, I3) \ + "sra %[" #O0 "], %[" #I0 "], 16 \n\t" \ + "sra %[" #O1 "], %[" #I1 "], 16 \n\t" \ + "sra %[" #O2 "], %[" #I2 "], 16 \n\t" \ + "sra %[" #O3 "], %[" #I3 "], 16 \n\t" + +// temp0[31..16 | 15..0] = temp8[31..16 | 15..0] + temp12[31..16 | 15..0] +// temp1[31..16 | 15..0] = temp8[31..16 | 15..0] - temp12[31..16 | 15..0] +// temp0[31..16 | 15..0] = temp0[31..16 >> 3 | 15..0 >> 3] +// temp1[31..16 | 15..0] = temp1[31..16 >> 3 | 15..0 >> 3] +// O - output +// I - input (macro doesn't change it) +#define SHIFT_R_SUM_X2(O0, O1, O2, O3, O4, O5, O6, O7, \ + I0, I1, I2, I3, I4, I5, I6, I7) \ + "addq.ph %[" #O0 "], %[" #I0 "], %[" #I4 "] \n\t" \ + "subq.ph %[" #O1 "], %[" #I0 "], %[" #I4 "] \n\t" \ + "addq.ph %[" #O2 "], %[" #I1 "], %[" #I5 "] \n\t" \ + "subq.ph %[" #O3 "], %[" #I1 "], %[" #I5 "] \n\t" \ + "addq.ph %[" #O4 "], %[" #I2 "], %[" #I6 "] \n\t" \ + "subq.ph %[" #O5 "], %[" #I2 "], %[" #I6 "] \n\t" \ + "addq.ph %[" #O6 "], %[" #I3 "], %[" #I7 "] \n\t" \ + "subq.ph %[" #O7 "], %[" #I3 "], %[" #I7 "] \n\t" \ + "shra.ph %[" #O0 "], %[" #O0 "], 3 \n\t" \ + "shra.ph %[" #O1 "], %[" #O1 "], 3 \n\t" \ + "shra.ph %[" #O2 "], %[" #O2 "], 3 \n\t" \ + "shra.ph %[" #O3 "], %[" #O3 "], 3 \n\t" \ + "shra.ph %[" #O4 "], %[" #O4 "], 3 \n\t" \ + "shra.ph %[" #O5 "], %[" #O5 "], 3 \n\t" \ + "shra.ph %[" #O6 "], %[" #O6 "], 3 \n\t" \ + "shra.ph %[" #O7 "], %[" #O7 "], 3 \n\t" + +// precrq.ph.w temp0, temp8, temp2 +// temp0 = temp8[31..16] | temp2[31..16] +// ins temp2, temp8, 16, 16 +// temp2 = temp8[31..16] | temp2[15..0] +// O - output +// IO - input/output +// I - input (macro doesn't change it) +#define PACK_2_HALVES_TO_WORD(O0, O1, O2, O3, \ + IO0, IO1, IO2, IO3, \ + I0, I1, I2, I3) \ + "precrq.ph.w %[" #O0 "], %[" #I0 "], %[" #IO0 "] \n\t" \ + "precrq.ph.w %[" #O1 "], %[" #I1 "], %[" #IO1 "] \n\t" \ + "ins %[" #IO0 "], %[" #I0 "], 16, 16 \n\t" \ + "ins %[" #IO1 "], %[" #I1 "], 16, 16 \n\t" \ + "precrq.ph.w %[" #O2 "], %[" #I2 "], %[" #IO2 "] \n\t" \ + "precrq.ph.w %[" #O3 "], %[" #I3 "], %[" #IO3 "] \n\t" \ + "ins %[" #IO2 "], %[" #I2 "], 16, 16 \n\t" \ + "ins %[" #IO3 "], %[" #I3 "], 16, 16 \n\t" + +// preceu.ph.qbr temp0, temp8 +// temp0 = 0 | 0 | temp8[23..16] | temp8[7..0] +// preceu.ph.qbl temp1, temp8 +// temp1 = temp8[23..16] | temp8[7..0] | 0 | 0 +// O - output +// I - input (macro doesn't change it) +#define CONVERT_2_BYTES_TO_HALF(O0, O1, O2, O3, O4, O5, O6, O7, \ + I0, I1, I2, I3) \ + "preceu.ph.qbr %[" #O0 "], %[" #I0 "] \n\t" \ + "preceu.ph.qbl %[" #O1 "], %[" #I0 "] \n\t" \ + "preceu.ph.qbr %[" #O2 "], %[" #I1 "] \n\t" \ + "preceu.ph.qbl %[" #O3 "], %[" #I1 "] \n\t" \ + "preceu.ph.qbr %[" #O4 "], %[" #I2 "] \n\t" \ + "preceu.ph.qbl %[" #O5 "], %[" #I2 "] \n\t" \ + "preceu.ph.qbr %[" #O6 "], %[" #I3 "] \n\t" \ + "preceu.ph.qbl %[" #O7 "], %[" #I3 "] \n\t" + +// temp0[31..16 | 15..0] = temp0[31..16 | 15..0] + temp8[31..16 | 15..0] +// 
temp0[31..16 | 15..0] = temp0[31..16 <<(s) 7 | 15..0 <<(s) 7] +// temp1..temp7 same as temp0 +// precrqu_s.qb.ph temp0, temp1, temp0: +// temp0 = temp1[31..24] | temp1[15..8] | temp0[31..24] | temp0[15..8] +// store temp0 to dst +// IO - input/output +// I - input (macro doesn't change it) +#define STORE_SAT_SUM_X2(IO0, IO1, IO2, IO3, IO4, IO5, IO6, IO7, \ + I0, I1, I2, I3, I4, I5, I6, I7, \ + I8, I9, I10, I11, I12, I13) \ + "addq.ph %[" #IO0 "], %[" #IO0 "], %[" #I0 "] \n\t" \ + "addq.ph %[" #IO1 "], %[" #IO1 "], %[" #I1 "] \n\t" \ + "addq.ph %[" #IO2 "], %[" #IO2 "], %[" #I2 "] \n\t" \ + "addq.ph %[" #IO3 "], %[" #IO3 "], %[" #I3 "] \n\t" \ + "addq.ph %[" #IO4 "], %[" #IO4 "], %[" #I4 "] \n\t" \ + "addq.ph %[" #IO5 "], %[" #IO5 "], %[" #I5 "] \n\t" \ + "addq.ph %[" #IO6 "], %[" #IO6 "], %[" #I6 "] \n\t" \ + "addq.ph %[" #IO7 "], %[" #IO7 "], %[" #I7 "] \n\t" \ + "shll_s.ph %[" #IO0 "], %[" #IO0 "], 7 \n\t" \ + "shll_s.ph %[" #IO1 "], %[" #IO1 "], 7 \n\t" \ + "shll_s.ph %[" #IO2 "], %[" #IO2 "], 7 \n\t" \ + "shll_s.ph %[" #IO3 "], %[" #IO3 "], 7 \n\t" \ + "shll_s.ph %[" #IO4 "], %[" #IO4 "], 7 \n\t" \ + "shll_s.ph %[" #IO5 "], %[" #IO5 "], 7 \n\t" \ + "shll_s.ph %[" #IO6 "], %[" #IO6 "], 7 \n\t" \ + "shll_s.ph %[" #IO7 "], %[" #IO7 "], 7 \n\t" \ + "precrqu_s.qb.ph %[" #IO0 "], %[" #IO1 "], %[" #IO0 "] \n\t" \ + "precrqu_s.qb.ph %[" #IO2 "], %[" #IO3 "], %[" #IO2 "] \n\t" \ + "precrqu_s.qb.ph %[" #IO4 "], %[" #IO5 "], %[" #IO4 "] \n\t" \ + "precrqu_s.qb.ph %[" #IO6 "], %[" #IO7 "], %[" #IO6 "] \n\t" \ + "usw %[" #IO0 "], " XSTR(I13) "*" #I9 "(%[" #I8 "]) \n\t" \ + "usw %[" #IO2 "], " XSTR(I13) "*" #I10 "(%[" #I8 "]) \n\t" \ + "usw %[" #IO4 "], " XSTR(I13) "*" #I11 "(%[" #I8 "]) \n\t" \ + "usw %[" #IO6 "], " XSTR(I13) "*" #I12 "(%[" #I8 "]) \n\t" + +#define OUTPUT_EARLY_CLOBBER_REGS_10() \ + : [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), [temp3]"=&r"(temp3), \ + [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), [temp6]"=&r"(temp6), \ + [temp7]"=&r"(temp7), [temp8]"=&r"(temp8), [temp9]"=&r"(temp9), \ + [temp10]"=&r"(temp10) + +#define OUTPUT_EARLY_CLOBBER_REGS_18() \ + OUTPUT_EARLY_CLOBBER_REGS_10(), \ + [temp11]"=&r"(temp11), [temp12]"=&r"(temp12), [temp13]"=&r"(temp13), \ + [temp14]"=&r"(temp14), [temp15]"=&r"(temp15), [temp16]"=&r"(temp16), \ + [temp17]"=&r"(temp17), [temp18]"=&r"(temp18) + +#endif // WEBP_DSP_MIPS_MACRO_H_ diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_msa_macro.h b/vendor/github.com/sizeofint/webpanimation/dsp_msa_macro.h new file mode 100644 index 00000000..de026a1d --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_msa_macro.h @@ -0,0 +1,1392 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. 
+// -----------------------------------------------------------------------------
+//
+// MSA common macros
+//
+// Author(s): Prashant Patil (prashant.patil@imgtec.com)
+
+#ifndef WEBP_DSP_MSA_MACRO_H_
+#define WEBP_DSP_MSA_MACRO_H_
+
+#include <stdint.h>
+#include <msa.h>
+
+#if defined(__clang__)
+ #define CLANG_BUILD
+#endif
+
+#ifdef CLANG_BUILD
+ #define ALPHAVAL (-1)
+ #define ADDVI_H(a, b) __msa_addvi_h((v8i16)a, b)
+ #define ADDVI_W(a, b) __msa_addvi_w((v4i32)a, b)
+ #define SRAI_B(a, b) __msa_srai_b((v16i8)a, b)
+ #define SRAI_H(a, b) __msa_srai_h((v8i16)a, b)
+ #define SRAI_W(a, b) __msa_srai_w((v4i32)a, b)
+ #define SRLI_H(a, b) __msa_srli_h((v8i16)a, b)
+ #define SLLI_B(a, b) __msa_slli_b((v4i32)a, b)
+ #define ANDI_B(a, b) __msa_andi_b((v16u8)a, b)
+ #define ORI_B(a, b) __msa_ori_b((v16u8)a, b)
+#else
+ #define ALPHAVAL (0xff)
+ #define ADDVI_H(a, b) (a + b)
+ #define ADDVI_W(a, b) (a + b)
+ #define SRAI_B(a, b) (a >> b)
+ #define SRAI_H(a, b) (a >> b)
+ #define SRAI_W(a, b) (a >> b)
+ #define SRLI_H(a, b) (a << b)
+ #define SLLI_B(a, b) (a << b)
+ #define ANDI_B(a, b) (a & b)
+ #define ORI_B(a, b) (a | b)
+#endif
+
+#define LD_B(RTYPE, psrc) *((RTYPE*)(psrc))
+#define LD_UB(...) LD_B(v16u8, __VA_ARGS__)
+#define LD_SB(...) LD_B(v16i8, __VA_ARGS__)
+
+#define LD_H(RTYPE, psrc) *((RTYPE*)(psrc))
+#define LD_UH(...) LD_H(v8u16, __VA_ARGS__)
+#define LD_SH(...) LD_H(v8i16, __VA_ARGS__)
+
+#define LD_W(RTYPE, psrc) *((RTYPE*)(psrc))
+#define LD_UW(...) LD_W(v4u32, __VA_ARGS__)
+#define LD_SW(...) LD_W(v4i32, __VA_ARGS__)
+
+#define ST_B(RTYPE, in, pdst) *((RTYPE*)(pdst)) = in
+#define ST_UB(...) ST_B(v16u8, __VA_ARGS__)
+#define ST_SB(...) ST_B(v16i8, __VA_ARGS__)
+
+#define ST_H(RTYPE, in, pdst) *((RTYPE*)(pdst)) = in
+#define ST_UH(...) ST_H(v8u16, __VA_ARGS__)
+#define ST_SH(...) ST_H(v8i16, __VA_ARGS__)
+
+#define ST_W(RTYPE, in, pdst) *((RTYPE*)(pdst)) = in
+#define ST_UW(...) ST_W(v4u32, __VA_ARGS__)
+#define ST_SW(...)
ST_W(v4i32, __VA_ARGS__) + +#define MSA_LOAD_FUNC(TYPE, INSTR, FUNC_NAME) \ + static inline TYPE FUNC_NAME(const void* const psrc) { \ + const uint8_t* const psrc_m = (const uint8_t*)psrc; \ + TYPE val_m; \ + asm volatile ( \ + "" #INSTR " %[val_m], %[psrc_m] \n\t" \ + : [val_m] "=r" (val_m) \ + : [psrc_m] "m" (*psrc_m)); \ + return val_m; \ + } + +#define MSA_LOAD(psrc, FUNC_NAME) FUNC_NAME(psrc) + +#define MSA_STORE_FUNC(TYPE, INSTR, FUNC_NAME) \ + static inline void FUNC_NAME(TYPE val, void* const pdst) { \ + uint8_t* const pdst_m = (uint8_t*)pdst; \ + TYPE val_m = val; \ + asm volatile ( \ + " " #INSTR " %[val_m], %[pdst_m] \n\t" \ + : [pdst_m] "=m" (*pdst_m) \ + : [val_m] "r" (val_m)); \ + } + +#define MSA_STORE(val, pdst, FUNC_NAME) FUNC_NAME(val, pdst) + +#if (__mips_isa_rev >= 6) + MSA_LOAD_FUNC(uint16_t, lh, msa_lh); + #define LH(psrc) MSA_LOAD(psrc, msa_lh) + MSA_LOAD_FUNC(uint32_t, lw, msa_lw); + #define LW(psrc) MSA_LOAD(psrc, msa_lw) + #if (__mips == 64) + MSA_LOAD_FUNC(uint64_t, ld, msa_ld); + #define LD(psrc) MSA_LOAD(psrc, msa_ld) + #else // !(__mips == 64) + #define LD(psrc) ((((uint64_t)MSA_LOAD(psrc + 4, msa_lw)) << 32) | \ + MSA_LOAD(psrc, msa_lw)) + #endif // (__mips == 64) + + MSA_STORE_FUNC(uint16_t, sh, msa_sh); + #define SH(val, pdst) MSA_STORE(val, pdst, msa_sh) + MSA_STORE_FUNC(uint32_t, sw, msa_sw); + #define SW(val, pdst) MSA_STORE(val, pdst, msa_sw) + MSA_STORE_FUNC(uint64_t, sd, msa_sd); + #define SD(val, pdst) MSA_STORE(val, pdst, msa_sd) +#else // !(__mips_isa_rev >= 6) + MSA_LOAD_FUNC(uint16_t, ulh, msa_ulh); + #define LH(psrc) MSA_LOAD(psrc, msa_ulh) + MSA_LOAD_FUNC(uint32_t, ulw, msa_ulw); + #define LW(psrc) MSA_LOAD(psrc, msa_ulw) + #if (__mips == 64) + MSA_LOAD_FUNC(uint64_t, uld, msa_uld); + #define LD(psrc) MSA_LOAD(psrc, msa_uld) + #else // !(__mips == 64) + #define LD(psrc) ((((uint64_t)MSA_LOAD(psrc + 4, msa_ulw)) << 32) | \ + MSA_LOAD(psrc, msa_ulw)) + #endif // (__mips == 64) + + MSA_STORE_FUNC(uint16_t, ush, msa_ush); + #define SH(val, pdst) MSA_STORE(val, pdst, msa_ush) + MSA_STORE_FUNC(uint32_t, usw, msa_usw); + #define SW(val, pdst) MSA_STORE(val, pdst, msa_usw) + #define SD(val, pdst) do { \ + uint8_t* const pdst_sd_m = (uint8_t*)(pdst); \ + const uint32_t val0_m = (uint32_t)(val & 0x00000000FFFFFFFF); \ + const uint32_t val1_m = (uint32_t)((val >> 32) & 0x00000000FFFFFFFF); \ + SW(val0_m, pdst_sd_m); \ + SW(val1_m, pdst_sd_m + 4); \ + } while (0) +#endif // (__mips_isa_rev >= 6) + +/* Description : Load 4 words with stride + * Arguments : Inputs - psrc, stride + * Outputs - out0, out1, out2, out3 + * Details : Load word in 'out0' from (psrc) + * Load word in 'out1' from (psrc + stride) + * Load word in 'out2' from (psrc + 2 * stride) + * Load word in 'out3' from (psrc + 3 * stride) + */ +#define LW4(psrc, stride, out0, out1, out2, out3) do { \ + const uint8_t* ptmp = (const uint8_t*)psrc; \ + out0 = LW(ptmp); \ + ptmp += stride; \ + out1 = LW(ptmp); \ + ptmp += stride; \ + out2 = LW(ptmp); \ + ptmp += stride; \ + out3 = LW(ptmp); \ +} while (0) + +/* Description : Store words with stride + * Arguments : Inputs - in0, in1, in2, in3, pdst, stride + * Details : Store word from 'in0' to (pdst) + * Store word from 'in1' to (pdst + stride) + * Store word from 'in2' to (pdst + 2 * stride) + * Store word from 'in3' to (pdst + 3 * stride) + */ +#define SW4(in0, in1, in2, in3, pdst, stride) do { \ + uint8_t* ptmp = (uint8_t*)pdst; \ + SW(in0, ptmp); \ + ptmp += stride; \ + SW(in1, ptmp); \ + ptmp += stride; \ + SW(in2, ptmp); \ + ptmp += stride; \ + 
SW(in3, ptmp); \ +} while (0) + +#define SW3(in0, in1, in2, pdst, stride) do { \ + uint8_t* ptmp = (uint8_t*)pdst; \ + SW(in0, ptmp); \ + ptmp += stride; \ + SW(in1, ptmp); \ + ptmp += stride; \ + SW(in2, ptmp); \ +} while (0) + +#define SW2(in0, in1, pdst, stride) do { \ + uint8_t* ptmp = (uint8_t*)pdst; \ + SW(in0, ptmp); \ + ptmp += stride; \ + SW(in1, ptmp); \ +} while (0) + +/* Description : Store 4 double words with stride + * Arguments : Inputs - in0, in1, in2, in3, pdst, stride + * Details : Store double word from 'in0' to (pdst) + * Store double word from 'in1' to (pdst + stride) + * Store double word from 'in2' to (pdst + 2 * stride) + * Store double word from 'in3' to (pdst + 3 * stride) + */ +#define SD4(in0, in1, in2, in3, pdst, stride) do { \ + uint8_t* ptmp = (uint8_t*)pdst; \ + SD(in0, ptmp); \ + ptmp += stride; \ + SD(in1, ptmp); \ + ptmp += stride; \ + SD(in2, ptmp); \ + ptmp += stride; \ + SD(in3, ptmp); \ +} while (0) + +/* Description : Load vectors with 16 byte elements with stride + * Arguments : Inputs - psrc, stride + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Load 16 byte elements in 'out0' from (psrc) + * Load 16 byte elements in 'out1' from (psrc + stride) + */ +#define LD_B2(RTYPE, psrc, stride, out0, out1) do { \ + out0 = LD_B(RTYPE, psrc); \ + out1 = LD_B(RTYPE, psrc + stride); \ +} while (0) +#define LD_UB2(...) LD_B2(v16u8, __VA_ARGS__) +#define LD_SB2(...) LD_B2(v16i8, __VA_ARGS__) + +#define LD_B3(RTYPE, psrc, stride, out0, out1, out2) do { \ + LD_B2(RTYPE, psrc, stride, out0, out1); \ + out2 = LD_B(RTYPE, psrc + 2 * stride); \ +} while (0) +#define LD_UB3(...) LD_B3(v16u8, __VA_ARGS__) +#define LD_SB3(...) LD_B3(v16i8, __VA_ARGS__) + +#define LD_B4(RTYPE, psrc, stride, out0, out1, out2, out3) do { \ + LD_B2(RTYPE, psrc, stride, out0, out1); \ + LD_B2(RTYPE, psrc + 2 * stride , stride, out2, out3); \ +} while (0) +#define LD_UB4(...) LD_B4(v16u8, __VA_ARGS__) +#define LD_SB4(...) LD_B4(v16i8, __VA_ARGS__) + +#define LD_B8(RTYPE, psrc, stride, \ + out0, out1, out2, out3, out4, out5, out6, out7) do { \ + LD_B4(RTYPE, psrc, stride, out0, out1, out2, out3); \ + LD_B4(RTYPE, psrc + 4 * stride, stride, out4, out5, out6, out7); \ +} while (0) +#define LD_UB8(...) LD_B8(v16u8, __VA_ARGS__) +#define LD_SB8(...) LD_B8(v16i8, __VA_ARGS__) + +/* Description : Load vectors with 8 halfword elements with stride + * Arguments : Inputs - psrc, stride + * Outputs - out0, out1 + * Details : Load 8 halfword elements in 'out0' from (psrc) + * Load 8 halfword elements in 'out1' from (psrc + stride) + */ +#define LD_H2(RTYPE, psrc, stride, out0, out1) do { \ + out0 = LD_H(RTYPE, psrc); \ + out1 = LD_H(RTYPE, psrc + stride); \ +} while (0) +#define LD_UH2(...) LD_H2(v8u16, __VA_ARGS__) +#define LD_SH2(...) LD_H2(v8i16, __VA_ARGS__) + +/* Description : Load vectors with 4 word elements with stride + * Arguments : Inputs - psrc, stride + * Outputs - out0, out1, out2, out3 + * Details : Load 4 word elements in 'out0' from (psrc + 0 * stride) + * Load 4 word elements in 'out1' from (psrc + 1 * stride) + * Load 4 word elements in 'out2' from (psrc + 2 * stride) + * Load 4 word elements in 'out3' from (psrc + 3 * stride) + */ +#define LD_W2(RTYPE, psrc, stride, out0, out1) do { \ + out0 = LD_W(RTYPE, psrc); \ + out1 = LD_W(RTYPE, psrc + stride); \ +} while (0) +#define LD_UW2(...) LD_W2(v4u32, __VA_ARGS__) +#define LD_SW2(...) 
LD_W2(v4i32, __VA_ARGS__) + +#define LD_W3(RTYPE, psrc, stride, out0, out1, out2) do { \ + LD_W2(RTYPE, psrc, stride, out0, out1); \ + out2 = LD_W(RTYPE, psrc + 2 * stride); \ +} while (0) +#define LD_UW3(...) LD_W3(v4u32, __VA_ARGS__) +#define LD_SW3(...) LD_W3(v4i32, __VA_ARGS__) + +#define LD_W4(RTYPE, psrc, stride, out0, out1, out2, out3) do { \ + LD_W2(RTYPE, psrc, stride, out0, out1); \ + LD_W2(RTYPE, psrc + 2 * stride, stride, out2, out3); \ +} while (0) +#define LD_UW4(...) LD_W4(v4u32, __VA_ARGS__) +#define LD_SW4(...) LD_W4(v4i32, __VA_ARGS__) + +/* Description : Store vectors of 16 byte elements with stride + * Arguments : Inputs - in0, in1, pdst, stride + * Details : Store 16 byte elements from 'in0' to (pdst) + * Store 16 byte elements from 'in1' to (pdst + stride) + */ +#define ST_B2(RTYPE, in0, in1, pdst, stride) do { \ + ST_B(RTYPE, in0, pdst); \ + ST_B(RTYPE, in1, pdst + stride); \ +} while (0) +#define ST_UB2(...) ST_B2(v16u8, __VA_ARGS__) +#define ST_SB2(...) ST_B2(v16i8, __VA_ARGS__) + +#define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) do { \ + ST_B2(RTYPE, in0, in1, pdst, stride); \ + ST_B2(RTYPE, in2, in3, pdst + 2 * stride, stride); \ +} while (0) +#define ST_UB4(...) ST_B4(v16u8, __VA_ARGS__) +#define ST_SB4(...) ST_B4(v16i8, __VA_ARGS__) + +#define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + pdst, stride) do { \ + ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride); \ + ST_B4(RTYPE, in4, in5, in6, in7, pdst + 4 * stride, stride); \ +} while (0) +#define ST_UB8(...) ST_B8(v16u8, __VA_ARGS__) + +/* Description : Store vectors of 4 word elements with stride + * Arguments : Inputs - in0, in1, in2, in3, pdst, stride + * Details : Store 4 word elements from 'in0' to (pdst + 0 * stride) + * Store 4 word elements from 'in1' to (pdst + 1 * stride) + * Store 4 word elements from 'in2' to (pdst + 2 * stride) + * Store 4 word elements from 'in3' to (pdst + 3 * stride) + */ +#define ST_W2(RTYPE, in0, in1, pdst, stride) do { \ + ST_W(RTYPE, in0, pdst); \ + ST_W(RTYPE, in1, pdst + stride); \ +} while (0) +#define ST_UW2(...) ST_W2(v4u32, __VA_ARGS__) +#define ST_SW2(...) ST_W2(v4i32, __VA_ARGS__) + +#define ST_W3(RTYPE, in0, in1, in2, pdst, stride) do { \ + ST_W2(RTYPE, in0, in1, pdst, stride); \ + ST_W(RTYPE, in2, pdst + 2 * stride); \ +} while (0) +#define ST_UW3(...) ST_W3(v4u32, __VA_ARGS__) +#define ST_SW3(...) ST_W3(v4i32, __VA_ARGS__) + +#define ST_W4(RTYPE, in0, in1, in2, in3, pdst, stride) do { \ + ST_W2(RTYPE, in0, in1, pdst, stride); \ + ST_W2(RTYPE, in2, in3, pdst + 2 * stride, stride); \ +} while (0) +#define ST_UW4(...) ST_W4(v4u32, __VA_ARGS__) +#define ST_SW4(...) ST_W4(v4i32, __VA_ARGS__) + +/* Description : Store vectors of 8 halfword elements with stride + * Arguments : Inputs - in0, in1, pdst, stride + * Details : Store 8 halfword elements from 'in0' to (pdst) + * Store 8 halfword elements from 'in1' to (pdst + stride) + */ +#define ST_H2(RTYPE, in0, in1, pdst, stride) do { \ + ST_H(RTYPE, in0, pdst); \ + ST_H(RTYPE, in1, pdst + stride); \ +} while (0) +#define ST_UH2(...) ST_H2(v8u16, __VA_ARGS__) +#define ST_SH2(...) 
ST_H2(v8i16, __VA_ARGS__) + +/* Description : Store 2x4 byte block to destination memory from input vector + * Arguments : Inputs - in, stidx, pdst, stride + * Details : Index 'stidx' halfword element from 'in' vector is copied to + * the GP register and stored to (pdst) + * Index 'stidx+1' halfword element from 'in' vector is copied to + * the GP register and stored to (pdst + stride) + * Index 'stidx+2' halfword element from 'in' vector is copied to + * the GP register and stored to (pdst + 2 * stride) + * Index 'stidx+3' halfword element from 'in' vector is copied to + * the GP register and stored to (pdst + 3 * stride) + */ +#define ST2x4_UB(in, stidx, pdst, stride) do { \ + uint8_t* pblk_2x4_m = (uint8_t*)pdst; \ + const uint16_t out0_m = __msa_copy_s_h((v8i16)in, stidx); \ + const uint16_t out1_m = __msa_copy_s_h((v8i16)in, stidx + 1); \ + const uint16_t out2_m = __msa_copy_s_h((v8i16)in, stidx + 2); \ + const uint16_t out3_m = __msa_copy_s_h((v8i16)in, stidx + 3); \ + SH(out0_m, pblk_2x4_m); \ + pblk_2x4_m += stride; \ + SH(out1_m, pblk_2x4_m); \ + pblk_2x4_m += stride; \ + SH(out2_m, pblk_2x4_m); \ + pblk_2x4_m += stride; \ + SH(out3_m, pblk_2x4_m); \ +} while (0) + +/* Description : Store 4x4 byte block to destination memory from input vector + * Arguments : Inputs - in0, in1, pdst, stride + * Details : 'Idx0' word element from input vector 'in0' is copied to the + * GP register and stored to (pdst) + * 'Idx1' word element from input vector 'in0' is copied to the + * GP register and stored to (pdst + stride) + * 'Idx2' word element from input vector 'in0' is copied to the + * GP register and stored to (pdst + 2 * stride) + * 'Idx3' word element from input vector 'in0' is copied to the + * GP register and stored to (pdst + 3 * stride) + */ +#define ST4x4_UB(in0, in1, idx0, idx1, idx2, idx3, pdst, stride) do { \ + uint8_t* const pblk_4x4_m = (uint8_t*)pdst; \ + const uint32_t out0_m = __msa_copy_s_w((v4i32)in0, idx0); \ + const uint32_t out1_m = __msa_copy_s_w((v4i32)in0, idx1); \ + const uint32_t out2_m = __msa_copy_s_w((v4i32)in1, idx2); \ + const uint32_t out3_m = __msa_copy_s_w((v4i32)in1, idx3); \ + SW4(out0_m, out1_m, out2_m, out3_m, pblk_4x4_m, stride); \ +} while (0) + +#define ST4x8_UB(in0, in1, pdst, stride) do { \ + uint8_t* const pblk_4x8 = (uint8_t*)pdst; \ + ST4x4_UB(in0, in0, 0, 1, 2, 3, pblk_4x8, stride); \ + ST4x4_UB(in1, in1, 0, 1, 2, 3, pblk_4x8 + 4 * stride, stride); \ +} while (0) + +/* Description : Immediate number of elements to slide + * Arguments : Inputs - in0, in1, slide_val + * Outputs - out + * Return Type - as per RTYPE + * Details : Byte elements from 'in1' vector are slid into 'in0' by + * value specified in the 'slide_val' + */ +#define SLDI_B(RTYPE, in0, in1, slide_val) \ + (RTYPE)__msa_sldi_b((v16i8)in0, (v16i8)in1, slide_val) \ + +#define SLDI_UB(...) SLDI_B(v16u8, __VA_ARGS__) +#define SLDI_SB(...) SLDI_B(v16i8, __VA_ARGS__) +#define SLDI_SH(...) SLDI_B(v8i16, __VA_ARGS__) + +/* Description : Shuffle byte vector elements as per mask vector + * Arguments : Inputs - in0, in1, in2, in3, mask0, mask1 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Byte elements from 'in0' & 'in1' are copied selectively to + * 'out0' as per control vector 'mask0' + */ +#define VSHF_B(RTYPE, in0, in1, mask) \ + (RTYPE)__msa_vshf_b((v16i8)mask, (v16i8)in1, (v16i8)in0) + +#define VSHF_UB(...) VSHF_B(v16u8, __VA_ARGS__) +#define VSHF_SB(...) VSHF_B(v16i8, __VA_ARGS__) +#define VSHF_UH(...) VSHF_B(v8u16, __VA_ARGS__) +#define VSHF_SH(...) 
VSHF_B(v8i16, __VA_ARGS__) + +#define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) do { \ + out0 = VSHF_B(RTYPE, in0, in1, mask0); \ + out1 = VSHF_B(RTYPE, in2, in3, mask1); \ +} while (0) +#define VSHF_B2_UB(...) VSHF_B2(v16u8, __VA_ARGS__) +#define VSHF_B2_SB(...) VSHF_B2(v16i8, __VA_ARGS__) +#define VSHF_B2_UH(...) VSHF_B2(v8u16, __VA_ARGS__) +#define VSHF_B2_SH(...) VSHF_B2(v8i16, __VA_ARGS__) + +/* Description : Shuffle halfword vector elements as per mask vector + * Arguments : Inputs - in0, in1, in2, in3, mask0, mask1 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : halfword elements from 'in0' & 'in1' are copied selectively to + * 'out0' as per control vector 'mask0' + */ +#define VSHF_H2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) do { \ + out0 = (RTYPE)__msa_vshf_h((v8i16)mask0, (v8i16)in1, (v8i16)in0); \ + out1 = (RTYPE)__msa_vshf_h((v8i16)mask1, (v8i16)in3, (v8i16)in2); \ +} while (0) +#define VSHF_H2_UH(...) VSHF_H2(v8u16, __VA_ARGS__) +#define VSHF_H2_SH(...) VSHF_H2(v8i16, __VA_ARGS__) + +/* Description : Dot product of byte vector elements + * Arguments : Inputs - mult0, mult1, cnst0, cnst1 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Signed byte elements from 'mult0' are multiplied with + * signed byte elements from 'cnst0' producing a result + * twice the size of input i.e. signed halfword. + * The multiplication result of adjacent odd-even elements + * are added together and written to the 'out0' vector +*/ +#define DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) do { \ + out0 = (RTYPE)__msa_dotp_s_h((v16i8)mult0, (v16i8)cnst0); \ + out1 = (RTYPE)__msa_dotp_s_h((v16i8)mult1, (v16i8)cnst1); \ +} while (0) +#define DOTP_SB2_SH(...) DOTP_SB2(v8i16, __VA_ARGS__) + +/* Description : Dot product of halfword vector elements + * Arguments : Inputs - mult0, mult1, cnst0, cnst1 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Signed halfword elements from 'mult0' are multiplied with + * signed halfword elements from 'cnst0' producing a result + * twice the size of input i.e. signed word. + * The multiplication result of adjacent odd-even elements + * are added together and written to the 'out0' vector + */ +#define DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) do { \ + out0 = (RTYPE)__msa_dotp_s_w((v8i16)mult0, (v8i16)cnst0); \ + out1 = (RTYPE)__msa_dotp_s_w((v8i16)mult1, (v8i16)cnst1); \ +} while (0) +#define DOTP_SH2_SW(...) DOTP_SH2(v4i32, __VA_ARGS__) + +/* Description : Dot product of unsigned word vector elements + * Arguments : Inputs - mult0, mult1, cnst0, cnst1 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Unsigned word elements from 'mult0' are multiplied with + * unsigned word elements from 'cnst0' producing a result + * twice the size of input i.e. unsigned double word. + * The multiplication result of adjacent odd-even elements + * are added together and written to the 'out0' vector + */ +#define DOTP_UW2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) do { \ + out0 = (RTYPE)__msa_dotp_u_d((v4u32)mult0, (v4u32)cnst0); \ + out1 = (RTYPE)__msa_dotp_u_d((v4u32)mult1, (v4u32)cnst1); \ +} while (0) +#define DOTP_UW2_UD(...) 
DOTP_UW2(v2u64, __VA_ARGS__) + +/* Description : Dot product & addition of halfword vector elements + * Arguments : Inputs - mult0, mult1, cnst0, cnst1 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Signed halfword elements from 'mult0' are multiplied with + * signed halfword elements from 'cnst0' producing a result + * twice the size of input i.e. signed word. + * The multiplication result of adjacent odd-even elements + * are added to the 'out0' vector + */ +#define DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) do { \ + out0 = (RTYPE)__msa_dpadd_s_w((v4i32)out0, (v8i16)mult0, (v8i16)cnst0); \ + out1 = (RTYPE)__msa_dpadd_s_w((v4i32)out1, (v8i16)mult1, (v8i16)cnst1); \ +} while (0) +#define DPADD_SH2_SW(...) DPADD_SH2(v4i32, __VA_ARGS__) + +/* Description : Clips all signed halfword elements of input vector + * between 0 & 255 + * Arguments : Input/output - val + * Return Type - signed halfword + */ +#define CLIP_SH_0_255(val) do { \ + const v8i16 max_m = __msa_ldi_h(255); \ + val = __msa_maxi_s_h((v8i16)val, 0); \ + val = __msa_min_s_h(max_m, (v8i16)val); \ +} while (0) + +#define CLIP_SH2_0_255(in0, in1) do { \ + CLIP_SH_0_255(in0); \ + CLIP_SH_0_255(in1); \ +} while (0) + +#define CLIP_SH4_0_255(in0, in1, in2, in3) do { \ + CLIP_SH2_0_255(in0, in1); \ + CLIP_SH2_0_255(in2, in3); \ +} while (0) + +/* Description : Clips all unsigned halfword elements of input vector + * between 0 & 255 + * Arguments : Input - in + * Output - out_m + * Return Type - unsigned halfword + */ +#define CLIP_UH_0_255(in) do { \ + const v8u16 max_m = (v8u16)__msa_ldi_h(255); \ + in = __msa_maxi_u_h((v8u16) in, 0); \ + in = __msa_min_u_h((v8u16) max_m, (v8u16) in); \ +} while (0) + +#define CLIP_UH2_0_255(in0, in1) do { \ + CLIP_UH_0_255(in0); \ + CLIP_UH_0_255(in1); \ +} while (0) + +/* Description : Clips all signed word elements of input vector + * between 0 & 255 + * Arguments : Input/output - val + * Return Type - signed word + */ +#define CLIP_SW_0_255(val) do { \ + const v4i32 max_m = __msa_ldi_w(255); \ + val = __msa_maxi_s_w((v4i32)val, 0); \ + val = __msa_min_s_w(max_m, (v4i32)val); \ +} while (0) + +#define CLIP_SW4_0_255(in0, in1, in2, in3) do { \ + CLIP_SW_0_255(in0); \ + CLIP_SW_0_255(in1); \ + CLIP_SW_0_255(in2); \ + CLIP_SW_0_255(in3); \ +} while (0) + +/* Description : Horizontal addition of 4 signed word elements of input vector + * Arguments : Input - in (signed word vector) + * Output - sum_m (i32 sum) + * Return Type - signed word (GP) + * Details : 4 signed word elements of 'in' vector are added together and + * the resulting integer sum is returned + */ +static WEBP_INLINE int32_t func_hadd_sw_s32(v4i32 in) { + const v2i64 res0_m = __msa_hadd_s_d((v4i32)in, (v4i32)in); + const v2i64 res1_m = __msa_splati_d(res0_m, 1); + const v2i64 out = res0_m + res1_m; + int32_t sum_m = __msa_copy_s_w((v4i32)out, 0); + return sum_m; +} +#define HADD_SW_S32(in) func_hadd_sw_s32(in) + +/* Description : Horizontal addition of 8 signed halfword elements + * Arguments : Input - in (signed halfword vector) + * Output - sum_m (s32 sum) + * Return Type - signed word + * Details : 8 signed halfword elements of input vector are added + * together and the resulting integer sum is returned + */ +static WEBP_INLINE int32_t func_hadd_sh_s32(v8i16 in) { + const v4i32 res = __msa_hadd_s_w(in, in); + const v2i64 res0 = __msa_hadd_s_d(res, res); + const v2i64 res1 = __msa_splati_d(res0, 1); + const v2i64 res2 = res0 + res1; + const int32_t sum_m = __msa_copy_s_w((v4i32)res2, 0); + return 
sum_m; +} +#define HADD_SH_S32(in) func_hadd_sh_s32(in) + +/* Description : Horizontal addition of 8 unsigned halfword elements + * Arguments : Input - in (unsigned halfword vector) + * Output - sum_m (u32 sum) + * Return Type - unsigned word + * Details : 8 unsigned halfword elements of input vector are added + * together and the resulting integer sum is returned + */ +static WEBP_INLINE uint32_t func_hadd_uh_u32(v8u16 in) { + uint32_t sum_m; + const v4u32 res_m = __msa_hadd_u_w(in, in); + v2u64 res0_m = __msa_hadd_u_d(res_m, res_m); + v2u64 res1_m = (v2u64)__msa_splati_d((v2i64)res0_m, 1); + res0_m = res0_m + res1_m; + sum_m = __msa_copy_s_w((v4i32)res0_m, 0); + return sum_m; +} +#define HADD_UH_U32(in) func_hadd_uh_u32(in) + +/* Description : Horizontal addition of signed half word vector elements + Arguments : Inputs - in0, in1 + Outputs - out0, out1 + Return Type - as per RTYPE + Details : Each signed odd half word element from 'in0' is added to + even signed half word element from 'in0' (pairwise) and the + halfword result is written in 'out0' +*/ +#define HADD_SH2(RTYPE, in0, in1, out0, out1) do { \ + out0 = (RTYPE)__msa_hadd_s_w((v8i16)in0, (v8i16)in0); \ + out1 = (RTYPE)__msa_hadd_s_w((v8i16)in1, (v8i16)in1); \ +} while (0) +#define HADD_SH2_SW(...) HADD_SH2(v4i32, __VA_ARGS__) + +#define HADD_SH4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3) do { \ + HADD_SH2(RTYPE, in0, in1, out0, out1); \ + HADD_SH2(RTYPE, in2, in3, out2, out3); \ +} while (0) +#define HADD_SH4_SW(...) HADD_SH4(v4i32, __VA_ARGS__) + +/* Description : Horizontal subtraction of unsigned byte vector elements + * Arguments : Inputs - in0, in1 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Each unsigned odd byte element from 'in0' is subtracted from + * even unsigned byte element from 'in0' (pairwise) and the + * halfword result is written to 'out0' + */ +#define HSUB_UB2(RTYPE, in0, in1, out0, out1) do { \ + out0 = (RTYPE)__msa_hsub_u_h((v16u8)in0, (v16u8)in0); \ + out1 = (RTYPE)__msa_hsub_u_h((v16u8)in1, (v16u8)in1); \ +} while (0) +#define HSUB_UB2_UH(...) HSUB_UB2(v8u16, __VA_ARGS__) +#define HSUB_UB2_SH(...) HSUB_UB2(v8i16, __VA_ARGS__) +#define HSUB_UB2_SW(...) HSUB_UB2(v4i32, __VA_ARGS__) + +/* Description : Set element n input vector to GPR value + * Arguments : Inputs - in0, in1, in2, in3 + * Output - out + * Return Type - as per RTYPE + * Details : Set element 0 in vector 'out' to value specified in 'in0' + */ +#define INSERT_W2(RTYPE, in0, in1, out) do { \ + out = (RTYPE)__msa_insert_w((v4i32)out, 0, in0); \ + out = (RTYPE)__msa_insert_w((v4i32)out, 1, in1); \ +} while (0) +#define INSERT_W2_UB(...) INSERT_W2(v16u8, __VA_ARGS__) +#define INSERT_W2_SB(...) INSERT_W2(v16i8, __VA_ARGS__) + +#define INSERT_W4(RTYPE, in0, in1, in2, in3, out) do { \ + out = (RTYPE)__msa_insert_w((v4i32)out, 0, in0); \ + out = (RTYPE)__msa_insert_w((v4i32)out, 1, in1); \ + out = (RTYPE)__msa_insert_w((v4i32)out, 2, in2); \ + out = (RTYPE)__msa_insert_w((v4i32)out, 3, in3); \ +} while (0) +#define INSERT_W4_UB(...) INSERT_W4(v16u8, __VA_ARGS__) +#define INSERT_W4_SB(...) INSERT_W4(v16i8, __VA_ARGS__) +#define INSERT_W4_SW(...) 
INSERT_W4(v4i32, __VA_ARGS__) + +/* Description : Set element n of double word input vector to GPR value + * Arguments : Inputs - in0, in1 + * Output - out + * Return Type - as per RTYPE + * Details : Set element 0 in vector 'out' to GPR value specified in 'in0' + * Set element 1 in vector 'out' to GPR value specified in 'in1' + */ +#define INSERT_D2(RTYPE, in0, in1, out) do { \ + out = (RTYPE)__msa_insert_d((v2i64)out, 0, in0); \ + out = (RTYPE)__msa_insert_d((v2i64)out, 1, in1); \ +} while (0) +#define INSERT_D2_UB(...) INSERT_D2(v16u8, __VA_ARGS__) +#define INSERT_D2_SB(...) INSERT_D2(v16i8, __VA_ARGS__) + +/* Description : Interleave even byte elements from vectors + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Even byte elements of 'in0' and 'in1' are interleaved + * and written to 'out0' + */ +#define ILVEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) do { \ + out0 = (RTYPE)__msa_ilvev_b((v16i8)in1, (v16i8)in0); \ + out1 = (RTYPE)__msa_ilvev_b((v16i8)in3, (v16i8)in2); \ +} while (0) +#define ILVEV_B2_UB(...) ILVEV_B2(v16u8, __VA_ARGS__) +#define ILVEV_B2_SB(...) ILVEV_B2(v16i8, __VA_ARGS__) +#define ILVEV_B2_UH(...) ILVEV_B2(v8u16, __VA_ARGS__) +#define ILVEV_B2_SH(...) ILVEV_B2(v8i16, __VA_ARGS__) +#define ILVEV_B2_SD(...) ILVEV_B2(v2i64, __VA_ARGS__) + +/* Description : Interleave odd byte elements from vectors + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Odd byte elements of 'in0' and 'in1' are interleaved + * and written to 'out0' + */ +#define ILVOD_B2(RTYPE, in0, in1, in2, in3, out0, out1) do { \ + out0 = (RTYPE)__msa_ilvod_b((v16i8)in1, (v16i8)in0); \ + out1 = (RTYPE)__msa_ilvod_b((v16i8)in3, (v16i8)in2); \ +} while (0) +#define ILVOD_B2_UB(...) ILVOD_B2(v16u8, __VA_ARGS__) +#define ILVOD_B2_SB(...) ILVOD_B2(v16i8, __VA_ARGS__) +#define ILVOD_B2_UH(...) ILVOD_B2(v8u16, __VA_ARGS__) +#define ILVOD_B2_SH(...) ILVOD_B2(v8i16, __VA_ARGS__) +#define ILVOD_B2_SD(...) ILVOD_B2(v2i64, __VA_ARGS__) + +/* Description : Interleave even halfword elements from vectors + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Even halfword elements of 'in0' and 'in1' are interleaved + * and written to 'out0' + */ +#define ILVEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) do { \ + out0 = (RTYPE)__msa_ilvev_h((v8i16)in1, (v8i16)in0); \ + out1 = (RTYPE)__msa_ilvev_h((v8i16)in3, (v8i16)in2); \ +} while (0) +#define ILVEV_H2_UB(...) ILVEV_H2(v16u8, __VA_ARGS__) +#define ILVEV_H2_UH(...) ILVEV_H2(v8u16, __VA_ARGS__) +#define ILVEV_H2_SH(...) ILVEV_H2(v8i16, __VA_ARGS__) +#define ILVEV_H2_SW(...) ILVEV_H2(v4i32, __VA_ARGS__) + +/* Description : Interleave odd halfword elements from vectors + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Odd halfword elements of 'in0' and 'in1' are interleaved + * and written to 'out0' + */ +#define ILVOD_H2(RTYPE, in0, in1, in2, in3, out0, out1) do { \ + out0 = (RTYPE)__msa_ilvod_h((v8i16)in1, (v8i16)in0); \ + out1 = (RTYPE)__msa_ilvod_h((v8i16)in3, (v8i16)in2); \ +} while (0) +#define ILVOD_H2_UB(...) ILVOD_H2(v16u8, __VA_ARGS__) +#define ILVOD_H2_UH(...) ILVOD_H2(v8u16, __VA_ARGS__) +#define ILVOD_H2_SH(...) ILVOD_H2(v8i16, __VA_ARGS__) +#define ILVOD_H2_SW(...) 
ILVOD_H2(v4i32, __VA_ARGS__) + +/* Description : Interleave even word elements from vectors + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Even word elements of 'in0' and 'in1' are interleaved + * and written to 'out0' + */ +#define ILVEV_W2(RTYPE, in0, in1, in2, in3, out0, out1) do { \ + out0 = (RTYPE)__msa_ilvev_w((v4i32)in1, (v4i32)in0); \ + out1 = (RTYPE)__msa_ilvev_w((v4i32)in3, (v4i32)in2); \ +} while (0) +#define ILVEV_W2_UB(...) ILVEV_W2(v16u8, __VA_ARGS__) +#define ILVEV_W2_SB(...) ILVEV_W2(v16i8, __VA_ARGS__) +#define ILVEV_W2_UH(...) ILVEV_W2(v8u16, __VA_ARGS__) +#define ILVEV_W2_SD(...) ILVEV_W2(v2i64, __VA_ARGS__) + +/* Description : Interleave even-odd word elements from vectors + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Even word elements of 'in0' and 'in1' are interleaved + * and written to 'out0' + * Odd word elements of 'in2' and 'in3' are interleaved + * and written to 'out1' + */ +#define ILVEVOD_W2(RTYPE, in0, in1, in2, in3, out0, out1) do { \ + out0 = (RTYPE)__msa_ilvev_w((v4i32)in1, (v4i32)in0); \ + out1 = (RTYPE)__msa_ilvod_w((v4i32)in3, (v4i32)in2); \ +} while (0) +#define ILVEVOD_W2_UB(...) ILVEVOD_W2(v16u8, __VA_ARGS__) +#define ILVEVOD_W2_UH(...) ILVEVOD_W2(v8u16, __VA_ARGS__) +#define ILVEVOD_W2_SH(...) ILVEVOD_W2(v8i16, __VA_ARGS__) +#define ILVEVOD_W2_SW(...) ILVEVOD_W2(v4i32, __VA_ARGS__) + +/* Description : Interleave even-odd half-word elements from vectors + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Even half-word elements of 'in0' and 'in1' are interleaved + * and written to 'out0' + * Odd half-word elements of 'in2' and 'in3' are interleaved + * and written to 'out1' + */ +#define ILVEVOD_H2(RTYPE, in0, in1, in2, in3, out0, out1) do { \ + out0 = (RTYPE)__msa_ilvev_h((v8i16)in1, (v8i16)in0); \ + out1 = (RTYPE)__msa_ilvod_h((v8i16)in3, (v8i16)in2); \ +} while (0) +#define ILVEVOD_H2_UB(...) ILVEVOD_H2(v16u8, __VA_ARGS__) +#define ILVEVOD_H2_UH(...) ILVEVOD_H2(v8u16, __VA_ARGS__) +#define ILVEVOD_H2_SH(...) ILVEVOD_H2(v8i16, __VA_ARGS__) +#define ILVEVOD_H2_SW(...) ILVEVOD_H2(v4i32, __VA_ARGS__) + +/* Description : Interleave even double word elements from vectors + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Even double word elements of 'in0' and 'in1' are interleaved + * and written to 'out0' + */ +#define ILVEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) do { \ + out0 = (RTYPE)__msa_ilvev_d((v2i64)in1, (v2i64)in0); \ + out1 = (RTYPE)__msa_ilvev_d((v2i64)in3, (v2i64)in2); \ +} while (0) +#define ILVEV_D2_UB(...) ILVEV_D2(v16u8, __VA_ARGS__) +#define ILVEV_D2_SB(...) ILVEV_D2(v16i8, __VA_ARGS__) +#define ILVEV_D2_SW(...) ILVEV_D2(v4i32, __VA_ARGS__) +#define ILVEV_D2_SD(...) ILVEV_D2(v2i64, __VA_ARGS__) + +/* Description : Interleave left half of byte elements from vectors + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Left half of byte elements of 'in0' and 'in1' are interleaved + * and written to 'out0'. + */ +#define ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1) do { \ + out0 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1); \ + out1 = (RTYPE)__msa_ilvl_b((v16i8)in2, (v16i8)in3); \ +} while (0) +#define ILVL_B2_UB(...) ILVL_B2(v16u8, __VA_ARGS__) +#define ILVL_B2_SB(...) 
ILVL_B2(v16i8, __VA_ARGS__) +#define ILVL_B2_UH(...) ILVL_B2(v8u16, __VA_ARGS__) +#define ILVL_B2_SH(...) ILVL_B2(v8i16, __VA_ARGS__) +#define ILVL_B2_SW(...) ILVL_B2(v4i32, __VA_ARGS__) + +/* Description : Interleave right half of byte elements from vectors + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Right half of byte elements of 'in0' and 'in1' are interleaved + * and written to out0. + */ +#define ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1) do { \ + out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1); \ + out1 = (RTYPE)__msa_ilvr_b((v16i8)in2, (v16i8)in3); \ +} while (0) +#define ILVR_B2_UB(...) ILVR_B2(v16u8, __VA_ARGS__) +#define ILVR_B2_SB(...) ILVR_B2(v16i8, __VA_ARGS__) +#define ILVR_B2_UH(...) ILVR_B2(v8u16, __VA_ARGS__) +#define ILVR_B2_SH(...) ILVR_B2(v8i16, __VA_ARGS__) +#define ILVR_B2_SW(...) ILVR_B2(v4i32, __VA_ARGS__) + +#define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3) do { \ + ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1); \ + ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3); \ +} while (0) +#define ILVR_B4_UB(...) ILVR_B4(v16u8, __VA_ARGS__) +#define ILVR_B4_SB(...) ILVR_B4(v16i8, __VA_ARGS__) +#define ILVR_B4_UH(...) ILVR_B4(v8u16, __VA_ARGS__) +#define ILVR_B4_SH(...) ILVR_B4(v8i16, __VA_ARGS__) +#define ILVR_B4_SW(...) ILVR_B4(v4i32, __VA_ARGS__) + +/* Description : Interleave right half of halfword elements from vectors + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Right half of halfword elements of 'in0' and 'in1' are + * interleaved and written to 'out0'. + */ +#define ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1) do { \ + out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1); \ + out1 = (RTYPE)__msa_ilvr_h((v8i16)in2, (v8i16)in3); \ +} while (0) +#define ILVR_H2_UB(...) ILVR_H2(v16u8, __VA_ARGS__) +#define ILVR_H2_SH(...) ILVR_H2(v8i16, __VA_ARGS__) +#define ILVR_H2_SW(...) ILVR_H2(v4i32, __VA_ARGS__) + +#define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3) do { \ + ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1); \ + ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3); \ +} while (0) +#define ILVR_H4_UB(...) ILVR_H4(v16u8, __VA_ARGS__) +#define ILVR_H4_SH(...) ILVR_H4(v8i16, __VA_ARGS__) +#define ILVR_H4_SW(...) ILVR_H4(v4i32, __VA_ARGS__) + +/* Description : Interleave right half of double word elements from vectors + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Right half of double word elements of 'in0' and 'in1' are + * interleaved and written to 'out0'. + */ +#define ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1) do { \ + out0 = (RTYPE)__msa_ilvr_d((v2i64)in0, (v2i64)in1); \ + out1 = (RTYPE)__msa_ilvr_d((v2i64)in2, (v2i64)in3); \ +} while (0) +#define ILVR_D2_UB(...) ILVR_D2(v16u8, __VA_ARGS__) +#define ILVR_D2_SB(...) ILVR_D2(v16i8, __VA_ARGS__) +#define ILVR_D2_SH(...) ILVR_D2(v8i16, __VA_ARGS__) + +#define ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3) do { \ + ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1); \ + ILVR_D2(RTYPE, in4, in5, in6, in7, out2, out3); \ +} while (0) +#define ILVR_D4_SB(...) ILVR_D4(v16i8, __VA_ARGS__) +#define ILVR_D4_UB(...) 
ILVR_D4(v16u8, __VA_ARGS__) + +/* Description : Interleave both left and right half of input vectors + * Arguments : Inputs - in0, in1 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Right half of byte elements from 'in0' and 'in1' are + * interleaved and written to 'out0' + */ +#define ILVRL_B2(RTYPE, in0, in1, out0, out1) do { \ + out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1); \ + out1 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1); \ +} while (0) +#define ILVRL_B2_UB(...) ILVRL_B2(v16u8, __VA_ARGS__) +#define ILVRL_B2_SB(...) ILVRL_B2(v16i8, __VA_ARGS__) +#define ILVRL_B2_UH(...) ILVRL_B2(v8u16, __VA_ARGS__) +#define ILVRL_B2_SH(...) ILVRL_B2(v8i16, __VA_ARGS__) +#define ILVRL_B2_SW(...) ILVRL_B2(v4i32, __VA_ARGS__) + +#define ILVRL_H2(RTYPE, in0, in1, out0, out1) do { \ + out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1); \ + out1 = (RTYPE)__msa_ilvl_h((v8i16)in0, (v8i16)in1); \ +} while (0) +#define ILVRL_H2_UB(...) ILVRL_H2(v16u8, __VA_ARGS__) +#define ILVRL_H2_SB(...) ILVRL_H2(v16i8, __VA_ARGS__) +#define ILVRL_H2_SH(...) ILVRL_H2(v8i16, __VA_ARGS__) +#define ILVRL_H2_SW(...) ILVRL_H2(v4i32, __VA_ARGS__) +#define ILVRL_H2_UW(...) ILVRL_H2(v4u32, __VA_ARGS__) + +#define ILVRL_W2(RTYPE, in0, in1, out0, out1) do { \ + out0 = (RTYPE)__msa_ilvr_w((v4i32)in0, (v4i32)in1); \ + out1 = (RTYPE)__msa_ilvl_w((v4i32)in0, (v4i32)in1); \ +} while (0) +#define ILVRL_W2_UB(...) ILVRL_W2(v16u8, __VA_ARGS__) +#define ILVRL_W2_SH(...) ILVRL_W2(v8i16, __VA_ARGS__) +#define ILVRL_W2_SW(...) ILVRL_W2(v4i32, __VA_ARGS__) +#define ILVRL_W2_UW(...) ILVRL_W2(v4u32, __VA_ARGS__) + +/* Description : Pack even byte elements of vector pairs + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Even byte elements of 'in0' are copied to the left half of + * 'out0' & even byte elements of 'in1' are copied to the right + * half of 'out0'. + */ +#define PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) do { \ + out0 = (RTYPE)__msa_pckev_b((v16i8)in0, (v16i8)in1); \ + out1 = (RTYPE)__msa_pckev_b((v16i8)in2, (v16i8)in3); \ +} while (0) +#define PCKEV_B2_SB(...) PCKEV_B2(v16i8, __VA_ARGS__) +#define PCKEV_B2_UB(...) PCKEV_B2(v16u8, __VA_ARGS__) +#define PCKEV_B2_SH(...) PCKEV_B2(v8i16, __VA_ARGS__) +#define PCKEV_B2_SW(...) PCKEV_B2(v4i32, __VA_ARGS__) + +#define PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3) do { \ + PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1); \ + PCKEV_B2(RTYPE, in4, in5, in6, in7, out2, out3); \ +} while (0) +#define PCKEV_B4_SB(...) PCKEV_B4(v16i8, __VA_ARGS__) +#define PCKEV_B4_UB(...) PCKEV_B4(v16u8, __VA_ARGS__) +#define PCKEV_B4_SH(...) PCKEV_B4(v8i16, __VA_ARGS__) +#define PCKEV_B4_SW(...) PCKEV_B4(v4i32, __VA_ARGS__) + +/* Description : Pack even halfword elements of vector pairs + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Even halfword elements of 'in0' are copied to the left half of + * 'out0' & even halfword elements of 'in1' are copied to the + * right half of 'out0'. + */ +#define PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) do { \ + out0 = (RTYPE)__msa_pckev_h((v8i16)in0, (v8i16)in1); \ + out1 = (RTYPE)__msa_pckev_h((v8i16)in2, (v8i16)in3); \ +} while (0) +#define PCKEV_H2_UH(...) PCKEV_H2(v8u16, __VA_ARGS__) +#define PCKEV_H2_SH(...) PCKEV_H2(v8i16, __VA_ARGS__) +#define PCKEV_H2_SW(...) PCKEV_H2(v4i32, __VA_ARGS__) +#define PCKEV_H2_UW(...) 
PCKEV_H2(v4u32, __VA_ARGS__) + +/* Description : Pack even word elements of vector pairs + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Even word elements of 'in0' are copied to the left half of + * 'out0' & even word elements of 'in1' are copied to the + * right half of 'out0'. + */ +#define PCKEV_W2(RTYPE, in0, in1, in2, in3, out0, out1) do { \ + out0 = (RTYPE)__msa_pckev_w((v4i32)in0, (v4i32)in1); \ + out1 = (RTYPE)__msa_pckev_w((v4i32)in2, (v4i32)in3); \ +} while (0) +#define PCKEV_W2_UH(...) PCKEV_W2(v8u16, __VA_ARGS__) +#define PCKEV_W2_SH(...) PCKEV_W2(v8i16, __VA_ARGS__) +#define PCKEV_W2_SW(...) PCKEV_W2(v4i32, __VA_ARGS__) +#define PCKEV_W2_UW(...) PCKEV_W2(v4u32, __VA_ARGS__) + +/* Description : Pack odd halfword elements of vector pairs + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1 + * Return Type - as per RTYPE + * Details : Odd halfword elements of 'in0' are copied to the left half of + * 'out0' & odd halfword elements of 'in1' are copied to the + * right half of 'out0'. + */ +#define PCKOD_H2(RTYPE, in0, in1, in2, in3, out0, out1) do { \ + out0 = (RTYPE)__msa_pckod_h((v8i16)in0, (v8i16)in1); \ + out1 = (RTYPE)__msa_pckod_h((v8i16)in2, (v8i16)in3); \ +} while (0) +#define PCKOD_H2_UH(...) PCKOD_H2(v8u16, __VA_ARGS__) +#define PCKOD_H2_SH(...) PCKOD_H2(v8i16, __VA_ARGS__) +#define PCKOD_H2_SW(...) PCKOD_H2(v4i32, __VA_ARGS__) +#define PCKOD_H2_UW(...) PCKOD_H2(v4u32, __VA_ARGS__) + +/* Description : Arithmetic immediate shift right all elements of word vector + * Arguments : Inputs - in0, in1, shift + * Outputs - in place operation + * Return Type - as per input vector RTYPE + * Details : Each element of vector 'in0' is right shifted by 'shift' and + * the result is written in-place. 'shift' is a GP variable. + */ +#define SRAI_W2(RTYPE, in0, in1, shift_val) do { \ + in0 = (RTYPE)SRAI_W(in0, shift_val); \ + in1 = (RTYPE)SRAI_W(in1, shift_val); \ +} while (0) +#define SRAI_W2_SW(...) SRAI_W2(v4i32, __VA_ARGS__) +#define SRAI_W2_UW(...) SRAI_W2(v4u32, __VA_ARGS__) + +#define SRAI_W4(RTYPE, in0, in1, in2, in3, shift_val) do { \ + SRAI_W2(RTYPE, in0, in1, shift_val); \ + SRAI_W2(RTYPE, in2, in3, shift_val); \ +} while (0) +#define SRAI_W4_SW(...) SRAI_W4(v4i32, __VA_ARGS__) +#define SRAI_W4_UW(...) SRAI_W4(v4u32, __VA_ARGS__) + +/* Description : Arithmetic shift right all elements of half-word vector + * Arguments : Inputs - in0, in1, shift + * Outputs - in place operation + * Return Type - as per input vector RTYPE + * Details : Each element of vector 'in0' is right shifted by 'shift' and + * the result is written in-place. 'shift' is a GP variable. + */ +#define SRAI_H2(RTYPE, in0, in1, shift_val) do { \ + in0 = (RTYPE)SRAI_H(in0, shift_val); \ + in1 = (RTYPE)SRAI_H(in1, shift_val); \ +} while (0) +#define SRAI_H2_SH(...) SRAI_H2(v8i16, __VA_ARGS__) +#define SRAI_H2_UH(...) SRAI_H2(v8u16, __VA_ARGS__) + +/* Description : Arithmetic rounded shift right all elements of word vector + * Arguments : Inputs - in0, in1, shift + * Outputs - in place operation + * Return Type - as per input vector RTYPE + * Details : Each element of vector 'in0' is right shifted by 'shift' and + * the result is written in-place. 'shift' is a GP variable. + */ +#define SRARI_W2(RTYPE, in0, in1, shift) do { \ + in0 = (RTYPE)__msa_srari_w((v4i32)in0, shift); \ + in1 = (RTYPE)__msa_srari_w((v4i32)in1, shift); \ +} while (0) +#define SRARI_W2_SW(...) 
SRARI_W2(v4i32, __VA_ARGS__) + +#define SRARI_W4(RTYPE, in0, in1, in2, in3, shift) do { \ + SRARI_W2(RTYPE, in0, in1, shift); \ + SRARI_W2(RTYPE, in2, in3, shift); \ +} while (0) +#define SRARI_W4_SH(...) SRARI_W4(v8i16, __VA_ARGS__) +#define SRARI_W4_UW(...) SRARI_W4(v4u32, __VA_ARGS__) +#define SRARI_W4_SW(...) SRARI_W4(v4i32, __VA_ARGS__) + +/* Description : Shift right arithmetic rounded double words + * Arguments : Inputs - in0, in1, shift + * Outputs - in place operation + * Return Type - as per RTYPE + * Details : Each element of vector 'in0' is shifted right arithmetically by + * the number of bits in the corresponding element in the vector + * 'shift'. The last discarded bit is added to shifted value for + * rounding and the result is written in-place. + * 'shift' is a vector. + */ +#define SRAR_D2(RTYPE, in0, in1, shift) do { \ + in0 = (RTYPE)__msa_srar_d((v2i64)in0, (v2i64)shift); \ + in1 = (RTYPE)__msa_srar_d((v2i64)in1, (v2i64)shift); \ +} while (0) +#define SRAR_D2_SW(...) SRAR_D2(v4i32, __VA_ARGS__) +#define SRAR_D2_SD(...) SRAR_D2(v2i64, __VA_ARGS__) +#define SRAR_D2_UD(...) SRAR_D2(v2u64, __VA_ARGS__) + +#define SRAR_D4(RTYPE, in0, in1, in2, in3, shift) do { \ + SRAR_D2(RTYPE, in0, in1, shift); \ + SRAR_D2(RTYPE, in2, in3, shift); \ +} while (0) +#define SRAR_D4_SD(...) SRAR_D4(v2i64, __VA_ARGS__) +#define SRAR_D4_UD(...) SRAR_D4(v2u64, __VA_ARGS__) + +/* Description : Addition of 2 pairs of half-word vectors + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1 + * Details : Each element in 'in0' is added to 'in1' and result is written + * to 'out0'. + */ +#define ADDVI_H2(RTYPE, in0, in1, in2, in3, out0, out1) do { \ + out0 = (RTYPE)ADDVI_H(in0, in1); \ + out1 = (RTYPE)ADDVI_H(in2, in3); \ +} while (0) +#define ADDVI_H2_SH(...) ADDVI_H2(v8i16, __VA_ARGS__) +#define ADDVI_H2_UH(...) ADDVI_H2(v8u16, __VA_ARGS__) + +/* Description : Addition of 2 pairs of word vectors + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1 + * Details : Each element in 'in0' is added to 'in1' and result is written + * to 'out0'. + */ +#define ADDVI_W2(RTYPE, in0, in1, in2, in3, out0, out1) do { \ + out0 = (RTYPE)ADDVI_W(in0, in1); \ + out1 = (RTYPE)ADDVI_W(in2, in3); \ +} while (0) +#define ADDVI_W2_SW(...) ADDVI_W2(v4i32, __VA_ARGS__) + +/* Description : Fill 2 pairs of word vectors with GP registers + * Arguments : Inputs - in0, in1 + * Outputs - out0, out1 + * Details : GP register in0 is replicated in each word element of out0 + * GP register in1 is replicated in each word element of out1 + */ +#define FILL_W2(RTYPE, in0, in1, out0, out1) do { \ + out0 = (RTYPE)__msa_fill_w(in0); \ + out1 = (RTYPE)__msa_fill_w(in1); \ +} while (0) +#define FILL_W2_SW(...) FILL_W2(v4i32, __VA_ARGS__) + +/* Description : Addition of 2 pairs of vectors + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1 + * Details : Each element in 'in0' is added to 'in1' and result is written + * to 'out0'. + */ +#define ADD2(in0, in1, in2, in3, out0, out1) do { \ + out0 = in0 + in1; \ + out1 = in2 + in3; \ +} while (0) + +#define ADD4(in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3) do { \ + ADD2(in0, in1, in2, in3, out0, out1); \ + ADD2(in4, in5, in6, in7, out2, out3); \ +} while (0) + +/* Description : Subtraction of 2 pairs of vectors + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1 + * Details : Each element in 'in1' is subtracted from 'in0' and result is + * written to 'out0'. 
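+ *               (Illustrative expansion: SUB2(a0, b0, a1, b1, d0, d1) is
+ *               equivalent to the element-wise statements
+ *               d0 = a0 - b0; d1 = a1 - b1; for vector operands.)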
+ */ +#define SUB2(in0, in1, in2, in3, out0, out1) do { \ + out0 = in0 - in1; \ + out1 = in2 - in3; \ +} while (0) + +#define SUB3(in0, in1, in2, in3, in4, in5, out0, out1, out2) do { \ + out0 = in0 - in1; \ + out1 = in2 - in3; \ + out2 = in4 - in5; \ +} while (0) + +#define SUB4(in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3) do { \ + out0 = in0 - in1; \ + out1 = in2 - in3; \ + out2 = in4 - in5; \ + out3 = in6 - in7; \ +} while (0) + +/* Description : Addition - Subtraction of input vectors + * Arguments : Inputs - in0, in1 + * Outputs - out0, out1 + * Details : Each element in 'in1' is added to 'in0' and result is + * written to 'out0'. + * Each element in 'in1' is subtracted from 'in0' and result is + * written to 'out1'. + */ +#define ADDSUB2(in0, in1, out0, out1) do { \ + out0 = in0 + in1; \ + out1 = in0 - in1; \ +} while (0) + +/* Description : Multiplication of pairs of vectors + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1 + * Details : Each element from 'in0' is multiplied with elements from 'in1' + * and the result is written to 'out0' + */ +#define MUL2(in0, in1, in2, in3, out0, out1) do { \ + out0 = in0 * in1; \ + out1 = in2 * in3; \ +} while (0) + +#define MUL4(in0, in1, in2, in3, in4, in5, in6, in7, \ + out0, out1, out2, out3) do { \ + MUL2(in0, in1, in2, in3, out0, out1); \ + MUL2(in4, in5, in6, in7, out2, out3); \ +} while (0) + +/* Description : Sign extend halfword elements from right half of the vector + * Arguments : Input - in (halfword vector) + * Output - out (sign extended word vector) + * Return Type - signed word + * Details : Sign bit of halfword elements from input vector 'in' is + * extracted and interleaved with same vector 'in0' to generate + * 4 word elements keeping sign intact + */ +#define UNPCK_R_SH_SW(in, out) do { \ + const v8i16 sign_m = __msa_clti_s_h((v8i16)in, 0); \ + out = (v4i32)__msa_ilvr_h(sign_m, (v8i16)in); \ +} while (0) + +/* Description : Sign extend halfword elements from input vector and return + * the result in pair of vectors + * Arguments : Input - in (halfword vector) + * Outputs - out0, out1 (sign extended word vectors) + * Return Type - signed word + * Details : Sign bit of halfword elements from input vector 'in' is + * extracted and interleaved right with same vector 'in0' to + * generate 4 signed word elements in 'out0' + * Then interleaved left with same vector 'in0' to + * generate 4 signed word elements in 'out1' + */ +#define UNPCK_SH_SW(in, out0, out1) do { \ + const v8i16 tmp_m = __msa_clti_s_h((v8i16)in, 0); \ + ILVRL_H2_SW(tmp_m, in, out0, out1); \ +} while (0) + +/* Description : Butterfly of 4 input vectors + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1, out2, out3 + * Details : Butterfly operation + */ +#define BUTTERFLY_4(in0, in1, in2, in3, out0, out1, out2, out3) do { \ + out0 = in0 + in3; \ + out1 = in1 + in2; \ + out2 = in1 - in2; \ + out3 = in0 - in3; \ +} while (0) + +/* Description : Transpose 16x4 block into 4x16 with byte elements in vectors + * Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7, + * in8, in9, in10, in11, in12, in13, in14, in15 + * Outputs - out0, out1, out2, out3 + * Return Type - unsigned byte + */ +#define TRANSPOSE16x4_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, \ + in8, in9, in10, in11, in12, in13, in14, in15, \ + out0, out1, out2, out3) do { \ + v2i64 tmp0_m, tmp1_m, tmp2_m, tmp3_m, tmp4_m, tmp5_m; \ + ILVEV_W2_SD(in0, in4, in8, in12, tmp2_m, tmp3_m); \ + ILVEV_W2_SD(in1, in5, in9, in13, tmp0_m, tmp1_m); \ + 
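/* even words of rows 1,5,9,13; rows 0,4,8,12 were interleaved just above */ \
+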
ILVEV_D2_UB(tmp2_m, tmp3_m, tmp0_m, tmp1_m, out1, out3); \ + ILVEV_W2_SD(in2, in6, in10, in14, tmp4_m, tmp5_m); \ + ILVEV_W2_SD(in3, in7, in11, in15, tmp0_m, tmp1_m); \ + ILVEV_D2_SD(tmp4_m, tmp5_m, tmp0_m, tmp1_m, tmp2_m, tmp3_m); \ + ILVEV_B2_SD(out1, out3, tmp2_m, tmp3_m, tmp0_m, tmp1_m); \ + ILVEVOD_H2_UB(tmp0_m, tmp1_m, tmp0_m, tmp1_m, out0, out2); \ + ILVOD_B2_SD(out1, out3, tmp2_m, tmp3_m, tmp0_m, tmp1_m); \ + ILVEVOD_H2_UB(tmp0_m, tmp1_m, tmp0_m, tmp1_m, out1, out3); \ +} while (0) + +/* Description : Transpose 16x8 block into 8x16 with byte elements in vectors + * Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7, + * in8, in9, in10, in11, in12, in13, in14, in15 + * Outputs - out0, out1, out2, out3, out4, out5, out6, out7 + * Return Type - unsigned byte + */ +#define TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, \ + in8, in9, in10, in11, in12, in13, in14, in15, \ + out0, out1, out2, out3, out4, out5, \ + out6, out7) do { \ + v8i16 tmp0_m, tmp1_m, tmp4_m, tmp5_m, tmp6_m, tmp7_m; \ + v4i32 tmp2_m, tmp3_m; \ + ILVEV_D2_UB(in0, in8, in1, in9, out7, out6); \ + ILVEV_D2_UB(in2, in10, in3, in11, out5, out4); \ + ILVEV_D2_UB(in4, in12, in5, in13, out3, out2); \ + ILVEV_D2_UB(in6, in14, in7, in15, out1, out0); \ + ILVEV_B2_SH(out7, out6, out5, out4, tmp0_m, tmp1_m); \ + ILVOD_B2_SH(out7, out6, out5, out4, tmp4_m, tmp5_m); \ + ILVEV_B2_UB(out3, out2, out1, out0, out5, out7); \ + ILVOD_B2_SH(out3, out2, out1, out0, tmp6_m, tmp7_m); \ + ILVEV_H2_SW(tmp0_m, tmp1_m, out5, out7, tmp2_m, tmp3_m); \ + ILVEVOD_W2_UB(tmp2_m, tmp3_m, tmp2_m, tmp3_m, out0, out4); \ + ILVOD_H2_SW(tmp0_m, tmp1_m, out5, out7, tmp2_m, tmp3_m); \ + ILVEVOD_W2_UB(tmp2_m, tmp3_m, tmp2_m, tmp3_m, out2, out6); \ + ILVEV_H2_SW(tmp4_m, tmp5_m, tmp6_m, tmp7_m, tmp2_m, tmp3_m); \ + ILVEVOD_W2_UB(tmp2_m, tmp3_m, tmp2_m, tmp3_m, out1, out5); \ + ILVOD_H2_SW(tmp4_m, tmp5_m, tmp6_m, tmp7_m, tmp2_m, tmp3_m); \ + ILVEVOD_W2_UB(tmp2_m, tmp3_m, tmp2_m, tmp3_m, out3, out7); \ +} while (0) + +/* Description : Transpose 4x4 block with word elements in vectors + * Arguments : Inputs - in0, in1, in2, in3 + * Outputs - out0, out1, out2, out3 + * Return Type - as per RTYPE + */ +#define TRANSPOSE4x4_W(RTYPE, in0, in1, in2, in3, \ + out0, out1, out2, out3) do { \ + v4i32 s0_m, s1_m, s2_m, s3_m; \ + ILVRL_W2_SW(in1, in0, s0_m, s1_m); \ + ILVRL_W2_SW(in3, in2, s2_m, s3_m); \ + out0 = (RTYPE)__msa_ilvr_d((v2i64)s2_m, (v2i64)s0_m); \ + out1 = (RTYPE)__msa_ilvl_d((v2i64)s2_m, (v2i64)s0_m); \ + out2 = (RTYPE)__msa_ilvr_d((v2i64)s3_m, (v2i64)s1_m); \ + out3 = (RTYPE)__msa_ilvl_d((v2i64)s3_m, (v2i64)s1_m); \ +} while (0) +#define TRANSPOSE4x4_SW_SW(...) TRANSPOSE4x4_W(v4i32, __VA_ARGS__) + +/* Description : Add block 4x4 + * Arguments : Inputs - in0, in1, in2, in3, pdst, stride + * Details : Least significant 4 bytes from each input vector are added to + * the destination bytes, clipped between 0-255 and stored. 
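+ *               (A typical use is adding a decoded 4x4 inverse-transform
+ *               residual block onto the prediction already present at 'pdst'.)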
+ */
+#define ADDBLK_ST4x4_UB(in0, in1, in2, in3, pdst, stride) do {    \
+  uint32_t src0_m, src1_m, src2_m, src3_m;                        \
+  v8i16 inp0_m, inp1_m, res0_m, res1_m;                           \
+  v16i8 dst0_m = { 0 };                                           \
+  v16i8 dst1_m = { 0 };                                           \
+  const v16i8 zero_m = { 0 };                                     \
+  ILVR_D2_SH(in1, in0, in3, in2, inp0_m, inp1_m);                 \
+  LW4(pdst, stride, src0_m, src1_m, src2_m, src3_m);              \
+  INSERT_W2_SB(src0_m, src1_m, dst0_m);                           \
+  INSERT_W2_SB(src2_m, src3_m, dst1_m);                           \
+  ILVR_B2_SH(zero_m, dst0_m, zero_m, dst1_m, res0_m, res1_m);     \
+  ADD2(res0_m, inp0_m, res1_m, inp1_m, res0_m, res1_m);           \
+  CLIP_SH2_0_255(res0_m, res1_m);                                 \
+  PCKEV_B2_SB(res0_m, res0_m, res1_m, res1_m, dst0_m, dst1_m);    \
+  ST4x4_UB(dst0_m, dst1_m, 0, 1, 0, 1, pdst, stride);             \
+} while (0)
+
+/* Description : Pack even byte elements, extract 0 & 2 index words from pair
+ *               of results and store 4 words in destination memory as per
+ *               stride
+ * Arguments   : Inputs - in0, in1, in2, in3, pdst, stride
+ */
+#define PCKEV_ST4x4_UB(in0, in1, in2, in3, pdst, stride) do {  \
+  v16i8 tmp0_m, tmp1_m;                                        \
+  PCKEV_B2_SB(in1, in0, in3, in2, tmp0_m, tmp1_m);             \
+  ST4x4_UB(tmp0_m, tmp1_m, 0, 2, 0, 2, pdst, stride);          \
+} while (0)
+
+/* Description : average with rounding (in0 + in1 + 1) / 2.
+ * Arguments   : Inputs - in0, in1, in2, in3,
+ *               Outputs - out0, out1
+ *               Return Type - as per RTYPE
+ * Details     : Each unsigned byte element from 'in0' vector is added with
+ *               each unsigned byte element from 'in1' vector. Then the average
+ *               with rounding is calculated and written to 'out0'
+ */
+#define AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1) do {  \
+  out0 = (RTYPE)__msa_aver_u_b((v16u8)in0, (v16u8)in1);       \
+  out1 = (RTYPE)__msa_aver_u_b((v16u8)in2, (v16u8)in3);       \
+} while (0)
+#define AVER_UB2_UB(...) AVER_UB2(v16u8, __VA_ARGS__)
+
+#endif  // WEBP_DSP_MSA_MACRO_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_neon.h b/vendor/github.com/sizeofint/webpanimation/dsp_neon.h
new file mode 100644
index 00000000..dd464008
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_neon.h
@@ -0,0 +1,101 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// NEON common code.
+
+#ifndef WEBP_DSP_NEON_H_
+#define WEBP_DSP_NEON_H_
+
+#include <arm_neon.h>
+
+#include "dsp_dsp.h"
+
+// Right now, some intrinsics functions seem slower, so we disable them
+// everywhere except newer clang/gcc or aarch64 where the inline assembly is
+// incompatible.
+#if LOCAL_CLANG_PREREQ(3,8) || LOCAL_GCC_PREREQ(4,9) || defined(__aarch64__)
+#define WEBP_USE_INTRINSICS   // use intrinsics when possible
+#endif
+
+#define INIT_VECTOR2(v, a, b) do {  \
+  v.val[0] = a;                     \
+  v.val[1] = b;                     \
+} while (0)
+
+#define INIT_VECTOR3(v, a, b, c) do {  \
+  v.val[0] = a;                        \
+  v.val[1] = b;                        \
+  v.val[2] = c;                        \
+} while (0)
+
+#define INIT_VECTOR4(v, a, b, c, d) do {  \
+  v.val[0] = a;                           \
+  v.val[1] = b;                           \
+  v.val[2] = c;                           \
+  v.val[3] = d;                           \
+} while (0)
+
+// if using intrinsics, this flag avoids some functions that make gcc-4.6.3
+// crash ("internal compiler error: in immed_double_const, at emit-rtl.").
+// (probably similar to gcc.gnu.org/bugzilla/show_bug.cgi?id=48183)
+#if !(LOCAL_CLANG_PREREQ(3,8) || LOCAL_GCC_PREREQ(4,8) || defined(__aarch64__))
+#define WORK_AROUND_GCC
+#endif
+
+static WEBP_INLINE int32x4x4_t Transpose4x4_NEON(const int32x4x4_t rows) {
+  uint64x2x2_t row01, row23;
+
+  row01.val[0] = vreinterpretq_u64_s32(rows.val[0]);
+  row01.val[1] = vreinterpretq_u64_s32(rows.val[1]);
+  row23.val[0] = vreinterpretq_u64_s32(rows.val[2]);
+  row23.val[1] = vreinterpretq_u64_s32(rows.val[3]);
+  // Transpose 64-bit values (there's no vswp equivalent)
+  {
+    const uint64x1_t row0h = vget_high_u64(row01.val[0]);
+    const uint64x1_t row2l = vget_low_u64(row23.val[0]);
+    const uint64x1_t row1h = vget_high_u64(row01.val[1]);
+    const uint64x1_t row3l = vget_low_u64(row23.val[1]);
+    row01.val[0] = vcombine_u64(vget_low_u64(row01.val[0]), row2l);
+    row23.val[0] = vcombine_u64(row0h, vget_high_u64(row23.val[0]));
+    row01.val[1] = vcombine_u64(vget_low_u64(row01.val[1]), row3l);
+    row23.val[1] = vcombine_u64(row1h, vget_high_u64(row23.val[1]));
+  }
+  {
+    const int32x4x2_t out01 = vtrnq_s32(vreinterpretq_s32_u64(row01.val[0]),
+                                        vreinterpretq_s32_u64(row01.val[1]));
+    const int32x4x2_t out23 = vtrnq_s32(vreinterpretq_s32_u64(row23.val[0]),
+                                        vreinterpretq_s32_u64(row23.val[1]));
+    int32x4x4_t out;
+    out.val[0] = out01.val[0];
+    out.val[1] = out01.val[1];
+    out.val[2] = out23.val[0];
+    out.val[3] = out23.val[1];
+    return out;
+  }
+}
+
+#if 0     // Useful debug macro.
+#include <stdio.h>
+#define PRINT_REG(REG, SIZE) do {                      \
+  int i;                                               \
+  printf("%s \t[%d]: 0x", #REG, SIZE);                 \
+  if (SIZE == 8) {                                     \
+    uint8_t _tmp[8];                                   \
+    vst1_u8(_tmp, (REG));                              \
+    for (i = 0; i < 8; ++i) printf("%.2x ", _tmp[i]);  \
+  } else if (SIZE == 16) {                             \
+    uint16_t _tmp[4];                                  \
+    vst1_u16(_tmp, (REG));                             \
+    for (i = 0; i < 4; ++i) printf("%.4x ", _tmp[i]);  \
+  }                                                    \
+  printf("\n");                                        \
+} while (0)
+#endif
+
+#endif  // WEBP_DSP_NEON_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_quant.h b/vendor/github.com/sizeofint/webpanimation/dsp_quant.h
new file mode 100644
index 00000000..36000cad
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_quant.h
@@ -0,0 +1,85 @@
+// Copyright 2018 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+
+#ifndef WEBP_DSP_QUANT_H_
+#define WEBP_DSP_QUANT_H_
+
+#include <string.h>
+
+#include "dsp_dsp.h"
+#include "webp_types.h"
+
+#if defined(WEBP_USE_NEON) && !defined(WEBP_ANDROID_NEON) && \
+    !defined(WEBP_HAVE_NEON_RTCD)
+#include <arm_neon.h>
+
+#define IsFlat IsFlat_NEON
+
+static uint32x2_t horizontal_add_uint32x4(const uint32x4_t a) {
+  const uint64x2_t b = vpaddlq_u32(a);
+  return vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
+                  vreinterpret_u32_u64(vget_high_u64(b)));
+}
+
+static WEBP_INLINE int IsFlat(const int16_t* levels, int num_blocks,
+                              int thresh) {
+  const int16x8_t tst_ones = vdupq_n_s16(-1);
+  uint32x4_t sum = vdupq_n_u32(0);
+
+  for (int i = 0; i < num_blocks; ++i) {
+    // Set DC to zero.
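+    // (lane 0 of 'a_0' holds the block's DC coefficient; zeroing it mirrors
+    // the scalar fallback below, whose loop starts at i == 1 to skip DC.)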
+    const int16x8_t a_0 = vsetq_lane_s16(0, vld1q_s16(levels), 0);
+    const int16x8_t a_1 = vld1q_s16(levels + 8);
+
+    const uint16x8_t b_0 = vshrq_n_u16(vtstq_s16(a_0, tst_ones), 15);
+    const uint16x8_t b_1 = vshrq_n_u16(vtstq_s16(a_1, tst_ones), 15);
+
+    sum = vpadalq_u16(sum, b_0);
+    sum = vpadalq_u16(sum, b_1);
+
+    levels += 16;
+  }
+  return thresh >= (int32_t)vget_lane_u32(horizontal_add_uint32x4(sum), 0);
+}
+
+#else
+
+#define IsFlat IsFlat_C
+
+static WEBP_INLINE int IsFlat(const int16_t* levels, int num_blocks,
+                              int thresh) {
+  int score = 0;
+  while (num_blocks-- > 0) {      // TODO(skal): refine positional scoring?
+    int i;
+    for (i = 1; i < 16; ++i) {    // omit DC, we're only interested in AC
+      score += (levels[i] != 0);
+      if (score > thresh) return 0;
+    }
+    levels += 16;
+  }
+  return 1;
+}
+
+#endif  // defined(WEBP_USE_NEON) && !defined(WEBP_ANDROID_NEON) &&
+        // !defined(WEBP_HAVE_NEON_RTCD)
+
+static WEBP_INLINE int IsFlatSource16(const uint8_t* src) {
+  const uint32_t v = src[0] * 0x01010101u;
+  int i;
+  for (i = 0; i < 16; ++i) {
+    if (memcmp(src + 0, &v, 4) || memcmp(src + 4, &v, 4) ||
+        memcmp(src + 8, &v, 4) || memcmp(src + 12, &v, 4)) {
+      return 0;
+    }
+    src += BPS;
+  }
+  return 1;
+}
+
+#endif  // WEBP_DSP_QUANT_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_rescaler.c b/vendor/github.com/sizeofint/webpanimation/dsp_rescaler.c
new file mode 100644
index 00000000..4503519d
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_rescaler.c
@@ -0,0 +1,250 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Rescaling functions
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <assert.h>
+
+#include "dsp_dsp.h"
+#include "utils_rescaler_utils.h"
+
+//------------------------------------------------------------------------------
+// Implementations of critical functions ImportRow / ExportRow
+
+#define ROUNDER (WEBP_RESCALER_ONE >> 1)
+#define MULT_FIX(x, y) (((uint64_t)(x) * (y) + ROUNDER) >> WEBP_RESCALER_RFIX)
+#define MULT_FIX_FLOOR(x, y) (((uint64_t)(x) * (y)) >> WEBP_RESCALER_RFIX)
+
+//------------------------------------------------------------------------------
+// Row import
+
+void WebPRescalerImportRowExpand_C(WebPRescaler* const wrk,
+                                   const uint8_t* src) {
+  const int x_stride = wrk->num_channels;
+  const int x_out_max = wrk->dst_width * wrk->num_channels;
+  int channel;
+  assert(!WebPRescalerInputDone(wrk));
+  assert(wrk->x_expand);
+  for (channel = 0; channel < x_stride; ++channel) {
+    int x_in = channel;
+    int x_out = channel;
+    // simple bilinear interpolation
+    int accum = wrk->x_add;
+    int left = src[x_in];
+    int right = (wrk->src_width > 1) ?
src[x_in + x_stride] : left; + x_in += x_stride; + while (1) { + wrk->frow[x_out] = right * wrk->x_add + (left - right) * accum; + x_out += x_stride; + if (x_out >= x_out_max) break; + accum -= wrk->x_sub; + if (accum < 0) { + left = right; + x_in += x_stride; + assert(x_in < wrk->src_width * x_stride); + right = src[x_in]; + accum += wrk->x_add; + } + } + assert(wrk->x_sub == 0 /* <- special case for src_width=1 */ || accum == 0); + } +} + +void WebPRescalerImportRowShrink_C(WebPRescaler* const wrk, + const uint8_t* src) { + const int x_stride = wrk->num_channels; + const int x_out_max = wrk->dst_width * wrk->num_channels; + int channel; + assert(!WebPRescalerInputDone(wrk)); + assert(!wrk->x_expand); + for (channel = 0; channel < x_stride; ++channel) { + int x_in = channel; + int x_out = channel; + uint32_t sum = 0; + int accum = 0; + while (x_out < x_out_max) { + uint32_t base = 0; + accum += wrk->x_add; + while (accum > 0) { + accum -= wrk->x_sub; + assert(x_in < wrk->src_width * x_stride); + base = src[x_in]; + sum += base; + x_in += x_stride; + } + { // Emit next horizontal pixel. + const rescaler_t frac = base * (-accum); + wrk->frow[x_out] = sum * wrk->x_sub - frac; + // fresh fractional start for next pixel + sum = (int)MULT_FIX(frac, wrk->fx_scale); + } + x_out += x_stride; + } + assert(accum == 0); + } +} + +//------------------------------------------------------------------------------ +// Row export + +void WebPRescalerExportRowExpand_C(WebPRescaler* const wrk) { + int x_out; + uint8_t* const dst = wrk->dst; + rescaler_t* const irow = wrk->irow; + const int x_out_max = wrk->dst_width * wrk->num_channels; + const rescaler_t* const frow = wrk->frow; + assert(!WebPRescalerOutputDone(wrk)); + assert(wrk->y_accum <= 0); + assert(wrk->y_expand); + assert(wrk->y_sub != 0); + if (wrk->y_accum == 0) { + for (x_out = 0; x_out < x_out_max; ++x_out) { + const uint32_t J = frow[x_out]; + const int v = (int)MULT_FIX(J, wrk->fy_scale); + dst[x_out] = (v > 255) ? 255u : (uint8_t)v; + } + } else { + const uint32_t B = WEBP_RESCALER_FRAC(-wrk->y_accum, wrk->y_sub); + const uint32_t A = (uint32_t)(WEBP_RESCALER_ONE - B); + for (x_out = 0; x_out < x_out_max; ++x_out) { + const uint64_t I = (uint64_t)A * frow[x_out] + + (uint64_t)B * irow[x_out]; + const uint32_t J = (uint32_t)((I + ROUNDER) >> WEBP_RESCALER_RFIX); + const int v = (int)MULT_FIX(J, wrk->fy_scale); + dst[x_out] = (v > 255) ? 255u : (uint8_t)v; + } + } +} + +void WebPRescalerExportRowShrink_C(WebPRescaler* const wrk) { + int x_out; + uint8_t* const dst = wrk->dst; + rescaler_t* const irow = wrk->irow; + const int x_out_max = wrk->dst_width * wrk->num_channels; + const rescaler_t* const frow = wrk->frow; + const uint32_t yscale = wrk->fy_scale * (-wrk->y_accum); + assert(!WebPRescalerOutputDone(wrk)); + assert(wrk->y_accum <= 0); + assert(!wrk->y_expand); + if (yscale) { + for (x_out = 0; x_out < x_out_max; ++x_out) { + const uint32_t frac = (uint32_t)MULT_FIX_FLOOR(frow[x_out], yscale); + const int v = (int)MULT_FIX(irow[x_out] - frac, wrk->fxy_scale); + dst[x_out] = (v > 255) ? 255u : (uint8_t)v; + irow[x_out] = frac; // new fractional start + } + } else { + for (x_out = 0; x_out < x_out_max; ++x_out) { + const int v = (int)MULT_FIX(irow[x_out], wrk->fxy_scale); + dst[x_out] = (v > 255) ? 
255u : (uint8_t)v; + irow[x_out] = 0; + } + } +} + +#undef MULT_FIX_FLOOR +#undef MULT_FIX +#undef ROUNDER + +//------------------------------------------------------------------------------ +// Main entry calls + +void WebPRescalerImportRow(WebPRescaler* const wrk, const uint8_t* src) { + assert(!WebPRescalerInputDone(wrk)); + if (!wrk->x_expand) { + WebPRescalerImportRowShrink(wrk, src); + } else { + WebPRescalerImportRowExpand(wrk, src); + } +} + +void WebPRescalerExportRow(WebPRescaler* const wrk) { + if (wrk->y_accum <= 0) { + assert(!WebPRescalerOutputDone(wrk)); + if (wrk->y_expand) { + WebPRescalerExportRowExpand(wrk); + } else if (wrk->fxy_scale) { + WebPRescalerExportRowShrink(wrk); + } else { // special case + int i; + assert(wrk->src_height == wrk->dst_height && wrk->x_add == 1); + assert(wrk->src_width == 1 && wrk->dst_width <= 2); + for (i = 0; i < wrk->num_channels * wrk->dst_width; ++i) { + wrk->dst[i] = wrk->irow[i]; + wrk->irow[i] = 0; + } + } + wrk->y_accum += wrk->y_add; + wrk->dst += wrk->dst_stride; + ++wrk->dst_y; + } +} + +//------------------------------------------------------------------------------ + +WebPRescalerImportRowFunc WebPRescalerImportRowExpand; +WebPRescalerImportRowFunc WebPRescalerImportRowShrink; + +WebPRescalerExportRowFunc WebPRescalerExportRowExpand; +WebPRescalerExportRowFunc WebPRescalerExportRowShrink; + +extern void WebPRescalerDspInitSSE2(void); +extern void WebPRescalerDspInitMIPS32(void); +extern void WebPRescalerDspInitMIPSdspR2(void); +extern void WebPRescalerDspInitMSA(void); +extern void WebPRescalerDspInitNEON(void); + +WEBP_DSP_INIT_FUNC(WebPRescalerDspInit) { +#if !defined(WEBP_REDUCE_SIZE) +#if !WEBP_NEON_OMIT_C_CODE + WebPRescalerExportRowExpand = WebPRescalerExportRowExpand_C; + WebPRescalerExportRowShrink = WebPRescalerExportRowShrink_C; +#endif + + WebPRescalerImportRowExpand = WebPRescalerImportRowExpand_C; + WebPRescalerImportRowShrink = WebPRescalerImportRowShrink_C; + + if (VP8GetCPUInfo != NULL) { +#if defined(WEBP_USE_SSE2) + if (VP8GetCPUInfo(kSSE2)) { + WebPRescalerDspInitSSE2(); + } +#endif +#if defined(WEBP_USE_MIPS32) + if (VP8GetCPUInfo(kMIPS32)) { + WebPRescalerDspInitMIPS32(); + } +#endif +#if defined(WEBP_USE_MIPS_DSP_R2) + if (VP8GetCPUInfo(kMIPSdspR2)) { + WebPRescalerDspInitMIPSdspR2(); + } +#endif +#if defined(WEBP_USE_MSA) + if (VP8GetCPUInfo(kMSA)) { + WebPRescalerDspInitMSA(); + } +#endif + } + +#if defined(WEBP_USE_NEON) + if (WEBP_NEON_OMIT_C_CODE || + (VP8GetCPUInfo != NULL && VP8GetCPUInfo(kNEON))) { + WebPRescalerDspInitNEON(); + } +#endif + + assert(WebPRescalerExportRowExpand != NULL); + assert(WebPRescalerExportRowShrink != NULL); + assert(WebPRescalerImportRowExpand != NULL); + assert(WebPRescalerImportRowShrink != NULL); +#endif // WEBP_REDUCE_SIZE +} diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_rescaler_mips32.c b/vendor/github.com/sizeofint/webpanimation/dsp_rescaler_mips32.c new file mode 100644 index 00000000..97aac428 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_rescaler_mips32.c @@ -0,0 +1,295 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. 
+// -----------------------------------------------------------------------------
+//
+// MIPS version of rescaling functions
+//
+// Author(s): Djordje Pesut (djordje.pesut@imgtec.com)
+
+#include "dsp_dsp.h"
+
+#if defined(WEBP_USE_MIPS32) && !defined(WEBP_REDUCE_SIZE)
+
+#include <assert.h>
+#include "utils_rescaler_utils.h"
+
+//------------------------------------------------------------------------------
+// Row import
+
+static void ImportRowShrink_MIPS32(WebPRescaler* const wrk,
+                                   const uint8_t* src) {
+  const int x_stride = wrk->num_channels;
+  const int x_out_max = wrk->dst_width * wrk->num_channels;
+  const int fx_scale = wrk->fx_scale;
+  const int x_add = wrk->x_add;
+  const int x_sub = wrk->x_sub;
+  const int x_stride1 = x_stride << 2;
+  int channel;
+  assert(!wrk->x_expand);
+  assert(!WebPRescalerInputDone(wrk));
+
+  for (channel = 0; channel < x_stride; ++channel) {
+    const uint8_t* src1 = src + channel;
+    rescaler_t* frow = wrk->frow + channel;
+    int temp1, temp2, temp3;
+    int base, frac, sum;
+    int accum, accum1;
+    int loop_c = x_out_max - channel;
+
+    __asm__ volatile (
+      "li %[temp1], 0x8000 \n\t"
+      "li %[temp2], 0x10000 \n\t"
+      "li %[sum], 0 \n\t"
+      "li %[accum], 0 \n\t"
+      "1: \n\t"
+      "addu %[accum], %[accum], %[x_add] \n\t"
+      "li %[base], 0 \n\t"
+      "blez %[accum], 3f \n\t"
+      "2: \n\t"
+      "lbu %[base], 0(%[src1]) \n\t"
+      "subu %[accum], %[accum], %[x_sub] \n\t"
+      "addu %[src1], %[src1], %[x_stride] \n\t"
+      "addu %[sum], %[sum], %[base] \n\t"
+      "bgtz %[accum], 2b \n\t"
+      "3: \n\t"
+      "negu %[accum1], %[accum] \n\t"
+      "mul %[frac], %[base], %[accum1] \n\t"
+      "mul %[temp3], %[sum], %[x_sub] \n\t"
+      "subu %[loop_c], %[loop_c], %[x_stride] \n\t"
+      "mult %[temp1], %[temp2] \n\t"
+      "maddu %[frac], %[fx_scale] \n\t"
+      "mfhi %[sum] \n\t"
+      "subu %[temp3], %[temp3], %[frac] \n\t"
+      "sw %[temp3], 0(%[frow]) \n\t"
+      "addu %[frow], %[frow], %[x_stride1] \n\t"
+      "bgtz %[loop_c], 1b \n\t"
+      : [accum]"=&r"(accum), [src1]"+r"(src1), [temp3]"=&r"(temp3),
+        [sum]"=&r"(sum), [base]"=&r"(base), [frac]"=&r"(frac),
+        [frow]"+r"(frow), [accum1]"=&r"(accum1),
+        [temp2]"=&r"(temp2), [temp1]"=&r"(temp1)
+      : [x_stride]"r"(x_stride), [fx_scale]"r"(fx_scale),
+        [x_sub]"r"(x_sub), [x_add]"r"(x_add),
+        [loop_c]"r"(loop_c), [x_stride1]"r"(x_stride1)
+      : "memory", "hi", "lo"
+    );
+    assert(accum == 0);
+  }
+}
+
+static void ImportRowExpand_MIPS32(WebPRescaler* const wrk,
+                                   const uint8_t* src) {
+  const int x_stride = wrk->num_channels;
+  const int x_out_max = wrk->dst_width * wrk->num_channels;
+  const int x_add = wrk->x_add;
+  const int x_sub = wrk->x_sub;
+  const int src_width = wrk->src_width;
+  const int x_stride1 = x_stride << 2;
+  int channel;
+  assert(wrk->x_expand);
+  assert(!WebPRescalerInputDone(wrk));
+
+  for (channel = 0; channel < x_stride; ++channel) {
+    const uint8_t* src1 = src + channel;
+    rescaler_t* frow = wrk->frow + channel;
+    int temp1, temp2, temp3, temp4;
+    int frac;
+    int accum;
+    int x_out = channel;
+
+    __asm__ volatile (
+      "addiu %[temp3], %[src_width], -1 \n\t"
+      "lbu %[temp2], 0(%[src1]) \n\t"
+      "addu %[src1], %[src1], %[x_stride] \n\t"
+      "bgtz %[temp3], 0f \n\t"
+      "addiu %[temp1], %[temp2], 0 \n\t"
+      "b 3f \n\t"
+      "0: \n\t"
+      "lbu %[temp1], 0(%[src1]) \n\t"
+      "3: \n\t"
+      "addiu %[accum], %[x_add], 0 \n\t"
+      "1: \n\t"
+      "subu %[temp3], %[temp2], %[temp1] \n\t"
+      "mul %[temp3], %[temp3], %[accum] \n\t"
+      "mul %[temp4], %[temp1], %[x_add] \n\t"
+      "addu %[temp3], %[temp4], %[temp3] \n\t"
+      "sw %[temp3], 0(%[frow]) \n\t"
+      "addu %[frow], %[frow], %[x_stride1] \n\t"
+      "addu %[x_out],
%[x_out], %[x_stride] \n\t" + "subu %[temp3], %[x_out], %[x_out_max] \n\t" + "bgez %[temp3], 2f \n\t" + "subu %[accum], %[accum], %[x_sub] \n\t" + "bgez %[accum], 4f \n\t" + "addiu %[temp2], %[temp1], 0 \n\t" + "addu %[src1], %[src1], %[x_stride] \n\t" + "lbu %[temp1], 0(%[src1]) \n\t" + "addu %[accum], %[accum], %[x_add] \n\t" + "4: \n\t" + "b 1b \n\t" + "2: \n\t" + : [src1]"+r"(src1), [accum]"=&r"(accum), [temp1]"=&r"(temp1), + [temp2]"=&r"(temp2), [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), + [x_out]"+r"(x_out), [frac]"=&r"(frac), [frow]"+r"(frow) + : [x_stride]"r"(x_stride), [x_add]"r"(x_add), [x_sub]"r"(x_sub), + [x_stride1]"r"(x_stride1), [src_width]"r"(src_width), + [x_out_max]"r"(x_out_max) + : "memory", "hi", "lo" + ); + assert(wrk->x_sub == 0 /* <- special case for src_width=1 */ || accum == 0); + } +} + +//------------------------------------------------------------------------------ +// Row export + +static void ExportRowExpand_MIPS32(WebPRescaler* const wrk) { + uint8_t* dst = wrk->dst; + rescaler_t* irow = wrk->irow; + const int x_out_max = wrk->dst_width * wrk->num_channels; + const rescaler_t* frow = wrk->frow; + int temp0, temp1, temp3, temp4, temp5, loop_end; + const int temp2 = (int)wrk->fy_scale; + const int temp6 = x_out_max << 2; + assert(!WebPRescalerOutputDone(wrk)); + assert(wrk->y_accum <= 0); + assert(wrk->y_expand); + assert(wrk->y_sub != 0); + if (wrk->y_accum == 0) { + __asm__ volatile ( + "li %[temp3], 0x10000 \n\t" + "li %[temp4], 0x8000 \n\t" + "addu %[loop_end], %[frow], %[temp6] \n\t" + "1: \n\t" + "lw %[temp0], 0(%[frow]) \n\t" + "addiu %[dst], %[dst], 1 \n\t" + "addiu %[frow], %[frow], 4 \n\t" + "mult %[temp3], %[temp4] \n\t" + "maddu %[temp0], %[temp2] \n\t" + "mfhi %[temp5] \n\t" + "sb %[temp5], -1(%[dst]) \n\t" + "bne %[frow], %[loop_end], 1b \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp3]"=&r"(temp3), + [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), [frow]"+r"(frow), + [dst]"+r"(dst), [loop_end]"=&r"(loop_end) + : [temp2]"r"(temp2), [temp6]"r"(temp6) + : "memory", "hi", "lo" + ); + } else { + const uint32_t B = WEBP_RESCALER_FRAC(-wrk->y_accum, wrk->y_sub); + const uint32_t A = (uint32_t)(WEBP_RESCALER_ONE - B); + __asm__ volatile ( + "li %[temp3], 0x10000 \n\t" + "li %[temp4], 0x8000 \n\t" + "addu %[loop_end], %[frow], %[temp6] \n\t" + "1: \n\t" + "lw %[temp0], 0(%[frow]) \n\t" + "lw %[temp1], 0(%[irow]) \n\t" + "addiu %[dst], %[dst], 1 \n\t" + "mult %[temp3], %[temp4] \n\t" + "maddu %[A], %[temp0] \n\t" + "maddu %[B], %[temp1] \n\t" + "addiu %[frow], %[frow], 4 \n\t" + "addiu %[irow], %[irow], 4 \n\t" + "mfhi %[temp5] \n\t" + "mult %[temp3], %[temp4] \n\t" + "maddu %[temp5], %[temp2] \n\t" + "mfhi %[temp5] \n\t" + "sb %[temp5], -1(%[dst]) \n\t" + "bne %[frow], %[loop_end], 1b \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp3]"=&r"(temp3), + [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), [frow]"+r"(frow), + [irow]"+r"(irow), [dst]"+r"(dst), [loop_end]"=&r"(loop_end) + : [temp2]"r"(temp2), [temp6]"r"(temp6), [A]"r"(A), [B]"r"(B) + : "memory", "hi", "lo" + ); + } +} + +#if 0 // disabled for now. 
TODO(skal): make match the C-code +static void ExportRowShrink_MIPS32(WebPRescaler* const wrk) { + const int x_out_max = wrk->dst_width * wrk->num_channels; + uint8_t* dst = wrk->dst; + rescaler_t* irow = wrk->irow; + const rescaler_t* frow = wrk->frow; + const int yscale = wrk->fy_scale * (-wrk->y_accum); + int temp0, temp1, temp3, temp4, temp5, loop_end; + const int temp2 = (int)wrk->fxy_scale; + const int temp6 = x_out_max << 2; + + assert(!WebPRescalerOutputDone(wrk)); + assert(wrk->y_accum <= 0); + assert(!wrk->y_expand); + assert(wrk->fxy_scale != 0); + if (yscale) { + __asm__ volatile ( + "li %[temp3], 0x10000 \n\t" + "li %[temp4], 0x8000 \n\t" + "addu %[loop_end], %[frow], %[temp6] \n\t" + "1: \n\t" + "lw %[temp0], 0(%[frow]) \n\t" + "mult %[temp3], %[temp4] \n\t" + "addiu %[frow], %[frow], 4 \n\t" + "maddu %[temp0], %[yscale] \n\t" + "mfhi %[temp1] \n\t" + "lw %[temp0], 0(%[irow]) \n\t" + "addiu %[dst], %[dst], 1 \n\t" + "addiu %[irow], %[irow], 4 \n\t" + "subu %[temp0], %[temp0], %[temp1] \n\t" + "mult %[temp3], %[temp4] \n\t" + "maddu %[temp0], %[temp2] \n\t" + "mfhi %[temp5] \n\t" + "sw %[temp1], -4(%[irow]) \n\t" + "sb %[temp5], -1(%[dst]) \n\t" + "bne %[frow], %[loop_end], 1b \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp3]"=&r"(temp3), + [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), [frow]"+r"(frow), + [irow]"+r"(irow), [dst]"+r"(dst), [loop_end]"=&r"(loop_end) + : [temp2]"r"(temp2), [yscale]"r"(yscale), [temp6]"r"(temp6) + : "memory", "hi", "lo" + ); + } else { + __asm__ volatile ( + "li %[temp3], 0x10000 \n\t" + "li %[temp4], 0x8000 \n\t" + "addu %[loop_end], %[irow], %[temp6] \n\t" + "1: \n\t" + "lw %[temp0], 0(%[irow]) \n\t" + "addiu %[dst], %[dst], 1 \n\t" + "addiu %[irow], %[irow], 4 \n\t" + "mult %[temp3], %[temp4] \n\t" + "maddu %[temp0], %[temp2] \n\t" + "mfhi %[temp5] \n\t" + "sw $zero, -4(%[irow]) \n\t" + "sb %[temp5], -1(%[dst]) \n\t" + "bne %[irow], %[loop_end], 1b \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp3]"=&r"(temp3), + [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), [irow]"+r"(irow), + [dst]"+r"(dst), [loop_end]"=&r"(loop_end) + : [temp2]"r"(temp2), [temp6]"r"(temp6) + : "memory", "hi", "lo" + ); + } +} +#endif // 0 + +//------------------------------------------------------------------------------ +// Entry point + +extern void WebPRescalerDspInitMIPS32(void); + +WEBP_TSAN_IGNORE_FUNCTION void WebPRescalerDspInitMIPS32(void) { + WebPRescalerImportRowExpand = ImportRowExpand_MIPS32; + WebPRescalerImportRowShrink = ImportRowShrink_MIPS32; + WebPRescalerExportRowExpand = ExportRowExpand_MIPS32; +// WebPRescalerExportRowShrink = ExportRowShrink_MIPS32; +} + +#else // !WEBP_USE_MIPS32 + +WEBP_DSP_INIT_STUB(WebPRescalerDspInitMIPS32) + +#endif // WEBP_USE_MIPS32 diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_rescaler_mips_dsp_r2.c b/vendor/github.com/sizeofint/webpanimation/dsp_rescaler_mips_dsp_r2.c new file mode 100644 index 00000000..0054ee53 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_rescaler_mips_dsp_r2.c @@ -0,0 +1,314 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. 
+// -----------------------------------------------------------------------------
+//
+// MIPS version of rescaling functions
+//
+// Author(s): Djordje Pesut (djordje.pesut@imgtec.com)
+
+#include "dsp_dsp.h"
+
+#if defined(WEBP_USE_MIPS_DSP_R2) && !defined(WEBP_REDUCE_SIZE)
+
+#include <assert.h>
+#include "utils_rescaler_utils.h"
+
+#define ROUNDER (WEBP_RESCALER_ONE >> 1)
+#define MULT_FIX(x, y) (((uint64_t)(x) * (y) + ROUNDER) >> WEBP_RESCALER_RFIX)
+#define MULT_FIX_FLOOR(x, y) (((uint64_t)(x) * (y)) >> WEBP_RESCALER_RFIX)
+
+//------------------------------------------------------------------------------
+// Row export
+
+#if 0  // disabled for now. TODO(skal): make match the C-code
+static void ExportRowShrink_MIPSdspR2(WebPRescaler* const wrk) {
+  int i;
+  const int x_out_max = wrk->dst_width * wrk->num_channels;
+  uint8_t* dst = wrk->dst;
+  rescaler_t* irow = wrk->irow;
+  const rescaler_t* frow = wrk->frow;
+  const int yscale = wrk->fy_scale * (-wrk->y_accum);
+  int temp0, temp1, temp2, temp3, temp4, temp5, loop_end;
+  const int temp7 = (int)wrk->fxy_scale;
+  const int temp6 = (x_out_max & ~0x3) << 2;
+  assert(!WebPRescalerOutputDone(wrk));
+  assert(wrk->y_accum <= 0);
+  assert(!wrk->y_expand);
+  assert(wrk->fxy_scale != 0);
+  if (yscale) {
+    if (x_out_max >= 4) {
+      int temp8, temp9, temp10, temp11;
+      __asm__ volatile (
+        "li %[temp3], 0x10000 \n\t"
+        "li %[temp4], 0x8000 \n\t"
+        "addu %[loop_end], %[frow], %[temp6] \n\t"
+        "1: \n\t"
+        "lw %[temp0], 0(%[frow]) \n\t"
+        "lw %[temp1], 4(%[frow]) \n\t"
+        "lw %[temp2], 8(%[frow]) \n\t"
+        "lw %[temp5], 12(%[frow]) \n\t"
+        "mult $ac0, %[temp3], %[temp4] \n\t"
+        "maddu $ac0, %[temp0], %[yscale] \n\t"
+        "mult $ac1, %[temp3], %[temp4] \n\t"
+        "maddu $ac1, %[temp1], %[yscale] \n\t"
+        "mult $ac2, %[temp3], %[temp4] \n\t"
+        "maddu $ac2, %[temp2], %[yscale] \n\t"
+        "mult $ac3, %[temp3], %[temp4] \n\t"
+        "maddu $ac3, %[temp5], %[yscale] \n\t"
+        "addiu %[frow], %[frow], 16 \n\t"
+        "mfhi %[temp0], $ac0 \n\t"
+        "mfhi %[temp1], $ac1 \n\t"
+        "mfhi %[temp2], $ac2 \n\t"
+        "mfhi %[temp5], $ac3 \n\t"
+        "lw %[temp8], 0(%[irow]) \n\t"
+        "lw %[temp9], 4(%[irow]) \n\t"
+        "lw %[temp10], 8(%[irow]) \n\t"
+        "lw %[temp11], 12(%[irow]) \n\t"
+        "addiu %[dst], %[dst], 4 \n\t"
+        "addiu %[irow], %[irow], 16 \n\t"
+        "subu %[temp8], %[temp8], %[temp0] \n\t"
+        "subu %[temp9], %[temp9], %[temp1] \n\t"
+        "subu %[temp10], %[temp10], %[temp2] \n\t"
+        "subu %[temp11], %[temp11], %[temp5] \n\t"
+        "mult $ac0, %[temp3], %[temp4] \n\t"
+        "maddu $ac0, %[temp8], %[temp7] \n\t"
+        "mult $ac1, %[temp3], %[temp4] \n\t"
+        "maddu $ac1, %[temp9], %[temp7] \n\t"
+        "mult $ac2, %[temp3], %[temp4] \n\t"
+        "maddu $ac2, %[temp10], %[temp7] \n\t"
+        "mult $ac3, %[temp3], %[temp4] \n\t"
+        "maddu $ac3, %[temp11], %[temp7] \n\t"
+        "mfhi %[temp8], $ac0 \n\t"
+        "mfhi %[temp9], $ac1 \n\t"
+        "mfhi %[temp10], $ac2 \n\t"
+        "mfhi %[temp11], $ac3 \n\t"
+        "sw %[temp0], -16(%[irow]) \n\t"
+        "sw %[temp1], -12(%[irow]) \n\t"
+        "sw %[temp2], -8(%[irow]) \n\t"
+        "sw %[temp5], -4(%[irow]) \n\t"
+        "sb %[temp8], -4(%[dst]) \n\t"
+        "sb %[temp9], -3(%[dst]) \n\t"
+        "sb %[temp10], -2(%[dst]) \n\t"
+        "sb %[temp11], -1(%[dst]) \n\t"
+        "bne %[frow], %[loop_end], 1b \n\t"
+        : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp3]"=&r"(temp3),
+          [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), [frow]"+r"(frow),
+          [irow]"+r"(irow), [dst]"+r"(dst), [loop_end]"=&r"(loop_end),
+          [temp8]"=&r"(temp8), [temp9]"=&r"(temp9), [temp10]"=&r"(temp10),
+          [temp11]"=&r"(temp11), [temp2]"=&r"(temp2)
+        : [temp7]"r"(temp7), [yscale]"r"(yscale), [temp6]"r"(temp6)
+
: "memory", "hi", "lo", "$ac1hi", "$ac1lo", + "$ac2hi", "$ac2lo", "$ac3hi", "$ac3lo" + ); + } + for (i = 0; i < (x_out_max & 0x3); ++i) { + const uint32_t frac = (uint32_t)MULT_FIX_FLOOR(*frow++, yscale); + const int v = (int)MULT_FIX(*irow - frac, wrk->fxy_scale); + *dst++ = (v > 255) ? 255u : (uint8_t)v; + *irow++ = frac; // new fractional start + } + } else { + if (x_out_max >= 4) { + __asm__ volatile ( + "li %[temp3], 0x10000 \n\t" + "li %[temp4], 0x8000 \n\t" + "addu %[loop_end], %[irow], %[temp6] \n\t" + "1: \n\t" + "lw %[temp0], 0(%[irow]) \n\t" + "lw %[temp1], 4(%[irow]) \n\t" + "lw %[temp2], 8(%[irow]) \n\t" + "lw %[temp5], 12(%[irow]) \n\t" + "addiu %[dst], %[dst], 4 \n\t" + "addiu %[irow], %[irow], 16 \n\t" + "mult $ac0, %[temp3], %[temp4] \n\t" + "maddu $ac0, %[temp0], %[temp7] \n\t" + "mult $ac1, %[temp3], %[temp4] \n\t" + "maddu $ac1, %[temp1], %[temp7] \n\t" + "mult $ac2, %[temp3], %[temp4] \n\t" + "maddu $ac2, %[temp2], %[temp7] \n\t" + "mult $ac3, %[temp3], %[temp4] \n\t" + "maddu $ac3, %[temp5], %[temp7] \n\t" + "mfhi %[temp0], $ac0 \n\t" + "mfhi %[temp1], $ac1 \n\t" + "mfhi %[temp2], $ac2 \n\t" + "mfhi %[temp5], $ac3 \n\t" + "sw $zero, -16(%[irow]) \n\t" + "sw $zero, -12(%[irow]) \n\t" + "sw $zero, -8(%[irow]) \n\t" + "sw $zero, -4(%[irow]) \n\t" + "sb %[temp0], -4(%[dst]) \n\t" + "sb %[temp1], -3(%[dst]) \n\t" + "sb %[temp2], -2(%[dst]) \n\t" + "sb %[temp5], -1(%[dst]) \n\t" + "bne %[irow], %[loop_end], 1b \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp3]"=&r"(temp3), + [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), [irow]"+r"(irow), + [dst]"+r"(dst), [loop_end]"=&r"(loop_end), [temp2]"=&r"(temp2) + : [temp7]"r"(temp7), [temp6]"r"(temp6) + : "memory", "hi", "lo", "$ac1hi", "$ac1lo", + "$ac2hi", "$ac2lo", "$ac3hi", "$ac3lo" + ); + } + for (i = 0; i < (x_out_max & 0x3); ++i) { + const int v = (int)MULT_FIX_FLOOR(*irow, wrk->fxy_scale); + *dst++ = (v > 255) ? 
255u : (uint8_t)v; + *irow++ = 0; + } + } +} +#endif // 0 + +static void ExportRowExpand_MIPSdspR2(WebPRescaler* const wrk) { + int i; + uint8_t* dst = wrk->dst; + rescaler_t* irow = wrk->irow; + const int x_out_max = wrk->dst_width * wrk->num_channels; + const rescaler_t* frow = wrk->frow; + int temp0, temp1, temp2, temp3, temp4, temp5, loop_end; + const int temp6 = (x_out_max & ~0x3) << 2; + const int temp7 = (int)wrk->fy_scale; + assert(!WebPRescalerOutputDone(wrk)); + assert(wrk->y_accum <= 0); + assert(wrk->y_expand); + assert(wrk->y_sub != 0); + if (wrk->y_accum == 0) { + if (x_out_max >= 4) { + __asm__ volatile ( + "li %[temp4], 0x10000 \n\t" + "li %[temp5], 0x8000 \n\t" + "addu %[loop_end], %[frow], %[temp6] \n\t" + "1: \n\t" + "lw %[temp0], 0(%[frow]) \n\t" + "lw %[temp1], 4(%[frow]) \n\t" + "lw %[temp2], 8(%[frow]) \n\t" + "lw %[temp3], 12(%[frow]) \n\t" + "addiu %[dst], %[dst], 4 \n\t" + "addiu %[frow], %[frow], 16 \n\t" + "mult $ac0, %[temp4], %[temp5] \n\t" + "maddu $ac0, %[temp0], %[temp7] \n\t" + "mult $ac1, %[temp4], %[temp5] \n\t" + "maddu $ac1, %[temp1], %[temp7] \n\t" + "mult $ac2, %[temp4], %[temp5] \n\t" + "maddu $ac2, %[temp2], %[temp7] \n\t" + "mult $ac3, %[temp4], %[temp5] \n\t" + "maddu $ac3, %[temp3], %[temp7] \n\t" + "mfhi %[temp0], $ac0 \n\t" + "mfhi %[temp1], $ac1 \n\t" + "mfhi %[temp2], $ac2 \n\t" + "mfhi %[temp3], $ac3 \n\t" + "sb %[temp0], -4(%[dst]) \n\t" + "sb %[temp1], -3(%[dst]) \n\t" + "sb %[temp2], -2(%[dst]) \n\t" + "sb %[temp3], -1(%[dst]) \n\t" + "bne %[frow], %[loop_end], 1b \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp3]"=&r"(temp3), + [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), [frow]"+r"(frow), + [dst]"+r"(dst), [loop_end]"=&r"(loop_end), [temp2]"=&r"(temp2) + : [temp7]"r"(temp7), [temp6]"r"(temp6) + : "memory", "hi", "lo", "$ac1hi", "$ac1lo", + "$ac2hi", "$ac2lo", "$ac3hi", "$ac3lo" + ); + } + for (i = 0; i < (x_out_max & 0x3); ++i) { + const uint32_t J = *frow++; + const int v = (int)MULT_FIX(J, wrk->fy_scale); + *dst++ = (v > 255) ? 
255u : (uint8_t)v; + } + } else { + const uint32_t B = WEBP_RESCALER_FRAC(-wrk->y_accum, wrk->y_sub); + const uint32_t A = (uint32_t)(WEBP_RESCALER_ONE - B); + if (x_out_max >= 4) { + int temp8, temp9, temp10, temp11; + __asm__ volatile ( + "li %[temp8], 0x10000 \n\t" + "li %[temp9], 0x8000 \n\t" + "addu %[loop_end], %[frow], %[temp6] \n\t" + "1: \n\t" + "lw %[temp0], 0(%[frow]) \n\t" + "lw %[temp1], 4(%[frow]) \n\t" + "lw %[temp2], 8(%[frow]) \n\t" + "lw %[temp3], 12(%[frow]) \n\t" + "lw %[temp4], 0(%[irow]) \n\t" + "lw %[temp5], 4(%[irow]) \n\t" + "lw %[temp10], 8(%[irow]) \n\t" + "lw %[temp11], 12(%[irow]) \n\t" + "addiu %[dst], %[dst], 4 \n\t" + "mult $ac0, %[temp8], %[temp9] \n\t" + "maddu $ac0, %[A], %[temp0] \n\t" + "maddu $ac0, %[B], %[temp4] \n\t" + "mult $ac1, %[temp8], %[temp9] \n\t" + "maddu $ac1, %[A], %[temp1] \n\t" + "maddu $ac1, %[B], %[temp5] \n\t" + "mult $ac2, %[temp8], %[temp9] \n\t" + "maddu $ac2, %[A], %[temp2] \n\t" + "maddu $ac2, %[B], %[temp10] \n\t" + "mult $ac3, %[temp8], %[temp9] \n\t" + "maddu $ac3, %[A], %[temp3] \n\t" + "maddu $ac3, %[B], %[temp11] \n\t" + "addiu %[frow], %[frow], 16 \n\t" + "addiu %[irow], %[irow], 16 \n\t" + "mfhi %[temp0], $ac0 \n\t" + "mfhi %[temp1], $ac1 \n\t" + "mfhi %[temp2], $ac2 \n\t" + "mfhi %[temp3], $ac3 \n\t" + "mult $ac0, %[temp8], %[temp9] \n\t" + "maddu $ac0, %[temp0], %[temp7] \n\t" + "mult $ac1, %[temp8], %[temp9] \n\t" + "maddu $ac1, %[temp1], %[temp7] \n\t" + "mult $ac2, %[temp8], %[temp9] \n\t" + "maddu $ac2, %[temp2], %[temp7] \n\t" + "mult $ac3, %[temp8], %[temp9] \n\t" + "maddu $ac3, %[temp3], %[temp7] \n\t" + "mfhi %[temp0], $ac0 \n\t" + "mfhi %[temp1], $ac1 \n\t" + "mfhi %[temp2], $ac2 \n\t" + "mfhi %[temp3], $ac3 \n\t" + "sb %[temp0], -4(%[dst]) \n\t" + "sb %[temp1], -3(%[dst]) \n\t" + "sb %[temp2], -2(%[dst]) \n\t" + "sb %[temp3], -1(%[dst]) \n\t" + "bne %[frow], %[loop_end], 1b \n\t" + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp3]"=&r"(temp3), + [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), [frow]"+r"(frow), + [irow]"+r"(irow), [dst]"+r"(dst), [loop_end]"=&r"(loop_end), + [temp8]"=&r"(temp8), [temp9]"=&r"(temp9), [temp10]"=&r"(temp10), + [temp11]"=&r"(temp11), [temp2]"=&r"(temp2) + : [temp7]"r"(temp7), [temp6]"r"(temp6), [A]"r"(A), [B]"r"(B) + : "memory", "hi", "lo", "$ac1hi", "$ac1lo", + "$ac2hi", "$ac2lo", "$ac3hi", "$ac3lo" + ); + } + for (i = 0; i < (x_out_max & 0x3); ++i) { + const uint64_t I = (uint64_t)A * *frow++ + + (uint64_t)B * *irow++; + const uint32_t J = (uint32_t)((I + ROUNDER) >> WEBP_RESCALER_RFIX); + const int v = (int)MULT_FIX(J, wrk->fy_scale); + *dst++ = (v > 255) ? 255u : (uint8_t)v; + } + } +} + +#undef MULT_FIX_FLOOR +#undef MULT_FIX +#undef ROUNDER + +//------------------------------------------------------------------------------ +// Entry point + +extern void WebPRescalerDspInitMIPSdspR2(void); + +WEBP_TSAN_IGNORE_FUNCTION void WebPRescalerDspInitMIPSdspR2(void) { + WebPRescalerExportRowExpand = ExportRowExpand_MIPSdspR2; +// WebPRescalerExportRowShrink = ExportRowShrink_MIPSdspR2; +} + +#else // !WEBP_USE_MIPS_DSP_R2 + +WEBP_DSP_INIT_STUB(WebPRescalerDspInitMIPSdspR2) + +#endif // WEBP_USE_MIPS_DSP_R2 diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_rescaler_msa.c b/vendor/github.com/sizeofint/webpanimation/dsp_rescaler_msa.c new file mode 100644 index 00000000..fb99e34f --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_rescaler_msa.c @@ -0,0 +1,443 @@ +// Copyright 2016 Google Inc. All Rights Reserved. 
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// MSA version of rescaling functions
+//
+// Author: Prashant Patil (prashant.patil@imgtec.com)
+
+#include "dsp_dsp.h"
+
+#if defined(WEBP_USE_MSA) && !defined(WEBP_REDUCE_SIZE)
+
+#include <assert.h>
+
+#include "utils_rescaler_utils.h"
+#include "dsp_msa_macro.h"
+
+#define ROUNDER (WEBP_RESCALER_ONE >> 1)
+#define MULT_FIX(x, y) (((uint64_t)(x) * (y) + ROUNDER) >> WEBP_RESCALER_RFIX)
+#define MULT_FIX_FLOOR(x, y) (((uint64_t)(x) * (y)) >> WEBP_RESCALER_RFIX)
+
+#define CALC_MULT_FIX_16(in0, in1, in2, in3, scale, shift, dst) do { \
+  v4u32 tmp0, tmp1, tmp2, tmp3; \
+  v16u8 t0, t1, t2, t3, t4, t5; \
+  v2u64 out0, out1, out2, out3; \
+  ILVRL_W2_UW(zero, in0, tmp0, tmp1); \
+  ILVRL_W2_UW(zero, in1, tmp2, tmp3); \
+  DOTP_UW2_UD(tmp0, tmp1, scale, scale, out0, out1); \
+  DOTP_UW2_UD(tmp2, tmp3, scale, scale, out2, out3); \
+  SRAR_D4_UD(out0, out1, out2, out3, shift); \
+  PCKEV_B2_UB(out1, out0, out3, out2, t0, t1); \
+  ILVRL_W2_UW(zero, in2, tmp0, tmp1); \
+  ILVRL_W2_UW(zero, in3, tmp2, tmp3); \
+  DOTP_UW2_UD(tmp0, tmp1, scale, scale, out0, out1); \
+  DOTP_UW2_UD(tmp2, tmp3, scale, scale, out2, out3); \
+  SRAR_D4_UD(out0, out1, out2, out3, shift); \
+  PCKEV_B2_UB(out1, out0, out3, out2, t2, t3); \
+  PCKEV_B2_UB(t1, t0, t3, t2, t4, t5); \
+  dst = (v16u8)__msa_pckev_b((v16i8)t5, (v16i8)t4); \
+} while (0)
+
+#define CALC_MULT_FIX_4(in0, scale, shift, dst) do { \
+  v4u32 tmp0, tmp1; \
+  v16i8 t0, t1; \
+  v2u64 out0, out1; \
+  ILVRL_W2_UW(zero, in0, tmp0, tmp1); \
+  DOTP_UW2_UD(tmp0, tmp1, scale, scale, out0, out1); \
+  SRAR_D2_UD(out0, out1, shift); \
+  t0 = __msa_pckev_b((v16i8)out1, (v16i8)out0); \
+  t1 = __msa_pckev_b(t0, t0); \
+  t0 = __msa_pckev_b(t1, t1); \
+  dst = __msa_copy_s_w((v4i32)t0, 0); \
+} while (0)
+
+#define CALC_MULT_FIX1_16(in0, in1, in2, in3, fyscale, shift, \
+                          dst0, dst1, dst2, dst3) do { \
+  v4u32 tmp0, tmp1, tmp2, tmp3; \
+  v2u64 out0, out1, out2, out3; \
+  ILVRL_W2_UW(zero, in0, tmp0, tmp1); \
+  ILVRL_W2_UW(zero, in1, tmp2, tmp3); \
+  DOTP_UW2_UD(tmp0, tmp1, fyscale, fyscale, out0, out1); \
+  DOTP_UW2_UD(tmp2, tmp3, fyscale, fyscale, out2, out3); \
+  SRAR_D4_UD(out0, out1, out2, out3, shift); \
+  PCKEV_W2_UW(out1, out0, out3, out2, dst0, dst1); \
+  ILVRL_W2_UW(zero, in2, tmp0, tmp1); \
+  ILVRL_W2_UW(zero, in3, tmp2, tmp3); \
+  DOTP_UW2_UD(tmp0, tmp1, fyscale, fyscale, out0, out1); \
+  DOTP_UW2_UD(tmp2, tmp3, fyscale, fyscale, out2, out3); \
+  SRAR_D4_UD(out0, out1, out2, out3, shift); \
+  PCKEV_W2_UW(out1, out0, out3, out2, dst2, dst3); \
+} while (0)
+
+#define CALC_MULT_FIX1_4(in0, scale, shift, dst) do { \
+  v4u32 tmp0, tmp1; \
+  v2u64 out0, out1; \
+  ILVRL_W2_UW(zero, in0, tmp0, tmp1); \
+  DOTP_UW2_UD(tmp0, tmp1, scale, scale, out0, out1); \
+  SRAR_D2_UD(out0, out1, shift); \
+  dst = (v4u32)__msa_pckev_w((v4i32)out1, (v4i32)out0); \
+} while (0)
+
+#define CALC_MULT_FIX2_16(in0, in1, in2, in3, mult, scale, shift, \
+                          dst0, dst1) do { \
+  v4u32 tmp0, tmp1, tmp2, tmp3; \
+  v2u64 out0, out1, out2, out3; \
+  ILVRL_W2_UW(in0, in2, tmp0, tmp1); \
+  ILVRL_W2_UW(in1, in3, tmp2, tmp3); \
+  DOTP_UW2_UD(tmp0, tmp1, mult, mult, out0, out1); \
+  DOTP_UW2_UD(tmp2, tmp3, mult,
mult, out2, out3); \ + SRAR_D4_UD(out0, out1, out2, out3, shift); \ + DOTP_UW2_UD(out0, out1, scale, scale, out0, out1); \ + DOTP_UW2_UD(out2, out3, scale, scale, out2, out3); \ + SRAR_D4_UD(out0, out1, out2, out3, shift); \ + PCKEV_B2_UB(out1, out0, out3, out2, dst0, dst1); \ +} while (0) + +#define CALC_MULT_FIX2_4(in0, in1, mult, scale, shift, dst) do { \ + v4u32 tmp0, tmp1; \ + v2u64 out0, out1; \ + v16i8 t0, t1; \ + ILVRL_W2_UW(in0, in1, tmp0, tmp1); \ + DOTP_UW2_UD(tmp0, tmp1, mult, mult, out0, out1); \ + SRAR_D2_UD(out0, out1, shift); \ + DOTP_UW2_UD(out0, out1, scale, scale, out0, out1); \ + SRAR_D2_UD(out0, out1, shift); \ + t0 = __msa_pckev_b((v16i8)out1, (v16i8)out0); \ + t1 = __msa_pckev_b(t0, t0); \ + t0 = __msa_pckev_b(t1, t1); \ + dst = __msa_copy_s_w((v4i32)t0, 0); \ +} while (0) + +static WEBP_INLINE void ExportRowExpand_0(const uint32_t* frow, uint8_t* dst, + int length, + WebPRescaler* const wrk) { + const v4u32 scale = (v4u32)__msa_fill_w(wrk->fy_scale); + const v4u32 shift = (v4u32)__msa_fill_w(WEBP_RESCALER_RFIX); + const v4i32 zero = { 0 }; + + while (length >= 16) { + v4u32 src0, src1, src2, src3; + v16u8 out; + LD_UW4(frow, 4, src0, src1, src2, src3); + CALC_MULT_FIX_16(src0, src1, src2, src3, scale, shift, out); + ST_UB(out, dst); + length -= 16; + frow += 16; + dst += 16; + } + if (length > 0) { + int x_out; + if (length >= 12) { + uint32_t val0_m, val1_m, val2_m; + v4u32 src0, src1, src2; + LD_UW3(frow, 4, src0, src1, src2); + CALC_MULT_FIX_4(src0, scale, shift, val0_m); + CALC_MULT_FIX_4(src1, scale, shift, val1_m); + CALC_MULT_FIX_4(src2, scale, shift, val2_m); + SW3(val0_m, val1_m, val2_m, dst, 4); + length -= 12; + frow += 12; + dst += 12; + } else if (length >= 8) { + uint32_t val0_m, val1_m; + v4u32 src0, src1; + LD_UW2(frow, 4, src0, src1); + CALC_MULT_FIX_4(src0, scale, shift, val0_m); + CALC_MULT_FIX_4(src1, scale, shift, val1_m); + SW2(val0_m, val1_m, dst, 4); + length -= 8; + frow += 8; + dst += 8; + } else if (length >= 4) { + uint32_t val0_m; + const v4u32 src0 = LD_UW(frow); + CALC_MULT_FIX_4(src0, scale, shift, val0_m); + SW(val0_m, dst); + length -= 4; + frow += 4; + dst += 4; + } + for (x_out = 0; x_out < length; ++x_out) { + const uint32_t J = frow[x_out]; + const int v = (int)MULT_FIX(J, wrk->fy_scale); + dst[x_out] = (v > 255) ? 
255u : (uint8_t)v; + } + } +} + +static WEBP_INLINE void ExportRowExpand_1(const uint32_t* frow, uint32_t* irow, + uint8_t* dst, int length, + WebPRescaler* const wrk) { + const uint32_t B = WEBP_RESCALER_FRAC(-wrk->y_accum, wrk->y_sub); + const uint32_t A = (uint32_t)(WEBP_RESCALER_ONE - B); + const v4i32 B1 = __msa_fill_w(B); + const v4i32 A1 = __msa_fill_w(A); + const v4i32 AB = __msa_ilvr_w(A1, B1); + const v4u32 scale = (v4u32)__msa_fill_w(wrk->fy_scale); + const v4u32 shift = (v4u32)__msa_fill_w(WEBP_RESCALER_RFIX); + + while (length >= 16) { + v4u32 frow0, frow1, frow2, frow3, irow0, irow1, irow2, irow3; + v16u8 t0, t1, t2, t3, t4, t5; + LD_UW4(frow, 4, frow0, frow1, frow2, frow3); + LD_UW4(irow, 4, irow0, irow1, irow2, irow3); + CALC_MULT_FIX2_16(frow0, frow1, irow0, irow1, AB, scale, shift, t0, t1); + CALC_MULT_FIX2_16(frow2, frow3, irow2, irow3, AB, scale, shift, t2, t3); + PCKEV_B2_UB(t1, t0, t3, t2, t4, t5); + t0 = (v16u8)__msa_pckev_b((v16i8)t5, (v16i8)t4); + ST_UB(t0, dst); + frow += 16; + irow += 16; + dst += 16; + length -= 16; + } + if (length > 0) { + int x_out; + if (length >= 12) { + uint32_t val0_m, val1_m, val2_m; + v4u32 frow0, frow1, frow2, irow0, irow1, irow2; + LD_UW3(frow, 4, frow0, frow1, frow2); + LD_UW3(irow, 4, irow0, irow1, irow2); + CALC_MULT_FIX2_4(frow0, irow0, AB, scale, shift, val0_m); + CALC_MULT_FIX2_4(frow1, irow1, AB, scale, shift, val1_m); + CALC_MULT_FIX2_4(frow2, irow2, AB, scale, shift, val2_m); + SW3(val0_m, val1_m, val2_m, dst, 4); + frow += 12; + irow += 12; + dst += 12; + length -= 12; + } else if (length >= 8) { + uint32_t val0_m, val1_m; + v4u32 frow0, frow1, irow0, irow1; + LD_UW2(frow, 4, frow0, frow1); + LD_UW2(irow, 4, irow0, irow1); + CALC_MULT_FIX2_4(frow0, irow0, AB, scale, shift, val0_m); + CALC_MULT_FIX2_4(frow1, irow1, AB, scale, shift, val1_m); + SW2(val0_m, val1_m, dst, 4); + frow += 4; + irow += 4; + dst += 4; + length -= 4; + } else if (length >= 4) { + uint32_t val0_m; + const v4u32 frow0 = LD_UW(frow + 0); + const v4u32 irow0 = LD_UW(irow + 0); + CALC_MULT_FIX2_4(frow0, irow0, AB, scale, shift, val0_m); + SW(val0_m, dst); + frow += 4; + irow += 4; + dst += 4; + length -= 4; + } + for (x_out = 0; x_out < length; ++x_out) { + const uint64_t I = (uint64_t)A * frow[x_out] + + (uint64_t)B * irow[x_out]; + const uint32_t J = (uint32_t)((I + ROUNDER) >> WEBP_RESCALER_RFIX); + const int v = (int)MULT_FIX(J, wrk->fy_scale); + dst[x_out] = (v > 255) ? 255u : (uint8_t)v; + } + } +} + +static void RescalerExportRowExpand_MIPSdspR2(WebPRescaler* const wrk) { + uint8_t* dst = wrk->dst; + rescaler_t* irow = wrk->irow; + const int x_out_max = wrk->dst_width * wrk->num_channels; + const rescaler_t* frow = wrk->frow; + assert(!WebPRescalerOutputDone(wrk)); + assert(wrk->y_accum <= 0); + assert(wrk->y_expand); + assert(wrk->y_sub != 0); + if (wrk->y_accum == 0) { + ExportRowExpand_0(frow, dst, x_out_max, wrk); + } else { + ExportRowExpand_1(frow, irow, dst, x_out_max, wrk); + } +} + +#if 0 // disabled for now. 
TODO(skal): make match the C-code +static WEBP_INLINE void ExportRowShrink_0(const uint32_t* frow, uint32_t* irow, + uint8_t* dst, int length, + const uint32_t yscale, + WebPRescaler* const wrk) { + const v4u32 y_scale = (v4u32)__msa_fill_w(yscale); + const v4u32 fxyscale = (v4u32)__msa_fill_w(wrk->fxy_scale); + const v4u32 shiftval = (v4u32)__msa_fill_w(WEBP_RESCALER_RFIX); + const v4i32 zero = { 0 }; + + while (length >= 16) { + v4u32 src0, src1, src2, src3, frac0, frac1, frac2, frac3; + v16u8 out; + LD_UW4(frow, 4, src0, src1, src2, src3); + CALC_MULT_FIX1_16(src0, src1, src2, src3, y_scale, shiftval, + frac0, frac1, frac2, frac3); + LD_UW4(irow, 4, src0, src1, src2, src3); + SUB4(src0, frac0, src1, frac1, src2, frac2, src3, frac3, + src0, src1, src2, src3); + CALC_MULT_FIX_16(src0, src1, src2, src3, fxyscale, shiftval, out); + ST_UB(out, dst); + ST_UW4(frac0, frac1, frac2, frac3, irow, 4); + frow += 16; + irow += 16; + dst += 16; + length -= 16; + } + if (length > 0) { + int x_out; + if (length >= 12) { + uint32_t val0_m, val1_m, val2_m; + v4u32 src0, src1, src2, frac0, frac1, frac2; + LD_UW3(frow, 4, src0, src1, src2); + CALC_MULT_FIX1_4(src0, y_scale, shiftval, frac0); + CALC_MULT_FIX1_4(src1, y_scale, shiftval, frac1); + CALC_MULT_FIX1_4(src2, y_scale, shiftval, frac2); + LD_UW3(irow, 4, src0, src1, src2); + SUB3(src0, frac0, src1, frac1, src2, frac2, src0, src1, src2); + CALC_MULT_FIX_4(src0, fxyscale, shiftval, val0_m); + CALC_MULT_FIX_4(src1, fxyscale, shiftval, val1_m); + CALC_MULT_FIX_4(src2, fxyscale, shiftval, val2_m); + SW3(val0_m, val1_m, val2_m, dst, 4); + ST_UW3(frac0, frac1, frac2, irow, 4); + frow += 12; + irow += 12; + dst += 12; + length -= 12; + } else if (length >= 8) { + uint32_t val0_m, val1_m; + v4u32 src0, src1, frac0, frac1; + LD_UW2(frow, 4, src0, src1); + CALC_MULT_FIX1_4(src0, y_scale, shiftval, frac0); + CALC_MULT_FIX1_4(src1, y_scale, shiftval, frac1); + LD_UW2(irow, 4, src0, src1); + SUB2(src0, frac0, src1, frac1, src0, src1); + CALC_MULT_FIX_4(src0, fxyscale, shiftval, val0_m); + CALC_MULT_FIX_4(src1, fxyscale, shiftval, val1_m); + SW2(val0_m, val1_m, dst, 4); + ST_UW2(frac0, frac1, irow, 4); + frow += 8; + irow += 8; + dst += 8; + length -= 8; + } else if (length >= 4) { + uint32_t val0_m; + v4u32 frac0; + v4u32 src0 = LD_UW(frow); + CALC_MULT_FIX1_4(src0, y_scale, shiftval, frac0); + src0 = LD_UW(irow); + src0 = src0 - frac0; + CALC_MULT_FIX_4(src0, fxyscale, shiftval, val0_m); + SW(val0_m, dst); + ST_UW(frac0, irow); + frow += 4; + irow += 4; + dst += 4; + length -= 4; + } + for (x_out = 0; x_out < length; ++x_out) { + const uint32_t frac = (uint32_t)MULT_FIX_FLOOR(frow[x_out], yscale); + const int v = (int)MULT_FIX(irow[x_out] - frac, wrk->fxy_scale); + dst[x_out] = (v > 255) ? 
255u : (uint8_t)v; + irow[x_out] = frac; + } + } +} + +static WEBP_INLINE void ExportRowShrink_1(uint32_t* irow, uint8_t* dst, + int length, + WebPRescaler* const wrk) { + const v4u32 scale = (v4u32)__msa_fill_w(wrk->fxy_scale); + const v4u32 shift = (v4u32)__msa_fill_w(WEBP_RESCALER_RFIX); + const v4i32 zero = { 0 }; + + while (length >= 16) { + v4u32 src0, src1, src2, src3; + v16u8 dst0; + LD_UW4(irow, 4, src0, src1, src2, src3); + CALC_MULT_FIX_16(src0, src1, src2, src3, scale, shift, dst0); + ST_UB(dst0, dst); + ST_SW4(zero, zero, zero, zero, irow, 4); + length -= 16; + irow += 16; + dst += 16; + } + if (length > 0) { + int x_out; + if (length >= 12) { + uint32_t val0_m, val1_m, val2_m; + v4u32 src0, src1, src2; + LD_UW3(irow, 4, src0, src1, src2); + CALC_MULT_FIX_4(src0, scale, shift, val0_m); + CALC_MULT_FIX_4(src1, scale, shift, val1_m); + CALC_MULT_FIX_4(src2, scale, shift, val2_m); + SW3(val0_m, val1_m, val2_m, dst, 4); + ST_SW3(zero, zero, zero, irow, 4); + length -= 12; + irow += 12; + dst += 12; + } else if (length >= 8) { + uint32_t val0_m, val1_m; + v4u32 src0, src1; + LD_UW2(irow, 4, src0, src1); + CALC_MULT_FIX_4(src0, scale, shift, val0_m); + CALC_MULT_FIX_4(src1, scale, shift, val1_m); + SW2(val0_m, val1_m, dst, 4); + ST_SW2(zero, zero, irow, 4); + length -= 8; + irow += 8; + dst += 8; + } else if (length >= 4) { + uint32_t val0_m; + const v4u32 src0 = LD_UW(irow + 0); + CALC_MULT_FIX_4(src0, scale, shift, val0_m); + SW(val0_m, dst); + ST_SW(zero, irow); + length -= 4; + irow += 4; + dst += 4; + } + for (x_out = 0; x_out < length; ++x_out) { + const int v = (int)MULT_FIX(irow[x_out], wrk->fxy_scale); + dst[x_out] = (v > 255) ? 255u : (uint8_t)v; + irow[x_out] = 0; + } + } +} + +static void RescalerExportRowShrink_MIPSdspR2(WebPRescaler* const wrk) { + uint8_t* dst = wrk->dst; + rescaler_t* irow = wrk->irow; + const int x_out_max = wrk->dst_width * wrk->num_channels; + const rescaler_t* frow = wrk->frow; + const uint32_t yscale = wrk->fy_scale * (-wrk->y_accum); + assert(!WebPRescalerOutputDone(wrk)); + assert(wrk->y_accum <= 0); + assert(!wrk->y_expand); + if (yscale) { + ExportRowShrink_0(frow, irow, dst, x_out_max, yscale, wrk); + } else { + ExportRowShrink_1(irow, dst, x_out_max, wrk); + } +} +#endif // 0 + +//------------------------------------------------------------------------------ +// Entry point + +extern void WebPRescalerDspInitMSA(void); + +WEBP_TSAN_IGNORE_FUNCTION void WebPRescalerDspInitMSA(void) { + WebPRescalerExportRowExpand = RescalerExportRowExpand_MIPSdspR2; +// WebPRescalerExportRowShrink = RescalerExportRowShrink_MIPSdspR2; +} + +#else // !WEBP_USE_MSA + +WEBP_DSP_INIT_STUB(WebPRescalerDspInitMSA) + +#endif // WEBP_USE_MSA diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_rescaler_neon.c b/vendor/github.com/sizeofint/webpanimation/dsp_rescaler_neon.c new file mode 100644 index 00000000..8b066e51 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_rescaler_neon.c @@ -0,0 +1,192 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. 
+// -----------------------------------------------------------------------------
+//
+// NEON version of rescaling functions
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include "dsp_dsp.h"
+
+#if defined(WEBP_USE_NEON) && !defined(WEBP_REDUCE_SIZE)
+
+#include <arm_neon.h>
+#include <assert.h>
+#include "dsp_neon.h"
+#include "utils_rescaler_utils.h"
+
+#define ROUNDER (WEBP_RESCALER_ONE >> 1)
+#define MULT_FIX_C(x, y) (((uint64_t)(x) * (y) + ROUNDER) >> WEBP_RESCALER_RFIX)
+#define MULT_FIX_FLOOR_C(x, y) (((uint64_t)(x) * (y)) >> WEBP_RESCALER_RFIX)
+
+#define LOAD_32x4(SRC, DST) const uint32x4_t DST = vld1q_u32((SRC))
+#define LOAD_32x8(SRC, DST0, DST1) \
+    LOAD_32x4(SRC + 0, DST0); \
+    LOAD_32x4(SRC + 4, DST1)
+
+#define STORE_32x8(SRC0, SRC1, DST) do { \
+  vst1q_u32((DST) + 0, SRC0); \
+  vst1q_u32((DST) + 4, SRC1); \
+} while (0);
+
+#if (WEBP_RESCALER_RFIX == 32)
+#define MAKE_HALF_CST(C) vdupq_n_s32((int32_t)((C) >> 1))
+// note: B is actually scale>>1. See MAKE_HALF_CST
+#define MULT_FIX(A, B) \
+    vreinterpretq_u32_s32(vqrdmulhq_s32(vreinterpretq_s32_u32((A)), (B)))
+#define MULT_FIX_FLOOR(A, B) \
+    vreinterpretq_u32_s32(vqdmulhq_s32(vreinterpretq_s32_u32((A)), (B)))
+#else
+#error "MULT_FIX/WEBP_RESCALER_RFIX need some more work"
+#endif
+
+static uint32x4_t Interpolate_NEON(const rescaler_t* const frow,
+                                   const rescaler_t* const irow,
+                                   uint32_t A, uint32_t B) {
+  LOAD_32x4(frow, A0);
+  LOAD_32x4(irow, B0);
+  const uint64x2_t C0 = vmull_n_u32(vget_low_u32(A0), A);
+  const uint64x2_t C1 = vmull_n_u32(vget_high_u32(A0), A);
+  const uint64x2_t D0 = vmlal_n_u32(C0, vget_low_u32(B0), B);
+  const uint64x2_t D1 = vmlal_n_u32(C1, vget_high_u32(B0), B);
+  const uint32x4_t E = vcombine_u32(
+      vrshrn_n_u64(D0, WEBP_RESCALER_RFIX),
+      vrshrn_n_u64(D1, WEBP_RESCALER_RFIX));
+  return E;
+}
+
+static void RescalerExportRowExpand_NEON(WebPRescaler* const wrk) {
+  int x_out;
+  uint8_t* const dst = wrk->dst;
+  rescaler_t* const irow = wrk->irow;
+  const int x_out_max = wrk->dst_width * wrk->num_channels;
+  const int max_span = x_out_max & ~7;
+  const rescaler_t* const frow = wrk->frow;
+  const uint32_t fy_scale = wrk->fy_scale;
+  const int32x4_t fy_scale_half = MAKE_HALF_CST(fy_scale);
+  assert(!WebPRescalerOutputDone(wrk));
+  assert(wrk->y_accum <= 0);
+  assert(wrk->y_expand);
+  assert(wrk->y_sub != 0);
+  if (wrk->y_accum == 0) {
+    for (x_out = 0; x_out < max_span; x_out += 8) {
+      LOAD_32x4(frow + x_out + 0, A0);
+      LOAD_32x4(frow + x_out + 4, A1);
+      const uint32x4_t B0 = MULT_FIX(A0, fy_scale_half);
+      const uint32x4_t B1 = MULT_FIX(A1, fy_scale_half);
+      const uint16x4_t C0 = vmovn_u32(B0);
+      const uint16x4_t C1 = vmovn_u32(B1);
+      const uint8x8_t D = vqmovn_u16(vcombine_u16(C0, C1));
+      vst1_u8(dst + x_out, D);
+    }
+    for (; x_out < x_out_max; ++x_out) {
+      const uint32_t J = frow[x_out];
+      const int v = (int)MULT_FIX_C(J, fy_scale);
+      dst[x_out] = (v > 255) ?
255u : (uint8_t)v; + } + } else { + const uint32_t B = WEBP_RESCALER_FRAC(-wrk->y_accum, wrk->y_sub); + const uint32_t A = (uint32_t)(WEBP_RESCALER_ONE - B); + for (x_out = 0; x_out < max_span; x_out += 8) { + const uint32x4_t C0 = + Interpolate_NEON(frow + x_out + 0, irow + x_out + 0, A, B); + const uint32x4_t C1 = + Interpolate_NEON(frow + x_out + 4, irow + x_out + 4, A, B); + const uint32x4_t D0 = MULT_FIX(C0, fy_scale_half); + const uint32x4_t D1 = MULT_FIX(C1, fy_scale_half); + const uint16x4_t E0 = vmovn_u32(D0); + const uint16x4_t E1 = vmovn_u32(D1); + const uint8x8_t F = vqmovn_u16(vcombine_u16(E0, E1)); + vst1_u8(dst + x_out, F); + } + for (; x_out < x_out_max; ++x_out) { + const uint64_t I = (uint64_t)A * frow[x_out] + + (uint64_t)B * irow[x_out]; + const uint32_t J = (uint32_t)((I + ROUNDER) >> WEBP_RESCALER_RFIX); + const int v = (int)MULT_FIX_C(J, fy_scale); + dst[x_out] = (v > 255) ? 255u : (uint8_t)v; + } + } +} + +static void RescalerExportRowShrink_NEON(WebPRescaler* const wrk) { + int x_out; + uint8_t* const dst = wrk->dst; + rescaler_t* const irow = wrk->irow; + const int x_out_max = wrk->dst_width * wrk->num_channels; + const int max_span = x_out_max & ~7; + const rescaler_t* const frow = wrk->frow; + const uint32_t yscale = wrk->fy_scale * (-wrk->y_accum); + const uint32_t fxy_scale = wrk->fxy_scale; + const uint32x4_t zero = vdupq_n_u32(0); + const int32x4_t yscale_half = MAKE_HALF_CST(yscale); + const int32x4_t fxy_scale_half = MAKE_HALF_CST(fxy_scale); + assert(!WebPRescalerOutputDone(wrk)); + assert(wrk->y_accum <= 0); + assert(!wrk->y_expand); + if (yscale) { + for (x_out = 0; x_out < max_span; x_out += 8) { + LOAD_32x8(frow + x_out, in0, in1); + LOAD_32x8(irow + x_out, in2, in3); + const uint32x4_t A0 = MULT_FIX_FLOOR(in0, yscale_half); + const uint32x4_t A1 = MULT_FIX_FLOOR(in1, yscale_half); + const uint32x4_t B0 = vqsubq_u32(in2, A0); + const uint32x4_t B1 = vqsubq_u32(in3, A1); + const uint32x4_t C0 = MULT_FIX(B0, fxy_scale_half); + const uint32x4_t C1 = MULT_FIX(B1, fxy_scale_half); + const uint16x4_t D0 = vmovn_u32(C0); + const uint16x4_t D1 = vmovn_u32(C1); + const uint8x8_t E = vqmovn_u16(vcombine_u16(D0, D1)); + vst1_u8(dst + x_out, E); + STORE_32x8(A0, A1, irow + x_out); + } + for (; x_out < x_out_max; ++x_out) { + const uint32_t frac = (uint32_t)MULT_FIX_FLOOR_C(frow[x_out], yscale); + const int v = (int)MULT_FIX_C(irow[x_out] - frac, fxy_scale); + dst[x_out] = (v > 255) ? 255u : (uint8_t)v; + irow[x_out] = frac; // new fractional start + } + } else { + for (x_out = 0; x_out < max_span; x_out += 8) { + LOAD_32x8(irow + x_out, in0, in1); + const uint32x4_t A0 = MULT_FIX(in0, fxy_scale_half); + const uint32x4_t A1 = MULT_FIX(in1, fxy_scale_half); + const uint16x4_t B0 = vmovn_u32(A0); + const uint16x4_t B1 = vmovn_u32(A1); + const uint8x8_t C = vqmovn_u16(vcombine_u16(B0, B1)); + vst1_u8(dst + x_out, C); + STORE_32x8(zero, zero, irow + x_out); + } + for (; x_out < x_out_max; ++x_out) { + const int v = (int)MULT_FIX_C(irow[x_out], fxy_scale); + dst[x_out] = (v > 255) ? 
255u : (uint8_t)v;
+      irow[x_out] = 0;
+    }
+  }
+}
+
+#undef MULT_FIX_FLOOR_C
+#undef MULT_FIX_C
+#undef MULT_FIX_FLOOR
+#undef MULT_FIX
+#undef ROUNDER
+
+//------------------------------------------------------------------------------
+
+extern void WebPRescalerDspInitNEON(void);
+
+WEBP_TSAN_IGNORE_FUNCTION void WebPRescalerDspInitNEON(void) {
+  WebPRescalerExportRowExpand = RescalerExportRowExpand_NEON;
+  WebPRescalerExportRowShrink = RescalerExportRowShrink_NEON;
+}
+
+#else  // !WEBP_USE_NEON
+
+WEBP_DSP_INIT_STUB(WebPRescalerDspInitNEON)
+
+#endif  // WEBP_USE_NEON
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_rescaler_sse2.c b/vendor/github.com/sizeofint/webpanimation/dsp_rescaler_sse2.c
new file mode 100644
index 00000000..09469d9b
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_rescaler_sse2.c
@@ -0,0 +1,366 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// SSE2 Rescaling functions
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include "dsp_dsp.h"
+
+#if defined(WEBP_USE_SSE2) && !defined(WEBP_REDUCE_SIZE)
+#include <emmintrin.h>
+
+#include <assert.h>
+#include "utils_rescaler_utils.h"
+#include "utils_utils.h"
+
+//------------------------------------------------------------------------------
+// Implementations of critical functions ImportRow / ExportRow
+
+#define ROUNDER (WEBP_RESCALER_ONE >> 1)
+#define MULT_FIX(x, y) (((uint64_t)(x) * (y) + ROUNDER) >> WEBP_RESCALER_RFIX)
+#define MULT_FIX_FLOOR(x, y) (((uint64_t)(x) * (y)) >> WEBP_RESCALER_RFIX)
+
+// input: 8 bytes ABCDEFGH -> output: A0E0B0F0C0G0D0H0
+static void LoadTwoPixels_SSE2(const uint8_t* const src, __m128i* out) {
+  const __m128i zero = _mm_setzero_si128();
+  const __m128i A = _mm_loadl_epi64((const __m128i*)(src));  // ABCDEFGH
+  const __m128i B = _mm_unpacklo_epi8(A, zero);  // A0B0C0D0E0F0G0H0
+  const __m128i C = _mm_srli_si128(B, 8);  // E0F0G0H0
+  *out = _mm_unpacklo_epi16(B, C);
+}
+
+// input: 8 bytes ABCDEFGH -> output: A0B0C0D0E0F0G0H0
+static void LoadEightPixels_SSE2(const uint8_t* const src, __m128i* out) {
+  const __m128i zero = _mm_setzero_si128();
+  const __m128i A = _mm_loadl_epi64((const __m128i*)(src));  // ABCDEFGH
+  *out = _mm_unpacklo_epi8(A, zero);
+}
+
+static void RescalerImportRowExpand_SSE2(WebPRescaler* const wrk,
+                                         const uint8_t* src) {
+  rescaler_t* frow = wrk->frow;
+  const rescaler_t* const frow_end = frow + wrk->dst_width * wrk->num_channels;
+  const int x_add = wrk->x_add;
+  int accum = x_add;
+  __m128i cur_pixels;
+
+  // SSE2 implementation only works with 16b signed arithmetic at max.
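+  // The weights packed for _mm_madd_epi16() below are 'accum' and
+  // 'x_add - accum'; both lie in [0, x_add], so x_add must fit in 15 bits for
+  // the signed 16-bit multiplies to be overflow-free. Otherwise (and for very
+  // narrow sources) defer to the plain C implementation.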
+ if (wrk->src_width < 8 || accum >= (1 << 15)) { + WebPRescalerImportRowExpand_C(wrk, src); + return; + } + + assert(!WebPRescalerInputDone(wrk)); + assert(wrk->x_expand); + if (wrk->num_channels == 4) { + LoadTwoPixels_SSE2(src, &cur_pixels); + src += 4; + while (1) { + const __m128i mult = _mm_set1_epi32(((x_add - accum) << 16) | accum); + const __m128i out = _mm_madd_epi16(cur_pixels, mult); + _mm_storeu_si128((__m128i*)frow, out); + frow += 4; + if (frow >= frow_end) break; + accum -= wrk->x_sub; + if (accum < 0) { + LoadTwoPixels_SSE2(src, &cur_pixels); + src += 4; + accum += x_add; + } + } + } else { + int left; + const uint8_t* const src_limit = src + wrk->src_width - 8; + LoadEightPixels_SSE2(src, &cur_pixels); + src += 7; + left = 7; + while (1) { + const __m128i mult = _mm_cvtsi32_si128(((x_add - accum) << 16) | accum); + const __m128i out = _mm_madd_epi16(cur_pixels, mult); + assert(sizeof(*frow) == sizeof(uint32_t)); + WebPUint32ToMem((uint8_t*)frow, _mm_cvtsi128_si32(out)); + frow += 1; + if (frow >= frow_end) break; + accum -= wrk->x_sub; + if (accum < 0) { + if (--left) { + cur_pixels = _mm_srli_si128(cur_pixels, 2); + } else if (src <= src_limit) { + LoadEightPixels_SSE2(src, &cur_pixels); + src += 7; + left = 7; + } else { // tail + cur_pixels = _mm_srli_si128(cur_pixels, 2); + cur_pixels = _mm_insert_epi16(cur_pixels, src[1], 1); + src += 1; + left = 1; + } + accum += x_add; + } + } + } + assert(accum == 0); +} + +static void RescalerImportRowShrink_SSE2(WebPRescaler* const wrk, + const uint8_t* src) { + const int x_sub = wrk->x_sub; + int accum = 0; + const __m128i zero = _mm_setzero_si128(); + const __m128i mult0 = _mm_set1_epi16(x_sub); + const __m128i mult1 = _mm_set1_epi32(wrk->fx_scale); + const __m128i rounder = _mm_set_epi32(0, ROUNDER, 0, ROUNDER); + __m128i sum = zero; + rescaler_t* frow = wrk->frow; + const rescaler_t* const frow_end = wrk->frow + 4 * wrk->dst_width; + + if (wrk->num_channels != 4 || wrk->x_add > (x_sub << 7)) { + WebPRescalerImportRowShrink_C(wrk, src); + return; + } + assert(!WebPRescalerInputDone(wrk)); + assert(!wrk->x_expand); + + for (; frow < frow_end; frow += 4) { + __m128i base = zero; + accum += wrk->x_add; + while (accum > 0) { + const __m128i A = _mm_cvtsi32_si128(WebPMemToUint32(src)); + src += 4; + base = _mm_unpacklo_epi8(A, zero); + // To avoid overflow, we need: base * x_add / x_sub < 32768 + // => x_add < x_sub << 7. That's a 1/128 reduction ratio limit. + sum = _mm_add_epi16(sum, base); + accum -= x_sub; + } + { // Emit next horizontal pixel. 
+ const __m128i mult = _mm_set1_epi16(-accum); + const __m128i frac0 = _mm_mullo_epi16(base, mult); // 16b x 16b -> 32b + const __m128i frac1 = _mm_mulhi_epu16(base, mult); + const __m128i frac = _mm_unpacklo_epi16(frac0, frac1); // frac is 32b + const __m128i A0 = _mm_mullo_epi16(sum, mult0); + const __m128i A1 = _mm_mulhi_epu16(sum, mult0); + const __m128i B0 = _mm_unpacklo_epi16(A0, A1); // sum * x_sub + const __m128i frow_out = _mm_sub_epi32(B0, frac); // sum * x_sub - frac + const __m128i D0 = _mm_srli_epi64(frac, 32); + const __m128i D1 = _mm_mul_epu32(frac, mult1); // 32b x 16b -> 64b + const __m128i D2 = _mm_mul_epu32(D0, mult1); + const __m128i E1 = _mm_add_epi64(D1, rounder); + const __m128i E2 = _mm_add_epi64(D2, rounder); + const __m128i F1 = _mm_shuffle_epi32(E1, 1 | (3 << 2)); + const __m128i F2 = _mm_shuffle_epi32(E2, 1 | (3 << 2)); + const __m128i G = _mm_unpacklo_epi32(F1, F2); + sum = _mm_packs_epi32(G, zero); + _mm_storeu_si128((__m128i*)frow, frow_out); + } + } + assert(accum == 0); +} + +//------------------------------------------------------------------------------ +// Row export + +// load *src as epi64, multiply by mult and store result in [out0 ... out3] +static WEBP_INLINE void LoadDispatchAndMult_SSE2(const rescaler_t* const src, + const __m128i* const mult, + __m128i* const out0, + __m128i* const out1, + __m128i* const out2, + __m128i* const out3) { + const __m128i A0 = _mm_loadu_si128((const __m128i*)(src + 0)); + const __m128i A1 = _mm_loadu_si128((const __m128i*)(src + 4)); + const __m128i A2 = _mm_srli_epi64(A0, 32); + const __m128i A3 = _mm_srli_epi64(A1, 32); + if (mult != NULL) { + *out0 = _mm_mul_epu32(A0, *mult); + *out1 = _mm_mul_epu32(A1, *mult); + *out2 = _mm_mul_epu32(A2, *mult); + *out3 = _mm_mul_epu32(A3, *mult); + } else { + *out0 = A0; + *out1 = A1; + *out2 = A2; + *out3 = A3; + } +} + +static WEBP_INLINE void ProcessRow_SSE2(const __m128i* const A0, + const __m128i* const A1, + const __m128i* const A2, + const __m128i* const A3, + const __m128i* const mult, + uint8_t* const dst) { + const __m128i rounder = _mm_set_epi32(0, ROUNDER, 0, ROUNDER); + const __m128i mask = _mm_set_epi32(0xffffffffu, 0, 0xffffffffu, 0); + const __m128i B0 = _mm_mul_epu32(*A0, *mult); + const __m128i B1 = _mm_mul_epu32(*A1, *mult); + const __m128i B2 = _mm_mul_epu32(*A2, *mult); + const __m128i B3 = _mm_mul_epu32(*A3, *mult); + const __m128i C0 = _mm_add_epi64(B0, rounder); + const __m128i C1 = _mm_add_epi64(B1, rounder); + const __m128i C2 = _mm_add_epi64(B2, rounder); + const __m128i C3 = _mm_add_epi64(B3, rounder); + const __m128i D0 = _mm_srli_epi64(C0, WEBP_RESCALER_RFIX); + const __m128i D1 = _mm_srli_epi64(C1, WEBP_RESCALER_RFIX); +#if (WEBP_RESCALER_RFIX < 32) + const __m128i D2 = + _mm_and_si128(_mm_slli_epi64(C2, 32 - WEBP_RESCALER_RFIX), mask); + const __m128i D3 = + _mm_and_si128(_mm_slli_epi64(C3, 32 - WEBP_RESCALER_RFIX), mask); +#else + const __m128i D2 = _mm_and_si128(C2, mask); + const __m128i D3 = _mm_and_si128(C3, mask); +#endif + const __m128i E0 = _mm_or_si128(D0, D2); + const __m128i E1 = _mm_or_si128(D1, D3); + const __m128i F = _mm_packs_epi32(E0, E1); + const __m128i G = _mm_packus_epi16(F, F); + _mm_storel_epi64((__m128i*)dst, G); +} + +static void RescalerExportRowExpand_SSE2(WebPRescaler* const wrk) { + int x_out; + uint8_t* const dst = wrk->dst; + rescaler_t* const irow = wrk->irow; + const int x_out_max = wrk->dst_width * wrk->num_channels; + const rescaler_t* const frow = wrk->frow; + const __m128i mult = _mm_set_epi32(0, wrk->fy_scale, 
0, wrk->fy_scale); + + assert(!WebPRescalerOutputDone(wrk)); + assert(wrk->y_accum <= 0 && wrk->y_sub + wrk->y_accum >= 0); + assert(wrk->y_expand); + if (wrk->y_accum == 0) { + for (x_out = 0; x_out + 8 <= x_out_max; x_out += 8) { + __m128i A0, A1, A2, A3; + LoadDispatchAndMult_SSE2(frow + x_out, NULL, &A0, &A1, &A2, &A3); + ProcessRow_SSE2(&A0, &A1, &A2, &A3, &mult, dst + x_out); + } + for (; x_out < x_out_max; ++x_out) { + const uint32_t J = frow[x_out]; + const int v = (int)MULT_FIX(J, wrk->fy_scale); + dst[x_out] = (v > 255) ? 255u : (uint8_t)v; + } + } else { + const uint32_t B = WEBP_RESCALER_FRAC(-wrk->y_accum, wrk->y_sub); + const uint32_t A = (uint32_t)(WEBP_RESCALER_ONE - B); + const __m128i mA = _mm_set_epi32(0, A, 0, A); + const __m128i mB = _mm_set_epi32(0, B, 0, B); + const __m128i rounder = _mm_set_epi32(0, ROUNDER, 0, ROUNDER); + for (x_out = 0; x_out + 8 <= x_out_max; x_out += 8) { + __m128i A0, A1, A2, A3, B0, B1, B2, B3; + LoadDispatchAndMult_SSE2(frow + x_out, &mA, &A0, &A1, &A2, &A3); + LoadDispatchAndMult_SSE2(irow + x_out, &mB, &B0, &B1, &B2, &B3); + { + const __m128i C0 = _mm_add_epi64(A0, B0); + const __m128i C1 = _mm_add_epi64(A1, B1); + const __m128i C2 = _mm_add_epi64(A2, B2); + const __m128i C3 = _mm_add_epi64(A3, B3); + const __m128i D0 = _mm_add_epi64(C0, rounder); + const __m128i D1 = _mm_add_epi64(C1, rounder); + const __m128i D2 = _mm_add_epi64(C2, rounder); + const __m128i D3 = _mm_add_epi64(C3, rounder); + const __m128i E0 = _mm_srli_epi64(D0, WEBP_RESCALER_RFIX); + const __m128i E1 = _mm_srli_epi64(D1, WEBP_RESCALER_RFIX); + const __m128i E2 = _mm_srli_epi64(D2, WEBP_RESCALER_RFIX); + const __m128i E3 = _mm_srli_epi64(D3, WEBP_RESCALER_RFIX); + ProcessRow_SSE2(&E0, &E1, &E2, &E3, &mult, dst + x_out); + } + } + for (; x_out < x_out_max; ++x_out) { + const uint64_t I = (uint64_t)A * frow[x_out] + + (uint64_t)B * irow[x_out]; + const uint32_t J = (uint32_t)((I + ROUNDER) >> WEBP_RESCALER_RFIX); + const int v = (int)MULT_FIX(J, wrk->fy_scale); + dst[x_out] = (v > 255) ? 
255u : (uint8_t)v; + } + } +} + +static void RescalerExportRowShrink_SSE2(WebPRescaler* const wrk) { + int x_out; + uint8_t* const dst = wrk->dst; + rescaler_t* const irow = wrk->irow; + const int x_out_max = wrk->dst_width * wrk->num_channels; + const rescaler_t* const frow = wrk->frow; + const uint32_t yscale = wrk->fy_scale * (-wrk->y_accum); + assert(!WebPRescalerOutputDone(wrk)); + assert(wrk->y_accum <= 0); + assert(!wrk->y_expand); + if (yscale) { + const int scale_xy = wrk->fxy_scale; + const __m128i mult_xy = _mm_set_epi32(0, scale_xy, 0, scale_xy); + const __m128i mult_y = _mm_set_epi32(0, yscale, 0, yscale); + for (x_out = 0; x_out + 8 <= x_out_max; x_out += 8) { + __m128i A0, A1, A2, A3, B0, B1, B2, B3; + LoadDispatchAndMult_SSE2(irow + x_out, NULL, &A0, &A1, &A2, &A3); + LoadDispatchAndMult_SSE2(frow + x_out, &mult_y, &B0, &B1, &B2, &B3); + { + const __m128i D0 = _mm_srli_epi64(B0, WEBP_RESCALER_RFIX); // = frac + const __m128i D1 = _mm_srli_epi64(B1, WEBP_RESCALER_RFIX); + const __m128i D2 = _mm_srli_epi64(B2, WEBP_RESCALER_RFIX); + const __m128i D3 = _mm_srli_epi64(B3, WEBP_RESCALER_RFIX); + const __m128i E0 = _mm_sub_epi64(A0, D0); // irow[x] - frac + const __m128i E1 = _mm_sub_epi64(A1, D1); + const __m128i E2 = _mm_sub_epi64(A2, D2); + const __m128i E3 = _mm_sub_epi64(A3, D3); + const __m128i F2 = _mm_slli_epi64(D2, 32); + const __m128i F3 = _mm_slli_epi64(D3, 32); + const __m128i G0 = _mm_or_si128(D0, F2); + const __m128i G1 = _mm_or_si128(D1, F3); + _mm_storeu_si128((__m128i*)(irow + x_out + 0), G0); + _mm_storeu_si128((__m128i*)(irow + x_out + 4), G1); + ProcessRow_SSE2(&E0, &E1, &E2, &E3, &mult_xy, dst + x_out); + } + } + for (; x_out < x_out_max; ++x_out) { + const uint32_t frac = (int)MULT_FIX_FLOOR(frow[x_out], yscale); + const int v = (int)MULT_FIX(irow[x_out] - frac, wrk->fxy_scale); + dst[x_out] = (v > 255) ? 255u : (uint8_t)v; + irow[x_out] = frac; // new fractional start + } + } else { + const uint32_t scale = wrk->fxy_scale; + const __m128i mult = _mm_set_epi32(0, scale, 0, scale); + const __m128i zero = _mm_setzero_si128(); + for (x_out = 0; x_out + 8 <= x_out_max; x_out += 8) { + __m128i A0, A1, A2, A3; + LoadDispatchAndMult_SSE2(irow + x_out, NULL, &A0, &A1, &A2, &A3); + _mm_storeu_si128((__m128i*)(irow + x_out + 0), zero); + _mm_storeu_si128((__m128i*)(irow + x_out + 4), zero); + ProcessRow_SSE2(&A0, &A1, &A2, &A3, &mult, dst + x_out); + } + for (; x_out < x_out_max; ++x_out) { + const int v = (int)MULT_FIX(irow[x_out], scale); + dst[x_out] = (v > 255) ? 255u : (uint8_t)v; + irow[x_out] = 0; + } + } +} + +#undef MULT_FIX_FLOOR +#undef MULT_FIX +#undef ROUNDER + +//------------------------------------------------------------------------------ + +extern void WebPRescalerDspInitSSE2(void); + +WEBP_TSAN_IGNORE_FUNCTION void WebPRescalerDspInitSSE2(void) { + WebPRescalerImportRowExpand = RescalerImportRowExpand_SSE2; + WebPRescalerImportRowShrink = RescalerImportRowShrink_SSE2; + WebPRescalerExportRowExpand = RescalerExportRowExpand_SSE2; + WebPRescalerExportRowShrink = RescalerExportRowShrink_SSE2; +} + +#else // !WEBP_USE_SSE2 + +WEBP_DSP_INIT_STUB(WebPRescalerDspInitSSE2) + +#endif // WEBP_USE_SSE2 diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_ssim.c b/vendor/github.com/sizeofint/webpanimation/dsp_ssim.c new file mode 100644 index 00000000..8a3f4646 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_ssim.c @@ -0,0 +1,159 @@ +// Copyright 2017 Google Inc. All Rights Reserved. 
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// distortion calculation
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <assert.h>
+#include <stdlib.h>  // for abs()
+
+#include "dsp_dsp.h"
+
+#if !defined(WEBP_REDUCE_SIZE)
+
+//------------------------------------------------------------------------------
+// SSIM / PSNR
+
+// hat-shaped filter. Sum of coefficients is equal to 16.
+static const uint32_t kWeight[2 * VP8_SSIM_KERNEL + 1] = {
+  1, 2, 3, 4, 3, 2, 1
+};
+static const uint32_t kWeightSum = 16 * 16;  // sum{kWeight}^2
+
+static WEBP_INLINE double SSIMCalculation(
+    const VP8DistoStats* const stats, uint32_t N /*num samples*/) {
+  const uint32_t w2 = N * N;
+  const uint32_t C1 = 20 * w2;
+  const uint32_t C2 = 60 * w2;
+  const uint32_t C3 = 8 * 8 * w2;  // 'dark' limit ~= 6
+  const uint64_t xmxm = (uint64_t)stats->xm * stats->xm;
+  const uint64_t ymym = (uint64_t)stats->ym * stats->ym;
+  if (xmxm + ymym >= C3) {
+    const int64_t xmym = (int64_t)stats->xm * stats->ym;
+    const int64_t sxy = (int64_t)stats->xym * N - xmym;  // can be negative
+    const uint64_t sxx = (uint64_t)stats->xxm * N - xmxm;
+    const uint64_t syy = (uint64_t)stats->yym * N - ymym;
+    // we descale by 8 to prevent overflow during the fnum/fden multiply.
+    const uint64_t num_S = (2 * (uint64_t)(sxy < 0 ? 0 : sxy) + C2) >> 8;
+    const uint64_t den_S = (sxx + syy + C2) >> 8;
+    const uint64_t fnum = (2 * xmym + C1) * num_S;
+    const uint64_t fden = (xmxm + ymym + C1) * den_S;
+    const double r = (double)fnum / fden;
+    assert(r >= 0. && r <= 1.0);
+    return r;
+  }
+  return 1.;   // area is too dark to contribute meaningfully
+}
+
+double VP8SSIMFromStats(const VP8DistoStats* const stats) {
+  return SSIMCalculation(stats, kWeightSum);
+}
+
+double VP8SSIMFromStatsClipped(const VP8DistoStats* const stats) {
+  return SSIMCalculation(stats, stats->w);
+}
+
+static double SSIMGetClipped_C(const uint8_t* src1, int stride1,
+                               const uint8_t* src2, int stride2,
+                               int xo, int yo, int W, int H) {
+  VP8DistoStats stats = { 0, 0, 0, 0, 0, 0 };
+  const int ymin = (yo - VP8_SSIM_KERNEL < 0) ? 0 : yo - VP8_SSIM_KERNEL;
+  const int ymax = (yo + VP8_SSIM_KERNEL > H - 1) ? H - 1
+                                                  : yo + VP8_SSIM_KERNEL;
+  const int xmin = (xo - VP8_SSIM_KERNEL < 0) ? 0 : xo - VP8_SSIM_KERNEL;
+  const int xmax = (xo + VP8_SSIM_KERNEL > W - 1) ?
W - 1 + : xo + VP8_SSIM_KERNEL; + int x, y; + src1 += ymin * stride1; + src2 += ymin * stride2; + for (y = ymin; y <= ymax; ++y, src1 += stride1, src2 += stride2) { + for (x = xmin; x <= xmax; ++x) { + const uint32_t w = kWeight[VP8_SSIM_KERNEL + x - xo] + * kWeight[VP8_SSIM_KERNEL + y - yo]; + const uint32_t s1 = src1[x]; + const uint32_t s2 = src2[x]; + stats.w += w; + stats.xm += w * s1; + stats.ym += w * s2; + stats.xxm += w * s1 * s1; + stats.xym += w * s1 * s2; + stats.yym += w * s2 * s2; + } + } + return VP8SSIMFromStatsClipped(&stats); +} + +static double SSIMGet_C(const uint8_t* src1, int stride1, + const uint8_t* src2, int stride2) { + VP8DistoStats stats = { 0, 0, 0, 0, 0, 0 }; + int x, y; + for (y = 0; y <= 2 * VP8_SSIM_KERNEL; ++y, src1 += stride1, src2 += stride2) { + for (x = 0; x <= 2 * VP8_SSIM_KERNEL; ++x) { + const uint32_t w = kWeight[x] * kWeight[y]; + const uint32_t s1 = src1[x]; + const uint32_t s2 = src2[x]; + stats.xm += w * s1; + stats.ym += w * s2; + stats.xxm += w * s1 * s1; + stats.xym += w * s1 * s2; + stats.yym += w * s2 * s2; + } + } + return VP8SSIMFromStats(&stats); +} + +#endif // !defined(WEBP_REDUCE_SIZE) + +//------------------------------------------------------------------------------ + +#if !defined(WEBP_DISABLE_STATS) +static uint32_t AccumulateSSE_C(const uint8_t* src1, + const uint8_t* src2, int len) { + int i; + uint32_t sse2 = 0; + assert(len <= 65535); // to ensure that accumulation fits within uint32_t + for (i = 0; i < len; ++i) { + const int32_t diff = src1[i] - src2[i]; + sse2 += diff * diff; + } + return sse2; +} +#endif + +//------------------------------------------------------------------------------ + +#if !defined(WEBP_REDUCE_SIZE) +VP8SSIMGetFunc VP8SSIMGet; +VP8SSIMGetClippedFunc VP8SSIMGetClipped; +#endif +#if !defined(WEBP_DISABLE_STATS) +VP8AccumulateSSEFunc VP8AccumulateSSE; +#endif + +extern void VP8SSIMDspInitSSE2(void); + +WEBP_DSP_INIT_FUNC(VP8SSIMDspInit) { +#if !defined(WEBP_REDUCE_SIZE) + VP8SSIMGetClipped = SSIMGetClipped_C; + VP8SSIMGet = SSIMGet_C; +#endif + +#if !defined(WEBP_DISABLE_STATS) + VP8AccumulateSSE = AccumulateSSE_C; +#endif + + if (VP8GetCPUInfo != NULL) { +#if defined(WEBP_USE_SSE2) + if (VP8GetCPUInfo(kSSE2)) { + VP8SSIMDspInitSSE2(); + } +#endif + } +} diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_ssim_sse2.c b/vendor/github.com/sizeofint/webpanimation/dsp_ssim_sse2.c new file mode 100644 index 00000000..82d5d7ab --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_ssim_sse2.c @@ -0,0 +1,165 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. 
+// -----------------------------------------------------------------------------
+//
+// SSE2 version of distortion calculation
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include "dsp_dsp.h"
+
+#if defined(WEBP_USE_SSE2)
+
+#include <assert.h>
+#include <emmintrin.h>
+
+#include "dsp_common_sse2.h"
+
+#if !defined(WEBP_DISABLE_STATS)
+
+// Helper function
+static WEBP_INLINE void SubtractAndSquare_SSE2(const __m128i a, const __m128i b,
+                                               __m128i* const sum) {
+  // take abs(a-b) in 8b
+  const __m128i a_b = _mm_subs_epu8(a, b);
+  const __m128i b_a = _mm_subs_epu8(b, a);
+  const __m128i abs_a_b = _mm_or_si128(a_b, b_a);
+  // zero-extend to 16b
+  const __m128i zero = _mm_setzero_si128();
+  const __m128i C0 = _mm_unpacklo_epi8(abs_a_b, zero);
+  const __m128i C1 = _mm_unpackhi_epi8(abs_a_b, zero);
+  // multiply with self
+  const __m128i sum1 = _mm_madd_epi16(C0, C0);
+  const __m128i sum2 = _mm_madd_epi16(C1, C1);
+  *sum = _mm_add_epi32(sum1, sum2);
+}
+
+//------------------------------------------------------------------------------
+// SSIM / PSNR entry point
+
+static uint32_t AccumulateSSE_SSE2(const uint8_t* src1,
+                                   const uint8_t* src2, int len) {
+  int i = 0;
+  uint32_t sse2 = 0;
+  if (len >= 16) {
+    const int limit = len - 32;
+    int32_t tmp[4];
+    __m128i sum1;
+    __m128i sum = _mm_setzero_si128();
+    __m128i a0 = _mm_loadu_si128((const __m128i*)&src1[i]);
+    __m128i b0 = _mm_loadu_si128((const __m128i*)&src2[i]);
+    i += 16;
+    while (i <= limit) {
+      const __m128i a1 = _mm_loadu_si128((const __m128i*)&src1[i]);
+      const __m128i b1 = _mm_loadu_si128((const __m128i*)&src2[i]);
+      __m128i sum2;
+      i += 16;
+      SubtractAndSquare_SSE2(a0, b0, &sum1);
+      sum = _mm_add_epi32(sum, sum1);
+      a0 = _mm_loadu_si128((const __m128i*)&src1[i]);
+      b0 = _mm_loadu_si128((const __m128i*)&src2[i]);
+      i += 16;
+      SubtractAndSquare_SSE2(a1, b1, &sum2);
+      sum = _mm_add_epi32(sum, sum2);
+    }
+    SubtractAndSquare_SSE2(a0, b0, &sum1);
+    sum = _mm_add_epi32(sum, sum1);
+    _mm_storeu_si128((__m128i*)tmp, sum);
+    sse2 += (tmp[3] + tmp[2] + tmp[1] + tmp[0]);
+  }
+
+  for (; i < len; ++i) {
+    const int32_t diff = src1[i] - src2[i];
+    sse2 += diff * diff;
+  }
+  return sse2;
+}
+#endif  // !defined(WEBP_DISABLE_STATS)
+
+#if !defined(WEBP_REDUCE_SIZE)
+
+static uint32_t HorizontalAdd16b_SSE2(const __m128i* const m) {
+  uint16_t tmp[8];
+  const __m128i a = _mm_srli_si128(*m, 8);
+  const __m128i b = _mm_add_epi16(*m, a);
+  _mm_storeu_si128((__m128i*)tmp, b);
+  return (uint32_t)tmp[3] + tmp[2] + tmp[1] + tmp[0];
+}
+
+static uint32_t HorizontalAdd32b_SSE2(const __m128i* const m) {
+  const __m128i a = _mm_srli_si128(*m, 8);
+  const __m128i b = _mm_add_epi32(*m, a);
+  const __m128i c = _mm_add_epi32(b, _mm_srli_si128(b, 4));
+  return (uint32_t)_mm_cvtsi128_si32(c);
+}
+
+static const uint16_t kWeight[] = { 1, 2, 3, 4, 3, 2, 1, 0 };
+
+#define ACCUMULATE_ROW(WEIGHT) do { \
+  /* compute row weight (Wx * Wy) */ \
+  const __m128i Wy = _mm_set1_epi16((WEIGHT)); \
+  const __m128i W = _mm_mullo_epi16(Wx, Wy); \
+  /* process 8 bytes at a time (7 bytes, actually) */ \
+  const __m128i a0 = _mm_loadl_epi64((const __m128i*)src1); \
+  const __m128i b0 = _mm_loadl_epi64((const __m128i*)src2); \
+  /* convert to 16b and multiply by weight */ \
+  const __m128i a1 = _mm_unpacklo_epi8(a0, zero); \
+  const __m128i b1 = _mm_unpacklo_epi8(b0, zero); \
+  const __m128i wa1 = _mm_mullo_epi16(a1, W); \
+  const __m128i wb1 = _mm_mullo_epi16(b1, W); \
+  /* accumulate */ \
+  xm = _mm_add_epi16(xm, wa1); \
+  ym = _mm_add_epi16(ym, wb1); \
+  xxm = _mm_add_epi32(xxm, _mm_madd_epi16(a1, wa1)); \
+  xym = _mm_add_epi32(xym, _mm_madd_epi16(a1, wb1)); \
+  yym = _mm_add_epi32(yym, _mm_madd_epi16(b1, wb1)); \
+  src1 += stride1; \
+  src2 += stride2; \
+} while (0)
+
+static double SSIMGet_SSE2(const uint8_t* src1, int stride1,
+                           const uint8_t* src2, int stride2) {
+  VP8DistoStats stats;
+  const __m128i zero = _mm_setzero_si128();
+  __m128i xm = zero, ym = zero;                // 16b accums
+  __m128i xxm = zero, yym = zero, xym = zero;  // 32b accum
+  const __m128i Wx = _mm_loadu_si128((const __m128i*)kWeight);
+  assert(2 * VP8_SSIM_KERNEL + 1 == 7);
+  ACCUMULATE_ROW(1);
+  ACCUMULATE_ROW(2);
+  ACCUMULATE_ROW(3);
+  ACCUMULATE_ROW(4);
+  ACCUMULATE_ROW(3);
+  ACCUMULATE_ROW(2);
+  ACCUMULATE_ROW(1);
+  stats.xm  = HorizontalAdd16b_SSE2(&xm);
+  stats.ym  = HorizontalAdd16b_SSE2(&ym);
+  stats.xxm = HorizontalAdd32b_SSE2(&xxm);
+  stats.xym = HorizontalAdd32b_SSE2(&xym);
+  stats.yym = HorizontalAdd32b_SSE2(&yym);
+  return VP8SSIMFromStats(&stats);
+}
+
+#endif  // !defined(WEBP_REDUCE_SIZE)
+
+extern void VP8SSIMDspInitSSE2(void);
+
+WEBP_TSAN_IGNORE_FUNCTION void VP8SSIMDspInitSSE2(void) {
+#if !defined(WEBP_DISABLE_STATS)
+  VP8AccumulateSSE = AccumulateSSE_SSE2;
+#endif
+#if !defined(WEBP_REDUCE_SIZE)
+  VP8SSIMGet = SSIMGet_SSE2;
+#endif
+}
+
+#else  // !WEBP_USE_SSE2
+
+WEBP_DSP_INIT_STUB(VP8SSIMDspInitSSE2)
+
+#endif  // WEBP_USE_SSE2
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_upsampling.c b/vendor/github.com/sizeofint/webpanimation/dsp_upsampling.c
new file mode 100644
index 00000000..2d9bd719
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_upsampling.c
@@ -0,0 +1,327 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// YUV to RGB upsampling functions.
+//
+// Author: somnath@google.com (Somnath Banerjee)
+
+#include "dsp_dsp.h"
+#include "dsp_yuv.h"
+
+#include <assert.h>
+
+//------------------------------------------------------------------------------
+// Fancy upsampler
+
+#ifdef FANCY_UPSAMPLING
+
+// Fancy upsampling functions to convert YUV to RGB
+WebPUpsampleLinePairFunc WebPUpsamplers[MODE_LAST];
+
+// Given samples laid out in a square as:
+//  [a b]
+//  [c d]
+// we interpolate u/v as:
+//  ([9*a + 3*b + 3*c +   d    3*a + 9*b + 3*c +   d] + [8 8]) / 16
+//  ([3*a +   b + 9*c + 3*d      a + 3*b + 3*c + 9*d]   [8 8]) / 16
+
+// We process u and v together stashed into 32bit (16bit each).
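+// The packed arithmetic in UPSAMPLE_FUNC below realizes these weights without
+// overflowing either 16-bit half: with a = tl_uv, b = t_uv, c = l_uv, d = uv,
+//   avg     = a + b + c + d + 8
+//   diag_12 = (avg + 2 * (b + c)) >> 3  ->  (a + 3*b + 3*c + d + 8) / 8
+//   uv0     = (diag_12 + a) >> 1        ->  ~(9*a + 3*b + 3*c + d + 8) / 16
+// (up to shift truncation), which is the first formula above. Every
+// intermediate stays below 2^12 per 16-bit half, so the u and v halves of
+// the 32-bit word never carry into each other.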
+#define LOAD_UV(u, v) ((u) | ((v) << 16)) + +#define UPSAMPLE_FUNC(FUNC_NAME, FUNC, XSTEP) \ +static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \ + const uint8_t* top_u, const uint8_t* top_v, \ + const uint8_t* cur_u, const uint8_t* cur_v, \ + uint8_t* top_dst, uint8_t* bottom_dst, int len) { \ + int x; \ + const int last_pixel_pair = (len - 1) >> 1; \ + uint32_t tl_uv = LOAD_UV(top_u[0], top_v[0]); /* top-left sample */ \ + uint32_t l_uv = LOAD_UV(cur_u[0], cur_v[0]); /* left-sample */ \ + assert(top_y != NULL); \ + { \ + const uint32_t uv0 = (3 * tl_uv + l_uv + 0x00020002u) >> 2; \ + FUNC(top_y[0], uv0 & 0xff, (uv0 >> 16), top_dst); \ + } \ + if (bottom_y != NULL) { \ + const uint32_t uv0 = (3 * l_uv + tl_uv + 0x00020002u) >> 2; \ + FUNC(bottom_y[0], uv0 & 0xff, (uv0 >> 16), bottom_dst); \ + } \ + for (x = 1; x <= last_pixel_pair; ++x) { \ + const uint32_t t_uv = LOAD_UV(top_u[x], top_v[x]); /* top sample */ \ + const uint32_t uv = LOAD_UV(cur_u[x], cur_v[x]); /* sample */ \ + /* precompute invariant values associated with first and second diagonals*/\ + const uint32_t avg = tl_uv + t_uv + l_uv + uv + 0x00080008u; \ + const uint32_t diag_12 = (avg + 2 * (t_uv + l_uv)) >> 3; \ + const uint32_t diag_03 = (avg + 2 * (tl_uv + uv)) >> 3; \ + { \ + const uint32_t uv0 = (diag_12 + tl_uv) >> 1; \ + const uint32_t uv1 = (diag_03 + t_uv) >> 1; \ + FUNC(top_y[2 * x - 1], uv0 & 0xff, (uv0 >> 16), \ + top_dst + (2 * x - 1) * (XSTEP)); \ + FUNC(top_y[2 * x - 0], uv1 & 0xff, (uv1 >> 16), \ + top_dst + (2 * x - 0) * (XSTEP)); \ + } \ + if (bottom_y != NULL) { \ + const uint32_t uv0 = (diag_03 + l_uv) >> 1; \ + const uint32_t uv1 = (diag_12 + uv) >> 1; \ + FUNC(bottom_y[2 * x - 1], uv0 & 0xff, (uv0 >> 16), \ + bottom_dst + (2 * x - 1) * (XSTEP)); \ + FUNC(bottom_y[2 * x + 0], uv1 & 0xff, (uv1 >> 16), \ + bottom_dst + (2 * x + 0) * (XSTEP)); \ + } \ + tl_uv = t_uv; \ + l_uv = uv; \ + } \ + if (!(len & 1)) { \ + { \ + const uint32_t uv0 = (3 * tl_uv + l_uv + 0x00020002u) >> 2; \ + FUNC(top_y[len - 1], uv0 & 0xff, (uv0 >> 16), \ + top_dst + (len - 1) * (XSTEP)); \ + } \ + if (bottom_y != NULL) { \ + const uint32_t uv0 = (3 * l_uv + tl_uv + 0x00020002u) >> 2; \ + FUNC(bottom_y[len - 1], uv0 & 0xff, (uv0 >> 16), \ + bottom_dst + (len - 1) * (XSTEP)); \ + } \ + } \ +} + +// All variants implemented. 
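+// Each UPSAMPLE_FUNC(name, per-pixel converter, x-stride) instantiation below
+// binds the shared two-row loop above to one output layout, e.g.
+// UpsampleRgbaLinePair_C writes 4 bytes per pixel through VP8YuvToRgba.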
+#if !WEBP_NEON_OMIT_C_CODE +UPSAMPLE_FUNC(UpsampleRgbaLinePair_C, VP8YuvToRgba, 4) +UPSAMPLE_FUNC(UpsampleBgraLinePair_C, VP8YuvToBgra, 4) +#if !defined(WEBP_REDUCE_CSP) +UPSAMPLE_FUNC(UpsampleArgbLinePair_C, VP8YuvToArgb, 4) +UPSAMPLE_FUNC(UpsampleRgbLinePair_C, VP8YuvToRgb, 3) +UPSAMPLE_FUNC(UpsampleBgrLinePair_C, VP8YuvToBgr, 3) +UPSAMPLE_FUNC(UpsampleRgba4444LinePair_C, VP8YuvToRgba4444, 2) +UPSAMPLE_FUNC(UpsampleRgb565LinePair_C, VP8YuvToRgb565, 2) +#else +static void EmptyUpsampleFunc(const uint8_t* top_y, const uint8_t* bottom_y, + const uint8_t* top_u, const uint8_t* top_v, + const uint8_t* cur_u, const uint8_t* cur_v, + uint8_t* top_dst, uint8_t* bottom_dst, int len) { + (void)top_y; + (void)bottom_y; + (void)top_u; + (void)top_v; + (void)cur_u; + (void)cur_v; + (void)top_dst; + (void)bottom_dst; + (void)len; + assert(0); // COLORSPACE SUPPORT NOT COMPILED +} +#define UpsampleArgbLinePair_C EmptyUpsampleFunc +#define UpsampleRgbLinePair_C EmptyUpsampleFunc +#define UpsampleBgrLinePair_C EmptyUpsampleFunc +#define UpsampleRgba4444LinePair_C EmptyUpsampleFunc +#define UpsampleRgb565LinePair_C EmptyUpsampleFunc +#endif // WEBP_REDUCE_CSP + +#endif + +#undef LOAD_UV +#undef UPSAMPLE_FUNC + +#endif // FANCY_UPSAMPLING + +//------------------------------------------------------------------------------ + +#if !defined(FANCY_UPSAMPLING) +#define DUAL_SAMPLE_FUNC(FUNC_NAME, FUNC) \ +static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bot_y, \ + const uint8_t* top_u, const uint8_t* top_v, \ + const uint8_t* bot_u, const uint8_t* bot_v, \ + uint8_t* top_dst, uint8_t* bot_dst, int len) { \ + const int half_len = len >> 1; \ + int x; \ + assert(top_dst != NULL); \ + { \ + for (x = 0; x < half_len; ++x) { \ + FUNC(top_y[2 * x + 0], top_u[x], top_v[x], top_dst + 8 * x + 0); \ + FUNC(top_y[2 * x + 1], top_u[x], top_v[x], top_dst + 8 * x + 4); \ + } \ + if (len & 1) FUNC(top_y[2 * x + 0], top_u[x], top_v[x], top_dst + 8 * x); \ + } \ + if (bot_dst != NULL) { \ + for (x = 0; x < half_len; ++x) { \ + FUNC(bot_y[2 * x + 0], bot_u[x], bot_v[x], bot_dst + 8 * x + 0); \ + FUNC(bot_y[2 * x + 1], bot_u[x], bot_v[x], bot_dst + 8 * x + 4); \ + } \ + if (len & 1) FUNC(bot_y[2 * x + 0], bot_u[x], bot_v[x], bot_dst + 8 * x); \ + } \ +} + +DUAL_SAMPLE_FUNC(DualLineSamplerBGRA, VP8YuvToBgra) +DUAL_SAMPLE_FUNC(DualLineSamplerARGB, VP8YuvToArgb) +#undef DUAL_SAMPLE_FUNC + +#endif // !FANCY_UPSAMPLING + +WebPUpsampleLinePairFunc WebPGetLinePairConverter(int alpha_is_last) { + WebPInitUpsamplers(); +#ifdef FANCY_UPSAMPLING + return WebPUpsamplers[alpha_is_last ? MODE_BGRA : MODE_ARGB]; +#else + return (alpha_is_last ? 
DualLineSamplerBGRA : DualLineSamplerARGB); +#endif +} + +//------------------------------------------------------------------------------ +// YUV444 converter + +#define YUV444_FUNC(FUNC_NAME, FUNC, XSTEP) \ +extern void FUNC_NAME(const uint8_t* y, const uint8_t* u, const uint8_t* v, \ + uint8_t* dst, int len); \ +void FUNC_NAME(const uint8_t* y, const uint8_t* u, const uint8_t* v, \ + uint8_t* dst, int len) { \ + int i; \ + for (i = 0; i < len; ++i) FUNC(y[i], u[i], v[i], &dst[i * (XSTEP)]); \ +} + +YUV444_FUNC(WebPYuv444ToRgba_C, VP8YuvToRgba, 4) +YUV444_FUNC(WebPYuv444ToBgra_C, VP8YuvToBgra, 4) +#if !defined(WEBP_REDUCE_CSP) +YUV444_FUNC(WebPYuv444ToRgb_C, VP8YuvToRgb, 3) +YUV444_FUNC(WebPYuv444ToBgr_C, VP8YuvToBgr, 3) +YUV444_FUNC(WebPYuv444ToArgb_C, VP8YuvToArgb, 4) +YUV444_FUNC(WebPYuv444ToRgba4444_C, VP8YuvToRgba4444, 2) +YUV444_FUNC(WebPYuv444ToRgb565_C, VP8YuvToRgb565, 2) +#else +static void EmptyYuv444Func(const uint8_t* y, + const uint8_t* u, const uint8_t* v, + uint8_t* dst, int len) { + (void)y; + (void)u; + (void)v; + (void)dst; + (void)len; +} +#define WebPYuv444ToRgb_C EmptyYuv444Func +#define WebPYuv444ToBgr_C EmptyYuv444Func +#define WebPYuv444ToArgb_C EmptyYuv444Func +#define WebPYuv444ToRgba4444_C EmptyYuv444Func +#define WebPYuv444ToRgb565_C EmptyYuv444Func +#endif // WEBP_REDUCE_CSP + +#undef YUV444_FUNC + +WebPYUV444Converter WebPYUV444Converters[MODE_LAST]; + +extern void WebPInitYUV444ConvertersMIPSdspR2(void); +extern void WebPInitYUV444ConvertersSSE2(void); +extern void WebPInitYUV444ConvertersSSE41(void); + +WEBP_DSP_INIT_FUNC(WebPInitYUV444Converters) { + WebPYUV444Converters[MODE_RGBA] = WebPYuv444ToRgba_C; + WebPYUV444Converters[MODE_BGRA] = WebPYuv444ToBgra_C; + WebPYUV444Converters[MODE_RGB] = WebPYuv444ToRgb_C; + WebPYUV444Converters[MODE_BGR] = WebPYuv444ToBgr_C; + WebPYUV444Converters[MODE_ARGB] = WebPYuv444ToArgb_C; + WebPYUV444Converters[MODE_RGBA_4444] = WebPYuv444ToRgba4444_C; + WebPYUV444Converters[MODE_RGB_565] = WebPYuv444ToRgb565_C; + WebPYUV444Converters[MODE_rgbA] = WebPYuv444ToRgba_C; + WebPYUV444Converters[MODE_bgrA] = WebPYuv444ToBgra_C; + WebPYUV444Converters[MODE_Argb] = WebPYuv444ToArgb_C; + WebPYUV444Converters[MODE_rgbA_4444] = WebPYuv444ToRgba4444_C; + + if (VP8GetCPUInfo != NULL) { +#if defined(WEBP_USE_SSE2) + if (VP8GetCPUInfo(kSSE2)) { + WebPInitYUV444ConvertersSSE2(); + } +#endif +#if defined(WEBP_USE_SSE41) + if (VP8GetCPUInfo(kSSE4_1)) { + WebPInitYUV444ConvertersSSE41(); + } +#endif +#if defined(WEBP_USE_MIPS_DSP_R2) + if (VP8GetCPUInfo(kMIPSdspR2)) { + WebPInitYUV444ConvertersMIPSdspR2(); + } +#endif + } +} + +//------------------------------------------------------------------------------ +// Main calls + +extern void WebPInitUpsamplersSSE2(void); +extern void WebPInitUpsamplersSSE41(void); +extern void WebPInitUpsamplersNEON(void); +extern void WebPInitUpsamplersMIPSdspR2(void); +extern void WebPInitUpsamplersMSA(void); + +WEBP_DSP_INIT_FUNC(WebPInitUpsamplers) { +#ifdef FANCY_UPSAMPLING +#if !WEBP_NEON_OMIT_C_CODE + WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePair_C; + WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePair_C; + WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePair_C; + WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePair_C; + WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePair_C; + WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePair_C; + WebPUpsamplers[MODE_ARGB] = UpsampleArgbLinePair_C; + WebPUpsamplers[MODE_RGBA_4444] = UpsampleRgba4444LinePair_C; + WebPUpsamplers[MODE_RGB_565] = UpsampleRgb565LinePair_C; + 
WebPUpsamplers[MODE_Argb] = UpsampleArgbLinePair_C; + WebPUpsamplers[MODE_rgbA_4444] = UpsampleRgba4444LinePair_C; +#endif + + // If defined, use CPUInfo() to overwrite some pointers with faster versions. + if (VP8GetCPUInfo != NULL) { +#if defined(WEBP_USE_SSE2) + if (VP8GetCPUInfo(kSSE2)) { + WebPInitUpsamplersSSE2(); + } +#endif +#if defined(WEBP_USE_SSE41) + if (VP8GetCPUInfo(kSSE4_1)) { + WebPInitUpsamplersSSE41(); + } +#endif +#if defined(WEBP_USE_MIPS_DSP_R2) + if (VP8GetCPUInfo(kMIPSdspR2)) { + WebPInitUpsamplersMIPSdspR2(); + } +#endif +#if defined(WEBP_USE_MSA) + if (VP8GetCPUInfo(kMSA)) { + WebPInitUpsamplersMSA(); + } +#endif + } + +#if defined(WEBP_USE_NEON) + if (WEBP_NEON_OMIT_C_CODE || + (VP8GetCPUInfo != NULL && VP8GetCPUInfo(kNEON))) { + WebPInitUpsamplersNEON(); + } +#endif + + assert(WebPUpsamplers[MODE_RGBA] != NULL); + assert(WebPUpsamplers[MODE_BGRA] != NULL); + assert(WebPUpsamplers[MODE_rgbA] != NULL); + assert(WebPUpsamplers[MODE_bgrA] != NULL); +#if !defined(WEBP_REDUCE_CSP) || !WEBP_NEON_OMIT_C_CODE + assert(WebPUpsamplers[MODE_RGB] != NULL); + assert(WebPUpsamplers[MODE_BGR] != NULL); + assert(WebPUpsamplers[MODE_ARGB] != NULL); + assert(WebPUpsamplers[MODE_RGBA_4444] != NULL); + assert(WebPUpsamplers[MODE_RGB_565] != NULL); + assert(WebPUpsamplers[MODE_Argb] != NULL); + assert(WebPUpsamplers[MODE_rgbA_4444] != NULL); +#endif + +#endif // FANCY_UPSAMPLING +} + +//------------------------------------------------------------------------------ diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_upsampling_mips_dsp_r2.c b/vendor/github.com/sizeofint/webpanimation/dsp_upsampling_mips_dsp_r2.c new file mode 100644 index 00000000..f43b8d0a --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_upsampling_mips_dsp_r2.c @@ -0,0 +1,291 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// YUV to RGB upsampling functions. 
+//
+// Author(s): Branimir Vasic (branimir.vasic@imgtec.com)
+//            Djordje Pesut  (djordje.pesut@imgtec.com)
+
+#include "dsp_dsp.h"
+
+#if defined(WEBP_USE_MIPS_DSP_R2)
+
+#include <assert.h>
+#include "dsp_yuv.h"
+
+#define YUV_TO_RGB(Y, U, V, R, G, B) do { \
+    const int t1 = MultHi(Y, 19077); \
+    const int t2 = MultHi(V, 13320); \
+    R = MultHi(V, 26149); \
+    G = MultHi(U, 6419); \
+    B = MultHi(U, 33050); \
+    R = t1 + R; \
+    G = t1 - G; \
+    B = t1 + B; \
+    R = R - 14234; \
+    G = G - t2 + 8708; \
+    B = B - 17685; \
+    __asm__ volatile ( \
+      "shll_s.w         %[" #R "], %[" #R "], 17 \n\t" \
+      "shll_s.w         %[" #G "], %[" #G "], 17 \n\t" \
+      "shll_s.w         %[" #B "], %[" #B "], 17 \n\t" \
+      "precrqu_s.qb.ph  %[" #R "], %[" #R "], $zero \n\t" \
+      "precrqu_s.qb.ph  %[" #G "], %[" #G "], $zero \n\t" \
+      "precrqu_s.qb.ph  %[" #B "], %[" #B "], $zero \n\t" \
+      "srl              %[" #R "], %[" #R "], 24 \n\t" \
+      "srl              %[" #G "], %[" #G "], 24 \n\t" \
+      "srl              %[" #B "], %[" #B "], 24 \n\t" \
+      : [R]"+r"(R), [G]"+r"(G), [B]"+r"(B) \
+      : \
+    ); \
+  } while (0)
+
+#if !defined(WEBP_REDUCE_CSP)
+static WEBP_INLINE void YuvToRgb(int y, int u, int v, uint8_t* const rgb) {
+  int r, g, b;
+  YUV_TO_RGB(y, u, v, r, g, b);
+  rgb[0] = r;
+  rgb[1] = g;
+  rgb[2] = b;
+}
+static WEBP_INLINE void YuvToBgr(int y, int u, int v, uint8_t* const bgr) {
+  int r, g, b;
+  YUV_TO_RGB(y, u, v, r, g, b);
+  bgr[0] = b;
+  bgr[1] = g;
+  bgr[2] = r;
+}
+static WEBP_INLINE void YuvToRgb565(int y, int u, int v, uint8_t* const rgb) {
+  int r, g, b;
+  YUV_TO_RGB(y, u, v, r, g, b);
+  {
+    const int rg = (r & 0xf8) | (g >> 5);
+    const int gb = ((g << 3) & 0xe0) | (b >> 3);
+#if (WEBP_SWAP_16BIT_CSP == 1)
+    rgb[0] = gb;
+    rgb[1] = rg;
+#else
+    rgb[0] = rg;
+    rgb[1] = gb;
+#endif
+  }
+}
+static WEBP_INLINE void YuvToRgba4444(int y, int u, int v,
+                                      uint8_t* const argb) {
+  int r, g, b;
+  YUV_TO_RGB(y, u, v, r, g, b);
+  {
+    const int rg = (r & 0xf0) | (g >> 4);
+    const int ba = (b & 0xf0) | 0x0f;     // overwrite the lower 4 bits
+#if (WEBP_SWAP_16BIT_CSP == 1)
+    argb[0] = ba;
+    argb[1] = rg;
+#else
+    argb[0] = rg;
+    argb[1] = ba;
+#endif
+  }
+}
+#endif  // WEBP_REDUCE_CSP
+
+//-----------------------------------------------------------------------------
+// Alpha handling variants
+
+#if !defined(WEBP_REDUCE_CSP)
+static WEBP_INLINE void YuvToArgb(uint8_t y, uint8_t u, uint8_t v,
+                                  uint8_t* const argb) {
+  int r, g, b;
+  YUV_TO_RGB(y, u, v, r, g, b);
+  argb[0] = 0xff;
+  argb[1] = r;
+  argb[2] = g;
+  argb[3] = b;
+}
+#endif  // WEBP_REDUCE_CSP
+static WEBP_INLINE void YuvToBgra(uint8_t y, uint8_t u, uint8_t v,
+                                  uint8_t* const bgra) {
+  int r, g, b;
+  YUV_TO_RGB(y, u, v, r, g, b);
+  bgra[0] = b;
+  bgra[1] = g;
+  bgra[2] = r;
+  bgra[3] = 0xff;
+}
+static WEBP_INLINE void YuvToRgba(uint8_t y, uint8_t u, uint8_t v,
+                                  uint8_t* const rgba) {
+  int r, g, b;
+  YUV_TO_RGB(y, u, v, r, g, b);
+  rgba[0] = r;
+  rgba[1] = g;
+  rgba[2] = b;
+  rgba[3] = 0xff;
+}
+
+//------------------------------------------------------------------------------
+// Fancy upsampler
+
+#ifdef FANCY_UPSAMPLING
+
+// Given samples laid out in a square as:
+//  [a b]
+//  [c d]
+// we interpolate u/v as:
+//  ([9*a + 3*b + 3*c +   d    3*a + 9*b + 3*c +   d] + [8 8]) / 16
+//  ([3*a +   b + 9*c + 3*d      a + 3*b + 3*c + 9*d]   [8 8]) / 16
+
+// We process u and v together stashed into 32bit (16bit each).
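+// Fixed-point footnote for YUV_TO_RGB above: the multipliers are BT.601
+// limited-range coefficients in Q14 (19077 ~ 1.1644 * 2^14, where
+// 1.1644 ~ 255/219), applied through MultHi() (>> 8) so intermediates stay
+// in Q6. The constants 14234, 8708 and 17685 fold the -16/-128 input offsets
+// together with a +32 (0.5 in Q6) rounding bias, and the
+// shll_s.w/precrqu_s.qb.ph sequence saturates to [0, 255] while extracting
+// the integer byte.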
+#define LOAD_UV(u, v) ((u) | ((v) << 16)) + +#define UPSAMPLE_FUNC(FUNC_NAME, FUNC, XSTEP) \ +static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \ + const uint8_t* top_u, const uint8_t* top_v, \ + const uint8_t* cur_u, const uint8_t* cur_v, \ + uint8_t* top_dst, uint8_t* bottom_dst, int len) { \ + int x; \ + const int last_pixel_pair = (len - 1) >> 1; \ + uint32_t tl_uv = LOAD_UV(top_u[0], top_v[0]); /* top-left sample */ \ + uint32_t l_uv = LOAD_UV(cur_u[0], cur_v[0]); /* left-sample */ \ + assert(top_y != NULL); \ + { \ + const uint32_t uv0 = (3 * tl_uv + l_uv + 0x00020002u) >> 2; \ + FUNC(top_y[0], uv0 & 0xff, (uv0 >> 16), top_dst); \ + } \ + if (bottom_y != NULL) { \ + const uint32_t uv0 = (3 * l_uv + tl_uv + 0x00020002u) >> 2; \ + FUNC(bottom_y[0], uv0 & 0xff, (uv0 >> 16), bottom_dst); \ + } \ + for (x = 1; x <= last_pixel_pair; ++x) { \ + const uint32_t t_uv = LOAD_UV(top_u[x], top_v[x]); /* top sample */ \ + const uint32_t uv = LOAD_UV(cur_u[x], cur_v[x]); /* sample */ \ + /* precompute invariant values associated with first and second diagonals*/\ + const uint32_t avg = tl_uv + t_uv + l_uv + uv + 0x00080008u; \ + const uint32_t diag_12 = (avg + 2 * (t_uv + l_uv)) >> 3; \ + const uint32_t diag_03 = (avg + 2 * (tl_uv + uv)) >> 3; \ + { \ + const uint32_t uv0 = (diag_12 + tl_uv) >> 1; \ + const uint32_t uv1 = (diag_03 + t_uv) >> 1; \ + FUNC(top_y[2 * x - 1], uv0 & 0xff, (uv0 >> 16), \ + top_dst + (2 * x - 1) * XSTEP); \ + FUNC(top_y[2 * x - 0], uv1 & 0xff, (uv1 >> 16), \ + top_dst + (2 * x - 0) * XSTEP); \ + } \ + if (bottom_y != NULL) { \ + const uint32_t uv0 = (diag_03 + l_uv) >> 1; \ + const uint32_t uv1 = (diag_12 + uv) >> 1; \ + FUNC(bottom_y[2 * x - 1], uv0 & 0xff, (uv0 >> 16), \ + bottom_dst + (2 * x - 1) * XSTEP); \ + FUNC(bottom_y[2 * x + 0], uv1 & 0xff, (uv1 >> 16), \ + bottom_dst + (2 * x + 0) * XSTEP); \ + } \ + tl_uv = t_uv; \ + l_uv = uv; \ + } \ + if (!(len & 1)) { \ + { \ + const uint32_t uv0 = (3 * tl_uv + l_uv + 0x00020002u) >> 2; \ + FUNC(top_y[len - 1], uv0 & 0xff, (uv0 >> 16), \ + top_dst + (len - 1) * XSTEP); \ + } \ + if (bottom_y != NULL) { \ + const uint32_t uv0 = (3 * l_uv + tl_uv + 0x00020002u) >> 2; \ + FUNC(bottom_y[len - 1], uv0 & 0xff, (uv0 >> 16), \ + bottom_dst + (len - 1) * XSTEP); \ + } \ + } \ +} + +// All variants implemented. 
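+// The instantiations below mirror the generic versions in dsp_upsampling.c;
+// only the per-pixel converters differ, using the saturating DSPr2 asm
+// sequence above instead of the portable helpers from dsp_yuv.h.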
+UPSAMPLE_FUNC(UpsampleRgbaLinePair, YuvToRgba, 4) +UPSAMPLE_FUNC(UpsampleBgraLinePair, YuvToBgra, 4) +#if !defined(WEBP_REDUCE_CSP) +UPSAMPLE_FUNC(UpsampleRgbLinePair, YuvToRgb, 3) +UPSAMPLE_FUNC(UpsampleBgrLinePair, YuvToBgr, 3) +UPSAMPLE_FUNC(UpsampleArgbLinePair, YuvToArgb, 4) +UPSAMPLE_FUNC(UpsampleRgba4444LinePair, YuvToRgba4444, 2) +UPSAMPLE_FUNC(UpsampleRgb565LinePair, YuvToRgb565, 2) +#endif // WEBP_REDUCE_CSP + +#undef LOAD_UV +#undef UPSAMPLE_FUNC + +//------------------------------------------------------------------------------ +// Entry point + +extern void WebPInitUpsamplersMIPSdspR2(void); + +WEBP_TSAN_IGNORE_FUNCTION void WebPInitUpsamplersMIPSdspR2(void) { + WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePair; + WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePair; + WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePair; + WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePair; +#if !defined(WEBP_REDUCE_CSP) + WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePair; + WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePair; + WebPUpsamplers[MODE_ARGB] = UpsampleArgbLinePair; + WebPUpsamplers[MODE_RGBA_4444] = UpsampleRgba4444LinePair; + WebPUpsamplers[MODE_RGB_565] = UpsampleRgb565LinePair; + WebPUpsamplers[MODE_Argb] = UpsampleArgbLinePair; + WebPUpsamplers[MODE_rgbA_4444] = UpsampleRgba4444LinePair; +#endif // WEBP_REDUCE_CSP +} + +#endif // FANCY_UPSAMPLING + +//------------------------------------------------------------------------------ +// YUV444 converter + +#define YUV444_FUNC(FUNC_NAME, FUNC, XSTEP) \ +static void FUNC_NAME(const uint8_t* y, const uint8_t* u, const uint8_t* v, \ + uint8_t* dst, int len) { \ + int i; \ + for (i = 0; i < len; ++i) FUNC(y[i], u[i], v[i], &dst[i * XSTEP]); \ +} + +YUV444_FUNC(Yuv444ToRgba, YuvToRgba, 4) +YUV444_FUNC(Yuv444ToBgra, YuvToBgra, 4) +#if !defined(WEBP_REDUCE_CSP) +YUV444_FUNC(Yuv444ToRgb, YuvToRgb, 3) +YUV444_FUNC(Yuv444ToBgr, YuvToBgr, 3) +YUV444_FUNC(Yuv444ToArgb, YuvToArgb, 4) +YUV444_FUNC(Yuv444ToRgba4444, YuvToRgba4444, 2) +YUV444_FUNC(Yuv444ToRgb565, YuvToRgb565, 2) +#endif // WEBP_REDUCE_CSP + +#undef YUV444_FUNC + +//------------------------------------------------------------------------------ +// Entry point + +extern void WebPInitYUV444ConvertersMIPSdspR2(void); + +WEBP_TSAN_IGNORE_FUNCTION void WebPInitYUV444ConvertersMIPSdspR2(void) { + WebPYUV444Converters[MODE_RGBA] = Yuv444ToRgba; + WebPYUV444Converters[MODE_BGRA] = Yuv444ToBgra; + WebPYUV444Converters[MODE_rgbA] = Yuv444ToRgba; + WebPYUV444Converters[MODE_bgrA] = Yuv444ToBgra; +#if !defined(WEBP_REDUCE_CSP) + WebPYUV444Converters[MODE_RGB] = Yuv444ToRgb; + WebPYUV444Converters[MODE_BGR] = Yuv444ToBgr; + WebPYUV444Converters[MODE_ARGB] = Yuv444ToArgb; + WebPYUV444Converters[MODE_RGBA_4444] = Yuv444ToRgba4444; + WebPYUV444Converters[MODE_RGB_565] = Yuv444ToRgb565; + WebPYUV444Converters[MODE_Argb] = Yuv444ToArgb; + WebPYUV444Converters[MODE_rgbA_4444] = Yuv444ToRgba4444; +#endif // WEBP_REDUCE_CSP +} + +#else // !WEBP_USE_MIPS_DSP_R2 + +WEBP_DSP_INIT_STUB(WebPInitYUV444ConvertersMIPSdspR2) + +#endif // WEBP_USE_MIPS_DSP_R2 + +#if !(defined(FANCY_UPSAMPLING) && defined(WEBP_USE_MIPS_DSP_R2)) +WEBP_DSP_INIT_STUB(WebPInitUpsamplersMIPSdspR2) +#endif diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_upsampling_msa.c b/vendor/github.com/sizeofint/webpanimation/dsp_upsampling_msa.c new file mode 100644 index 00000000..a4d44f14 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_upsampling_msa.c @@ -0,0 +1,688 @@ +// Copyright 2016 Google Inc. All Rights Reserved. 
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// MSA version of YUV to RGB upsampling functions.
+//
+// Author: Prashant Patil (prashant.patil@imgtec.com)
+
+#include <string.h>
+#include "dsp_dsp.h"
+
+#if defined(WEBP_USE_MSA)
+
+#include "dsp_msa_macro.h"
+#include "dsp_yuv.h"
+
+#ifdef FANCY_UPSAMPLING
+
+#define ILVR_UW2(in, out0, out1) do { \
+  const v8i16 t0 = (v8i16)__msa_ilvr_b((v16i8)zero, (v16i8)in); \
+  out0 = (v4u32)__msa_ilvr_h((v8i16)zero, t0); \
+  out1 = (v4u32)__msa_ilvl_h((v8i16)zero, t0); \
+} while (0)
+
+#define ILVRL_UW4(in, out0, out1, out2, out3) do { \
+  v16u8 t0, t1; \
+  ILVRL_B2_UB(zero, in, t0, t1); \
+  ILVRL_H2_UW(zero, t0, out0, out1); \
+  ILVRL_H2_UW(zero, t1, out2, out3); \
+} while (0)
+
+#define MULTHI_16(in0, in1, in2, in3, cnst, out0, out1) do { \
+  const v4i32 const0 = (v4i32)__msa_fill_w(cnst * 256); \
+  v4u32 temp0, temp1, temp2, temp3; \
+  MUL4(in0, const0, in1, const0, in2, const0, in3, const0, \
+       temp0, temp1, temp2, temp3); \
+  PCKOD_H2_UH(temp1, temp0, temp3, temp2, out0, out1); \
+} while (0)
+
+#define MULTHI_8(in0, in1, cnst, out0) do { \
+  const v4i32 const0 = (v4i32)__msa_fill_w(cnst * 256); \
+  v4u32 temp0, temp1; \
+  MUL2(in0, const0, in1, const0, temp0, temp1); \
+  out0 = (v8u16)__msa_pckod_h((v8i16)temp1, (v8i16)temp0); \
+} while (0)
+
+#define CALC_R16(y0, y1, v0, v1, dst) do { \
+  const v8i16 const_a = (v8i16)__msa_fill_h(14234); \
+  const v8i16 a0 = __msa_adds_s_h((v8i16)y0, (v8i16)v0); \
+  const v8i16 a1 = __msa_adds_s_h((v8i16)y1, (v8i16)v1); \
+  v8i16 b0 = __msa_subs_s_h(a0, const_a); \
+  v8i16 b1 = __msa_subs_s_h(a1, const_a); \
+  SRAI_H2_SH(b0, b1, 6); \
+  CLIP_SH2_0_255(b0, b1); \
+  dst = (v16u8)__msa_pckev_b((v16i8)b1, (v16i8)b0); \
+} while (0)
+
+#define CALC_R8(y0, v0, dst) do { \
+  const v8i16 const_a = (v8i16)__msa_fill_h(14234); \
+  const v8i16 a0 = __msa_adds_s_h((v8i16)y0, (v8i16)v0); \
+  v8i16 b0 = __msa_subs_s_h(a0, const_a); \
+  b0 = SRAI_H(b0, 6); \
+  CLIP_SH_0_255(b0); \
+  dst = (v16u8)__msa_pckev_b((v16i8)b0, (v16i8)b0); \
+} while (0)
+
+#define CALC_G16(y0, y1, u0, u1, v0, v1, dst) do { \
+  const v8i16 const_a = (v8i16)__msa_fill_h(8708); \
+  v8i16 a0 = __msa_subs_s_h((v8i16)y0, (v8i16)u0); \
+  v8i16 a1 = __msa_subs_s_h((v8i16)y1, (v8i16)u1); \
+  const v8i16 b0 = __msa_subs_s_h(a0, (v8i16)v0); \
+  const v8i16 b1 = __msa_subs_s_h(a1, (v8i16)v1); \
+  a0 = __msa_adds_s_h(b0, const_a); \
+  a1 = __msa_adds_s_h(b1, const_a); \
+  SRAI_H2_SH(a0, a1, 6); \
+  CLIP_SH2_0_255(a0, a1); \
+  dst = (v16u8)__msa_pckev_b((v16i8)a1, (v16i8)a0); \
+} while (0)
+
+#define CALC_G8(y0, u0, v0, dst) do { \
+  const v8i16 const_a = (v8i16)__msa_fill_h(8708); \
+  v8i16 a0 = __msa_subs_s_h((v8i16)y0, (v8i16)u0); \
+  const v8i16 b0 = __msa_subs_s_h(a0, (v8i16)v0); \
+  a0 = __msa_adds_s_h(b0, const_a); \
+  a0 = SRAI_H(a0, 6); \
+  CLIP_SH_0_255(a0); \
+  dst = (v16u8)__msa_pckev_b((v16i8)a0, (v16i8)a0); \
+} while (0)
+
+#define CALC_B16(y0, y1, u0, u1, dst) do { \
+  const v8u16 const_a = (v8u16)__msa_fill_h(17685); \
+  const v8u16 a0 = __msa_adds_u_h((v8u16)y0, u0); \
+  const v8u16 a1 = __msa_adds_u_h((v8u16)y1, u1); \
+  v8u16 b0 = __msa_subs_u_h(a0, const_a); \
+  v8u16 b1 =
__msa_subs_u_h(a1, const_a); \ + SRAI_H2_UH(b0, b1, 6); \ + CLIP_UH2_0_255(b0, b1); \ + dst = (v16u8)__msa_pckev_b((v16i8)b1, (v16i8)b0); \ +} while (0) + +#define CALC_B8(y0, u0, dst) do { \ + const v8u16 const_a = (v8u16)__msa_fill_h(17685); \ + const v8u16 a0 = __msa_adds_u_h((v8u16)y0, u0); \ + v8u16 b0 = __msa_subs_u_h(a0, const_a); \ + b0 = SRAI_H(b0, 6); \ + CLIP_UH_0_255(b0); \ + dst = (v16u8)__msa_pckev_b((v16i8)b0, (v16i8)b0); \ +} while (0) + +#define CALC_RGB16(y, u, v, R, G, B) do { \ + const v16u8 zero = { 0 }; \ + v8u16 y0, y1, u0, u1, v0, v1; \ + v4u32 p0, p1, p2, p3; \ + const v16u8 in_y = LD_UB(y); \ + const v16u8 in_u = LD_UB(u); \ + const v16u8 in_v = LD_UB(v); \ + ILVRL_UW4(in_y, p0, p1, p2, p3); \ + MULTHI_16(p0, p1, p2, p3, 19077, y0, y1); \ + ILVRL_UW4(in_v, p0, p1, p2, p3); \ + MULTHI_16(p0, p1, p2, p3, 26149, v0, v1); \ + CALC_R16(y0, y1, v0, v1, R); \ + MULTHI_16(p0, p1, p2, p3, 13320, v0, v1); \ + ILVRL_UW4(in_u, p0, p1, p2, p3); \ + MULTHI_16(p0, p1, p2, p3, 6419, u0, u1); \ + CALC_G16(y0, y1, u0, u1, v0, v1, G); \ + MULTHI_16(p0, p1, p2, p3, 33050, u0, u1); \ + CALC_B16(y0, y1, u0, u1, B); \ +} while (0) + +#define CALC_RGB8(y, u, v, R, G, B) do { \ + const v16u8 zero = { 0 }; \ + v8u16 y0, u0, v0; \ + v4u32 p0, p1; \ + const v16u8 in_y = LD_UB(y); \ + const v16u8 in_u = LD_UB(u); \ + const v16u8 in_v = LD_UB(v); \ + ILVR_UW2(in_y, p0, p1); \ + MULTHI_8(p0, p1, 19077, y0); \ + ILVR_UW2(in_v, p0, p1); \ + MULTHI_8(p0, p1, 26149, v0); \ + CALC_R8(y0, v0, R); \ + MULTHI_8(p0, p1, 13320, v0); \ + ILVR_UW2(in_u, p0, p1); \ + MULTHI_8(p0, p1, 6419, u0); \ + CALC_G8(y0, u0, v0, G); \ + MULTHI_8(p0, p1, 33050, u0); \ + CALC_B8(y0, u0, B); \ +} while (0) + +#define STORE16_3(a0, a1, a2, dst) do { \ + const v16u8 mask0 = { 0, 1, 16, 2, 3, 17, 4, 5, 18, 6, 7, 19, \ + 8, 9, 20, 10 }; \ + const v16u8 mask1 = { 0, 21, 1, 2, 22, 3, 4, 23, 5, 6, 24, 7, \ + 8, 25, 9, 10 }; \ + const v16u8 mask2 = { 26, 0, 1, 27, 2, 3, 28, 4, 5, 29, 6, 7, \ + 30, 8, 9, 31 }; \ + v16u8 out0, out1, out2, tmp0, tmp1, tmp2; \ + ILVRL_B2_UB(a1, a0, tmp0, tmp1); \ + out0 = VSHF_UB(tmp0, a2, mask0); \ + tmp2 = SLDI_UB(tmp1, tmp0, 11); \ + out1 = VSHF_UB(tmp2, a2, mask1); \ + tmp2 = SLDI_UB(tmp1, tmp1, 6); \ + out2 = VSHF_UB(tmp2, a2, mask2); \ + ST_UB(out0, dst + 0); \ + ST_UB(out1, dst + 16); \ + ST_UB(out2, dst + 32); \ +} while (0) + +#define STORE8_3(a0, a1, a2, dst) do { \ + int64_t out_m; \ + const v16u8 mask0 = { 0, 1, 16, 2, 3, 17, 4, 5, 18, 6, 7, 19, \ + 8, 9, 20, 10 }; \ + const v16u8 mask1 = { 11, 21, 12, 13, 22, 14, 15, 23, \ + 255, 255, 255, 255, 255, 255, 255, 255 }; \ + const v16u8 tmp0 = (v16u8)__msa_ilvr_b((v16i8)a1, (v16i8)a0); \ + v16u8 out0, out1; \ + VSHF_B2_UB(tmp0, a2, tmp0, a2, mask0, mask1, out0, out1); \ + ST_UB(out0, dst); \ + out_m = __msa_copy_s_d((v2i64)out1, 0); \ + SD(out_m, dst + 16); \ +} while (0) + +#define STORE16_4(a0, a1, a2, a3, dst) do { \ + v16u8 tmp0, tmp1, tmp2, tmp3; \ + v16u8 out0, out1, out2, out3; \ + ILVRL_B2_UB(a1, a0, tmp0, tmp1); \ + ILVRL_B2_UB(a3, a2, tmp2, tmp3); \ + ILVRL_H2_UB(tmp2, tmp0, out0, out1); \ + ILVRL_H2_UB(tmp3, tmp1, out2, out3); \ + ST_UB(out0, dst + 0); \ + ST_UB(out1, dst + 16); \ + ST_UB(out2, dst + 32); \ + ST_UB(out3, dst + 48); \ +} while (0) + +#define STORE8_4(a0, a1, a2, a3, dst) do { \ + v16u8 tmp0, tmp1, tmp2, tmp3; \ + ILVR_B2_UB(a1, a0, a3, a2, tmp0, tmp1); \ + ILVRL_H2_UB(tmp1, tmp0, tmp2, tmp3); \ + ST_UB(tmp2, dst + 0); \ + ST_UB(tmp3, dst + 16); \ +} while (0) + +#define STORE2_16(a0, a1, dst) do { \ + v16u8 out0, 
out1; \ + ILVRL_B2_UB(a1, a0, out0, out1); \ + ST_UB(out0, dst + 0); \ + ST_UB(out1, dst + 16); \ +} while (0) + +#define STORE2_8(a0, a1, dst) do { \ + const v16u8 out0 = (v16u8)__msa_ilvr_b((v16i8)a1, (v16i8)a0); \ + ST_UB(out0, dst); \ +} while (0) + +#define CALC_RGBA4444(y, u, v, out0, out1, N, dst) do { \ + CALC_RGB##N(y, u, v, R, G, B); \ + tmp0 = ANDI_B(R, 0xf0); \ + tmp1 = SRAI_B(G, 4); \ + RG = tmp0 | tmp1; \ + tmp0 = ANDI_B(B, 0xf0); \ + BA = ORI_B(tmp0, 0x0f); \ + STORE2_##N(out0, out1, dst); \ +} while (0) + +#define CALC_RGB565(y, u, v, out0, out1, N, dst) do { \ + CALC_RGB##N(y, u, v, R, G, B); \ + tmp0 = ANDI_B(R, 0xf8); \ + tmp1 = SRAI_B(G, 5); \ + RG = tmp0 | tmp1; \ + tmp0 = SLLI_B(G, 3); \ + tmp1 = ANDI_B(tmp0, 0xe0); \ + tmp0 = SRAI_B(B, 3); \ + GB = tmp0 | tmp1; \ + STORE2_##N(out0, out1, dst); \ +} while (0) + +static WEBP_INLINE int Clip8(int v) { + return v < 0 ? 0 : v > 255 ? 255 : v; +} + +static void YuvToRgb(int y, int u, int v, uint8_t* const rgb) { + const int y1 = MultHi(y, 19077); + const int r1 = y1 + MultHi(v, 26149) - 14234; + const int g1 = y1 - MultHi(u, 6419) - MultHi(v, 13320) + 8708; + const int b1 = y1 + MultHi(u, 33050) - 17685; + rgb[0] = Clip8(r1 >> 6); + rgb[1] = Clip8(g1 >> 6); + rgb[2] = Clip8(b1 >> 6); +} + +static void YuvToBgr(int y, int u, int v, uint8_t* const bgr) { + const int y1 = MultHi(y, 19077); + const int r1 = y1 + MultHi(v, 26149) - 14234; + const int g1 = y1 - MultHi(u, 6419) - MultHi(v, 13320) + 8708; + const int b1 = y1 + MultHi(u, 33050) - 17685; + bgr[0] = Clip8(b1 >> 6); + bgr[1] = Clip8(g1 >> 6); + bgr[2] = Clip8(r1 >> 6); +} + +#if !defined(WEBP_REDUCE_CSP) +static void YuvToRgb565(int y, int u, int v, uint8_t* const rgb) { + const int y1 = MultHi(y, 19077); + const int r1 = y1 + MultHi(v, 26149) - 14234; + const int g1 = y1 - MultHi(u, 6419) - MultHi(v, 13320) + 8708; + const int b1 = y1 + MultHi(u, 33050) - 17685; + const int r = Clip8(r1 >> 6); + const int g = Clip8(g1 >> 6); + const int b = Clip8(b1 >> 6); + const int rg = (r & 0xf8) | (g >> 5); + const int gb = ((g << 3) & 0xe0) | (b >> 3); +#if (WEBP_SWAP_16BIT_CSP == 1) + rgb[0] = gb; + rgb[1] = rg; +#else + rgb[0] = rg; + rgb[1] = gb; +#endif +} + +static void YuvToRgba4444(int y, int u, int v, uint8_t* const argb) { + const int y1 = MultHi(y, 19077); + const int r1 = y1 + MultHi(v, 26149) - 14234; + const int g1 = y1 - MultHi(u, 6419) - MultHi(v, 13320) + 8708; + const int b1 = y1 + MultHi(u, 33050) - 17685; + const int r = Clip8(r1 >> 6); + const int g = Clip8(g1 >> 6); + const int b = Clip8(b1 >> 6); + const int rg = (r & 0xf0) | (g >> 4); + const int ba = (b & 0xf0) | 0x0f; // overwrite the lower 4 bits +#if (WEBP_SWAP_16BIT_CSP == 1) + argb[0] = ba; + argb[1] = rg; +#else + argb[0] = rg; + argb[1] = ba; +#endif +} + +static void YuvToArgb(uint8_t y, uint8_t u, uint8_t v, uint8_t* const argb) { + argb[0] = 0xff; + YuvToRgb(y, u, v, argb + 1); +} +#endif // WEBP_REDUCE_CSP + +static void YuvToBgra(uint8_t y, uint8_t u, uint8_t v, uint8_t* const bgra) { + YuvToBgr(y, u, v, bgra); + bgra[3] = 0xff; +} + +static void YuvToRgba(uint8_t y, uint8_t u, uint8_t v, uint8_t* const rgba) { + YuvToRgb(y, u, v, rgba); + rgba[3] = 0xff; +} + +#if !defined(WEBP_REDUCE_CSP) +static void YuvToRgbLine(const uint8_t* y, const uint8_t* u, + const uint8_t* v, uint8_t* dst, int length) { + v16u8 R, G, B; + while (length >= 16) { + CALC_RGB16(y, u, v, R, G, B); + STORE16_3(R, G, B, dst); + y += 16; + u += 16; + v += 16; + dst += 16 * 3; + length -= 16; + } + if (length > 8) { + 
uint8_t temp[3 * 16] = { 0 }; + memcpy(temp, y, length * sizeof(*temp)); + CALC_RGB16(temp, u, v, R, G, B); + STORE16_3(R, G, B, temp); + memcpy(dst, temp, length * 3 * sizeof(*dst)); + } else if (length > 0) { + uint8_t temp[3 * 8] = { 0 }; + memcpy(temp, y, length * sizeof(*temp)); + CALC_RGB8(temp, u, v, R, G, B); + STORE8_3(R, G, B, temp); + memcpy(dst, temp, length * 3 * sizeof(*dst)); + } +} + +static void YuvToBgrLine(const uint8_t* y, const uint8_t* u, + const uint8_t* v, uint8_t* dst, int length) { + v16u8 R, G, B; + while (length >= 16) { + CALC_RGB16(y, u, v, R, G, B); + STORE16_3(B, G, R, dst); + y += 16; + u += 16; + v += 16; + dst += 16 * 3; + length -= 16; + } + if (length > 8) { + uint8_t temp[3 * 16] = { 0 }; + memcpy(temp, y, length * sizeof(*temp)); + CALC_RGB16(temp, u, v, R, G, B); + STORE16_3(B, G, R, temp); + memcpy(dst, temp, length * 3 * sizeof(*dst)); + } else if (length > 0) { + uint8_t temp[3 * 8] = { 0 }; + memcpy(temp, y, length * sizeof(*temp)); + CALC_RGB8(temp, u, v, R, G, B); + STORE8_3(B, G, R, temp); + memcpy(dst, temp, length * 3 * sizeof(*dst)); + } +} +#endif // WEBP_REDUCE_CSP + +static void YuvToRgbaLine(const uint8_t* y, const uint8_t* u, + const uint8_t* v, uint8_t* dst, int length) { + v16u8 R, G, B; + const v16u8 A = (v16u8)__msa_ldi_b(ALPHAVAL); + while (length >= 16) { + CALC_RGB16(y, u, v, R, G, B); + STORE16_4(R, G, B, A, dst); + y += 16; + u += 16; + v += 16; + dst += 16 * 4; + length -= 16; + } + if (length > 8) { + uint8_t temp[4 * 16] = { 0 }; + memcpy(temp, y, length * sizeof(*temp)); + CALC_RGB16(&temp[0], u, v, R, G, B); + STORE16_4(R, G, B, A, temp); + memcpy(dst, temp, length * 4 * sizeof(*dst)); + } else if (length > 0) { + uint8_t temp[4 * 8] = { 0 }; + memcpy(temp, y, length * sizeof(*temp)); + CALC_RGB8(temp, u, v, R, G, B); + STORE8_4(R, G, B, A, temp); + memcpy(dst, temp, length * 4 * sizeof(*dst)); + } +} + +static void YuvToBgraLine(const uint8_t* y, const uint8_t* u, + const uint8_t* v, uint8_t* dst, int length) { + v16u8 R, G, B; + const v16u8 A = (v16u8)__msa_ldi_b(ALPHAVAL); + while (length >= 16) { + CALC_RGB16(y, u, v, R, G, B); + STORE16_4(B, G, R, A, dst); + y += 16; + u += 16; + v += 16; + dst += 16 * 4; + length -= 16; + } + if (length > 8) { + uint8_t temp[4 * 16] = { 0 }; + memcpy(temp, y, length * sizeof(*temp)); + CALC_RGB16(temp, u, v, R, G, B); + STORE16_4(B, G, R, A, temp); + memcpy(dst, temp, length * 4 * sizeof(*dst)); + } else if (length > 0) { + uint8_t temp[4 * 8] = { 0 }; + memcpy(temp, y, length * sizeof(*temp)); + CALC_RGB8(temp, u, v, R, G, B); + STORE8_4(B, G, R, A, temp); + memcpy(dst, temp, length * 4 * sizeof(*dst)); + } +} + +#if !defined(WEBP_REDUCE_CSP) +static void YuvToArgbLine(const uint8_t* y, const uint8_t* u, + const uint8_t* v, uint8_t* dst, int length) { + v16u8 R, G, B; + const v16u8 A = (v16u8)__msa_ldi_b(ALPHAVAL); + while (length >= 16) { + CALC_RGB16(y, u, v, R, G, B); + STORE16_4(A, R, G, B, dst); + y += 16; + u += 16; + v += 16; + dst += 16 * 4; + length -= 16; + } + if (length > 8) { + uint8_t temp[4 * 16] = { 0 }; + memcpy(temp, y, length * sizeof(*temp)); + CALC_RGB16(temp, u, v, R, G, B); + STORE16_4(A, R, G, B, temp); + memcpy(dst, temp, length * 4 * sizeof(*dst)); + } else if (length > 0) { + uint8_t temp[4 * 8] = { 0 }; + memcpy(temp, y, length * sizeof(*temp)); + CALC_RGB8(temp, u, v, R, G, B); + STORE8_4(A, R, G, B, temp); + memcpy(dst, temp, length * 4 * sizeof(*dst)); + } +} + +static void YuvToRgba4444Line(const uint8_t* y, const uint8_t* u, + const uint8_t* v, 
uint8_t* dst, int length) { + v16u8 R, G, B, RG, BA, tmp0, tmp1; + while (length >= 16) { +#if (WEBP_SWAP_16BIT_CSP == 1) + CALC_RGBA4444(y, u, v, BA, RG, 16, dst); +#else + CALC_RGBA4444(y, u, v, RG, BA, 16, dst); +#endif + y += 16; + u += 16; + v += 16; + dst += 16 * 2; + length -= 16; + } + if (length > 8) { + uint8_t temp[2 * 16] = { 0 }; + memcpy(temp, y, length * sizeof(*temp)); +#if (WEBP_SWAP_16BIT_CSP == 1) + CALC_RGBA4444(temp, u, v, BA, RG, 16, temp); +#else + CALC_RGBA4444(temp, u, v, RG, BA, 16, temp); +#endif + memcpy(dst, temp, length * 2 * sizeof(*dst)); + } else if (length > 0) { + uint8_t temp[2 * 8] = { 0 }; + memcpy(temp, y, length * sizeof(*temp)); +#if (WEBP_SWAP_16BIT_CSP == 1) + CALC_RGBA4444(temp, u, v, BA, RG, 8, temp); +#else + CALC_RGBA4444(temp, u, v, RG, BA, 8, temp); +#endif + memcpy(dst, temp, length * 2 * sizeof(*dst)); + } +} + +static void YuvToRgb565Line(const uint8_t* y, const uint8_t* u, + const uint8_t* v, uint8_t* dst, int length) { + v16u8 R, G, B, RG, GB, tmp0, tmp1; + while (length >= 16) { +#if (WEBP_SWAP_16BIT_CSP == 1) + CALC_RGB565(y, u, v, GB, RG, 16, dst); +#else + CALC_RGB565(y, u, v, RG, GB, 16, dst); +#endif + y += 16; + u += 16; + v += 16; + dst += 16 * 2; + length -= 16; + } + if (length > 8) { + uint8_t temp[2 * 16] = { 0 }; + memcpy(temp, y, length * sizeof(*temp)); +#if (WEBP_SWAP_16BIT_CSP == 1) + CALC_RGB565(temp, u, v, GB, RG, 16, temp); +#else + CALC_RGB565(temp, u, v, RG, GB, 16, temp); +#endif + memcpy(dst, temp, length * 2 * sizeof(*dst)); + } else if (length > 0) { + uint8_t temp[2 * 8] = { 0 }; + memcpy(temp, y, length * sizeof(*temp)); +#if (WEBP_SWAP_16BIT_CSP == 1) + CALC_RGB565(temp, u, v, GB, RG, 8, temp); +#else + CALC_RGB565(temp, u, v, RG, GB, 8, temp); +#endif + memcpy(dst, temp, length * 2 * sizeof(*dst)); + } +} +#endif // WEBP_REDUCE_CSP + +#define UPSAMPLE_32PIXELS(a, b, c, d) do { \ + v16u8 s = __msa_aver_u_b(a, d); \ + v16u8 t = __msa_aver_u_b(b, c); \ + const v16u8 st = s ^ t; \ + v16u8 ad = a ^ d; \ + v16u8 bc = b ^ c; \ + v16u8 t0 = ad | bc; \ + v16u8 t1 = t0 | st; \ + v16u8 t2 = ANDI_B(t1, 1); \ + v16u8 t3 = __msa_aver_u_b(s, t); \ + const v16u8 k = t3 - t2; \ + v16u8 diag1, diag2; \ + AVER_UB2_UB(t, k, s, k, t0, t1); \ + bc = bc & st; \ + ad = ad & st; \ + t = t ^ k; \ + s = s ^ k; \ + t2 = bc | t; \ + t3 = ad | s; \ + t2 = ANDI_B(t2, 1); \ + t3 = ANDI_B(t3, 1); \ + SUB2(t0, t2, t1, t3, diag1, diag2); \ + AVER_UB2_UB(a, diag1, b, diag2, t0, t1); \ + ILVRL_B2_UB(t1, t0, a, b); \ + if (pbot_y != NULL) { \ + AVER_UB2_UB(c, diag2, d, diag1, t0, t1); \ + ILVRL_B2_UB(t1, t0, c, d); \ + } \ +} while (0) + +#define UPSAMPLE_FUNC(FUNC_NAME, FUNC, XSTEP) \ +static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bot_y, \ + const uint8_t* top_u, const uint8_t* top_v, \ + const uint8_t* cur_u, const uint8_t* cur_v, \ + uint8_t* top_dst, uint8_t* bot_dst, int len) \ +{ \ + int size = (len - 1) >> 1; \ + uint8_t temp_u[64]; \ + uint8_t temp_v[64]; \ + const uint32_t tl_uv = ((top_u[0]) | ((top_v[0]) << 16)); \ + const uint32_t l_uv = ((cur_u[0]) | ((cur_v[0]) << 16)); \ + const uint32_t uv0 = (3 * tl_uv + l_uv + 0x00020002u) >> 2; \ + const uint8_t* ptop_y = &top_y[1]; \ + uint8_t* ptop_dst = top_dst + XSTEP; \ + const uint8_t* pbot_y = &bot_y[1]; \ + uint8_t* pbot_dst = bot_dst + XSTEP; \ + \ + FUNC(top_y[0], uv0 & 0xff, (uv0 >> 16), top_dst); \ + if (bot_y != NULL) { \ + const uint32_t uv1 = (3 * l_uv + tl_uv + 0x00020002u) >> 2; \ + FUNC(bot_y[0], uv1 & 0xff, (uv1 >> 16), bot_dst); \ + } \ + while (size >= 16) { 
\ + v16u8 tu0, tu1, tv0, tv1, cu0, cu1, cv0, cv1; \ + LD_UB2(top_u, 1, tu0, tu1); \ + LD_UB2(cur_u, 1, cu0, cu1); \ + LD_UB2(top_v, 1, tv0, tv1); \ + LD_UB2(cur_v, 1, cv0, cv1); \ + UPSAMPLE_32PIXELS(tu0, tu1, cu0, cu1); \ + UPSAMPLE_32PIXELS(tv0, tv1, cv0, cv1); \ + ST_UB4(tu0, tu1, cu0, cu1, &temp_u[0], 16); \ + ST_UB4(tv0, tv1, cv0, cv1, &temp_v[0], 16); \ + FUNC##Line(ptop_y, &temp_u[ 0], &temp_v[0], ptop_dst, 32); \ + if (bot_y != NULL) { \ + FUNC##Line(pbot_y, &temp_u[32], &temp_v[32], pbot_dst, 32); \ + } \ + ptop_y += 32; \ + pbot_y += 32; \ + ptop_dst += XSTEP * 32; \ + pbot_dst += XSTEP * 32; \ + top_u += 16; \ + top_v += 16; \ + cur_u += 16; \ + cur_v += 16; \ + size -= 16; \ + } \ + if (size > 0) { \ + v16u8 tu0, tu1, tv0, tv1, cu0, cu1, cv0, cv1; \ + memcpy(&temp_u[ 0], top_u, 17 * sizeof(uint8_t)); \ + memcpy(&temp_u[32], cur_u, 17 * sizeof(uint8_t)); \ + memcpy(&temp_v[ 0], top_v, 17 * sizeof(uint8_t)); \ + memcpy(&temp_v[32], cur_v, 17 * sizeof(uint8_t)); \ + LD_UB2(&temp_u[ 0], 1, tu0, tu1); \ + LD_UB2(&temp_u[32], 1, cu0, cu1); \ + LD_UB2(&temp_v[ 0], 1, tv0, tv1); \ + LD_UB2(&temp_v[32], 1, cv0, cv1); \ + UPSAMPLE_32PIXELS(tu0, tu1, cu0, cu1); \ + UPSAMPLE_32PIXELS(tv0, tv1, cv0, cv1); \ + ST_UB4(tu0, tu1, cu0, cu1, &temp_u[0], 16); \ + ST_UB4(tv0, tv1, cv0, cv1, &temp_v[0], 16); \ + FUNC##Line(ptop_y, &temp_u[ 0], &temp_v[0], ptop_dst, size * 2); \ + if (bot_y != NULL) { \ + FUNC##Line(pbot_y, &temp_u[32], &temp_v[32], pbot_dst, size * 2); \ + } \ + top_u += size; \ + top_v += size; \ + cur_u += size; \ + cur_v += size; \ + } \ + if (!(len & 1)) { \ + const uint32_t t0 = ((top_u[0]) | ((top_v[0]) << 16)); \ + const uint32_t c0 = ((cur_u[0]) | ((cur_v[0]) << 16)); \ + const uint32_t tmp0 = (3 * t0 + c0 + 0x00020002u) >> 2; \ + FUNC(top_y[len - 1], tmp0 & 0xff, (tmp0 >> 16), \ + top_dst + (len - 1) * XSTEP); \ + if (bot_y != NULL) { \ + const uint32_t tmp1 = (3 * c0 + t0 + 0x00020002u) >> 2; \ + FUNC(bot_y[len - 1], tmp1 & 0xff, (tmp1 >> 16), \ + bot_dst + (len - 1) * XSTEP); \ + } \ + } \ +} + +UPSAMPLE_FUNC(UpsampleRgbaLinePair, YuvToRgba, 4) +UPSAMPLE_FUNC(UpsampleBgraLinePair, YuvToBgra, 4) +#if !defined(WEBP_REDUCE_CSP) +UPSAMPLE_FUNC(UpsampleRgbLinePair, YuvToRgb, 3) +UPSAMPLE_FUNC(UpsampleBgrLinePair, YuvToBgr, 3) +UPSAMPLE_FUNC(UpsampleArgbLinePair, YuvToArgb, 4) +UPSAMPLE_FUNC(UpsampleRgba4444LinePair, YuvToRgba4444, 2) +UPSAMPLE_FUNC(UpsampleRgb565LinePair, YuvToRgb565, 2) +#endif // WEBP_REDUCE_CSP + +//------------------------------------------------------------------------------ +// Entry point + +extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */]; + +extern void WebPInitUpsamplersMSA(void); + +WEBP_TSAN_IGNORE_FUNCTION void WebPInitUpsamplersMSA(void) { + WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePair; + WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePair; + WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePair; + WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePair; +#if !defined(WEBP_REDUCE_CSP) + WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePair; + WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePair; + WebPUpsamplers[MODE_ARGB] = UpsampleArgbLinePair; + WebPUpsamplers[MODE_Argb] = UpsampleArgbLinePair; + WebPUpsamplers[MODE_RGB_565] = UpsampleRgb565LinePair; + WebPUpsamplers[MODE_RGBA_4444] = UpsampleRgba4444LinePair; + WebPUpsamplers[MODE_rgbA_4444] = UpsampleRgba4444LinePair; +#endif // WEBP_REDUCE_CSP +} + +#endif // FANCY_UPSAMPLING + +#endif // WEBP_USE_MSA + +#if !(defined(FANCY_UPSAMPLING) && defined(WEBP_USE_MSA)) 
+WEBP_DSP_INIT_STUB(WebPInitUpsamplersMSA)
+#endif
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_upsampling_neon.c b/vendor/github.com/sizeofint/webpanimation/dsp_upsampling_neon.c
new file mode 100644
index 00000000..632bde20
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_upsampling_neon.c
@@ -0,0 +1,285 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// NEON version of YUV to RGB upsampling functions.
+//
+// Author: mans@mansr.com (Mans Rullgard)
+// Based on SSE code by: somnath@google.com (Somnath Banerjee)
+
+#include "dsp_dsp.h"
+
+#if defined(WEBP_USE_NEON)
+
+#include <assert.h>
+#include <arm_neon.h>
+#include <string.h>
+#include "dsp_neon.h"
+#include "dsp_yuv.h"
+
+#ifdef FANCY_UPSAMPLING
+
+//-----------------------------------------------------------------------------
+// U/V upsampling
+
+// Loads 9 pixels each from rows r1 and r2 and generates 16 pixels.
+#define UPSAMPLE_16PIXELS(r1, r2, out) do { \
+  const uint8x8_t a = vld1_u8(r1 + 0); \
+  const uint8x8_t b = vld1_u8(r1 + 1); \
+  const uint8x8_t c = vld1_u8(r2 + 0); \
+  const uint8x8_t d = vld1_u8(r2 + 1); \
+  /* a + b + c + d */ \
+  const uint16x8_t ad = vaddl_u8(a, d); \
+  const uint16x8_t bc = vaddl_u8(b, c); \
+  const uint16x8_t abcd = vaddq_u16(ad, bc); \
+  /* 3a + b + c + 3d */ \
+  const uint16x8_t al = vaddq_u16(abcd, vshlq_n_u16(ad, 1)); \
+  /* a + 3b + 3c + d */ \
+  const uint16x8_t bl = vaddq_u16(abcd, vshlq_n_u16(bc, 1)); \
+  \
+  const uint8x8_t diag2 = vshrn_n_u16(al, 3); \
+  const uint8x8_t diag1 = vshrn_n_u16(bl, 3); \
+  \
+  const uint8x8_t A = vrhadd_u8(a, diag1); \
+  const uint8x8_t B = vrhadd_u8(b, diag2); \
+  const uint8x8_t C = vrhadd_u8(c, diag2); \
+  const uint8x8_t D = vrhadd_u8(d, diag1); \
+  \
+  uint8x8x2_t A_B, C_D; \
+  INIT_VECTOR2(A_B, A, B); \
+  INIT_VECTOR2(C_D, C, D); \
+  vst2_u8(out + 0, A_B); \
+  vst2_u8(out + 32, C_D); \
+} while (0)
+
+// Turn the macro into a function for reducing code-size when non-critical
+static void Upsample16Pixels_NEON(const uint8_t* r1, const uint8_t* r2,
+                                  uint8_t* out) {
+  UPSAMPLE_16PIXELS(r1, r2, out);
+}
+
+#define UPSAMPLE_LAST_BLOCK(tb, bb, num_pixels, out) { \
+  uint8_t r1[9], r2[9]; \
+  memcpy(r1, (tb), (num_pixels)); \
+  memcpy(r2, (bb), (num_pixels)); \
+  /* replicate last byte */ \
+  memset(r1 + (num_pixels), r1[(num_pixels) - 1], 9 - (num_pixels)); \
+  memset(r2 + (num_pixels), r2[(num_pixels) - 1], 9 - (num_pixels)); \
+  Upsample16Pixels_NEON(r1, r2, out); \
+}
+
+//-----------------------------------------------------------------------------
+// YUV->RGB conversion
+
+// note: we represent the 33050 large constant as 32768 + 282
+static const int16_t kCoeffs1[4] = { 19077, 26149, 6419, 13320 };
+
+#define v255 vdup_n_u8(255)
+
+#define STORE_Rgb(out, r, g, b) do { \
+  uint8x8x3_t r_g_b; \
+  INIT_VECTOR3(r_g_b, r, g, b); \
+  vst3_u8(out, r_g_b); \
+} while (0)
+
+#define STORE_Bgr(out, r, g, b) do { \
+  uint8x8x3_t b_g_r; \
+  INIT_VECTOR3(b_g_r, b, g, r); \
+  vst3_u8(out, b_g_r); \
+} while (0)
+
+#define STORE_Rgba(out, r, g, b) do { \
+  uint8x8x4_t r_g_b_v255; \
+  INIT_VECTOR4(r_g_b_v255, r, g, b, v255); \
+  vst4_u8(out,
r_g_b_v255); \ +} while (0) + +#define STORE_Bgra(out, r, g, b) do { \ + uint8x8x4_t b_g_r_v255; \ + INIT_VECTOR4(b_g_r_v255, b, g, r, v255); \ + vst4_u8(out, b_g_r_v255); \ +} while (0) + +#define STORE_Argb(out, r, g, b) do { \ + uint8x8x4_t v255_r_g_b; \ + INIT_VECTOR4(v255_r_g_b, v255, r, g, b); \ + vst4_u8(out, v255_r_g_b); \ +} while (0) + +#if !defined(WEBP_SWAP_16BIT_CSP) +#define ZIP_U8(lo, hi) vzip_u8((lo), (hi)) +#else +#define ZIP_U8(lo, hi) vzip_u8((hi), (lo)) +#endif + +#define STORE_Rgba4444(out, r, g, b) do { \ + const uint8x8_t rg = vsri_n_u8(r, g, 4); /* shift g, insert r */ \ + const uint8x8_t ba = vsri_n_u8(b, v255, 4); /* shift a, insert b */ \ + const uint8x8x2_t rgba4444 = ZIP_U8(rg, ba); \ + vst1q_u8(out, vcombine_u8(rgba4444.val[0], rgba4444.val[1])); \ +} while (0) + +#define STORE_Rgb565(out, r, g, b) do { \ + const uint8x8_t rg = vsri_n_u8(r, g, 5); /* shift g and insert r */ \ + const uint8x8_t g1 = vshl_n_u8(g, 3); /* pre-shift g: 3bits */ \ + const uint8x8_t gb = vsri_n_u8(g1, b, 3); /* shift b and insert g */ \ + const uint8x8x2_t rgb565 = ZIP_U8(rg, gb); \ + vst1q_u8(out, vcombine_u8(rgb565.val[0], rgb565.val[1])); \ +} while (0) + +#define CONVERT8(FMT, XSTEP, N, src_y, src_uv, out, cur_x) do { \ + int i; \ + for (i = 0; i < N; i += 8) { \ + const int off = ((cur_x) + i) * XSTEP; \ + const uint8x8_t y = vld1_u8((src_y) + (cur_x) + i); \ + const uint8x8_t u = vld1_u8((src_uv) + i + 0); \ + const uint8x8_t v = vld1_u8((src_uv) + i + 16); \ + const int16x8_t Y0 = vreinterpretq_s16_u16(vshll_n_u8(y, 7)); \ + const int16x8_t U0 = vreinterpretq_s16_u16(vshll_n_u8(u, 7)); \ + const int16x8_t V0 = vreinterpretq_s16_u16(vshll_n_u8(v, 7)); \ + const int16x8_t Y1 = vqdmulhq_lane_s16(Y0, coeff1, 0); \ + const int16x8_t R0 = vqdmulhq_lane_s16(V0, coeff1, 1); \ + const int16x8_t G0 = vqdmulhq_lane_s16(U0, coeff1, 2); \ + const int16x8_t G1 = vqdmulhq_lane_s16(V0, coeff1, 3); \ + const int16x8_t B0 = vqdmulhq_n_s16(U0, 282); \ + const int16x8_t R1 = vqaddq_s16(Y1, R_Rounder); \ + const int16x8_t G2 = vqaddq_s16(Y1, G_Rounder); \ + const int16x8_t B1 = vqaddq_s16(Y1, B_Rounder); \ + const int16x8_t R2 = vqaddq_s16(R0, R1); \ + const int16x8_t G3 = vqaddq_s16(G0, G1); \ + const int16x8_t B2 = vqaddq_s16(B0, B1); \ + const int16x8_t G4 = vqsubq_s16(G2, G3); \ + const int16x8_t B3 = vqaddq_s16(B2, U0); \ + const uint8x8_t R = vqshrun_n_s16(R2, YUV_FIX2); \ + const uint8x8_t G = vqshrun_n_s16(G4, YUV_FIX2); \ + const uint8x8_t B = vqshrun_n_s16(B3, YUV_FIX2); \ + STORE_ ## FMT(out + off, R, G, B); \ + } \ +} while (0) + +#define CONVERT1(FUNC, XSTEP, N, src_y, src_uv, rgb, cur_x) { \ + int i; \ + for (i = 0; i < N; i++) { \ + const int off = ((cur_x) + i) * XSTEP; \ + const int y = src_y[(cur_x) + i]; \ + const int u = (src_uv)[i]; \ + const int v = (src_uv)[i + 16]; \ + FUNC(y, u, v, rgb + off); \ + } \ +} + +#define CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, uv, \ + top_dst, bottom_dst, cur_x, len) { \ + CONVERT8(FMT, XSTEP, len, top_y, uv, top_dst, cur_x); \ + if (bottom_y != NULL) { \ + CONVERT8(FMT, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x); \ + } \ +} + +#define CONVERT2RGB_1(FUNC, XSTEP, top_y, bottom_y, uv, \ + top_dst, bottom_dst, cur_x, len) { \ + CONVERT1(FUNC, XSTEP, len, top_y, uv, top_dst, cur_x); \ + if (bottom_y != NULL) { \ + CONVERT1(FUNC, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x); \ + } \ +} + +#define NEON_UPSAMPLE_FUNC(FUNC_NAME, FMT, XSTEP) \ +static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \ + const uint8_t* 
top_u, const uint8_t* top_v, \ + const uint8_t* cur_u, const uint8_t* cur_v, \ + uint8_t* top_dst, uint8_t* bottom_dst, int len) { \ + int block; \ + /* 16 byte aligned array to cache reconstructed u and v */ \ + uint8_t uv_buf[2 * 32 + 15]; \ + uint8_t* const r_uv = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15); \ + const int uv_len = (len + 1) >> 1; \ + /* 9 pixels must be read-able for each block */ \ + const int num_blocks = (uv_len - 1) >> 3; \ + const int leftover = uv_len - num_blocks * 8; \ + const int last_pos = 1 + 16 * num_blocks; \ + \ + const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1; \ + const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1; \ + \ + const int16x4_t coeff1 = vld1_s16(kCoeffs1); \ + const int16x8_t R_Rounder = vdupq_n_s16(-14234); \ + const int16x8_t G_Rounder = vdupq_n_s16(8708); \ + const int16x8_t B_Rounder = vdupq_n_s16(-17685); \ + \ + /* Treat the first pixel in regular way */ \ + assert(top_y != NULL); \ + { \ + const int u0 = (top_u[0] + u_diag) >> 1; \ + const int v0 = (top_v[0] + v_diag) >> 1; \ + VP8YuvTo ## FMT(top_y[0], u0, v0, top_dst); \ + } \ + if (bottom_y != NULL) { \ + const int u0 = (cur_u[0] + u_diag) >> 1; \ + const int v0 = (cur_v[0] + v_diag) >> 1; \ + VP8YuvTo ## FMT(bottom_y[0], u0, v0, bottom_dst); \ + } \ + \ + for (block = 0; block < num_blocks; ++block) { \ + UPSAMPLE_16PIXELS(top_u, cur_u, r_uv); \ + UPSAMPLE_16PIXELS(top_v, cur_v, r_uv + 16); \ + CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, r_uv, \ + top_dst, bottom_dst, 16 * block + 1, 16); \ + top_u += 8; \ + cur_u += 8; \ + top_v += 8; \ + cur_v += 8; \ + } \ + \ + UPSAMPLE_LAST_BLOCK(top_u, cur_u, leftover, r_uv); \ + UPSAMPLE_LAST_BLOCK(top_v, cur_v, leftover, r_uv + 16); \ + CONVERT2RGB_1(VP8YuvTo ## FMT, XSTEP, top_y, bottom_y, r_uv, \ + top_dst, bottom_dst, last_pos, len - last_pos); \ +} + +// NEON variants of the fancy upsampler. 
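+// (Aside, not part of upstream libwebp: a scalar model of the per-pixel math
+// in CONVERT8 above. With X0 = x << 7, vqdmulhq_lane_s16(X0, C) computes
+// ((x << 7) * C * 2) >> 16 == (x * C) >> 8, i.e. the MultHi(x, C) helper of
+// dsp_yuv.h, so the vector path matches the scalar VP8YUVToR/G/B results.
+// The sketch is illustrative only and therefore compiled out;
+// RedSampleScalar is a hypothetical helper, not an upstream function.)
+#if 0
+static uint8_t RedSampleScalar(int y, int v) {
+  const int Y1 = (y * 19077) >> 8;  // vqdmulhq_lane_s16(Y0, coeff1, 0)
+  const int R0 = (v * 26149) >> 8;  // vqdmulhq_lane_s16(V0, coeff1, 1)
+  const int R2 = Y1 + R0 - 14234;   // vqaddq_s16(Y1, R_Rounder) + R0
+  const int r = R2 >> YUV_FIX2;     // vqshrun_n_s16(R2, YUV_FIX2) ...
+  return (r < 0) ? 0 : (r > 255) ? 255 : (uint8_t)r;  // ... with saturation
+}
+#endif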
+NEON_UPSAMPLE_FUNC(UpsampleRgbaLinePair_NEON, Rgba, 4)
+NEON_UPSAMPLE_FUNC(UpsampleBgraLinePair_NEON, Bgra, 4)
+#if !defined(WEBP_REDUCE_CSP)
+NEON_UPSAMPLE_FUNC(UpsampleRgbLinePair_NEON, Rgb, 3)
+NEON_UPSAMPLE_FUNC(UpsampleBgrLinePair_NEON, Bgr, 3)
+NEON_UPSAMPLE_FUNC(UpsampleArgbLinePair_NEON, Argb, 4)
+NEON_UPSAMPLE_FUNC(UpsampleRgba4444LinePair_NEON, Rgba4444, 2)
+NEON_UPSAMPLE_FUNC(UpsampleRgb565LinePair_NEON, Rgb565, 2)
+#endif  // WEBP_REDUCE_CSP
+
+//------------------------------------------------------------------------------
+// Entry point
+
+extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];
+
+extern void WebPInitUpsamplersNEON(void);
+
+WEBP_TSAN_IGNORE_FUNCTION void WebPInitUpsamplersNEON(void) {
+  WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePair_NEON;
+  WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePair_NEON;
+  WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePair_NEON;
+  WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePair_NEON;
+#if !defined(WEBP_REDUCE_CSP)
+  WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePair_NEON;
+  WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePair_NEON;
+  WebPUpsamplers[MODE_ARGB] = UpsampleArgbLinePair_NEON;
+  WebPUpsamplers[MODE_Argb] = UpsampleArgbLinePair_NEON;
+  WebPUpsamplers[MODE_RGB_565] = UpsampleRgb565LinePair_NEON;
+  WebPUpsamplers[MODE_RGBA_4444] = UpsampleRgba4444LinePair_NEON;
+  WebPUpsamplers[MODE_rgbA_4444] = UpsampleRgba4444LinePair_NEON;
+#endif  // WEBP_REDUCE_CSP
+}
+
+#endif  // FANCY_UPSAMPLING
+
+#endif  // WEBP_USE_NEON
+
+#if !(defined(FANCY_UPSAMPLING) && defined(WEBP_USE_NEON))
+WEBP_DSP_INIT_STUB(WebPInitUpsamplersNEON)
+#endif
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_upsampling_sse2.c b/vendor/github.com/sizeofint/webpanimation/dsp_upsampling_sse2.c
new file mode 100644
index 00000000..e206633e
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_upsampling_sse2.c
@@ -0,0 +1,267 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// SSE2 version of YUV to RGB upsampling functions.
+//
+// Author: somnath@google.com (Somnath Banerjee)
+
+#include "dsp_dsp.h"
+
+#if defined(WEBP_USE_SSE2)
+
+#include <assert.h>
+#include <emmintrin.h>
+#include <string.h>
+#include "dsp_yuv.h"
+
+#ifdef FANCY_UPSAMPLING
+
+// We compute (9*a + 3*b + 3*c + d + 8) / 16 as follows
+// u = (9*a + 3*b + 3*c + d + 8) / 16
+//   = (a + (a + 3*b + 3*c + d) / 8 + 1) / 2
+//   = (a + m + 1) / 2
+// where m = (a + 3*b + 3*c + d) / 8
+//         = ((a + b + c + d) / 2 + b + c) / 4
+//
+// Let's say  k = (a + b + c + d) / 4.
+// We can compute k as +// k = (s + t + 1) / 2 - ((a^d) | (b^c) | (s^t)) & 1 +// where s = (a + d + 1) / 2 and t = (b + c + 1) / 2 +// +// Then m can be written as +// m = (k + t + 1) / 2 - (((b^c) & (s^t)) | (k^t)) & 1 + +// Computes out = (k + in + 1) / 2 - ((ij & (s^t)) | (k^in)) & 1 +#define GET_M(ij, in, out) do { \ + const __m128i tmp0 = _mm_avg_epu8(k, (in)); /* (k + in + 1) / 2 */ \ + const __m128i tmp1 = _mm_and_si128((ij), st); /* (ij) & (s^t) */ \ + const __m128i tmp2 = _mm_xor_si128(k, (in)); /* (k^in) */ \ + const __m128i tmp3 = _mm_or_si128(tmp1, tmp2); /* ((ij) & (s^t)) | (k^in) */\ + const __m128i tmp4 = _mm_and_si128(tmp3, one); /* & 1 -> lsb_correction */ \ + (out) = _mm_sub_epi8(tmp0, tmp4); /* (k + in + 1) / 2 - lsb_correction */ \ +} while (0) + +// pack and store two alternating pixel rows +#define PACK_AND_STORE(a, b, da, db, out) do { \ + const __m128i t_a = _mm_avg_epu8(a, da); /* (9a + 3b + 3c + d + 8) / 16 */ \ + const __m128i t_b = _mm_avg_epu8(b, db); /* (3a + 9b + c + 3d + 8) / 16 */ \ + const __m128i t_1 = _mm_unpacklo_epi8(t_a, t_b); \ + const __m128i t_2 = _mm_unpackhi_epi8(t_a, t_b); \ + _mm_store_si128(((__m128i*)(out)) + 0, t_1); \ + _mm_store_si128(((__m128i*)(out)) + 1, t_2); \ +} while (0) + +// Loads 17 pixels each from rows r1 and r2 and generates 32 pixels. +#define UPSAMPLE_32PIXELS(r1, r2, out) { \ + const __m128i one = _mm_set1_epi8(1); \ + const __m128i a = _mm_loadu_si128((const __m128i*)&(r1)[0]); \ + const __m128i b = _mm_loadu_si128((const __m128i*)&(r1)[1]); \ + const __m128i c = _mm_loadu_si128((const __m128i*)&(r2)[0]); \ + const __m128i d = _mm_loadu_si128((const __m128i*)&(r2)[1]); \ + \ + const __m128i s = _mm_avg_epu8(a, d); /* s = (a + d + 1) / 2 */ \ + const __m128i t = _mm_avg_epu8(b, c); /* t = (b + c + 1) / 2 */ \ + const __m128i st = _mm_xor_si128(s, t); /* st = s^t */ \ + \ + const __m128i ad = _mm_xor_si128(a, d); /* ad = a^d */ \ + const __m128i bc = _mm_xor_si128(b, c); /* bc = b^c */ \ + \ + const __m128i t1 = _mm_or_si128(ad, bc); /* (a^d) | (b^c) */ \ + const __m128i t2 = _mm_or_si128(t1, st); /* (a^d) | (b^c) | (s^t) */ \ + const __m128i t3 = _mm_and_si128(t2, one); /* (a^d) | (b^c) | (s^t) & 1 */ \ + const __m128i t4 = _mm_avg_epu8(s, t); \ + const __m128i k = _mm_sub_epi8(t4, t3); /* k = (a + b + c + d) / 4 */ \ + __m128i diag1, diag2; \ + \ + GET_M(bc, t, diag1); /* diag1 = (a + 3b + 3c + d) / 8 */ \ + GET_M(ad, s, diag2); /* diag2 = (3a + b + c + 3d) / 8 */ \ + \ + /* pack the alternate pixels */ \ + PACK_AND_STORE(a, b, diag1, diag2, (out) + 0); /* store top */ \ + PACK_AND_STORE(c, d, diag2, diag1, (out) + 2 * 32); /* store bottom */ \ +} + +// Turn the macro into a function for reducing code-size when non-critical +static void Upsample32Pixels_SSE2(const uint8_t r1[], const uint8_t r2[], + uint8_t* const out) { + UPSAMPLE_32PIXELS(r1, r2, out); +} + +#define UPSAMPLE_LAST_BLOCK(tb, bb, num_pixels, out) { \ + uint8_t r1[17], r2[17]; \ + memcpy(r1, (tb), (num_pixels)); \ + memcpy(r2, (bb), (num_pixels)); \ + /* replicate last byte */ \ + memset(r1 + (num_pixels), r1[(num_pixels) - 1], 17 - (num_pixels)); \ + memset(r2 + (num_pixels), r2[(num_pixels) - 1], 17 - (num_pixels)); \ + /* using the shared function instead of the macro saves ~3k code size */ \ + Upsample32Pixels_SSE2(r1, r2, out); \ +} + +#define CONVERT2RGB_32(FUNC, XSTEP, top_y, bottom_y, \ + top_dst, bottom_dst, cur_x) do { \ + FUNC##32_SSE2((top_y) + (cur_x), r_u, r_v, (top_dst) + (cur_x) * (XSTEP)); \ + if ((bottom_y) != NULL) { \ + 
FUNC##32_SSE2((bottom_y) + (cur_x), r_u + 64, r_v + 64, \ + (bottom_dst) + (cur_x) * (XSTEP)); \ + } \ +} while (0) + +#define SSE2_UPSAMPLE_FUNC(FUNC_NAME, FUNC, XSTEP) \ +static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \ + const uint8_t* top_u, const uint8_t* top_v, \ + const uint8_t* cur_u, const uint8_t* cur_v, \ + uint8_t* top_dst, uint8_t* bottom_dst, int len) { \ + int uv_pos, pos; \ + /* 16byte-aligned array to cache reconstructed u and v */ \ + uint8_t uv_buf[14 * 32 + 15] = { 0 }; \ + uint8_t* const r_u = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15); \ + uint8_t* const r_v = r_u + 32; \ + \ + assert(top_y != NULL); \ + { /* Treat the first pixel in regular way */ \ + const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1; \ + const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1; \ + const int u0_t = (top_u[0] + u_diag) >> 1; \ + const int v0_t = (top_v[0] + v_diag) >> 1; \ + FUNC(top_y[0], u0_t, v0_t, top_dst); \ + if (bottom_y != NULL) { \ + const int u0_b = (cur_u[0] + u_diag) >> 1; \ + const int v0_b = (cur_v[0] + v_diag) >> 1; \ + FUNC(bottom_y[0], u0_b, v0_b, bottom_dst); \ + } \ + } \ + /* For UPSAMPLE_32PIXELS, 17 u/v values must be read-able for each block */ \ + for (pos = 1, uv_pos = 0; pos + 32 + 1 <= len; pos += 32, uv_pos += 16) { \ + UPSAMPLE_32PIXELS(top_u + uv_pos, cur_u + uv_pos, r_u); \ + UPSAMPLE_32PIXELS(top_v + uv_pos, cur_v + uv_pos, r_v); \ + CONVERT2RGB_32(FUNC, XSTEP, top_y, bottom_y, top_dst, bottom_dst, pos); \ + } \ + if (len > 1) { \ + const int left_over = ((len + 1) >> 1) - (pos >> 1); \ + uint8_t* const tmp_top_dst = r_u + 4 * 32; \ + uint8_t* const tmp_bottom_dst = tmp_top_dst + 4 * 32; \ + uint8_t* const tmp_top = tmp_bottom_dst + 4 * 32; \ + uint8_t* const tmp_bottom = (bottom_y == NULL) ? NULL : tmp_top + 32; \ + assert(left_over > 0); \ + UPSAMPLE_LAST_BLOCK(top_u + uv_pos, cur_u + uv_pos, left_over, r_u); \ + UPSAMPLE_LAST_BLOCK(top_v + uv_pos, cur_v + uv_pos, left_over, r_v); \ + memcpy(tmp_top, top_y + pos, len - pos); \ + if (bottom_y != NULL) memcpy(tmp_bottom, bottom_y + pos, len - pos); \ + CONVERT2RGB_32(FUNC, XSTEP, tmp_top, tmp_bottom, tmp_top_dst, \ + tmp_bottom_dst, 0); \ + memcpy(top_dst + pos * (XSTEP), tmp_top_dst, (len - pos) * (XSTEP)); \ + if (bottom_y != NULL) { \ + memcpy(bottom_dst + pos * (XSTEP), tmp_bottom_dst, \ + (len - pos) * (XSTEP)); \ + } \ + } \ +} + +// SSE2 variants of the fancy upsampler. 
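+// (Aside, not part of upstream libwebp: a compiled-out, brute-force check of
+// the averaging identities derived in the header comment above. It confirms
+// that the carry-correction trick used by UPSAMPLE_32PIXELS and GET_M yields
+// the exact integer averages. CheckUpsampleIdentities is a hypothetical
+// helper; assert.h is already included at the top of this file.)
+#if 0
+static void CheckUpsampleIdentities(void) {
+  int a, b, c, d;
+  for (a = 0; a < 256; a += 17) for (b = 0; b < 256; b += 17)
+  for (c = 0; c < 256; c += 17) for (d = 0; d < 256; d += 17) {
+    const int s = (a + d + 1) >> 1;   // _mm_avg_epu8(a, d)
+    const int t = (b + c + 1) >> 1;   // _mm_avg_epu8(b, c)
+    const int k = ((s + t + 1) >> 1) - (((a ^ d) | (b ^ c) | (s ^ t)) & 1);
+    const int m = ((k + t + 1) >> 1) - ((((b ^ c) & (s ^ t)) | (k ^ t)) & 1);
+    assert(k == ((a + b + c + d) >> 2));          // exact average of four
+    assert(m == ((a + 3 * b + 3 * c + d) >> 3));  // diag1 computed by GET_M
+    // PACK_AND_STORE's _mm_avg_epu8(a, diag1) gives the final filter tap:
+    assert(((a + m + 1) >> 1) == ((9 * a + 3 * b + 3 * c + d + 8) >> 4));
+  }
+}
+#endif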
+SSE2_UPSAMPLE_FUNC(UpsampleRgbaLinePair_SSE2, VP8YuvToRgba, 4) +SSE2_UPSAMPLE_FUNC(UpsampleBgraLinePair_SSE2, VP8YuvToBgra, 4) + +#if !defined(WEBP_REDUCE_CSP) +SSE2_UPSAMPLE_FUNC(UpsampleRgbLinePair_SSE2, VP8YuvToRgb, 3) +SSE2_UPSAMPLE_FUNC(UpsampleBgrLinePair_SSE2, VP8YuvToBgr, 3) +SSE2_UPSAMPLE_FUNC(UpsampleArgbLinePair_SSE2, VP8YuvToArgb, 4) +SSE2_UPSAMPLE_FUNC(UpsampleRgba4444LinePair_SSE2, VP8YuvToRgba4444, 2) +SSE2_UPSAMPLE_FUNC(UpsampleRgb565LinePair_SSE2, VP8YuvToRgb565, 2) +#endif // WEBP_REDUCE_CSP + +#undef GET_M +#undef PACK_AND_STORE +#undef UPSAMPLE_32PIXELS +#undef UPSAMPLE_LAST_BLOCK +#undef CONVERT2RGB +#undef CONVERT2RGB_32 +#undef SSE2_UPSAMPLE_FUNC + +//------------------------------------------------------------------------------ +// Entry point + +extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */]; + +extern void WebPInitUpsamplersSSE2(void); + +WEBP_TSAN_IGNORE_FUNCTION void WebPInitUpsamplersSSE2(void) { + WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePair_SSE2; + WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePair_SSE2; + WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePair_SSE2; + WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePair_SSE2; +#if !defined(WEBP_REDUCE_CSP) + WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePair_SSE2; + WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePair_SSE2; + WebPUpsamplers[MODE_ARGB] = UpsampleArgbLinePair_SSE2; + WebPUpsamplers[MODE_Argb] = UpsampleArgbLinePair_SSE2; + WebPUpsamplers[MODE_RGB_565] = UpsampleRgb565LinePair_SSE2; + WebPUpsamplers[MODE_RGBA_4444] = UpsampleRgba4444LinePair_SSE2; + WebPUpsamplers[MODE_rgbA_4444] = UpsampleRgba4444LinePair_SSE2; +#endif // WEBP_REDUCE_CSP +} + +#endif // FANCY_UPSAMPLING + +//------------------------------------------------------------------------------ + +extern WebPYUV444Converter WebPYUV444Converters[/* MODE_LAST */]; +extern void WebPInitYUV444ConvertersSSE2(void); + +#define YUV444_FUNC(FUNC_NAME, CALL, CALL_C, XSTEP) \ +extern void CALL_C(const uint8_t* y, const uint8_t* u, const uint8_t* v, \ + uint8_t* dst, int len); \ +static void FUNC_NAME(const uint8_t* y, const uint8_t* u, const uint8_t* v, \ + uint8_t* dst, int len) { \ + int i; \ + const int max_len = len & ~31; \ + for (i = 0; i < max_len; i += 32) { \ + CALL(y + i, u + i, v + i, dst + i * (XSTEP)); \ + } \ + if (i < len) { /* C-fallback */ \ + CALL_C(y + i, u + i, v + i, dst + i * (XSTEP), len - i); \ + } \ +} + +YUV444_FUNC(Yuv444ToRgba_SSE2, VP8YuvToRgba32_SSE2, WebPYuv444ToRgba_C, 4); +YUV444_FUNC(Yuv444ToBgra_SSE2, VP8YuvToBgra32_SSE2, WebPYuv444ToBgra_C, 4); +#if !defined(WEBP_REDUCE_CSP) +YUV444_FUNC(Yuv444ToRgb_SSE2, VP8YuvToRgb32_SSE2, WebPYuv444ToRgb_C, 3); +YUV444_FUNC(Yuv444ToBgr_SSE2, VP8YuvToBgr32_SSE2, WebPYuv444ToBgr_C, 3); +YUV444_FUNC(Yuv444ToArgb_SSE2, VP8YuvToArgb32_SSE2, WebPYuv444ToArgb_C, 4) +YUV444_FUNC(Yuv444ToRgba4444_SSE2, VP8YuvToRgba444432_SSE2, \ + WebPYuv444ToRgba4444_C, 2) +YUV444_FUNC(Yuv444ToRgb565_SSE2, VP8YuvToRgb56532_SSE2, WebPYuv444ToRgb565_C, 2) +#endif // WEBP_REDUCE_CSP + +WEBP_TSAN_IGNORE_FUNCTION void WebPInitYUV444ConvertersSSE2(void) { + WebPYUV444Converters[MODE_RGBA] = Yuv444ToRgba_SSE2; + WebPYUV444Converters[MODE_BGRA] = Yuv444ToBgra_SSE2; + WebPYUV444Converters[MODE_rgbA] = Yuv444ToRgba_SSE2; + WebPYUV444Converters[MODE_bgrA] = Yuv444ToBgra_SSE2; +#if !defined(WEBP_REDUCE_CSP) + WebPYUV444Converters[MODE_RGB] = Yuv444ToRgb_SSE2; + WebPYUV444Converters[MODE_BGR] = Yuv444ToBgr_SSE2; + WebPYUV444Converters[MODE_ARGB] = Yuv444ToArgb_SSE2; + WebPYUV444Converters[MODE_RGBA_4444] = 
Yuv444ToRgba4444_SSE2;
+  WebPYUV444Converters[MODE_RGB_565] = Yuv444ToRgb565_SSE2;
+  WebPYUV444Converters[MODE_Argb] = Yuv444ToArgb_SSE2;
+  WebPYUV444Converters[MODE_rgbA_4444] = Yuv444ToRgba4444_SSE2;
+#endif  // WEBP_REDUCE_CSP
+}
+
+#else
+
+WEBP_DSP_INIT_STUB(WebPInitYUV444ConvertersSSE2)
+
+#endif  // WEBP_USE_SSE2
+
+#if !(defined(FANCY_UPSAMPLING) && defined(WEBP_USE_SSE2))
+WEBP_DSP_INIT_STUB(WebPInitUpsamplersSSE2)
+#endif
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_upsampling_sse41.c b/vendor/github.com/sizeofint/webpanimation/dsp_upsampling_sse41.c
new file mode 100644
index 00000000..0abfed3c
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_upsampling_sse41.c
@@ -0,0 +1,239 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// SSE41 version of YUV to RGB upsampling functions.
+//
+// Author: somnath@google.com (Somnath Banerjee)
+
+#include "dsp_dsp.h"
+
+#if defined(WEBP_USE_SSE41)
+
+#include <assert.h>
+#include <smmintrin.h>
+#include <string.h>
+#include "dsp_yuv.h"
+
+#ifdef FANCY_UPSAMPLING
+
+#if !defined(WEBP_REDUCE_CSP)
+
+// We compute (9*a + 3*b + 3*c + d + 8) / 16 as follows
+// u = (9*a + 3*b + 3*c + d + 8) / 16
+//   = (a + (a + 3*b + 3*c + d) / 8 + 1) / 2
+//   = (a + m + 1) / 2
+// where m = (a + 3*b + 3*c + d) / 8
+//         = ((a + b + c + d) / 2 + b + c) / 4
+//
+// Let's say  k = (a + b + c + d) / 4.
+// We can compute k as
+// k = (s + t + 1) / 2 - ((a^d) | (b^c) | (s^t)) & 1
+// where s = (a + d + 1) / 2 and t = (b + c + 1) / 2
+//
+// Then m can be written as
+// m = (k + t + 1) / 2 - (((b^c) & (s^t)) | (k^t)) & 1
+
+// Computes out = (k + in + 1) / 2 - ((ij & (s^t)) | (k^in)) & 1
+#define GET_M(ij, in, out) do { \
+  const __m128i tmp0 = _mm_avg_epu8(k, (in)); /* (k + in + 1) / 2 */ \
+  const __m128i tmp1 = _mm_and_si128((ij), st); /* (ij) & (s^t) */ \
+  const __m128i tmp2 = _mm_xor_si128(k, (in)); /* (k^in) */ \
+  const __m128i tmp3 = _mm_or_si128(tmp1, tmp2); /* ((ij) & (s^t)) | (k^in) */\
+  const __m128i tmp4 = _mm_and_si128(tmp3, one); /* & 1 -> lsb_correction */ \
+  (out) = _mm_sub_epi8(tmp0, tmp4); /* (k + in + 1) / 2 - lsb_correction */ \
+} while (0)
+
+// pack and store two alternating pixel rows
+#define PACK_AND_STORE(a, b, da, db, out) do { \
+  const __m128i t_a = _mm_avg_epu8(a, da); /* (9a + 3b + 3c + d + 8) / 16 */ \
+  const __m128i t_b = _mm_avg_epu8(b, db); /* (3a + 9b + c + 3d + 8) / 16 */ \
+  const __m128i t_1 = _mm_unpacklo_epi8(t_a, t_b); \
+  const __m128i t_2 = _mm_unpackhi_epi8(t_a, t_b); \
+  _mm_store_si128(((__m128i*)(out)) + 0, t_1); \
+  _mm_store_si128(((__m128i*)(out)) + 1, t_2); \
+} while (0)
+
+// Loads 17 pixels each from rows r1 and r2 and generates 32 pixels.
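+// (Note, not an upstream comment: 17 = 16 samples plus the one right-hand
+// neighbour needed by the 2-tap horizontal filter; when fewer samples remain,
+// UPSAMPLE_LAST_BLOCK below pads the rows by replicating their last byte.)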
+#define UPSAMPLE_32PIXELS(r1, r2, out) { \ + const __m128i one = _mm_set1_epi8(1); \ + const __m128i a = _mm_loadu_si128((const __m128i*)&(r1)[0]); \ + const __m128i b = _mm_loadu_si128((const __m128i*)&(r1)[1]); \ + const __m128i c = _mm_loadu_si128((const __m128i*)&(r2)[0]); \ + const __m128i d = _mm_loadu_si128((const __m128i*)&(r2)[1]); \ + \ + const __m128i s = _mm_avg_epu8(a, d); /* s = (a + d + 1) / 2 */ \ + const __m128i t = _mm_avg_epu8(b, c); /* t = (b + c + 1) / 2 */ \ + const __m128i st = _mm_xor_si128(s, t); /* st = s^t */ \ + \ + const __m128i ad = _mm_xor_si128(a, d); /* ad = a^d */ \ + const __m128i bc = _mm_xor_si128(b, c); /* bc = b^c */ \ + \ + const __m128i t1 = _mm_or_si128(ad, bc); /* (a^d) | (b^c) */ \ + const __m128i t2 = _mm_or_si128(t1, st); /* (a^d) | (b^c) | (s^t) */ \ + const __m128i t3 = _mm_and_si128(t2, one); /* (a^d) | (b^c) | (s^t) & 1 */ \ + const __m128i t4 = _mm_avg_epu8(s, t); \ + const __m128i k = _mm_sub_epi8(t4, t3); /* k = (a + b + c + d) / 4 */ \ + __m128i diag1, diag2; \ + \ + GET_M(bc, t, diag1); /* diag1 = (a + 3b + 3c + d) / 8 */ \ + GET_M(ad, s, diag2); /* diag2 = (3a + b + c + 3d) / 8 */ \ + \ + /* pack the alternate pixels */ \ + PACK_AND_STORE(a, b, diag1, diag2, (out) + 0); /* store top */ \ + PACK_AND_STORE(c, d, diag2, diag1, (out) + 2 * 32); /* store bottom */ \ +} + +// Turn the macro into a function for reducing code-size when non-critical +static void Upsample32Pixels_SSE41(const uint8_t r1[], const uint8_t r2[], + uint8_t* const out) { + UPSAMPLE_32PIXELS(r1, r2, out); +} + +#define UPSAMPLE_LAST_BLOCK(tb, bb, num_pixels, out) { \ + uint8_t r1[17], r2[17]; \ + memcpy(r1, (tb), (num_pixels)); \ + memcpy(r2, (bb), (num_pixels)); \ + /* replicate last byte */ \ + memset(r1 + (num_pixels), r1[(num_pixels) - 1], 17 - (num_pixels)); \ + memset(r2 + (num_pixels), r2[(num_pixels) - 1], 17 - (num_pixels)); \ + /* using the shared function instead of the macro saves ~3k code size */ \ + Upsample32Pixels_SSE41(r1, r2, out); \ +} + +#define CONVERT2RGB_32(FUNC, XSTEP, top_y, bottom_y, \ + top_dst, bottom_dst, cur_x) do { \ + FUNC##32_SSE41((top_y) + (cur_x), r_u, r_v, (top_dst) + (cur_x) * (XSTEP)); \ + if ((bottom_y) != NULL) { \ + FUNC##32_SSE41((bottom_y) + (cur_x), r_u + 64, r_v + 64, \ + (bottom_dst) + (cur_x) * (XSTEP)); \ + } \ +} while (0) + +#define SSE4_UPSAMPLE_FUNC(FUNC_NAME, FUNC, XSTEP) \ +static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \ + const uint8_t* top_u, const uint8_t* top_v, \ + const uint8_t* cur_u, const uint8_t* cur_v, \ + uint8_t* top_dst, uint8_t* bottom_dst, int len) { \ + int uv_pos, pos; \ + /* 16byte-aligned array to cache reconstructed u and v */ \ + uint8_t uv_buf[14 * 32 + 15] = { 0 }; \ + uint8_t* const r_u = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15); \ + uint8_t* const r_v = r_u + 32; \ + \ + assert(top_y != NULL); \ + { /* Treat the first pixel in regular way */ \ + const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1; \ + const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1; \ + const int u0_t = (top_u[0] + u_diag) >> 1; \ + const int v0_t = (top_v[0] + v_diag) >> 1; \ + FUNC(top_y[0], u0_t, v0_t, top_dst); \ + if (bottom_y != NULL) { \ + const int u0_b = (cur_u[0] + u_diag) >> 1; \ + const int v0_b = (cur_v[0] + v_diag) >> 1; \ + FUNC(bottom_y[0], u0_b, v0_b, bottom_dst); \ + } \ + } \ + /* For UPSAMPLE_32PIXELS, 17 u/v values must be read-able for each block */ \ + for (pos = 1, uv_pos = 0; pos + 32 + 1 <= len; pos += 32, uv_pos += 16) { \ + UPSAMPLE_32PIXELS(top_u + uv_pos, cur_u + 
uv_pos, r_u); \ + UPSAMPLE_32PIXELS(top_v + uv_pos, cur_v + uv_pos, r_v); \ + CONVERT2RGB_32(FUNC, XSTEP, top_y, bottom_y, top_dst, bottom_dst, pos); \ + } \ + if (len > 1) { \ + const int left_over = ((len + 1) >> 1) - (pos >> 1); \ + uint8_t* const tmp_top_dst = r_u + 4 * 32; \ + uint8_t* const tmp_bottom_dst = tmp_top_dst + 4 * 32; \ + uint8_t* const tmp_top = tmp_bottom_dst + 4 * 32; \ + uint8_t* const tmp_bottom = (bottom_y == NULL) ? NULL : tmp_top + 32; \ + assert(left_over > 0); \ + UPSAMPLE_LAST_BLOCK(top_u + uv_pos, cur_u + uv_pos, left_over, r_u); \ + UPSAMPLE_LAST_BLOCK(top_v + uv_pos, cur_v + uv_pos, left_over, r_v); \ + memcpy(tmp_top, top_y + pos, len - pos); \ + if (bottom_y != NULL) memcpy(tmp_bottom, bottom_y + pos, len - pos); \ + CONVERT2RGB_32(FUNC, XSTEP, tmp_top, tmp_bottom, tmp_top_dst, \ + tmp_bottom_dst, 0); \ + memcpy(top_dst + pos * (XSTEP), tmp_top_dst, (len - pos) * (XSTEP)); \ + if (bottom_y != NULL) { \ + memcpy(bottom_dst + pos * (XSTEP), tmp_bottom_dst, \ + (len - pos) * (XSTEP)); \ + } \ + } \ +} + +// SSE4 variants of the fancy upsampler. +SSE4_UPSAMPLE_FUNC(UpsampleRgbLinePair_SSE41, VP8YuvToRgb, 3) +SSE4_UPSAMPLE_FUNC(UpsampleBgrLinePair_SSE41, VP8YuvToBgr, 3) + +#undef GET_M +#undef PACK_AND_STORE +#undef UPSAMPLE_32PIXELS +#undef UPSAMPLE_LAST_BLOCK +#undef CONVERT2RGB +#undef CONVERT2RGB_32 +#undef SSE4_UPSAMPLE_FUNC + +#endif // WEBP_REDUCE_CSP + +//------------------------------------------------------------------------------ +// Entry point + +extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */]; + +extern void WebPInitUpsamplersSSE41(void); + +WEBP_TSAN_IGNORE_FUNCTION void WebPInitUpsamplersSSE41(void) { +#if !defined(WEBP_REDUCE_CSP) + WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePair_SSE41; + WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePair_SSE41; +#endif // WEBP_REDUCE_CSP +} + +#endif // FANCY_UPSAMPLING + +//------------------------------------------------------------------------------ + +extern WebPYUV444Converter WebPYUV444Converters[/* MODE_LAST */]; +extern void WebPInitYUV444ConvertersSSE41(void); + +#define YUV444_FUNC(FUNC_NAME, CALL, CALL_C, XSTEP) \ +extern void CALL_C(const uint8_t* y, const uint8_t* u, const uint8_t* v, \ + uint8_t* dst, int len); \ +static void FUNC_NAME(const uint8_t* y, const uint8_t* u, const uint8_t* v, \ + uint8_t* dst, int len) { \ + int i; \ + const int max_len = len & ~31; \ + for (i = 0; i < max_len; i += 32) { \ + CALL(y + i, u + i, v + i, dst + i * (XSTEP)); \ + } \ + if (i < len) { /* C-fallback */ \ + CALL_C(y + i, u + i, v + i, dst + i * (XSTEP), len - i); \ + } \ +} + +#if !defined(WEBP_REDUCE_CSP) +YUV444_FUNC(Yuv444ToRgb_SSE41, VP8YuvToRgb32_SSE41, WebPYuv444ToRgb_C, 3); +YUV444_FUNC(Yuv444ToBgr_SSE41, VP8YuvToBgr32_SSE41, WebPYuv444ToBgr_C, 3); +#endif // WEBP_REDUCE_CSP + +WEBP_TSAN_IGNORE_FUNCTION void WebPInitYUV444ConvertersSSE41(void) { +#if !defined(WEBP_REDUCE_CSP) + WebPYUV444Converters[MODE_RGB] = Yuv444ToRgb_SSE41; + WebPYUV444Converters[MODE_BGR] = Yuv444ToBgr_SSE41; +#endif // WEBP_REDUCE_CSP +} + +#else + +WEBP_DSP_INIT_STUB(WebPInitYUV444ConvertersSSE41) + +#endif // WEBP_USE_SSE41 + +#if !(defined(FANCY_UPSAMPLING) && defined(WEBP_USE_SSE41)) +WEBP_DSP_INIT_STUB(WebPInitUpsamplersSSE41) +#endif diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_yuv.c b/vendor/github.com/sizeofint/webpanimation/dsp_yuv.c new file mode 100644 index 00000000..395dff56 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_yuv.c @@ -0,0 +1,308 @@ +// Copyright 2010 Google 
Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// YUV->RGB conversion functions
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include "dsp_yuv.h"
+
+#include <assert.h>
+#include <stdlib.h>
+
+//-----------------------------------------------------------------------------
+// Plain-C version
+
+#define ROW_FUNC(FUNC_NAME, FUNC, XSTEP) \
+static void FUNC_NAME(const uint8_t* y, \
+                      const uint8_t* u, const uint8_t* v, \
+                      uint8_t* dst, int len) { \
+  const uint8_t* const end = dst + (len & ~1) * (XSTEP); \
+  while (dst != end) { \
+    FUNC(y[0], u[0], v[0], dst); \
+    FUNC(y[1], u[0], v[0], dst + (XSTEP)); \
+    y += 2; \
+    ++u; \
+    ++v; \
+    dst += 2 * (XSTEP); \
+  } \
+  if (len & 1) { \
+    FUNC(y[0], u[0], v[0], dst); \
+  } \
+} \
+
+// All variants implemented.
+ROW_FUNC(YuvToRgbRow, VP8YuvToRgb, 3)
+ROW_FUNC(YuvToBgrRow, VP8YuvToBgr, 3)
+ROW_FUNC(YuvToRgbaRow, VP8YuvToRgba, 4)
+ROW_FUNC(YuvToBgraRow, VP8YuvToBgra, 4)
+ROW_FUNC(YuvToArgbRow, VP8YuvToArgb, 4)
+ROW_FUNC(YuvToRgba4444Row, VP8YuvToRgba4444, 2)
+ROW_FUNC(YuvToRgb565Row, VP8YuvToRgb565, 2)
+
+#undef ROW_FUNC
+
+// Main call for processing a plane with a WebPSamplerRowFunc function:
+void WebPSamplerProcessPlane(const uint8_t* y, int y_stride,
+                             const uint8_t* u, const uint8_t* v, int uv_stride,
+                             uint8_t* dst, int dst_stride,
+                             int width, int height, WebPSamplerRowFunc func) {
+  int j;
+  for (j = 0; j < height; ++j) {
+    func(y, u, v, dst, width);
+    y += y_stride;
+    if (j & 1) {
+      u += uv_stride;
+      v += uv_stride;
+    }
+    dst += dst_stride;
+  }
+}
+
+//-----------------------------------------------------------------------------
+// Main call
+
+WebPSamplerRowFunc WebPSamplers[MODE_LAST];
+
+extern void WebPInitSamplersSSE2(void);
+extern void WebPInitSamplersSSE41(void);
+extern void WebPInitSamplersMIPS32(void);
+extern void WebPInitSamplersMIPSdspR2(void);
+
+WEBP_DSP_INIT_FUNC(WebPInitSamplers) {
+  WebPSamplers[MODE_RGB] = YuvToRgbRow;
+  WebPSamplers[MODE_RGBA] = YuvToRgbaRow;
+  WebPSamplers[MODE_BGR] = YuvToBgrRow;
+  WebPSamplers[MODE_BGRA] = YuvToBgraRow;
+  WebPSamplers[MODE_ARGB] = YuvToArgbRow;
+  WebPSamplers[MODE_RGBA_4444] = YuvToRgba4444Row;
+  WebPSamplers[MODE_RGB_565] = YuvToRgb565Row;
+  WebPSamplers[MODE_rgbA] = YuvToRgbaRow;
+  WebPSamplers[MODE_bgrA] = YuvToBgraRow;
+  WebPSamplers[MODE_Argb] = YuvToArgbRow;
+  WebPSamplers[MODE_rgbA_4444] = YuvToRgba4444Row;
+
+  // If defined, use CPUInfo() to overwrite some pointers with faster versions.
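+  // (Note, not an upstream comment: the WebPInitSamplers*() entry points
+  // below only repoint entries of the WebPSamplers[] table filled in above;
+  // the plain-C rows always remain as the portable fallback.)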
+ if (VP8GetCPUInfo != NULL) { +#if defined(WEBP_USE_SSE2) + if (VP8GetCPUInfo(kSSE2)) { + WebPInitSamplersSSE2(); + } +#endif // WEBP_USE_SSE2 +#if defined(WEBP_USE_SSE41) + if (VP8GetCPUInfo(kSSE4_1)) { + WebPInitSamplersSSE41(); + } +#endif // WEBP_USE_SSE41 +#if defined(WEBP_USE_MIPS32) + if (VP8GetCPUInfo(kMIPS32)) { + WebPInitSamplersMIPS32(); + } +#endif // WEBP_USE_MIPS32 +#if defined(WEBP_USE_MIPS_DSP_R2) + if (VP8GetCPUInfo(kMIPSdspR2)) { + WebPInitSamplersMIPSdspR2(); + } +#endif // WEBP_USE_MIPS_DSP_R2 + } +} + +//----------------------------------------------------------------------------- +// ARGB -> YUV converters + +static void ConvertARGBToY_C(const uint32_t* argb, uint8_t* y, int width) { + int i; + for (i = 0; i < width; ++i) { + const uint32_t p = argb[i]; + y[i] = VP8RGBToY((p >> 16) & 0xff, (p >> 8) & 0xff, (p >> 0) & 0xff, + YUV_HALF); + } +} + +void WebPConvertARGBToUV_C(const uint32_t* argb, uint8_t* u, uint8_t* v, + int src_width, int do_store) { + // No rounding. Last pixel is dealt with separately. + const int uv_width = src_width >> 1; + int i; + for (i = 0; i < uv_width; ++i) { + const uint32_t v0 = argb[2 * i + 0]; + const uint32_t v1 = argb[2 * i + 1]; + // VP8RGBToU/V expects four accumulated pixels. Hence we need to + // scale r/g/b value by a factor 2. We just shift v0/v1 one bit less. + const int r = ((v0 >> 15) & 0x1fe) + ((v1 >> 15) & 0x1fe); + const int g = ((v0 >> 7) & 0x1fe) + ((v1 >> 7) & 0x1fe); + const int b = ((v0 << 1) & 0x1fe) + ((v1 << 1) & 0x1fe); + const int tmp_u = VP8RGBToU(r, g, b, YUV_HALF << 2); + const int tmp_v = VP8RGBToV(r, g, b, YUV_HALF << 2); + if (do_store) { + u[i] = tmp_u; + v[i] = tmp_v; + } else { + // Approximated average-of-four. But it's an acceptable diff. + u[i] = (u[i] + tmp_u + 1) >> 1; + v[i] = (v[i] + tmp_v + 1) >> 1; + } + } + if (src_width & 1) { // last pixel + const uint32_t v0 = argb[2 * i + 0]; + const int r = (v0 >> 14) & 0x3fc; + const int g = (v0 >> 6) & 0x3fc; + const int b = (v0 << 2) & 0x3fc; + const int tmp_u = VP8RGBToU(r, g, b, YUV_HALF << 2); + const int tmp_v = VP8RGBToV(r, g, b, YUV_HALF << 2); + if (do_store) { + u[i] = tmp_u; + v[i] = tmp_v; + } else { + u[i] = (u[i] + tmp_u + 1) >> 1; + v[i] = (v[i] + tmp_v + 1) >> 1; + } + } +} + +//----------------------------------------------------------------------------- + +static void ConvertRGB24ToY_C(const uint8_t* rgb, uint8_t* y, int width) { + int i; + for (i = 0; i < width; ++i, rgb += 3) { + y[i] = VP8RGBToY(rgb[0], rgb[1], rgb[2], YUV_HALF); + } +} + +static void ConvertBGR24ToY_C(const uint8_t* bgr, uint8_t* y, int width) { + int i; + for (i = 0; i < width; ++i, bgr += 3) { + y[i] = VP8RGBToY(bgr[2], bgr[1], bgr[0], YUV_HALF); + } +} + +void WebPConvertRGBA32ToUV_C(const uint16_t* rgb, + uint8_t* u, uint8_t* v, int width) { + int i; + for (i = 0; i < width; i += 1, rgb += 4) { + const int r = rgb[0], g = rgb[1], b = rgb[2]; + u[i] = VP8RGBToU(r, g, b, YUV_HALF << 2); + v[i] = VP8RGBToV(r, g, b, YUV_HALF << 2); + } +} + +//----------------------------------------------------------------------------- + +#if !WEBP_NEON_OMIT_C_CODE +#define MAX_Y ((1 << 10) - 1) // 10b precision over 16b-arithmetic +static uint16_t clip_y(int v) { + return (v < 0) ? 0 : (v > MAX_Y) ? 
MAX_Y : (uint16_t)v; +} + +static uint64_t SharpYUVUpdateY_C(const uint16_t* ref, const uint16_t* src, + uint16_t* dst, int len) { + uint64_t diff = 0; + int i; + for (i = 0; i < len; ++i) { + const int diff_y = ref[i] - src[i]; + const int new_y = (int)dst[i] + diff_y; + dst[i] = clip_y(new_y); + diff += (uint64_t)abs(diff_y); + } + return diff; +} + +static void SharpYUVUpdateRGB_C(const int16_t* ref, const int16_t* src, + int16_t* dst, int len) { + int i; + for (i = 0; i < len; ++i) { + const int diff_uv = ref[i] - src[i]; + dst[i] += diff_uv; + } +} + +static void SharpYUVFilterRow_C(const int16_t* A, const int16_t* B, int len, + const uint16_t* best_y, uint16_t* out) { + int i; + for (i = 0; i < len; ++i, ++A, ++B) { + const int v0 = (A[0] * 9 + A[1] * 3 + B[0] * 3 + B[1] + 8) >> 4; + const int v1 = (A[1] * 9 + A[0] * 3 + B[1] * 3 + B[0] + 8) >> 4; + out[2 * i + 0] = clip_y(best_y[2 * i + 0] + v0); + out[2 * i + 1] = clip_y(best_y[2 * i + 1] + v1); + } +} +#endif // !WEBP_NEON_OMIT_C_CODE + +#undef MAX_Y + +//----------------------------------------------------------------------------- + +void (*WebPConvertRGB24ToY)(const uint8_t* rgb, uint8_t* y, int width); +void (*WebPConvertBGR24ToY)(const uint8_t* bgr, uint8_t* y, int width); +void (*WebPConvertRGBA32ToUV)(const uint16_t* rgb, + uint8_t* u, uint8_t* v, int width); + +void (*WebPConvertARGBToY)(const uint32_t* argb, uint8_t* y, int width); +void (*WebPConvertARGBToUV)(const uint32_t* argb, uint8_t* u, uint8_t* v, + int src_width, int do_store); + +uint64_t (*WebPSharpYUVUpdateY)(const uint16_t* ref, const uint16_t* src, + uint16_t* dst, int len); +void (*WebPSharpYUVUpdateRGB)(const int16_t* ref, const int16_t* src, + int16_t* dst, int len); +void (*WebPSharpYUVFilterRow)(const int16_t* A, const int16_t* B, int len, + const uint16_t* best_y, uint16_t* out); + +extern void WebPInitConvertARGBToYUVSSE2(void); +extern void WebPInitConvertARGBToYUVSSE41(void); +extern void WebPInitConvertARGBToYUVNEON(void); +extern void WebPInitSharpYUVSSE2(void); +extern void WebPInitSharpYUVNEON(void); + +WEBP_DSP_INIT_FUNC(WebPInitConvertARGBToYUV) { + WebPConvertARGBToY = ConvertARGBToY_C; + WebPConvertARGBToUV = WebPConvertARGBToUV_C; + + WebPConvertRGB24ToY = ConvertRGB24ToY_C; + WebPConvertBGR24ToY = ConvertBGR24ToY_C; + + WebPConvertRGBA32ToUV = WebPConvertRGBA32ToUV_C; + +#if !WEBP_NEON_OMIT_C_CODE + WebPSharpYUVUpdateY = SharpYUVUpdateY_C; + WebPSharpYUVUpdateRGB = SharpYUVUpdateRGB_C; + WebPSharpYUVFilterRow = SharpYUVFilterRow_C; +#endif + + if (VP8GetCPUInfo != NULL) { +#if defined(WEBP_USE_SSE2) + if (VP8GetCPUInfo(kSSE2)) { + WebPInitConvertARGBToYUVSSE2(); + WebPInitSharpYUVSSE2(); + } +#endif // WEBP_USE_SSE2 +#if defined(WEBP_USE_SSE41) + if (VP8GetCPUInfo(kSSE4_1)) { + WebPInitConvertARGBToYUVSSE41(); + } +#endif // WEBP_USE_SSE41 + } + +#if defined(WEBP_USE_NEON) + if (WEBP_NEON_OMIT_C_CODE || + (VP8GetCPUInfo != NULL && VP8GetCPUInfo(kNEON))) { + WebPInitConvertARGBToYUVNEON(); + WebPInitSharpYUVNEON(); + } +#endif // WEBP_USE_NEON + + assert(WebPConvertARGBToY != NULL); + assert(WebPConvertARGBToUV != NULL); + assert(WebPConvertRGB24ToY != NULL); + assert(WebPConvertBGR24ToY != NULL); + assert(WebPConvertRGBA32ToUV != NULL); + assert(WebPSharpYUVUpdateY != NULL); + assert(WebPSharpYUVUpdateRGB != NULL); + assert(WebPSharpYUVFilterRow != NULL); +} diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_yuv.h b/vendor/github.com/sizeofint/webpanimation/dsp_yuv.h new file mode 100644 index 00000000..7dec9d51 --- /dev/null +++ 
b/vendor/github.com/sizeofint/webpanimation/dsp_yuv.h @@ -0,0 +1,210 @@ +// Copyright 2010 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// inline YUV<->RGB conversion function +// +// The exact naming is Y'CbCr, following the ITU-R BT.601 standard. +// More information at: http://en.wikipedia.org/wiki/YCbCr +// Y = 0.2569 * R + 0.5044 * G + 0.0979 * B + 16 +// U = -0.1483 * R - 0.2911 * G + 0.4394 * B + 128 +// V = 0.4394 * R - 0.3679 * G - 0.0715 * B + 128 +// We use 16bit fixed point operations for RGB->YUV conversion (YUV_FIX). +// +// For the Y'CbCr to RGB conversion, the BT.601 specification reads: +// R = 1.164 * (Y-16) + 1.596 * (V-128) +// G = 1.164 * (Y-16) - 0.813 * (V-128) - 0.391 * (U-128) +// B = 1.164 * (Y-16) + 2.018 * (U-128) +// where Y is in the [16,235] range, and U/V in the [16,240] range. +// +// The fixed-point implementation used here is: +// R = (19077 . y + 26149 . v - 14234) >> 6 +// G = (19077 . y - 6419 . u - 13320 . v + 8708) >> 6 +// B = (19077 . y + 33050 . u - 17685) >> 6 +// where the '.' operator is the mulhi_epu16 variant: +// a . b = ((a << 8) * b) >> 16 +// that preserves 8 bits of fractional precision before final descaling. + +// Author: Skal (pascal.massimino@gmail.com) + +#ifndef WEBP_DSP_YUV_H_ +#define WEBP_DSP_YUV_H_ + +#include "dsp_dsp.h" +#include "dec_vp8_dec.h" + +//------------------------------------------------------------------------------ +// YUV -> RGB conversion + +#ifdef __cplusplus +extern "C" { +#endif + +enum { + YUV_FIX = 16, // fixed-point precision for RGB->YUV + YUV_HALF = 1 << (YUV_FIX - 1), + + YUV_FIX2 = 6, // fixed-point precision for YUV->RGB + YUV_MASK2 = (256 << YUV_FIX2) - 1 +}; + +//------------------------------------------------------------------------------ +// slower on x86 by ~7-8%, but bit-exact with the SSE2/NEON version + +static WEBP_INLINE int MultHi(int v, int coeff) { // _mm_mulhi_epu16 emulation + return (v * coeff) >> 8; +} + +static WEBP_INLINE int VP8Clip8(int v) { + return ((v & ~YUV_MASK2) == 0) ? (v >> YUV_FIX2) : (v < 0) ? 
0 : 255; +} + +static WEBP_INLINE int VP8YUVToR(int y, int v) { + return VP8Clip8(MultHi(y, 19077) + MultHi(v, 26149) - 14234); +} + +static WEBP_INLINE int VP8YUVToG(int y, int u, int v) { + return VP8Clip8(MultHi(y, 19077) - MultHi(u, 6419) - MultHi(v, 13320) + 8708); +} + +static WEBP_INLINE int VP8YUVToB(int y, int u) { + return VP8Clip8(MultHi(y, 19077) + MultHi(u, 33050) - 17685); +} + +static WEBP_INLINE void VP8YuvToRgb(int y, int u, int v, + uint8_t* const rgb) { + rgb[0] = VP8YUVToR(y, v); + rgb[1] = VP8YUVToG(y, u, v); + rgb[2] = VP8YUVToB(y, u); +} + +static WEBP_INLINE void VP8YuvToBgr(int y, int u, int v, + uint8_t* const bgr) { + bgr[0] = VP8YUVToB(y, u); + bgr[1] = VP8YUVToG(y, u, v); + bgr[2] = VP8YUVToR(y, v); +} + +static WEBP_INLINE void VP8YuvToRgb565(int y, int u, int v, + uint8_t* const rgb) { + const int r = VP8YUVToR(y, v); // 5 usable bits + const int g = VP8YUVToG(y, u, v); // 6 usable bits + const int b = VP8YUVToB(y, u); // 5 usable bits + const int rg = (r & 0xf8) | (g >> 5); + const int gb = ((g << 3) & 0xe0) | (b >> 3); +#if (WEBP_SWAP_16BIT_CSP == 1) + rgb[0] = gb; + rgb[1] = rg; +#else + rgb[0] = rg; + rgb[1] = gb; +#endif +} + +static WEBP_INLINE void VP8YuvToRgba4444(int y, int u, int v, + uint8_t* const argb) { + const int r = VP8YUVToR(y, v); // 4 usable bits + const int g = VP8YUVToG(y, u, v); // 4 usable bits + const int b = VP8YUVToB(y, u); // 4 usable bits + const int rg = (r & 0xf0) | (g >> 4); + const int ba = (b & 0xf0) | 0x0f; // overwrite the lower 4 bits +#if (WEBP_SWAP_16BIT_CSP == 1) + argb[0] = ba; + argb[1] = rg; +#else + argb[0] = rg; + argb[1] = ba; +#endif +} + +//----------------------------------------------------------------------------- +// Alpha handling variants + +static WEBP_INLINE void VP8YuvToArgb(uint8_t y, uint8_t u, uint8_t v, + uint8_t* const argb) { + argb[0] = 0xff; + VP8YuvToRgb(y, u, v, argb + 1); +} + +static WEBP_INLINE void VP8YuvToBgra(uint8_t y, uint8_t u, uint8_t v, + uint8_t* const bgra) { + VP8YuvToBgr(y, u, v, bgra); + bgra[3] = 0xff; +} + +static WEBP_INLINE void VP8YuvToRgba(uint8_t y, uint8_t u, uint8_t v, + uint8_t* const rgba) { + VP8YuvToRgb(y, u, v, rgba); + rgba[3] = 0xff; +} + +//----------------------------------------------------------------------------- +// SSE2 extra functions (mostly for upsampling_sse2.c) + +#if defined(WEBP_USE_SSE2) + +// Process 32 pixels and store the result (16b, 24b or 32b per pixel) in *dst. +void VP8YuvToRgba32_SSE2(const uint8_t* y, const uint8_t* u, const uint8_t* v, + uint8_t* dst); +void VP8YuvToRgb32_SSE2(const uint8_t* y, const uint8_t* u, const uint8_t* v, + uint8_t* dst); +void VP8YuvToBgra32_SSE2(const uint8_t* y, const uint8_t* u, const uint8_t* v, + uint8_t* dst); +void VP8YuvToBgr32_SSE2(const uint8_t* y, const uint8_t* u, const uint8_t* v, + uint8_t* dst); +void VP8YuvToArgb32_SSE2(const uint8_t* y, const uint8_t* u, const uint8_t* v, + uint8_t* dst); +void VP8YuvToRgba444432_SSE2(const uint8_t* y, const uint8_t* u, + const uint8_t* v, uint8_t* dst); +void VP8YuvToRgb56532_SSE2(const uint8_t* y, const uint8_t* u, const uint8_t* v, + uint8_t* dst); + +#endif // WEBP_USE_SSE2 + +//----------------------------------------------------------------------------- +// SSE41 extra functions (mostly for upsampling_sse41.c) + +#if defined(WEBP_USE_SSE41) + +// Process 32 pixels and store the result (16b, 24b or 32b per pixel) in *dst. 
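+// (Note, not an upstream comment: unlike the SSE2 set above, only the RGB and
+// BGR orders get SSE4.1 variants, matching the upsamplers registered in
+// dsp_upsampling_sse41.c.)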
+void VP8YuvToRgb32_SSE41(const uint8_t* y, const uint8_t* u, const uint8_t* v, + uint8_t* dst); +void VP8YuvToBgr32_SSE41(const uint8_t* y, const uint8_t* u, const uint8_t* v, + uint8_t* dst); + +#endif // WEBP_USE_SSE41 + +//------------------------------------------------------------------------------ +// RGB -> YUV conversion + +// Stub functions that can be called with various rounding values: +static WEBP_INLINE int VP8ClipUV(int uv, int rounding) { + uv = (uv + rounding + (128 << (YUV_FIX + 2))) >> (YUV_FIX + 2); + return ((uv & ~0xff) == 0) ? uv : (uv < 0) ? 0 : 255; +} + +static WEBP_INLINE int VP8RGBToY(int r, int g, int b, int rounding) { + const int luma = 16839 * r + 33059 * g + 6420 * b; + return (luma + rounding + (16 << YUV_FIX)) >> YUV_FIX; // no need to clip +} + +static WEBP_INLINE int VP8RGBToU(int r, int g, int b, int rounding) { + const int u = -9719 * r - 19081 * g + 28800 * b; + return VP8ClipUV(u, rounding); +} + +static WEBP_INLINE int VP8RGBToV(int r, int g, int b, int rounding) { + const int v = +28800 * r - 24116 * g - 4684 * b; + return VP8ClipUV(v, rounding); +} + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // WEBP_DSP_YUV_H_ diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_yuv_mips32.c b/vendor/github.com/sizeofint/webpanimation/dsp_yuv_mips32.c new file mode 100644 index 00000000..83637995 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_yuv_mips32.c @@ -0,0 +1,103 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// MIPS version of YUV to RGB upsampling functions. 
+// +// Author(s): Djordje Pesut (djordje.pesut@imgtec.com) +// Jovan Zelincevic (jovan.zelincevic@imgtec.com) + +#include "dsp_dsp.h" + +#if defined(WEBP_USE_MIPS32) + +#include "dsp_yuv.h" + +//------------------------------------------------------------------------------ +// simple point-sampling + +#define ROW_FUNC(FUNC_NAME, XSTEP, R, G, B, A) \ +static void FUNC_NAME(const uint8_t* y, \ + const uint8_t* u, const uint8_t* v, \ + uint8_t* dst, int len) { \ + int i, r, g, b; \ + int temp0, temp1, temp2, temp3, temp4; \ + for (i = 0; i < (len >> 1); i++) { \ + temp1 = MultHi(v[0], 26149); \ + temp3 = MultHi(v[0], 13320); \ + temp2 = MultHi(u[0], 6419); \ + temp4 = MultHi(u[0], 33050); \ + temp0 = MultHi(y[0], 19077); \ + temp1 -= 14234; \ + temp3 -= 8708; \ + temp2 += temp3; \ + temp4 -= 17685; \ + r = VP8Clip8(temp0 + temp1); \ + g = VP8Clip8(temp0 - temp2); \ + b = VP8Clip8(temp0 + temp4); \ + temp0 = MultHi(y[1], 19077); \ + dst[R] = r; \ + dst[G] = g; \ + dst[B] = b; \ + if (A) dst[A] = 0xff; \ + r = VP8Clip8(temp0 + temp1); \ + g = VP8Clip8(temp0 - temp2); \ + b = VP8Clip8(temp0 + temp4); \ + dst[R + XSTEP] = r; \ + dst[G + XSTEP] = g; \ + dst[B + XSTEP] = b; \ + if (A) dst[A + XSTEP] = 0xff; \ + y += 2; \ + ++u; \ + ++v; \ + dst += 2 * XSTEP; \ + } \ + if (len & 1) { \ + temp1 = MultHi(v[0], 26149); \ + temp3 = MultHi(v[0], 13320); \ + temp2 = MultHi(u[0], 6419); \ + temp4 = MultHi(u[0], 33050); \ + temp0 = MultHi(y[0], 19077); \ + temp1 -= 14234; \ + temp3 -= 8708; \ + temp2 += temp3; \ + temp4 -= 17685; \ + r = VP8Clip8(temp0 + temp1); \ + g = VP8Clip8(temp0 - temp2); \ + b = VP8Clip8(temp0 + temp4); \ + dst[R] = r; \ + dst[G] = g; \ + dst[B] = b; \ + if (A) dst[A] = 0xff; \ + } \ +} + +ROW_FUNC(YuvToRgbRow_MIPS32, 3, 0, 1, 2, 0) +ROW_FUNC(YuvToRgbaRow_MIPS32, 4, 0, 1, 2, 3) +ROW_FUNC(YuvToBgrRow_MIPS32, 3, 2, 1, 0, 0) +ROW_FUNC(YuvToBgraRow_MIPS32, 4, 2, 1, 0, 3) + +#undef ROW_FUNC + +//------------------------------------------------------------------------------ +// Entry point + +extern void WebPInitSamplersMIPS32(void); + +WEBP_TSAN_IGNORE_FUNCTION void WebPInitSamplersMIPS32(void) { + WebPSamplers[MODE_RGB] = YuvToRgbRow_MIPS32; + WebPSamplers[MODE_RGBA] = YuvToRgbaRow_MIPS32; + WebPSamplers[MODE_BGR] = YuvToBgrRow_MIPS32; + WebPSamplers[MODE_BGRA] = YuvToBgraRow_MIPS32; +} + +#else // !WEBP_USE_MIPS32 + +WEBP_DSP_INIT_STUB(WebPInitSamplersMIPS32) + +#endif // WEBP_USE_MIPS32 diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_yuv_mips_dsp_r2.c b/vendor/github.com/sizeofint/webpanimation/dsp_yuv_mips_dsp_r2.c new file mode 100644 index 00000000..54d78266 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_yuv_mips_dsp_r2.c @@ -0,0 +1,134 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// MIPS DSPr2 version of YUV to RGB upsampling functions. 
+// +// Author(s): Branimir Vasic (branimir.vasic@imgtec.com) +// Djordje Pesut (djordje.pesut@imgtec.com) + +#include "dsp_dsp.h" + +#if defined(WEBP_USE_MIPS_DSP_R2) + +#include "dsp_yuv.h" + +//------------------------------------------------------------------------------ +// simple point-sampling + +#define ROW_FUNC_PART_1() \ + "lbu %[temp3], 0(%[v]) \n\t" \ + "lbu %[temp4], 0(%[u]) \n\t" \ + "lbu %[temp0], 0(%[y]) \n\t" \ + "mul %[temp1], %[t_con_1], %[temp3] \n\t" \ + "mul %[temp3], %[t_con_2], %[temp3] \n\t" \ + "mul %[temp2], %[t_con_3], %[temp4] \n\t" \ + "mul %[temp4], %[t_con_4], %[temp4] \n\t" \ + "mul %[temp0], %[t_con_5], %[temp0] \n\t" \ + "subu %[temp1], %[temp1], %[t_con_6] \n\t" \ + "subu %[temp3], %[temp3], %[t_con_7] \n\t" \ + "addu %[temp2], %[temp2], %[temp3] \n\t" \ + "subu %[temp4], %[temp4], %[t_con_8] \n\t" \ + +#define ROW_FUNC_PART_2(R, G, B, K) \ + "addu %[temp5], %[temp0], %[temp1] \n\t" \ + "subu %[temp6], %[temp0], %[temp2] \n\t" \ + "addu %[temp7], %[temp0], %[temp4] \n\t" \ +".if " #K " \n\t" \ + "lbu %[temp0], 1(%[y]) \n\t" \ +".endif \n\t" \ + "shll_s.w %[temp5], %[temp5], 17 \n\t" \ + "shll_s.w %[temp6], %[temp6], 17 \n\t" \ +".if " #K " \n\t" \ + "mul %[temp0], %[t_con_5], %[temp0] \n\t" \ +".endif \n\t" \ + "shll_s.w %[temp7], %[temp7], 17 \n\t" \ + "precrqu_s.qb.ph %[temp5], %[temp5], $zero \n\t" \ + "precrqu_s.qb.ph %[temp6], %[temp6], $zero \n\t" \ + "precrqu_s.qb.ph %[temp7], %[temp7], $zero \n\t" \ + "srl %[temp5], %[temp5], 24 \n\t" \ + "srl %[temp6], %[temp6], 24 \n\t" \ + "srl %[temp7], %[temp7], 24 \n\t" \ + "sb %[temp5], " #R "(%[dst]) \n\t" \ + "sb %[temp6], " #G "(%[dst]) \n\t" \ + "sb %[temp7], " #B "(%[dst]) \n\t" \ + +#define ASM_CLOBBER_LIST() \ + : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1), [temp2]"=&r"(temp2), \ + [temp3]"=&r"(temp3), [temp4]"=&r"(temp4), [temp5]"=&r"(temp5), \ + [temp6]"=&r"(temp6), [temp7]"=&r"(temp7) \ + : [t_con_1]"r"(t_con_1), [t_con_2]"r"(t_con_2), [t_con_3]"r"(t_con_3), \ + [t_con_4]"r"(t_con_4), [t_con_5]"r"(t_con_5), [t_con_6]"r"(t_con_6), \ + [u]"r"(u), [v]"r"(v), [y]"r"(y), [dst]"r"(dst), \ + [t_con_7]"r"(t_con_7), [t_con_8]"r"(t_con_8) \ + : "memory", "hi", "lo" \ + +#define ROW_FUNC(FUNC_NAME, XSTEP, R, G, B, A) \ +static void FUNC_NAME(const uint8_t* y, \ + const uint8_t* u, const uint8_t* v, \ + uint8_t* dst, int len) { \ + int i; \ + uint32_t temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7; \ + const int t_con_1 = 26149; \ + const int t_con_2 = 13320; \ + const int t_con_3 = 6419; \ + const int t_con_4 = 33050; \ + const int t_con_5 = 19077; \ + const int t_con_6 = 14234; \ + const int t_con_7 = 8708; \ + const int t_con_8 = 17685; \ + for (i = 0; i < (len >> 1); i++) { \ + __asm__ volatile ( \ + ROW_FUNC_PART_1() \ + ROW_FUNC_PART_2(R, G, B, 1) \ + ROW_FUNC_PART_2(R + XSTEP, G + XSTEP, B + XSTEP, 0) \ + ASM_CLOBBER_LIST() \ + ); \ + if (A) dst[A] = dst[A + XSTEP] = 0xff; \ + y += 2; \ + ++u; \ + ++v; \ + dst += 2 * XSTEP; \ + } \ + if (len & 1) { \ + __asm__ volatile ( \ + ROW_FUNC_PART_1() \ + ROW_FUNC_PART_2(R, G, B, 0) \ + ASM_CLOBBER_LIST() \ + ); \ + if (A) dst[A] = 0xff; \ + } \ +} + +ROW_FUNC(YuvToRgbRow_MIPSdspR2, 3, 0, 1, 2, 0) +ROW_FUNC(YuvToRgbaRow_MIPSdspR2, 4, 0, 1, 2, 3) +ROW_FUNC(YuvToBgrRow_MIPSdspR2, 3, 2, 1, 0, 0) +ROW_FUNC(YuvToBgraRow_MIPSdspR2, 4, 2, 1, 0, 3) + +#undef ROW_FUNC +#undef ASM_CLOBBER_LIST +#undef ROW_FUNC_PART_2 +#undef ROW_FUNC_PART_1 + +//------------------------------------------------------------------------------ +// Entry point + +extern void 
WebPInitSamplersMIPSdspR2(void);
+
+WEBP_TSAN_IGNORE_FUNCTION void WebPInitSamplersMIPSdspR2(void) {
+  WebPSamplers[MODE_RGB] = YuvToRgbRow_MIPSdspR2;
+  WebPSamplers[MODE_RGBA] = YuvToRgbaRow_MIPSdspR2;
+  WebPSamplers[MODE_BGR] = YuvToBgrRow_MIPSdspR2;
+  WebPSamplers[MODE_BGRA] = YuvToBgraRow_MIPSdspR2;
+}
+
+#else  // !WEBP_USE_MIPS_DSP_R2
+
+WEBP_DSP_INIT_STUB(WebPInitSamplersMIPSdspR2)
+
+#endif  // WEBP_USE_MIPS_DSP_R2
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_yuv_neon.c b/vendor/github.com/sizeofint/webpanimation/dsp_yuv_neon.c
new file mode 100644
index 00000000..49e1ed8e
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_yuv_neon.c
@@ -0,0 +1,288 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// YUV->RGB conversion functions
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include "dsp_yuv.h"
+
+#if defined(WEBP_USE_NEON)
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include "dsp_neon.h"
+
+//-----------------------------------------------------------------------------
+
+static uint8x8_t ConvertRGBToY_NEON(const uint8x8_t R,
+                                    const uint8x8_t G,
+                                    const uint8x8_t B) {
+  const uint16x8_t r = vmovl_u8(R);
+  const uint16x8_t g = vmovl_u8(G);
+  const uint16x8_t b = vmovl_u8(B);
+  const uint16x4_t r_lo = vget_low_u16(r);
+  const uint16x4_t r_hi = vget_high_u16(r);
+  const uint16x4_t g_lo = vget_low_u16(g);
+  const uint16x4_t g_hi = vget_high_u16(g);
+  const uint16x4_t b_lo = vget_low_u16(b);
+  const uint16x4_t b_hi = vget_high_u16(b);
+  const uint32x4_t tmp0_lo = vmull_n_u16(         r_lo, 16839u);
+  const uint32x4_t tmp0_hi = vmull_n_u16(         r_hi, 16839u);
+  const uint32x4_t tmp1_lo = vmlal_n_u16(tmp0_lo, g_lo, 33059u);
+  const uint32x4_t tmp1_hi = vmlal_n_u16(tmp0_hi, g_hi, 33059u);
+  const uint32x4_t tmp2_lo = vmlal_n_u16(tmp1_lo, b_lo, 6420u);
+  const uint32x4_t tmp2_hi = vmlal_n_u16(tmp1_hi, b_hi, 6420u);
+  const uint16x8_t Y1 = vcombine_u16(vrshrn_n_u32(tmp2_lo, 16),
+                                     vrshrn_n_u32(tmp2_hi, 16));
+  const uint16x8_t Y2 = vaddq_u16(Y1, vdupq_n_u16(16));
+  return vqmovn_u16(Y2);
+}
+
+static void ConvertRGB24ToY_NEON(const uint8_t* rgb, uint8_t* y, int width) {
+  int i;
+  for (i = 0; i + 8 <= width; i += 8, rgb += 3 * 8) {
+    const uint8x8x3_t RGB = vld3_u8(rgb);
+    const uint8x8_t Y = ConvertRGBToY_NEON(RGB.val[0], RGB.val[1], RGB.val[2]);
+    vst1_u8(y + i, Y);
+  }
+  for (; i < width; ++i, rgb += 3) {  // left-over
+    y[i] = VP8RGBToY(rgb[0], rgb[1], rgb[2], YUV_HALF);
+  }
+}
+
+static void ConvertBGR24ToY_NEON(const uint8_t* bgr, uint8_t* y, int width) {
+  int i;
+  for (i = 0; i + 8 <= width; i += 8, bgr += 3 * 8) {
+    const uint8x8x3_t BGR = vld3_u8(bgr);
+    const uint8x8_t Y = ConvertRGBToY_NEON(BGR.val[2], BGR.val[1], BGR.val[0]);
+    vst1_u8(y + i, Y);
+  }
+  for (; i < width; ++i, bgr += 3) {  // left-over
+    y[i] = VP8RGBToY(bgr[2], bgr[1], bgr[0], YUV_HALF);
+  }
+}
+
+static void ConvertARGBToY_NEON(const uint32_t* argb, uint8_t* y, int width) {
+  int i;
+  for (i = 0; i + 8 <= width; i += 8) {
+    const uint8x8x4_t RGB = vld4_u8((const uint8_t*)&argb[i]);
+    const uint8x8_t Y = ConvertRGBToY_NEON(RGB.val[2], RGB.val[1], RGB.val[0]);
+    vst1_u8(y + i, Y);
+  }
+  for
(; i < width; ++i) { // left-over + const uint32_t p = argb[i]; + y[i] = VP8RGBToY((p >> 16) & 0xff, (p >> 8) & 0xff, (p >> 0) & 0xff, + YUV_HALF); + } +} + +//----------------------------------------------------------------------------- + +// computes: DST_s16 = [(C0 * r + C1 * g + C2 * b) >> 16] + CST +#define MULTIPLY_16b_PREAMBLE(r, g, b) \ + const int16x4_t r_lo = vreinterpret_s16_u16(vget_low_u16(r)); \ + const int16x4_t r_hi = vreinterpret_s16_u16(vget_high_u16(r)); \ + const int16x4_t g_lo = vreinterpret_s16_u16(vget_low_u16(g)); \ + const int16x4_t g_hi = vreinterpret_s16_u16(vget_high_u16(g)); \ + const int16x4_t b_lo = vreinterpret_s16_u16(vget_low_u16(b)); \ + const int16x4_t b_hi = vreinterpret_s16_u16(vget_high_u16(b)) + +#define MULTIPLY_16b(C0, C1, C2, CST, DST_s16) do { \ + const int32x4_t tmp0_lo = vmull_n_s16( r_lo, C0); \ + const int32x4_t tmp0_hi = vmull_n_s16( r_hi, C0); \ + const int32x4_t tmp1_lo = vmlal_n_s16(tmp0_lo, g_lo, C1); \ + const int32x4_t tmp1_hi = vmlal_n_s16(tmp0_hi, g_hi, C1); \ + const int32x4_t tmp2_lo = vmlal_n_s16(tmp1_lo, b_lo, C2); \ + const int32x4_t tmp2_hi = vmlal_n_s16(tmp1_hi, b_hi, C2); \ + const int16x8_t tmp3 = vcombine_s16(vshrn_n_s32(tmp2_lo, 16), \ + vshrn_n_s32(tmp2_hi, 16)); \ + DST_s16 = vaddq_s16(tmp3, vdupq_n_s16(CST)); \ +} while (0) + +// This needs to be a macro, since (128 << SHIFT) needs to be an immediate. +#define CONVERT_RGB_TO_UV(r, g, b, SHIFT, U_DST, V_DST) do { \ + MULTIPLY_16b_PREAMBLE(r, g, b); \ + MULTIPLY_16b(-9719, -19081, 28800, 128 << SHIFT, U_DST); \ + MULTIPLY_16b(28800, -24116, -4684, 128 << SHIFT, V_DST); \ +} while (0) + +static void ConvertRGBA32ToUV_NEON(const uint16_t* rgb, + uint8_t* u, uint8_t* v, int width) { + int i; + for (i = 0; i + 8 <= width; i += 8, rgb += 4 * 8) { + const uint16x8x4_t RGB = vld4q_u16((const uint16_t*)rgb); + int16x8_t U, V; + CONVERT_RGB_TO_UV(RGB.val[0], RGB.val[1], RGB.val[2], 2, U, V); + vst1_u8(u + i, vqrshrun_n_s16(U, 2)); + vst1_u8(v + i, vqrshrun_n_s16(V, 2)); + } + for (; i < width; i += 1, rgb += 4) { + const int r = rgb[0], g = rgb[1], b = rgb[2]; + u[i] = VP8RGBToU(r, g, b, YUV_HALF << 2); + v[i] = VP8RGBToV(r, g, b, YUV_HALF << 2); + } +} + +static void ConvertARGBToUV_NEON(const uint32_t* argb, uint8_t* u, uint8_t* v, + int src_width, int do_store) { + int i; + for (i = 0; i + 16 <= src_width; i += 16, u += 8, v += 8) { + const uint8x16x4_t RGB = vld4q_u8((const uint8_t*)&argb[i]); + const uint16x8_t R = vpaddlq_u8(RGB.val[2]); // pair-wise adds + const uint16x8_t G = vpaddlq_u8(RGB.val[1]); + const uint16x8_t B = vpaddlq_u8(RGB.val[0]); + int16x8_t U_tmp, V_tmp; + CONVERT_RGB_TO_UV(R, G, B, 1, U_tmp, V_tmp); + { + const uint8x8_t U = vqrshrun_n_s16(U_tmp, 1); + const uint8x8_t V = vqrshrun_n_s16(V_tmp, 1); + if (do_store) { + vst1_u8(u, U); + vst1_u8(v, V); + } else { + const uint8x8_t prev_u = vld1_u8(u); + const uint8x8_t prev_v = vld1_u8(v); + vst1_u8(u, vrhadd_u8(U, prev_u)); + vst1_u8(v, vrhadd_u8(V, prev_v)); + } + } + } + if (i < src_width) { // left-over + WebPConvertARGBToUV_C(argb + i, u, v, src_width - i, do_store); + } +} + + +//------------------------------------------------------------------------------ + +extern void WebPInitConvertARGBToYUVNEON(void); + +WEBP_TSAN_IGNORE_FUNCTION void WebPInitConvertARGBToYUVNEON(void) { + WebPConvertRGB24ToY = ConvertRGB24ToY_NEON; + WebPConvertBGR24ToY = ConvertBGR24ToY_NEON; + WebPConvertARGBToY = ConvertARGBToY_NEON; + WebPConvertARGBToUV = ConvertARGBToUV_NEON; + WebPConvertRGBA32ToUV = 
ConvertRGBA32ToUV_NEON; +} + +//------------------------------------------------------------------------------ + +#define MAX_Y ((1 << 10) - 1) // 10b precision over 16b-arithmetic +static uint16_t clip_y_NEON(int v) { + return (v < 0) ? 0 : (v > MAX_Y) ? MAX_Y : (uint16_t)v; +} + +static uint64_t SharpYUVUpdateY_NEON(const uint16_t* ref, const uint16_t* src, + uint16_t* dst, int len) { + int i; + const int16x8_t zero = vdupq_n_s16(0); + const int16x8_t max = vdupq_n_s16(MAX_Y); + uint64x2_t sum = vdupq_n_u64(0); + uint64_t diff; + + for (i = 0; i + 8 <= len; i += 8) { + const int16x8_t A = vreinterpretq_s16_u16(vld1q_u16(ref + i)); + const int16x8_t B = vreinterpretq_s16_u16(vld1q_u16(src + i)); + const int16x8_t C = vreinterpretq_s16_u16(vld1q_u16(dst + i)); + const int16x8_t D = vsubq_s16(A, B); // diff_y + const int16x8_t F = vaddq_s16(C, D); // new_y + const uint16x8_t H = + vreinterpretq_u16_s16(vmaxq_s16(vminq_s16(F, max), zero)); + const int16x8_t I = vabsq_s16(D); // abs(diff_y) + vst1q_u16(dst + i, H); + sum = vpadalq_u32(sum, vpaddlq_u16(vreinterpretq_u16_s16(I))); + } + diff = vgetq_lane_u64(sum, 0) + vgetq_lane_u64(sum, 1); + for (; i < len; ++i) { + const int diff_y = ref[i] - src[i]; + const int new_y = (int)(dst[i]) + diff_y; + dst[i] = clip_y_NEON(new_y); + diff += (uint64_t)(abs(diff_y)); + } + return diff; +} + +static void SharpYUVUpdateRGB_NEON(const int16_t* ref, const int16_t* src, + int16_t* dst, int len) { + int i; + for (i = 0; i + 8 <= len; i += 8) { + const int16x8_t A = vld1q_s16(ref + i); + const int16x8_t B = vld1q_s16(src + i); + const int16x8_t C = vld1q_s16(dst + i); + const int16x8_t D = vsubq_s16(A, B); // diff_uv + const int16x8_t E = vaddq_s16(C, D); // new_uv + vst1q_s16(dst + i, E); + } + for (; i < len; ++i) { + const int diff_uv = ref[i] - src[i]; + dst[i] += diff_uv; + } +} + +static void SharpYUVFilterRow_NEON(const int16_t* A, const int16_t* B, int len, + const uint16_t* best_y, uint16_t* out) { + int i; + const int16x8_t max = vdupq_n_s16(MAX_Y); + const int16x8_t zero = vdupq_n_s16(0); + for (i = 0; i + 8 <= len; i += 8) { + const int16x8_t a0 = vld1q_s16(A + i + 0); + const int16x8_t a1 = vld1q_s16(A + i + 1); + const int16x8_t b0 = vld1q_s16(B + i + 0); + const int16x8_t b1 = vld1q_s16(B + i + 1); + const int16x8_t a0b1 = vaddq_s16(a0, b1); + const int16x8_t a1b0 = vaddq_s16(a1, b0); + const int16x8_t a0a1b0b1 = vaddq_s16(a0b1, a1b0); // A0+A1+B0+B1 + const int16x8_t a0b1_2 = vaddq_s16(a0b1, a0b1); // 2*(A0+B1) + const int16x8_t a1b0_2 = vaddq_s16(a1b0, a1b0); // 2*(A1+B0) + const int16x8_t c0 = vshrq_n_s16(vaddq_s16(a0b1_2, a0a1b0b1), 3); + const int16x8_t c1 = vshrq_n_s16(vaddq_s16(a1b0_2, a0a1b0b1), 3); + const int16x8_t d0 = vaddq_s16(c1, a0); + const int16x8_t d1 = vaddq_s16(c0, a1); + const int16x8_t e0 = vrshrq_n_s16(d0, 1); + const int16x8_t e1 = vrshrq_n_s16(d1, 1); + const int16x8x2_t f = vzipq_s16(e0, e1); + const int16x8_t g0 = vreinterpretq_s16_u16(vld1q_u16(best_y + 2 * i + 0)); + const int16x8_t g1 = vreinterpretq_s16_u16(vld1q_u16(best_y + 2 * i + 8)); + const int16x8_t h0 = vaddq_s16(g0, f.val[0]); + const int16x8_t h1 = vaddq_s16(g1, f.val[1]); + const int16x8_t i0 = vmaxq_s16(vminq_s16(h0, max), zero); + const int16x8_t i1 = vmaxq_s16(vminq_s16(h1, max), zero); + vst1q_u16(out + 2 * i + 0, vreinterpretq_u16_s16(i0)); + vst1q_u16(out + 2 * i + 8, vreinterpretq_u16_s16(i1)); + } + for (; i < len; ++i) { + const int a0b1 = A[i + 0] + B[i + 1]; + const int a1b0 = A[i + 1] + B[i + 0]; + const int a0a1b0b1 = a0b1 + a1b0 + 8; + 
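+    // Upsampling filter: v = (9 * A0 + 3 * A1 + 3 * B0 + B1 + 8) >> 4,
+    // rewritten as (8 * A0 + 2 * (A1 + B0) + (A0 + A1 + B0 + B1 + 8)) >> 4
+    // so the sums above are reused (same derivation as the SSE2 tail below).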
const int v0 = (8 * A[i + 0] + 2 * a1b0 + a0a1b0b1) >> 4;
+    const int v1 = (8 * A[i + 1] + 2 * a0b1 + a0a1b0b1) >> 4;
+    out[2 * i + 0] = clip_y_NEON(best_y[2 * i + 0] + v0);
+    out[2 * i + 1] = clip_y_NEON(best_y[2 * i + 1] + v1);
+  }
+}
+#undef MAX_Y
+
+//------------------------------------------------------------------------------
+
+extern void WebPInitSharpYUVNEON(void);
+
+WEBP_TSAN_IGNORE_FUNCTION void WebPInitSharpYUVNEON(void) {
+  WebPSharpYUVUpdateY = SharpYUVUpdateY_NEON;
+  WebPSharpYUVUpdateRGB = SharpYUVUpdateRGB_NEON;
+  WebPSharpYUVFilterRow = SharpYUVFilterRow_NEON;
+}
+
+#else  // !WEBP_USE_NEON
+
+WEBP_DSP_INIT_STUB(WebPInitConvertARGBToYUVNEON)
+WEBP_DSP_INIT_STUB(WebPInitSharpYUVNEON)
+
+#endif  // WEBP_USE_NEON
diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_yuv_sse2.c b/vendor/github.com/sizeofint/webpanimation/dsp_yuv_sse2.c
new file mode 100644
index 00000000..47321ff5
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/dsp_yuv_sse2.c
@@ -0,0 +1,874 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// YUV->RGB conversion functions
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include "dsp_yuv.h"
+
+#if defined(WEBP_USE_SSE2)
+
+#include "dsp_common_sse2.h"
+#include <stdlib.h>
+#include <emmintrin.h>
+
+//-----------------------------------------------------------------------------
+// Convert spans of 32 pixels to various RGB formats for the fancy upsampler.
+
+// These constants are 14b fixed-point version of ITU-R BT.601 constants.
+// R = (19077 * y             + 26149 * v - 14234) >> 6
+// G = (19077 * y -  6419 * u - 13320 * v +  8708) >> 6
+// B = (19077 * y + 33050 * u             - 17685) >> 6
+static void ConvertYUV444ToRGB_SSE2(const __m128i* const Y0,
+                                    const __m128i* const U0,
+                                    const __m128i* const V0,
+                                    __m128i* const R,
+                                    __m128i* const G,
+                                    __m128i* const B) {
+  const __m128i k19077 = _mm_set1_epi16(19077);
+  const __m128i k26149 = _mm_set1_epi16(26149);
+  const __m128i k14234 = _mm_set1_epi16(14234);
+  // 33050 doesn't fit in a signed short: only use this with unsigned arithmetic
+  const __m128i k33050 = _mm_set1_epi16((short)33050);
+  const __m128i k17685 = _mm_set1_epi16(17685);
+  const __m128i k6419  = _mm_set1_epi16(6419);
+  const __m128i k13320 = _mm_set1_epi16(13320);
+  const __m128i k8708  = _mm_set1_epi16(8708);
+
+  const __m128i Y1 = _mm_mulhi_epu16(*Y0, k19077);
+
+  const __m128i R0 = _mm_mulhi_epu16(*V0, k26149);
+  const __m128i R1 = _mm_sub_epi16(Y1, k14234);
+  const __m128i R2 = _mm_add_epi16(R1, R0);
+
+  const __m128i G0 = _mm_mulhi_epu16(*U0, k6419);
+  const __m128i G1 = _mm_mulhi_epu16(*V0, k13320);
+  const __m128i G2 = _mm_add_epi16(Y1, k8708);
+  const __m128i G3 = _mm_add_epi16(G0, G1);
+  const __m128i G4 = _mm_sub_epi16(G2, G3);
+
+  // be careful with the saturated *unsigned* arithmetic here!
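+  // (u * 33050) >> 16 added to Y1 can exceed 32767, so the blue channel is
+  // computed with saturating unsigned add/sub and descaled with a logical
+  // shift below.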
+ const __m128i B0 = _mm_mulhi_epu16(*U0, k33050); + const __m128i B1 = _mm_adds_epu16(B0, Y1); + const __m128i B2 = _mm_subs_epu16(B1, k17685); + + // use logical shift for B2, which can be larger than 32767 + *R = _mm_srai_epi16(R2, 6); // range: [-14234, 30815] + *G = _mm_srai_epi16(G4, 6); // range: [-10953, 27710] + *B = _mm_srli_epi16(B2, 6); // range: [0, 34238] +} + +// Load the bytes into the *upper* part of 16b words. That's "<< 8", basically. +static WEBP_INLINE __m128i Load_HI_16_SSE2(const uint8_t* src) { + const __m128i zero = _mm_setzero_si128(); + return _mm_unpacklo_epi8(zero, _mm_loadl_epi64((const __m128i*)src)); +} + +// Load and replicate the U/V samples +static WEBP_INLINE __m128i Load_UV_HI_8_SSE2(const uint8_t* src) { + const __m128i zero = _mm_setzero_si128(); + const __m128i tmp0 = _mm_cvtsi32_si128(*(const uint32_t*)src); + const __m128i tmp1 = _mm_unpacklo_epi8(zero, tmp0); + return _mm_unpacklo_epi16(tmp1, tmp1); // replicate samples +} + +// Convert 32 samples of YUV444 to R/G/B +static void YUV444ToRGB_SSE2(const uint8_t* const y, + const uint8_t* const u, + const uint8_t* const v, + __m128i* const R, __m128i* const G, + __m128i* const B) { + const __m128i Y0 = Load_HI_16_SSE2(y), U0 = Load_HI_16_SSE2(u), + V0 = Load_HI_16_SSE2(v); + ConvertYUV444ToRGB_SSE2(&Y0, &U0, &V0, R, G, B); +} + +// Convert 32 samples of YUV420 to R/G/B +static void YUV420ToRGB_SSE2(const uint8_t* const y, + const uint8_t* const u, + const uint8_t* const v, + __m128i* const R, __m128i* const G, + __m128i* const B) { + const __m128i Y0 = Load_HI_16_SSE2(y), U0 = Load_UV_HI_8_SSE2(u), + V0 = Load_UV_HI_8_SSE2(v); + ConvertYUV444ToRGB_SSE2(&Y0, &U0, &V0, R, G, B); +} + +// Pack R/G/B/A results into 32b output. +static WEBP_INLINE void PackAndStore4_SSE2(const __m128i* const R, + const __m128i* const G, + const __m128i* const B, + const __m128i* const A, + uint8_t* const dst) { + const __m128i rb = _mm_packus_epi16(*R, *B); + const __m128i ga = _mm_packus_epi16(*G, *A); + const __m128i rg = _mm_unpacklo_epi8(rb, ga); + const __m128i ba = _mm_unpackhi_epi8(rb, ga); + const __m128i RGBA_lo = _mm_unpacklo_epi16(rg, ba); + const __m128i RGBA_hi = _mm_unpackhi_epi16(rg, ba); + _mm_storeu_si128((__m128i*)(dst + 0), RGBA_lo); + _mm_storeu_si128((__m128i*)(dst + 16), RGBA_hi); +} + +// Pack R/G/B/A results into 16b output. +static WEBP_INLINE void PackAndStore4444_SSE2(const __m128i* const R, + const __m128i* const G, + const __m128i* const B, + const __m128i* const A, + uint8_t* const dst) { +#if (WEBP_SWAP_16BIT_CSP == 0) + const __m128i rg0 = _mm_packus_epi16(*R, *G); + const __m128i ba0 = _mm_packus_epi16(*B, *A); +#else + const __m128i rg0 = _mm_packus_epi16(*B, *A); + const __m128i ba0 = _mm_packus_epi16(*R, *G); +#endif + const __m128i mask_0xf0 = _mm_set1_epi8(0xf0); + const __m128i rb1 = _mm_unpacklo_epi8(rg0, ba0); // rbrbrbrbrb... + const __m128i ga1 = _mm_unpackhi_epi8(rg0, ba0); // gagagagaga... + const __m128i rb2 = _mm_and_si128(rb1, mask_0xf0); + const __m128i ga2 = _mm_srli_epi16(_mm_and_si128(ga1, mask_0xf0), 4); + const __m128i rgba4444 = _mm_or_si128(rb2, ga2); + _mm_storeu_si128((__m128i*)dst, rgba4444); +} + +// Pack R/G/B results into 16b output. 
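+// (RGB565: top 5 bits of R, 6 bits of G, 5 bits of B, optionally byte-swapped
+// when WEBP_SWAP_16BIT_CSP is set.)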
+static WEBP_INLINE void PackAndStore565_SSE2(const __m128i* const R, + const __m128i* const G, + const __m128i* const B, + uint8_t* const dst) { + const __m128i r0 = _mm_packus_epi16(*R, *R); + const __m128i g0 = _mm_packus_epi16(*G, *G); + const __m128i b0 = _mm_packus_epi16(*B, *B); + const __m128i r1 = _mm_and_si128(r0, _mm_set1_epi8(0xf8)); + const __m128i b1 = _mm_and_si128(_mm_srli_epi16(b0, 3), _mm_set1_epi8(0x1f)); + const __m128i g1 = _mm_srli_epi16(_mm_and_si128(g0, _mm_set1_epi8(0xe0)), 5); + const __m128i g2 = _mm_slli_epi16(_mm_and_si128(g0, _mm_set1_epi8(0x1c)), 3); + const __m128i rg = _mm_or_si128(r1, g1); + const __m128i gb = _mm_or_si128(g2, b1); +#if (WEBP_SWAP_16BIT_CSP == 0) + const __m128i rgb565 = _mm_unpacklo_epi8(rg, gb); +#else + const __m128i rgb565 = _mm_unpacklo_epi8(gb, rg); +#endif + _mm_storeu_si128((__m128i*)dst, rgb565); +} + +// Pack the planar buffers +// rrrr... rrrr... gggg... gggg... bbbb... bbbb.... +// triplet by triplet in the output buffer rgb as rgbrgbrgbrgb ... +static WEBP_INLINE void PlanarTo24b_SSE2(__m128i* const in0, __m128i* const in1, + __m128i* const in2, __m128i* const in3, + __m128i* const in4, __m128i* const in5, + uint8_t* const rgb) { + // The input is 6 registers of sixteen 8b but for the sake of explanation, + // let's take 6 registers of four 8b values. + // To pack, we will keep taking one every two 8b integer and move it + // around as follows: + // Input: + // r0r1r2r3 | r4r5r6r7 | g0g1g2g3 | g4g5g6g7 | b0b1b2b3 | b4b5b6b7 + // Split the 6 registers in two sets of 3 registers: the first set as the even + // 8b bytes, the second the odd ones: + // r0r2r4r6 | g0g2g4g6 | b0b2b4b6 | r1r3r5r7 | g1g3g5g7 | b1b3b5b7 + // Repeat the same permutations twice more: + // r0r4g0g4 | b0b4r1r5 | g1g5b1b5 | r2r6g2g6 | b2b6r3r7 | g3g7b3b7 + // r0g0b0r1 | g1b1r2g2 | b2r3g3b3 | r4g4b4r5 | g5b5r6g6 | b6r7g7b7 + VP8PlanarTo24b_SSE2(in0, in1, in2, in3, in4, in5); + + _mm_storeu_si128((__m128i*)(rgb + 0), *in0); + _mm_storeu_si128((__m128i*)(rgb + 16), *in1); + _mm_storeu_si128((__m128i*)(rgb + 32), *in2); + _mm_storeu_si128((__m128i*)(rgb + 48), *in3); + _mm_storeu_si128((__m128i*)(rgb + 64), *in4); + _mm_storeu_si128((__m128i*)(rgb + 80), *in5); +} + +void VP8YuvToRgba32_SSE2(const uint8_t* y, const uint8_t* u, const uint8_t* v, + uint8_t* dst) { + const __m128i kAlpha = _mm_set1_epi16(255); + int n; + for (n = 0; n < 32; n += 8, dst += 32) { + __m128i R, G, B; + YUV444ToRGB_SSE2(y + n, u + n, v + n, &R, &G, &B); + PackAndStore4_SSE2(&R, &G, &B, &kAlpha, dst); + } +} + +void VP8YuvToBgra32_SSE2(const uint8_t* y, const uint8_t* u, const uint8_t* v, + uint8_t* dst) { + const __m128i kAlpha = _mm_set1_epi16(255); + int n; + for (n = 0; n < 32; n += 8, dst += 32) { + __m128i R, G, B; + YUV444ToRGB_SSE2(y + n, u + n, v + n, &R, &G, &B); + PackAndStore4_SSE2(&B, &G, &R, &kAlpha, dst); + } +} + +void VP8YuvToArgb32_SSE2(const uint8_t* y, const uint8_t* u, const uint8_t* v, + uint8_t* dst) { + const __m128i kAlpha = _mm_set1_epi16(255); + int n; + for (n = 0; n < 32; n += 8, dst += 32) { + __m128i R, G, B; + YUV444ToRGB_SSE2(y + n, u + n, v + n, &R, &G, &B); + PackAndStore4_SSE2(&kAlpha, &R, &G, &B, dst); + } +} + +void VP8YuvToRgba444432_SSE2(const uint8_t* y, const uint8_t* u, + const uint8_t* v, uint8_t* dst) { + const __m128i kAlpha = _mm_set1_epi16(255); + int n; + for (n = 0; n < 32; n += 8, dst += 16) { + __m128i R, G, B; + YUV444ToRGB_SSE2(y + n, u + n, v + n, &R, &G, &B); + PackAndStore4444_SSE2(&R, &G, &B, &kAlpha, dst); + } +} + +void 
VP8YuvToRgb56532_SSE2(const uint8_t* y, const uint8_t* u, const uint8_t* v, + uint8_t* dst) { + int n; + for (n = 0; n < 32; n += 8, dst += 16) { + __m128i R, G, B; + YUV444ToRGB_SSE2(y + n, u + n, v + n, &R, &G, &B); + PackAndStore565_SSE2(&R, &G, &B, dst); + } +} + +void VP8YuvToRgb32_SSE2(const uint8_t* y, const uint8_t* u, const uint8_t* v, + uint8_t* dst) { + __m128i R0, R1, R2, R3, G0, G1, G2, G3, B0, B1, B2, B3; + __m128i rgb0, rgb1, rgb2, rgb3, rgb4, rgb5; + + YUV444ToRGB_SSE2(y + 0, u + 0, v + 0, &R0, &G0, &B0); + YUV444ToRGB_SSE2(y + 8, u + 8, v + 8, &R1, &G1, &B1); + YUV444ToRGB_SSE2(y + 16, u + 16, v + 16, &R2, &G2, &B2); + YUV444ToRGB_SSE2(y + 24, u + 24, v + 24, &R3, &G3, &B3); + + // Cast to 8b and store as RRRRGGGGBBBB. + rgb0 = _mm_packus_epi16(R0, R1); + rgb1 = _mm_packus_epi16(R2, R3); + rgb2 = _mm_packus_epi16(G0, G1); + rgb3 = _mm_packus_epi16(G2, G3); + rgb4 = _mm_packus_epi16(B0, B1); + rgb5 = _mm_packus_epi16(B2, B3); + + // Pack as RGBRGBRGBRGB. + PlanarTo24b_SSE2(&rgb0, &rgb1, &rgb2, &rgb3, &rgb4, &rgb5, dst); +} + +void VP8YuvToBgr32_SSE2(const uint8_t* y, const uint8_t* u, const uint8_t* v, + uint8_t* dst) { + __m128i R0, R1, R2, R3, G0, G1, G2, G3, B0, B1, B2, B3; + __m128i bgr0, bgr1, bgr2, bgr3, bgr4, bgr5; + + YUV444ToRGB_SSE2(y + 0, u + 0, v + 0, &R0, &G0, &B0); + YUV444ToRGB_SSE2(y + 8, u + 8, v + 8, &R1, &G1, &B1); + YUV444ToRGB_SSE2(y + 16, u + 16, v + 16, &R2, &G2, &B2); + YUV444ToRGB_SSE2(y + 24, u + 24, v + 24, &R3, &G3, &B3); + + // Cast to 8b and store as BBBBGGGGRRRR. + bgr0 = _mm_packus_epi16(B0, B1); + bgr1 = _mm_packus_epi16(B2, B3); + bgr2 = _mm_packus_epi16(G0, G1); + bgr3 = _mm_packus_epi16(G2, G3); + bgr4 = _mm_packus_epi16(R0, R1); + bgr5= _mm_packus_epi16(R2, R3); + + // Pack as BGRBGRBGRBGR. + PlanarTo24b_SSE2(&bgr0, &bgr1, &bgr2, &bgr3, &bgr4, &bgr5, dst); +} + +//----------------------------------------------------------------------------- +// Arbitrary-length row conversion functions + +static void YuvToRgbaRow_SSE2(const uint8_t* y, + const uint8_t* u, const uint8_t* v, + uint8_t* dst, int len) { + const __m128i kAlpha = _mm_set1_epi16(255); + int n; + for (n = 0; n + 8 <= len; n += 8, dst += 32) { + __m128i R, G, B; + YUV420ToRGB_SSE2(y, u, v, &R, &G, &B); + PackAndStore4_SSE2(&R, &G, &B, &kAlpha, dst); + y += 8; + u += 4; + v += 4; + } + for (; n < len; ++n) { // Finish off + VP8YuvToRgba(y[0], u[0], v[0], dst); + dst += 4; + y += 1; + u += (n & 1); + v += (n & 1); + } +} + +static void YuvToBgraRow_SSE2(const uint8_t* y, + const uint8_t* u, const uint8_t* v, + uint8_t* dst, int len) { + const __m128i kAlpha = _mm_set1_epi16(255); + int n; + for (n = 0; n + 8 <= len; n += 8, dst += 32) { + __m128i R, G, B; + YUV420ToRGB_SSE2(y, u, v, &R, &G, &B); + PackAndStore4_SSE2(&B, &G, &R, &kAlpha, dst); + y += 8; + u += 4; + v += 4; + } + for (; n < len; ++n) { // Finish off + VP8YuvToBgra(y[0], u[0], v[0], dst); + dst += 4; + y += 1; + u += (n & 1); + v += (n & 1); + } +} + +static void YuvToArgbRow_SSE2(const uint8_t* y, + const uint8_t* u, const uint8_t* v, + uint8_t* dst, int len) { + const __m128i kAlpha = _mm_set1_epi16(255); + int n; + for (n = 0; n + 8 <= len; n += 8, dst += 32) { + __m128i R, G, B; + YUV420ToRGB_SSE2(y, u, v, &R, &G, &B); + PackAndStore4_SSE2(&kAlpha, &R, &G, &B, dst); + y += 8; + u += 4; + v += 4; + } + for (; n < len; ++n) { // Finish off + VP8YuvToArgb(y[0], u[0], v[0], dst); + dst += 4; + y += 1; + u += (n & 1); + v += (n & 1); + } +} + +static void YuvToRgbRow_SSE2(const uint8_t* y, + const uint8_t* u, const 
uint8_t* v, + uint8_t* dst, int len) { + int n; + for (n = 0; n + 32 <= len; n += 32, dst += 32 * 3) { + __m128i R0, R1, R2, R3, G0, G1, G2, G3, B0, B1, B2, B3; + __m128i rgb0, rgb1, rgb2, rgb3, rgb4, rgb5; + + YUV420ToRGB_SSE2(y + 0, u + 0, v + 0, &R0, &G0, &B0); + YUV420ToRGB_SSE2(y + 8, u + 4, v + 4, &R1, &G1, &B1); + YUV420ToRGB_SSE2(y + 16, u + 8, v + 8, &R2, &G2, &B2); + YUV420ToRGB_SSE2(y + 24, u + 12, v + 12, &R3, &G3, &B3); + + // Cast to 8b and store as RRRRGGGGBBBB. + rgb0 = _mm_packus_epi16(R0, R1); + rgb1 = _mm_packus_epi16(R2, R3); + rgb2 = _mm_packus_epi16(G0, G1); + rgb3 = _mm_packus_epi16(G2, G3); + rgb4 = _mm_packus_epi16(B0, B1); + rgb5 = _mm_packus_epi16(B2, B3); + + // Pack as RGBRGBRGBRGB. + PlanarTo24b_SSE2(&rgb0, &rgb1, &rgb2, &rgb3, &rgb4, &rgb5, dst); + + y += 32; + u += 16; + v += 16; + } + for (; n < len; ++n) { // Finish off + VP8YuvToRgb(y[0], u[0], v[0], dst); + dst += 3; + y += 1; + u += (n & 1); + v += (n & 1); + } +} + +static void YuvToBgrRow_SSE2(const uint8_t* y, + const uint8_t* u, const uint8_t* v, + uint8_t* dst, int len) { + int n; + for (n = 0; n + 32 <= len; n += 32, dst += 32 * 3) { + __m128i R0, R1, R2, R3, G0, G1, G2, G3, B0, B1, B2, B3; + __m128i bgr0, bgr1, bgr2, bgr3, bgr4, bgr5; + + YUV420ToRGB_SSE2(y + 0, u + 0, v + 0, &R0, &G0, &B0); + YUV420ToRGB_SSE2(y + 8, u + 4, v + 4, &R1, &G1, &B1); + YUV420ToRGB_SSE2(y + 16, u + 8, v + 8, &R2, &G2, &B2); + YUV420ToRGB_SSE2(y + 24, u + 12, v + 12, &R3, &G3, &B3); + + // Cast to 8b and store as BBBBGGGGRRRR. + bgr0 = _mm_packus_epi16(B0, B1); + bgr1 = _mm_packus_epi16(B2, B3); + bgr2 = _mm_packus_epi16(G0, G1); + bgr3 = _mm_packus_epi16(G2, G3); + bgr4 = _mm_packus_epi16(R0, R1); + bgr5 = _mm_packus_epi16(R2, R3); + + // Pack as BGRBGRBGRBGR. + PlanarTo24b_SSE2(&bgr0, &bgr1, &bgr2, &bgr3, &bgr4, &bgr5, dst); + + y += 32; + u += 16; + v += 16; + } + for (; n < len; ++n) { // Finish off + VP8YuvToBgr(y[0], u[0], v[0], dst); + dst += 3; + y += 1; + u += (n & 1); + v += (n & 1); + } +} + +//------------------------------------------------------------------------------ +// Entry point + +extern void WebPInitSamplersSSE2(void); + +WEBP_TSAN_IGNORE_FUNCTION void WebPInitSamplersSSE2(void) { + WebPSamplers[MODE_RGB] = YuvToRgbRow_SSE2; + WebPSamplers[MODE_RGBA] = YuvToRgbaRow_SSE2; + WebPSamplers[MODE_BGR] = YuvToBgrRow_SSE2; + WebPSamplers[MODE_BGRA] = YuvToBgraRow_SSE2; + WebPSamplers[MODE_ARGB] = YuvToArgbRow_SSE2; +} + +//------------------------------------------------------------------------------ +// RGB24/32 -> YUV converters + +// Load eight 16b-words from *src. +#define LOAD_16(src) _mm_loadu_si128((const __m128i*)(src)) +// Store either 16b-words into *dst +#define STORE_16(V, dst) _mm_storeu_si128((__m128i*)(dst), (V)) + +// Function that inserts a value of the second half of the in buffer in between +// every two char of the first half. +static WEBP_INLINE void RGB24PackedToPlanarHelper_SSE2( + const __m128i* const in /*in[6]*/, __m128i* const out /*out[6]*/) { + out[0] = _mm_unpacklo_epi8(in[0], in[3]); + out[1] = _mm_unpackhi_epi8(in[0], in[3]); + out[2] = _mm_unpacklo_epi8(in[1], in[4]); + out[3] = _mm_unpackhi_epi8(in[1], in[4]); + out[4] = _mm_unpacklo_epi8(in[2], in[5]); + out[5] = _mm_unpackhi_epi8(in[2], in[5]); +} + +// Unpack the 8b input rgbrgbrgbrgb ... as contiguous registers: +// rrrr... rrrr... gggg... gggg... bbbb... bbbb.... +// Similar to PlanarTo24bHelper(), but in reverse order. 
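+// Repeatedly interleaving even/odd bytes with RGB24PackedToPlanarHelper_SSE2()
+// (the inverse of the split used by PlanarTo24b_SSE2()) walks the packed
+// rgbrgb... layout back to fully planar registers.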
+static WEBP_INLINE void RGB24PackedToPlanar_SSE2( + const uint8_t* const rgb, __m128i* const out /*out[6]*/) { + __m128i tmp[6]; + tmp[0] = _mm_loadu_si128((const __m128i*)(rgb + 0)); + tmp[1] = _mm_loadu_si128((const __m128i*)(rgb + 16)); + tmp[2] = _mm_loadu_si128((const __m128i*)(rgb + 32)); + tmp[3] = _mm_loadu_si128((const __m128i*)(rgb + 48)); + tmp[4] = _mm_loadu_si128((const __m128i*)(rgb + 64)); + tmp[5] = _mm_loadu_si128((const __m128i*)(rgb + 80)); + + RGB24PackedToPlanarHelper_SSE2(tmp, out); + RGB24PackedToPlanarHelper_SSE2(out, tmp); + RGB24PackedToPlanarHelper_SSE2(tmp, out); + RGB24PackedToPlanarHelper_SSE2(out, tmp); + RGB24PackedToPlanarHelper_SSE2(tmp, out); +} + +// Convert 8 packed ARGB to r[], g[], b[] +static WEBP_INLINE void RGB32PackedToPlanar_SSE2(const uint32_t* const argb, + __m128i* const rgb /*in[6]*/) { + const __m128i zero = _mm_setzero_si128(); + __m128i a0 = LOAD_16(argb + 0); + __m128i a1 = LOAD_16(argb + 4); + __m128i a2 = LOAD_16(argb + 8); + __m128i a3 = LOAD_16(argb + 12); + VP8L32bToPlanar_SSE2(&a0, &a1, &a2, &a3); + rgb[0] = _mm_unpacklo_epi8(a1, zero); + rgb[1] = _mm_unpackhi_epi8(a1, zero); + rgb[2] = _mm_unpacklo_epi8(a2, zero); + rgb[3] = _mm_unpackhi_epi8(a2, zero); + rgb[4] = _mm_unpacklo_epi8(a3, zero); + rgb[5] = _mm_unpackhi_epi8(a3, zero); +} + +// This macro computes (RG * MULT_RG + GB * MULT_GB + ROUNDER) >> DESCALE_FIX +// It's a macro and not a function because we need to use immediate values with +// srai_epi32, e.g. +#define TRANSFORM(RG_LO, RG_HI, GB_LO, GB_HI, MULT_RG, MULT_GB, \ + ROUNDER, DESCALE_FIX, OUT) do { \ + const __m128i V0_lo = _mm_madd_epi16(RG_LO, MULT_RG); \ + const __m128i V0_hi = _mm_madd_epi16(RG_HI, MULT_RG); \ + const __m128i V1_lo = _mm_madd_epi16(GB_LO, MULT_GB); \ + const __m128i V1_hi = _mm_madd_epi16(GB_HI, MULT_GB); \ + const __m128i V2_lo = _mm_add_epi32(V0_lo, V1_lo); \ + const __m128i V2_hi = _mm_add_epi32(V0_hi, V1_hi); \ + const __m128i V3_lo = _mm_add_epi32(V2_lo, ROUNDER); \ + const __m128i V3_hi = _mm_add_epi32(V2_hi, ROUNDER); \ + const __m128i V5_lo = _mm_srai_epi32(V3_lo, DESCALE_FIX); \ + const __m128i V5_hi = _mm_srai_epi32(V3_hi, DESCALE_FIX); \ + (OUT) = _mm_packs_epi32(V5_lo, V5_hi); \ +} while (0) + +#define MK_CST_16(A, B) _mm_set_epi16((B), (A), (B), (A), (B), (A), (B), (A)) +static WEBP_INLINE void ConvertRGBToY_SSE2(const __m128i* const R, + const __m128i* const G, + const __m128i* const B, + __m128i* const Y) { + const __m128i kRG_y = MK_CST_16(16839, 33059 - 16384); + const __m128i kGB_y = MK_CST_16(16384, 6420); + const __m128i kHALF_Y = _mm_set1_epi32((16 << YUV_FIX) + YUV_HALF); + + const __m128i RG_lo = _mm_unpacklo_epi16(*R, *G); + const __m128i RG_hi = _mm_unpackhi_epi16(*R, *G); + const __m128i GB_lo = _mm_unpacklo_epi16(*G, *B); + const __m128i GB_hi = _mm_unpackhi_epi16(*G, *B); + TRANSFORM(RG_lo, RG_hi, GB_lo, GB_hi, kRG_y, kGB_y, kHALF_Y, YUV_FIX, *Y); +} + +static WEBP_INLINE void ConvertRGBToUV_SSE2(const __m128i* const R, + const __m128i* const G, + const __m128i* const B, + __m128i* const U, + __m128i* const V) { + const __m128i kRG_u = MK_CST_16(-9719, -19081); + const __m128i kGB_u = MK_CST_16(0, 28800); + const __m128i kRG_v = MK_CST_16(28800, 0); + const __m128i kGB_v = MK_CST_16(-24116, -4684); + const __m128i kHALF_UV = _mm_set1_epi32(((128 << YUV_FIX) + YUV_HALF) << 2); + + const __m128i RG_lo = _mm_unpacklo_epi16(*R, *G); + const __m128i RG_hi = _mm_unpackhi_epi16(*R, *G); + const __m128i GB_lo = _mm_unpacklo_epi16(*G, *B); + const __m128i GB_hi = 
_mm_unpackhi_epi16(*G, *B); + TRANSFORM(RG_lo, RG_hi, GB_lo, GB_hi, kRG_u, kGB_u, + kHALF_UV, YUV_FIX + 2, *U); + TRANSFORM(RG_lo, RG_hi, GB_lo, GB_hi, kRG_v, kGB_v, + kHALF_UV, YUV_FIX + 2, *V); +} + +#undef MK_CST_16 +#undef TRANSFORM + +static void ConvertRGB24ToY_SSE2(const uint8_t* rgb, uint8_t* y, int width) { + const int max_width = width & ~31; + int i; + for (i = 0; i < max_width; rgb += 3 * 16 * 2) { + __m128i rgb_plane[6]; + int j; + + RGB24PackedToPlanar_SSE2(rgb, rgb_plane); + + for (j = 0; j < 2; ++j, i += 16) { + const __m128i zero = _mm_setzero_si128(); + __m128i r, g, b, Y0, Y1; + + // Convert to 16-bit Y. + r = _mm_unpacklo_epi8(rgb_plane[0 + j], zero); + g = _mm_unpacklo_epi8(rgb_plane[2 + j], zero); + b = _mm_unpacklo_epi8(rgb_plane[4 + j], zero); + ConvertRGBToY_SSE2(&r, &g, &b, &Y0); + + // Convert to 16-bit Y. + r = _mm_unpackhi_epi8(rgb_plane[0 + j], zero); + g = _mm_unpackhi_epi8(rgb_plane[2 + j], zero); + b = _mm_unpackhi_epi8(rgb_plane[4 + j], zero); + ConvertRGBToY_SSE2(&r, &g, &b, &Y1); + + // Cast to 8-bit and store. + STORE_16(_mm_packus_epi16(Y0, Y1), y + i); + } + } + for (; i < width; ++i, rgb += 3) { // left-over + y[i] = VP8RGBToY(rgb[0], rgb[1], rgb[2], YUV_HALF); + } +} + +static void ConvertBGR24ToY_SSE2(const uint8_t* bgr, uint8_t* y, int width) { + const int max_width = width & ~31; + int i; + for (i = 0; i < max_width; bgr += 3 * 16 * 2) { + __m128i bgr_plane[6]; + int j; + + RGB24PackedToPlanar_SSE2(bgr, bgr_plane); + + for (j = 0; j < 2; ++j, i += 16) { + const __m128i zero = _mm_setzero_si128(); + __m128i r, g, b, Y0, Y1; + + // Convert to 16-bit Y. + b = _mm_unpacklo_epi8(bgr_plane[0 + j], zero); + g = _mm_unpacklo_epi8(bgr_plane[2 + j], zero); + r = _mm_unpacklo_epi8(bgr_plane[4 + j], zero); + ConvertRGBToY_SSE2(&r, &g, &b, &Y0); + + // Convert to 16-bit Y. + b = _mm_unpackhi_epi8(bgr_plane[0 + j], zero); + g = _mm_unpackhi_epi8(bgr_plane[2 + j], zero); + r = _mm_unpackhi_epi8(bgr_plane[4 + j], zero); + ConvertRGBToY_SSE2(&r, &g, &b, &Y1); + + // Cast to 8-bit and store. + STORE_16(_mm_packus_epi16(Y0, Y1), y + i); + } + } + for (; i < width; ++i, bgr += 3) { // left-over + y[i] = VP8RGBToY(bgr[2], bgr[1], bgr[0], YUV_HALF); + } +} + +static void ConvertARGBToY_SSE2(const uint32_t* argb, uint8_t* y, int width) { + const int max_width = width & ~15; + int i; + for (i = 0; i < max_width; i += 16) { + __m128i Y0, Y1, rgb[6]; + RGB32PackedToPlanar_SSE2(&argb[i], rgb); + ConvertRGBToY_SSE2(&rgb[0], &rgb[2], &rgb[4], &Y0); + ConvertRGBToY_SSE2(&rgb[1], &rgb[3], &rgb[5], &Y1); + STORE_16(_mm_packus_epi16(Y0, Y1), y + i); + } + for (; i < width; ++i) { // left-over + const uint32_t p = argb[i]; + y[i] = VP8RGBToY((p >> 16) & 0xff, (p >> 8) & 0xff, (p >> 0) & 0xff, + YUV_HALF); + } +} + +// Horizontal add (doubled) of two 16b values, result is 16b. +// in: A | B | C | D | ... -> out: 2*(A+B) | 2*(C+D) | ... 
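+// 2*(A+B) is four times the pair average; ConvertRGBToUV_SSE2() compensates
+// with its rounder scaled by << 2 and its YUV_FIX + 2 descale.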
+static void HorizontalAddPack_SSE2(const __m128i* const A, + const __m128i* const B, + __m128i* const out) { + const __m128i k2 = _mm_set1_epi16(2); + const __m128i C = _mm_madd_epi16(*A, k2); + const __m128i D = _mm_madd_epi16(*B, k2); + *out = _mm_packs_epi32(C, D); +} + +static void ConvertARGBToUV_SSE2(const uint32_t* argb, + uint8_t* u, uint8_t* v, + int src_width, int do_store) { + const int max_width = src_width & ~31; + int i; + for (i = 0; i < max_width; i += 32, u += 16, v += 16) { + __m128i rgb[6], U0, V0, U1, V1; + RGB32PackedToPlanar_SSE2(&argb[i], rgb); + HorizontalAddPack_SSE2(&rgb[0], &rgb[1], &rgb[0]); + HorizontalAddPack_SSE2(&rgb[2], &rgb[3], &rgb[2]); + HorizontalAddPack_SSE2(&rgb[4], &rgb[5], &rgb[4]); + ConvertRGBToUV_SSE2(&rgb[0], &rgb[2], &rgb[4], &U0, &V0); + + RGB32PackedToPlanar_SSE2(&argb[i + 16], rgb); + HorizontalAddPack_SSE2(&rgb[0], &rgb[1], &rgb[0]); + HorizontalAddPack_SSE2(&rgb[2], &rgb[3], &rgb[2]); + HorizontalAddPack_SSE2(&rgb[4], &rgb[5], &rgb[4]); + ConvertRGBToUV_SSE2(&rgb[0], &rgb[2], &rgb[4], &U1, &V1); + + U0 = _mm_packus_epi16(U0, U1); + V0 = _mm_packus_epi16(V0, V1); + if (!do_store) { + const __m128i prev_u = LOAD_16(u); + const __m128i prev_v = LOAD_16(v); + U0 = _mm_avg_epu8(U0, prev_u); + V0 = _mm_avg_epu8(V0, prev_v); + } + STORE_16(U0, u); + STORE_16(V0, v); + } + if (i < src_width) { // left-over + WebPConvertARGBToUV_C(argb + i, u, v, src_width - i, do_store); + } +} + +// Convert 16 packed ARGB 16b-values to r[], g[], b[] +static WEBP_INLINE void RGBA32PackedToPlanar_16b_SSE2( + const uint16_t* const rgbx, + __m128i* const r, __m128i* const g, __m128i* const b) { + const __m128i in0 = LOAD_16(rgbx + 0); // r0 | g0 | b0 |x| r1 | g1 | b1 |x + const __m128i in1 = LOAD_16(rgbx + 8); // r2 | g2 | b2 |x| r3 | g3 | b3 |x + const __m128i in2 = LOAD_16(rgbx + 16); // r4 | ... + const __m128i in3 = LOAD_16(rgbx + 24); // r6 | ... + // column-wise transpose + const __m128i A0 = _mm_unpacklo_epi16(in0, in1); + const __m128i A1 = _mm_unpackhi_epi16(in0, in1); + const __m128i A2 = _mm_unpacklo_epi16(in2, in3); + const __m128i A3 = _mm_unpackhi_epi16(in2, in3); + const __m128i B0 = _mm_unpacklo_epi16(A0, A1); // r0 r1 r2 r3 | g0 g1 .. + const __m128i B1 = _mm_unpackhi_epi16(A0, A1); // b0 b1 b2 b3 | x x x x + const __m128i B2 = _mm_unpacklo_epi16(A2, A3); // r4 r5 r6 r7 | g4 g5 .. 
+ const __m128i B3 = _mm_unpackhi_epi16(A2, A3); // b4 b5 b6 b7 | x x x x + *r = _mm_unpacklo_epi64(B0, B2); + *g = _mm_unpackhi_epi64(B0, B2); + *b = _mm_unpacklo_epi64(B1, B3); +} + +static void ConvertRGBA32ToUV_SSE2(const uint16_t* rgb, + uint8_t* u, uint8_t* v, int width) { + const int max_width = width & ~15; + const uint16_t* const last_rgb = rgb + 4 * max_width; + while (rgb < last_rgb) { + __m128i r, g, b, U0, V0, U1, V1; + RGBA32PackedToPlanar_16b_SSE2(rgb + 0, &r, &g, &b); + ConvertRGBToUV_SSE2(&r, &g, &b, &U0, &V0); + RGBA32PackedToPlanar_16b_SSE2(rgb + 32, &r, &g, &b); + ConvertRGBToUV_SSE2(&r, &g, &b, &U1, &V1); + STORE_16(_mm_packus_epi16(U0, U1), u); + STORE_16(_mm_packus_epi16(V0, V1), v); + u += 16; + v += 16; + rgb += 2 * 32; + } + if (max_width < width) { // left-over + WebPConvertRGBA32ToUV_C(rgb, u, v, width - max_width); + } +} + +//------------------------------------------------------------------------------ + +extern void WebPInitConvertARGBToYUVSSE2(void); + +WEBP_TSAN_IGNORE_FUNCTION void WebPInitConvertARGBToYUVSSE2(void) { + WebPConvertARGBToY = ConvertARGBToY_SSE2; + WebPConvertARGBToUV = ConvertARGBToUV_SSE2; + + WebPConvertRGB24ToY = ConvertRGB24ToY_SSE2; + WebPConvertBGR24ToY = ConvertBGR24ToY_SSE2; + + WebPConvertRGBA32ToUV = ConvertRGBA32ToUV_SSE2; +} + +//------------------------------------------------------------------------------ + +#define MAX_Y ((1 << 10) - 1) // 10b precision over 16b-arithmetic +static uint16_t clip_y(int v) { + return (v < 0) ? 0 : (v > MAX_Y) ? MAX_Y : (uint16_t)v; +} + +static uint64_t SharpYUVUpdateY_SSE2(const uint16_t* ref, const uint16_t* src, + uint16_t* dst, int len) { + uint64_t diff = 0; + uint32_t tmp[4]; + int i; + const __m128i zero = _mm_setzero_si128(); + const __m128i max = _mm_set1_epi16(MAX_Y); + const __m128i one = _mm_set1_epi16(1); + __m128i sum = zero; + + for (i = 0; i + 8 <= len; i += 8) { + const __m128i A = _mm_loadu_si128((const __m128i*)(ref + i)); + const __m128i B = _mm_loadu_si128((const __m128i*)(src + i)); + const __m128i C = _mm_loadu_si128((const __m128i*)(dst + i)); + const __m128i D = _mm_sub_epi16(A, B); // diff_y + const __m128i E = _mm_cmpgt_epi16(zero, D); // sign (-1 or 0) + const __m128i F = _mm_add_epi16(C, D); // new_y + const __m128i G = _mm_or_si128(E, one); // -1 or 1 + const __m128i H = _mm_max_epi16(_mm_min_epi16(F, max), zero); + const __m128i I = _mm_madd_epi16(D, G); // sum(abs(...)) + _mm_storeu_si128((__m128i*)(dst + i), H); + sum = _mm_add_epi32(sum, I); + } + _mm_storeu_si128((__m128i*)tmp, sum); + diff = tmp[3] + tmp[2] + tmp[1] + tmp[0]; + for (; i < len; ++i) { + const int diff_y = ref[i] - src[i]; + const int new_y = (int)dst[i] + diff_y; + dst[i] = clip_y(new_y); + diff += (uint64_t)abs(diff_y); + } + return diff; +} + +static void SharpYUVUpdateRGB_SSE2(const int16_t* ref, const int16_t* src, + int16_t* dst, int len) { + int i = 0; + for (i = 0; i + 8 <= len; i += 8) { + const __m128i A = _mm_loadu_si128((const __m128i*)(ref + i)); + const __m128i B = _mm_loadu_si128((const __m128i*)(src + i)); + const __m128i C = _mm_loadu_si128((const __m128i*)(dst + i)); + const __m128i D = _mm_sub_epi16(A, B); // diff_uv + const __m128i E = _mm_add_epi16(C, D); // new_uv + _mm_storeu_si128((__m128i*)(dst + i), E); + } + for (; i < len; ++i) { + const int diff_uv = ref[i] - src[i]; + dst[i] += diff_uv; + } +} + +static void SharpYUVFilterRow_SSE2(const int16_t* A, const int16_t* B, int len, + const uint16_t* best_y, uint16_t* out) { + int i; + const __m128i kCst8 = 
_mm_set1_epi16(8); + const __m128i max = _mm_set1_epi16(MAX_Y); + const __m128i zero = _mm_setzero_si128(); + for (i = 0; i + 8 <= len; i += 8) { + const __m128i a0 = _mm_loadu_si128((const __m128i*)(A + i + 0)); + const __m128i a1 = _mm_loadu_si128((const __m128i*)(A + i + 1)); + const __m128i b0 = _mm_loadu_si128((const __m128i*)(B + i + 0)); + const __m128i b1 = _mm_loadu_si128((const __m128i*)(B + i + 1)); + const __m128i a0b1 = _mm_add_epi16(a0, b1); + const __m128i a1b0 = _mm_add_epi16(a1, b0); + const __m128i a0a1b0b1 = _mm_add_epi16(a0b1, a1b0); // A0+A1+B0+B1 + const __m128i a0a1b0b1_8 = _mm_add_epi16(a0a1b0b1, kCst8); + const __m128i a0b1_2 = _mm_add_epi16(a0b1, a0b1); // 2*(A0+B1) + const __m128i a1b0_2 = _mm_add_epi16(a1b0, a1b0); // 2*(A1+B0) + const __m128i c0 = _mm_srai_epi16(_mm_add_epi16(a0b1_2, a0a1b0b1_8), 3); + const __m128i c1 = _mm_srai_epi16(_mm_add_epi16(a1b0_2, a0a1b0b1_8), 3); + const __m128i d0 = _mm_add_epi16(c1, a0); + const __m128i d1 = _mm_add_epi16(c0, a1); + const __m128i e0 = _mm_srai_epi16(d0, 1); + const __m128i e1 = _mm_srai_epi16(d1, 1); + const __m128i f0 = _mm_unpacklo_epi16(e0, e1); + const __m128i f1 = _mm_unpackhi_epi16(e0, e1); + const __m128i g0 = _mm_loadu_si128((const __m128i*)(best_y + 2 * i + 0)); + const __m128i g1 = _mm_loadu_si128((const __m128i*)(best_y + 2 * i + 8)); + const __m128i h0 = _mm_add_epi16(g0, f0); + const __m128i h1 = _mm_add_epi16(g1, f1); + const __m128i i0 = _mm_max_epi16(_mm_min_epi16(h0, max), zero); + const __m128i i1 = _mm_max_epi16(_mm_min_epi16(h1, max), zero); + _mm_storeu_si128((__m128i*)(out + 2 * i + 0), i0); + _mm_storeu_si128((__m128i*)(out + 2 * i + 8), i1); + } + for (; i < len; ++i) { + // (9 * A0 + 3 * A1 + 3 * B0 + B1 + 8) >> 4 = + // = (8 * A0 + 2 * (A1 + B0) + (A0 + A1 + B0 + B1 + 8)) >> 4 + // We reuse the common sub-expressions. + const int a0b1 = A[i + 0] + B[i + 1]; + const int a1b0 = A[i + 1] + B[i + 0]; + const int a0a1b0b1 = a0b1 + a1b0 + 8; + const int v0 = (8 * A[i + 0] + 2 * a1b0 + a0a1b0b1) >> 4; + const int v1 = (8 * A[i + 1] + 2 * a0b1 + a0a1b0b1) >> 4; + out[2 * i + 0] = clip_y(best_y[2 * i + 0] + v0); + out[2 * i + 1] = clip_y(best_y[2 * i + 1] + v1); + } +} + +#undef MAX_Y + +//------------------------------------------------------------------------------ + +extern void WebPInitSharpYUVSSE2(void); + +WEBP_TSAN_IGNORE_FUNCTION void WebPInitSharpYUVSSE2(void) { + WebPSharpYUVUpdateY = SharpYUVUpdateY_SSE2; + WebPSharpYUVUpdateRGB = SharpYUVUpdateRGB_SSE2; + WebPSharpYUVFilterRow = SharpYUVFilterRow_SSE2; +} + +#else // !WEBP_USE_SSE2 + +WEBP_DSP_INIT_STUB(WebPInitSamplersSSE2) +WEBP_DSP_INIT_STUB(WebPInitConvertARGBToYUVSSE2) +WEBP_DSP_INIT_STUB(WebPInitSharpYUVSSE2) + +#endif // WEBP_USE_SSE2 diff --git a/vendor/github.com/sizeofint/webpanimation/dsp_yuv_sse41.c b/vendor/github.com/sizeofint/webpanimation/dsp_yuv_sse41.c new file mode 100644 index 00000000..b8fef0a9 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/dsp_yuv_sse41.c @@ -0,0 +1,613 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. 
+// -----------------------------------------------------------------------------
+//
+// YUV->RGB conversion functions
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include "dsp_yuv.h"
+
+#if defined(WEBP_USE_SSE41)
+
+#include "dsp_common_sse41.h"
+#include <stdlib.h>
+#include <smmintrin.h>
+
+//-----------------------------------------------------------------------------
+// Convert spans of 32 pixels to various RGB formats for the fancy upsampler.
+
+// These constants are 14b fixed-point version of ITU-R BT.601 constants.
+// R = (19077 * y             + 26149 * v - 14234) >> 6
+// G = (19077 * y -  6419 * u - 13320 * v +  8708) >> 6
+// B = (19077 * y + 33050 * u             - 17685) >> 6
+static void ConvertYUV444ToRGB_SSE41(const __m128i* const Y0,
+                                     const __m128i* const U0,
+                                     const __m128i* const V0,
+                                     __m128i* const R,
+                                     __m128i* const G,
+                                     __m128i* const B) {
+  const __m128i k19077 = _mm_set1_epi16(19077);
+  const __m128i k26149 = _mm_set1_epi16(26149);
+  const __m128i k14234 = _mm_set1_epi16(14234);
+  // 33050 doesn't fit in a signed short: only use this with unsigned arithmetic
+  const __m128i k33050 = _mm_set1_epi16((short)33050);
+  const __m128i k17685 = _mm_set1_epi16(17685);
+  const __m128i k6419  = _mm_set1_epi16(6419);
+  const __m128i k13320 = _mm_set1_epi16(13320);
+  const __m128i k8708  = _mm_set1_epi16(8708);
+
+  const __m128i Y1 = _mm_mulhi_epu16(*Y0, k19077);
+
+  const __m128i R0 = _mm_mulhi_epu16(*V0, k26149);
+  const __m128i R1 = _mm_sub_epi16(Y1, k14234);
+  const __m128i R2 = _mm_add_epi16(R1, R0);
+
+  const __m128i G0 = _mm_mulhi_epu16(*U0, k6419);
+  const __m128i G1 = _mm_mulhi_epu16(*V0, k13320);
+  const __m128i G2 = _mm_add_epi16(Y1, k8708);
+  const __m128i G3 = _mm_add_epi16(G0, G1);
+  const __m128i G4 = _mm_sub_epi16(G2, G3);
+
+  // be careful with the saturated *unsigned* arithmetic here!
+  const __m128i B0 = _mm_mulhi_epu16(*U0, k33050);
+  const __m128i B1 = _mm_adds_epu16(B0, Y1);
+  const __m128i B2 = _mm_subs_epu16(B1, k17685);
+
+  // use logical shift for B2, which can be larger than 32767
+  *R = _mm_srai_epi16(R2, 6);   // range: [-14234, 30815]
+  *G = _mm_srai_epi16(G4, 6);   // range: [-10953, 27710]
+  *B = _mm_srli_epi16(B2, 6);   // range: [0, 34238]
+}
+
+// Load the bytes into the *upper* part of 16b words. That's "<< 8", basically.
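+// With x pre-shifted into the high byte, _mm_mulhi_epu16(x << 8, k) yields
+// (x * k) >> 8; the remaining >> 6 of the 14b descale happens at the end.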
+static WEBP_INLINE __m128i Load_HI_16_SSE41(const uint8_t* src) { + const __m128i zero = _mm_setzero_si128(); + return _mm_unpacklo_epi8(zero, _mm_loadl_epi64((const __m128i*)src)); +} + +// Load and replicate the U/V samples +static WEBP_INLINE __m128i Load_UV_HI_8_SSE41(const uint8_t* src) { + const __m128i zero = _mm_setzero_si128(); + const __m128i tmp0 = _mm_cvtsi32_si128(*(const uint32_t*)src); + const __m128i tmp1 = _mm_unpacklo_epi8(zero, tmp0); + return _mm_unpacklo_epi16(tmp1, tmp1); // replicate samples +} + +// Convert 32 samples of YUV444 to R/G/B +static void YUV444ToRGB_SSE41(const uint8_t* const y, + const uint8_t* const u, + const uint8_t* const v, + __m128i* const R, __m128i* const G, + __m128i* const B) { + const __m128i Y0 = Load_HI_16_SSE41(y), U0 = Load_HI_16_SSE41(u), + V0 = Load_HI_16_SSE41(v); + ConvertYUV444ToRGB_SSE41(&Y0, &U0, &V0, R, G, B); +} + +// Convert 32 samples of YUV420 to R/G/B +static void YUV420ToRGB_SSE41(const uint8_t* const y, + const uint8_t* const u, + const uint8_t* const v, + __m128i* const R, __m128i* const G, + __m128i* const B) { + const __m128i Y0 = Load_HI_16_SSE41(y), U0 = Load_UV_HI_8_SSE41(u), + V0 = Load_UV_HI_8_SSE41(v); + ConvertYUV444ToRGB_SSE41(&Y0, &U0, &V0, R, G, B); +} + +// Pack the planar buffers +// rrrr... rrrr... gggg... gggg... bbbb... bbbb.... +// triplet by triplet in the output buffer rgb as rgbrgbrgbrgb ... +static WEBP_INLINE void PlanarTo24b_SSE41( + __m128i* const in0, __m128i* const in1, __m128i* const in2, + __m128i* const in3, __m128i* const in4, __m128i* const in5, + uint8_t* const rgb) { + // The input is 6 registers of sixteen 8b but for the sake of explanation, + // let's take 6 registers of four 8b values. + // To pack, we will keep taking one every two 8b integer and move it + // around as follows: + // Input: + // r0r1r2r3 | r4r5r6r7 | g0g1g2g3 | g4g5g6g7 | b0b1b2b3 | b4b5b6b7 + // Split the 6 registers in two sets of 3 registers: the first set as the even + // 8b bytes, the second the odd ones: + // r0r2r4r6 | g0g2g4g6 | b0b2b4b6 | r1r3r5r7 | g1g3g5g7 | b1b3b5b7 + // Repeat the same permutations twice more: + // r0r4g0g4 | b0b4r1r5 | g1g5b1b5 | r2r6g2g6 | b2b6r3r7 | g3g7b3b7 + // r0g0b0r1 | g1b1r2g2 | b2r3g3b3 | r4g4b4r5 | g5b5r6g6 | b6r7g7b7 + VP8PlanarTo24b_SSE41(in0, in1, in2, in3, in4, in5); + + _mm_storeu_si128((__m128i*)(rgb + 0), *in0); + _mm_storeu_si128((__m128i*)(rgb + 16), *in1); + _mm_storeu_si128((__m128i*)(rgb + 32), *in2); + _mm_storeu_si128((__m128i*)(rgb + 48), *in3); + _mm_storeu_si128((__m128i*)(rgb + 64), *in4); + _mm_storeu_si128((__m128i*)(rgb + 80), *in5); +} + +void VP8YuvToRgb32_SSE41(const uint8_t* y, const uint8_t* u, const uint8_t* v, + uint8_t* dst) { + __m128i R0, R1, R2, R3, G0, G1, G2, G3, B0, B1, B2, B3; + __m128i rgb0, rgb1, rgb2, rgb3, rgb4, rgb5; + + YUV444ToRGB_SSE41(y + 0, u + 0, v + 0, &R0, &G0, &B0); + YUV444ToRGB_SSE41(y + 8, u + 8, v + 8, &R1, &G1, &B1); + YUV444ToRGB_SSE41(y + 16, u + 16, v + 16, &R2, &G2, &B2); + YUV444ToRGB_SSE41(y + 24, u + 24, v + 24, &R3, &G3, &B3); + + // Cast to 8b and store as RRRRGGGGBBBB. + rgb0 = _mm_packus_epi16(R0, R1); + rgb1 = _mm_packus_epi16(R2, R3); + rgb2 = _mm_packus_epi16(G0, G1); + rgb3 = _mm_packus_epi16(G2, G3); + rgb4 = _mm_packus_epi16(B0, B1); + rgb5 = _mm_packus_epi16(B2, B3); + + // Pack as RGBRGBRGBRGB. 
+ PlanarTo24b_SSE41(&rgb0, &rgb1, &rgb2, &rgb3, &rgb4, &rgb5, dst); +} + +void VP8YuvToBgr32_SSE41(const uint8_t* y, const uint8_t* u, const uint8_t* v, + uint8_t* dst) { + __m128i R0, R1, R2, R3, G0, G1, G2, G3, B0, B1, B2, B3; + __m128i bgr0, bgr1, bgr2, bgr3, bgr4, bgr5; + + YUV444ToRGB_SSE41(y + 0, u + 0, v + 0, &R0, &G0, &B0); + YUV444ToRGB_SSE41(y + 8, u + 8, v + 8, &R1, &G1, &B1); + YUV444ToRGB_SSE41(y + 16, u + 16, v + 16, &R2, &G2, &B2); + YUV444ToRGB_SSE41(y + 24, u + 24, v + 24, &R3, &G3, &B3); + + // Cast to 8b and store as BBBBGGGGRRRR. + bgr0 = _mm_packus_epi16(B0, B1); + bgr1 = _mm_packus_epi16(B2, B3); + bgr2 = _mm_packus_epi16(G0, G1); + bgr3 = _mm_packus_epi16(G2, G3); + bgr4 = _mm_packus_epi16(R0, R1); + bgr5= _mm_packus_epi16(R2, R3); + + // Pack as BGRBGRBGRBGR. + PlanarTo24b_SSE41(&bgr0, &bgr1, &bgr2, &bgr3, &bgr4, &bgr5, dst); +} + +//----------------------------------------------------------------------------- +// Arbitrary-length row conversion functions + +static void YuvToRgbRow_SSE41(const uint8_t* y, + const uint8_t* u, const uint8_t* v, + uint8_t* dst, int len) { + int n; + for (n = 0; n + 32 <= len; n += 32, dst += 32 * 3) { + __m128i R0, R1, R2, R3, G0, G1, G2, G3, B0, B1, B2, B3; + __m128i rgb0, rgb1, rgb2, rgb3, rgb4, rgb5; + + YUV420ToRGB_SSE41(y + 0, u + 0, v + 0, &R0, &G0, &B0); + YUV420ToRGB_SSE41(y + 8, u + 4, v + 4, &R1, &G1, &B1); + YUV420ToRGB_SSE41(y + 16, u + 8, v + 8, &R2, &G2, &B2); + YUV420ToRGB_SSE41(y + 24, u + 12, v + 12, &R3, &G3, &B3); + + // Cast to 8b and store as RRRRGGGGBBBB. + rgb0 = _mm_packus_epi16(R0, R1); + rgb1 = _mm_packus_epi16(R2, R3); + rgb2 = _mm_packus_epi16(G0, G1); + rgb3 = _mm_packus_epi16(G2, G3); + rgb4 = _mm_packus_epi16(B0, B1); + rgb5 = _mm_packus_epi16(B2, B3); + + // Pack as RGBRGBRGBRGB. + PlanarTo24b_SSE41(&rgb0, &rgb1, &rgb2, &rgb3, &rgb4, &rgb5, dst); + + y += 32; + u += 16; + v += 16; + } + for (; n < len; ++n) { // Finish off + VP8YuvToRgb(y[0], u[0], v[0], dst); + dst += 3; + y += 1; + u += (n & 1); + v += (n & 1); + } +} + +static void YuvToBgrRow_SSE41(const uint8_t* y, + const uint8_t* u, const uint8_t* v, + uint8_t* dst, int len) { + int n; + for (n = 0; n + 32 <= len; n += 32, dst += 32 * 3) { + __m128i R0, R1, R2, R3, G0, G1, G2, G3, B0, B1, B2, B3; + __m128i bgr0, bgr1, bgr2, bgr3, bgr4, bgr5; + + YUV420ToRGB_SSE41(y + 0, u + 0, v + 0, &R0, &G0, &B0); + YUV420ToRGB_SSE41(y + 8, u + 4, v + 4, &R1, &G1, &B1); + YUV420ToRGB_SSE41(y + 16, u + 8, v + 8, &R2, &G2, &B2); + YUV420ToRGB_SSE41(y + 24, u + 12, v + 12, &R3, &G3, &B3); + + // Cast to 8b and store as BBBBGGGGRRRR. + bgr0 = _mm_packus_epi16(B0, B1); + bgr1 = _mm_packus_epi16(B2, B3); + bgr2 = _mm_packus_epi16(G0, G1); + bgr3 = _mm_packus_epi16(G2, G3); + bgr4 = _mm_packus_epi16(R0, R1); + bgr5 = _mm_packus_epi16(R2, R3); + + // Pack as BGRBGRBGRBGR. + PlanarTo24b_SSE41(&bgr0, &bgr1, &bgr2, &bgr3, &bgr4, &bgr5, dst); + + y += 32; + u += 16; + v += 16; + } + for (; n < len; ++n) { // Finish off + VP8YuvToBgr(y[0], u[0], v[0], dst); + dst += 3; + y += 1; + u += (n & 1); + v += (n & 1); + } +} + +//------------------------------------------------------------------------------ +// Entry point + +extern void WebPInitSamplersSSE41(void); + +WEBP_TSAN_IGNORE_FUNCTION void WebPInitSamplersSSE41(void) { + WebPSamplers[MODE_RGB] = YuvToRgbRow_SSE41; + WebPSamplers[MODE_BGR] = YuvToBgrRow_SSE41; +} + +//------------------------------------------------------------------------------ +// RGB24/32 -> YUV converters + +// Load eight 16b-words from *src. 
+#define LOAD_16(src) _mm_loadu_si128((const __m128i*)(src)) +// Store either 16b-words into *dst +#define STORE_16(V, dst) _mm_storeu_si128((__m128i*)(dst), (V)) + +#define WEBP_SSE41_SHUFF(OUT) do { \ + const __m128i tmp0 = _mm_shuffle_epi8(A0, shuff0); \ + const __m128i tmp1 = _mm_shuffle_epi8(A1, shuff1); \ + const __m128i tmp2 = _mm_shuffle_epi8(A2, shuff2); \ + const __m128i tmp3 = _mm_shuffle_epi8(A3, shuff0); \ + const __m128i tmp4 = _mm_shuffle_epi8(A4, shuff1); \ + const __m128i tmp5 = _mm_shuffle_epi8(A5, shuff2); \ + \ + /* OR everything to get one channel */ \ + const __m128i tmp6 = _mm_or_si128(tmp0, tmp1); \ + const __m128i tmp7 = _mm_or_si128(tmp3, tmp4); \ + out[OUT + 0] = _mm_or_si128(tmp6, tmp2); \ + out[OUT + 1] = _mm_or_si128(tmp7, tmp5); \ +} while (0); + +// Unpack the 8b input rgbrgbrgbrgb ... as contiguous registers: +// rrrr... rrrr... gggg... gggg... bbbb... bbbb.... +// Similar to PlanarTo24bHelper(), but in reverse order. +static WEBP_INLINE void RGB24PackedToPlanar_SSE41( + const uint8_t* const rgb, __m128i* const out /*out[6]*/) { + const __m128i A0 = _mm_loadu_si128((const __m128i*)(rgb + 0)); + const __m128i A1 = _mm_loadu_si128((const __m128i*)(rgb + 16)); + const __m128i A2 = _mm_loadu_si128((const __m128i*)(rgb + 32)); + const __m128i A3 = _mm_loadu_si128((const __m128i*)(rgb + 48)); + const __m128i A4 = _mm_loadu_si128((const __m128i*)(rgb + 64)); + const __m128i A5 = _mm_loadu_si128((const __m128i*)(rgb + 80)); + + // Compute RR. + { + const __m128i shuff0 = _mm_set_epi8( + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 15, 12, 9, 6, 3, 0); + const __m128i shuff1 = _mm_set_epi8( + -1, -1, -1, -1, -1, 14, 11, 8, 5, 2, -1, -1, -1, -1, -1, -1); + const __m128i shuff2 = _mm_set_epi8( + 13, 10, 7, 4, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1); + WEBP_SSE41_SHUFF(0) + } + // Compute GG. + { + const __m128i shuff0 = _mm_set_epi8( + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 13, 10, 7, 4, 1); + const __m128i shuff1 = _mm_set_epi8( + -1, -1, -1, -1, -1, 15, 12, 9, 6, 3, 0, -1, -1, -1, -1, -1); + const __m128i shuff2 = _mm_set_epi8( + 14, 11, 8, 5, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1); + WEBP_SSE41_SHUFF(2) + } + // Compute BB. + { + const __m128i shuff0 = _mm_set_epi8( + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 14, 11, 8, 5, 2); + const __m128i shuff1 = _mm_set_epi8( + -1, -1, -1, -1, -1, -1, 13, 10, 7, 4, 1, -1, -1, -1, -1, -1); + const __m128i shuff2 = _mm_set_epi8( + 15, 12, 9, 6, 3, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1); + WEBP_SSE41_SHUFF(4) + } +} + +#undef WEBP_SSE41_SHUFF + +// Convert 8 packed ARGB to r[], g[], b[] +static WEBP_INLINE void RGB32PackedToPlanar_SSE41( + const uint32_t* const argb, __m128i* const rgb /*in[6]*/) { + const __m128i zero = _mm_setzero_si128(); + __m128i a0 = LOAD_16(argb + 0); + __m128i a1 = LOAD_16(argb + 4); + __m128i a2 = LOAD_16(argb + 8); + __m128i a3 = LOAD_16(argb + 12); + VP8L32bToPlanar_SSE41(&a0, &a1, &a2, &a3); + rgb[0] = _mm_unpacklo_epi8(a1, zero); + rgb[1] = _mm_unpackhi_epi8(a1, zero); + rgb[2] = _mm_unpacklo_epi8(a2, zero); + rgb[3] = _mm_unpackhi_epi8(a2, zero); + rgb[4] = _mm_unpacklo_epi8(a3, zero); + rgb[5] = _mm_unpackhi_epi8(a3, zero); +} + +// This macro computes (RG * MULT_RG + GB * MULT_GB + ROUNDER) >> DESCALE_FIX +// It's a macro and not a function because we need to use immediate values with +// srai_epi32, e.g. 
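+// Note the Y coefficients below split G's 33059 as (33059 - 16384) + 16384
+// across the two madd constants, since 33059 exceeds the signed 16-bit range
+// that _mm_madd_epi16 operates on.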
+#define TRANSFORM(RG_LO, RG_HI, GB_LO, GB_HI, MULT_RG, MULT_GB, \ + ROUNDER, DESCALE_FIX, OUT) do { \ + const __m128i V0_lo = _mm_madd_epi16(RG_LO, MULT_RG); \ + const __m128i V0_hi = _mm_madd_epi16(RG_HI, MULT_RG); \ + const __m128i V1_lo = _mm_madd_epi16(GB_LO, MULT_GB); \ + const __m128i V1_hi = _mm_madd_epi16(GB_HI, MULT_GB); \ + const __m128i V2_lo = _mm_add_epi32(V0_lo, V1_lo); \ + const __m128i V2_hi = _mm_add_epi32(V0_hi, V1_hi); \ + const __m128i V3_lo = _mm_add_epi32(V2_lo, ROUNDER); \ + const __m128i V3_hi = _mm_add_epi32(V2_hi, ROUNDER); \ + const __m128i V5_lo = _mm_srai_epi32(V3_lo, DESCALE_FIX); \ + const __m128i V5_hi = _mm_srai_epi32(V3_hi, DESCALE_FIX); \ + (OUT) = _mm_packs_epi32(V5_lo, V5_hi); \ +} while (0) + +#define MK_CST_16(A, B) _mm_set_epi16((B), (A), (B), (A), (B), (A), (B), (A)) +static WEBP_INLINE void ConvertRGBToY_SSE41(const __m128i* const R, + const __m128i* const G, + const __m128i* const B, + __m128i* const Y) { + const __m128i kRG_y = MK_CST_16(16839, 33059 - 16384); + const __m128i kGB_y = MK_CST_16(16384, 6420); + const __m128i kHALF_Y = _mm_set1_epi32((16 << YUV_FIX) + YUV_HALF); + + const __m128i RG_lo = _mm_unpacklo_epi16(*R, *G); + const __m128i RG_hi = _mm_unpackhi_epi16(*R, *G); + const __m128i GB_lo = _mm_unpacklo_epi16(*G, *B); + const __m128i GB_hi = _mm_unpackhi_epi16(*G, *B); + TRANSFORM(RG_lo, RG_hi, GB_lo, GB_hi, kRG_y, kGB_y, kHALF_Y, YUV_FIX, *Y); +} + +static WEBP_INLINE void ConvertRGBToUV_SSE41(const __m128i* const R, + const __m128i* const G, + const __m128i* const B, + __m128i* const U, + __m128i* const V) { + const __m128i kRG_u = MK_CST_16(-9719, -19081); + const __m128i kGB_u = MK_CST_16(0, 28800); + const __m128i kRG_v = MK_CST_16(28800, 0); + const __m128i kGB_v = MK_CST_16(-24116, -4684); + const __m128i kHALF_UV = _mm_set1_epi32(((128 << YUV_FIX) + YUV_HALF) << 2); + + const __m128i RG_lo = _mm_unpacklo_epi16(*R, *G); + const __m128i RG_hi = _mm_unpackhi_epi16(*R, *G); + const __m128i GB_lo = _mm_unpacklo_epi16(*G, *B); + const __m128i GB_hi = _mm_unpackhi_epi16(*G, *B); + TRANSFORM(RG_lo, RG_hi, GB_lo, GB_hi, kRG_u, kGB_u, + kHALF_UV, YUV_FIX + 2, *U); + TRANSFORM(RG_lo, RG_hi, GB_lo, GB_hi, kRG_v, kGB_v, + kHALF_UV, YUV_FIX + 2, *V); +} + +#undef MK_CST_16 +#undef TRANSFORM + +static void ConvertRGB24ToY_SSE41(const uint8_t* rgb, uint8_t* y, int width) { + const int max_width = width & ~31; + int i; + for (i = 0; i < max_width; rgb += 3 * 16 * 2) { + __m128i rgb_plane[6]; + int j; + + RGB24PackedToPlanar_SSE41(rgb, rgb_plane); + + for (j = 0; j < 2; ++j, i += 16) { + const __m128i zero = _mm_setzero_si128(); + __m128i r, g, b, Y0, Y1; + + // Convert to 16-bit Y. + r = _mm_unpacklo_epi8(rgb_plane[0 + j], zero); + g = _mm_unpacklo_epi8(rgb_plane[2 + j], zero); + b = _mm_unpacklo_epi8(rgb_plane[4 + j], zero); + ConvertRGBToY_SSE41(&r, &g, &b, &Y0); + + // Convert to 16-bit Y. + r = _mm_unpackhi_epi8(rgb_plane[0 + j], zero); + g = _mm_unpackhi_epi8(rgb_plane[2 + j], zero); + b = _mm_unpackhi_epi8(rgb_plane[4 + j], zero); + ConvertRGBToY_SSE41(&r, &g, &b, &Y1); + + // Cast to 8-bit and store. 
+ STORE_16(_mm_packus_epi16(Y0, Y1), y + i); + } + } + for (; i < width; ++i, rgb += 3) { // left-over + y[i] = VP8RGBToY(rgb[0], rgb[1], rgb[2], YUV_HALF); + } +} + +static void ConvertBGR24ToY_SSE41(const uint8_t* bgr, uint8_t* y, int width) { + const int max_width = width & ~31; + int i; + for (i = 0; i < max_width; bgr += 3 * 16 * 2) { + __m128i bgr_plane[6]; + int j; + + RGB24PackedToPlanar_SSE41(bgr, bgr_plane); + + for (j = 0; j < 2; ++j, i += 16) { + const __m128i zero = _mm_setzero_si128(); + __m128i r, g, b, Y0, Y1; + + // Convert to 16-bit Y. + b = _mm_unpacklo_epi8(bgr_plane[0 + j], zero); + g = _mm_unpacklo_epi8(bgr_plane[2 + j], zero); + r = _mm_unpacklo_epi8(bgr_plane[4 + j], zero); + ConvertRGBToY_SSE41(&r, &g, &b, &Y0); + + // Convert to 16-bit Y. + b = _mm_unpackhi_epi8(bgr_plane[0 + j], zero); + g = _mm_unpackhi_epi8(bgr_plane[2 + j], zero); + r = _mm_unpackhi_epi8(bgr_plane[4 + j], zero); + ConvertRGBToY_SSE41(&r, &g, &b, &Y1); + + // Cast to 8-bit and store. + STORE_16(_mm_packus_epi16(Y0, Y1), y + i); + } + } + for (; i < width; ++i, bgr += 3) { // left-over + y[i] = VP8RGBToY(bgr[2], bgr[1], bgr[0], YUV_HALF); + } +} + +static void ConvertARGBToY_SSE41(const uint32_t* argb, uint8_t* y, int width) { + const int max_width = width & ~15; + int i; + for (i = 0; i < max_width; i += 16) { + __m128i Y0, Y1, rgb[6]; + RGB32PackedToPlanar_SSE41(&argb[i], rgb); + ConvertRGBToY_SSE41(&rgb[0], &rgb[2], &rgb[4], &Y0); + ConvertRGBToY_SSE41(&rgb[1], &rgb[3], &rgb[5], &Y1); + STORE_16(_mm_packus_epi16(Y0, Y1), y + i); + } + for (; i < width; ++i) { // left-over + const uint32_t p = argb[i]; + y[i] = VP8RGBToY((p >> 16) & 0xff, (p >> 8) & 0xff, (p >> 0) & 0xff, + YUV_HALF); + } +} + +// Horizontal add (doubled) of two 16b values, result is 16b. +// in: A | B | C | D | ... -> out: 2*(A+B) | 2*(C+D) | ... 
+static void HorizontalAddPack_SSE41(const __m128i* const A, + const __m128i* const B, + __m128i* const out) { + const __m128i k2 = _mm_set1_epi16(2); + const __m128i C = _mm_madd_epi16(*A, k2); + const __m128i D = _mm_madd_epi16(*B, k2); + *out = _mm_packs_epi32(C, D); +} + +static void ConvertARGBToUV_SSE41(const uint32_t* argb, + uint8_t* u, uint8_t* v, + int src_width, int do_store) { + const int max_width = src_width & ~31; + int i; + for (i = 0; i < max_width; i += 32, u += 16, v += 16) { + __m128i rgb[6], U0, V0, U1, V1; + RGB32PackedToPlanar_SSE41(&argb[i], rgb); + HorizontalAddPack_SSE41(&rgb[0], &rgb[1], &rgb[0]); + HorizontalAddPack_SSE41(&rgb[2], &rgb[3], &rgb[2]); + HorizontalAddPack_SSE41(&rgb[4], &rgb[5], &rgb[4]); + ConvertRGBToUV_SSE41(&rgb[0], &rgb[2], &rgb[4], &U0, &V0); + + RGB32PackedToPlanar_SSE41(&argb[i + 16], rgb); + HorizontalAddPack_SSE41(&rgb[0], &rgb[1], &rgb[0]); + HorizontalAddPack_SSE41(&rgb[2], &rgb[3], &rgb[2]); + HorizontalAddPack_SSE41(&rgb[4], &rgb[5], &rgb[4]); + ConvertRGBToUV_SSE41(&rgb[0], &rgb[2], &rgb[4], &U1, &V1); + + U0 = _mm_packus_epi16(U0, U1); + V0 = _mm_packus_epi16(V0, V1); + if (!do_store) { + const __m128i prev_u = LOAD_16(u); + const __m128i prev_v = LOAD_16(v); + U0 = _mm_avg_epu8(U0, prev_u); + V0 = _mm_avg_epu8(V0, prev_v); + } + STORE_16(U0, u); + STORE_16(V0, v); + } + if (i < src_width) { // left-over + WebPConvertARGBToUV_C(argb + i, u, v, src_width - i, do_store); + } +} + +// Convert 16 packed ARGB 16b-values to r[], g[], b[] +static WEBP_INLINE void RGBA32PackedToPlanar_16b_SSE41( + const uint16_t* const rgbx, + __m128i* const r, __m128i* const g, __m128i* const b) { + const __m128i in0 = LOAD_16(rgbx + 0); // r0 | g0 | b0 |x| r1 | g1 | b1 |x + const __m128i in1 = LOAD_16(rgbx + 8); // r2 | g2 | b2 |x| r3 | g3 | b3 |x + const __m128i in2 = LOAD_16(rgbx + 16); // r4 | ... + const __m128i in3 = LOAD_16(rgbx + 24); // r6 | ... + // aarrggbb as 16-bit. + const __m128i shuff0 = + _mm_set_epi8(-1, -1, -1, -1, 13, 12, 5, 4, 11, 10, 3, 2, 9, 8, 1, 0); + const __m128i shuff1 = + _mm_set_epi8(13, 12, 5, 4, -1, -1, -1, -1, 11, 10, 3, 2, 9, 8, 1, 0); + const __m128i A0 = _mm_shuffle_epi8(in0, shuff0); + const __m128i A1 = _mm_shuffle_epi8(in1, shuff1); + const __m128i A2 = _mm_shuffle_epi8(in2, shuff0); + const __m128i A3 = _mm_shuffle_epi8(in3, shuff1); + // R0R1G0G1 + // B0B1**** + // R2R3G2G3 + // B2B3**** + // (OR is used to free port 5 for the unpack) + const __m128i B0 = _mm_unpacklo_epi32(A0, A1); + const __m128i B1 = _mm_or_si128(A0, A1); + const __m128i B2 = _mm_unpacklo_epi32(A2, A3); + const __m128i B3 = _mm_or_si128(A2, A3); + // Gather the channels. 
+  *r = _mm_unpacklo_epi64(B0, B2);
+  *g = _mm_unpackhi_epi64(B0, B2);
+  *b = _mm_unpackhi_epi64(B1, B3);
+}
+
+static void ConvertRGBA32ToUV_SSE41(const uint16_t* rgb,
+                                    uint8_t* u, uint8_t* v, int width) {
+  const int max_width = width & ~15;
+  const uint16_t* const last_rgb = rgb + 4 * max_width;
+  while (rgb < last_rgb) {
+    __m128i r, g, b, U0, V0, U1, V1;
+    RGBA32PackedToPlanar_16b_SSE41(rgb + 0, &r, &g, &b);
+    ConvertRGBToUV_SSE41(&r, &g, &b, &U0, &V0);
+    RGBA32PackedToPlanar_16b_SSE41(rgb + 32, &r, &g, &b);
+    ConvertRGBToUV_SSE41(&r, &g, &b, &U1, &V1);
+    STORE_16(_mm_packus_epi16(U0, U1), u);
+    STORE_16(_mm_packus_epi16(V0, V1), v);
+    u += 16;
+    v += 16;
+    rgb += 2 * 32;
+  }
+  if (max_width < width) {  // left-over
+    WebPConvertRGBA32ToUV_C(rgb, u, v, width - max_width);
+  }
+}
+
+//------------------------------------------------------------------------------
+
+extern void WebPInitConvertARGBToYUVSSE41(void);
+
+WEBP_TSAN_IGNORE_FUNCTION void WebPInitConvertARGBToYUVSSE41(void) {
+  WebPConvertARGBToY = ConvertARGBToY_SSE41;
+  WebPConvertARGBToUV = ConvertARGBToUV_SSE41;
+
+  WebPConvertRGB24ToY = ConvertRGB24ToY_SSE41;
+  WebPConvertBGR24ToY = ConvertBGR24ToY_SSE41;
+
+  WebPConvertRGBA32ToUV = ConvertRGBA32ToUV_SSE41;
+}
+
+//------------------------------------------------------------------------------
+
+#else  // !WEBP_USE_SSE41
+
+WEBP_DSP_INIT_STUB(WebPInitSamplersSSE41)
+WEBP_DSP_INIT_STUB(WebPInitConvertARGBToYUVSSE41)
+
+#endif  // WEBP_USE_SSE41
diff --git a/vendor/github.com/sizeofint/webpanimation/enc_alpha_enc.c b/vendor/github.com/sizeofint/webpanimation/enc_alpha_enc.c
new file mode 100644
index 00000000..2312e565
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/enc_alpha_enc.c
@@ -0,0 +1,443 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Alpha-plane compression.
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include "enc_vp8i_enc.h"
+#include "dsp_dsp.h"
+#include "utils_filters_utils.h"
+#include "utils_quant_levels_utils.h"
+#include "utils_utils.h"
+#include "webp_format_constants.h"
+
+// -----------------------------------------------------------------------------
+// Encodes the given alpha data via specified compression method 'method'.
+// The pre-processing (quantization) is performed if 'quality' is less than 100.
+// For such cases, the encoding is lossy. The valid range is [0, 100] for
+// 'quality' and [0, 1] for 'method':
+//   'method = 0' - No compression;
+//   'method = 1' - Use lossless coder on the alpha plane only
+// 'filter' values [0, 4] correspond to prediction modes none, horizontal,
+// vertical & gradient filters. The prediction mode 4 will try all the
+// prediction modes 0 to 3 and pick the best one.
+// 'effort_level': specifies how much effort must be spent to try and reduce
+// the compressed output size. In range 0 (quick) to 6 (slow).
+//
+// 'output' corresponds to the buffer containing compressed alpha data.
+// This buffer is allocated by this method and caller should call
+// WebPSafeFree(*output) when done.
+// 'output_size' corresponds to size of this compressed alpha buffer. +// +// Returns 1 on successfully encoding the alpha and +// 0 if either: +// invalid quality or method, or +// memory allocation for the compressed data fails. + +#include "enc_vp8li_enc.h" + +static int EncodeLossless(const uint8_t* const data, int width, int height, + int effort_level, // in [0..6] range + int use_quality_100, VP8LBitWriter* const bw, + WebPAuxStats* const stats) { + int ok = 0; + WebPConfig config; + WebPPicture picture; + + WebPPictureInit(&picture); + picture.width = width; + picture.height = height; + picture.use_argb = 1; + picture.stats = stats; + if (!WebPPictureAlloc(&picture)) return 0; + + // Transfer the alpha values to the green channel. + WebPDispatchAlphaToGreen(data, width, picture.width, picture.height, + picture.argb, picture.argb_stride); + + WebPConfigInit(&config); + config.lossless = 1; + // Enable exact, or it would alter RGB values of transparent alpha, which is + // normally OK but not here since we are not encoding the input image but an + // internal encoding-related image containing necessary exact information in + // RGB channels. + config.exact = 1; + config.method = effort_level; // impact is very small + // Set a low default quality for encoding alpha. Ensure that Alpha quality at + // lower methods (3 and below) is less than the threshold for triggering + // costly 'BackwardReferencesTraceBackwards'. + // If the alpha quality is set to 100 and the method to 6, allow for a high + // lossless quality to trigger the cruncher. + config.quality = + (use_quality_100 && effort_level == 6) ? 100 : 8.f * effort_level; + assert(config.quality >= 0 && config.quality <= 100.f); + + // TODO(urvang): Temporary fix to avoid generating images that trigger + // a decoder bug related to alpha with color cache. + // See: https://code.google.com/p/webp/issues/detail?id=239 + // Need to re-enable this later. + ok = (VP8LEncodeStream(&config, &picture, bw, 0 /*use_cache*/) == VP8_ENC_OK); + WebPPictureFree(&picture); + ok = ok && !bw->error_; + if (!ok) { + VP8LBitWriterWipeOut(bw); + return 0; + } + return 1; +} + +// ----------------------------------------------------------------------------- + +// Small struct to hold the result of a filter mode compression attempt. +typedef struct { + size_t score; + VP8BitWriter bw; + WebPAuxStats stats; +} FilterTrial; + +// This function always returns an initialized 'bw' object, even upon error. 
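+// The header byte written in front of the compressed data follows the ALPH
+// chunk layout: bits 0..1 = compression method, bits 2..3 = filter,
+// bits 4..5 = pre-processing (cf. 'header = method | (filter << 2)' below).
+// e.g. header 0x15 means lossless-compressed (1), horizontal filter (1),
+// level-reduced (1) alpha.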
+static int EncodeAlphaInternal(const uint8_t* const data, int width, int height, + int method, int filter, int reduce_levels, + int effort_level, // in [0..6] range + uint8_t* const tmp_alpha, + FilterTrial* result) { + int ok = 0; + const uint8_t* alpha_src; + WebPFilterFunc filter_func; + uint8_t header; + const size_t data_size = width * height; + const uint8_t* output = NULL; + size_t output_size = 0; + VP8LBitWriter tmp_bw; + + assert((uint64_t)data_size == (uint64_t)width * height); // as per spec + assert(filter >= 0 && filter < WEBP_FILTER_LAST); + assert(method >= ALPHA_NO_COMPRESSION); + assert(method <= ALPHA_LOSSLESS_COMPRESSION); + assert(sizeof(header) == ALPHA_HEADER_LEN); + + filter_func = WebPFilters[filter]; + if (filter_func != NULL) { + filter_func(data, width, height, width, tmp_alpha); + alpha_src = tmp_alpha; + } else { + alpha_src = data; + } + + if (method != ALPHA_NO_COMPRESSION) { + ok = VP8LBitWriterInit(&tmp_bw, data_size >> 3); + ok = ok && EncodeLossless(alpha_src, width, height, effort_level, + !reduce_levels, &tmp_bw, &result->stats); + if (ok) { + output = VP8LBitWriterFinish(&tmp_bw); + output_size = VP8LBitWriterNumBytes(&tmp_bw); + if (output_size > data_size) { + // compressed size is larger than source! Revert to uncompressed mode. + method = ALPHA_NO_COMPRESSION; + VP8LBitWriterWipeOut(&tmp_bw); + } + } else { + VP8LBitWriterWipeOut(&tmp_bw); + return 0; + } + } + + if (method == ALPHA_NO_COMPRESSION) { + output = alpha_src; + output_size = data_size; + ok = 1; + } + + // Emit final result. + header = method | (filter << 2); + if (reduce_levels) header |= ALPHA_PREPROCESSED_LEVELS << 4; + + VP8BitWriterInit(&result->bw, ALPHA_HEADER_LEN + output_size); + ok = ok && VP8BitWriterAppend(&result->bw, &header, ALPHA_HEADER_LEN); + ok = ok && VP8BitWriterAppend(&result->bw, output, output_size); + + if (method != ALPHA_NO_COMPRESSION) { + VP8LBitWriterWipeOut(&tmp_bw); + } + ok = ok && !result->bw.error_; + result->score = VP8BitWriterSize(&result->bw); + return ok; +} + +// ----------------------------------------------------------------------------- + +static int GetNumColors(const uint8_t* data, int width, int height, + int stride) { + int j; + int colors = 0; + uint8_t color[256] = { 0 }; + + for (j = 0; j < height; ++j) { + int i; + const uint8_t* const p = data + j * stride; + for (i = 0; i < width; ++i) { + color[p[i]] = 1; + } + } + for (j = 0; j < 256; ++j) { + if (color[j] > 0) ++colors; + } + return colors; +} + +#define FILTER_TRY_NONE (1 << WEBP_FILTER_NONE) +#define FILTER_TRY_ALL ((1 << WEBP_FILTER_LAST) - 1) + +// Given the input 'filter' option, return an OR'd bit-set of filters to try. +static uint32_t GetFilterMap(const uint8_t* alpha, int width, int height, + int filter, int effort_level) { + uint32_t bit_map = 0U; + if (filter == WEBP_FILTER_FAST) { + // Quick estimate of the best candidate. + int try_filter_none = (effort_level > 3); + const int kMinColorsForFilterNone = 16; + const int kMaxColorsForFilterNone = 192; + const int num_colors = GetNumColors(alpha, width, height, width); + // For low number of colors, NONE yields better compression. + filter = (num_colors <= kMinColorsForFilterNone) + ? WEBP_FILTER_NONE + : WebPEstimateBestFilter(alpha, width, height, width); + bit_map |= 1 << filter; + // For large number of colors, try FILTER_NONE in addition to the best + // filter as well. 
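+    // e.g. at effort_level 3 or below, 8 colors -> only FILTER_NONE is tried,
+    // while 200 colors -> the estimated best filter plus FILTER_NONE.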
+ if (try_filter_none || num_colors > kMaxColorsForFilterNone) { + bit_map |= FILTER_TRY_NONE; + } + } else if (filter == WEBP_FILTER_NONE) { + bit_map = FILTER_TRY_NONE; + } else { // WEBP_FILTER_BEST -> try all + bit_map = FILTER_TRY_ALL; + } + return bit_map; +} + +static void InitFilterTrial(FilterTrial* const score) { + score->score = (size_t)~0U; + VP8BitWriterInit(&score->bw, 0); +} + +static int ApplyFiltersAndEncode(const uint8_t* alpha, int width, int height, + size_t data_size, int method, int filter, + int reduce_levels, int effort_level, + uint8_t** const output, + size_t* const output_size, + WebPAuxStats* const stats) { + int ok = 1; + FilterTrial best; + uint32_t try_map = + GetFilterMap(alpha, width, height, filter, effort_level); + InitFilterTrial(&best); + + if (try_map != FILTER_TRY_NONE) { + uint8_t* filtered_alpha = (uint8_t*)WebPSafeMalloc(1ULL, data_size); + if (filtered_alpha == NULL) return 0; + + for (filter = WEBP_FILTER_NONE; ok && try_map; ++filter, try_map >>= 1) { + if (try_map & 1) { + FilterTrial trial; + ok = EncodeAlphaInternal(alpha, width, height, method, filter, + reduce_levels, effort_level, filtered_alpha, + &trial); + if (ok && trial.score < best.score) { + VP8BitWriterWipeOut(&best.bw); + best = trial; + } else { + VP8BitWriterWipeOut(&trial.bw); + } + } + } + WebPSafeFree(filtered_alpha); + } else { + ok = EncodeAlphaInternal(alpha, width, height, method, WEBP_FILTER_NONE, + reduce_levels, effort_level, NULL, &best); + } + if (ok) { +#if !defined(WEBP_DISABLE_STATS) + if (stats != NULL) { + stats->lossless_features = best.stats.lossless_features; + stats->histogram_bits = best.stats.histogram_bits; + stats->transform_bits = best.stats.transform_bits; + stats->cache_bits = best.stats.cache_bits; + stats->palette_size = best.stats.palette_size; + stats->lossless_size = best.stats.lossless_size; + stats->lossless_hdr_size = best.stats.lossless_hdr_size; + stats->lossless_data_size = best.stats.lossless_data_size; + } +#else + (void)stats; +#endif + *output_size = VP8BitWriterSize(&best.bw); + *output = VP8BitWriterBuf(&best.bw); + } else { + VP8BitWriterWipeOut(&best.bw); + } + return ok; +} + +static int EncodeAlpha(VP8Encoder* const enc, + int quality, int method, int filter, + int effort_level, + uint8_t** const output, size_t* const output_size) { + const WebPPicture* const pic = enc->pic_; + const int width = pic->width; + const int height = pic->height; + + uint8_t* quant_alpha = NULL; + const size_t data_size = width * height; + uint64_t sse = 0; + int ok = 1; + const int reduce_levels = (quality < 100); + + // quick sanity checks + assert((uint64_t)data_size == (uint64_t)width * height); // as per spec + assert(enc != NULL && pic != NULL && pic->a != NULL); + assert(output != NULL && output_size != NULL); + assert(width > 0 && height > 0); + assert(pic->a_stride >= width); + assert(filter >= WEBP_FILTER_NONE && filter <= WEBP_FILTER_FAST); + + if (quality < 0 || quality > 100) { + return 0; + } + + if (method < ALPHA_NO_COMPRESSION || method > ALPHA_LOSSLESS_COMPRESSION) { + return 0; + } + + if (method == ALPHA_NO_COMPRESSION) { + // Don't filter, as filtering will make no impact on compressed size. + filter = WEBP_FILTER_NONE; + } + + quant_alpha = (uint8_t*)WebPSafeMalloc(1ULL, data_size); + if (quant_alpha == NULL) { + return 0; + } + + // Extract alpha data (width x height) from raw_data (stride x height). 
+  WebPCopyPlane(pic->a, pic->a_stride, quant_alpha, width, width, height);
+
+  if (reduce_levels) {  // No Quantization required for 'quality = 100'.
+    // 16 alpha levels give quite a low MSE w.r.t. the original alpha plane,
+    // so they are mapped to moderate quality 70. Hence Quality:[0, 70] ->
+    // Levels:[2, 16] and Quality:]70, 100] -> Levels:]16, 256].
+    const int alpha_levels = (quality <= 70) ? (2 + quality / 5)
+                                             : (16 + (quality - 70) * 8);
+    ok = QuantizeLevels(quant_alpha, width, height, alpha_levels, &sse);
+  }
+
+  if (ok) {
+    VP8FiltersInit();
+    ok = ApplyFiltersAndEncode(quant_alpha, width, height, data_size, method,
+                               filter, reduce_levels, effort_level, output,
+                               output_size, pic->stats);
+#if !defined(WEBP_DISABLE_STATS)
+    if (pic->stats != NULL) {  // need stats?
+      pic->stats->coded_size += (int)(*output_size);
+      enc->sse_[3] = sse;
+    }
+#endif
+  }
+
+  WebPSafeFree(quant_alpha);
+  return ok;
+}
+
+//------------------------------------------------------------------------------
+// Main calls
+
+static int CompressAlphaJob(void* arg1, void* dummy) {
+  VP8Encoder* const enc = (VP8Encoder*)arg1;
+  const WebPConfig* config = enc->config_;
+  uint8_t* alpha_data = NULL;
+  size_t alpha_size = 0;
+  const int effort_level = config->method;  // maps to [0..6]
+  const WEBP_FILTER_TYPE filter =
+      (config->alpha_filtering == 0) ? WEBP_FILTER_NONE :
+      (config->alpha_filtering == 1) ? WEBP_FILTER_FAST :
+                                       WEBP_FILTER_BEST;
+  if (!EncodeAlpha(enc, config->alpha_quality, config->alpha_compression,
+                   filter, effort_level, &alpha_data, &alpha_size)) {
+    return 0;
+  }
+  if (alpha_size != (uint32_t)alpha_size) {  // Sanity check.
+    WebPSafeFree(alpha_data);
+    return 0;
+  }
+  enc->alpha_data_size_ = (uint32_t)alpha_size;
+  enc->alpha_data_ = alpha_data;
+  (void)dummy;
+  return 1;
+}
+
+void VP8EncInitAlpha(VP8Encoder* const enc) {
+  WebPInitAlphaProcessing();
+  enc->has_alpha_ = WebPPictureHasTransparency(enc->pic_);
+  enc->alpha_data_ = NULL;
+  enc->alpha_data_size_ = 0;
+  if (enc->thread_level_ > 0) {
+    WebPWorker* const worker = &enc->alpha_worker_;
+    WebPGetWorkerInterface()->Init(worker);
+    worker->data1 = enc;
+    worker->data2 = NULL;
+    worker->hook = CompressAlphaJob;
+  }
+}
+
+int VP8EncStartAlpha(VP8Encoder* const enc) {
+  if (enc->has_alpha_) {
+    if (enc->thread_level_ > 0) {
+      WebPWorker* const worker = &enc->alpha_worker_;
+      // Makes sure worker is good to go.
+      if (!WebPGetWorkerInterface()->Reset(worker)) {
+        return 0;
+      }
+      WebPGetWorkerInterface()->Launch(worker);
+      return 1;
+    } else {
+      return CompressAlphaJob(enc, NULL);  // just do the job right away
+    }
+  }
+  return 1;
+}
+
+int VP8EncFinishAlpha(VP8Encoder* const enc) {
+  if (enc->has_alpha_) {
+    if (enc->thread_level_ > 0) {
+      WebPWorker* const worker = &enc->alpha_worker_;
+      if (!WebPGetWorkerInterface()->Sync(worker)) return 0;  // error
+    }
+  }
+  return WebPReportProgress(enc->pic_, enc->percent_ + 20, &enc->percent_);
+}
+
+int VP8EncDeleteAlpha(VP8Encoder* const enc) {
+  int ok = 1;
+  if (enc->thread_level_ > 0) {
+    WebPWorker* const worker = &enc->alpha_worker_;
+    // finish anything left in flight
+    ok = WebPGetWorkerInterface()->Sync(worker);
+    // still need to end the worker, even if !ok
+    WebPGetWorkerInterface()->End(worker);
+  }
+  WebPSafeFree(enc->alpha_data_);
+  enc->alpha_data_ = NULL;
+  enc->alpha_data_size_ = 0;
+  enc->has_alpha_ = 0;
+  return ok;
+}
diff --git a/vendor/github.com/sizeofint/webpanimation/enc_analysis_enc.c b/vendor/github.com/sizeofint/webpanimation/enc_analysis_enc.c
new file mode 100644
index 00000000..97441c8f
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/enc_analysis_enc.c
@@ -0,0 +1,475 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Macroblock analysis
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+#include "enc_vp8i_enc.h"
+#include "enc_cost_enc.h"
+#include "utils_utils.h"
+
+#define MAX_ITERS_K_MEANS 6
+
+//------------------------------------------------------------------------------
+// Smooth the segment map by replacing isolated blocks by the majority of their
+// neighbours.
+
+static void SmoothSegmentMap(VP8Encoder* const enc) {
+  int n, x, y;
+  const int w = enc->mb_w_;
+  const int h = enc->mb_h_;
+  const int majority_cnt_3_x_3_grid = 5;
+  uint8_t* const tmp = (uint8_t*)WebPSafeMalloc(w * h, sizeof(*tmp));
+  assert((uint64_t)(w * h) == (uint64_t)w * h);   // no overflow, as per spec
+
+  if (tmp == NULL) return;
+  for (y = 1; y < h - 1; ++y) {
+    for (x = 1; x < w - 1; ++x) {
+      int cnt[NUM_MB_SEGMENTS] = { 0 };
+      const VP8MBInfo* const mb = &enc->mb_info_[x + w * y];
+      int majority_seg = mb->segment_;
+      // Check the 8 neighbouring segment values.
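+      // (mb_info_ is stored row by row with stride 'w', so mb[-w] is the
+      // macroblock directly above and mb[w] the one directly below.)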
+ cnt[mb[-w - 1].segment_]++; // top-left + cnt[mb[-w + 0].segment_]++; // top + cnt[mb[-w + 1].segment_]++; // top-right + cnt[mb[ - 1].segment_]++; // left + cnt[mb[ + 1].segment_]++; // right + cnt[mb[ w - 1].segment_]++; // bottom-left + cnt[mb[ w + 0].segment_]++; // bottom + cnt[mb[ w + 1].segment_]++; // bottom-right + for (n = 0; n < NUM_MB_SEGMENTS; ++n) { + if (cnt[n] >= majority_cnt_3_x_3_grid) { + majority_seg = n; + break; + } + } + tmp[x + y * w] = majority_seg; + } + } + for (y = 1; y < h - 1; ++y) { + for (x = 1; x < w - 1; ++x) { + VP8MBInfo* const mb = &enc->mb_info_[x + w * y]; + mb->segment_ = tmp[x + y * w]; + } + } + WebPSafeFree(tmp); +} + +//------------------------------------------------------------------------------ +// set segment susceptibility alpha_ / beta_ + +static WEBP_INLINE int clip(int v, int m, int M) { + return (v < m) ? m : (v > M) ? M : v; +} + +static void SetSegmentAlphas(VP8Encoder* const enc, + const int centers[NUM_MB_SEGMENTS], + int mid) { + const int nb = enc->segment_hdr_.num_segments_; + int min = centers[0], max = centers[0]; + int n; + + if (nb > 1) { + for (n = 0; n < nb; ++n) { + if (min > centers[n]) min = centers[n]; + if (max < centers[n]) max = centers[n]; + } + } + if (max == min) max = min + 1; + assert(mid <= max && mid >= min); + for (n = 0; n < nb; ++n) { + const int alpha = 255 * (centers[n] - mid) / (max - min); + const int beta = 255 * (centers[n] - min) / (max - min); + enc->dqm_[n].alpha_ = clip(alpha, -127, 127); + enc->dqm_[n].beta_ = clip(beta, 0, 255); + } +} + +//------------------------------------------------------------------------------ +// Compute susceptibility based on DCT-coeff histograms: +// the higher, the "easier" the macroblock is to compress. + +#define MAX_ALPHA 255 // 8b of precision for susceptibilities. +#define ALPHA_SCALE (2 * MAX_ALPHA) // scaling factor for alpha. +#define DEFAULT_ALPHA (-1) +#define IS_BETTER_ALPHA(alpha, best_alpha) ((alpha) > (best_alpha)) + +static int FinalAlphaValue(int alpha) { + alpha = MAX_ALPHA - alpha; + return clip(alpha, 0, MAX_ALPHA); +} + +static int GetAlpha(const VP8Histogram* const histo) { + // 'alpha' will later be clipped to [0..MAX_ALPHA] range, clamping outer + // values which happen to be mostly noise. This leaves the maximum precision + // for handling the useful small values which contribute most. + const int max_value = histo->max_value; + const int last_non_zero = histo->last_non_zero; + const int alpha = + (max_value > 1) ? ALPHA_SCALE * last_non_zero / max_value : 0; + return alpha; +} + +static void InitHistogram(VP8Histogram* const histo) { + histo->max_value = 0; + histo->last_non_zero = 1; +} + +//------------------------------------------------------------------------------ +// Simplified k-Means, to assign Nb segments based on alpha-histogram + +static void AssignSegments(VP8Encoder* const enc, + const int alphas[MAX_ALPHA + 1]) { + // 'num_segments_' is previously validated and <= NUM_MB_SEGMENTS, but an + // explicit check is needed to avoid spurious warning about 'n + 1' exceeding + // array bounds of 'centers' with some compilers (noticed with gcc-4.9). + const int nb = (enc->segment_hdr_.num_segments_ < NUM_MB_SEGMENTS) ? 
enc->segment_hdr_.num_segments_ : NUM_MB_SEGMENTS;
+  int centers[NUM_MB_SEGMENTS];
+  int weighted_average = 0;
+  int map[MAX_ALPHA + 1];
+  int a, n, k;
+  int min_a = 0, max_a = MAX_ALPHA, range_a;
+  // 'int' type is ok for histo, and won't overflow
+  int accum[NUM_MB_SEGMENTS], dist_accum[NUM_MB_SEGMENTS];
+
+  assert(nb >= 1);
+  assert(nb <= NUM_MB_SEGMENTS);
+
+  // bracket the input
+  for (n = 0; n <= MAX_ALPHA && alphas[n] == 0; ++n) {}
+  min_a = n;
+  for (n = MAX_ALPHA; n > min_a && alphas[n] == 0; --n) {}
+  max_a = n;
+  range_a = max_a - min_a;
+
+  // Spread initial centers evenly
+  for (k = 0, n = 1; k < nb; ++k, n += 2) {
+    assert(n < 2 * nb);
+    centers[k] = min_a + (n * range_a) / (2 * nb);
+  }
+
+  for (k = 0; k < MAX_ITERS_K_MEANS; ++k) {  // few iters are enough
+    int total_weight;
+    int displaced;
+    // Reset stats
+    for (n = 0; n < nb; ++n) {
+      accum[n] = 0;
+      dist_accum[n] = 0;
+    }
+    // Assign nearest center for each 'a'
+    n = 0;  // track the nearest center for current 'a'
+    for (a = min_a; a <= max_a; ++a) {
+      if (alphas[a]) {
+        while (n + 1 < nb && abs(a - centers[n + 1]) < abs(a - centers[n])) {
+          n++;
+        }
+        map[a] = n;
+        // accumulate contribution into best centroid
+        dist_accum[n] += a * alphas[a];
+        accum[n] += alphas[a];
+      }
+    }
+    // All points are classified. Move the centroids to the
+    // center of their respective cloud.
+    displaced = 0;
+    weighted_average = 0;
+    total_weight = 0;
+    for (n = 0; n < nb; ++n) {
+      if (accum[n]) {
+        const int new_center = (dist_accum[n] + accum[n] / 2) / accum[n];
+        displaced += abs(centers[n] - new_center);
+        centers[n] = new_center;
+        weighted_average += new_center * accum[n];
+        total_weight += accum[n];
+      }
+    }
+    weighted_average = (weighted_average + total_weight / 2) / total_weight;
+    if (displaced < 5) break;  // no need to keep on looping...
+  }
+
+  // Map each original value to the closest centroid
+  for (n = 0; n < enc->mb_w_ * enc->mb_h_; ++n) {
+    VP8MBInfo* const mb = &enc->mb_info_[n];
+    const int alpha = mb->alpha_;
+    mb->segment_ = map[alpha];
+    mb->alpha_ = centers[map[alpha]];  // for the record.
+  }
+
+  if (nb > 1) {
+    const int smooth = (enc->config_->preprocessing & 1);
+    if (smooth) SmoothSegmentMap(enc);
+  }
+
+  SetSegmentAlphas(enc, centers, weighted_average);  // pick some alphas.
+}
+
+//------------------------------------------------------------------------------
+// Macroblock analysis: collect histogram for each mode, deduce the maximal
+// susceptibility and set best modes for this macroblock.
+// Segment assignment is done later.
+
+// Number of modes to inspect for alpha_ evaluation. We don't need to test all
+// the possible modes during the analysis phase: we risk falling into a local
+// optimum, or being subject to boundary effects.
+#define MAX_INTRA16_MODE 2
+#define MAX_INTRA4_MODE 2
+#define MAX_UV_MODE 2
+
+static int MBAnalyzeBestIntra16Mode(VP8EncIterator* const it) {
+  const int max_mode = MAX_INTRA16_MODE;
+  int mode;
+  int best_alpha = DEFAULT_ALPHA;
+  int best_mode = 0;
+
+  VP8MakeLuma16Preds(it);
+  for (mode = 0; mode < max_mode; ++mode) {
+    VP8Histogram histo;
+    int alpha;
+
+    InitHistogram(&histo);
+    VP8CollectHistogram(it->yuv_in_ + Y_OFF_ENC,
+                        it->yuv_p_ + VP8I16ModeOffsets[mode],
+                        0, 16, &histo);
+    alpha = GetAlpha(&histo);
+    if (IS_BETTER_ALPHA(alpha, best_alpha)) {
+      best_alpha = alpha;
+      best_mode = mode;
+    }
+  }
+  VP8SetIntra16Mode(it, best_mode);
+  return best_alpha;
+}
+
+static int FastMBAnalyze(VP8EncIterator* const it) {
+  // Empirical cut-off value, should be around 16 (~=block size).
We use the + // [8-17] range and favor intra4 at high quality, intra16 for low quality. + const int q = (int)it->enc_->config_->quality; + const uint32_t kThreshold = 8 + (17 - 8) * q / 100; + int k; + uint32_t dc[16], m, m2; + for (k = 0; k < 16; k += 4) { + VP8Mean16x4(it->yuv_in_ + Y_OFF_ENC + k * BPS, &dc[k]); + } + for (m = 0, m2 = 0, k = 0; k < 16; ++k) { + m += dc[k]; + m2 += dc[k] * dc[k]; + } + if (kThreshold * m2 < m * m) { + VP8SetIntra16Mode(it, 0); // DC16 + } else { + const uint8_t modes[16] = { 0 }; // DC4 + VP8SetIntra4Mode(it, modes); + } + return 0; +} + +static int MBAnalyzeBestUVMode(VP8EncIterator* const it) { + int best_alpha = DEFAULT_ALPHA; + int smallest_alpha = 0; + int best_mode = 0; + const int max_mode = MAX_UV_MODE; + int mode; + + VP8MakeChroma8Preds(it); + for (mode = 0; mode < max_mode; ++mode) { + VP8Histogram histo; + int alpha; + InitHistogram(&histo); + VP8CollectHistogram(it->yuv_in_ + U_OFF_ENC, + it->yuv_p_ + VP8UVModeOffsets[mode], + 16, 16 + 4 + 4, &histo); + alpha = GetAlpha(&histo); + if (IS_BETTER_ALPHA(alpha, best_alpha)) { + best_alpha = alpha; + } + // The best prediction mode tends to be the one with the smallest alpha. + if (mode == 0 || alpha < smallest_alpha) { + smallest_alpha = alpha; + best_mode = mode; + } + } + VP8SetIntraUVMode(it, best_mode); + return best_alpha; +} + +static void MBAnalyze(VP8EncIterator* const it, + int alphas[MAX_ALPHA + 1], + int* const alpha, int* const uv_alpha) { + const VP8Encoder* const enc = it->enc_; + int best_alpha, best_uv_alpha; + + VP8SetIntra16Mode(it, 0); // default: Intra16, DC_PRED + VP8SetSkip(it, 0); // not skipped + VP8SetSegment(it, 0); // default segment, spec-wise. + + if (enc->method_ <= 1) { + best_alpha = FastMBAnalyze(it); + } else { + best_alpha = MBAnalyzeBestIntra16Mode(it); + } + best_uv_alpha = MBAnalyzeBestUVMode(it); + + // Final susceptibility mix + best_alpha = (3 * best_alpha + best_uv_alpha + 2) >> 2; + best_alpha = FinalAlphaValue(best_alpha); + alphas[best_alpha]++; + it->mb_->alpha_ = best_alpha; // for later remapping. + + // Accumulate for later complexity analysis. + *alpha += best_alpha; // mixed susceptibility (not just luma) + *uv_alpha += best_uv_alpha; +} + +static void DefaultMBInfo(VP8MBInfo* const mb) { + mb->type_ = 1; // I16x16 + mb->uv_mode_ = 0; + mb->skip_ = 0; // not skipped + mb->segment_ = 0; // default segment + mb->alpha_ = 0; +} + +//------------------------------------------------------------------------------ +// Main analysis loop: +// Collect all susceptibilities for each macroblock and record their +// distribution in alphas[]. Segments is assigned a-posteriori, based on +// this histogram. +// We also pick an intra16 prediction mode, which shouldn't be considered +// final except for fast-encode settings. We can also pick some intra4 modes +// and decide intra4/intra16, but that's usually almost always a bad choice at +// this stage. + +static void ResetAllMBInfo(VP8Encoder* const enc) { + int n; + for (n = 0; n < enc->mb_w_ * enc->mb_h_; ++n) { + DefaultMBInfo(&enc->mb_info_[n]); + } + // Default susceptibilities. + enc->dqm_[0].alpha_ = 0; + enc->dqm_[0].beta_ = 0; + // Note: we can't compute this alpha_ / uv_alpha_ -> set to default value. 
+ enc->alpha_ = 0; + enc->uv_alpha_ = 0; + WebPReportProgress(enc->pic_, enc->percent_ + 20, &enc->percent_); +} + +// struct used to collect job result +typedef struct { + WebPWorker worker; + int alphas[MAX_ALPHA + 1]; + int alpha, uv_alpha; + VP8EncIterator it; + int delta_progress; +} SegmentJob; + +// main work call +static int DoSegmentsJob(void* arg1, void* arg2) { + SegmentJob* const job = (SegmentJob*)arg1; + VP8EncIterator* const it = (VP8EncIterator*)arg2; + int ok = 1; + if (!VP8IteratorIsDone(it)) { + uint8_t tmp[32 + WEBP_ALIGN_CST]; + uint8_t* const scratch = (uint8_t*)WEBP_ALIGN(tmp); + do { + // Let's pretend we have perfect lossless reconstruction. + VP8IteratorImport(it, scratch); + MBAnalyze(it, job->alphas, &job->alpha, &job->uv_alpha); + ok = VP8IteratorProgress(it, job->delta_progress); + } while (ok && VP8IteratorNext(it)); + } + return ok; +} + +static void MergeJobs(const SegmentJob* const src, SegmentJob* const dst) { + int i; + for (i = 0; i <= MAX_ALPHA; ++i) dst->alphas[i] += src->alphas[i]; + dst->alpha += src->alpha; + dst->uv_alpha += src->uv_alpha; +} + +// initialize the job struct with some tasks to perform +static void InitSegmentJob(VP8Encoder* const enc, SegmentJob* const job, + int start_row, int end_row) { + WebPGetWorkerInterface()->Init(&job->worker); + job->worker.data1 = job; + job->worker.data2 = &job->it; + job->worker.hook = DoSegmentsJob; + VP8IteratorInit(enc, &job->it); + VP8IteratorSetRow(&job->it, start_row); + VP8IteratorSetCountDown(&job->it, (end_row - start_row) * enc->mb_w_); + memset(job->alphas, 0, sizeof(job->alphas)); + job->alpha = 0; + job->uv_alpha = 0; + // only one of both jobs can record the progress, since we don't + // expect the user's hook to be multi-thread safe + job->delta_progress = (start_row == 0) ? 20 : 0; +} + +// main entry point +int VP8EncAnalyze(VP8Encoder* const enc) { + int ok = 1; + const int do_segments = + enc->config_->emulate_jpeg_size || // We need the complexity evaluation. + (enc->segment_hdr_.num_segments_ > 1) || + (enc->method_ <= 1); // for method 0 - 1, we need preds_[] to be filled. + if (do_segments) { + const int last_row = enc->mb_h_; + // We give a little more than a half work to the main thread. + const int split_row = (9 * last_row + 15) >> 4; + const int total_mb = last_row * enc->mb_w_; +#ifdef WEBP_USE_THREAD + const int kMinSplitRow = 2; // minimal rows needed for mt to be worth it + const int do_mt = (enc->thread_level_ > 0) && (split_row >= kMinSplitRow); +#else + const int do_mt = 0; +#endif + const WebPWorkerInterface* const worker_interface = + WebPGetWorkerInterface(); + SegmentJob main_job; + if (do_mt) { + SegmentJob side_job; + // Note the use of '&' instead of '&&' because we must call the functions + // no matter what. + InitSegmentJob(enc, &main_job, 0, split_row); + InitSegmentJob(enc, &side_job, split_row, last_row); + // we don't need to call Reset() on main_job.worker, since we're calling + // WebPWorkerExecute() on it + ok &= worker_interface->Reset(&side_job.worker); + // launch the two jobs in parallel + if (ok) { + worker_interface->Launch(&side_job.worker); + worker_interface->Execute(&main_job.worker); + ok &= worker_interface->Sync(&side_job.worker); + ok &= worker_interface->Sync(&main_job.worker); + } + worker_interface->End(&side_job.worker); + if (ok) MergeJobs(&side_job, &main_job); // merge results together + } else { + // Even for single-thread case, we use the generic Worker tools. 
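+      // (Execute() runs the hook directly on the calling thread here, since
+      // main_job.worker was never Reset() into threaded mode.)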
+      InitSegmentJob(enc, &main_job, 0, last_row);
+      worker_interface->Execute(&main_job.worker);
+      ok &= worker_interface->Sync(&main_job.worker);
+    }
+    worker_interface->End(&main_job.worker);
+    if (ok) {
+      enc->alpha_ = main_job.alpha / total_mb;
+      enc->uv_alpha_ = main_job.uv_alpha / total_mb;
+      AssignSegments(enc, main_job.alphas);
+    }
+  } else {  // Use only one default segment.
+    ResetAllMBInfo(enc);
+  }
+  return ok;
+}
+
diff --git a/vendor/github.com/sizeofint/webpanimation/enc_backward_references_cost_enc.c b/vendor/github.com/sizeofint/webpanimation/enc_backward_references_cost_enc.c
new file mode 100644
index 00000000..d12488cd
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/enc_backward_references_cost_enc.c
@@ -0,0 +1,790 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Improves a given set of backward references by analyzing its bit cost.
+// The algorithm is similar to the Zopfli compression algorithm but tailored to
+// images.
+//
+// Author: Vincent Rabaud (vrabaud@google.com)
+//
+
+#include <assert.h>
+
+#include "enc_backward_references_enc.h"
+#include "enc_histogram_enc.h"
+#include "dsp_lossless_common.h"
+#include "utils_color_cache_utils.h"
+#include "utils_utils.h"
+
+#define VALUES_IN_BYTE 256
+
+extern void VP8LClearBackwardRefs(VP8LBackwardRefs* const refs);
+extern int VP8LDistanceToPlaneCode(int xsize, int dist);
+extern void VP8LBackwardRefsCursorAdd(VP8LBackwardRefs* const refs,
+                                      const PixOrCopy v);
+
+typedef struct {
+  double alpha_[VALUES_IN_BYTE];
+  double red_[VALUES_IN_BYTE];
+  double blue_[VALUES_IN_BYTE];
+  double distance_[NUM_DISTANCE_CODES];
+  double* literal_;
+} CostModel;
+
+static void ConvertPopulationCountTableToBitEstimates(
+    int num_symbols, const uint32_t population_counts[], double output[]) {
+  uint32_t sum = 0;
+  int nonzeros = 0;
+  int i;
+  for (i = 0; i < num_symbols; ++i) {
+    sum += population_counts[i];
+    if (population_counts[i] > 0) {
+      ++nonzeros;
+    }
+  }
+  if (nonzeros <= 1) {
+    memset(output, 0, num_symbols * sizeof(*output));
+  } else {
+    const double logsum = VP8LFastLog2(sum);
+    for (i = 0; i < num_symbols; ++i) {
+      output[i] = logsum - VP8LFastLog2(population_counts[i]);
+    }
+  }
+}
+
+static int CostModelBuild(CostModel* const m, int xsize, int cache_bits,
+                          const VP8LBackwardRefs* const refs) {
+  int ok = 0;
+  VP8LRefsCursor c = VP8LRefsCursorInit(refs);
+  VP8LHistogram* const histo = VP8LAllocateHistogram(cache_bits);
+  if (histo == NULL) goto Error;
+
+  // The following code is similar to VP8LHistogramCreate but converts the
+  // distance to plane code.
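+  // (VP8LDistanceToPlaneCode remaps a plain 1-D backward distance so that
+  // offsets which are spatially close to the current pixel in 2-D get the
+  // smallest codes.)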
+ VP8LHistogramInit(histo, cache_bits, /*init_arrays=*/ 1); + while (VP8LRefsCursorOk(&c)) { + VP8LHistogramAddSinglePixOrCopy(histo, c.cur_pos, VP8LDistanceToPlaneCode, + xsize); + VP8LRefsCursorNext(&c); + } + + ConvertPopulationCountTableToBitEstimates( + VP8LHistogramNumCodes(histo->palette_code_bits_), + histo->literal_, m->literal_); + ConvertPopulationCountTableToBitEstimates( + VALUES_IN_BYTE, histo->red_, m->red_); + ConvertPopulationCountTableToBitEstimates( + VALUES_IN_BYTE, histo->blue_, m->blue_); + ConvertPopulationCountTableToBitEstimates( + VALUES_IN_BYTE, histo->alpha_, m->alpha_); + ConvertPopulationCountTableToBitEstimates( + NUM_DISTANCE_CODES, histo->distance_, m->distance_); + ok = 1; + + Error: + VP8LFreeHistogram(histo); + return ok; +} + +static WEBP_INLINE double GetLiteralCost(const CostModel* const m, uint32_t v) { + return m->alpha_[v >> 24] + + m->red_[(v >> 16) & 0xff] + + m->literal_[(v >> 8) & 0xff] + + m->blue_[v & 0xff]; +} + +static WEBP_INLINE double GetCacheCost(const CostModel* const m, uint32_t idx) { + const int literal_idx = VALUES_IN_BYTE + NUM_LENGTH_CODES + idx; + return m->literal_[literal_idx]; +} + +static WEBP_INLINE double GetLengthCost(const CostModel* const m, + uint32_t length) { + int code, extra_bits; + VP8LPrefixEncodeBits(length, &code, &extra_bits); + return m->literal_[VALUES_IN_BYTE + code] + extra_bits; +} + +static WEBP_INLINE double GetDistanceCost(const CostModel* const m, + uint32_t distance) { + int code, extra_bits; + VP8LPrefixEncodeBits(distance, &code, &extra_bits); + return m->distance_[code] + extra_bits; +} + +static WEBP_INLINE void AddSingleLiteralWithCostModel( + const uint32_t* const argb, VP8LColorCache* const hashers, + const CostModel* const cost_model, int idx, int use_color_cache, + float prev_cost, float* const cost, uint16_t* const dist_array) { + double cost_val = prev_cost; + const uint32_t color = argb[idx]; + const int ix = use_color_cache ? VP8LColorCacheContains(hashers, color) : -1; + if (ix >= 0) { + // use_color_cache is true and hashers contains color + const double mul0 = 0.68; + cost_val += GetCacheCost(cost_model, ix) * mul0; + } else { + const double mul1 = 0.82; + if (use_color_cache) VP8LColorCacheInsert(hashers, color); + cost_val += GetLiteralCost(cost_model, color) * mul1; + } + if (cost[idx] > cost_val) { + cost[idx] = (float)cost_val; + dist_array[idx] = 1; // only one is inserted. + } +} + +// ----------------------------------------------------------------------------- +// CostManager and interval handling + +// Empirical value to avoid high memory consumption but good for performance. +#define COST_CACHE_INTERVAL_SIZE_MAX 500 + +// To perform backward reference every pixel at index index_ is considered and +// the cost for the MAX_LENGTH following pixels computed. Those following pixels +// at index index_ + k (k from 0 to MAX_LENGTH) have a cost of: +// cost_ = distance cost at index + GetLengthCost(cost_model, k) +// and the minimum value is kept. GetLengthCost(cost_model, k) is cached in an +// array of size MAX_LENGTH. +// Instead of performing MAX_LENGTH comparisons per pixel, we keep track of the +// minimal values using intervals of constant cost. +// An interval is defined by the index_ of the pixel that generated it and +// is only useful in a range of indices from start_ to end_ (exclusive), i.e. +// it contains the minimum value for pixels between start_ and end_. +// Intervals are stored in a linked list and ordered by start_. 
When a new
+// interval has a better value, old intervals are split or removed. There are
+// therefore no overlapping intervals.
+typedef struct CostInterval CostInterval;
+struct CostInterval {
+  float cost_;
+  int start_;
+  int end_;
+  int index_;
+  CostInterval* previous_;
+  CostInterval* next_;
+};
+
+// The GetLengthCost(cost_model, k) are cached in a CostCacheInterval.
+typedef struct {
+  double cost_;
+  int start_;
+  int end_;  // Exclusive.
+} CostCacheInterval;
+
+// This structure is in charge of managing intervals and costs.
+// It caches the different CostCacheInterval, caches the different
+// GetLengthCost(cost_model, k) in cost_cache_ and the CostInterval's (whose
+// count_ is limited by COST_CACHE_INTERVAL_SIZE_MAX).
+#define COST_MANAGER_MAX_FREE_LIST 10
+typedef struct {
+  CostInterval* head_;
+  int count_;  // The number of stored intervals.
+  CostCacheInterval* cache_intervals_;
+  size_t cache_intervals_size_;
+  double cost_cache_[MAX_LENGTH];  // Contains the GetLengthCost(cost_model, k).
+  float* costs_;
+  uint16_t* dist_array_;
+  // Most of the time, we only need a few intervals -> use a free-list, to
+  // avoid fragmentation with small allocs in most common cases.
+  CostInterval intervals_[COST_MANAGER_MAX_FREE_LIST];
+  CostInterval* free_intervals_;
+  // These are regularly malloc'd remains. This list can't grow larger than
+  // COST_CACHE_INTERVAL_SIZE_MAX - COST_MANAGER_MAX_FREE_LIST entries.
+  CostInterval* recycled_intervals_;
+} CostManager;
+
+static void CostIntervalAddToFreeList(CostManager* const manager,
+                                      CostInterval* const interval) {
+  interval->next_ = manager->free_intervals_;
+  manager->free_intervals_ = interval;
+}
+
+static int CostIntervalIsInFreeList(const CostManager* const manager,
+                                    const CostInterval* const interval) {
+  return (interval >= &manager->intervals_[0] &&
+          interval <= &manager->intervals_[COST_MANAGER_MAX_FREE_LIST - 1]);
+}
+
+static void CostManagerInitFreeList(CostManager* const manager) {
+  int i;
+  manager->free_intervals_ = NULL;
+  for (i = 0; i < COST_MANAGER_MAX_FREE_LIST; ++i) {
+    CostIntervalAddToFreeList(manager, &manager->intervals_[i]);
+  }
+}
+
+static void DeleteIntervalList(CostManager* const manager,
+                               const CostInterval* interval) {
+  while (interval != NULL) {
+    const CostInterval* const next = interval->next_;
+    if (!CostIntervalIsInFreeList(manager, interval)) {
+      WebPSafeFree((void*)interval);
+    }  // else: do nothing
+    interval = next;
+  }
+}
+
+static void CostManagerClear(CostManager* const manager) {
+  if (manager == NULL) return;
+
+  WebPSafeFree(manager->costs_);
+  WebPSafeFree(manager->cache_intervals_);
+
+  // Clear the interval lists.
+  DeleteIntervalList(manager, manager->head_);
+  manager->head_ = NULL;
+  DeleteIntervalList(manager, manager->recycled_intervals_);
+  manager->recycled_intervals_ = NULL;
+
+  // Reset pointers, count_ and cache_intervals_size_.
+  memset(manager, 0, sizeof(*manager));
+  CostManagerInitFreeList(manager);
+}
+
+static int CostManagerInit(CostManager* const manager,
+                           uint16_t* const dist_array, int pix_count,
+                           const CostModel* const cost_model) {
+  int i;
+  const int cost_cache_size = (pix_count > MAX_LENGTH) ? MAX_LENGTH : pix_count;
+
+  manager->costs_ = NULL;
+  manager->cache_intervals_ = NULL;
+  manager->head_ = NULL;
+  manager->recycled_intervals_ = NULL;
+  manager->count_ = 0;
+  manager->dist_array_ = dist_array;
+  CostManagerInitFreeList(manager);
+
+  // Fill in the cost_cache_.
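+  // (Runs of equal values in cost_cache_ will collapse into a single
+  // CostCacheInterval below; e.g. costs c0,c0,c1,c1,c1 become the two
+  // intervals [0,2) -> c0 and [2,5) -> c1.)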
+ manager->cache_intervals_size_ = 1; + manager->cost_cache_[0] = GetLengthCost(cost_model, 0); + for (i = 1; i < cost_cache_size; ++i) { + manager->cost_cache_[i] = GetLengthCost(cost_model, i); + // Get the number of bound intervals. + if (manager->cost_cache_[i] != manager->cost_cache_[i - 1]) { + ++manager->cache_intervals_size_; + } + } + + // With the current cost model, we usually have below 20 intervals. + // The worst case scenario with a cost model would be if every length has a + // different cost, hence MAX_LENGTH but that is impossible with the current + // implementation that spirals around a pixel. + assert(manager->cache_intervals_size_ <= MAX_LENGTH); + manager->cache_intervals_ = (CostCacheInterval*)WebPSafeMalloc( + manager->cache_intervals_size_, sizeof(*manager->cache_intervals_)); + if (manager->cache_intervals_ == NULL) { + CostManagerClear(manager); + return 0; + } + + // Fill in the cache_intervals_. + { + CostCacheInterval* cur = manager->cache_intervals_; + + // Consecutive values in cost_cache_ are compared and if a big enough + // difference is found, a new interval is created and bounded. + cur->start_ = 0; + cur->end_ = 1; + cur->cost_ = manager->cost_cache_[0]; + for (i = 1; i < cost_cache_size; ++i) { + const double cost_val = manager->cost_cache_[i]; + if (cost_val != cur->cost_) { + ++cur; + // Initialize an interval. + cur->start_ = i; + cur->cost_ = cost_val; + } + cur->end_ = i + 1; + } + } + + manager->costs_ = (float*)WebPSafeMalloc(pix_count, sizeof(*manager->costs_)); + if (manager->costs_ == NULL) { + CostManagerClear(manager); + return 0; + } + // Set the initial costs_ high for every pixel as we will keep the minimum. + for (i = 0; i < pix_count; ++i) manager->costs_[i] = 1e38f; + + return 1; +} + +// Given the cost and the position that define an interval, update the cost at +// pixel 'i' if it is smaller than the previously computed value. +static WEBP_INLINE void UpdateCost(CostManager* const manager, int i, + int position, float cost) { + const int k = i - position; + assert(k >= 0 && k < MAX_LENGTH); + + if (manager->costs_[i] > cost) { + manager->costs_[i] = cost; + manager->dist_array_[i] = k + 1; + } +} + +// Given the cost and the position that define an interval, update the cost for +// all the pixels between 'start' and 'end' excluded. +static WEBP_INLINE void UpdateCostPerInterval(CostManager* const manager, + int start, int end, int position, + float cost) { + int i; + for (i = start; i < end; ++i) UpdateCost(manager, i, position, cost); +} + +// Given two intervals, make 'prev' be the previous one of 'next' in 'manager'. +static WEBP_INLINE void ConnectIntervals(CostManager* const manager, + CostInterval* const prev, + CostInterval* const next) { + if (prev != NULL) { + prev->next_ = next; + } else { + manager->head_ = next; + } + + if (next != NULL) next->previous_ = prev; +} + +// Pop an interval in the manager. +static WEBP_INLINE void PopInterval(CostManager* const manager, + CostInterval* const interval) { + if (interval == NULL) return; + + ConnectIntervals(manager, interval->previous_, interval->next_); + if (CostIntervalIsInFreeList(manager, interval)) { + CostIntervalAddToFreeList(manager, interval); + } else { // recycle regularly malloc'd intervals too + interval->next_ = manager->recycled_intervals_; + manager->recycled_intervals_ = interval; + } + --manager->count_; + assert(manager->count_ >= 0); +} + +// Update the cost at index i by going over all the stored intervals that +// overlap with i. 
+// If 'do_clean_intervals' is set to something different than 0, intervals that +// end before 'i' will be popped. +static WEBP_INLINE void UpdateCostAtIndex(CostManager* const manager, int i, + int do_clean_intervals) { + CostInterval* current = manager->head_; + + while (current != NULL && current->start_ <= i) { + CostInterval* const next = current->next_; + if (current->end_ <= i) { + if (do_clean_intervals) { + // We have an outdated interval, remove it. + PopInterval(manager, current); + } + } else { + UpdateCost(manager, i, current->index_, current->cost_); + } + current = next; + } +} + +// Given a current orphan interval and its previous interval, before +// it was orphaned (which can be NULL), set it at the right place in the list +// of intervals using the start_ ordering and the previous interval as a hint. +static WEBP_INLINE void PositionOrphanInterval(CostManager* const manager, + CostInterval* const current, + CostInterval* previous) { + assert(current != NULL); + + if (previous == NULL) previous = manager->head_; + while (previous != NULL && current->start_ < previous->start_) { + previous = previous->previous_; + } + while (previous != NULL && previous->next_ != NULL && + previous->next_->start_ < current->start_) { + previous = previous->next_; + } + + if (previous != NULL) { + ConnectIntervals(manager, current, previous->next_); + } else { + ConnectIntervals(manager, current, manager->head_); + } + ConnectIntervals(manager, previous, current); +} + +// Insert an interval in the list contained in the manager by starting at +// interval_in as a hint. The intervals are sorted by start_ value. +static WEBP_INLINE void InsertInterval(CostManager* const manager, + CostInterval* const interval_in, + float cost, int position, int start, + int end) { + CostInterval* interval_new; + + if (start >= end) return; + if (manager->count_ >= COST_CACHE_INTERVAL_SIZE_MAX) { + // Serialize the interval if we cannot store it. + UpdateCostPerInterval(manager, start, end, position, cost); + return; + } + if (manager->free_intervals_ != NULL) { + interval_new = manager->free_intervals_; + manager->free_intervals_ = interval_new->next_; + } else if (manager->recycled_intervals_ != NULL) { + interval_new = manager->recycled_intervals_; + manager->recycled_intervals_ = interval_new->next_; + } else { // malloc for good + interval_new = (CostInterval*)WebPSafeMalloc(1, sizeof(*interval_new)); + if (interval_new == NULL) { + // Write down the interval if we cannot create it. + UpdateCostPerInterval(manager, start, end, position, cost); + return; + } + } + + interval_new->cost_ = cost; + interval_new->index_ = position; + interval_new->start_ = start; + interval_new->end_ = end; + PositionOrphanInterval(manager, interval_new, interval_in); + + ++manager->count_; +} + +// Given a new cost interval defined by its start at position, its length value +// and distance_cost, add its contributions to the previous intervals and costs. +// If handling the interval or one of its subintervals becomes to heavy, its +// contribution is added to the costs right away. +static WEBP_INLINE void PushInterval(CostManager* const manager, + double distance_cost, int position, + int len) { + size_t i; + CostInterval* interval = manager->head_; + CostInterval* interval_next; + const CostCacheInterval* const cost_cache_intervals = + manager->cache_intervals_; + // If the interval is small enough, no need to deal with the heavy + // interval logic, just serialize it right away. This constant is empirical. 
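+  // (Lengths shorter than kSkipDistance are folded into costs_[] directly
+  // instead of going through the interval machinery.)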
+ const int kSkipDistance = 10; + + if (len < kSkipDistance) { + int j; + for (j = position; j < position + len; ++j) { + const int k = j - position; + float cost_tmp; + assert(k >= 0 && k < MAX_LENGTH); + cost_tmp = (float)(distance_cost + manager->cost_cache_[k]); + + if (manager->costs_[j] > cost_tmp) { + manager->costs_[j] = cost_tmp; + manager->dist_array_[j] = k + 1; + } + } + return; + } + + for (i = 0; i < manager->cache_intervals_size_ && + cost_cache_intervals[i].start_ < len; + ++i) { + // Define the intersection of the ith interval with the new one. + int start = position + cost_cache_intervals[i].start_; + const int end = position + (cost_cache_intervals[i].end_ > len + ? len + : cost_cache_intervals[i].end_); + const float cost = (float)(distance_cost + cost_cache_intervals[i].cost_); + + for (; interval != NULL && interval->start_ < end; + interval = interval_next) { + interval_next = interval->next_; + + // Make sure we have some overlap + if (start >= interval->end_) continue; + + if (cost >= interval->cost_) { + // When intervals are represented, the lower, the better. + // [**********************************************************[ + // start end + // [----------------------------------[ + // interval->start_ interval->end_ + // If we are worse than what we already have, add whatever we have so + // far up to interval. + const int start_new = interval->end_; + InsertInterval(manager, interval, cost, position, start, + interval->start_); + start = start_new; + if (start >= end) break; + continue; + } + + if (start <= interval->start_) { + if (interval->end_ <= end) { + // [----------------------------------[ + // interval->start_ interval->end_ + // [**************************************************************[ + // start end + // We can safely remove the old interval as it is fully included. + PopInterval(manager, interval); + } else { + // [------------------------------------[ + // interval->start_ interval->end_ + // [*****************************[ + // start end + interval->start_ = end; + break; + } + } else { + if (end < interval->end_) { + // [--------------------------------------------------------------[ + // interval->start_ interval->end_ + // [*****************************[ + // start end + // We have to split the old interval as it fully contains the new one. + const int end_original = interval->end_; + interval->end_ = start; + InsertInterval(manager, interval, interval->cost_, interval->index_, + end, end_original); + interval = interval->next_; + break; + } else { + // [------------------------------------[ + // interval->start_ interval->end_ + // [*****************************[ + // start end + interval->end_ = start; + } + } + } + // Insert the remaining interval from start to end. + InsertInterval(manager, interval, cost, position, start, end); + } +} + +static int BackwardReferencesHashChainDistanceOnly( + int xsize, int ysize, const uint32_t* const argb, int cache_bits, + const VP8LHashChain* const hash_chain, const VP8LBackwardRefs* const refs, + uint16_t* const dist_array) { + int i; + int ok = 0; + int cc_init = 0; + const int pix_count = xsize * ysize; + const int use_color_cache = (cache_bits > 0); + const size_t literal_array_size = + sizeof(double) * (NUM_LITERAL_CODES + NUM_LENGTH_CODES + + ((cache_bits > 0) ? 
(1 << cache_bits) : 0));
+  const size_t cost_model_size = sizeof(CostModel) + literal_array_size;
+  CostModel* const cost_model =
+      (CostModel*)WebPSafeCalloc(1ULL, cost_model_size);
+  VP8LColorCache hashers;
+  CostManager* cost_manager =
+      (CostManager*)WebPSafeMalloc(1ULL, sizeof(*cost_manager));
+  int offset_prev = -1, len_prev = -1;
+  double offset_cost = -1;
+  int first_offset_is_constant = -1;  // initialized with 'impossible' value
+  int reach = 0;
+
+  if (cost_model == NULL || cost_manager == NULL) goto Error;
+
+  cost_model->literal_ = (double*)(cost_model + 1);
+  if (use_color_cache) {
+    cc_init = VP8LColorCacheInit(&hashers, cache_bits);
+    if (!cc_init) goto Error;
+  }
+
+  if (!CostModelBuild(cost_model, xsize, cache_bits, refs)) {
+    goto Error;
+  }
+
+  if (!CostManagerInit(cost_manager, dist_array, pix_count, cost_model)) {
+    goto Error;
+  }
+
+  // We loop one pixel at a time, but store all currently best points to
+  // non-processed locations from this point.
+  dist_array[0] = 0;
+  // Add first pixel as literal.
+  AddSingleLiteralWithCostModel(argb, &hashers, cost_model, 0, use_color_cache,
+                                0.f, cost_manager->costs_, dist_array);
+
+  for (i = 1; i < pix_count; ++i) {
+    const float prev_cost = cost_manager->costs_[i - 1];
+    int offset, len;
+    VP8LHashChainFindCopy(hash_chain, i, &offset, &len);
+
+    // Try adding the pixel as a literal.
+    AddSingleLiteralWithCostModel(argb, &hashers, cost_model, i,
+                                  use_color_cache, prev_cost,
+                                  cost_manager->costs_, dist_array);
+
+    // If we are dealing with a non-literal.
+    if (len >= 2) {
+      if (offset != offset_prev) {
+        const int code = VP8LDistanceToPlaneCode(xsize, offset);
+        offset_cost = GetDistanceCost(cost_model, code);
+        first_offset_is_constant = 1;
+        PushInterval(cost_manager, prev_cost + offset_cost, i, len);
+      } else {
+        assert(offset_cost >= 0);
+        assert(len_prev >= 0);
+        assert(first_offset_is_constant == 0 || first_offset_is_constant == 1);
+        // Instead of considering all contributions from a pixel i by calling:
+        //         PushInterval(cost_manager, prev_cost + offset_cost, i, len);
+        // we optimize these contributions in case offset_cost stays the same
+        // for consecutive pixels. This describes a set of pixels similar to a
+        // previous set (e.g. constant color regions).
+        if (first_offset_is_constant) {
+          reach = i - 1 + len_prev - 1;
+          first_offset_is_constant = 0;
+        }
+
+        if (i + len - 1 > reach) {
+          // We can only go further with the same offset if the previous
+          // length was maxed, hence len_prev == len == MAX_LENGTH.
+          // TODO(vrabaud), bump i to the end right away (insert cache and
+          // update cost).
+          // TODO(vrabaud), check if one of the points in between does not have
+          // a lower cost.
+          // Already consider the pixel at "reach" to add intervals that are
+          // better than whatever we add.
+          int offset_j, len_j = 0;
+          int j;
+          assert(len == MAX_LENGTH || len == pix_count - i);
+          // Figure out the last consecutive pixel within [i, reach + 1] with
+          // the same offset.
+          for (j = i; j <= reach; ++j) {
+            VP8LHashChainFindCopy(hash_chain, j + 1, &offset_j, &len_j);
+            if (offset_j != offset) {
+              VP8LHashChainFindCopy(hash_chain, j, &offset_j, &len_j);
+              break;
+            }
+          }
+          // Update the cost at j - 1 and j.
+ UpdateCostAtIndex(cost_manager, j - 1, 0); + UpdateCostAtIndex(cost_manager, j, 0); + + PushInterval(cost_manager, cost_manager->costs_[j - 1] + offset_cost, + j, len_j); + reach = j + len_j - 1; + } + } + } + + UpdateCostAtIndex(cost_manager, i, 1); + offset_prev = offset; + len_prev = len; + } + + ok = !refs->error_; +Error: + if (cc_init) VP8LColorCacheClear(&hashers); + CostManagerClear(cost_manager); + WebPSafeFree(cost_model); + WebPSafeFree(cost_manager); + return ok; +} + +// We pack the path at the end of *dist_array and return +// a pointer to this part of the array. Example: +// dist_array = [1x2xx3x2] => packed [1x2x1232], chosen_path = [1232] +static void TraceBackwards(uint16_t* const dist_array, + int dist_array_size, + uint16_t** const chosen_path, + int* const chosen_path_size) { + uint16_t* path = dist_array + dist_array_size; + uint16_t* cur = dist_array + dist_array_size - 1; + while (cur >= dist_array) { + const int k = *cur; + --path; + *path = k; + cur -= k; + } + *chosen_path = path; + *chosen_path_size = (int)(dist_array + dist_array_size - path); +} + +static int BackwardReferencesHashChainFollowChosenPath( + const uint32_t* const argb, int cache_bits, + const uint16_t* const chosen_path, int chosen_path_size, + const VP8LHashChain* const hash_chain, VP8LBackwardRefs* const refs) { + const int use_color_cache = (cache_bits > 0); + int ix; + int i = 0; + int ok = 0; + int cc_init = 0; + VP8LColorCache hashers; + + if (use_color_cache) { + cc_init = VP8LColorCacheInit(&hashers, cache_bits); + if (!cc_init) goto Error; + } + + VP8LClearBackwardRefs(refs); + for (ix = 0; ix < chosen_path_size; ++ix) { + const int len = chosen_path[ix]; + if (len != 1) { + int k; + const int offset = VP8LHashChainFindOffset(hash_chain, i); + VP8LBackwardRefsCursorAdd(refs, PixOrCopyCreateCopy(offset, len)); + if (use_color_cache) { + for (k = 0; k < len; ++k) { + VP8LColorCacheInsert(&hashers, argb[i + k]); + } + } + i += len; + } else { + PixOrCopy v; + const int idx = + use_color_cache ? VP8LColorCacheContains(&hashers, argb[i]) : -1; + if (idx >= 0) { + // use_color_cache is true and hashers contains argb[i] + // push pixel as a color cache index + v = PixOrCopyCreateCacheIdx(idx); + } else { + if (use_color_cache) VP8LColorCacheInsert(&hashers, argb[i]); + v = PixOrCopyCreateLiteral(argb[i]); + } + VP8LBackwardRefsCursorAdd(refs, v); + ++i; + } + } + ok = !refs->error_; + Error: + if (cc_init) VP8LColorCacheClear(&hashers); + return ok; +} + +// Returns 1 on success. 
+extern int VP8LBackwardReferencesTraceBackwards(
+    int xsize, int ysize, const uint32_t* const argb, int cache_bits,
+    const VP8LHashChain* const hash_chain,
+    const VP8LBackwardRefs* const refs_src, VP8LBackwardRefs* const refs_dst);
+int VP8LBackwardReferencesTraceBackwards(int xsize, int ysize,
+                                         const uint32_t* const argb,
+                                         int cache_bits,
+                                         const VP8LHashChain* const hash_chain,
+                                         const VP8LBackwardRefs* const refs_src,
+                                         VP8LBackwardRefs* const refs_dst) {
+  int ok = 0;
+  const int dist_array_size = xsize * ysize;
+  uint16_t* chosen_path = NULL;
+  int chosen_path_size = 0;
+  uint16_t* dist_array =
+      (uint16_t*)WebPSafeMalloc(dist_array_size, sizeof(*dist_array));
+
+  if (dist_array == NULL) goto Error;
+
+  if (!BackwardReferencesHashChainDistanceOnly(
+          xsize, ysize, argb, cache_bits, hash_chain, refs_src, dist_array)) {
+    goto Error;
+  }
+  TraceBackwards(dist_array, dist_array_size, &chosen_path, &chosen_path_size);
+  if (!BackwardReferencesHashChainFollowChosenPath(
+          argb, cache_bits, chosen_path, chosen_path_size, hash_chain,
+          refs_dst)) {
+    goto Error;
+  }
+  ok = 1;
+ Error:
+  WebPSafeFree(dist_array);
+  return ok;
+}
diff --git a/vendor/github.com/sizeofint/webpanimation/enc_backward_references_enc.c b/vendor/github.com/sizeofint/webpanimation/enc_backward_references_enc.c
new file mode 100644
index 00000000..a2c4b201
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/enc_backward_references_enc.c
@@ -0,0 +1,1030 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Author: Jyrki Alakuijala (jyrki@google.com)
+//
+
+#include <assert.h>
+#include <float.h>
+#include <string.h>
+
+#include "dsp_dsp.h"
+#include "dsp_lossless.h"
+#include "dsp_lossless_common.h"
+#include "enc_backward_references_enc.h"
+#include "enc_histogram_enc.h"
+#include "utils_color_cache_utils.h"
+#include "utils_utils.h"
+
+#define MIN_BLOCK_SIZE 256  // minimum block size for backward references
+
+#define MAX_ENTROPY    (1e30f)
+
+// 1M window (4M bytes) minus 120 special codes for short distances.
+#define WINDOW_SIZE ((1 << WINDOW_SIZE_BITS) - 120)
+
+// Minimum number of pixels for which it is cheaper to encode a
+// distance + length instead of each pixel as a literal.
+#define MIN_LENGTH 4 + +// ----------------------------------------------------------------------------- + +static const uint8_t plane_to_code_lut[128] = { + 96, 73, 55, 39, 23, 13, 5, 1, 255, 255, 255, 255, 255, 255, 255, 255, + 101, 78, 58, 42, 26, 16, 8, 2, 0, 3, 9, 17, 27, 43, 59, 79, + 102, 86, 62, 46, 32, 20, 10, 6, 4, 7, 11, 21, 33, 47, 63, 87, + 105, 90, 70, 52, 37, 28, 18, 14, 12, 15, 19, 29, 38, 53, 71, 91, + 110, 99, 82, 66, 48, 35, 30, 24, 22, 25, 31, 36, 49, 67, 83, 100, + 115, 108, 94, 76, 64, 50, 44, 40, 34, 41, 45, 51, 65, 77, 95, 109, + 118, 113, 103, 92, 80, 68, 60, 56, 54, 57, 61, 69, 81, 93, 104, 114, + 119, 116, 111, 106, 97, 88, 84, 74, 72, 75, 85, 89, 98, 107, 112, 117 +}; + +extern int VP8LDistanceToPlaneCode(int xsize, int dist); +int VP8LDistanceToPlaneCode(int xsize, int dist) { + const int yoffset = dist / xsize; + const int xoffset = dist - yoffset * xsize; + if (xoffset <= 8 && yoffset < 8) { + return plane_to_code_lut[yoffset * 16 + 8 - xoffset] + 1; + } else if (xoffset > xsize - 8 && yoffset < 7) { + return plane_to_code_lut[(yoffset + 1) * 16 + 8 + (xsize - xoffset)] + 1; + } + return dist + 120; +} + +// Returns the exact index where array1 and array2 are different. For an index +// inferior or equal to best_len_match, the return value just has to be strictly +// inferior to best_len_match. The current behavior is to return 0 if this index +// is best_len_match, and the index itself otherwise. +// If no two elements are the same, it returns max_limit. +static WEBP_INLINE int FindMatchLength(const uint32_t* const array1, + const uint32_t* const array2, + int best_len_match, int max_limit) { + // Before 'expensive' linear match, check if the two arrays match at the + // current best length index. + if (array1[best_len_match] != array2[best_len_match]) return 0; + + return VP8LVectorMismatch(array1, array2, max_limit); +} + +// ----------------------------------------------------------------------------- +// VP8LBackwardRefs + +struct PixOrCopyBlock { + PixOrCopyBlock* next_; // next block (or NULL) + PixOrCopy* start_; // data start + int size_; // currently used size +}; + +extern void VP8LClearBackwardRefs(VP8LBackwardRefs* const refs); +void VP8LClearBackwardRefs(VP8LBackwardRefs* const refs) { + assert(refs != NULL); + if (refs->tail_ != NULL) { + *refs->tail_ = refs->free_blocks_; // recycle all blocks at once + } + refs->free_blocks_ = refs->refs_; + refs->tail_ = &refs->refs_; + refs->last_block_ = NULL; + refs->refs_ = NULL; +} + +void VP8LBackwardRefsClear(VP8LBackwardRefs* const refs) { + assert(refs != NULL); + VP8LClearBackwardRefs(refs); + while (refs->free_blocks_ != NULL) { + PixOrCopyBlock* const next = refs->free_blocks_->next_; + WebPSafeFree(refs->free_blocks_); + refs->free_blocks_ = next; + } +} + +// Swaps the content of two VP8LBackwardRefs. +static void BackwardRefsSwap(VP8LBackwardRefs* const refs1, + VP8LBackwardRefs* const refs2) { + const int point_to_refs1 = + (refs1->tail_ != NULL && refs1->tail_ == &refs1->refs_); + const int point_to_refs2 = + (refs2->tail_ != NULL && refs2->tail_ == &refs2->refs_); + const VP8LBackwardRefs tmp = *refs1; + *refs1 = *refs2; + *refs2 = tmp; + if (point_to_refs2) refs1->tail_ = &refs1->refs_; + if (point_to_refs1) refs2->tail_ = &refs2->refs_; +} + +void VP8LBackwardRefsInit(VP8LBackwardRefs* const refs, int block_size) { + assert(refs != NULL); + memset(refs, 0, sizeof(*refs)); + refs->tail_ = &refs->refs_; + refs->block_size_ = + (block_size < MIN_BLOCK_SIZE) ? 
MIN_BLOCK_SIZE : block_size; +} + +VP8LRefsCursor VP8LRefsCursorInit(const VP8LBackwardRefs* const refs) { + VP8LRefsCursor c; + c.cur_block_ = refs->refs_; + if (refs->refs_ != NULL) { + c.cur_pos = c.cur_block_->start_; + c.last_pos_ = c.cur_pos + c.cur_block_->size_; + } else { + c.cur_pos = NULL; + c.last_pos_ = NULL; + } + return c; +} + +void VP8LRefsCursorNextBlock(VP8LRefsCursor* const c) { + PixOrCopyBlock* const b = c->cur_block_->next_; + c->cur_pos = (b == NULL) ? NULL : b->start_; + c->last_pos_ = (b == NULL) ? NULL : b->start_ + b->size_; + c->cur_block_ = b; +} + +// Create a new block, either from the free list or allocated +static PixOrCopyBlock* BackwardRefsNewBlock(VP8LBackwardRefs* const refs) { + PixOrCopyBlock* b = refs->free_blocks_; + if (b == NULL) { // allocate new memory chunk + const size_t total_size = + sizeof(*b) + refs->block_size_ * sizeof(*b->start_); + b = (PixOrCopyBlock*)WebPSafeMalloc(1ULL, total_size); + if (b == NULL) { + refs->error_ |= 1; + return NULL; + } + b->start_ = (PixOrCopy*)((uint8_t*)b + sizeof(*b)); // not always aligned + } else { // recycle from free-list + refs->free_blocks_ = b->next_; + } + *refs->tail_ = b; + refs->tail_ = &b->next_; + refs->last_block_ = b; + b->next_ = NULL; + b->size_ = 0; + return b; +} + +// Return 1 on success, 0 on error. +static int BackwardRefsClone(const VP8LBackwardRefs* const from, + VP8LBackwardRefs* const to) { + const PixOrCopyBlock* block_from = from->refs_; + VP8LClearBackwardRefs(to); + while (block_from != NULL) { + PixOrCopyBlock* const block_to = BackwardRefsNewBlock(to); + if (block_to == NULL) return 0; + memcpy(block_to->start_, block_from->start_, + block_from->size_ * sizeof(PixOrCopy)); + block_to->size_ = block_from->size_; + block_from = block_from->next_; + } + return 1; +} + +extern void VP8LBackwardRefsCursorAdd(VP8LBackwardRefs* const refs, + const PixOrCopy v); +void VP8LBackwardRefsCursorAdd(VP8LBackwardRefs* const refs, + const PixOrCopy v) { + PixOrCopyBlock* b = refs->last_block_; + if (b == NULL || b->size_ == refs->block_size_) { + b = BackwardRefsNewBlock(refs); + if (b == NULL) return; // refs->error_ is set + } + b->start_[b->size_++] = v; +} + +// ----------------------------------------------------------------------------- +// Hash chains + +int VP8LHashChainInit(VP8LHashChain* const p, int size) { + assert(p->size_ == 0); + assert(p->offset_length_ == NULL); + assert(size > 0); + p->offset_length_ = + (uint32_t*)WebPSafeMalloc(size, sizeof(*p->offset_length_)); + if (p->offset_length_ == NULL) return 0; + p->size_ = size; + + return 1; +} + +void VP8LHashChainClear(VP8LHashChain* const p) { + assert(p != NULL); + WebPSafeFree(p->offset_length_); + + p->size_ = 0; + p->offset_length_ = NULL; +} + +// ----------------------------------------------------------------------------- + +static const uint32_t kHashMultiplierHi = 0xc6a4a793u; +static const uint32_t kHashMultiplierLo = 0x5bd1e996u; + +static WEBP_UBSAN_IGNORE_UNSIGNED_OVERFLOW WEBP_INLINE +uint32_t GetPixPairHash64(const uint32_t* const argb) { + uint32_t key; + key = argb[1] * kHashMultiplierHi; + key += argb[0] * kHashMultiplierLo; + key = key >> (32 - HASH_BITS); + return key; +} + +// Returns the maximum number of hash chain lookups to do for a +// given compression quality. Return value in range [8, 86]. +static int GetMaxItersForQuality(int quality) { + return 8 + (quality * quality) / 128; +} + +static int GetWindowSizeForHashChain(int quality, int xsize) { + const int max_window_size = (quality > 75) ? 
WINDOW_SIZE + : (quality > 50) ? (xsize << 8) + : (quality > 25) ? (xsize << 6) + : (xsize << 4); + assert(xsize > 0); + return (max_window_size > WINDOW_SIZE) ? WINDOW_SIZE : max_window_size; +} + +static WEBP_INLINE int MaxFindCopyLength(int len) { + return (len < MAX_LENGTH) ? len : MAX_LENGTH; +} + +int VP8LHashChainFill(VP8LHashChain* const p, int quality, + const uint32_t* const argb, int xsize, int ysize, + int low_effort) { + const int size = xsize * ysize; + const int iter_max = GetMaxItersForQuality(quality); + const uint32_t window_size = GetWindowSizeForHashChain(quality, xsize); + int pos; + int argb_comp; + uint32_t base_position; + int32_t* hash_to_first_index; + // Temporarily use the p->offset_length_ as a hash chain. + int32_t* chain = (int32_t*)p->offset_length_; + assert(size > 0); + assert(p->size_ != 0); + assert(p->offset_length_ != NULL); + + if (size <= 2) { + p->offset_length_[0] = p->offset_length_[size - 1] = 0; + return 1; + } + + hash_to_first_index = + (int32_t*)WebPSafeMalloc(HASH_SIZE, sizeof(*hash_to_first_index)); + if (hash_to_first_index == NULL) return 0; + + // Set the int32_t array to -1. + memset(hash_to_first_index, 0xff, HASH_SIZE * sizeof(*hash_to_first_index)); + // Fill the chain linking pixels with the same hash. + argb_comp = (argb[0] == argb[1]); + for (pos = 0; pos < size - 2;) { + uint32_t hash_code; + const int argb_comp_next = (argb[pos + 1] == argb[pos + 2]); + if (argb_comp && argb_comp_next) { + // Consecutive pixels with the same color will share the same hash. + // We therefore use a different hash: the color and its repetition + // length. + uint32_t tmp[2]; + uint32_t len = 1; + tmp[0] = argb[pos]; + // Figure out how far the pixels are the same. + // The last pixel has a different 64 bit hash, as its next pixel does + // not have the same color, so we just need to get to the last pixel equal + // to its follower. + while (pos + (int)len + 2 < size && argb[pos + len + 2] == argb[pos]) { + ++len; + } + if (len > MAX_LENGTH) { + // Skip the pixels that match for distance=1 and length>MAX_LENGTH + // because they are linked to their predecessor and we automatically + // check that in the main for loop below. Skipping means setting no + // predecessor in the chain, hence -1. + memset(chain + pos, 0xff, (len - MAX_LENGTH) * sizeof(*chain)); + pos += len - MAX_LENGTH; + len = MAX_LENGTH; + } + // Process the rest of the hash chain. + while (len) { + tmp[1] = len--; + hash_code = GetPixPairHash64(tmp); + chain[pos] = hash_to_first_index[hash_code]; + hash_to_first_index[hash_code] = pos++; + } + argb_comp = 0; + } else { + // Just move one pixel forward. + hash_code = GetPixPairHash64(argb + pos); + chain[pos] = hash_to_first_index[hash_code]; + hash_to_first_index[hash_code] = pos++; + argb_comp = argb_comp_next; + } + } + // Process the penultimate pixel. + chain[pos] = hash_to_first_index[GetPixPairHash64(argb + pos)]; + + WebPSafeFree(hash_to_first_index); + + // Find the best match interval at each pixel, defined by an offset to the + // pixel and a length. The right-most pixel cannot match anything to the right + // (hence a best length of 0) and the left-most pixel nothing to the left + // (hence an offset of 0). 
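+  // Each offset_length_[i] entry packs the offset into the upper bits and the
+  // length into the lower MAX_LENGTH_BITS bits, e.g. offset 3 with length 5
+  // is stored as (3 << MAX_LENGTH_BITS) | 5.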
+ assert(size > 2); + p->offset_length_[0] = p->offset_length_[size - 1] = 0; + for (base_position = size - 2; base_position > 0;) { + const int max_len = MaxFindCopyLength(size - 1 - base_position); + const uint32_t* const argb_start = argb + base_position; + int iter = iter_max; + int best_length = 0; + uint32_t best_distance = 0; + uint32_t best_argb; + const int min_pos = + (base_position > window_size) ? base_position - window_size : 0; + const int length_max = (max_len < 256) ? max_len : 256; + uint32_t max_base_position; + + pos = chain[base_position]; + if (!low_effort) { + int curr_length; + // Heuristic: use the comparison with the above line as an initialization. + if (base_position >= (uint32_t)xsize) { + curr_length = FindMatchLength(argb_start - xsize, argb_start, + best_length, max_len); + if (curr_length > best_length) { + best_length = curr_length; + best_distance = xsize; + } + --iter; + } + // Heuristic: compare to the previous pixel. + curr_length = + FindMatchLength(argb_start - 1, argb_start, best_length, max_len); + if (curr_length > best_length) { + best_length = curr_length; + best_distance = 1; + } + --iter; + // Skip the for loop if we already have the maximum. + if (best_length == MAX_LENGTH) pos = min_pos - 1; + } + best_argb = argb_start[best_length]; + + for (; pos >= min_pos && --iter; pos = chain[pos]) { + int curr_length; + assert(base_position > (uint32_t)pos); + + if (argb[pos + best_length] != best_argb) continue; + + curr_length = VP8LVectorMismatch(argb + pos, argb_start, max_len); + if (best_length < curr_length) { + best_length = curr_length; + best_distance = base_position - pos; + best_argb = argb_start[best_length]; + // Stop if we have reached a good enough length. + if (best_length >= length_max) break; + } + } + // We have the best match but in case the two intervals continue matching + // to the left, we have the best matches for the left-extended pixels. + max_base_position = base_position; + while (1) { + assert(best_length <= MAX_LENGTH); + assert(best_distance <= WINDOW_SIZE); + p->offset_length_[base_position] = + (best_distance << MAX_LENGTH_BITS) | (uint32_t)best_length; + --base_position; + // Stop if we don't have a match or if we are out of bounds. + if (best_distance == 0 || base_position == 0) break; + // Stop if we cannot extend the matching intervals to the left. + if (base_position < best_distance || + argb[base_position - best_distance] != argb[base_position]) { + break; + } + // Stop if we are matching at its limit because there could be a closer + // matching interval with the same maximum length. Then again, if the + // matching interval is as close as possible (best_distance == 1), we will + // never find anything better so let's continue. 
+ if (best_length == MAX_LENGTH && best_distance != 1 && + base_position + MAX_LENGTH < max_base_position) { + break; + } + if (best_length < MAX_LENGTH) { + ++best_length; + max_base_position = base_position; + } + } + } + return 1; +} + +static WEBP_INLINE void AddSingleLiteral(uint32_t pixel, int use_color_cache, + VP8LColorCache* const hashers, + VP8LBackwardRefs* const refs) { + PixOrCopy v; + if (use_color_cache) { + const uint32_t key = VP8LColorCacheGetIndex(hashers, pixel); + if (VP8LColorCacheLookup(hashers, key) == pixel) { + v = PixOrCopyCreateCacheIdx(key); + } else { + v = PixOrCopyCreateLiteral(pixel); + VP8LColorCacheSet(hashers, key, pixel); + } + } else { + v = PixOrCopyCreateLiteral(pixel); + } + VP8LBackwardRefsCursorAdd(refs, v); +} + +static int BackwardReferencesRle(int xsize, int ysize, + const uint32_t* const argb, + int cache_bits, VP8LBackwardRefs* const refs) { + const int pix_count = xsize * ysize; + int i, k; + const int use_color_cache = (cache_bits > 0); + VP8LColorCache hashers; + + if (use_color_cache && !VP8LColorCacheInit(&hashers, cache_bits)) { + return 0; + } + VP8LClearBackwardRefs(refs); + // Add first pixel as literal. + AddSingleLiteral(argb[0], use_color_cache, &hashers, refs); + i = 1; + while (i < pix_count) { + const int max_len = MaxFindCopyLength(pix_count - i); + const int rle_len = FindMatchLength(argb + i, argb + i - 1, 0, max_len); + const int prev_row_len = (i < xsize) ? 0 : + FindMatchLength(argb + i, argb + i - xsize, 0, max_len); + if (rle_len >= prev_row_len && rle_len >= MIN_LENGTH) { + VP8LBackwardRefsCursorAdd(refs, PixOrCopyCreateCopy(1, rle_len)); + // We don't need to update the color cache here since it is always the + // same pixel being copied, and that does not change the color cache + // state. + i += rle_len; + } else if (prev_row_len >= MIN_LENGTH) { + VP8LBackwardRefsCursorAdd(refs, PixOrCopyCreateCopy(xsize, prev_row_len)); + if (use_color_cache) { + for (k = 0; k < prev_row_len; ++k) { + VP8LColorCacheInsert(&hashers, argb[i + k]); + } + } + i += prev_row_len; + } else { + AddSingleLiteral(argb[i], use_color_cache, &hashers, refs); + i++; + } + } + if (use_color_cache) VP8LColorCacheClear(&hashers); + return !refs->error_; +} + +static int BackwardReferencesLz77(int xsize, int ysize, + const uint32_t* const argb, int cache_bits, + const VP8LHashChain* const hash_chain, + VP8LBackwardRefs* const refs) { + int i; + int i_last_check = -1; + int ok = 0; + int cc_init = 0; + const int use_color_cache = (cache_bits > 0); + const int pix_count = xsize * ysize; + VP8LColorCache hashers; + + if (use_color_cache) { + cc_init = VP8LColorCacheInit(&hashers, cache_bits); + if (!cc_init) goto Error; + } + VP8LClearBackwardRefs(refs); + for (i = 0; i < pix_count;) { + // Alternative#1: Code the pixels starting at 'i' using backward reference. + int offset = 0; + int len = 0; + int j; + VP8LHashChainFindCopy(hash_chain, i, &offset, &len); + if (len >= MIN_LENGTH) { + const int len_ini = len; + int max_reach = 0; + const int j_max = + (i + len_ini >= pix_count) ? pix_count - 1 : i + len_ini; + // Only start from what we have not checked already. + i_last_check = (i > i_last_check) ? i : i_last_check; + // We know the best match for the current pixel but we try to find the + // best matches for the current pixel AND the next one combined. 
+ // The naive method would use the intervals: + // [i,i+len) + [i+len, length of best match at i+len) + // while we check if we can use: + // [i,j) (where j<=i+len) + [j, length of best match at j) + for (j = i_last_check + 1; j <= j_max; ++j) { + const int len_j = VP8LHashChainFindLength(hash_chain, j); + const int reach = + j + (len_j >= MIN_LENGTH ? len_j : 1); // 1 for single literal. + if (reach > max_reach) { + len = j - i; + max_reach = reach; + if (max_reach >= pix_count) break; + } + } + } else { + len = 1; + } + // Go with literal or backward reference. + assert(len > 0); + if (len == 1) { + AddSingleLiteral(argb[i], use_color_cache, &hashers, refs); + } else { + VP8LBackwardRefsCursorAdd(refs, PixOrCopyCreateCopy(offset, len)); + if (use_color_cache) { + for (j = i; j < i + len; ++j) VP8LColorCacheInsert(&hashers, argb[j]); + } + } + i += len; + } + + ok = !refs->error_; + Error: + if (cc_init) VP8LColorCacheClear(&hashers); + return ok; +} + +// Compute an LZ77 by forcing matches to happen within a given distance cost. +// We therefore limit the algorithm to the lowest 32 values in the PlaneCode +// definition. +#define WINDOW_OFFSETS_SIZE_MAX 32 +static int BackwardReferencesLz77Box(int xsize, int ysize, + const uint32_t* const argb, int cache_bits, + const VP8LHashChain* const hash_chain_best, + VP8LHashChain* hash_chain, + VP8LBackwardRefs* const refs) { + int i; + const int pix_count = xsize * ysize; + uint16_t* counts; + int window_offsets[WINDOW_OFFSETS_SIZE_MAX] = {0}; + int window_offsets_new[WINDOW_OFFSETS_SIZE_MAX] = {0}; + int window_offsets_size = 0; + int window_offsets_new_size = 0; + uint16_t* const counts_ini = + (uint16_t*)WebPSafeMalloc(xsize * ysize, sizeof(*counts_ini)); + int best_offset_prev = -1, best_length_prev = -1; + if (counts_ini == NULL) return 0; + + // counts[i] counts how many times a pixel is repeated starting at position i. + i = pix_count - 2; + counts = counts_ini + i; + counts[1] = 1; + for (; i >= 0; --i, --counts) { + if (argb[i] == argb[i + 1]) { + // Max out the counts to MAX_LENGTH. + counts[0] = counts[1] + (counts[1] != MAX_LENGTH); + } else { + counts[0] = 1; + } + } + + // Figure out the window offsets around a pixel. They are stored in a + // spiraling order around the pixel as defined by VP8LDistanceToPlaneCode. + { + int x, y; + for (y = 0; y <= 6; ++y) { + for (x = -6; x <= 6; ++x) { + const int offset = y * xsize + x; + int plane_code; + // Ignore offsets that bring us after the pixel. + if (offset <= 0) continue; + plane_code = VP8LDistanceToPlaneCode(xsize, offset) - 1; + if (plane_code >= WINDOW_OFFSETS_SIZE_MAX) continue; + window_offsets[plane_code] = offset; + } + } + // For narrow images, not all plane codes are reached, so remove those. + for (i = 0; i < WINDOW_OFFSETS_SIZE_MAX; ++i) { + if (window_offsets[i] == 0) continue; + window_offsets[window_offsets_size++] = window_offsets[i]; + } + // Given a pixel P, find the offsets that reach pixels unreachable from P-1 + // with any of the offsets in window_offsets[]. 
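+    // (An offset o from pixel P lands on pixel P - o, which offset o - 1
+    // already reaches from pixel P - 1; so o only goes into
+    // window_offsets_new[] when o - 1 is not itself a window offset.)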
+ for (i = 0; i < window_offsets_size; ++i) { + int j; + int is_reachable = 0; + for (j = 0; j < window_offsets_size && !is_reachable; ++j) { + is_reachable |= (window_offsets[i] == window_offsets[j] + 1); + } + if (!is_reachable) { + window_offsets_new[window_offsets_new_size] = window_offsets[i]; + ++window_offsets_new_size; + } + } + } + + hash_chain->offset_length_[0] = 0; + for (i = 1; i < pix_count; ++i) { + int ind; + int best_length = VP8LHashChainFindLength(hash_chain_best, i); + int best_offset; + int do_compute = 1; + + if (best_length >= MAX_LENGTH) { + // Do not recompute the best match if we already have a maximal one in the + // window. + best_offset = VP8LHashChainFindOffset(hash_chain_best, i); + for (ind = 0; ind < window_offsets_size; ++ind) { + if (best_offset == window_offsets[ind]) { + do_compute = 0; + break; + } + } + } + if (do_compute) { + // Figure out if we should use the offset/length from the previous pixel + // as an initial guess and therefore only inspect the offsets in + // window_offsets_new[]. + const int use_prev = + (best_length_prev > 1) && (best_length_prev < MAX_LENGTH); + const int num_ind = + use_prev ? window_offsets_new_size : window_offsets_size; + best_length = use_prev ? best_length_prev - 1 : 0; + best_offset = use_prev ? best_offset_prev : 0; + // Find the longest match in a window around the pixel. + for (ind = 0; ind < num_ind; ++ind) { + int curr_length = 0; + int j = i; + int j_offset = + use_prev ? i - window_offsets_new[ind] : i - window_offsets[ind]; + if (j_offset < 0 || argb[j_offset] != argb[i]) continue; + // The longest match is the sum of how many times each pixel is + // repeated. + do { + const int counts_j_offset = counts_ini[j_offset]; + const int counts_j = counts_ini[j]; + if (counts_j_offset != counts_j) { + curr_length += + (counts_j_offset < counts_j) ? counts_j_offset : counts_j; + break; + } + // The same color is repeated counts_pos times at j_offset and j. + curr_length += counts_j_offset; + j_offset += counts_j_offset; + j += counts_j_offset; + } while (curr_length <= MAX_LENGTH && j < pix_count && + argb[j_offset] == argb[j]); + if (best_length < curr_length) { + best_offset = + use_prev ? window_offsets_new[ind] : window_offsets[ind]; + if (curr_length >= MAX_LENGTH) { + best_length = MAX_LENGTH; + break; + } else { + best_length = curr_length; + } + } + } + } + + assert(i + best_length <= pix_count); + assert(best_length <= MAX_LENGTH); + if (best_length <= MIN_LENGTH) { + hash_chain->offset_length_[i] = 0; + best_offset_prev = 0; + best_length_prev = 0; + } else { + hash_chain->offset_length_[i] = + (best_offset << MAX_LENGTH_BITS) | (uint32_t)best_length; + best_offset_prev = best_offset; + best_length_prev = best_length; + } + } + hash_chain->offset_length_[0] = 0; + WebPSafeFree(counts_ini); + + return BackwardReferencesLz77(xsize, ysize, argb, cache_bits, hash_chain, + refs); +} + +// ----------------------------------------------------------------------------- + +static void BackwardReferences2DLocality(int xsize, + const VP8LBackwardRefs* const refs) { + VP8LRefsCursor c = VP8LRefsCursorInit(refs); + while (VP8LRefsCursorOk(&c)) { + if (PixOrCopyIsCopy(c.cur_pos)) { + const int dist = c.cur_pos->argb_or_distance; + const int transformed_dist = VP8LDistanceToPlaneCode(xsize, dist); + c.cur_pos->argb_or_distance = transformed_dist; + } + VP8LRefsCursorNext(&c); + } +} + +// Evaluate optimal cache bits for the local color cache. 
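+// (The cache maps recently seen ARGB values to small indices so that
+// recurring colors can be coded as cache indices instead of full literals.)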
+// The input *best_cache_bits sets the maximum cache bits to use (passing 0 +// implies disabling the local color cache). The local color cache is also +// disabled for the lower (<= 25) quality. +// Returns 0 in case of memory error. +static int CalculateBestCacheSize(const uint32_t* argb, int quality, + const VP8LBackwardRefs* const refs, + int* const best_cache_bits) { + int i; + const int cache_bits_max = (quality <= 25) ? 0 : *best_cache_bits; + double entropy_min = MAX_ENTROPY; + int cc_init[MAX_COLOR_CACHE_BITS + 1] = { 0 }; + VP8LColorCache hashers[MAX_COLOR_CACHE_BITS + 1]; + VP8LRefsCursor c = VP8LRefsCursorInit(refs); + VP8LHistogram* histos[MAX_COLOR_CACHE_BITS + 1] = { NULL }; + int ok = 0; + + assert(cache_bits_max >= 0 && cache_bits_max <= MAX_COLOR_CACHE_BITS); + + if (cache_bits_max == 0) { + *best_cache_bits = 0; + // Local color cache is disabled. + return 1; + } + + // Allocate data. + for (i = 0; i <= cache_bits_max; ++i) { + histos[i] = VP8LAllocateHistogram(i); + if (histos[i] == NULL) goto Error; + VP8LHistogramInit(histos[i], i, /*init_arrays=*/ 1); + if (i == 0) continue; + cc_init[i] = VP8LColorCacheInit(&hashers[i], i); + if (!cc_init[i]) goto Error; + } + + // Find the cache_bits giving the lowest entropy. The search is done in a + // brute-force way as the function (entropy w.r.t cache_bits) can be + // anything in practice. + while (VP8LRefsCursorOk(&c)) { + const PixOrCopy* const v = c.cur_pos; + if (PixOrCopyIsLiteral(v)) { + const uint32_t pix = *argb++; + const uint32_t a = (pix >> 24) & 0xff; + const uint32_t r = (pix >> 16) & 0xff; + const uint32_t g = (pix >> 8) & 0xff; + const uint32_t b = (pix >> 0) & 0xff; + // The keys of the caches can be derived from the longest one. + int key = VP8LHashPix(pix, 32 - cache_bits_max); + // Do not use the color cache for cache_bits = 0. + ++histos[0]->blue_[b]; + ++histos[0]->literal_[g]; + ++histos[0]->red_[r]; + ++histos[0]->alpha_[a]; + // Deal with cache_bits > 0. + for (i = cache_bits_max; i >= 1; --i, key >>= 1) { + if (VP8LColorCacheLookup(&hashers[i], key) == pix) { + ++histos[i]->literal_[NUM_LITERAL_CODES + NUM_LENGTH_CODES + key]; + } else { + VP8LColorCacheSet(&hashers[i], key, pix); + ++histos[i]->blue_[b]; + ++histos[i]->literal_[g]; + ++histos[i]->red_[r]; + ++histos[i]->alpha_[a]; + } + } + } else { + int code, extra_bits, extra_bits_value; + // We should compute the contribution of the (distance,length) + // histograms but those are the same independently from the cache size. + // As those constant contributions are in the end added to the other + // histogram contributions, we can ignore them, except for the length + // prefix that is part of the literal_ histogram. + int len = PixOrCopyLength(v); + uint32_t argb_prev = *argb ^ 0xffffffffu; + VP8LPrefixEncode(len, &code, &extra_bits, &extra_bits_value); + for (i = 0; i <= cache_bits_max; ++i) { + ++histos[i]->literal_[NUM_LITERAL_CODES + code]; + } + // Update the color caches. + do { + if (*argb != argb_prev) { + // Efficiency: insert only if the color changes. 
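+          // (The caches are written directly instead of going through
+          // VP8LColorCacheSet: each smaller cache key is the largest key
+          // shifted right, as in the literal branch above.)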
+ int key = VP8LHashPix(*argb, 32 - cache_bits_max); + for (i = cache_bits_max; i >= 1; --i, key >>= 1) { + hashers[i].colors_[key] = *argb; + } + argb_prev = *argb; + } + argb++; + } while (--len != 0); + } + VP8LRefsCursorNext(&c); + } + + for (i = 0; i <= cache_bits_max; ++i) { + const double entropy = VP8LHistogramEstimateBits(histos[i]); + if (i == 0 || entropy < entropy_min) { + entropy_min = entropy; + *best_cache_bits = i; + } + } + ok = 1; +Error: + for (i = 0; i <= cache_bits_max; ++i) { + if (cc_init[i]) VP8LColorCacheClear(&hashers[i]); + VP8LFreeHistogram(histos[i]); + } + return ok; +} + +// Update (in-place) backward references for specified cache_bits. +static int BackwardRefsWithLocalCache(const uint32_t* const argb, + int cache_bits, + VP8LBackwardRefs* const refs) { + int pixel_index = 0; + VP8LColorCache hashers; + VP8LRefsCursor c = VP8LRefsCursorInit(refs); + if (!VP8LColorCacheInit(&hashers, cache_bits)) return 0; + + while (VP8LRefsCursorOk(&c)) { + PixOrCopy* const v = c.cur_pos; + if (PixOrCopyIsLiteral(v)) { + const uint32_t argb_literal = v->argb_or_distance; + const int ix = VP8LColorCacheContains(&hashers, argb_literal); + if (ix >= 0) { + // hashers contains argb_literal + *v = PixOrCopyCreateCacheIdx(ix); + } else { + VP8LColorCacheInsert(&hashers, argb_literal); + } + ++pixel_index; + } else { + // refs was created without local cache, so it can not have cache indexes. + int k; + assert(PixOrCopyIsCopy(v)); + for (k = 0; k < v->len; ++k) { + VP8LColorCacheInsert(&hashers, argb[pixel_index++]); + } + } + VP8LRefsCursorNext(&c); + } + VP8LColorCacheClear(&hashers); + return 1; +} + +static VP8LBackwardRefs* GetBackwardReferencesLowEffort( + int width, int height, const uint32_t* const argb, + int* const cache_bits, const VP8LHashChain* const hash_chain, + VP8LBackwardRefs* const refs_lz77) { + *cache_bits = 0; + if (!BackwardReferencesLz77(width, height, argb, 0, hash_chain, refs_lz77)) { + return NULL; + } + BackwardReferences2DLocality(width, refs_lz77); + return refs_lz77; +} + +extern int VP8LBackwardReferencesTraceBackwards( + int xsize, int ysize, const uint32_t* const argb, int cache_bits, + const VP8LHashChain* const hash_chain, + const VP8LBackwardRefs* const refs_src, VP8LBackwardRefs* const refs_dst); +static int GetBackwardReferences(int width, int height, + const uint32_t* const argb, int quality, + int lz77_types_to_try, int cache_bits_max, + int do_no_cache, + const VP8LHashChain* const hash_chain, + VP8LBackwardRefs* const refs, + int* const cache_bits_best) { + VP8LHistogram* histo = NULL; + int i, lz77_type; + // Index 0 is for a color cache, index 1 for no cache (if needed). + int lz77_types_best[2] = {0, 0}; + double bit_costs_best[2] = {DBL_MAX, DBL_MAX}; + VP8LHashChain hash_chain_box; + VP8LBackwardRefs* const refs_tmp = &refs[do_no_cache ? 2 : 1]; + int status = 0; + memset(&hash_chain_box, 0, sizeof(hash_chain_box)); + + histo = VP8LAllocateHistogram(MAX_COLOR_CACHE_BITS); + if (histo == NULL) goto Error; + + for (lz77_type = 1; lz77_types_to_try; + lz77_types_to_try &= ~lz77_type, lz77_type <<= 1) { + int res = 0; + double bit_cost = 0.; + if ((lz77_types_to_try & lz77_type) == 0) continue; + switch (lz77_type) { + case kLZ77RLE: + res = BackwardReferencesRle(width, height, argb, 0, refs_tmp); + break; + case kLZ77Standard: + // Compute LZ77 with no cache (0 bits), as the ideal LZ77 with a color + // cache is not that different in practice. 
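+        // (The best cache size for these refs is evaluated further below by
+        // CalculateBestCacheSize.)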
+ res = BackwardReferencesLz77(width, height, argb, 0, hash_chain, + refs_tmp); + break; + case kLZ77Box: + if (!VP8LHashChainInit(&hash_chain_box, width * height)) goto Error; + res = BackwardReferencesLz77Box(width, height, argb, 0, hash_chain, + &hash_chain_box, refs_tmp); + break; + default: + assert(0); + } + if (!res) goto Error; + + // Start with the no color cache case. + for (i = 1; i >= 0; --i) { + int cache_bits = (i == 1) ? 0 : cache_bits_max; + + if (i == 1 && !do_no_cache) continue; + + if (i == 0) { + // Try with a color cache. + if (!CalculateBestCacheSize(argb, quality, refs_tmp, &cache_bits)) { + goto Error; + } + if (cache_bits > 0) { + if (!BackwardRefsWithLocalCache(argb, cache_bits, refs_tmp)) { + goto Error; + } + } + } + + if (i == 0 && do_no_cache && cache_bits == 0) { + // No need to re-compute bit_cost as it was computed at i == 1. + } else { + VP8LHistogramCreate(histo, refs_tmp, cache_bits); + bit_cost = VP8LHistogramEstimateBits(histo); + } + + if (bit_cost < bit_costs_best[i]) { + if (i == 1) { + // Do not swap as the full cache analysis would have the wrong + // VP8LBackwardRefs to start with. + if (!BackwardRefsClone(refs_tmp, &refs[1])) goto Error; + } else { + BackwardRefsSwap(refs_tmp, &refs[0]); + } + bit_costs_best[i] = bit_cost; + lz77_types_best[i] = lz77_type; + if (i == 0) *cache_bits_best = cache_bits; + } + } + } + assert(lz77_types_best[0] > 0); + assert(!do_no_cache || lz77_types_best[1] > 0); + + // Improve on simple LZ77 but only for high quality (TraceBackwards is + // costly). + for (i = 1; i >= 0; --i) { + if (i == 1 && !do_no_cache) continue; + if ((lz77_types_best[i] == kLZ77Standard || + lz77_types_best[i] == kLZ77Box) && + quality >= 25) { + const VP8LHashChain* const hash_chain_tmp = + (lz77_types_best[i] == kLZ77Standard) ? hash_chain : &hash_chain_box; + const int cache_bits = (i == 1) ? 0 : *cache_bits_best; + if (VP8LBackwardReferencesTraceBackwards(width, height, argb, cache_bits, + hash_chain_tmp, &refs[i], + refs_tmp)) { + double bit_cost_trace; + VP8LHistogramCreate(histo, refs_tmp, cache_bits); + bit_cost_trace = VP8LHistogramEstimateBits(histo); + if (bit_cost_trace < bit_costs_best[i]) { + BackwardRefsSwap(refs_tmp, &refs[i]); + } + } + } + + BackwardReferences2DLocality(width, &refs[i]); + + if (i == 1 && lz77_types_best[0] == lz77_types_best[1] && + *cache_bits_best == 0) { + // If the best cache size is 0 and we have the same best LZ77, just copy + // the data over and stop here. + if (!BackwardRefsClone(&refs[1], &refs[0])) goto Error; + break; + } + } + status = 1; + +Error: + VP8LHashChainClear(&hash_chain_box); + VP8LFreeHistogram(histo); + return status; +} + +WebPEncodingError VP8LGetBackwardReferences( + int width, int height, const uint32_t* const argb, int quality, + int low_effort, int lz77_types_to_try, int cache_bits_max, int do_no_cache, + const VP8LHashChain* const hash_chain, VP8LBackwardRefs* const refs, + int* const cache_bits_best) { + if (low_effort) { + VP8LBackwardRefs* refs_best; + *cache_bits_best = cache_bits_max; + refs_best = GetBackwardReferencesLowEffort( + width, height, argb, cache_bits_best, hash_chain, refs); + if (refs_best == NULL) return VP8_ENC_ERROR_OUT_OF_MEMORY; + // Set it in first position. 
+    BackwardRefsSwap(refs_best, &refs[0]);
+  } else {
+    if (!GetBackwardReferences(width, height, argb, quality, lz77_types_to_try,
+                               cache_bits_max, do_no_cache, hash_chain, refs,
+                               cache_bits_best)) {
+      return VP8_ENC_ERROR_OUT_OF_MEMORY;
+    }
+  }
+  return VP8_ENC_OK;
+}
diff --git a/vendor/github.com/sizeofint/webpanimation/enc_backward_references_enc.h b/vendor/github.com/sizeofint/webpanimation/enc_backward_references_enc.h
new file mode 100644
index 00000000..ba6bf317
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/enc_backward_references_enc.h
@@ -0,0 +1,240 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Author: Jyrki Alakuijala (jyrki@google.com)
+//
+
+#ifndef WEBP_ENC_BACKWARD_REFERENCES_ENC_H_
+#define WEBP_ENC_BACKWARD_REFERENCES_ENC_H_
+
+#include <assert.h>
+#include <stdlib.h>
+#include "webp_types.h"
+#include "webp_encode.h"
+#include "webp_format_constants.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// The maximum allowed limit is 11.
+#define MAX_COLOR_CACHE_BITS 10
+
+// -----------------------------------------------------------------------------
+// PixOrCopy
+
+enum Mode {
+  kLiteral,
+  kCacheIdx,
+  kCopy,
+  kNone
+};
+
+typedef struct {
+  // mode as uint8_t to make the memory layout exactly 8 bytes.
+  uint8_t mode;
+  uint16_t len;
+  uint32_t argb_or_distance;
+} PixOrCopy;
+
+static WEBP_INLINE PixOrCopy PixOrCopyCreateCopy(uint32_t distance,
+                                                 uint16_t len) {
+  PixOrCopy retval;
+  retval.mode = kCopy;
+  retval.argb_or_distance = distance;
+  retval.len = len;
+  return retval;
+}
+
+static WEBP_INLINE PixOrCopy PixOrCopyCreateCacheIdx(int idx) {
+  PixOrCopy retval;
+  assert(idx >= 0);
+  assert(idx < (1 << MAX_COLOR_CACHE_BITS));
+  retval.mode = kCacheIdx;
+  retval.argb_or_distance = idx;
+  retval.len = 1;
+  return retval;
+}
+
+static WEBP_INLINE PixOrCopy PixOrCopyCreateLiteral(uint32_t argb) {
+  PixOrCopy retval;
+  retval.mode = kLiteral;
+  retval.argb_or_distance = argb;
+  retval.len = 1;
+  return retval;
+}
+
+static WEBP_INLINE int PixOrCopyIsLiteral(const PixOrCopy* const p) {
+  return (p->mode == kLiteral);
+}
+
+static WEBP_INLINE int PixOrCopyIsCacheIdx(const PixOrCopy* const p) {
+  return (p->mode == kCacheIdx);
+}
+
+static WEBP_INLINE int PixOrCopyIsCopy(const PixOrCopy* const p) {
+  return (p->mode == kCopy);
+}
+
+static WEBP_INLINE uint32_t PixOrCopyLiteral(const PixOrCopy* const p,
+                                             int component) {
+  assert(p->mode == kLiteral);
+  return (p->argb_or_distance >> (component * 8)) & 0xff;
+}
+
+static WEBP_INLINE uint32_t PixOrCopyLength(const PixOrCopy* const p) {
+  return p->len;
+}
+
+static WEBP_INLINE uint32_t PixOrCopyCacheIdx(const PixOrCopy* const p) {
+  assert(p->mode == kCacheIdx);
+  assert(p->argb_or_distance < (1U << MAX_COLOR_CACHE_BITS));
+  return p->argb_or_distance;
+}
+
+static WEBP_INLINE uint32_t PixOrCopyDistance(const PixOrCopy* const p) {
+  assert(p->mode == kCopy);
+  return p->argb_or_distance;
+}
+
+// -----------------------------------------------------------------------------
+// VP8LHashChain
+
+#define HASH_BITS 18
+#define HASH_SIZE (1 << HASH_BITS)
+
+// If you change this, you need
MAX_LENGTH_BITS + WINDOW_SIZE_BITS <= 32 as it +// is used in VP8LHashChain. +#define MAX_LENGTH_BITS 12 +#define WINDOW_SIZE_BITS 20 +// We want the max value to be attainable and stored in MAX_LENGTH_BITS bits. +#define MAX_LENGTH ((1 << MAX_LENGTH_BITS) - 1) +#if MAX_LENGTH_BITS + WINDOW_SIZE_BITS > 32 +#error "MAX_LENGTH_BITS + WINDOW_SIZE_BITS > 32" +#endif + +typedef struct VP8LHashChain VP8LHashChain; +struct VP8LHashChain { + // The 20 most significant bits contain the offset at which the best match + // is found. These 20 bits are the limit defined by GetWindowSizeForHashChain + // (through WINDOW_SIZE = 1<<20). + // The lower 12 bits contain the length of the match. The 12 bit limit is + // defined in MaxFindCopyLength with MAX_LENGTH=4096. + uint32_t* offset_length_; + // This is the maximum size of the hash_chain that can be constructed. + // Typically this is the pixel count (width x height) for a given image. + int size_; +}; + +// Must be called first, to set size. +int VP8LHashChainInit(VP8LHashChain* const p, int size); +// Pre-compute the best matches for argb. +int VP8LHashChainFill(VP8LHashChain* const p, int quality, + const uint32_t* const argb, int xsize, int ysize, + int low_effort); +void VP8LHashChainClear(VP8LHashChain* const p); // release memory + +static WEBP_INLINE int VP8LHashChainFindOffset(const VP8LHashChain* const p, + const int base_position) { + return p->offset_length_[base_position] >> MAX_LENGTH_BITS; +} + +static WEBP_INLINE int VP8LHashChainFindLength(const VP8LHashChain* const p, + const int base_position) { + return p->offset_length_[base_position] & ((1U << MAX_LENGTH_BITS) - 1); +} + +static WEBP_INLINE void VP8LHashChainFindCopy(const VP8LHashChain* const p, + int base_position, + int* const offset_ptr, + int* const length_ptr) { + *offset_ptr = VP8LHashChainFindOffset(p, base_position); + *length_ptr = VP8LHashChainFindLength(p, base_position); +} + +// ----------------------------------------------------------------------------- +// VP8LBackwardRefs (block-based backward-references storage) + +// maximum number of reference blocks the image will be segmented into +#define MAX_REFS_BLOCK_PER_IMAGE 16 + +typedef struct PixOrCopyBlock PixOrCopyBlock; // forward declaration +typedef struct VP8LBackwardRefs VP8LBackwardRefs; + +// Container for blocks chain +struct VP8LBackwardRefs { + int block_size_; // common block-size + int error_; // set to true if some memory error occurred + PixOrCopyBlock* refs_; // list of currently used blocks + PixOrCopyBlock** tail_; // for list recycling + PixOrCopyBlock* free_blocks_; // free-list + PixOrCopyBlock* last_block_; // used for adding new refs (internal) +}; + +// Initialize the object. 'block_size' is the common block size to store +// references (typically, width * height / MAX_REFS_BLOCK_PER_IMAGE). +void VP8LBackwardRefsInit(VP8LBackwardRefs* const refs, int block_size); +// Release memory for backward references. +void VP8LBackwardRefsClear(VP8LBackwardRefs* const refs); + +// Cursor for iterating on references content +typedef struct { + // public: + PixOrCopy* cur_pos; // current position + // private: + PixOrCopyBlock* cur_block_; // current block in the refs list + const PixOrCopy* last_pos_; // sentinel for switching to next block +} VP8LRefsCursor; + +// Returns a cursor positioned at the beginning of the references list. +VP8LRefsCursor VP8LRefsCursorInit(const VP8LBackwardRefs* const refs); +// Returns true if cursor is pointing at a valid position. 
+static WEBP_INLINE int VP8LRefsCursorOk(const VP8LRefsCursor* const c) { + return (c->cur_pos != NULL); +} +// Move to next block of references. Internal, not to be called directly. +void VP8LRefsCursorNextBlock(VP8LRefsCursor* const c); +// Move to next position, or NULL. Should not be called if !VP8LRefsCursorOk(). +static WEBP_INLINE void VP8LRefsCursorNext(VP8LRefsCursor* const c) { + assert(c != NULL); + assert(VP8LRefsCursorOk(c)); + if (++c->cur_pos == c->last_pos_) VP8LRefsCursorNextBlock(c); +} + +// ----------------------------------------------------------------------------- +// Main entry points + +enum VP8LLZ77Type { + kLZ77Standard = 1, + kLZ77RLE = 2, + kLZ77Box = 4 +}; + +// Evaluates best possible backward references for specified quality. +// The input cache_bits to 'VP8LGetBackwardReferences' sets the maximum cache +// bits to use (passing 0 implies disabling the local color cache). +// The optimal cache bits is evaluated and set for the *cache_bits_best +// parameter with the matching refs_best. +// If do_no_cache == 0, refs is an array of 2 values and the best +// VP8LBackwardRefs is put in the first element. +// If do_no_cache != 0, refs is an array of 3 values and the best +// VP8LBackwardRefs is put in the first element, the best value with no-cache in +// the second element. +// In both cases, the last element is used as temporary internally. +WebPEncodingError VP8LGetBackwardReferences( + int width, int height, const uint32_t* const argb, int quality, + int low_effort, int lz77_types_to_try, int cache_bits_max, int do_no_cache, + const VP8LHashChain* const hash_chain, VP8LBackwardRefs* const refs, + int* const cache_bits_best); + +#ifdef __cplusplus +} +#endif + +#endif // WEBP_ENC_BACKWARD_REFERENCES_ENC_H_ diff --git a/vendor/github.com/sizeofint/webpanimation/enc_config_enc.c b/vendor/github.com/sizeofint/webpanimation/enc_config_enc.c new file mode 100644 index 00000000..d596bdb5 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/enc_config_enc.c @@ -0,0 +1,157 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Coding tools configuration +// +// Author: Skal (pascal.massimino@gmail.com) + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include "webp_encode.h" + +//------------------------------------------------------------------------------ +// WebPConfig +//------------------------------------------------------------------------------ + +int WebPConfigInitInternal(WebPConfig* config, + WebPPreset preset, float quality, int version) { + if (WEBP_ABI_IS_INCOMPATIBLE(version, WEBP_ENCODER_ABI_VERSION)) { + return 0; // caller/system version mismatch! 
+ } + if (config == NULL) return 0; + + config->quality = quality; + config->target_size = 0; + config->target_PSNR = 0.; + config->method = 4; + config->sns_strength = 50; + config->filter_strength = 60; // mid-filtering + config->filter_sharpness = 0; + config->filter_type = 1; // default: strong (so U/V is filtered too) + config->partitions = 0; + config->segments = 4; + config->pass = 1; + config->qmin = 0; + config->qmax = 100; + config->show_compressed = 0; + config->preprocessing = 0; + config->autofilter = 0; + config->partition_limit = 0; + config->alpha_compression = 1; + config->alpha_filtering = 1; + config->alpha_quality = 100; + config->lossless = 0; + config->exact = 0; + config->image_hint = WEBP_HINT_DEFAULT; + config->emulate_jpeg_size = 0; + config->thread_level = 0; + config->low_memory = 0; + config->near_lossless = 100; + config->use_delta_palette = 0; + config->use_sharp_yuv = 0; + + // TODO(skal): tune. + switch (preset) { + case WEBP_PRESET_PICTURE: + config->sns_strength = 80; + config->filter_sharpness = 4; + config->filter_strength = 35; + config->preprocessing &= ~2; // no dithering + break; + case WEBP_PRESET_PHOTO: + config->sns_strength = 80; + config->filter_sharpness = 3; + config->filter_strength = 30; + config->preprocessing |= 2; + break; + case WEBP_PRESET_DRAWING: + config->sns_strength = 25; + config->filter_sharpness = 6; + config->filter_strength = 10; + break; + case WEBP_PRESET_ICON: + config->sns_strength = 0; + config->filter_strength = 0; // disable filtering to retain sharpness + config->preprocessing &= ~2; // no dithering + break; + case WEBP_PRESET_TEXT: + config->sns_strength = 0; + config->filter_strength = 0; // disable filtering to retain sharpness + config->preprocessing &= ~2; // no dithering + config->segments = 2; + break; + case WEBP_PRESET_DEFAULT: + default: + break; + } + return WebPValidateConfig(config); +} + +int WebPValidateConfig(const WebPConfig* config) { + if (config == NULL) return 0; + if (config->quality < 0 || config->quality > 100) return 0; + if (config->target_size < 0) return 0; + if (config->target_PSNR < 0) return 0; + if (config->method < 0 || config->method > 6) return 0; + if (config->segments < 1 || config->segments > 4) return 0; + if (config->sns_strength < 0 || config->sns_strength > 100) return 0; + if (config->filter_strength < 0 || config->filter_strength > 100) return 0; + if (config->filter_sharpness < 0 || config->filter_sharpness > 7) return 0; + if (config->filter_type < 0 || config->filter_type > 1) return 0; + if (config->autofilter < 0 || config->autofilter > 1) return 0; + if (config->pass < 1 || config->pass > 10) return 0; + if (config->qmin < 0 || config->qmax > 100 || config->qmin > config->qmax) { + return 0; + } + if (config->show_compressed < 0 || config->show_compressed > 1) return 0; + if (config->preprocessing < 0 || config->preprocessing > 7) return 0; + if (config->partitions < 0 || config->partitions > 3) return 0; + if (config->partition_limit < 0 || config->partition_limit > 100) return 0; + if (config->alpha_compression < 0) return 0; + if (config->alpha_filtering < 0) return 0; + if (config->alpha_quality < 0 || config->alpha_quality > 100) return 0; + if (config->lossless < 0 || config->lossless > 1) return 0; + if (config->near_lossless < 0 || config->near_lossless > 100) return 0; + if (config->image_hint >= WEBP_HINT_LAST) return 0; + if (config->emulate_jpeg_size < 0 || config->emulate_jpeg_size > 1) return 0; + if (config->thread_level < 0 || config->thread_level > 1) 
return 0; + if (config->low_memory < 0 || config->low_memory > 1) return 0; + if (config->exact < 0 || config->exact > 1) return 0; + if (config->use_delta_palette < 0 || config->use_delta_palette > 1) { + return 0; + } + if (config->use_sharp_yuv < 0 || config->use_sharp_yuv > 1) return 0; + + return 1; +} + +//------------------------------------------------------------------------------ + +#define MAX_LEVEL 9 + +// Mapping between -z level and -m / -q parameter settings. +static const struct { + uint8_t method_; + uint8_t quality_; +} kLosslessPresets[MAX_LEVEL + 1] = { + { 0, 0 }, { 1, 20 }, { 2, 25 }, { 3, 30 }, { 3, 50 }, + { 4, 50 }, { 4, 75 }, { 4, 90 }, { 5, 90 }, { 6, 100 } +}; + +int WebPConfigLosslessPreset(WebPConfig* config, int level) { + if (config == NULL || level < 0 || level > MAX_LEVEL) return 0; + config->lossless = 1; + config->method = kLosslessPresets[level].method_; + config->quality = kLosslessPresets[level].quality_; + return 1; +} + +//------------------------------------------------------------------------------ diff --git a/vendor/github.com/sizeofint/webpanimation/enc_cost_enc.c b/vendor/github.com/sizeofint/webpanimation/enc_cost_enc.c new file mode 100644 index 00000000..ebd1cde0 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/enc_cost_enc.c @@ -0,0 +1,342 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Cost tables for level and modes +// +// Author: Skal (pascal.massimino@gmail.com) + +#include "enc_cost_enc.h" + +//------------------------------------------------------------------------------ +// Level cost tables + +// For each given level, the following table gives the pattern of contexts to +// use for coding it (in [][0]) as well as the bit value to use for each +// context (in [][1]). 
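+// e.g. the entry {0x007, 0x001} (level 2): pattern 0x007 selects contexts
+// 2, 3 and 4, which code the bits 1, 0 and 0 (read LSB first).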
+const uint16_t VP8LevelCodes[MAX_VARIABLE_LEVEL][2] = { + {0x001, 0x000}, {0x007, 0x001}, {0x00f, 0x005}, + {0x00f, 0x00d}, {0x033, 0x003}, {0x033, 0x003}, {0x033, 0x023}, + {0x033, 0x023}, {0x033, 0x023}, {0x033, 0x023}, {0x0d3, 0x013}, + {0x0d3, 0x013}, {0x0d3, 0x013}, {0x0d3, 0x013}, {0x0d3, 0x013}, + {0x0d3, 0x013}, {0x0d3, 0x013}, {0x0d3, 0x013}, {0x0d3, 0x093}, + {0x0d3, 0x093}, {0x0d3, 0x093}, {0x0d3, 0x093}, {0x0d3, 0x093}, + {0x0d3, 0x093}, {0x0d3, 0x093}, {0x0d3, 0x093}, {0x0d3, 0x093}, + {0x0d3, 0x093}, {0x0d3, 0x093}, {0x0d3, 0x093}, {0x0d3, 0x093}, + {0x0d3, 0x093}, {0x0d3, 0x093}, {0x0d3, 0x093}, {0x153, 0x053}, + {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053}, + {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053}, + {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053}, + {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053}, + {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053}, + {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053}, + {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053}, + {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x053}, {0x153, 0x153} +}; + +static int VariableLevelCost(int level, const uint8_t probas[NUM_PROBAS]) { + int pattern = VP8LevelCodes[level - 1][0]; + int bits = VP8LevelCodes[level - 1][1]; + int cost = 0; + int i; + for (i = 2; pattern; ++i) { + if (pattern & 1) { + cost += VP8BitCost(bits & 1, probas[i]); + } + bits >>= 1; + pattern >>= 1; + } + return cost; +} + +//------------------------------------------------------------------------------ +// Pre-calc level costs once for all + +void VP8CalculateLevelCosts(VP8EncProba* const proba) { + int ctype, band, ctx; + + if (!proba->dirty_) return; // nothing to do. + + for (ctype = 0; ctype < NUM_TYPES; ++ctype) { + int n; + for (band = 0; band < NUM_BANDS; ++band) { + for (ctx = 0; ctx < NUM_CTX; ++ctx) { + const uint8_t* const p = proba->coeffs_[ctype][band][ctx]; + uint16_t* const table = proba->level_cost_[ctype][band][ctx]; + const int cost0 = (ctx > 0) ? VP8BitCost(1, p[0]) : 0; + const int cost_base = VP8BitCost(1, p[1]) + cost0; + int v; + table[0] = VP8BitCost(0, p[1]) + cost0; + for (v = 1; v <= MAX_VARIABLE_LEVEL; ++v) { + table[v] = cost_base + VariableLevelCost(v, p); + } + // Starting at level 67 and up, the variable part of the cost is + // actually constant. + } + } + for (n = 0; n < 16; ++n) { // replicate bands. We don't need to sentinel. + for (ctx = 0; ctx < NUM_CTX; ++ctx) { + proba->remapped_costs_[ctype][n][ctx] = + proba->level_cost_[ctype][VP8EncBands[n]][ctx]; + } + } + } + proba->dirty_ = 0; +} + +//------------------------------------------------------------------------------ +// Mode cost tables. + +// These are the fixed probabilities (in the coding trees) turned into bit-cost +// by calling VP8BitCost(). +const uint16_t VP8FixedCostsUV[4] = { 302, 984, 439, 642 }; +// note: these values include the fixed VP8BitCost(1, 145) mode selection cost. 
+const uint16_t VP8FixedCostsI16[4] = { 663, 919, 872, 919 }; +const uint16_t VP8FixedCostsI4[NUM_BMODES][NUM_BMODES][NUM_BMODES] = { + { { 40, 1151, 1723, 1874, 2103, 2019, 1628, 1777, 2226, 2137 }, + { 192, 469, 1296, 1308, 1849, 1794, 1781, 1703, 1713, 1522 }, + { 142, 910, 762, 1684, 1849, 1576, 1460, 1305, 1801, 1657 }, + { 559, 641, 1370, 421, 1182, 1569, 1612, 1725, 863, 1007 }, + { 299, 1059, 1256, 1108, 636, 1068, 1581, 1883, 869, 1142 }, + { 277, 1111, 707, 1362, 1089, 672, 1603, 1541, 1545, 1291 }, + { 214, 781, 1609, 1303, 1632, 2229, 726, 1560, 1713, 918 }, + { 152, 1037, 1046, 1759, 1983, 2174, 1358, 742, 1740, 1390 }, + { 512, 1046, 1420, 753, 752, 1297, 1486, 1613, 460, 1207 }, + { 424, 827, 1362, 719, 1462, 1202, 1199, 1476, 1199, 538 } }, + { { 240, 402, 1134, 1491, 1659, 1505, 1517, 1555, 1979, 2099 }, + { 467, 242, 960, 1232, 1714, 1620, 1834, 1570, 1676, 1391 }, + { 500, 455, 463, 1507, 1699, 1282, 1564, 982, 2114, 2114 }, + { 672, 643, 1372, 331, 1589, 1667, 1453, 1938, 996, 876 }, + { 458, 783, 1037, 911, 738, 968, 1165, 1518, 859, 1033 }, + { 504, 815, 504, 1139, 1219, 719, 1506, 1085, 1268, 1268 }, + { 333, 630, 1445, 1239, 1883, 3672, 799, 1548, 1865, 598 }, + { 399, 644, 746, 1342, 1856, 1350, 1493, 613, 1855, 1015 }, + { 622, 749, 1205, 608, 1066, 1408, 1290, 1406, 546, 971 }, + { 500, 753, 1041, 668, 1230, 1617, 1297, 1425, 1383, 523 } }, + { { 394, 553, 523, 1502, 1536, 981, 1608, 1142, 1666, 2181 }, + { 655, 430, 375, 1411, 1861, 1220, 1677, 1135, 1978, 1553 }, + { 690, 640, 245, 1954, 2070, 1194, 1528, 982, 1972, 2232 }, + { 559, 834, 741, 867, 1131, 980, 1225, 852, 1092, 784 }, + { 690, 875, 516, 959, 673, 894, 1056, 1190, 1528, 1126 }, + { 740, 951, 384, 1277, 1177, 492, 1579, 1155, 1846, 1513 }, + { 323, 775, 1062, 1776, 3062, 1274, 813, 1188, 1372, 655 }, + { 488, 971, 484, 1767, 1515, 1775, 1115, 503, 1539, 1461 }, + { 740, 1006, 998, 709, 851, 1230, 1337, 788, 741, 721 }, + { 522, 1073, 573, 1045, 1346, 887, 1046, 1146, 1203, 697 } }, + { { 105, 864, 1442, 1009, 1934, 1840, 1519, 1920, 1673, 1579 }, + { 534, 305, 1193, 683, 1388, 2164, 1802, 1894, 1264, 1170 }, + { 305, 518, 877, 1108, 1426, 3215, 1425, 1064, 1320, 1242 }, + { 683, 732, 1927, 257, 1493, 2048, 1858, 1552, 1055, 947 }, + { 394, 814, 1024, 660, 959, 1556, 1282, 1289, 893, 1047 }, + { 528, 615, 996, 940, 1201, 635, 1094, 2515, 803, 1358 }, + { 347, 614, 1609, 1187, 3133, 1345, 1007, 1339, 1017, 667 }, + { 218, 740, 878, 1605, 3650, 3650, 1345, 758, 1357, 1617 }, + { 672, 750, 1541, 558, 1257, 1599, 1870, 2135, 402, 1087 }, + { 592, 684, 1161, 430, 1092, 1497, 1475, 1489, 1095, 822 } }, + { { 228, 1056, 1059, 1368, 752, 982, 1512, 1518, 987, 1782 }, + { 494, 514, 818, 942, 965, 892, 1610, 1356, 1048, 1363 }, + { 512, 648, 591, 1042, 761, 991, 1196, 1454, 1309, 1463 }, + { 683, 749, 1043, 676, 841, 1396, 1133, 1138, 654, 939 }, + { 622, 1101, 1126, 994, 361, 1077, 1203, 1318, 877, 1219 }, + { 631, 1068, 857, 1650, 651, 477, 1650, 1419, 828, 1170 }, + { 555, 727, 1068, 1335, 3127, 1339, 820, 1331, 1077, 429 }, + { 504, 879, 624, 1398, 889, 889, 1392, 808, 891, 1406 }, + { 683, 1602, 1289, 977, 578, 983, 1280, 1708, 406, 1122 }, + { 399, 865, 1433, 1070, 1072, 764, 968, 1477, 1223, 678 } }, + { { 333, 760, 935, 1638, 1010, 529, 1646, 1410, 1472, 2219 }, + { 512, 494, 750, 1160, 1215, 610, 1870, 1868, 1628, 1169 }, + { 572, 646, 492, 1934, 1208, 603, 1580, 1099, 1398, 1995 }, + { 786, 789, 942, 581, 1018, 951, 1599, 1207, 731, 768 }, + { 690, 1015, 672, 1078, 582, 504, 1693, 1438, 1108, 2897 }, 
+ { 768, 1267, 571, 2005, 1243, 244, 2881, 1380, 1786, 1453 }, + { 452, 899, 1293, 903, 1311, 3100, 465, 1311, 1319, 813 }, + { 394, 927, 942, 1103, 1358, 1104, 946, 593, 1363, 1109 }, + { 559, 1005, 1007, 1016, 658, 1173, 1021, 1164, 623, 1028 }, + { 564, 796, 632, 1005, 1014, 863, 2316, 1268, 938, 764 } }, + { { 266, 606, 1098, 1228, 1497, 1243, 948, 1030, 1734, 1461 }, + { 366, 585, 901, 1060, 1407, 1247, 876, 1134, 1620, 1054 }, + { 452, 565, 542, 1729, 1479, 1479, 1016, 886, 2938, 1150 }, + { 555, 1088, 1533, 950, 1354, 895, 834, 1019, 1021, 496 }, + { 704, 815, 1193, 971, 973, 640, 1217, 2214, 832, 578 }, + { 672, 1245, 579, 871, 875, 774, 872, 1273, 1027, 949 }, + { 296, 1134, 2050, 1784, 1636, 3425, 442, 1550, 2076, 722 }, + { 342, 982, 1259, 1846, 1848, 1848, 622, 568, 1847, 1052 }, + { 555, 1064, 1304, 828, 746, 1343, 1075, 1329, 1078, 494 }, + { 288, 1167, 1285, 1174, 1639, 1639, 833, 2254, 1304, 509 } }, + { { 342, 719, 767, 1866, 1757, 1270, 1246, 550, 1746, 2151 }, + { 483, 653, 694, 1509, 1459, 1410, 1218, 507, 1914, 1266 }, + { 488, 757, 447, 2979, 1813, 1268, 1654, 539, 1849, 2109 }, + { 522, 1097, 1085, 851, 1365, 1111, 851, 901, 961, 605 }, + { 709, 716, 841, 728, 736, 945, 941, 862, 2845, 1057 }, + { 512, 1323, 500, 1336, 1083, 681, 1342, 717, 1604, 1350 }, + { 452, 1155, 1372, 1900, 1501, 3290, 311, 944, 1919, 922 }, + { 403, 1520, 977, 2132, 1733, 3522, 1076, 276, 3335, 1547 }, + { 559, 1374, 1101, 615, 673, 2462, 974, 795, 984, 984 }, + { 547, 1122, 1062, 812, 1410, 951, 1140, 622, 1268, 651 } }, + { { 165, 982, 1235, 938, 1334, 1366, 1659, 1578, 964, 1612 }, + { 592, 422, 925, 847, 1139, 1112, 1387, 2036, 861, 1041 }, + { 403, 837, 732, 770, 941, 1658, 1250, 809, 1407, 1407 }, + { 896, 874, 1071, 381, 1568, 1722, 1437, 2192, 480, 1035 }, + { 640, 1098, 1012, 1032, 684, 1382, 1581, 2106, 416, 865 }, + { 559, 1005, 819, 914, 710, 770, 1418, 920, 838, 1435 }, + { 415, 1258, 1245, 870, 1278, 3067, 770, 1021, 1287, 522 }, + { 406, 990, 601, 1009, 1265, 1265, 1267, 759, 1017, 1277 }, + { 968, 1182, 1329, 788, 1032, 1292, 1705, 1714, 203, 1403 }, + { 732, 877, 1279, 471, 901, 1161, 1545, 1294, 755, 755 } }, + { { 111, 931, 1378, 1185, 1933, 1648, 1148, 1714, 1873, 1307 }, + { 406, 414, 1030, 1023, 1910, 1404, 1313, 1647, 1509, 793 }, + { 342, 640, 575, 1088, 1241, 1349, 1161, 1350, 1756, 1502 }, + { 559, 766, 1185, 357, 1682, 1428, 1329, 1897, 1219, 802 }, + { 473, 909, 1164, 771, 719, 2508, 1427, 1432, 722, 782 }, + { 342, 892, 785, 1145, 1150, 794, 1296, 1550, 973, 1057 }, + { 208, 1036, 1326, 1343, 1606, 3395, 815, 1455, 1618, 712 }, + { 228, 928, 890, 1046, 3499, 1711, 994, 829, 1720, 1318 }, + { 768, 724, 1058, 636, 991, 1075, 1319, 1324, 616, 825 }, + { 305, 1167, 1358, 899, 1587, 1587, 987, 1988, 1332, 501 } } +}; + +//------------------------------------------------------------------------------ +// helper functions for residuals struct VP8Residual. 
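+//
+// The four coeff_type classes used by these helpers follow the VP8 token
+// convention visible in the callers below: 0 = luma-AC (i16 mode),
+// 1 = luma-DC, 2 = chroma, 3 = 4x4 luma (see VP8GetCostLuma16(),
+// VP8GetCostUV() and VP8GetCostLuma4()). As a sketch of the intended
+// init -> bind -> cost call sequence (illustrative and unused; the helper
+// name is not part of the library):
+static WEBP_INLINE int ExampleResidualCost(VP8Encoder* const enc, int ctx,
+                                           const int16_t levels[16]) {
+  VP8Residual res;
+  VP8InitResidual(0, 3, enc, &res);      // first=0, coeff_type=3: 4x4 luma
+  VP8SetResidualCoeffs(levels, &res);    // bind the 16 quantized levels
+  return VP8GetResidualCost(ctx, &res);  // bit-cost given nz-context 'ctx'
+}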
+ +void VP8InitResidual(int first, int coeff_type, + VP8Encoder* const enc, VP8Residual* const res) { + res->coeff_type = coeff_type; + res->prob = enc->proba_.coeffs_[coeff_type]; + res->stats = enc->proba_.stats_[coeff_type]; + res->costs = enc->proba_.remapped_costs_[coeff_type]; + res->first = first; +} + +//------------------------------------------------------------------------------ +// Mode costs + +int VP8GetCostLuma4(VP8EncIterator* const it, const int16_t levels[16]) { + const int x = (it->i4_ & 3), y = (it->i4_ >> 2); + VP8Residual res; + VP8Encoder* const enc = it->enc_; + int R = 0; + int ctx; + + VP8InitResidual(0, 3, enc, &res); + ctx = it->top_nz_[x] + it->left_nz_[y]; + VP8SetResidualCoeffs(levels, &res); + R += VP8GetResidualCost(ctx, &res); + return R; +} + +int VP8GetCostLuma16(VP8EncIterator* const it, const VP8ModeScore* const rd) { + VP8Residual res; + VP8Encoder* const enc = it->enc_; + int x, y; + int R = 0; + + VP8IteratorNzToBytes(it); // re-import the non-zero context + + // DC + VP8InitResidual(0, 1, enc, &res); + VP8SetResidualCoeffs(rd->y_dc_levels, &res); + R += VP8GetResidualCost(it->top_nz_[8] + it->left_nz_[8], &res); + + // AC + VP8InitResidual(1, 0, enc, &res); + for (y = 0; y < 4; ++y) { + for (x = 0; x < 4; ++x) { + const int ctx = it->top_nz_[x] + it->left_nz_[y]; + VP8SetResidualCoeffs(rd->y_ac_levels[x + y * 4], &res); + R += VP8GetResidualCost(ctx, &res); + it->top_nz_[x] = it->left_nz_[y] = (res.last >= 0); + } + } + return R; +} + +int VP8GetCostUV(VP8EncIterator* const it, const VP8ModeScore* const rd) { + VP8Residual res; + VP8Encoder* const enc = it->enc_; + int ch, x, y; + int R = 0; + + VP8IteratorNzToBytes(it); // re-import the non-zero context + + VP8InitResidual(0, 2, enc, &res); + for (ch = 0; ch <= 2; ch += 2) { + for (y = 0; y < 2; ++y) { + for (x = 0; x < 2; ++x) { + const int ctx = it->top_nz_[4 + ch + x] + it->left_nz_[4 + ch + y]; + VP8SetResidualCoeffs(rd->uv_levels[ch * 2 + x + y * 2], &res); + R += VP8GetResidualCost(ctx, &res); + it->top_nz_[4 + ch + x] = it->left_nz_[4 + ch + y] = (res.last >= 0); + } + } + } + return R; +} + + +//------------------------------------------------------------------------------ +// Recording of token probabilities. + +// We keep the table-free variant around for reference, in case. +#define USE_LEVEL_CODE_TABLE + +// Simulate block coding, but only record statistics. +// Note: no need to record the fixed probas. 
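+// Each proba_t cell in res->stats packs two counters, maintained by
+// VP8RecordStats() (declared in enc_cost_enc.h): the low 16 bits count the
+// '1' events, the high 16 bits count all events. Purely for illustration
+// (the real reduction lives in FinalizeTokenProbas() in enc_frame_enc.c),
+// a probability can be recovered from one cell as below; this helper is
+// unused and its name is not part of the library:
+static WEBP_INLINE int ExampleProbaFromStats(proba_t stats) {
+  const int nb = (stats >> 0) & 0xffff;      // number of '1' events recorded
+  const int total = (stats >> 16) & 0xffff;  // total number of events
+  return (nb == 0) ? 255 : 255 - nb * 255 / total;
+}
+// The recording pass proper: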
+int VP8RecordCoeffs(int ctx, const VP8Residual* const res) {
+  int n = res->first;
+  // should be stats[VP8EncBands[n]], but it's equivalent for n=0 or 1
+  proba_t* s = res->stats[n][ctx];
+  if (res->last < 0) {
+    VP8RecordStats(0, s + 0);
+    return 0;
+  }
+  while (n <= res->last) {
+    int v;
+    VP8RecordStats(1, s + 0);  // order of record doesn't matter
+    while ((v = res->coeffs[n++]) == 0) {
+      VP8RecordStats(0, s + 1);
+      s = res->stats[VP8EncBands[n]][0];
+    }
+    VP8RecordStats(1, s + 1);
+    if (!VP8RecordStats(2u < (unsigned int)(v + 1), s + 2)) {  // v = -1 or 1
+      s = res->stats[VP8EncBands[n]][1];
+    } else {
+      v = abs(v);
+#if !defined(USE_LEVEL_CODE_TABLE)
+      if (!VP8RecordStats(v > 4, s + 3)) {
+        if (VP8RecordStats(v != 2, s + 4))
+          VP8RecordStats(v == 4, s + 5);
+      } else if (!VP8RecordStats(v > 10, s + 6)) {
+        VP8RecordStats(v > 6, s + 7);
+      } else if (!VP8RecordStats((v >= 3 + (8 << 2)), s + 8)) {
+        VP8RecordStats((v >= 3 + (8 << 1)), s + 9);
+      } else {
+        VP8RecordStats((v >= 3 + (8 << 3)), s + 10);
+      }
+#else
+      if (v > MAX_VARIABLE_LEVEL) {
+        v = MAX_VARIABLE_LEVEL;
+      }
+
+      {
+        const int bits = VP8LevelCodes[v - 1][1];
+        int pattern = VP8LevelCodes[v - 1][0];
+        int i;
+        for (i = 0; (pattern >>= 1) != 0; ++i) {
+          const int mask = 2 << i;
+          if (pattern & 1) VP8RecordStats(!!(bits & mask), s + 3 + i);
+        }
+      }
+#endif
+      s = res->stats[VP8EncBands[n]][2];
+    }
+  }
+  if (n < 16) VP8RecordStats(0, s + 0);
+  return 1;
+}
+
+//------------------------------------------------------------------------------
diff --git a/vendor/github.com/sizeofint/webpanimation/enc_cost_enc.h b/vendor/github.com/sizeofint/webpanimation/enc_cost_enc.h
new file mode 100644
index 00000000..87352d41
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/enc_cost_enc.h
@@ -0,0 +1,82 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Cost tables for level and modes.
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#ifndef WEBP_ENC_COST_ENC_H_
+#define WEBP_ENC_COST_ENC_H_
+
+#include <assert.h>
+#include <stdlib.h>
+#include "enc_vp8i_enc.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// On-the-fly info about the current set of residuals. Handy to avoid
+// passing zillions of params.
+typedef struct VP8Residual VP8Residual;
+struct VP8Residual {
+  int first;
+  int last;
+  const int16_t* coeffs;
+
+  int coeff_type;
+  ProbaArray* prob;
+  StatsArray* stats;
+  CostArrayPtr costs;
+};
+
+void VP8InitResidual(int first, int coeff_type,
+                     VP8Encoder* const enc, VP8Residual* const res);
+
+int VP8RecordCoeffs(int ctx, const VP8Residual* const res);
+
+// Record proba context used.
+static WEBP_INLINE int VP8RecordStats(int bit, proba_t* const stats) {
+  proba_t p = *stats;
+  // An overflow is inbound. Note we handle this at 0xfffe0000u instead of
+  // 0xffff0000u to make sure p + 1u does not overflow.
+  if (p >= 0xfffe0000u) {
+    p = ((p + 1u) >> 1) & 0x7fff7fffu;  // -> divide the stats by 2.
+  }
+  // record bit count (lower 16 bits) and increment total count (upper 16 bits).
+  p += 0x00010000u + bit;
+  *stats = p;
+  return bit;
+}
+
+// Cost of coding one event with probability 'proba'.
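+// (All costs in the encoder are expressed in 1/256-bit fixed point: coding
+// an event of probability p/256 costs about -log2(p / 256.) bits, so
+// VP8EntropyCost[p] is roughly -256 * log2(p / 256.). A fifty-fifty bit,
+// p == 128, costs ~256 units, i.e. one bit; this is also why whole bytes
+// are accounted as 8 * 256 elsewhere in the encoder.)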
+static WEBP_INLINE int VP8BitCost(int bit, uint8_t proba) {
+  return !bit ? VP8EntropyCost[proba] : VP8EntropyCost[255 - proba];
+}
+
+// Level cost calculations
+extern const uint16_t VP8LevelCodes[MAX_VARIABLE_LEVEL][2];
+void VP8CalculateLevelCosts(VP8EncProba* const proba);
+static WEBP_INLINE int VP8LevelCost(const uint16_t* const table, int level) {
+  return VP8LevelFixedCosts[level] +
+         table[(level > MAX_VARIABLE_LEVEL) ? MAX_VARIABLE_LEVEL : level];
+}
+
+// Mode costs
+extern const uint16_t VP8FixedCostsUV[4];
+extern const uint16_t VP8FixedCostsI16[4];
+extern const uint16_t VP8FixedCostsI4[NUM_BMODES][NUM_BMODES][NUM_BMODES];
+
+//------------------------------------------------------------------------------
+
+#ifdef __cplusplus
+}    // extern "C"
+#endif
+
+#endif  // WEBP_ENC_COST_ENC_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/enc_filter_enc.c b/vendor/github.com/sizeofint/webpanimation/enc_filter_enc.c
new file mode 100644
index 00000000..38e0ac0a
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/enc_filter_enc.c
@@ -0,0 +1,235 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Selecting filter level
+//
+// Author: somnath@google.com (Somnath Banerjee)
+
+#include <assert.h>
+#include "enc_vp8i_enc.h"
+#include "dsp_dsp.h"
+
+// This table gives, for a given sharpness, the filtering strength to be
+// used (at least) in order to filter a given edge step delta.
+// This is constructed by brute force inspection: for all delta, we iterate
+// over all possible filtering strength / thresh until needs_filter() returns
+// true.
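+// Usage sketch: given the configured sharpness (0..7) and the largest edge
+// step 'delta' observed in a segment, the required minimal filtering
+// strength is a plain clamped lookup,
+//   level = kLevelsFromDelta[sharpness][delta < MAX_DELTA_SIZE
+//                                           ? delta : MAX_DELTA_SIZE - 1];
+// which is exactly what VP8FilterStrengthFromDelta() below implements.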
+#define MAX_DELTA_SIZE 64 +static const uint8_t kLevelsFromDelta[8][MAX_DELTA_SIZE] = { + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 }, + { 0, 1, 2, 3, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 17, 18, + 20, 21, 23, 24, 26, 27, 29, 30, 32, 33, 35, 36, 38, 39, 41, 42, + 44, 45, 47, 48, 50, 51, 53, 54, 56, 57, 59, 60, 62, 63, 63, 63, + 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63 }, + { 0, 1, 2, 3, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17, 19, + 20, 22, 23, 25, 26, 28, 29, 31, 32, 34, 35, 37, 38, 40, 41, 43, + 44, 46, 47, 49, 50, 52, 53, 55, 56, 58, 59, 61, 62, 63, 63, 63, + 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63 }, + { 0, 1, 2, 3, 5, 6, 7, 8, 9, 11, 12, 13, 15, 16, 18, 19, + 21, 22, 24, 25, 27, 28, 30, 31, 33, 34, 36, 37, 39, 40, 42, 43, + 45, 46, 48, 49, 51, 52, 54, 55, 57, 58, 60, 61, 63, 63, 63, 63, + 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63 }, + { 0, 1, 2, 3, 5, 6, 7, 8, 9, 11, 12, 14, 15, 17, 18, 20, + 21, 23, 24, 26, 27, 29, 30, 32, 33, 35, 36, 38, 39, 41, 42, 44, + 45, 47, 48, 50, 51, 53, 54, 56, 57, 59, 60, 62, 63, 63, 63, 63, + 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63 }, + { 0, 1, 2, 4, 5, 7, 8, 9, 11, 12, 13, 15, 16, 17, 19, 20, + 22, 23, 25, 26, 28, 29, 31, 32, 34, 35, 37, 38, 40, 41, 43, 44, + 46, 47, 49, 50, 52, 53, 55, 56, 58, 59, 61, 62, 63, 63, 63, 63, + 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63 }, + { 0, 1, 2, 4, 5, 7, 8, 9, 11, 12, 13, 15, 16, 18, 19, 21, + 22, 24, 25, 27, 28, 30, 31, 33, 34, 36, 37, 39, 40, 42, 43, 45, + 46, 48, 49, 51, 52, 54, 55, 57, 58, 60, 61, 63, 63, 63, 63, 63, + 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63 }, + { 0, 1, 2, 4, 5, 7, 8, 9, 11, 12, 14, 15, 17, 18, 20, 21, + 23, 24, 26, 27, 29, 30, 32, 33, 35, 36, 38, 39, 41, 42, 44, 45, + 47, 48, 50, 51, 53, 54, 56, 57, 59, 60, 62, 63, 63, 63, 63, 63, + 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63 } +}; + +int VP8FilterStrengthFromDelta(int sharpness, int delta) { + const int pos = (delta < MAX_DELTA_SIZE) ? delta : MAX_DELTA_SIZE - 1; + assert(sharpness >= 0 && sharpness <= 7); + return kLevelsFromDelta[sharpness][pos]; +} + +//------------------------------------------------------------------------------ +// Paragraph 15.4: compute the inner-edge filtering strength + +#if !defined(WEBP_REDUCE_SIZE) + +static int GetILevel(int sharpness, int level) { + if (sharpness > 0) { + if (sharpness > 4) { + level >>= 2; + } else { + level >>= 1; + } + if (level > 9 - sharpness) { + level = 9 - sharpness; + } + } + if (level < 1) level = 1; + return level; +} + +static void DoFilter(const VP8EncIterator* const it, int level) { + const VP8Encoder* const enc = it->enc_; + const int ilevel = GetILevel(enc->config_->filter_sharpness, level); + const int limit = 2 * level + ilevel; + + uint8_t* const y_dst = it->yuv_out2_ + Y_OFF_ENC; + uint8_t* const u_dst = it->yuv_out2_ + U_OFF_ENC; + uint8_t* const v_dst = it->yuv_out2_ + V_OFF_ENC; + + // copy current block to yuv_out2_ + memcpy(y_dst, it->yuv_out_, YUV_SIZE_ENC * sizeof(uint8_t)); + + if (enc->filter_hdr_.simple_ == 1) { // simple + VP8SimpleHFilter16i(y_dst, BPS, limit); + VP8SimpleVFilter16i(y_dst, BPS, limit); + } else { // complex + const int hev_thresh = (level >= 40) ? 2 : (level >= 15) ? 
1 : 0;
+    VP8HFilter16i(y_dst, BPS, limit, ilevel, hev_thresh);
+    VP8HFilter8i(u_dst, v_dst, BPS, limit, ilevel, hev_thresh);
+    VP8VFilter16i(y_dst, BPS, limit, ilevel, hev_thresh);
+    VP8VFilter8i(u_dst, v_dst, BPS, limit, ilevel, hev_thresh);
+  }
+}
+
+//------------------------------------------------------------------------------
+// SSIM metric for one macroblock
+
+static double GetMBSSIM(const uint8_t* yuv1, const uint8_t* yuv2) {
+  int x, y;
+  double sum = 0.;
+
+  // compute SSIM in a 10 x 10 window
+  for (y = VP8_SSIM_KERNEL; y < 16 - VP8_SSIM_KERNEL; y++) {
+    for (x = VP8_SSIM_KERNEL; x < 16 - VP8_SSIM_KERNEL; x++) {
+      sum += VP8SSIMGetClipped(yuv1 + Y_OFF_ENC, BPS, yuv2 + Y_OFF_ENC, BPS,
+                               x, y, 16, 16);
+    }
+  }
+  for (x = 1; x < 7; x++) {
+    for (y = 1; y < 7; y++) {
+      sum += VP8SSIMGetClipped(yuv1 + U_OFF_ENC, BPS, yuv2 + U_OFF_ENC, BPS,
+                               x, y, 8, 8);
+      sum += VP8SSIMGetClipped(yuv1 + V_OFF_ENC, BPS, yuv2 + V_OFF_ENC, BPS,
+                               x, y, 8, 8);
+    }
+  }
+  return sum;
+}
+
+#endif  // !defined(WEBP_REDUCE_SIZE)
+
+//------------------------------------------------------------------------------
+// Exposed APIs: Encoder should call the following 3 functions to adjust
+// loop filter strength
+
+void VP8InitFilter(VP8EncIterator* const it) {
+#if !defined(WEBP_REDUCE_SIZE)
+  if (it->lf_stats_ != NULL) {
+    int s, i;
+    for (s = 0; s < NUM_MB_SEGMENTS; s++) {
+      for (i = 0; i < MAX_LF_LEVELS; i++) {
+        (*it->lf_stats_)[s][i] = 0;
+      }
+    }
+    VP8SSIMDspInit();
+  }
+#else
+  (void)it;
+#endif
+}
+
+void VP8StoreFilterStats(VP8EncIterator* const it) {
+#if !defined(WEBP_REDUCE_SIZE)
+  int d;
+  VP8Encoder* const enc = it->enc_;
+  const int s = it->mb_->segment_;
+  const int level0 = enc->dqm_[s].fstrength_;
+
+  // explore +/-quant range of values around level0
+  const int delta_min = -enc->dqm_[s].quant_;
+  const int delta_max = enc->dqm_[s].quant_;
+  const int step_size = (delta_max - delta_min >= 4) ? 4 : 1;
+
+  if (it->lf_stats_ == NULL) return;
+
+  // NOTE: Currently we are applying filter only across the sub-block edges.
+  // There are two reasons for that.
+  // 1. Applying filter on macro block edges will change the pixels in
+  //    the left and top macro blocks. That will be hard to restore.
+  // 2. Macro blocks on the bottom and right are not yet compressed. So we
+  //    cannot apply filter on the right and bottom macro block edges.
+  if (it->mb_->type_ == 1 && it->mb_->skip_) return;
+
+  // Always try filter level zero
+  (*it->lf_stats_)[s][0] += GetMBSSIM(it->yuv_in_, it->yuv_out_);
+
+  for (d = delta_min; d <= delta_max; d += step_size) {
+    const int level = level0 + d;
+    if (level <= 0 || level >= MAX_LF_LEVELS) {
+      continue;
+    }
+    DoFilter(it, level);
+    (*it->lf_stats_)[s][level] += GetMBSSIM(it->yuv_in_, it->yuv_out2_);
+  }
+#else  // defined(WEBP_REDUCE_SIZE)
+  (void)it;
+#endif  // !defined(WEBP_REDUCE_SIZE)
+}
+
+void VP8AdjustFilterStrength(VP8EncIterator* const it) {
+  VP8Encoder* const enc = it->enc_;
+#if !defined(WEBP_REDUCE_SIZE)
+  if (it->lf_stats_ != NULL) {
+    int s;
+    for (s = 0; s < NUM_MB_SEGMENTS; s++) {
+      int i, best_level = 0;
+      // Improvement over filter level 0 should be at least 1e-5 (relatively)
+      double best_v = 1.00001 * (*it->lf_stats_)[s][0];
+      for (i = 1; i < MAX_LF_LEVELS; i++) {
+        const double v = (*it->lf_stats_)[s][i];
+        if (v > best_v) {
+          best_v = v;
+          best_level = i;
+        }
+      }
+      enc->dqm_[s].fstrength_ = best_level;
+    }
+    return;
+  }
+#endif  // !defined(WEBP_REDUCE_SIZE)
+  if (enc->config_->filter_strength > 0) {
+    int max_level = 0;
+    int s;
+    for (s = 0; s < NUM_MB_SEGMENTS; s++) {
+      VP8SegmentInfo* const dqm = &enc->dqm_[s];
+      // this '>> 3' accounts for some inverse WHT scaling
+      const int delta = (dqm->max_edge_ * dqm->y2_.q_[1]) >> 3;
+      const int level =
+          VP8FilterStrengthFromDelta(enc->filter_hdr_.sharpness_, delta);
+      if (level > dqm->fstrength_) {
+        dqm->fstrength_ = level;
+      }
+      if (max_level < dqm->fstrength_) {
+        max_level = dqm->fstrength_;
+      }
+    }
+    enc->filter_hdr_.level_ = max_level;
+  }
+}
+
+// -----------------------------------------------------------------------------
diff --git a/vendor/github.com/sizeofint/webpanimation/enc_frame_enc.c b/vendor/github.com/sizeofint/webpanimation/enc_frame_enc.c
new file mode 100644
index 00000000..2919ee97
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/enc_frame_enc.c
@@ -0,0 +1,894 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// frame coding and analysis
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <string.h>
+#include <math.h>
+
+#include "enc_cost_enc.h"
+#include "enc_vp8i_enc.h"
+#include "dsp_dsp.h"
+#include "webp_format_constants.h"
+
+#define SEGMENT_VISU 0
+#define DEBUG_SEARCH 0    // useful to track search convergence
+
+//------------------------------------------------------------------------------
+// multi-pass convergence
+
+#define HEADER_SIZE_ESTIMATE (RIFF_HEADER_SIZE + CHUNK_HEADER_SIZE + \
+                              VP8_FRAME_HEADER_SIZE)
+#define DQ_LIMIT 0.4  // convergence is considered reached if dq < DQ_LIMIT
+// we allow 2k of extra head-room in PARTITION0 limit.
+#define PARTITION0_SIZE_LIMIT ((VP8_MAX_PARTITION0_SIZE - 2048ULL) << 11)
+
+static float Clamp(float v, float min, float max) {
+  return (v < min) ? min : (v > max) ?
max : v; +} + +typedef struct { // struct for organizing convergence in either size or PSNR + int is_first; + float dq; + float q, last_q; + float qmin, qmax; + double value, last_value; // PSNR or size + double target; + int do_size_search; +} PassStats; + +static int InitPassStats(const VP8Encoder* const enc, PassStats* const s) { + const uint64_t target_size = (uint64_t)enc->config_->target_size; + const int do_size_search = (target_size != 0); + const float target_PSNR = enc->config_->target_PSNR; + + s->is_first = 1; + s->dq = 10.f; + s->qmin = 1.f * enc->config_->qmin; + s->qmax = 1.f * enc->config_->qmax; + s->q = s->last_q = Clamp(enc->config_->quality, s->qmin, s->qmax); + s->target = do_size_search ? (double)target_size + : (target_PSNR > 0.) ? target_PSNR + : 40.; // default, just in case + s->value = s->last_value = 0.; + s->do_size_search = do_size_search; + return do_size_search; +} + +static float ComputeNextQ(PassStats* const s) { + float dq; + if (s->is_first) { + dq = (s->value > s->target) ? -s->dq : s->dq; + s->is_first = 0; + } else if (s->value != s->last_value) { + const double slope = (s->target - s->value) / (s->last_value - s->value); + dq = (float)(slope * (s->last_q - s->q)); + } else { + dq = 0.; // we're done?! + } + // Limit variable to avoid large swings. + s->dq = Clamp(dq, -30.f, 30.f); + s->last_q = s->q; + s->last_value = s->value; + s->q = Clamp(s->q + s->dq, s->qmin, s->qmax); + return s->q; +} + +//------------------------------------------------------------------------------ +// Tables for level coding + +const uint8_t VP8Cat3[] = { 173, 148, 140 }; +const uint8_t VP8Cat4[] = { 176, 155, 140, 135 }; +const uint8_t VP8Cat5[] = { 180, 157, 141, 134, 130 }; +const uint8_t VP8Cat6[] = + { 254, 254, 243, 230, 196, 177, 153, 140, 133, 130, 129 }; + +//------------------------------------------------------------------------------ +// Reset the statistics about: number of skips, token proba, level cost,... + +static void ResetStats(VP8Encoder* const enc) { + VP8EncProba* const proba = &enc->proba_; + VP8CalculateLevelCosts(proba); + proba->nb_skip_ = 0; +} + +//------------------------------------------------------------------------------ +// Skip decision probability + +#define SKIP_PROBA_THRESHOLD 250 // value below which using skip_proba is OK. + +static int CalcSkipProba(uint64_t nb, uint64_t total) { + return (int)(total ? (total - nb) * 255 / total : 255); +} + +// Returns the bit-cost for coding the skip probability. +static int FinalizeSkipProba(VP8Encoder* const enc) { + VP8EncProba* const proba = &enc->proba_; + const int nb_mbs = enc->mb_w_ * enc->mb_h_; + const int nb_events = proba->nb_skip_; + int size; + proba->skip_proba_ = CalcSkipProba(nb_events, nb_mbs); + proba->use_skip_proba_ = (proba->skip_proba_ < SKIP_PROBA_THRESHOLD); + size = 256; // 'use_skip_proba' bit + if (proba->use_skip_proba_) { + size += nb_events * VP8BitCost(1, proba->skip_proba_) + + (nb_mbs - nb_events) * VP8BitCost(0, proba->skip_proba_); + size += 8 * 256; // cost of signaling the skip_proba_ itself. + } + return size; +} + +// Collect statistics and deduce probabilities for next coding pass. +// Return the total bit-cost for coding the probability updates. +static int CalcTokenProba(int nb, int total) { + assert(nb <= total); + return nb ? (255 - nb * 255 / total) : 255; +} + +// Cost of coding 'nb' 1's and 'total-nb' 0's using 'proba' probability. 
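+// (Illustrative, unused helper; its name is not part of the library. With a
+// near-uniform proba of 128 both branch outcomes cost roughly 256 units,
+// i.e. one bit, so 'total' such events cost about 'total' bits no matter
+// how the 0s and 1s split:)
+static WEBP_INLINE int ExampleUniformBranchCost(int total) {
+  return total * VP8BitCost(0, 128);    // ~= total * 256 units
+}
+// The general form, weighing 'nb' ones against 'total - nb' zeros: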
+static int BranchCost(int nb, int total, int proba) { + return nb * VP8BitCost(1, proba) + (total - nb) * VP8BitCost(0, proba); +} + +static void ResetTokenStats(VP8Encoder* const enc) { + VP8EncProba* const proba = &enc->proba_; + memset(proba->stats_, 0, sizeof(proba->stats_)); +} + +static int FinalizeTokenProbas(VP8EncProba* const proba) { + int has_changed = 0; + int size = 0; + int t, b, c, p; + for (t = 0; t < NUM_TYPES; ++t) { + for (b = 0; b < NUM_BANDS; ++b) { + for (c = 0; c < NUM_CTX; ++c) { + for (p = 0; p < NUM_PROBAS; ++p) { + const proba_t stats = proba->stats_[t][b][c][p]; + const int nb = (stats >> 0) & 0xffff; + const int total = (stats >> 16) & 0xffff; + const int update_proba = VP8CoeffsUpdateProba[t][b][c][p]; + const int old_p = VP8CoeffsProba0[t][b][c][p]; + const int new_p = CalcTokenProba(nb, total); + const int old_cost = BranchCost(nb, total, old_p) + + VP8BitCost(0, update_proba); + const int new_cost = BranchCost(nb, total, new_p) + + VP8BitCost(1, update_proba) + + 8 * 256; + const int use_new_p = (old_cost > new_cost); + size += VP8BitCost(use_new_p, update_proba); + if (use_new_p) { // only use proba that seem meaningful enough. + proba->coeffs_[t][b][c][p] = new_p; + has_changed |= (new_p != old_p); + size += 8 * 256; + } else { + proba->coeffs_[t][b][c][p] = old_p; + } + } + } + } + } + proba->dirty_ = has_changed; + return size; +} + +//------------------------------------------------------------------------------ +// Finalize Segment probability based on the coding tree + +static int GetProba(int a, int b) { + const int total = a + b; + return (total == 0) ? 255 // that's the default probability. + : (255 * a + total / 2) / total; // rounded proba +} + +static void ResetSegments(VP8Encoder* const enc) { + int n; + for (n = 0; n < enc->mb_w_ * enc->mb_h_; ++n) { + enc->mb_info_[n].segment_ = 0; + } +} + +static void SetSegmentProbas(VP8Encoder* const enc) { + int p[NUM_MB_SEGMENTS] = { 0 }; + int n; + + for (n = 0; n < enc->mb_w_ * enc->mb_h_; ++n) { + const VP8MBInfo* const mb = &enc->mb_info_[n]; + ++p[mb->segment_]; + } +#if !defined(WEBP_DISABLE_STATS) + if (enc->pic_->stats != NULL) { + for (n = 0; n < NUM_MB_SEGMENTS; ++n) { + enc->pic_->stats->segment_size[n] = p[n]; + } + } +#endif + if (enc->segment_hdr_.num_segments_ > 1) { + uint8_t* const probas = enc->proba_.segments_; + probas[0] = GetProba(p[0] + p[1], p[2] + p[3]); + probas[1] = GetProba(p[0], p[1]); + probas[2] = GetProba(p[2], p[3]); + + enc->segment_hdr_.update_map_ = + (probas[0] != 255) || (probas[1] != 255) || (probas[2] != 255); + if (!enc->segment_hdr_.update_map_) ResetSegments(enc); + enc->segment_hdr_.size_ = + p[0] * (VP8BitCost(0, probas[0]) + VP8BitCost(0, probas[1])) + + p[1] * (VP8BitCost(0, probas[0]) + VP8BitCost(1, probas[1])) + + p[2] * (VP8BitCost(1, probas[0]) + VP8BitCost(0, probas[2])) + + p[3] * (VP8BitCost(1, probas[0]) + VP8BitCost(1, probas[2])); + } else { + enc->segment_hdr_.update_map_ = 0; + enc->segment_hdr_.size_ = 0; + } +} + +//------------------------------------------------------------------------------ +// Coefficient coding + +static int PutCoeffs(VP8BitWriter* const bw, int ctx, const VP8Residual* res) { + int n = res->first; + // should be prob[VP8EncBands[n]], but it's equivalent for n=0 or 1 + const uint8_t* p = res->prob[n][ctx]; + if (!VP8PutBit(bw, res->last >= 0, p[0])) { + return 0; + } + + while (n < 16) { + const int c = res->coeffs[n++]; + const int sign = c < 0; + int v = sign ? 
-c : c; + if (!VP8PutBit(bw, v != 0, p[1])) { + p = res->prob[VP8EncBands[n]][0]; + continue; + } + if (!VP8PutBit(bw, v > 1, p[2])) { + p = res->prob[VP8EncBands[n]][1]; + } else { + if (!VP8PutBit(bw, v > 4, p[3])) { + if (VP8PutBit(bw, v != 2, p[4])) { + VP8PutBit(bw, v == 4, p[5]); + } + } else if (!VP8PutBit(bw, v > 10, p[6])) { + if (!VP8PutBit(bw, v > 6, p[7])) { + VP8PutBit(bw, v == 6, 159); + } else { + VP8PutBit(bw, v >= 9, 165); + VP8PutBit(bw, !(v & 1), 145); + } + } else { + int mask; + const uint8_t* tab; + if (v < 3 + (8 << 1)) { // VP8Cat3 (3b) + VP8PutBit(bw, 0, p[8]); + VP8PutBit(bw, 0, p[9]); + v -= 3 + (8 << 0); + mask = 1 << 2; + tab = VP8Cat3; + } else if (v < 3 + (8 << 2)) { // VP8Cat4 (4b) + VP8PutBit(bw, 0, p[8]); + VP8PutBit(bw, 1, p[9]); + v -= 3 + (8 << 1); + mask = 1 << 3; + tab = VP8Cat4; + } else if (v < 3 + (8 << 3)) { // VP8Cat5 (5b) + VP8PutBit(bw, 1, p[8]); + VP8PutBit(bw, 0, p[10]); + v -= 3 + (8 << 2); + mask = 1 << 4; + tab = VP8Cat5; + } else { // VP8Cat6 (11b) + VP8PutBit(bw, 1, p[8]); + VP8PutBit(bw, 1, p[10]); + v -= 3 + (8 << 3); + mask = 1 << 10; + tab = VP8Cat6; + } + while (mask) { + VP8PutBit(bw, !!(v & mask), *tab++); + mask >>= 1; + } + } + p = res->prob[VP8EncBands[n]][2]; + } + VP8PutBitUniform(bw, sign); + if (n == 16 || !VP8PutBit(bw, n <= res->last, p[0])) { + return 1; // EOB + } + } + return 1; +} + +static void CodeResiduals(VP8BitWriter* const bw, VP8EncIterator* const it, + const VP8ModeScore* const rd) { + int x, y, ch; + VP8Residual res; + uint64_t pos1, pos2, pos3; + const int i16 = (it->mb_->type_ == 1); + const int segment = it->mb_->segment_; + VP8Encoder* const enc = it->enc_; + + VP8IteratorNzToBytes(it); + + pos1 = VP8BitWriterPos(bw); + if (i16) { + VP8InitResidual(0, 1, enc, &res); + VP8SetResidualCoeffs(rd->y_dc_levels, &res); + it->top_nz_[8] = it->left_nz_[8] = + PutCoeffs(bw, it->top_nz_[8] + it->left_nz_[8], &res); + VP8InitResidual(1, 0, enc, &res); + } else { + VP8InitResidual(0, 3, enc, &res); + } + + // luma-AC + for (y = 0; y < 4; ++y) { + for (x = 0; x < 4; ++x) { + const int ctx = it->top_nz_[x] + it->left_nz_[y]; + VP8SetResidualCoeffs(rd->y_ac_levels[x + y * 4], &res); + it->top_nz_[x] = it->left_nz_[y] = PutCoeffs(bw, ctx, &res); + } + } + pos2 = VP8BitWriterPos(bw); + + // U/V + VP8InitResidual(0, 2, enc, &res); + for (ch = 0; ch <= 2; ch += 2) { + for (y = 0; y < 2; ++y) { + for (x = 0; x < 2; ++x) { + const int ctx = it->top_nz_[4 + ch + x] + it->left_nz_[4 + ch + y]; + VP8SetResidualCoeffs(rd->uv_levels[ch * 2 + x + y * 2], &res); + it->top_nz_[4 + ch + x] = it->left_nz_[4 + ch + y] = + PutCoeffs(bw, ctx, &res); + } + } + } + pos3 = VP8BitWriterPos(bw); + it->luma_bits_ = pos2 - pos1; + it->uv_bits_ = pos3 - pos2; + it->bit_count_[segment][i16] += it->luma_bits_; + it->bit_count_[segment][2] += it->uv_bits_; + VP8IteratorBytesToNz(it); +} + +// Same as CodeResiduals, but doesn't actually write anything. +// Instead, it just records the event distribution. 
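+// The distribution recorded here later feeds FinalizeTokenProbas(), which
+// decides per branch whether signaling an updated probability pays for its
+// own coding cost; a stats pass is therefore RecordResiduals() over all
+// macroblocks followed by FinalizeTokenProbas(&enc->proba_), as done by
+// OneStatPass() below.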
+static void RecordResiduals(VP8EncIterator* const it, + const VP8ModeScore* const rd) { + int x, y, ch; + VP8Residual res; + VP8Encoder* const enc = it->enc_; + + VP8IteratorNzToBytes(it); + + if (it->mb_->type_ == 1) { // i16x16 + VP8InitResidual(0, 1, enc, &res); + VP8SetResidualCoeffs(rd->y_dc_levels, &res); + it->top_nz_[8] = it->left_nz_[8] = + VP8RecordCoeffs(it->top_nz_[8] + it->left_nz_[8], &res); + VP8InitResidual(1, 0, enc, &res); + } else { + VP8InitResidual(0, 3, enc, &res); + } + + // luma-AC + for (y = 0; y < 4; ++y) { + for (x = 0; x < 4; ++x) { + const int ctx = it->top_nz_[x] + it->left_nz_[y]; + VP8SetResidualCoeffs(rd->y_ac_levels[x + y * 4], &res); + it->top_nz_[x] = it->left_nz_[y] = VP8RecordCoeffs(ctx, &res); + } + } + + // U/V + VP8InitResidual(0, 2, enc, &res); + for (ch = 0; ch <= 2; ch += 2) { + for (y = 0; y < 2; ++y) { + for (x = 0; x < 2; ++x) { + const int ctx = it->top_nz_[4 + ch + x] + it->left_nz_[4 + ch + y]; + VP8SetResidualCoeffs(rd->uv_levels[ch * 2 + x + y * 2], &res); + it->top_nz_[4 + ch + x] = it->left_nz_[4 + ch + y] = + VP8RecordCoeffs(ctx, &res); + } + } + } + + VP8IteratorBytesToNz(it); +} + +//------------------------------------------------------------------------------ +// Token buffer + +#if !defined(DISABLE_TOKEN_BUFFER) + +static int RecordTokens(VP8EncIterator* const it, const VP8ModeScore* const rd, + VP8TBuffer* const tokens) { + int x, y, ch; + VP8Residual res; + VP8Encoder* const enc = it->enc_; + + VP8IteratorNzToBytes(it); + if (it->mb_->type_ == 1) { // i16x16 + const int ctx = it->top_nz_[8] + it->left_nz_[8]; + VP8InitResidual(0, 1, enc, &res); + VP8SetResidualCoeffs(rd->y_dc_levels, &res); + it->top_nz_[8] = it->left_nz_[8] = + VP8RecordCoeffTokens(ctx, &res, tokens); + VP8InitResidual(1, 0, enc, &res); + } else { + VP8InitResidual(0, 3, enc, &res); + } + + // luma-AC + for (y = 0; y < 4; ++y) { + for (x = 0; x < 4; ++x) { + const int ctx = it->top_nz_[x] + it->left_nz_[y]; + VP8SetResidualCoeffs(rd->y_ac_levels[x + y * 4], &res); + it->top_nz_[x] = it->left_nz_[y] = + VP8RecordCoeffTokens(ctx, &res, tokens); + } + } + + // U/V + VP8InitResidual(0, 2, enc, &res); + for (ch = 0; ch <= 2; ch += 2) { + for (y = 0; y < 2; ++y) { + for (x = 0; x < 2; ++x) { + const int ctx = it->top_nz_[4 + ch + x] + it->left_nz_[4 + ch + y]; + VP8SetResidualCoeffs(rd->uv_levels[ch * 2 + x + y * 2], &res); + it->top_nz_[4 + ch + x] = it->left_nz_[4 + ch + y] = + VP8RecordCoeffTokens(ctx, &res, tokens); + } + } + } + VP8IteratorBytesToNz(it); + return !tokens->error_; +} + +#endif // !DISABLE_TOKEN_BUFFER + +//------------------------------------------------------------------------------ +// ExtraInfo map / Debug function + +#if !defined(WEBP_DISABLE_STATS) + +#if SEGMENT_VISU +static void SetBlock(uint8_t* p, int value, int size) { + int y; + for (y = 0; y < size; ++y) { + memset(p, value, size); + p += BPS; + } +} +#endif + +static void ResetSSE(VP8Encoder* const enc) { + enc->sse_[0] = 0; + enc->sse_[1] = 0; + enc->sse_[2] = 0; + // Note: enc->sse_[3] is managed by alpha.c + enc->sse_count_ = 0; +} + +static void StoreSSE(const VP8EncIterator* const it) { + VP8Encoder* const enc = it->enc_; + const uint8_t* const in = it->yuv_in_; + const uint8_t* const out = it->yuv_out_; + // Note: not totally accurate at boundary. And doesn't include in-loop filter. 
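+  // (These raw SSE accumulators are later folded into the PSNR values of
+  // the encoder statistics, using the 10 * log10(255^2 * count / SSE)
+  // formula of GetPSNR() below.)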
+ enc->sse_[0] += VP8SSE16x16(in + Y_OFF_ENC, out + Y_OFF_ENC); + enc->sse_[1] += VP8SSE8x8(in + U_OFF_ENC, out + U_OFF_ENC); + enc->sse_[2] += VP8SSE8x8(in + V_OFF_ENC, out + V_OFF_ENC); + enc->sse_count_ += 16 * 16; +} + +static void StoreSideInfo(const VP8EncIterator* const it) { + VP8Encoder* const enc = it->enc_; + const VP8MBInfo* const mb = it->mb_; + WebPPicture* const pic = enc->pic_; + + if (pic->stats != NULL) { + StoreSSE(it); + enc->block_count_[0] += (mb->type_ == 0); + enc->block_count_[1] += (mb->type_ == 1); + enc->block_count_[2] += (mb->skip_ != 0); + } + + if (pic->extra_info != NULL) { + uint8_t* const info = &pic->extra_info[it->x_ + it->y_ * enc->mb_w_]; + switch (pic->extra_info_type) { + case 1: *info = mb->type_; break; + case 2: *info = mb->segment_; break; + case 3: *info = enc->dqm_[mb->segment_].quant_; break; + case 4: *info = (mb->type_ == 1) ? it->preds_[0] : 0xff; break; + case 5: *info = mb->uv_mode_; break; + case 6: { + const int b = (int)((it->luma_bits_ + it->uv_bits_ + 7) >> 3); + *info = (b > 255) ? 255 : b; break; + } + case 7: *info = mb->alpha_; break; + default: *info = 0; break; + } + } +#if SEGMENT_VISU // visualize segments and prediction modes + SetBlock(it->yuv_out_ + Y_OFF_ENC, mb->segment_ * 64, 16); + SetBlock(it->yuv_out_ + U_OFF_ENC, it->preds_[0] * 64, 8); + SetBlock(it->yuv_out_ + V_OFF_ENC, mb->uv_mode_ * 64, 8); +#endif +} + +static void ResetSideInfo(const VP8EncIterator* const it) { + VP8Encoder* const enc = it->enc_; + WebPPicture* const pic = enc->pic_; + if (pic->stats != NULL) { + memset(enc->block_count_, 0, sizeof(enc->block_count_)); + } + ResetSSE(enc); +} +#else // defined(WEBP_DISABLE_STATS) +static void ResetSSE(VP8Encoder* const enc) { + (void)enc; +} +static void StoreSideInfo(const VP8EncIterator* const it) { + VP8Encoder* const enc = it->enc_; + WebPPicture* const pic = enc->pic_; + if (pic->extra_info != NULL) { + if (it->x_ == 0 && it->y_ == 0) { // only do it once, at start + memset(pic->extra_info, 0, + enc->mb_w_ * enc->mb_h_ * sizeof(*pic->extra_info)); + } + } +} + +static void ResetSideInfo(const VP8EncIterator* const it) { + (void)it; +} +#endif // !defined(WEBP_DISABLE_STATS) + +static double GetPSNR(uint64_t mse, uint64_t size) { + return (mse > 0 && size > 0) ? 10. * log10(255. * 255. * size / mse) : 99; +} + +//------------------------------------------------------------------------------ +// StatLoop(): only collect statistics (number of skips, token usage, ...). +// This is used for deciding optimal probabilities. It also modifies the +// quantizer value if some target (size, PSNR) was specified. + +static void SetLoopParams(VP8Encoder* const enc, float q) { + // Make sure the quality parameter is inside valid bounds + q = Clamp(q, 0.f, 100.f); + + VP8SetSegmentParams(enc, q); // setup segment quantizations and filters + SetSegmentProbas(enc); // compute segment probabilities + + ResetStats(enc); + ResetSSE(enc); +} + +static uint64_t OneStatPass(VP8Encoder* const enc, VP8RDLevel rd_opt, + int nb_mbs, int percent_delta, + PassStats* const s) { + VP8EncIterator it; + uint64_t size = 0; + uint64_t size_p0 = 0; + uint64_t distortion = 0; + const uint64_t pixel_count = nb_mbs * 384; + + VP8IteratorInit(enc, &it); + SetLoopParams(enc, s->q); + do { + VP8ModeScore info; + VP8IteratorImport(&it, NULL); + if (VP8Decimate(&it, &info, rd_opt)) { + // Just record the number of skips and act like skip_proba is not used. 
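+      // (The skip probability itself is only derived afterwards, in
+      // FinalizeSkipProba(), as roughly 255 * (1 - nb_skip_ / nb_mbs).)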
+      ++enc->proba_.nb_skip_;
+    }
+    RecordResiduals(&it, &info);
+    size += info.R + info.H;
+    size_p0 += info.H;
+    distortion += info.D;
+    if (percent_delta && !VP8IteratorProgress(&it, percent_delta)) {
+      return 0;
+    }
+    VP8IteratorSaveBoundary(&it);
+  } while (VP8IteratorNext(&it) && --nb_mbs > 0);
+
+  size_p0 += enc->segment_hdr_.size_;
+  if (s->do_size_search) {
+    size += FinalizeSkipProba(enc);
+    size += FinalizeTokenProbas(&enc->proba_);
+    size = ((size + size_p0 + 1024) >> 11) + HEADER_SIZE_ESTIMATE;
+    s->value = (double)size;
+  } else {
+    s->value = GetPSNR(distortion, pixel_count);
+  }
+  return size_p0;
+}
+
+static int StatLoop(VP8Encoder* const enc) {
+  const int method = enc->method_;
+  const int do_search = enc->do_search_;
+  const int fast_probe = ((method == 0 || method == 3) && !do_search);
+  int num_pass_left = enc->config_->pass;
+  const int task_percent = 20;
+  const int percent_per_pass =
+      (task_percent + num_pass_left / 2) / num_pass_left;
+  const int final_percent = enc->percent_ + task_percent;
+  const VP8RDLevel rd_opt =
+      (method >= 3 || do_search) ? RD_OPT_BASIC : RD_OPT_NONE;
+  int nb_mbs = enc->mb_w_ * enc->mb_h_;
+  PassStats stats;
+
+  InitPassStats(enc, &stats);
+  ResetTokenStats(enc);
+
+  // Fast mode: quick analysis pass over few mbs. Better than nothing.
+  if (fast_probe) {
+    if (method == 3) {  // we need more stats for method 3 to be reliable.
+      nb_mbs = (nb_mbs > 200) ? nb_mbs >> 1 : 100;
+    } else {
+      nb_mbs = (nb_mbs > 200) ? nb_mbs >> 2 : 50;
+    }
+  }
+
+  while (num_pass_left-- > 0) {
+    const int is_last_pass = (fabs(stats.dq) <= DQ_LIMIT) ||
+                             (num_pass_left == 0) ||
+                             (enc->max_i4_header_bits_ == 0);
+    const uint64_t size_p0 =
+        OneStatPass(enc, rd_opt, nb_mbs, percent_per_pass, &stats);
+    if (size_p0 == 0) return 0;
+#if (DEBUG_SEARCH > 0)
+    printf("#%d value:%.1lf -> %.1lf q:%.2f -> %.2f\n",
+           num_pass_left, stats.last_value, stats.value, stats.last_q, stats.q);
+#endif
+    if (enc->max_i4_header_bits_ > 0 && size_p0 > PARTITION0_SIZE_LIMIT) {
+      ++num_pass_left;
+      enc->max_i4_header_bits_ >>= 1;  // strengthen header bit limitation...
+      continue;                        // ...and start over
+    }
+    if (is_last_pass) {
+      break;
+    }
+    // If no target size: just do several passes without changing 'q'
+    if (do_search) {
+      ComputeNextQ(&stats);
+      if (fabs(stats.dq) <= DQ_LIMIT) break;
+    }
+  }
+  if (!do_search || !stats.do_size_search) {
+    // Need to finalize probas now, since it wasn't done during the search.
+    FinalizeSkipProba(enc);
+    FinalizeTokenProbas(&enc->proba_);
+  }
+  VP8CalculateLevelCosts(&enc->proba_);  // finalize costs
+  return WebPReportProgress(enc->pic_, final_percent, &enc->percent_);
+}
+
+//------------------------------------------------------------------------------
+// Main loops
+//
+
+static const uint8_t kAverageBytesPerMB[8] = { 50, 24, 16, 9, 7, 5, 3, 2 };
+
+static int PreLoopInitialize(VP8Encoder* const enc) {
+  int p;
+  int ok = 1;
+  const int average_bytes_per_MB = kAverageBytesPerMB[enc->base_quant_ >> 4];
+  const int bytes_per_parts =
+      enc->mb_w_ * enc->mb_h_ * average_bytes_per_MB / enc->num_parts_;
+  // Initialize the bit-writers
+  for (p = 0; ok && p < enc->num_parts_; ++p) {
+    ok = VP8BitWriterInit(enc->parts_ + p, bytes_per_parts);
+  }
+  if (!ok) {
+    VP8EncFreeBitWriters(enc);  // malloc error occurred
+    WebPEncodingSetError(enc->pic_, VP8_ENC_ERROR_OUT_OF_MEMORY);
+  }
+  return ok;
+}
+
+static int PostLoopFinalize(VP8EncIterator* const it, int ok) {
+  VP8Encoder* const enc = it->enc_;
+  if (ok) {  // Finalize the partitions, check for extra errors.
+ int p; + for (p = 0; p < enc->num_parts_; ++p) { + VP8BitWriterFinish(enc->parts_ + p); + ok &= !enc->parts_[p].error_; + } + } + + if (ok) { // All good. Finish up. +#if !defined(WEBP_DISABLE_STATS) + if (enc->pic_->stats != NULL) { // finalize byte counters... + int i, s; + for (i = 0; i <= 2; ++i) { + for (s = 0; s < NUM_MB_SEGMENTS; ++s) { + enc->residual_bytes_[i][s] = (int)((it->bit_count_[s][i] + 7) >> 3); + } + } + } +#endif + VP8AdjustFilterStrength(it); // ...and store filter stats. + } else { + // Something bad happened -> need to do some memory cleanup. + VP8EncFreeBitWriters(enc); + } + return ok; +} + +//------------------------------------------------------------------------------ +// VP8EncLoop(): does the final bitstream coding. + +static void ResetAfterSkip(VP8EncIterator* const it) { + if (it->mb_->type_ == 1) { + *it->nz_ = 0; // reset all predictors + it->left_nz_[8] = 0; + } else { + *it->nz_ &= (1 << 24); // preserve the dc_nz bit + } +} + +int VP8EncLoop(VP8Encoder* const enc) { + VP8EncIterator it; + int ok = PreLoopInitialize(enc); + if (!ok) return 0; + + StatLoop(enc); // stats-collection loop + + VP8IteratorInit(enc, &it); + VP8InitFilter(&it); + do { + VP8ModeScore info; + const int dont_use_skip = !enc->proba_.use_skip_proba_; + const VP8RDLevel rd_opt = enc->rd_opt_level_; + + VP8IteratorImport(&it, NULL); + // Warning! order is important: first call VP8Decimate() and + // *then* decide how to code the skip decision if there's one. + if (!VP8Decimate(&it, &info, rd_opt) || dont_use_skip) { + CodeResiduals(it.bw_, &it, &info); + } else { // reset predictors after a skip + ResetAfterSkip(&it); + } + StoreSideInfo(&it); + VP8StoreFilterStats(&it); + VP8IteratorExport(&it); + ok = VP8IteratorProgress(&it, 20); + VP8IteratorSaveBoundary(&it); + } while (ok && VP8IteratorNext(&it)); + + return PostLoopFinalize(&it, ok); +} + +//------------------------------------------------------------------------------ +// Single pass using Token Buffer. 
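+//
+// Unlike VP8EncLoop() above, which writes bits as it codes each macroblock,
+// this variant first records every coefficient token into enc->tokens_,
+// periodically refreshes the probabilities from the accumulated stats, and
+// only emits the final bitstream once they have settled (VP8EmitTokens()).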
+ +#if !defined(DISABLE_TOKEN_BUFFER) + +#define MIN_COUNT 96 // minimum number of macroblocks before updating stats + +int VP8EncTokenLoop(VP8Encoder* const enc) { + // Roughly refresh the proba eight times per pass + int max_count = (enc->mb_w_ * enc->mb_h_) >> 3; + int num_pass_left = enc->config_->pass; + const int do_search = enc->do_search_; + VP8EncIterator it; + VP8EncProba* const proba = &enc->proba_; + const VP8RDLevel rd_opt = enc->rd_opt_level_; + const uint64_t pixel_count = enc->mb_w_ * enc->mb_h_ * 384; + PassStats stats; + int ok; + + InitPassStats(enc, &stats); + ok = PreLoopInitialize(enc); + if (!ok) return 0; + + if (max_count < MIN_COUNT) max_count = MIN_COUNT; + + assert(enc->num_parts_ == 1); + assert(enc->use_tokens_); + assert(proba->use_skip_proba_ == 0); + assert(rd_opt >= RD_OPT_BASIC); // otherwise, token-buffer won't be useful + assert(num_pass_left > 0); + + while (ok && num_pass_left-- > 0) { + const int is_last_pass = (fabs(stats.dq) <= DQ_LIMIT) || + (num_pass_left == 0) || + (enc->max_i4_header_bits_ == 0); + uint64_t size_p0 = 0; + uint64_t distortion = 0; + int cnt = max_count; + VP8IteratorInit(enc, &it); + SetLoopParams(enc, stats.q); + if (is_last_pass) { + ResetTokenStats(enc); + VP8InitFilter(&it); // don't collect stats until last pass (too costly) + } + VP8TBufferClear(&enc->tokens_); + do { + VP8ModeScore info; + VP8IteratorImport(&it, NULL); + if (--cnt < 0) { + FinalizeTokenProbas(proba); + VP8CalculateLevelCosts(proba); // refresh cost tables for rd-opt + cnt = max_count; + } + VP8Decimate(&it, &info, rd_opt); + ok = RecordTokens(&it, &info, &enc->tokens_); + if (!ok) { + WebPEncodingSetError(enc->pic_, VP8_ENC_ERROR_OUT_OF_MEMORY); + break; + } + size_p0 += info.H; + distortion += info.D; + if (is_last_pass) { + StoreSideInfo(&it); + VP8StoreFilterStats(&it); + VP8IteratorExport(&it); + ok = VP8IteratorProgress(&it, 20); + } + VP8IteratorSaveBoundary(&it); + } while (ok && VP8IteratorNext(&it)); + if (!ok) break; + + size_p0 += enc->segment_hdr_.size_; + if (stats.do_size_search) { + uint64_t size = FinalizeTokenProbas(&enc->proba_); + size += VP8EstimateTokenSize(&enc->tokens_, + (const uint8_t*)proba->coeffs_); + size = (size + size_p0 + 1024) >> 11; // -> size in bytes + size += HEADER_SIZE_ESTIMATE; + stats.value = (double)size; + } else { // compute and store PSNR + stats.value = GetPSNR(distortion, pixel_count); + } + +#if (DEBUG_SEARCH > 0) + printf("#%2d metric:%.1lf -> %.1lf last_q=%.2lf q=%.2lf dq=%.2lf " + " range:[%.1f, %.1f]\n", + num_pass_left, stats.last_value, stats.value, + stats.last_q, stats.q, stats.dq, stats.qmin, stats.qmax); +#endif + if (enc->max_i4_header_bits_ > 0 && size_p0 > PARTITION0_SIZE_LIMIT) { + ++num_pass_left; + enc->max_i4_header_bits_ >>= 1; // strengthen header bit limitation... + if (is_last_pass) { + ResetSideInfo(&it); + } + continue; // ...and start over + } + if (is_last_pass) { + break; // done + } + if (do_search) { + ComputeNextQ(&stats); // Adjust q + } + } + if (ok) { + if (!stats.do_size_search) { + FinalizeTokenProbas(&enc->proba_); + } + ok = VP8EmitTokens(&enc->tokens_, enc->parts_ + 0, + (const uint8_t*)proba->coeffs_, 1); + } + ok = ok && WebPReportProgress(enc->pic_, enc->percent_ + 20, &enc->percent_); + return PostLoopFinalize(&it, ok); +} + +#else + +int VP8EncTokenLoop(VP8Encoder* const enc) { + (void)enc; + return 0; // we shouldn't be here. 
+}
+
+#endif  // DISABLE_TOKEN_BUFFER
+
+//------------------------------------------------------------------------------
diff --git a/vendor/github.com/sizeofint/webpanimation/enc_histogram_enc.c b/vendor/github.com/sizeofint/webpanimation/enc_histogram_enc.c
new file mode 100644
index 00000000..fad6b44d
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/enc_histogram_enc.c
@@ -0,0 +1,1250 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Author: Jyrki Alakuijala (jyrki@google.com)
+//
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <math.h>
+
+#include "enc_backward_references_enc.h"
+#include "enc_histogram_enc.h"
+#include "dsp_lossless.h"
+#include "dsp_lossless_common.h"
+#include "utils_utils.h"
+
+#define MAX_COST 1.e38
+
+// Number of partitions for the three dominant (literal, red and blue) symbol
+// costs.
+#define NUM_PARTITIONS 4
+// The size of the bin-hash corresponding to the three dominant costs.
+#define BIN_SIZE (NUM_PARTITIONS * NUM_PARTITIONS * NUM_PARTITIONS)
+// Maximum number of histograms allowed in greedy combining algorithm.
+#define MAX_HISTO_GREEDY 100
+
+static void HistogramClear(VP8LHistogram* const p) {
+  uint32_t* const literal = p->literal_;
+  const int cache_bits = p->palette_code_bits_;
+  const int histo_size = VP8LGetHistogramSize(cache_bits);
+  memset(p, 0, histo_size);
+  p->palette_code_bits_ = cache_bits;
+  p->literal_ = literal;
+}
+
+// Swap two histogram pointers.
+static void HistogramSwap(VP8LHistogram** const A, VP8LHistogram** const B) {
+  VP8LHistogram* const tmp = *A;
+  *A = *B;
+  *B = tmp;
+}
+
+static void HistogramCopy(const VP8LHistogram* const src,
+                          VP8LHistogram* const dst) {
+  uint32_t* const dst_literal = dst->literal_;
+  const int dst_cache_bits = dst->palette_code_bits_;
+  const int literal_size = VP8LHistogramNumCodes(dst_cache_bits);
+  const int histo_size = VP8LGetHistogramSize(dst_cache_bits);
+  assert(src->palette_code_bits_ == dst_cache_bits);
+  memcpy(dst, src, histo_size);
+  dst->literal_ = dst_literal;
+  memcpy(dst->literal_, src->literal_, literal_size * sizeof(*dst->literal_));
+}
+
+int VP8LGetHistogramSize(int cache_bits) {
+  const int literal_size = VP8LHistogramNumCodes(cache_bits);
+  const size_t total_size = sizeof(VP8LHistogram) + sizeof(int) * literal_size;
+  assert(total_size <= (size_t)0x7fffffff);
+  return (int)total_size;
+}
+
+void VP8LFreeHistogram(VP8LHistogram* const histo) {
+  WebPSafeFree(histo);
+}
+
+void VP8LFreeHistogramSet(VP8LHistogramSet* const histo) {
+  WebPSafeFree(histo);
+}
+
+void VP8LHistogramStoreRefs(const VP8LBackwardRefs* const refs,
+                            VP8LHistogram* const histo) {
+  VP8LRefsCursor c = VP8LRefsCursorInit(refs);
+  while (VP8LRefsCursorOk(&c)) {
+    VP8LHistogramAddSinglePixOrCopy(histo, c.cur_pos, NULL, 0);
+    VP8LRefsCursorNext(&c);
+  }
+}
+
+void VP8LHistogramCreate(VP8LHistogram* const p,
+                         const VP8LBackwardRefs* const refs,
+                         int palette_code_bits) {
+  if (palette_code_bits >= 0) {
+    p->palette_code_bits_ = palette_code_bits;
+  }
+  HistogramClear(p);
+  VP8LHistogramStoreRefs(refs, p);
+}
+
+void VP8LHistogramInit(VP8LHistogram* const p, int palette_code_bits,
+                       int init_arrays) {
+  p->palette_code_bits_ = palette_code_bits;
+  if (init_arrays) {
+    HistogramClear(p);
+  } else {
+    p->trivial_symbol_ = 0;
+    p->bit_cost_ = 0.;
+    p->literal_cost_ = 0.;
+    p->red_cost_ = 0.;
+    p->blue_cost_ = 0.;
+    memset(p->is_used_, 0, sizeof(p->is_used_));
+  }
+}
+
+VP8LHistogram* VP8LAllocateHistogram(int cache_bits) {
+  VP8LHistogram* histo = NULL;
+  const int total_size = VP8LGetHistogramSize(cache_bits);
+  uint8_t* const memory = (uint8_t*)WebPSafeMalloc(total_size, sizeof(*memory));
+  if (memory == NULL) return NULL;
+  histo = (VP8LHistogram*)memory;
+  // literal_ won't necessarily be aligned.
+  histo->literal_ = (uint32_t*)(memory + sizeof(VP8LHistogram));
+  VP8LHistogramInit(histo, cache_bits, /*init_arrays=*/ 0);
+  return histo;
+}
+
+// Resets the pointers of the histograms to point to the bit buffer in the set.
+static void HistogramSetResetPointers(VP8LHistogramSet* const set,
+                                      int cache_bits) {
+  int i;
+  const int histo_size = VP8LGetHistogramSize(cache_bits);
+  uint8_t* memory = (uint8_t*) (set->histograms);
+  memory += set->max_size * sizeof(*set->histograms);
+  for (i = 0; i < set->max_size; ++i) {
+    memory = (uint8_t*) WEBP_ALIGN(memory);
+    set->histograms[i] = (VP8LHistogram*) memory;
+    // literal_ won't necessarily be aligned.
+    set->histograms[i]->literal_ = (uint32_t*)(memory + sizeof(VP8LHistogram));
+    memory += histo_size;
+  }
+}
+
+// Returns the total size of the VP8LHistogramSet.
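+// A set lives in a single allocation, laid out as:
+//   [VP8LHistogramSet][max_size pointers][histo 0][literal_ 0] ... [histo n-1]
+// where each histogram cell is WEBP_ALIGN'ed and immediately followed by its
+// variable-sized literal_ array, as wired up by HistogramSetResetPointers()
+// above.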
+static size_t HistogramSetTotalSize(int size, int cache_bits) { + const int histo_size = VP8LGetHistogramSize(cache_bits); + return (sizeof(VP8LHistogramSet) + size * (sizeof(VP8LHistogram*) + + histo_size + WEBP_ALIGN_CST)); +} + +VP8LHistogramSet* VP8LAllocateHistogramSet(int size, int cache_bits) { + int i; + VP8LHistogramSet* set; + const size_t total_size = HistogramSetTotalSize(size, cache_bits); + uint8_t* memory = (uint8_t*)WebPSafeMalloc(total_size, sizeof(*memory)); + if (memory == NULL) return NULL; + + set = (VP8LHistogramSet*)memory; + memory += sizeof(*set); + set->histograms = (VP8LHistogram**)memory; + set->max_size = size; + set->size = size; + HistogramSetResetPointers(set, cache_bits); + for (i = 0; i < size; ++i) { + VP8LHistogramInit(set->histograms[i], cache_bits, /*init_arrays=*/ 0); + } + return set; +} + +void VP8LHistogramSetClear(VP8LHistogramSet* const set) { + int i; + const int cache_bits = set->histograms[0]->palette_code_bits_; + const int size = set->max_size; + const size_t total_size = HistogramSetTotalSize(size, cache_bits); + uint8_t* memory = (uint8_t*)set; + + memset(memory, 0, total_size); + memory += sizeof(*set); + set->histograms = (VP8LHistogram**)memory; + set->max_size = size; + set->size = size; + HistogramSetResetPointers(set, cache_bits); + for (i = 0; i < size; ++i) { + set->histograms[i]->palette_code_bits_ = cache_bits; + } +} + +// Removes the histogram 'i' from 'set' by setting it to NULL. +static void HistogramSetRemoveHistogram(VP8LHistogramSet* const set, int i, + int* const num_used) { + assert(set->histograms[i] != NULL); + set->histograms[i] = NULL; + --*num_used; + // If we remove the last valid one, shrink until the next valid one. + if (i == set->size - 1) { + while (set->size >= 1 && set->histograms[set->size - 1] == NULL) { + --set->size; + } + } +} + +// ----------------------------------------------------------------------------- + +void VP8LHistogramAddSinglePixOrCopy(VP8LHistogram* const histo, + const PixOrCopy* const v, + int (*const distance_modifier)(int, int), + int distance_modifier_arg0) { + if (PixOrCopyIsLiteral(v)) { + ++histo->alpha_[PixOrCopyLiteral(v, 3)]; + ++histo->red_[PixOrCopyLiteral(v, 2)]; + ++histo->literal_[PixOrCopyLiteral(v, 1)]; + ++histo->blue_[PixOrCopyLiteral(v, 0)]; + } else if (PixOrCopyIsCacheIdx(v)) { + const int literal_ix = + NUM_LITERAL_CODES + NUM_LENGTH_CODES + PixOrCopyCacheIdx(v); + assert(histo->palette_code_bits_ != 0); + ++histo->literal_[literal_ix]; + } else { + int code, extra_bits; + VP8LPrefixEncodeBits(PixOrCopyLength(v), &code, &extra_bits); + ++histo->literal_[NUM_LITERAL_CODES + code]; + if (distance_modifier == NULL) { + VP8LPrefixEncodeBits(PixOrCopyDistance(v), &code, &extra_bits); + } else { + VP8LPrefixEncodeBits( + distance_modifier(distance_modifier_arg0, PixOrCopyDistance(v)), + &code, &extra_bits); + } + ++histo->distance_[code]; + } +} + +// ----------------------------------------------------------------------------- +// Entropy-related functions. + +static WEBP_INLINE double BitsEntropyRefine(const VP8LBitEntropy* entropy) { + double mix; + if (entropy->nonzeros < 5) { + if (entropy->nonzeros <= 1) { + return 0; + } + // Two symbols, they will be 0 and 1 in a Huffman code. + // Let's mix in a bit of entropy to favor good clustering when + // distributions of these are combined. 
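+    // (A pure two-symbol histogram costs about one bit per symbol under
+    // Huffman, i.e. roughly 'sum' bits in total; the small 1% entropy term
+    // below only breaks ties between otherwise equal candidate clusterings.)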
+    if (entropy->nonzeros == 2) {
+      return 0.99 * entropy->sum + 0.01 * entropy->entropy;
+    }
+    // No matter what the entropy says, we cannot be better than min_limit
+    // with Huffman coding. I am mixing a bit of entropy into the
+    // min_limit since it produces much better (~0.5 %) compression results
+    // perhaps because of better entropy clustering.
+    if (entropy->nonzeros == 3) {
+      mix = 0.95;
+    } else {
+      mix = 0.7;  // nonzeros == 4.
+    }
+  } else {
+    mix = 0.627;
+  }
+
+  {
+    double min_limit = 2 * entropy->sum - entropy->max_val;
+    min_limit = mix * min_limit + (1.0 - mix) * entropy->entropy;
+    return (entropy->entropy < min_limit) ? min_limit : entropy->entropy;
+  }
+}
+
+double VP8LBitsEntropy(const uint32_t* const array, int n) {
+  VP8LBitEntropy entropy;
+  VP8LBitsEntropyUnrefined(array, n, &entropy);
+
+  return BitsEntropyRefine(&entropy);
+}
+
+static double InitialHuffmanCost(void) {
+  // Small bias because Huffman code length is typically not stored in
+  // full length.
+  static const int kHuffmanCodeOfHuffmanCodeSize = CODE_LENGTH_CODES * 3;
+  static const double kSmallBias = 9.1;
+  return kHuffmanCodeOfHuffmanCodeSize - kSmallBias;
+}
+
+// Finalize the Huffman cost based on streak numbers and length type (<3 or >=3)
+static double FinalHuffmanCost(const VP8LStreaks* const stats) {
+  // The constants in this function are experimental and got rounded from
+  // their original values in 1/8 when switched to 1/1024.
+  double retval = InitialHuffmanCost();
+  // Second coefficient: Many zeros in the histogram are covered efficiently
+  // by a run-length encode. Originally 2/8.
+  retval += stats->counts[0] * 1.5625 + 0.234375 * stats->streaks[0][1];
+  // Third coefficient: Constant values are encoded less efficiently, but still
+  // RLE'ed. Originally 6/8.
+  retval += stats->counts[1] * 2.578125 + 0.703125 * stats->streaks[1][1];
+  // 0s are usually encoded more efficiently than non-0s.
+  // Originally 15/8.
+  retval += 1.796875 * stats->streaks[0][0];
+  // Originally 26/8.
+  retval += 3.28125 * stats->streaks[1][0];
+  return retval;
+}
+
+// Get the symbol entropy for the distribution 'population'.
+// Set 'trivial_sym', if there's only one symbol present in the distribution.
+static double PopulationCost(const uint32_t* const population, int length,
+                             uint32_t* const trivial_sym,
+                             uint8_t* const is_used) {
+  VP8LBitEntropy bit_entropy;
+  VP8LStreaks stats;
+  VP8LGetEntropyUnrefined(population, length, &bit_entropy, &stats);
+  if (trivial_sym != NULL) {
+    *trivial_sym = (bit_entropy.nonzeros == 1) ? bit_entropy.nonzero_code
+                                               : VP8L_NON_TRIVIAL_SYM;
+  }
+  // The histogram is used if there is at least one non-zero streak.
+  *is_used = (stats.streaks[1][0] != 0 || stats.streaks[1][1] != 0);
+
+  return BitsEntropyRefine(&bit_entropy) + FinalHuffmanCost(&stats);
+}
+
+// trivial_at_end is 1 if the two histograms only have one element that is
+// non-zero: both the zero-th one, or both the last one.
+static WEBP_INLINE double GetCombinedEntropy(const uint32_t* const X,
+                                             const uint32_t* const Y,
+                                             int length, int is_X_used,
+                                             int is_Y_used,
+                                             int trivial_at_end) {
+  VP8LStreaks stats;
+  if (trivial_at_end) {
+    // This configuration is due to palettization that transforms an indexed
+    // pixel into 0xff000000 | (pixel << 8) in VP8LBundleColorMap.
+    // BitsEntropyRefine is 0 for histograms with only one non-zero value.
+    // Only FinalHuffmanCost needs to be evaluated.
+    memset(&stats, 0, sizeof(stats));
+    // Deal with the non-zero value at index 0 or length-1.
+ stats.streaks[1][0] = 1; + // Deal with the following/previous zero streak. + stats.counts[0] = 1; + stats.streaks[0][1] = length - 1; + return FinalHuffmanCost(&stats); + } else { + VP8LBitEntropy bit_entropy; + if (is_X_used) { + if (is_Y_used) { + VP8LGetCombinedEntropyUnrefined(X, Y, length, &bit_entropy, &stats); + } else { + VP8LGetEntropyUnrefined(X, length, &bit_entropy, &stats); + } + } else { + if (is_Y_used) { + VP8LGetEntropyUnrefined(Y, length, &bit_entropy, &stats); + } else { + memset(&stats, 0, sizeof(stats)); + stats.counts[0] = 1; + stats.streaks[0][length > 3] = length; + VP8LBitEntropyInit(&bit_entropy); + } + } + + return BitsEntropyRefine(&bit_entropy) + FinalHuffmanCost(&stats); + } +} + +// Estimates the Entropy + Huffman + other block overhead size cost. +double VP8LHistogramEstimateBits(VP8LHistogram* const p) { + return + PopulationCost(p->literal_, VP8LHistogramNumCodes(p->palette_code_bits_), + NULL, &p->is_used_[0]) + + PopulationCost(p->red_, NUM_LITERAL_CODES, NULL, &p->is_used_[1]) + + PopulationCost(p->blue_, NUM_LITERAL_CODES, NULL, &p->is_used_[2]) + + PopulationCost(p->alpha_, NUM_LITERAL_CODES, NULL, &p->is_used_[3]) + + PopulationCost(p->distance_, NUM_DISTANCE_CODES, NULL, &p->is_used_[4]) + + VP8LExtraCost(p->literal_ + NUM_LITERAL_CODES, NUM_LENGTH_CODES) + + VP8LExtraCost(p->distance_, NUM_DISTANCE_CODES); +} + +// ----------------------------------------------------------------------------- +// Various histogram combine/cost-eval functions + +static int GetCombinedHistogramEntropy(const VP8LHistogram* const a, + const VP8LHistogram* const b, + double cost_threshold, + double* cost) { + const int palette_code_bits = a->palette_code_bits_; + int trivial_at_end = 0; + assert(a->palette_code_bits_ == b->palette_code_bits_); + *cost += GetCombinedEntropy(a->literal_, b->literal_, + VP8LHistogramNumCodes(palette_code_bits), + a->is_used_[0], b->is_used_[0], 0); + *cost += VP8LExtraCostCombined(a->literal_ + NUM_LITERAL_CODES, + b->literal_ + NUM_LITERAL_CODES, + NUM_LENGTH_CODES); + if (*cost > cost_threshold) return 0; + + if (a->trivial_symbol_ != VP8L_NON_TRIVIAL_SYM && + a->trivial_symbol_ == b->trivial_symbol_) { + // A, R and B are all 0 or 0xff. + const uint32_t color_a = (a->trivial_symbol_ >> 24) & 0xff; + const uint32_t color_r = (a->trivial_symbol_ >> 16) & 0xff; + const uint32_t color_b = (a->trivial_symbol_ >> 0) & 0xff; + if ((color_a == 0 || color_a == 0xff) && + (color_r == 0 || color_r == 0xff) && + (color_b == 0 || color_b == 0xff)) { + trivial_at_end = 1; + } + } + + *cost += + GetCombinedEntropy(a->red_, b->red_, NUM_LITERAL_CODES, a->is_used_[1], + b->is_used_[1], trivial_at_end); + if (*cost > cost_threshold) return 0; + + *cost += + GetCombinedEntropy(a->blue_, b->blue_, NUM_LITERAL_CODES, a->is_used_[2], + b->is_used_[2], trivial_at_end); + if (*cost > cost_threshold) return 0; + + *cost += + GetCombinedEntropy(a->alpha_, b->alpha_, NUM_LITERAL_CODES, + a->is_used_[3], b->is_used_[3], trivial_at_end); + if (*cost > cost_threshold) return 0; + + *cost += + GetCombinedEntropy(a->distance_, b->distance_, NUM_DISTANCE_CODES, + a->is_used_[4], b->is_used_[4], 0); + *cost += + VP8LExtraCostCombined(a->distance_, b->distance_, NUM_DISTANCE_CODES); + if (*cost > cost_threshold) return 0; + + return 1; +} + +static WEBP_INLINE void HistogramAdd(const VP8LHistogram* const a, + const VP8LHistogram* const b, + VP8LHistogram* const out) { + VP8LHistogramAdd(a, b, out); + out->trivial_symbol_ = (a->trivial_symbol_ == b->trivial_symbol_) + ? 
a->trivial_symbol_ + : VP8L_NON_TRIVIAL_SYM; +} + +// Performs out = a + b, computing the cost C(a+b) - C(a) - C(b) while comparing +// to the threshold value 'cost_threshold'. The score returned is +// Score = C(a+b) - C(a) - C(b), where C(a) + C(b) is known and fixed. +// Since the previous score passed is 'cost_threshold', we only need to compare +// the partial cost against 'cost_threshold + C(a) + C(b)' to possibly bail-out +// early. +static double HistogramAddEval(const VP8LHistogram* const a, + const VP8LHistogram* const b, + VP8LHistogram* const out, + double cost_threshold) { + double cost = 0; + const double sum_cost = a->bit_cost_ + b->bit_cost_; + cost_threshold += sum_cost; + + if (GetCombinedHistogramEntropy(a, b, cost_threshold, &cost)) { + HistogramAdd(a, b, out); + out->bit_cost_ = cost; + out->palette_code_bits_ = a->palette_code_bits_; + } + + return cost - sum_cost; +} + +// Same as HistogramAddEval(), except that the resulting histogram +// is not stored. Only the cost C(a+b) - C(a) is evaluated. We omit +// the term C(b) which is constant over all the evaluations. +static double HistogramAddThresh(const VP8LHistogram* const a, + const VP8LHistogram* const b, + double cost_threshold) { + double cost; + assert(a != NULL && b != NULL); + cost = -a->bit_cost_; + GetCombinedHistogramEntropy(a, b, cost_threshold, &cost); + return cost; +} + +// ----------------------------------------------------------------------------- + +// The structure to keep track of cost range for the three dominant entropy +// symbols. +// TODO(skal): Evaluate if float can be used here instead of double for +// representing the entropy costs. +typedef struct { + double literal_max_; + double literal_min_; + double red_max_; + double red_min_; + double blue_max_; + double blue_min_; +} DominantCostRange; + +static void DominantCostRangeInit(DominantCostRange* const c) { + c->literal_max_ = 0.; + c->literal_min_ = MAX_COST; + c->red_max_ = 0.; + c->red_min_ = MAX_COST; + c->blue_max_ = 0.; + c->blue_min_ = MAX_COST; +} + +static void UpdateDominantCostRange( + const VP8LHistogram* const h, DominantCostRange* const c) { + if (c->literal_max_ < h->literal_cost_) c->literal_max_ = h->literal_cost_; + if (c->literal_min_ > h->literal_cost_) c->literal_min_ = h->literal_cost_; + if (c->red_max_ < h->red_cost_) c->red_max_ = h->red_cost_; + if (c->red_min_ > h->red_cost_) c->red_min_ = h->red_cost_; + if (c->blue_max_ < h->blue_cost_) c->blue_max_ = h->blue_cost_; + if (c->blue_min_ > h->blue_cost_) c->blue_min_ = h->blue_cost_; +} + +static void UpdateHistogramCost(VP8LHistogram* const h) { + uint32_t alpha_sym, red_sym, blue_sym; + const double alpha_cost = + PopulationCost(h->alpha_, NUM_LITERAL_CODES, &alpha_sym, + &h->is_used_[3]); + const double distance_cost = + PopulationCost(h->distance_, NUM_DISTANCE_CODES, NULL, &h->is_used_[4]) + + VP8LExtraCost(h->distance_, NUM_DISTANCE_CODES); + const int num_codes = VP8LHistogramNumCodes(h->palette_code_bits_); + h->literal_cost_ = + PopulationCost(h->literal_, num_codes, NULL, &h->is_used_[0]) + + VP8LExtraCost(h->literal_ + NUM_LITERAL_CODES, NUM_LENGTH_CODES); + h->red_cost_ = + PopulationCost(h->red_, NUM_LITERAL_CODES, &red_sym, &h->is_used_[1]); + h->blue_cost_ = + PopulationCost(h->blue_, NUM_LITERAL_CODES, &blue_sym, &h->is_used_[2]); + h->bit_cost_ = h->literal_cost_ + h->red_cost_ + h->blue_cost_ + + alpha_cost + distance_cost; + if ((alpha_sym | red_sym | blue_sym) == VP8L_NON_TRIVIAL_SYM) { + h->trivial_symbol_ = VP8L_NON_TRIVIAL_SYM; + } else 
{
+    h->trivial_symbol_ =
+        ((uint32_t)alpha_sym << 24) | (red_sym << 16) | (blue_sym << 0);
+  }
+}
+
+static int GetBinIdForEntropy(double min, double max, double val) {
+  const double range = max - min;
+  if (range > 0.) {
+    const double delta = val - min;
+    return (int)((NUM_PARTITIONS - 1e-6) * delta / range);
+  } else {
+    return 0;
+  }
+}
+
+static int GetHistoBinIndex(const VP8LHistogram* const h,
+                            const DominantCostRange* const c, int low_effort) {
+  int bin_id = GetBinIdForEntropy(c->literal_min_, c->literal_max_,
+                                  h->literal_cost_);
+  assert(bin_id < NUM_PARTITIONS);
+  if (!low_effort) {
+    bin_id = bin_id * NUM_PARTITIONS +
+             GetBinIdForEntropy(c->red_min_, c->red_max_, h->red_cost_);
+    bin_id = bin_id * NUM_PARTITIONS +
+             GetBinIdForEntropy(c->blue_min_, c->blue_max_, h->blue_cost_);
+    assert(bin_id < BIN_SIZE);
+  }
+  return bin_id;
+}
+
+// Construct the histograms from backward references.
+static void HistogramBuild(
+    int xsize, int histo_bits, const VP8LBackwardRefs* const backward_refs,
+    VP8LHistogramSet* const image_histo) {
+  int x = 0, y = 0;
+  const int histo_xsize = VP8LSubSampleSize(xsize, histo_bits);
+  VP8LHistogram** const histograms = image_histo->histograms;
+  VP8LRefsCursor c = VP8LRefsCursorInit(backward_refs);
+  assert(histo_bits > 0);
+  VP8LHistogramSetClear(image_histo);
+  while (VP8LRefsCursorOk(&c)) {
+    const PixOrCopy* const v = c.cur_pos;
+    const int ix = (y >> histo_bits) * histo_xsize + (x >> histo_bits);
+    VP8LHistogramAddSinglePixOrCopy(histograms[ix], v, NULL, 0);
+    x += PixOrCopyLength(v);
+    while (x >= xsize) {
+      x -= xsize;
+      ++y;
+    }
+    VP8LRefsCursorNext(&c);
+  }
+}
+
+// Copies the histograms and computes their bit_cost_.
+static const uint16_t kInvalidHistogramSymbol = (uint16_t)(-1);
+static void HistogramCopyAndAnalyze(VP8LHistogramSet* const orig_histo,
+                                    VP8LHistogramSet* const image_histo,
+                                    int* const num_used,
+                                    uint16_t* const histogram_symbols) {
+  int i, cluster_id;
+  int num_used_orig = *num_used;
+  VP8LHistogram** const orig_histograms = orig_histo->histograms;
+  VP8LHistogram** const histograms = image_histo->histograms;
+  assert(image_histo->max_size == orig_histo->max_size);
+  for (cluster_id = 0, i = 0; i < orig_histo->max_size; ++i) {
+    VP8LHistogram* const histo = orig_histograms[i];
+    UpdateHistogramCost(histo);
+
+    // Skip the histogram if it is completely empty, which can happen for tiles
+    // with no information (when they are skipped because of LZ77).
+    if (!histo->is_used_[0] && !histo->is_used_[1] && !histo->is_used_[2]
+        && !histo->is_used_[3] && !histo->is_used_[4]) {
+      // The first histogram is always used. If a histogram is empty, we set
+      // its id to be the same as the previous one: this will improve
+      // compressibility for later LZ77.
+      assert(i > 0);
+      HistogramSetRemoveHistogram(image_histo, i, num_used);
+      HistogramSetRemoveHistogram(orig_histo, i, &num_used_orig);
+      histogram_symbols[i] = kInvalidHistogramSymbol;
+    } else {
+      // Copy histograms from orig_histo[] to image_histo[].
+      HistogramCopy(histo, histograms[i]);
+      histogram_symbols[i] = cluster_id++;
+      assert(cluster_id <= image_histo->max_size);
+    }
+  }
+}
+
+// Partition histograms into different entropy bins for three dominant (literal,
+// red and blue) symbol costs and compute the histogram aggregate bit_cost.
+static void HistogramAnalyzeEntropyBin(VP8LHistogramSet* const image_histo,
+                                       uint16_t* const bin_map,
+                                       int low_effort) {
+  int i;
+  VP8LHistogram** const histograms = image_histo->histograms;
+  const int histo_size = image_histo->size;
+  DominantCostRange cost_range;
+  DominantCostRangeInit(&cost_range);
+
+  // Analyze the dominant (literal, red and blue) entropy costs.
+  for (i = 0; i < histo_size; ++i) {
+    if (histograms[i] == NULL) continue;
+    UpdateDominantCostRange(histograms[i], &cost_range);
+  }
+
+  // bin-hash histograms on three of the dominant (literal, red and blue)
+  // symbol costs and store the resulting bin_id for each histogram.
+  for (i = 0; i < histo_size; ++i) {
+    // bin_map[i] is not set to a special value as its use will later be
+    // guarded by a (histograms[i] == NULL) check.
+    if (histograms[i] == NULL) continue;
+    bin_map[i] = GetHistoBinIndex(histograms[i], &cost_range, low_effort);
+  }
+}
+
+// Merges some histograms with the same bin_id together if it's advantageous.
+// Sets the remaining histograms to NULL.
+static void HistogramCombineEntropyBin(VP8LHistogramSet* const image_histo,
+                                       int* num_used,
+                                       const uint16_t* const clusters,
+                                       uint16_t* const cluster_mappings,
+                                       VP8LHistogram* cur_combo,
+                                       const uint16_t* const bin_map,
+                                       int num_bins,
+                                       double combine_cost_factor,
+                                       int low_effort) {
+  VP8LHistogram** const histograms = image_histo->histograms;
+  int idx;
+  struct {
+    int16_t first;    // position of the histogram that accumulates all
+                      // histograms with the same bin_id
+    uint16_t num_combine_failures;   // number of combine failures per bin_id
+  } bin_info[BIN_SIZE];
+
+  assert(num_bins <= BIN_SIZE);
+  for (idx = 0; idx < num_bins; ++idx) {
+    bin_info[idx].first = -1;
+    bin_info[idx].num_combine_failures = 0;
+  }
+
+  // By default, a cluster matches itself.
+  for (idx = 0; idx < *num_used; ++idx) cluster_mappings[idx] = idx;
+  for (idx = 0; idx < image_histo->size; ++idx) {
+    int bin_id, first;
+    if (histograms[idx] == NULL) continue;
+    bin_id = bin_map[idx];
+    first = bin_info[bin_id].first;
+    if (first == -1) {
+      bin_info[bin_id].first = idx;
+    } else if (low_effort) {
+      HistogramAdd(histograms[idx], histograms[first], histograms[first]);
+      HistogramSetRemoveHistogram(image_histo, idx, num_used);
+      cluster_mappings[clusters[idx]] = clusters[first];
+    } else {
+      // try to merge #idx into #first (both share the same bin_id)
+      const double bit_cost = histograms[idx]->bit_cost_;
+      const double bit_cost_thresh = -bit_cost * combine_cost_factor;
+      const double curr_cost_diff =
+          HistogramAddEval(histograms[first], histograms[idx],
+                           cur_combo, bit_cost_thresh);
+      if (curr_cost_diff < bit_cost_thresh) {
+        // Try to merge two histograms only if the combo is a trivial one or
+        // the two candidate histograms are already non-trivial.
+        // For some images, 'try_combine' turns out to be false for a lot of
+        // histogram pairs. In that case, we fall back to combining
+        // histograms as usual to avoid increasing the header size.
+        const int try_combine =
+            (cur_combo->trivial_symbol_ != VP8L_NON_TRIVIAL_SYM) ||
+            ((histograms[idx]->trivial_symbol_ == VP8L_NON_TRIVIAL_SYM) &&
+             (histograms[first]->trivial_symbol_ == VP8L_NON_TRIVIAL_SYM));
+        const int max_combine_failures = 32;
+        if (try_combine ||
+            bin_info[bin_id].num_combine_failures >= max_combine_failures) {
+          // move the (better) merged histogram to its final slot
+          HistogramSwap(&cur_combo, &histograms[first]);
+          HistogramSetRemoveHistogram(image_histo, idx, num_used);
+          cluster_mappings[clusters[idx]] = clusters[first];
+        } else {
+          ++bin_info[bin_id].num_combine_failures;
+        }
+      }
+    }
+  }
+  if (low_effort) {
+    // for low_effort case, update the final cost when everything is merged
+    for (idx = 0; idx < image_histo->size; ++idx) {
+      if (histograms[idx] == NULL) continue;
+      UpdateHistogramCost(histograms[idx]);
+    }
+  }
+}
+
+// Implement a Lehmer random number generator with a multiplicative constant of
+// 48271 and a modulo constant of 2^31 - 1.
+static uint32_t MyRand(uint32_t* const seed) {
+  *seed = (uint32_t)(((uint64_t)(*seed) * 48271u) % 2147483647u);
+  assert(*seed > 0);
+  return *seed;
+}
+
+// -----------------------------------------------------------------------------
+// Histogram pairs priority queue
+
+// Pair of histograms. Negative idx1 value means that pair is out-of-date.
+typedef struct {
+  int idx1;
+  int idx2;
+  double cost_diff;
+  double cost_combo;
+} HistogramPair;
+
+typedef struct {
+  HistogramPair* queue;
+  int size;
+  int max_size;
+} HistoQueue;
+
+static int HistoQueueInit(HistoQueue* const histo_queue, const int max_size) {
+  histo_queue->size = 0;
+  histo_queue->max_size = max_size;
+  // We allocate max_size + 1 because the last element at index "size" is
+  // used as temporary data (and it could be up to max_size).
+  histo_queue->queue = (HistogramPair*)WebPSafeMalloc(
+      histo_queue->max_size + 1, sizeof(*histo_queue->queue));
+  return histo_queue->queue != NULL;
+}
+
+static void HistoQueueClear(HistoQueue* const histo_queue) {
+  assert(histo_queue != NULL);
+  WebPSafeFree(histo_queue->queue);
+  histo_queue->size = 0;
+  histo_queue->max_size = 0;
+}
+
+// Pop a specific pair in the queue by replacing it with the last one
+// and shrinking the queue.
+static void HistoQueuePopPair(HistoQueue* const histo_queue,
+                              HistogramPair* const pair) {
+  assert(pair >= histo_queue->queue &&
+         pair < (histo_queue->queue + histo_queue->size));
+  assert(histo_queue->size > 0);
+  *pair = histo_queue->queue[histo_queue->size - 1];
+  --histo_queue->size;
+}
+
+// Check whether a pair in the queue should be updated as head or not.
+static void HistoQueueUpdateHead(HistoQueue* const histo_queue,
+                                 HistogramPair* const pair) {
+  assert(pair->cost_diff < 0.);
+  assert(pair >= histo_queue->queue &&
+         pair < (histo_queue->queue + histo_queue->size));
+  assert(histo_queue->size > 0);
+  if (pair->cost_diff < histo_queue->queue[0].cost_diff) {
+    // Replace the best pair.
+    const HistogramPair tmp = histo_queue->queue[0];
+    histo_queue->queue[0] = *pair;
+    *pair = tmp;
+  }
+}
+
+// Update the cost diff and combo of a pair of histograms. This needs to be
+// called when the histograms have been merged with a third one.
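+// On return, pair->cost_diff = C(h1+h2) - C(h1) - C(h2): a negative value
+// means that merging the two histograms is estimated to save that many bits
+// (e.g. cost_diff == -12.5 is a saving of roughly 12.5 bits over coding
+// them separately).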
+static void HistoQueueUpdatePair(const VP8LHistogram* const h1,
+                                 const VP8LHistogram* const h2,
+                                 double threshold,
+                                 HistogramPair* const pair) {
+  const double sum_cost = h1->bit_cost_ + h2->bit_cost_;
+  pair->cost_combo = 0.;
+  GetCombinedHistogramEntropy(h1, h2, sum_cost + threshold, &pair->cost_combo);
+  pair->cost_diff = pair->cost_combo - sum_cost;
+}
+
+// Create a pair from indices "idx1" and "idx2" provided its cost
+// is inferior to "threshold", a negative entropy.
+// It returns the cost of the pair, or 0. if it is superior to the threshold.
+static double HistoQueuePush(HistoQueue* const histo_queue,
+                             VP8LHistogram** const histograms, int idx1,
+                             int idx2, double threshold) {
+  const VP8LHistogram* h1;
+  const VP8LHistogram* h2;
+  HistogramPair pair;
+
+  // Stop here if the queue is full.
+  if (histo_queue->size == histo_queue->max_size) return 0.;
+  assert(threshold <= 0.);
+  if (idx1 > idx2) {
+    const int tmp = idx2;
+    idx2 = idx1;
+    idx1 = tmp;
+  }
+  pair.idx1 = idx1;
+  pair.idx2 = idx2;
+  h1 = histograms[idx1];
+  h2 = histograms[idx2];
+
+  HistoQueueUpdatePair(h1, h2, threshold, &pair);
+
+  // Do not even consider the pair if it does not improve the entropy.
+  if (pair.cost_diff >= threshold) return 0.;
+
+  histo_queue->queue[histo_queue->size++] = pair;
+  HistoQueueUpdateHead(histo_queue, &histo_queue->queue[histo_queue->size - 1]);
+
+  return pair.cost_diff;
+}
+
+// -----------------------------------------------------------------------------
+
+// Combines histograms by continuously choosing the one with the highest cost
+// reduction.
+static int HistogramCombineGreedy(VP8LHistogramSet* const image_histo,
+                                  int* const num_used) {
+  int ok = 0;
+  const int image_histo_size = image_histo->size;
+  int i, j;
+  VP8LHistogram** const histograms = image_histo->histograms;
+  // Priority queue of histogram pairs.
+  HistoQueue histo_queue;
+
+  // image_histo_size^2 for the queue size is safe. If you look at
+  // HistogramCombineGreedy, and imagine that HistoQueuePush always pushes
+  // data to the queue, you insert at most:
+  // - image_histo_size*(image_histo_size-1)/2 (the first two for loops)
+  // - image_histo_size - 1 in the last for loop at the first iteration of
+  //   the while loop, image_histo_size - 2 at the second iteration ...
+  //   therefore image_histo_size*(image_histo_size-1)/2 overall too
+  if (!HistoQueueInit(&histo_queue, image_histo_size * image_histo_size)) {
+    goto End;
+  }
+
+  for (i = 0; i < image_histo_size; ++i) {
+    if (image_histo->histograms[i] == NULL) continue;
+    for (j = i + 1; j < image_histo_size; ++j) {
+      // Initialize queue.
+      if (image_histo->histograms[j] == NULL) continue;
+      HistoQueuePush(&histo_queue, histograms, i, j, 0.);
+    }
+  }
+
+  while (histo_queue.size > 0) {
+    const int idx1 = histo_queue.queue[0].idx1;
+    const int idx2 = histo_queue.queue[0].idx2;
+    HistogramAdd(histograms[idx2], histograms[idx1], histograms[idx1]);
+    histograms[idx1]->bit_cost_ = histo_queue.queue[0].cost_combo;
+
+    // Remove merged histogram.
+    HistogramSetRemoveHistogram(image_histo, idx2, num_used);
+
+    // Remove pairs intersecting the just combined best pair.
+    for (i = 0; i < histo_queue.size;) {
+      HistogramPair* const p = histo_queue.queue + i;
+      if (p->idx1 == idx1 || p->idx2 == idx1 ||
+          p->idx1 == idx2 || p->idx2 == idx2) {
+        HistoQueuePopPair(&histo_queue, p);
+      } else {
+        HistoQueueUpdateHead(&histo_queue, p);
+        ++i;
+      }
+    }
+
+    // Push new pairs formed with combined histogram to the queue.
+    for (i = 0; i < image_histo->size; ++i) {
+      if (i == idx1 || image_histo->histograms[i] == NULL) continue;
+      HistoQueuePush(&histo_queue, image_histo->histograms, idx1, i, 0.);
+    }
+  }
+
+  ok = 1;
+
+ End:
+  HistoQueueClear(&histo_queue);
+  return ok;
+}
+
+// Perform histogram aggregation using a stochastic approach.
+// 'do_greedy' is set to 1 if a greedy approach needs to be performed
+// afterwards, 0 otherwise.
+static int PairComparison(const void* idx1, const void* idx2) {
+  // To be used with bsearch: <0 when *idx1<*idx2, >0 if >, 0 when ==.
+  return (*(int*) idx1 - *(int*) idx2);
+}
+static int HistogramCombineStochastic(VP8LHistogramSet* const image_histo,
+                                      int* const num_used, int min_cluster_size,
+                                      int* const do_greedy) {
+  int j, iter;
+  uint32_t seed = 1;
+  int tries_with_no_success = 0;
+  const int outer_iters = *num_used;
+  const int num_tries_no_success = outer_iters / 2;
+  VP8LHistogram** const histograms = image_histo->histograms;
+  // Priority queue of histogram pairs. Its size, 'kHistoQueueSize', impacts
+  // the quality of the compression and the speed: the smaller it is, the
+  // faster the combining, but the worse the compression.
+  HistoQueue histo_queue;
+  const int kHistoQueueSize = 9;
+  int ok = 0;
+  // mapping from an index in image_histo with no NULL histogram to the
+  // full-blown image_histo.
+  int* mappings;
+
+  if (*num_used < min_cluster_size) {
+    *do_greedy = 1;
+    return 1;
+  }
+
+  mappings = (int*) WebPSafeMalloc(*num_used, sizeof(*mappings));
+  if (mappings == NULL) return 0;
+  if (!HistoQueueInit(&histo_queue, kHistoQueueSize)) goto End;
+  // Fill the initial mapping.
+  for (j = 0, iter = 0; iter < image_histo->size; ++iter) {
+    if (histograms[iter] == NULL) continue;
+    mappings[j++] = iter;
+  }
+  assert(j == *num_used);
+
+  // Collapse similar histograms in 'image_histo'.
+  for (iter = 0;
+       iter < outer_iters && *num_used >= min_cluster_size &&
+           ++tries_with_no_success < num_tries_no_success;
+       ++iter) {
+    int* mapping_index;
+    double best_cost =
+        (histo_queue.size == 0) ? 0. : histo_queue.queue[0].cost_diff;
+    int best_idx1 = -1, best_idx2 = 1;
+    const uint32_t rand_range = (*num_used - 1) * (*num_used);
+    // (*num_used) / 2 was chosen empirically. Less means faster but worse
+    // compression.
+    const int num_tries = (*num_used) / 2;
+
+    // Pick random samples.
+    for (j = 0; *num_used >= 2 && j < num_tries; ++j) {
+      double curr_cost;
+      // Choose two different histograms at random and try to combine them.
+      const uint32_t tmp = MyRand(&seed) % rand_range;
+      uint32_t idx1 = tmp / (*num_used - 1);
+      uint32_t idx2 = tmp % (*num_used - 1);
+      if (idx2 >= idx1) ++idx2;
+      idx1 = mappings[idx1];
+      idx2 = mappings[idx2];
+
+      // Calculate cost reduction on combination.
+      curr_cost =
+          HistoQueuePush(&histo_queue, histograms, idx1, idx2, best_cost);
+      if (curr_cost < 0) {  // found a better pair?
+        best_cost = curr_cost;
+        // Empty the queue if we reached full capacity.
+        if (histo_queue.size == histo_queue.max_size) break;
+      }
+    }
+    if (histo_queue.size == 0) continue;
+
+    // Get the best histograms.
+    best_idx1 = histo_queue.queue[0].idx1;
+    best_idx2 = histo_queue.queue[0].idx2;
+    assert(best_idx1 < best_idx2);
+    // Pop best_idx2 from mappings.
+    mapping_index = (int*) bsearch(&best_idx2, mappings, *num_used,
+                                   sizeof(best_idx2), &PairComparison);
+    assert(mapping_index != NULL);
+    memmove(mapping_index, mapping_index + 1, sizeof(*mapping_index) *
+        ((*num_used) - (mapping_index - mappings) - 1));
+    // Merge the histograms and remove best_idx2 from the queue.
+    HistogramAdd(histograms[best_idx2], histograms[best_idx1],
+                 histograms[best_idx1]);
+    histograms[best_idx1]->bit_cost_ = histo_queue.queue[0].cost_combo;
+    HistogramSetRemoveHistogram(image_histo, best_idx2, num_used);
+    // Parse the queue and update each pair that deals with best_idx1 or
+    // best_idx2.
+    for (j = 0; j < histo_queue.size;) {
+      HistogramPair* const p = histo_queue.queue + j;
+      const int is_idx1_best = p->idx1 == best_idx1 || p->idx1 == best_idx2;
+      const int is_idx2_best = p->idx2 == best_idx1 || p->idx2 == best_idx2;
+      int do_eval = 0;
+      // The front pair could have been duplicated by a random pick so
+      // check for it all the time nevertheless.
+      if (is_idx1_best && is_idx2_best) {
+        HistoQueuePopPair(&histo_queue, p);
+        continue;
+      }
+      // Any pair containing one of the two best indices should only refer to
+      // best_idx1. Its cost should also be updated.
+      if (is_idx1_best) {
+        p->idx1 = best_idx1;
+        do_eval = 1;
+      } else if (is_idx2_best) {
+        p->idx2 = best_idx1;
+        do_eval = 1;
+      }
+      // Make sure the index order is respected.
+      if (p->idx1 > p->idx2) {
+        const int tmp = p->idx2;
+        p->idx2 = p->idx1;
+        p->idx1 = tmp;
+      }
+      if (do_eval) {
+        // Re-evaluate the cost of an updated pair.
+        HistoQueueUpdatePair(histograms[p->idx1], histograms[p->idx2], 0., p);
+        if (p->cost_diff >= 0.) {
+          HistoQueuePopPair(&histo_queue, p);
+          continue;
+        }
+      }
+      HistoQueueUpdateHead(&histo_queue, p);
+      ++j;
+    }
+    tries_with_no_success = 0;
+  }
+  *do_greedy = (*num_used <= min_cluster_size);
+  ok = 1;
+
+End:
+  HistoQueueClear(&histo_queue);
+  WebPSafeFree(mappings);
+  return ok;
+}
+
+// -----------------------------------------------------------------------------
+// Histogram refinement
+
+// Find the best 'out' histogram for each of the 'in' histograms.
+// At call-time, 'out' contains the histograms of the clusters.
+// Note: we assume that out[]->bit_cost_ is already up-to-date.
+static void HistogramRemap(const VP8LHistogramSet* const in,
+                           VP8LHistogramSet* const out,
+                           uint16_t* const symbols) {
+  int i;
+  VP8LHistogram** const in_histo = in->histograms;
+  VP8LHistogram** const out_histo = out->histograms;
+  const int in_size = out->max_size;
+  const int out_size = out->size;
+  if (out_size > 1) {
+    for (i = 0; i < in_size; ++i) {
+      int best_out = 0;
+      double best_bits = MAX_COST;
+      int k;
+      if (in_histo[i] == NULL) {
+        // Arbitrarily set to the previous value if unused to help future LZ77.
+        symbols[i] = symbols[i - 1];
+        continue;
+      }
+      for (k = 0; k < out_size; ++k) {
+        double cur_bits;
+        cur_bits = HistogramAddThresh(out_histo[k], in_histo[i], best_bits);
+        if (k == 0 || cur_bits < best_bits) {
+          best_bits = cur_bits;
+          best_out = k;
+        }
+      }
+      symbols[i] = best_out;
+    }
+  } else {
+    assert(out_size == 1);
+    for (i = 0; i < in_size; ++i) {
+      symbols[i] = 0;
+    }
+  }
+
+  // Recompute each out based on raw and symbols.
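+  // (i.e. zero out every cluster histogram, then re-accumulate each remaining
+  // input histogram into the cluster chosen in symbols[], so that the final
+  // cluster histograms exactly match the assignment.)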
+  VP8LHistogramSetClear(out);
+  out->size = out_size;
+
+  for (i = 0; i < in_size; ++i) {
+    int idx;
+    if (in_histo[i] == NULL) continue;
+    idx = symbols[i];
+    HistogramAdd(in_histo[i], out_histo[idx], out_histo[idx]);
+  }
+}
+
+static double GetCombineCostFactor(int histo_size, int quality) {
+  double combine_cost_factor = 0.16;
+  if (quality < 90) {
+    if (histo_size > 256) combine_cost_factor /= 2.;
+    if (histo_size > 512) combine_cost_factor /= 2.;
+    if (histo_size > 1024) combine_cost_factor /= 2.;
+    if (quality <= 50) combine_cost_factor /= 2.;
+  }
+  return combine_cost_factor;
+}
+
+// Given a HistogramSet 'set', the mapping of clusters 'cluster_mapping' and the
+// current assignment of the cells in 'symbols', merge the clusters and
+// assign the smallest possible cluster values.
+static void OptimizeHistogramSymbols(const VP8LHistogramSet* const set,
+                                     uint16_t* const cluster_mappings,
+                                     int num_clusters,
+                                     uint16_t* const cluster_mappings_tmp,
+                                     uint16_t* const symbols) {
+  int i, cluster_max;
+  int do_continue = 1;
+  // First, assign the lowest cluster id to each entry (collapse the chains).
+  while (do_continue) {
+    do_continue = 0;
+    for (i = 0; i < num_clusters; ++i) {
+      int k;
+      k = cluster_mappings[i];
+      while (k != cluster_mappings[k]) {
+        cluster_mappings[k] = cluster_mappings[cluster_mappings[k]];
+        k = cluster_mappings[k];
+      }
+      if (k != cluster_mappings[i]) {
+        do_continue = 1;
+        cluster_mappings[i] = k;
+      }
+    }
+  }
+  // Create a mapping from a cluster id to its minimal version.
+  cluster_max = 0;
+  memset(cluster_mappings_tmp, 0,
+         set->max_size * sizeof(*cluster_mappings_tmp));
+  assert(cluster_mappings[0] == 0);
+  // Re-map the ids.
+  for (i = 0; i < set->max_size; ++i) {
+    int cluster;
+    if (symbols[i] == kInvalidHistogramSymbol) continue;
+    cluster = cluster_mappings[symbols[i]];
+    assert(symbols[i] < num_clusters);
+    if (cluster > 0 && cluster_mappings_tmp[cluster] == 0) {
+      ++cluster_max;
+      cluster_mappings_tmp[cluster] = cluster_max;
+    }
+    symbols[i] = cluster_mappings_tmp[cluster];
+  }
+
+  // Make sure all cluster values are used.
+  cluster_max = 0;
+  for (i = 0; i < set->max_size; ++i) {
+    if (symbols[i] == kInvalidHistogramSymbol) continue;
+    if (symbols[i] <= cluster_max) continue;
+    ++cluster_max;
+    assert(symbols[i] == cluster_max);
+  }
+}
+
+static void RemoveEmptyHistograms(VP8LHistogramSet* const image_histo) {
+  uint32_t size;
+  int i;
+  for (i = 0, size = 0; i < image_histo->size; ++i) {
+    if (image_histo->histograms[i] == NULL) continue;
+    image_histo->histograms[size++] = image_histo->histograms[i];
+  }
+  image_histo->size = size;
+}
+
+int VP8LGetHistoImageSymbols(int xsize, int ysize,
+                             const VP8LBackwardRefs* const refs,
+                             int quality, int low_effort,
+                             int histo_bits, int cache_bits,
+                             VP8LHistogramSet* const image_histo,
+                             VP8LHistogram* const tmp_histo,
+                             uint16_t* const histogram_symbols) {
+  int ok = 0;
+  const int histo_xsize = histo_bits ? VP8LSubSampleSize(xsize, histo_bits) : 1;
+  const int histo_ysize = histo_bits ? VP8LSubSampleSize(ysize, histo_bits) : 1;
+  const int image_histo_raw_size = histo_xsize * histo_ysize;
+  VP8LHistogramSet* const orig_histo =
+      VP8LAllocateHistogramSet(image_histo_raw_size, cache_bits);
+  // Don't attempt linear bin-partition heuristic for
+  // histograms of small sizes (as bin_map will be very sparse) and
+  // maximum quality q==100 (to preserve the compression gains at that level).
+  const int entropy_combine_num_bins = low_effort ? NUM_PARTITIONS : BIN_SIZE;
+  int entropy_combine;
+  uint16_t* const map_tmp =
+      WebPSafeMalloc(2 * image_histo_raw_size, sizeof(*map_tmp));
+  uint16_t* const cluster_mappings = map_tmp + image_histo_raw_size;
+  int num_used = image_histo_raw_size;
+  if (orig_histo == NULL || map_tmp == NULL) goto Error;
+
+  // Construct the histograms from backward references.
+  HistogramBuild(xsize, histo_bits, refs, orig_histo);
+  // Copy the histograms and compute their bit_cost_; histogram_symbols
+  // is set as a by-product.
+  HistogramCopyAndAnalyze(orig_histo, image_histo, &num_used,
+                          histogram_symbols);
+
+  entropy_combine =
+      (num_used > entropy_combine_num_bins * 2) && (quality < 100);
+
+  if (entropy_combine) {
+    uint16_t* const bin_map = map_tmp;
+    const double combine_cost_factor =
+        GetCombineCostFactor(image_histo_raw_size, quality);
+    const uint32_t num_clusters = num_used;
+
+    HistogramAnalyzeEntropyBin(image_histo, bin_map, low_effort);
+    // Collapse histograms with similar entropy.
+    HistogramCombineEntropyBin(image_histo, &num_used, histogram_symbols,
+                               cluster_mappings, tmp_histo, bin_map,
+                               entropy_combine_num_bins, combine_cost_factor,
+                               low_effort);
+    OptimizeHistogramSymbols(image_histo, cluster_mappings, num_clusters,
+                             map_tmp, histogram_symbols);
+  }
+
+  // Don't combine the histograms using stochastic and greedy heuristics for
+  // low-effort compression mode.
+  if (!low_effort || !entropy_combine) {
+    const float x = quality / 100.f;
+    // cubic ramp between 1 and MAX_HISTO_GREEDY:
+    const int threshold_size = (int)(1 + (x * x * x) * (MAX_HISTO_GREEDY - 1));
+    int do_greedy;
+    if (!HistogramCombineStochastic(image_histo, &num_used, threshold_size,
+                                    &do_greedy)) {
+      goto Error;
+    }
+    if (do_greedy) {
+      RemoveEmptyHistograms(image_histo);
+      if (!HistogramCombineGreedy(image_histo, &num_used)) {
+        goto Error;
+      }
+    }
+  }
+
+  // Find the optimal map from original histograms to the final ones.
+  RemoveEmptyHistograms(image_histo);
+  HistogramRemap(orig_histo, image_histo, histogram_symbols);
+
+  ok = 1;
+
+ Error:
+  VP8LFreeHistogramSet(orig_histo);
+  WebPSafeFree(map_tmp);
+  return ok;
+}
diff --git a/vendor/github.com/sizeofint/webpanimation/enc_histogram_enc.h b/vendor/github.com/sizeofint/webpanimation/enc_histogram_enc.h
new file mode 100644
index 00000000..59b308c2
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/enc_histogram_enc.h
@@ -0,0 +1,128 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Author: Jyrki Alakuijala (jyrki@google.com)
+//
+// Models the histograms of literal and distance codes.
+
+#ifndef WEBP_ENC_HISTOGRAM_ENC_H_
+#define WEBP_ENC_HISTOGRAM_ENC_H_
+
+#include <string.h>
+
+#include "enc_backward_references_enc.h"
+#include "webp_format_constants.h"
+#include "webp_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Not a trivial literal symbol.
+#define VP8L_NON_TRIVIAL_SYM (0xffffffff)
+
+// A simple container for histograms of data.
+typedef struct {
+  // literal_ contains green literal, palette-code and
+  // copy-length-prefix histogram
+  uint32_t* literal_;         // Pointer to the allocated buffer for literal.
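+  // (Allocated length: VP8LHistogramNumCodes(palette_code_bits_), i.e.
+  // NUM_LITERAL_CODES + NUM_LENGTH_CODES plus the color-cache codes, if any.)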
+  uint32_t red_[NUM_LITERAL_CODES];
+  uint32_t blue_[NUM_LITERAL_CODES];
+  uint32_t alpha_[NUM_LITERAL_CODES];
+  // Backward reference prefix-code histogram.
+  uint32_t distance_[NUM_DISTANCE_CODES];
+  int palette_code_bits_;
+  uint32_t trivial_symbol_;  // Packed A/R/B value if the Red, Blue & Alpha
+                             // histograms are single-valued,
+                             // VP8L_NON_TRIVIAL_SYM otherwise.
+  double bit_cost_;          // cached value of bit cost.
+  double literal_cost_;      // Cached values of dominant entropy costs:
+  double red_cost_;          // literal, red & blue.
+  double blue_cost_;
+  uint8_t is_used_[5];       // 5 for literal, red, blue, alpha, distance
+} VP8LHistogram;
+
+// Collection of histograms with fixed capacity, allocated as one
+// big memory chunk. Can be destroyed by calling WebPSafeFree().
+typedef struct {
+  int size;         // number of slots currently in use
+  int max_size;     // maximum capacity
+  VP8LHistogram** histograms;
+} VP8LHistogramSet;
+
+// Create the histogram.
+//
+// The input data is the PixOrCopy data, which models the literals, stop
+// codes and backward references (both distances and lengths). Also: if
+// palette_code_bits is >= 0, initialize the histogram with this value.
+void VP8LHistogramCreate(VP8LHistogram* const p,
+                         const VP8LBackwardRefs* const refs,
+                         int palette_code_bits);
+
+// Return the size of the histogram for a given palette_code_bits.
+int VP8LGetHistogramSize(int palette_code_bits);
+
+// Set the palette_code_bits and reset the stats.
+// If init_arrays is true, the arrays are also filled with 0's.
+void VP8LHistogramInit(VP8LHistogram* const p, int palette_code_bits,
+                       int init_arrays);
+
+// Collect all the references into a histogram (without reset)
+void VP8LHistogramStoreRefs(const VP8LBackwardRefs* const refs,
+                            VP8LHistogram* const histo);
+
+// Free the memory allocated for the histogram.
+void VP8LFreeHistogram(VP8LHistogram* const histo);
+
+// Free the memory allocated for the histogram set.
+void VP8LFreeHistogramSet(VP8LHistogramSet* const histo);
+
+// Allocate an array of pointers to histograms, allocated and initialized
+// using 'cache_bits'. Return NULL in case of memory error.
+VP8LHistogramSet* VP8LAllocateHistogramSet(int size, int cache_bits);
+
+// Set the histograms in set to 0.
+void VP8LHistogramSetClear(VP8LHistogramSet* const set);
+
+// Allocate and initialize histogram object with specified 'cache_bits'.
+// Returns NULL in case of memory error.
+// Special case of VP8LAllocateHistogramSet, with size equal to 1.
+VP8LHistogram* VP8LAllocateHistogram(int cache_bits);
+
+// Accumulate a token 'v' into a histogram.
+void VP8LHistogramAddSinglePixOrCopy(VP8LHistogram* const histo,
+                                     const PixOrCopy* const v,
+                                     int (*const distance_modifier)(int, int),
+                                     int distance_modifier_arg0);
+
+static WEBP_INLINE int VP8LHistogramNumCodes(int palette_code_bits) {
+  return NUM_LITERAL_CODES + NUM_LENGTH_CODES +
+      ((palette_code_bits > 0) ? (1 << palette_code_bits) : 0);
+}
+
+// Builds the histogram image.
+int VP8LGetHistoImageSymbols(int xsize, int ysize,
+                             const VP8LBackwardRefs* const refs,
+                             int quality, int low_effort,
+                             int histogram_bits, int cache_bits,
+                             VP8LHistogramSet* const image_in,
+                             VP8LHistogram* const tmp_histo,
+                             uint16_t* const histogram_symbols);
+
+// Returns the entropy for the symbols in the input array.
+double VP8LBitsEntropy(const uint32_t* const array, int n);
+
+// Estimate how many bits the combined entropy of literals and distance
+// approximately maps to.
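+// (The estimate is the sum of the per-channel PopulationCost() terms plus
+// the VP8LExtraCost() of the length and distance extra bits; see the
+// definition in enc_histogram_enc.c.)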
+double VP8LHistogramEstimateBits(VP8LHistogram* const p);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // WEBP_ENC_HISTOGRAM_ENC_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/enc_iterator_enc.c b/vendor/github.com/sizeofint/webpanimation/enc_iterator_enc.c
new file mode 100644
index 00000000..17a9ff3c
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/enc_iterator_enc.c
@@ -0,0 +1,459 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// VP8Iterator: block iterator
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <string.h>
+
+#include "enc_vp8i_enc.h"
+
+//------------------------------------------------------------------------------
+// VP8Iterator
+//------------------------------------------------------------------------------
+
+static void InitLeft(VP8EncIterator* const it) {
+  it->y_left_[-1] = it->u_left_[-1] = it->v_left_[-1] =
+      (it->y_ > 0) ? 129 : 127;
+  memset(it->y_left_, 129, 16);
+  memset(it->u_left_, 129, 8);
+  memset(it->v_left_, 129, 8);
+  it->left_nz_[8] = 0;
+  if (it->top_derr_ != NULL) {
+    memset(&it->left_derr_, 0, sizeof(it->left_derr_));
+  }
+}
+
+static void InitTop(VP8EncIterator* const it) {
+  const VP8Encoder* const enc = it->enc_;
+  const size_t top_size = enc->mb_w_ * 16;
+  memset(enc->y_top_, 127, 2 * top_size);
+  memset(enc->nz_, 0, enc->mb_w_ * sizeof(*enc->nz_));
+  if (enc->top_derr_ != NULL) {
+    memset(enc->top_derr_, 0, enc->mb_w_ * sizeof(*enc->top_derr_));
+  }
+}
+
+void VP8IteratorSetRow(VP8EncIterator* const it, int y) {
+  VP8Encoder* const enc = it->enc_;
+  it->x_ = 0;
+  it->y_ = y;
+  it->bw_ = &enc->parts_[y & (enc->num_parts_ - 1)];
+  it->preds_ = enc->preds_ + y * 4 * enc->preds_w_;
+  it->nz_ = enc->nz_;
+  it->mb_ = enc->mb_info_ + y * enc->mb_w_;
+  it->y_top_ = enc->y_top_;
+  it->uv_top_ = enc->uv_top_;
+  InitLeft(it);
+}
+
+void VP8IteratorReset(VP8EncIterator* const it) {
+  VP8Encoder* const enc = it->enc_;
+  VP8IteratorSetRow(it, 0);
+  VP8IteratorSetCountDown(it, enc->mb_w_ * enc->mb_h_);  // default
+  InitTop(it);
+  memset(it->bit_count_, 0, sizeof(it->bit_count_));
+  it->do_trellis_ = 0;
+}
+
+void VP8IteratorSetCountDown(VP8EncIterator* const it, int count_down) {
+  it->count_down_ = it->count_down0_ = count_down;
+}
+
+int VP8IteratorIsDone(const VP8EncIterator* const it) {
+  return (it->count_down_ <= 0);
+}
+
+void VP8IteratorInit(VP8Encoder* const enc, VP8EncIterator* const it) {
+  it->enc_ = enc;
+  it->yuv_in_ = (uint8_t*)WEBP_ALIGN(it->yuv_mem_);
+  it->yuv_out_ = it->yuv_in_ + YUV_SIZE_ENC;
+  it->yuv_out2_ = it->yuv_out_ + YUV_SIZE_ENC;
+  it->yuv_p_ = it->yuv_out2_ + YUV_SIZE_ENC;
+  it->lf_stats_ = enc->lf_stats_;
+  it->percent0_ = enc->percent_;
+  it->y_left_ = (uint8_t*)WEBP_ALIGN(it->yuv_left_mem_ + 1);
+  it->u_left_ = it->y_left_ + 16 + 16;
+  it->v_left_ = it->u_left_ + 16;
+  it->top_derr_ = enc->top_derr_;
+  VP8IteratorReset(it);
+}
+
+int VP8IteratorProgress(const VP8EncIterator* const it, int delta) {
+  VP8Encoder* const enc = it->enc_;
+  if (delta && enc->pic_->progress_hook != NULL) {
+    const int done = it->count_down0_ - it->count_down_;
+    const int percent = (it->count_down0_ <= 0)
+ ? it->percent0_ + : it->percent0_ + delta * done / it->count_down0_; + return WebPReportProgress(enc->pic_, percent, &enc->percent_); + } + return 1; +} + +//------------------------------------------------------------------------------ +// Import the source samples into the cache. Takes care of replicating +// boundary pixels if necessary. + +static WEBP_INLINE int MinSize(int a, int b) { return (a < b) ? a : b; } + +static void ImportBlock(const uint8_t* src, int src_stride, + uint8_t* dst, int w, int h, int size) { + int i; + for (i = 0; i < h; ++i) { + memcpy(dst, src, w); + if (w < size) { + memset(dst + w, dst[w - 1], size - w); + } + dst += BPS; + src += src_stride; + } + for (i = h; i < size; ++i) { + memcpy(dst, dst - BPS, size); + dst += BPS; + } +} + +static void ImportLine(const uint8_t* src, int src_stride, + uint8_t* dst, int len, int total_len) { + int i; + for (i = 0; i < len; ++i, src += src_stride) dst[i] = *src; + for (; i < total_len; ++i) dst[i] = dst[len - 1]; +} + +void VP8IteratorImport(VP8EncIterator* const it, uint8_t* const tmp_32) { + const VP8Encoder* const enc = it->enc_; + const int x = it->x_, y = it->y_; + const WebPPicture* const pic = enc->pic_; + const uint8_t* const ysrc = pic->y + (y * pic->y_stride + x) * 16; + const uint8_t* const usrc = pic->u + (y * pic->uv_stride + x) * 8; + const uint8_t* const vsrc = pic->v + (y * pic->uv_stride + x) * 8; + const int w = MinSize(pic->width - x * 16, 16); + const int h = MinSize(pic->height - y * 16, 16); + const int uv_w = (w + 1) >> 1; + const int uv_h = (h + 1) >> 1; + + ImportBlock(ysrc, pic->y_stride, it->yuv_in_ + Y_OFF_ENC, w, h, 16); + ImportBlock(usrc, pic->uv_stride, it->yuv_in_ + U_OFF_ENC, uv_w, uv_h, 8); + ImportBlock(vsrc, pic->uv_stride, it->yuv_in_ + V_OFF_ENC, uv_w, uv_h, 8); + + if (tmp_32 == NULL) return; + + // Import source (uncompressed) samples into boundary. + if (x == 0) { + InitLeft(it); + } else { + if (y == 0) { + it->y_left_[-1] = it->u_left_[-1] = it->v_left_[-1] = 127; + } else { + it->y_left_[-1] = ysrc[- 1 - pic->y_stride]; + it->u_left_[-1] = usrc[- 1 - pic->uv_stride]; + it->v_left_[-1] = vsrc[- 1 - pic->uv_stride]; + } + ImportLine(ysrc - 1, pic->y_stride, it->y_left_, h, 16); + ImportLine(usrc - 1, pic->uv_stride, it->u_left_, uv_h, 8); + ImportLine(vsrc - 1, pic->uv_stride, it->v_left_, uv_h, 8); + } + + it->y_top_ = tmp_32 + 0; + it->uv_top_ = tmp_32 + 16; + if (y == 0) { + memset(tmp_32, 127, 32 * sizeof(*tmp_32)); + } else { + ImportLine(ysrc - pic->y_stride, 1, tmp_32, w, 16); + ImportLine(usrc - pic->uv_stride, 1, tmp_32 + 16, uv_w, 8); + ImportLine(vsrc - pic->uv_stride, 1, tmp_32 + 16 + 8, uv_w, 8); + } +} + +//------------------------------------------------------------------------------ +// Copy back the compressed samples into user space if requested. 
+ +static void ExportBlock(const uint8_t* src, uint8_t* dst, int dst_stride, + int w, int h) { + while (h-- > 0) { + memcpy(dst, src, w); + dst += dst_stride; + src += BPS; + } +} + +void VP8IteratorExport(const VP8EncIterator* const it) { + const VP8Encoder* const enc = it->enc_; + if (enc->config_->show_compressed) { + const int x = it->x_, y = it->y_; + const uint8_t* const ysrc = it->yuv_out_ + Y_OFF_ENC; + const uint8_t* const usrc = it->yuv_out_ + U_OFF_ENC; + const uint8_t* const vsrc = it->yuv_out_ + V_OFF_ENC; + const WebPPicture* const pic = enc->pic_; + uint8_t* const ydst = pic->y + (y * pic->y_stride + x) * 16; + uint8_t* const udst = pic->u + (y * pic->uv_stride + x) * 8; + uint8_t* const vdst = pic->v + (y * pic->uv_stride + x) * 8; + int w = (pic->width - x * 16); + int h = (pic->height - y * 16); + + if (w > 16) w = 16; + if (h > 16) h = 16; + + // Luma plane + ExportBlock(ysrc, ydst, pic->y_stride, w, h); + + { // U/V planes + const int uv_w = (w + 1) >> 1; + const int uv_h = (h + 1) >> 1; + ExportBlock(usrc, udst, pic->uv_stride, uv_w, uv_h); + ExportBlock(vsrc, vdst, pic->uv_stride, uv_w, uv_h); + } + } +} + +//------------------------------------------------------------------------------ +// Non-zero contexts setup/teardown + +// Nz bits: +// 0 1 2 3 Y +// 4 5 6 7 +// 8 9 10 11 +// 12 13 14 15 +// 16 17 U +// 18 19 +// 20 21 V +// 22 23 +// 24 DC-intra16 + +// Convert packed context to byte array +#define BIT(nz, n) (!!((nz) & (1 << (n)))) + +void VP8IteratorNzToBytes(VP8EncIterator* const it) { + const int tnz = it->nz_[0], lnz = it->nz_[-1]; + int* const top_nz = it->top_nz_; + int* const left_nz = it->left_nz_; + + // Top-Y + top_nz[0] = BIT(tnz, 12); + top_nz[1] = BIT(tnz, 13); + top_nz[2] = BIT(tnz, 14); + top_nz[3] = BIT(tnz, 15); + // Top-U + top_nz[4] = BIT(tnz, 18); + top_nz[5] = BIT(tnz, 19); + // Top-V + top_nz[6] = BIT(tnz, 22); + top_nz[7] = BIT(tnz, 23); + // DC + top_nz[8] = BIT(tnz, 24); + + // left-Y + left_nz[0] = BIT(lnz, 3); + left_nz[1] = BIT(lnz, 7); + left_nz[2] = BIT(lnz, 11); + left_nz[3] = BIT(lnz, 15); + // left-U + left_nz[4] = BIT(lnz, 17); + left_nz[5] = BIT(lnz, 19); + // left-V + left_nz[6] = BIT(lnz, 21); + left_nz[7] = BIT(lnz, 23); + // left-DC is special, iterated separately +} + +void VP8IteratorBytesToNz(VP8EncIterator* const it) { + uint32_t nz = 0; + const int* const top_nz = it->top_nz_; + const int* const left_nz = it->left_nz_; + // top + nz |= (top_nz[0] << 12) | (top_nz[1] << 13); + nz |= (top_nz[2] << 14) | (top_nz[3] << 15); + nz |= (top_nz[4] << 18) | (top_nz[5] << 19); + nz |= (top_nz[6] << 22) | (top_nz[7] << 23); + nz |= (top_nz[8] << 24); // we propagate the _top_ bit, esp. for intra4 + // left + nz |= (left_nz[0] << 3) | (left_nz[1] << 7); + nz |= (left_nz[2] << 11); + nz |= (left_nz[4] << 17) | (left_nz[6] << 21); + + *it->nz_ = nz; +} + +#undef BIT + +//------------------------------------------------------------------------------ +// Advance to the next position, doing the bookkeeping. + +void VP8IteratorSaveBoundary(VP8EncIterator* const it) { + VP8Encoder* const enc = it->enc_; + const int x = it->x_, y = it->y_; + const uint8_t* const ysrc = it->yuv_out_ + Y_OFF_ENC; + const uint8_t* const uvsrc = it->yuv_out_ + U_OFF_ENC; + if (x < enc->mb_w_ - 1) { // left + int i; + for (i = 0; i < 16; ++i) { + it->y_left_[i] = ysrc[15 + i * BPS]; + } + for (i = 0; i < 8; ++i) { + it->u_left_[i] = uvsrc[7 + i * BPS]; + it->v_left_[i] = uvsrc[15 + i * BPS]; + } + // top-left (before 'top'!) 
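+    // (y_top_/uv_top_ still hold the previous row at this point; they are
+    // only overwritten below when the new 'top' samples are saved.)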
+    it->y_left_[-1] = it->y_top_[15];
+    it->u_left_[-1] = it->uv_top_[0 + 7];
+    it->v_left_[-1] = it->uv_top_[8 + 7];
+  }
+  if (y < enc->mb_h_ - 1) {  // top
+    memcpy(it->y_top_, ysrc + 15 * BPS, 16);
+    memcpy(it->uv_top_, uvsrc + 7 * BPS, 8 + 8);
+  }
+}
+
+int VP8IteratorNext(VP8EncIterator* const it) {
+  if (++it->x_ == it->enc_->mb_w_) {
+    VP8IteratorSetRow(it, ++it->y_);
+  } else {
+    it->preds_ += 4;
+    it->mb_ += 1;
+    it->nz_ += 1;
+    it->y_top_ += 16;
+    it->uv_top_ += 16;
+  }
+  return (0 < --it->count_down_);
+}
+
+//------------------------------------------------------------------------------
+// Helper function to set mode properties
+
+void VP8SetIntra16Mode(const VP8EncIterator* const it, int mode) {
+  uint8_t* preds = it->preds_;
+  int y;
+  for (y = 0; y < 4; ++y) {
+    memset(preds, mode, 4);
+    preds += it->enc_->preds_w_;
+  }
+  it->mb_->type_ = 1;
+}
+
+void VP8SetIntra4Mode(const VP8EncIterator* const it, const uint8_t* modes) {
+  uint8_t* preds = it->preds_;
+  int y;
+  for (y = 4; y > 0; --y) {
+    memcpy(preds, modes, 4 * sizeof(*modes));
+    preds += it->enc_->preds_w_;
+    modes += 4;
+  }
+  it->mb_->type_ = 0;
+}
+
+void VP8SetIntraUVMode(const VP8EncIterator* const it, int mode) {
+  it->mb_->uv_mode_ = mode;
+}
+
+void VP8SetSkip(const VP8EncIterator* const it, int skip) {
+  it->mb_->skip_ = skip;
+}
+
+void VP8SetSegment(const VP8EncIterator* const it, int segment) {
+  it->mb_->segment_ = segment;
+}
+
+//------------------------------------------------------------------------------
+// Intra4x4 sub-blocks iteration
+//
+// We store and update the boundary samples into an array of 37 pixels. They
+// are updated as we iterate and reconstruct each intra4x4 block in turn.
+// The position of the samples has the following snake pattern:
+//
+// 16|17 18 19 20|21 22 23 24|25 26 27 28|29 30 31 32|33 34 35 36  <- Top-right
+// --+-----------+-----------+-----------+-----------+
+// 15|         19|         23|         27|         31|
+// 14|         18|         22|         26|         30|
+// 13|         17|         21|         25|         29|
+// 12|13 14 15 16|17 18 19 20|21 22 23 24|25 26 27 28|
+// --+-----------+-----------+-----------+-----------+
+// 11|         15|         19|         23|         27|
+// 10|         14|         18|         22|         26|
+//  9|         13|         17|         21|         25|
+//  8| 9 10 11 12|13 14 15 16|17 18 19 20|21 22 23 24|
+// --+-----------+-----------+-----------+-----------+
+//  7|         11|         15|         19|         23|
+//  6|         10|         14|         18|         22|
+//  5|          9|         13|         17|         21|
+//  4| 5  6  7  8| 9 10 11 12|13 14 15 16|17 18 19 20|
+// --+-----------+-----------+-----------+-----------+
+//  3|          7|         11|         15|         19|
+//  2|          6|         10|         14|         18|
+//  1|          5|          9|         13|         17|
+//  0| 1  2  3  4| 5  6  7  8| 9 10 11 12|13 14 15 16|
+// --+-----------+-----------+-----------+-----------+
+
+// Array to record the position of the top sample to pass to the prediction
+// functions in dsp.c.
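+// For instance, sub-block #0 (top-left) uses i4_boundary_ + 17, the row just
+// above the macroblock, while sub-block #4, the first one of the second row,
+// uses i4_boundary_ + 13 (see the snake pattern above).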
+static const uint8_t VP8TopLeftI4[16] = {
+  17, 21, 25, 29,
+  13, 17, 21, 25,
+  9,  13, 17, 21,
+  5,  9,  13, 17
+};
+
+void VP8IteratorStartI4(VP8EncIterator* const it) {
+  const VP8Encoder* const enc = it->enc_;
+  int i;
+
+  it->i4_ = 0;    // first 4x4 sub-block
+  it->i4_top_ = it->i4_boundary_ + VP8TopLeftI4[0];
+
+  // Import the boundary samples
+  for (i = 0; i < 17; ++i) {    // left
+    it->i4_boundary_[i] = it->y_left_[15 - i];
+  }
+  for (i = 0; i < 16; ++i) {    // top
+    it->i4_boundary_[17 + i] = it->y_top_[i];
+  }
+  // top-right samples have a special case on the far right of the picture
+  if (it->x_ < enc->mb_w_ - 1) {
+    for (i = 16; i < 16 + 4; ++i) {
+      it->i4_boundary_[17 + i] = it->y_top_[i];
+    }
+  } else {    // else, replicate the last valid pixel four times
+    for (i = 16; i < 16 + 4; ++i) {
+      it->i4_boundary_[17 + i] = it->i4_boundary_[17 + 15];
+    }
+  }
+  VP8IteratorNzToBytes(it);  // import the non-zero context
+}
+
+int VP8IteratorRotateI4(VP8EncIterator* const it,
+                        const uint8_t* const yuv_out) {
+  const uint8_t* const blk = yuv_out + VP8Scan[it->i4_];
+  uint8_t* const top = it->i4_top_;
+  int i;
+
+  // Update the cache with 7 fresh samples
+  for (i = 0; i <= 3; ++i) {
+    top[-4 + i] = blk[i + 3 * BPS];   // store future top samples
+  }
+  if ((it->i4_ & 3) != 3) {  // if not on the right sub-blocks #3, #7, #11, #15
+    for (i = 0; i <= 2; ++i) {        // store future left samples
+      top[i] = blk[3 + (2 - i) * BPS];
+    }
+  } else {  // else replicate top-right samples, as the specs say.
+    for (i = 0; i <= 3; ++i) {
+      top[i] = top[i + 4];
+    }
+  }
+  // move pointers to next sub-block
+  ++it->i4_;
+  if (it->i4_ == 16) {    // we're done
+    return 0;
+  }
+
+  it->i4_top_ = it->i4_boundary_ + VP8TopLeftI4[it->i4_];
+  return 1;
+}
+
+//------------------------------------------------------------------------------
diff --git a/vendor/github.com/sizeofint/webpanimation/enc_near_lossless_enc.c b/vendor/github.com/sizeofint/webpanimation/enc_near_lossless_enc.c
new file mode 100644
index 00000000..db0ccdc5
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/enc_near_lossless_enc.c
@@ -0,0 +1,151 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Near-lossless image preprocessing adjusts pixel values to help
+// compressibility with a guarantee of maximum deviation between original and
+// resulting pixel values.
+//
+// Author: Jyrki Alakuijala (jyrki@google.com)
+// Converted to C by Aleksander Kramarz (akramarz@google.com)
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include "dsp_lossless_common.h"
+#include "utils_utils.h"
+#include "enc_vp8li_enc.h"
+
+#if (WEBP_NEAR_LOSSLESS == 1)
+
+#define MIN_DIM_FOR_NEAR_LOSSLESS 64
+#define MAX_LIMIT_BITS 5
+
+// Quantizes the value up or down to a multiple of 1<<bits (or to 255).
+static uint32_t FindClosestDiscretized(uint32_t a, int bits) {
+  const uint32_t mask = (1u << bits) - 1;
+  const uint32_t biased = a + (mask >> 1) + ((a >> bits) & 1);
+  assert(bits > 0);
+  if (biased > 0xff) return 0xff;
+  return biased & ~mask;
+}
+
+// Applies FindClosestDiscretized to all channels of pixel.
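+// E.g. with bits == 2 (mask == 3), a == 127 gives
+// biased == 127 + 1 + 1 == 129 and 129 & ~3 == 128: 127 snaps to 128, the
+// closest multiple of 4.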
+static uint32_t ClosestDiscretizedArgb(uint32_t a, int bits) { + return + (FindClosestDiscretized(a >> 24, bits) << 24) | + (FindClosestDiscretized((a >> 16) & 0xff, bits) << 16) | + (FindClosestDiscretized((a >> 8) & 0xff, bits) << 8) | + (FindClosestDiscretized(a & 0xff, bits)); +} + +// Checks if distance between corresponding channel values of pixels a and b +// is within the given limit. +static int IsNear(uint32_t a, uint32_t b, int limit) { + int k; + for (k = 0; k < 4; ++k) { + const int delta = + (int)((a >> (k * 8)) & 0xff) - (int)((b >> (k * 8)) & 0xff); + if (delta >= limit || delta <= -limit) { + return 0; + } + } + return 1; +} + +static int IsSmooth(const uint32_t* const prev_row, + const uint32_t* const curr_row, + const uint32_t* const next_row, + int ix, int limit) { + // Check that all pixels in 4-connected neighborhood are smooth. + return (IsNear(curr_row[ix], curr_row[ix - 1], limit) && + IsNear(curr_row[ix], curr_row[ix + 1], limit) && + IsNear(curr_row[ix], prev_row[ix], limit) && + IsNear(curr_row[ix], next_row[ix], limit)); +} + +// Adjusts pixel values of image with given maximum error. +static void NearLossless(int xsize, int ysize, const uint32_t* argb_src, + int stride, int limit_bits, uint32_t* copy_buffer, + uint32_t* argb_dst) { + int x, y; + const int limit = 1 << limit_bits; + uint32_t* prev_row = copy_buffer; + uint32_t* curr_row = prev_row + xsize; + uint32_t* next_row = curr_row + xsize; + memcpy(curr_row, argb_src, xsize * sizeof(argb_src[0])); + memcpy(next_row, argb_src + stride, xsize * sizeof(argb_src[0])); + + for (y = 0; y < ysize; ++y, argb_src += stride, argb_dst += xsize) { + if (y == 0 || y == ysize - 1) { + memcpy(argb_dst, argb_src, xsize * sizeof(argb_src[0])); + } else { + memcpy(next_row, argb_src + stride, xsize * sizeof(argb_src[0])); + argb_dst[0] = argb_src[0]; + argb_dst[xsize - 1] = argb_src[xsize - 1]; + for (x = 1; x < xsize - 1; ++x) { + if (IsSmooth(prev_row, curr_row, next_row, x, limit)) { + argb_dst[x] = curr_row[x]; + } else { + argb_dst[x] = ClosestDiscretizedArgb(curr_row[x], limit_bits); + } + } + } + { + // Three-way swap. + uint32_t* const temp = prev_row; + prev_row = curr_row; + curr_row = next_row; + next_row = temp; + } + } +} + +int VP8ApplyNearLossless(const WebPPicture* const picture, int quality, + uint32_t* const argb_dst) { + int i; + const int xsize = picture->width; + const int ysize = picture->height; + const int stride = picture->argb_stride; + uint32_t* const copy_buffer = + (uint32_t*)WebPSafeMalloc(xsize * 3, sizeof(*copy_buffer)); + const int limit_bits = VP8LNearLosslessBits(quality); + assert(argb_dst != NULL); + assert(limit_bits > 0); + assert(limit_bits <= MAX_LIMIT_BITS); + if (copy_buffer == NULL) { + return 0; + } + // For small icon images, don't attempt to apply near-lossless compression. + if ((xsize < MIN_DIM_FOR_NEAR_LOSSLESS && + ysize < MIN_DIM_FOR_NEAR_LOSSLESS) || + ysize < 3) { + for (i = 0; i < ysize; ++i) { + memcpy(argb_dst + i * xsize, picture->argb + i * picture->argb_stride, + xsize * sizeof(*argb_dst)); + } + WebPSafeFree(copy_buffer); + return 1; + } + + NearLossless(xsize, ysize, picture->argb, stride, limit_bits, copy_buffer, + argb_dst); + for (i = limit_bits - 1; i != 0; --i) { + NearLossless(xsize, ysize, argb_dst, xsize, i, copy_buffer, argb_dst); + } + WebPSafeFree(copy_buffer); + return 1; +} +#else // (WEBP_NEAR_LOSSLESS == 1) + +// Define a stub to suppress compiler warnings. 
+extern void VP8LNearLosslessStub(void);
+void VP8LNearLosslessStub(void) {}
+
+#endif  // (WEBP_NEAR_LOSSLESS == 1)
diff --git a/vendor/github.com/sizeofint/webpanimation/enc_picture_csp_enc.c b/vendor/github.com/sizeofint/webpanimation/enc_picture_csp_enc.c
new file mode 100644
index 00000000..b88930c8
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/enc_picture_csp_enc.c
@@ -0,0 +1,1210 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// WebPPicture utils for colorspace conversion
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <assert.h>
+#include <stdlib.h>
+#include <math.h>
+
+#include "enc_vp8i_enc.h"
+#include "utils_random_utils.h"
+#include "utils_utils.h"
+#include "dsp_dsp.h"
+#include "dsp_lossless.h"
+#include "dsp_yuv.h"
+
+// Comment out the following line to disable gamma-compression during
+// RGB->U/V averaging.
+#define USE_GAMMA_COMPRESSION
+
+// If defined, use table to compute x / alpha.
+#define USE_INVERSE_ALPHA_TABLE
+
+#ifdef WORDS_BIGENDIAN
+// uint32_t 0xff000000 is 0xff,00,00,00 in memory
+#define CHANNEL_OFFSET(i) (i)
+#else
+// uint32_t 0xff000000 is 0x00,00,00,ff in memory
+#define CHANNEL_OFFSET(i) (3-(i))
+#endif
+
+#define ALPHA_OFFSET CHANNEL_OFFSET(0)
+
+//------------------------------------------------------------------------------
+// Detection of non-trivial transparency
+
+// Returns true if alpha[] has non-0xff values.
+static int CheckNonOpaque(const uint8_t* alpha, int width, int height,
+                          int x_step, int y_step) {
+  if (alpha == NULL) return 0;
+  WebPInitAlphaProcessing();
+  if (x_step == 1) {
+    for (; height-- > 0; alpha += y_step) {
+      if (WebPHasAlpha8b(alpha, width)) return 1;
+    }
+  } else {
+    for (; height-- > 0; alpha += y_step) {
+      if (WebPHasAlpha32b(alpha, width)) return 1;
+    }
+  }
+  return 0;
+}
+
+// Checking for the presence of non-opaque alpha.
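+// For ARGB input the alpha bytes sit every 4 bytes, hence the x_step of 4
+// below; for YUVA input the separate alpha plane is scanned with x_step == 1.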
+int WebPPictureHasTransparency(const WebPPicture* picture) { + if (picture == NULL) return 0; + if (picture->use_argb) { + const int alpha_offset = ALPHA_OFFSET; + return CheckNonOpaque((const uint8_t*)picture->argb + alpha_offset, + picture->width, picture->height, + 4, picture->argb_stride * sizeof(*picture->argb)); + } + return CheckNonOpaque(picture->a, picture->width, picture->height, + 1, picture->a_stride); +} + +//------------------------------------------------------------------------------ +// Code for gamma correction + +#if defined(USE_GAMMA_COMPRESSION) + +// gamma-compensates loss of resolution during chroma subsampling +#define kGamma 0.80 // for now we use a different gamma value than kGammaF +#define kGammaFix 12 // fixed-point precision for linear values +#define kGammaScale ((1 << kGammaFix) - 1) +#define kGammaTabFix 7 // fixed-point fractional bits precision +#define kGammaTabScale (1 << kGammaTabFix) +#define kGammaTabRounder (kGammaTabScale >> 1) +#define kGammaTabSize (1 << (kGammaFix - kGammaTabFix)) + +static int kLinearToGammaTab[kGammaTabSize + 1]; +static uint16_t kGammaToLinearTab[256]; +static volatile int kGammaTablesOk = 0; +static void InitGammaTables(void); + +WEBP_DSP_INIT_FUNC(InitGammaTables) { + if (!kGammaTablesOk) { + int v; + const double scale = (double)(1 << kGammaTabFix) / kGammaScale; + const double norm = 1. / 255.; + for (v = 0; v <= 255; ++v) { + kGammaToLinearTab[v] = + (uint16_t)(pow(norm * v, kGamma) * kGammaScale + .5); + } + for (v = 0; v <= kGammaTabSize; ++v) { + kLinearToGammaTab[v] = (int)(255. * pow(scale * v, 1. / kGamma) + .5); + } + kGammaTablesOk = 1; + } +} + +static WEBP_INLINE uint32_t GammaToLinear(uint8_t v) { + return kGammaToLinearTab[v]; +} + +static WEBP_INLINE int Interpolate(int v) { + const int tab_pos = v >> (kGammaTabFix + 2); // integer part + const int x = v & ((kGammaTabScale << 2) - 1); // fractional part + const int v0 = kLinearToGammaTab[tab_pos]; + const int v1 = kLinearToGammaTab[tab_pos + 1]; + const int y = v1 * x + v0 * ((kGammaTabScale << 2) - x); // interpolate + assert(tab_pos + 1 < kGammaTabSize + 1); + return y; +} + +// Convert a linear value 'v' to YUV_FIX+2 fixed-point precision +// U/V value, suitable for RGBToU/V calls. +static WEBP_INLINE int LinearToGamma(uint32_t base_value, int shift) { + const int y = Interpolate(base_value << shift); // final uplifted value + return (y + kGammaTabRounder) >> kGammaTabFix; // descale +} + +#else + +static void InitGammaTables(void) {} +static WEBP_INLINE uint32_t GammaToLinear(uint8_t v) { return v; } +static WEBP_INLINE int LinearToGamma(uint32_t base_value, int shift) { + return (int)(base_value << shift); +} + +#endif // USE_GAMMA_COMPRESSION + +//------------------------------------------------------------------------------ +// RGB -> YUV conversion + +static int RGBToY(int r, int g, int b, VP8Random* const rg) { + return (rg == NULL) ? VP8RGBToY(r, g, b, YUV_HALF) + : VP8RGBToY(r, g, b, VP8RandomBits(rg, YUV_FIX)); +} + +static int RGBToU(int r, int g, int b, VP8Random* const rg) { + return (rg == NULL) ? VP8RGBToU(r, g, b, YUV_HALF << 2) + : VP8RGBToU(r, g, b, VP8RandomBits(rg, YUV_FIX + 2)); +} + +static int RGBToV(int r, int g, int b, VP8Random* const rg) { + return (rg == NULL) ? 
VP8RGBToV(r, g, b, YUV_HALF << 2) + : VP8RGBToV(r, g, b, VP8RandomBits(rg, YUV_FIX + 2)); +} + +//------------------------------------------------------------------------------ +// Sharp RGB->YUV conversion + +static const int kNumIterations = 4; +static const int kMinDimensionIterativeConversion = 4; + +// We could use SFIX=0 and only uint8_t for fixed_y_t, but it produces some +// banding sometimes. Better use extra precision. +#define SFIX 2 // fixed-point precision of RGB and Y/W +typedef int16_t fixed_t; // signed type with extra SFIX precision for UV +typedef uint16_t fixed_y_t; // unsigned type with extra SFIX precision for W + +#define SHALF (1 << SFIX >> 1) +#define MAX_Y_T ((256 << SFIX) - 1) +#define SROUNDER (1 << (YUV_FIX + SFIX - 1)) + +#if defined(USE_GAMMA_COMPRESSION) + +// We use tables of different size and precision for the Rec709 / BT2020 +// transfer function. +#define kGammaF (1./0.45) +static uint32_t kLinearToGammaTabS[kGammaTabSize + 2]; +#define GAMMA_TO_LINEAR_BITS 14 +static uint32_t kGammaToLinearTabS[MAX_Y_T + 1]; // size scales with Y_FIX +static volatile int kGammaTablesSOk = 0; +static void InitGammaTablesS(void); + +WEBP_DSP_INIT_FUNC(InitGammaTablesS) { + assert(2 * GAMMA_TO_LINEAR_BITS < 32); // we use uint32_t intermediate values + if (!kGammaTablesSOk) { + int v; + const double norm = 1. / MAX_Y_T; + const double scale = 1. / kGammaTabSize; + const double a = 0.09929682680944; + const double thresh = 0.018053968510807; + const double final_scale = 1 << GAMMA_TO_LINEAR_BITS; + for (v = 0; v <= MAX_Y_T; ++v) { + const double g = norm * v; + double value; + if (g <= thresh * 4.5) { + value = g / 4.5; + } else { + const double a_rec = 1. / (1. + a); + value = pow(a_rec * (g + a), kGammaF); + } + kGammaToLinearTabS[v] = (uint32_t)(value * final_scale + .5); + } + for (v = 0; v <= kGammaTabSize; ++v) { + const double g = scale * v; + double value; + if (g <= thresh) { + value = 4.5 * g; + } else { + value = (1. + a) * pow(g, 1. / kGammaF) - a; + } + // we already incorporate the 1/2 rounding constant here + kLinearToGammaTabS[v] = + (uint32_t)(MAX_Y_T * value) + (1 << GAMMA_TO_LINEAR_BITS >> 1); + } + // to prevent small rounding errors to cause read-overflow: + kLinearToGammaTabS[kGammaTabSize + 1] = kLinearToGammaTabS[kGammaTabSize]; + kGammaTablesSOk = 1; + } +} + +// return value has a fixed-point precision of GAMMA_TO_LINEAR_BITS +static WEBP_INLINE uint32_t GammaToLinearS(int v) { + return kGammaToLinearTabS[v]; +} + +static WEBP_INLINE uint32_t LinearToGammaS(uint32_t value) { + // 'value' is in GAMMA_TO_LINEAR_BITS fractional precision + const uint32_t v = value * kGammaTabSize; + const uint32_t tab_pos = v >> GAMMA_TO_LINEAR_BITS; + // fractional part, in GAMMA_TO_LINEAR_BITS fixed-point precision + const uint32_t x = v - (tab_pos << GAMMA_TO_LINEAR_BITS); // fractional part + // v0 / v1 are in GAMMA_TO_LINEAR_BITS fixed-point precision (range [0..1]) + const uint32_t v0 = kLinearToGammaTabS[tab_pos + 0]; + const uint32_t v1 = kLinearToGammaTabS[tab_pos + 1]; + // Final interpolation. Note that rounding is already included. + const uint32_t v2 = (v1 - v0) * x; // note: v1 >= v0. 
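+  // (v2 carries GAMMA_TO_LINEAR_BITS extra fractional bits, and the 1/2
+  // rounding constant was already folded into the table entries by
+  // InitGammaTablesS(), so a single right-shift completes the interpolation.)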
+ const uint32_t result = v0 + (v2 >> GAMMA_TO_LINEAR_BITS); + return result; +} + +#else + +static void InitGammaTablesS(void) {} +static WEBP_INLINE uint32_t GammaToLinearS(int v) { + return (v << GAMMA_TO_LINEAR_BITS) / MAX_Y_T; +} +static WEBP_INLINE uint32_t LinearToGammaS(uint32_t value) { + return (MAX_Y_T * value) >> GAMMA_TO_LINEAR_BITS; +} + +#endif // USE_GAMMA_COMPRESSION + +//------------------------------------------------------------------------------ + +static uint8_t clip_8b(fixed_t v) { + return (!(v & ~0xff)) ? (uint8_t)v : (v < 0) ? 0u : 255u; +} + +static fixed_y_t clip_y(int y) { + return (!(y & ~MAX_Y_T)) ? (fixed_y_t)y : (y < 0) ? 0 : MAX_Y_T; +} + +//------------------------------------------------------------------------------ + +static int RGBToGray(int r, int g, int b) { + const int luma = 13933 * r + 46871 * g + 4732 * b + YUV_HALF; + return (luma >> YUV_FIX); +} + +static uint32_t ScaleDown(int a, int b, int c, int d) { + const uint32_t A = GammaToLinearS(a); + const uint32_t B = GammaToLinearS(b); + const uint32_t C = GammaToLinearS(c); + const uint32_t D = GammaToLinearS(d); + return LinearToGammaS((A + B + C + D + 2) >> 2); +} + +static WEBP_INLINE void UpdateW(const fixed_y_t* src, fixed_y_t* dst, int w) { + int i; + for (i = 0; i < w; ++i) { + const uint32_t R = GammaToLinearS(src[0 * w + i]); + const uint32_t G = GammaToLinearS(src[1 * w + i]); + const uint32_t B = GammaToLinearS(src[2 * w + i]); + const uint32_t Y = RGBToGray(R, G, B); + dst[i] = (fixed_y_t)LinearToGammaS(Y); + } +} + +static void UpdateChroma(const fixed_y_t* src1, const fixed_y_t* src2, + fixed_t* dst, int uv_w) { + int i; + for (i = 0; i < uv_w; ++i) { + const int r = ScaleDown(src1[0 * uv_w + 0], src1[0 * uv_w + 1], + src2[0 * uv_w + 0], src2[0 * uv_w + 1]); + const int g = ScaleDown(src1[2 * uv_w + 0], src1[2 * uv_w + 1], + src2[2 * uv_w + 0], src2[2 * uv_w + 1]); + const int b = ScaleDown(src1[4 * uv_w + 0], src1[4 * uv_w + 1], + src2[4 * uv_w + 0], src2[4 * uv_w + 1]); + const int W = RGBToGray(r, g, b); + dst[0 * uv_w] = (fixed_t)(r - W); + dst[1 * uv_w] = (fixed_t)(g - W); + dst[2 * uv_w] = (fixed_t)(b - W); + dst += 1; + src1 += 2; + src2 += 2; + } +} + +static void StoreGray(const fixed_y_t* rgb, fixed_y_t* y, int w) { + int i; + for (i = 0; i < w; ++i) { + y[i] = RGBToGray(rgb[0 * w + i], rgb[1 * w + i], rgb[2 * w + i]); + } +} + +//------------------------------------------------------------------------------ + +static WEBP_INLINE fixed_y_t Filter2(int A, int B, int W0) { + const int v0 = (A * 3 + B + 2) >> 2; + return clip_y(v0 + W0); +} + +//------------------------------------------------------------------------------ + +static WEBP_INLINE fixed_y_t UpLift(uint8_t a) { // 8bit -> SFIX + return ((fixed_y_t)a << SFIX) | SHALF; +} + +static void ImportOneRow(const uint8_t* const r_ptr, + const uint8_t* const g_ptr, + const uint8_t* const b_ptr, + int step, + int pic_width, + fixed_y_t* const dst) { + int i; + const int w = (pic_width + 1) & ~1; + for (i = 0; i < pic_width; ++i) { + const int off = i * step; + dst[i + 0 * w] = UpLift(r_ptr[off]); + dst[i + 1 * w] = UpLift(g_ptr[off]); + dst[i + 2 * w] = UpLift(b_ptr[off]); + } + if (pic_width & 1) { // replicate rightmost pixel + dst[pic_width + 0 * w] = dst[pic_width + 0 * w - 1]; + dst[pic_width + 1 * w] = dst[pic_width + 1 * w - 1]; + dst[pic_width + 2 * w] = dst[pic_width + 2 * w - 1]; + } +} + +static void InterpolateTwoRows(const fixed_y_t* const best_y, + const fixed_t* prev_uv, + const fixed_t* cur_uv, + const 
fixed_t* next_uv, + int w, + fixed_y_t* out1, + fixed_y_t* out2) { + const int uv_w = w >> 1; + const int len = (w - 1) >> 1; // length to filter + int k = 3; + while (k-- > 0) { // process each R/G/B segments in turn + // special boundary case for i==0 + out1[0] = Filter2(cur_uv[0], prev_uv[0], best_y[0]); + out2[0] = Filter2(cur_uv[0], next_uv[0], best_y[w]); + + WebPSharpYUVFilterRow(cur_uv, prev_uv, len, best_y + 0 + 1, out1 + 1); + WebPSharpYUVFilterRow(cur_uv, next_uv, len, best_y + w + 1, out2 + 1); + + // special boundary case for i == w - 1 when w is even + if (!(w & 1)) { + out1[w - 1] = Filter2(cur_uv[uv_w - 1], prev_uv[uv_w - 1], + best_y[w - 1 + 0]); + out2[w - 1] = Filter2(cur_uv[uv_w - 1], next_uv[uv_w - 1], + best_y[w - 1 + w]); + } + out1 += w; + out2 += w; + prev_uv += uv_w; + cur_uv += uv_w; + next_uv += uv_w; + } +} + +static WEBP_INLINE uint8_t ConvertRGBToY(int r, int g, int b) { + const int luma = 16839 * r + 33059 * g + 6420 * b + SROUNDER; + return clip_8b(16 + (luma >> (YUV_FIX + SFIX))); +} + +static WEBP_INLINE uint8_t ConvertRGBToU(int r, int g, int b) { + const int u = -9719 * r - 19081 * g + 28800 * b + SROUNDER; + return clip_8b(128 + (u >> (YUV_FIX + SFIX))); +} + +static WEBP_INLINE uint8_t ConvertRGBToV(int r, int g, int b) { + const int v = +28800 * r - 24116 * g - 4684 * b + SROUNDER; + return clip_8b(128 + (v >> (YUV_FIX + SFIX))); +} + +static int ConvertWRGBToYUV(const fixed_y_t* best_y, const fixed_t* best_uv, + WebPPicture* const picture) { + int i, j; + uint8_t* dst_y = picture->y; + uint8_t* dst_u = picture->u; + uint8_t* dst_v = picture->v; + const fixed_t* const best_uv_base = best_uv; + const int w = (picture->width + 1) & ~1; + const int h = (picture->height + 1) & ~1; + const int uv_w = w >> 1; + const int uv_h = h >> 1; + for (best_uv = best_uv_base, j = 0; j < picture->height; ++j) { + for (i = 0; i < picture->width; ++i) { + const int off = (i >> 1); + const int W = best_y[i]; + const int r = best_uv[off + 0 * uv_w] + W; + const int g = best_uv[off + 1 * uv_w] + W; + const int b = best_uv[off + 2 * uv_w] + W; + dst_y[i] = ConvertRGBToY(r, g, b); + } + best_y += w; + best_uv += (j & 1) * 3 * uv_w; + dst_y += picture->y_stride; + } + for (best_uv = best_uv_base, j = 0; j < uv_h; ++j) { + for (i = 0; i < uv_w; ++i) { + const int off = i; + const int r = best_uv[off + 0 * uv_w]; + const int g = best_uv[off + 1 * uv_w]; + const int b = best_uv[off + 2 * uv_w]; + dst_u[i] = ConvertRGBToU(r, g, b); + dst_v[i] = ConvertRGBToV(r, g, b); + } + best_uv += 3 * uv_w; + dst_u += picture->uv_stride; + dst_v += picture->uv_stride; + } + return 1; +} + +//------------------------------------------------------------------------------ +// Main function + +#define SAFE_ALLOC(W, H, T) ((T*)WebPSafeMalloc((W) * (H), sizeof(T))) + +static int PreprocessARGB(const uint8_t* r_ptr, + const uint8_t* g_ptr, + const uint8_t* b_ptr, + int step, int rgb_stride, + WebPPicture* const picture) { + // we expand the right/bottom border if needed + const int w = (picture->width + 1) & ~1; + const int h = (picture->height + 1) & ~1; + const int uv_w = w >> 1; + const int uv_h = h >> 1; + uint64_t prev_diff_y_sum = ~0; + int j, iter; + + // TODO(skal): allocate one big memory chunk. But for now, it's easier + // for valgrind debugging to have several chunks. 
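+  // (Buffer roles: tmp_buffer holds two interleaved R/G/B rows of width w;
+  // best_y/target_y are full-size W planes; best_uv/target_uv pack the three
+  // chroma-difference channels per row pair; best_rgb_y and best_rgb_uv are
+  // one-step scratch reconstructions used inside the iteration.)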
+ fixed_y_t* const tmp_buffer = SAFE_ALLOC(w * 3, 2, fixed_y_t); // scratch + fixed_y_t* const best_y_base = SAFE_ALLOC(w, h, fixed_y_t); + fixed_y_t* const target_y_base = SAFE_ALLOC(w, h, fixed_y_t); + fixed_y_t* const best_rgb_y = SAFE_ALLOC(w, 2, fixed_y_t); + fixed_t* const best_uv_base = SAFE_ALLOC(uv_w * 3, uv_h, fixed_t); + fixed_t* const target_uv_base = SAFE_ALLOC(uv_w * 3, uv_h, fixed_t); + fixed_t* const best_rgb_uv = SAFE_ALLOC(uv_w * 3, 1, fixed_t); + fixed_y_t* best_y = best_y_base; + fixed_y_t* target_y = target_y_base; + fixed_t* best_uv = best_uv_base; + fixed_t* target_uv = target_uv_base; + const uint64_t diff_y_threshold = (uint64_t)(3.0 * w * h); + int ok; + + if (best_y_base == NULL || best_uv_base == NULL || + target_y_base == NULL || target_uv_base == NULL || + best_rgb_y == NULL || best_rgb_uv == NULL || + tmp_buffer == NULL) { + ok = WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY); + goto End; + } + assert(picture->width >= kMinDimensionIterativeConversion); + assert(picture->height >= kMinDimensionIterativeConversion); + + WebPInitConvertARGBToYUV(); + + // Import RGB samples to W/RGB representation. + for (j = 0; j < picture->height; j += 2) { + const int is_last_row = (j == picture->height - 1); + fixed_y_t* const src1 = tmp_buffer + 0 * w; + fixed_y_t* const src2 = tmp_buffer + 3 * w; + + // prepare two rows of input + ImportOneRow(r_ptr, g_ptr, b_ptr, step, picture->width, src1); + if (!is_last_row) { + ImportOneRow(r_ptr + rgb_stride, g_ptr + rgb_stride, b_ptr + rgb_stride, + step, picture->width, src2); + } else { + memcpy(src2, src1, 3 * w * sizeof(*src2)); + } + StoreGray(src1, best_y + 0, w); + StoreGray(src2, best_y + w, w); + + UpdateW(src1, target_y, w); + UpdateW(src2, target_y + w, w); + UpdateChroma(src1, src2, target_uv, uv_w); + memcpy(best_uv, target_uv, 3 * uv_w * sizeof(*best_uv)); + best_y += 2 * w; + best_uv += 3 * uv_w; + target_y += 2 * w; + target_uv += 3 * uv_w; + r_ptr += 2 * rgb_stride; + g_ptr += 2 * rgb_stride; + b_ptr += 2 * rgb_stride; + } + + // Iterate and resolve clipping conflicts. + for (iter = 0; iter < kNumIterations; ++iter) { + const fixed_t* cur_uv = best_uv_base; + const fixed_t* prev_uv = best_uv_base; + uint64_t diff_y_sum = 0; + + best_y = best_y_base; + best_uv = best_uv_base; + target_y = target_y_base; + target_uv = target_uv_base; + for (j = 0; j < h; j += 2) { + fixed_y_t* const src1 = tmp_buffer + 0 * w; + fixed_y_t* const src2 = tmp_buffer + 3 * w; + { + const fixed_t* const next_uv = cur_uv + ((j < h - 2) ? 
3 * uv_w : 0); + InterpolateTwoRows(best_y, prev_uv, cur_uv, next_uv, w, src1, src2); + prev_uv = cur_uv; + cur_uv = next_uv; + } + + UpdateW(src1, best_rgb_y + 0 * w, w); + UpdateW(src2, best_rgb_y + 1 * w, w); + UpdateChroma(src1, src2, best_rgb_uv, uv_w); + + // update two rows of Y and one row of RGB + diff_y_sum += WebPSharpYUVUpdateY(target_y, best_rgb_y, best_y, 2 * w); + WebPSharpYUVUpdateRGB(target_uv, best_rgb_uv, best_uv, 3 * uv_w); + + best_y += 2 * w; + best_uv += 3 * uv_w; + target_y += 2 * w; + target_uv += 3 * uv_w; + } + // test exit condition + if (iter > 0) { + if (diff_y_sum < diff_y_threshold) break; + if (diff_y_sum > prev_diff_y_sum) break; + } + prev_diff_y_sum = diff_y_sum; + } + // final reconstruction + ok = ConvertWRGBToYUV(best_y_base, best_uv_base, picture); + + End: + WebPSafeFree(best_y_base); + WebPSafeFree(best_uv_base); + WebPSafeFree(target_y_base); + WebPSafeFree(target_uv_base); + WebPSafeFree(best_rgb_y); + WebPSafeFree(best_rgb_uv); + WebPSafeFree(tmp_buffer); + return ok; +} +#undef SAFE_ALLOC + +//------------------------------------------------------------------------------ +// "Fast" regular RGB->YUV + +#define SUM4(ptr, step) LinearToGamma( \ + GammaToLinear((ptr)[0]) + \ + GammaToLinear((ptr)[(step)]) + \ + GammaToLinear((ptr)[rgb_stride]) + \ + GammaToLinear((ptr)[rgb_stride + (step)]), 0) \ + +#define SUM2(ptr) \ + LinearToGamma(GammaToLinear((ptr)[0]) + GammaToLinear((ptr)[rgb_stride]), 1) + +#define SUM2ALPHA(ptr) ((ptr)[0] + (ptr)[rgb_stride]) +#define SUM4ALPHA(ptr) (SUM2ALPHA(ptr) + SUM2ALPHA((ptr) + 4)) + +#if defined(USE_INVERSE_ALPHA_TABLE) + +static const int kAlphaFix = 19; +// Following table is (1 << kAlphaFix) / a. The (v * kInvAlpha[a]) >> kAlphaFix +// formula is then equal to v / a in most (99.6%) cases. Note that this table +// and constant are adjusted very tightly to fit 32b arithmetic. +// In particular, they use the fact that the operands for 'v / a' are actually +// derived as v = (a0.p0 + a1.p1 + a2.p2 + a3.p3) and a = a0 + a1 + a2 + a3 +// with ai in [0..255] and pi in [0..1<> (kAlphaFix - 2)) + +#else + +#define DIVIDE_BY_ALPHA(sum, a) (4 * (sum) / (a)) + +#endif // USE_INVERSE_ALPHA_TABLE + +static WEBP_INLINE int LinearToGammaWeighted(const uint8_t* src, + const uint8_t* a_ptr, + uint32_t total_a, int step, + int rgb_stride) { + const uint32_t sum = + a_ptr[0] * GammaToLinear(src[0]) + + a_ptr[step] * GammaToLinear(src[step]) + + a_ptr[rgb_stride] * GammaToLinear(src[rgb_stride]) + + a_ptr[rgb_stride + step] * GammaToLinear(src[rgb_stride + step]); + assert(total_a > 0 && total_a <= 4 * 0xff); +#if defined(USE_INVERSE_ALPHA_TABLE) + assert((uint64_t)sum * kInvAlpha[total_a] < ((uint64_t)1 << 32)); +#endif + return LinearToGamma(DIVIDE_BY_ALPHA(sum, total_a), 0); +} + +static WEBP_INLINE void ConvertRowToY(const uint8_t* const r_ptr, + const uint8_t* const g_ptr, + const uint8_t* const b_ptr, + int step, + uint8_t* const dst_y, + int width, + VP8Random* const rg) { + int i, j; + for (i = 0, j = 0; i < width; i += 1, j += step) { + dst_y[i] = RGBToY(r_ptr[j], g_ptr[j], b_ptr[j], rg); + } +} + +static WEBP_INLINE void AccumulateRGBA(const uint8_t* const r_ptr, + const uint8_t* const g_ptr, + const uint8_t* const b_ptr, + const uint8_t* const a_ptr, + int rgb_stride, + uint16_t* dst, int width) { + int i, j; + // we loop over 2x2 blocks and produce one R/G/B/A value for each. 
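+  // (Fast path: if the four alpha values are all 0xff or all 0, the plain
+  // gamma-aware SUM4 average applies; otherwise each channel is averaged
+  // with per-pixel alpha weights in LinearToGammaWeighted(), which divides
+  // by the summed alpha via the kInvAlpha reciprocal table, or by a plain
+  // division when USE_INVERSE_ALPHA_TABLE is not defined.)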
+ for (i = 0, j = 0; i < (width >> 1); i += 1, j += 2 * 4, dst += 4) { + const uint32_t a = SUM4ALPHA(a_ptr + j); + int r, g, b; + if (a == 4 * 0xff || a == 0) { + r = SUM4(r_ptr + j, 4); + g = SUM4(g_ptr + j, 4); + b = SUM4(b_ptr + j, 4); + } else { + r = LinearToGammaWeighted(r_ptr + j, a_ptr + j, a, 4, rgb_stride); + g = LinearToGammaWeighted(g_ptr + j, a_ptr + j, a, 4, rgb_stride); + b = LinearToGammaWeighted(b_ptr + j, a_ptr + j, a, 4, rgb_stride); + } + dst[0] = r; + dst[1] = g; + dst[2] = b; + dst[3] = a; + } + if (width & 1) { + const uint32_t a = 2u * SUM2ALPHA(a_ptr + j); + int r, g, b; + if (a == 4 * 0xff || a == 0) { + r = SUM2(r_ptr + j); + g = SUM2(g_ptr + j); + b = SUM2(b_ptr + j); + } else { + r = LinearToGammaWeighted(r_ptr + j, a_ptr + j, a, 0, rgb_stride); + g = LinearToGammaWeighted(g_ptr + j, a_ptr + j, a, 0, rgb_stride); + b = LinearToGammaWeighted(b_ptr + j, a_ptr + j, a, 0, rgb_stride); + } + dst[0] = r; + dst[1] = g; + dst[2] = b; + dst[3] = a; + } +} + +static WEBP_INLINE void AccumulateRGB(const uint8_t* const r_ptr, + const uint8_t* const g_ptr, + const uint8_t* const b_ptr, + int step, int rgb_stride, + uint16_t* dst, int width) { + int i, j; + for (i = 0, j = 0; i < (width >> 1); i += 1, j += 2 * step, dst += 4) { + dst[0] = SUM4(r_ptr + j, step); + dst[1] = SUM4(g_ptr + j, step); + dst[2] = SUM4(b_ptr + j, step); + } + if (width & 1) { + dst[0] = SUM2(r_ptr + j); + dst[1] = SUM2(g_ptr + j); + dst[2] = SUM2(b_ptr + j); + } +} + +static WEBP_INLINE void ConvertRowsToUV(const uint16_t* rgb, + uint8_t* const dst_u, + uint8_t* const dst_v, + int width, + VP8Random* const rg) { + int i; + for (i = 0; i < width; i += 1, rgb += 4) { + const int r = rgb[0], g = rgb[1], b = rgb[2]; + dst_u[i] = RGBToU(r, g, b, rg); + dst_v[i] = RGBToV(r, g, b, rg); + } +} + +static int ImportYUVAFromRGBA(const uint8_t* r_ptr, + const uint8_t* g_ptr, + const uint8_t* b_ptr, + const uint8_t* a_ptr, + int step, // bytes per pixel + int rgb_stride, // bytes per scanline + float dithering, + int use_iterative_conversion, + WebPPicture* const picture) { + int y; + const int width = picture->width; + const int height = picture->height; + const int has_alpha = CheckNonOpaque(a_ptr, width, height, step, rgb_stride); + const int is_rgb = (r_ptr < b_ptr); // otherwise it's bgr + + picture->colorspace = has_alpha ? WEBP_YUV420A : WEBP_YUV420; + picture->use_argb = 0; + + // disable smart conversion if source is too small (overkill). 
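+  // (kMinDimensionIterativeConversion is 4, so only pictures narrower or
+  // shorter than four pixels are forced onto the non-iterative path, where
+  // the sharp-YUV iteration would have too few 2x2 blocks to act on.)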
+ if (width < kMinDimensionIterativeConversion || + height < kMinDimensionIterativeConversion) { + use_iterative_conversion = 0; + } + + if (!WebPPictureAllocYUVA(picture, width, height)) { + return 0; + } + if (has_alpha) { + assert(step == 4); +#if defined(USE_GAMMA_COMPRESSION) && defined(USE_INVERSE_ALPHA_TABLE) + assert(kAlphaFix + kGammaFix <= 31); +#endif + } + + if (use_iterative_conversion) { + InitGammaTablesS(); + if (!PreprocessARGB(r_ptr, g_ptr, b_ptr, step, rgb_stride, picture)) { + return 0; + } + if (has_alpha) { + WebPExtractAlpha(a_ptr, rgb_stride, width, height, + picture->a, picture->a_stride); + } + } else { + const int uv_width = (width + 1) >> 1; + int use_dsp = (step == 3); // use special function in this case + // temporary storage for accumulated R/G/B values during conversion to U/V + uint16_t* const tmp_rgb = + (uint16_t*)WebPSafeMalloc(4 * uv_width, sizeof(*tmp_rgb)); + uint8_t* dst_y = picture->y; + uint8_t* dst_u = picture->u; + uint8_t* dst_v = picture->v; + uint8_t* dst_a = picture->a; + + VP8Random base_rg; + VP8Random* rg = NULL; + if (dithering > 0.) { + VP8InitRandom(&base_rg, dithering); + rg = &base_rg; + use_dsp = 0; // can't use dsp in this case + } + WebPInitConvertARGBToYUV(); + InitGammaTables(); + + if (tmp_rgb == NULL) return 0; // malloc error + + // Downsample Y/U/V planes, two rows at a time + for (y = 0; y < (height >> 1); ++y) { + int rows_have_alpha = has_alpha; + if (use_dsp) { + if (is_rgb) { + WebPConvertRGB24ToY(r_ptr, dst_y, width); + WebPConvertRGB24ToY(r_ptr + rgb_stride, + dst_y + picture->y_stride, width); + } else { + WebPConvertBGR24ToY(b_ptr, dst_y, width); + WebPConvertBGR24ToY(b_ptr + rgb_stride, + dst_y + picture->y_stride, width); + } + } else { + ConvertRowToY(r_ptr, g_ptr, b_ptr, step, dst_y, width, rg); + ConvertRowToY(r_ptr + rgb_stride, + g_ptr + rgb_stride, + b_ptr + rgb_stride, step, + dst_y + picture->y_stride, width, rg); + } + dst_y += 2 * picture->y_stride; + if (has_alpha) { + rows_have_alpha &= !WebPExtractAlpha(a_ptr, rgb_stride, width, 2, + dst_a, picture->a_stride); + dst_a += 2 * picture->a_stride; + } + // Collect averaged R/G/B(/A) + if (!rows_have_alpha) { + AccumulateRGB(r_ptr, g_ptr, b_ptr, step, rgb_stride, tmp_rgb, width); + } else { + AccumulateRGBA(r_ptr, g_ptr, b_ptr, a_ptr, rgb_stride, tmp_rgb, width); + } + // Convert to U/V + if (rg == NULL) { + WebPConvertRGBA32ToUV(tmp_rgb, dst_u, dst_v, uv_width); + } else { + ConvertRowsToUV(tmp_rgb, dst_u, dst_v, uv_width, rg); + } + dst_u += picture->uv_stride; + dst_v += picture->uv_stride; + r_ptr += 2 * rgb_stride; + b_ptr += 2 * rgb_stride; + g_ptr += 2 * rgb_stride; + if (has_alpha) a_ptr += 2 * rgb_stride; + } + if (height & 1) { // extra last row + int row_has_alpha = has_alpha; + if (use_dsp) { + if (r_ptr < b_ptr) { + WebPConvertRGB24ToY(r_ptr, dst_y, width); + } else { + WebPConvertBGR24ToY(b_ptr, dst_y, width); + } + } else { + ConvertRowToY(r_ptr, g_ptr, b_ptr, step, dst_y, width, rg); + } + if (row_has_alpha) { + row_has_alpha &= !WebPExtractAlpha(a_ptr, 0, width, 1, dst_a, 0); + } + // Collect averaged R/G/B(/A) + if (!row_has_alpha) { + // Collect averaged R/G/B + AccumulateRGB(r_ptr, g_ptr, b_ptr, step, /* rgb_stride = */ 0, + tmp_rgb, width); + } else { + AccumulateRGBA(r_ptr, g_ptr, b_ptr, a_ptr, /* rgb_stride = */ 0, + tmp_rgb, width); + } + if (rg == NULL) { + WebPConvertRGBA32ToUV(tmp_rgb, dst_u, dst_v, uv_width); + } else { + ConvertRowsToUV(tmp_rgb, dst_u, dst_v, uv_width, rg); + } + } + WebPSafeFree(tmp_rgb); + } + return 1; +} + 
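+#if 0
+// Illustrative usage sketch (not part of the library): one plausible way a
+// caller could reach ImportYUVAFromRGBA() above through the public API,
+// assuming a tightly packed 8-bit RGBA buffer. Kept inside '#if 0' so it is
+// never compiled.
+static int ImportAndSharpConvert(const uint8_t* rgba, int w, int h,
+                                 WebPPicture* const pic) {
+  if (!WebPPictureInit(pic)) return 0;      // ABI version check
+  pic->width = w;
+  pic->height = h;
+  pic->use_argb = 1;                        // import into the ARGB buffer
+  if (!WebPPictureImportRGBA(pic, rgba, 4 * w)) return 0;
+  // WebPPictureSharpARGBToYUVA() (defined further down) selects the
+  // iterative PreprocessARGB() path above.
+  return WebPPictureSharpARGBToYUVA(pic);
+}
+#endif
+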
+#undef SUM4 +#undef SUM2 +#undef SUM4ALPHA +#undef SUM2ALPHA + +//------------------------------------------------------------------------------ +// call for ARGB->YUVA conversion + +static int PictureARGBToYUVA(WebPPicture* picture, WebPEncCSP colorspace, + float dithering, int use_iterative_conversion) { + if (picture == NULL) return 0; + if (picture->argb == NULL) { + return WebPEncodingSetError(picture, VP8_ENC_ERROR_NULL_PARAMETER); + } else if ((colorspace & WEBP_CSP_UV_MASK) != WEBP_YUV420) { + return WebPEncodingSetError(picture, VP8_ENC_ERROR_INVALID_CONFIGURATION); + } else { + const uint8_t* const argb = (const uint8_t*)picture->argb; + const uint8_t* const a = argb + CHANNEL_OFFSET(0); + const uint8_t* const r = argb + CHANNEL_OFFSET(1); + const uint8_t* const g = argb + CHANNEL_OFFSET(2); + const uint8_t* const b = argb + CHANNEL_OFFSET(3); + + picture->colorspace = WEBP_YUV420; + return ImportYUVAFromRGBA(r, g, b, a, 4, 4 * picture->argb_stride, + dithering, use_iterative_conversion, picture); + } +} + +int WebPPictureARGBToYUVADithered(WebPPicture* picture, WebPEncCSP colorspace, + float dithering) { + return PictureARGBToYUVA(picture, colorspace, dithering, 0); +} + +int WebPPictureARGBToYUVA(WebPPicture* picture, WebPEncCSP colorspace) { + return PictureARGBToYUVA(picture, colorspace, 0.f, 0); +} + +int WebPPictureSharpARGBToYUVA(WebPPicture* picture) { + return PictureARGBToYUVA(picture, WEBP_YUV420, 0.f, 1); +} +// for backward compatibility +int WebPPictureSmartARGBToYUVA(WebPPicture* picture) { + return WebPPictureSharpARGBToYUVA(picture); +} + +//------------------------------------------------------------------------------ +// call for YUVA -> ARGB conversion + +int WebPPictureYUVAToARGB(WebPPicture* picture) { + if (picture == NULL) return 0; + if (picture->y == NULL || picture->u == NULL || picture->v == NULL) { + return WebPEncodingSetError(picture, VP8_ENC_ERROR_NULL_PARAMETER); + } + if ((picture->colorspace & WEBP_CSP_ALPHA_BIT) && picture->a == NULL) { + return WebPEncodingSetError(picture, VP8_ENC_ERROR_NULL_PARAMETER); + } + if ((picture->colorspace & WEBP_CSP_UV_MASK) != WEBP_YUV420) { + return WebPEncodingSetError(picture, VP8_ENC_ERROR_INVALID_CONFIGURATION); + } + // Allocate a new argb buffer (discarding the previous one). + if (!WebPPictureAllocARGB(picture, picture->width, picture->height)) return 0; + picture->use_argb = 1; + + // Convert + { + int y; + const int width = picture->width; + const int height = picture->height; + const int argb_stride = 4 * picture->argb_stride; + uint8_t* dst = (uint8_t*)picture->argb; + const uint8_t* cur_u = picture->u, *cur_v = picture->v, *cur_y = picture->y; + WebPUpsampleLinePairFunc upsample = + WebPGetLinePairConverter(ALPHA_OFFSET > 0); + + // First row, with replicated top samples. + upsample(cur_y, NULL, cur_u, cur_v, cur_u, cur_v, dst, NULL, width); + cur_y += picture->y_stride; + dst += argb_stride; + // Center rows. + for (y = 1; y + 1 < height; y += 2) { + const uint8_t* const top_u = cur_u; + const uint8_t* const top_v = cur_v; + cur_u += picture->uv_stride; + cur_v += picture->uv_stride; + upsample(cur_y, cur_y + picture->y_stride, top_u, top_v, cur_u, cur_v, + dst, dst + argb_stride, width); + cur_y += 2 * picture->y_stride; + dst += 2 * argb_stride; + } + // Last row (if needed), with replicated bottom samples. + if (height > 1 && !(height & 1)) { + upsample(cur_y, NULL, cur_u, cur_v, cur_u, cur_v, dst, NULL, width); + } + // Insert alpha values if needed, in replacement for the default 0xff ones. 
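+    // (The upsampler above left every alpha byte at the default 0xff; this
+    // pass keeps the lower 24 bits of each pixel and rewrites the top byte
+    // from the A plane: argb = (argb & 0x00ffffff) | (a << 24).)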
+ if (picture->colorspace & WEBP_CSP_ALPHA_BIT) { + for (y = 0; y < height; ++y) { + uint32_t* const argb_dst = picture->argb + y * picture->argb_stride; + const uint8_t* const src = picture->a + y * picture->a_stride; + int x; + for (x = 0; x < width; ++x) { + argb_dst[x] = (argb_dst[x] & 0x00ffffffu) | ((uint32_t)src[x] << 24); + } + } + } + } + return 1; +} + +//------------------------------------------------------------------------------ +// automatic import / conversion + +static int Import(WebPPicture* const picture, + const uint8_t* rgb, int rgb_stride, + int step, int swap_rb, int import_alpha) { + int y; + // swap_rb -> b,g,r,a , !swap_rb -> r,g,b,a + const uint8_t* r_ptr = rgb + (swap_rb ? 2 : 0); + const uint8_t* g_ptr = rgb + 1; + const uint8_t* b_ptr = rgb + (swap_rb ? 0 : 2); + const int width = picture->width; + const int height = picture->height; + + if (!picture->use_argb) { + const uint8_t* a_ptr = import_alpha ? rgb + 3 : NULL; + return ImportYUVAFromRGBA(r_ptr, g_ptr, b_ptr, a_ptr, step, rgb_stride, + 0.f /* no dithering */, 0, picture); + } + if (!WebPPictureAlloc(picture)) return 0; + + VP8LDspInit(); + WebPInitAlphaProcessing(); + + if (import_alpha) { + // dst[] byte order is {a,r,g,b} for big-endian, {b,g,r,a} for little endian + uint32_t* dst = picture->argb; + const int do_copy = (ALPHA_OFFSET == 3) && swap_rb; + assert(step == 4); + if (do_copy) { + for (y = 0; y < height; ++y) { + memcpy(dst, rgb, width * 4); + rgb += rgb_stride; + dst += picture->argb_stride; + } + } else { + for (y = 0; y < height; ++y) { +#ifdef WORDS_BIGENDIAN + // BGRA or RGBA input order. + const uint8_t* a_ptr = rgb + 3; + WebPPackARGB(a_ptr, r_ptr, g_ptr, b_ptr, width, dst); + r_ptr += rgb_stride; + g_ptr += rgb_stride; + b_ptr += rgb_stride; +#else + // RGBA input order. Need to swap R and B. + VP8LConvertBGRAToRGBA((const uint32_t*)rgb, width, (uint8_t*)dst); +#endif + rgb += rgb_stride; + dst += picture->argb_stride; + } + } + } else { + uint32_t* dst = picture->argb; + assert(step >= 3); + for (y = 0; y < height; ++y) { + WebPPackRGB(r_ptr, g_ptr, b_ptr, width, step, dst); + r_ptr += rgb_stride; + g_ptr += rgb_stride; + b_ptr += rgb_stride; + dst += picture->argb_stride; + } + } + return 1; +} + +// Public API + +#if !defined(WEBP_REDUCE_CSP) + +int WebPPictureImportBGR(WebPPicture* picture, + const uint8_t* rgb, int rgb_stride) { + return (picture != NULL && rgb != NULL) + ? Import(picture, rgb, rgb_stride, 3, 1, 0) + : 0; +} + +int WebPPictureImportBGRA(WebPPicture* picture, + const uint8_t* rgba, int rgba_stride) { + return (picture != NULL && rgba != NULL) + ? Import(picture, rgba, rgba_stride, 4, 1, 1) + : 0; +} + + +int WebPPictureImportBGRX(WebPPicture* picture, + const uint8_t* rgba, int rgba_stride) { + return (picture != NULL && rgba != NULL) + ? Import(picture, rgba, rgba_stride, 4, 1, 0) + : 0; +} + +#endif // WEBP_REDUCE_CSP + +int WebPPictureImportRGB(WebPPicture* picture, + const uint8_t* rgb, int rgb_stride) { + return (picture != NULL && rgb != NULL) + ? Import(picture, rgb, rgb_stride, 3, 0, 0) + : 0; +} + +int WebPPictureImportRGBA(WebPPicture* picture, + const uint8_t* rgba, int rgba_stride) { + return (picture != NULL && rgba != NULL) + ? Import(picture, rgba, rgba_stride, 4, 0, 1) + : 0; +} + +int WebPPictureImportRGBX(WebPPicture* picture, + const uint8_t* rgba, int rgba_stride) { + return (picture != NULL && rgba != NULL) + ? 
Import(picture, rgba, rgba_stride, 4, 0, 0)
+             : 0;
+}
+
+//------------------------------------------------------------------------------
diff --git a/vendor/github.com/sizeofint/webpanimation/enc_picture_enc.c b/vendor/github.com/sizeofint/webpanimation/enc_picture_enc.c
new file mode 100644
index 00000000..9397d226
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/enc_picture_enc.c
@@ -0,0 +1,296 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// WebPPicture class basis
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include "enc_vp8i_enc.h"
+#include "dsp_dsp.h"
+#include "utils_utils.h"
+
+//------------------------------------------------------------------------------
+// WebPPicture
+//------------------------------------------------------------------------------
+
+static int DummyWriter(const uint8_t* data, size_t data_size,
+                       const WebPPicture* const picture) {
+  // The following are to prevent 'unused variable' error message.
+  (void)data;
+  (void)data_size;
+  (void)picture;
+  return 1;
+}
+
+int WebPPictureInitInternal(WebPPicture* picture, int version) {
+  if (WEBP_ABI_IS_INCOMPATIBLE(version, WEBP_ENCODER_ABI_VERSION)) {
+    return 0;   // caller/system version mismatch!
+  }
+  if (picture != NULL) {
+    memset(picture, 0, sizeof(*picture));
+    picture->writer = DummyWriter;
+    WebPEncodingSetError(picture, VP8_ENC_OK);
+  }
+  return 1;
+}
+
+//------------------------------------------------------------------------------
+
+static void WebPPictureResetBufferARGB(WebPPicture* const picture) {
+  picture->memory_argb_ = NULL;
+  picture->argb = NULL;
+  picture->argb_stride = 0;
+}
+
+static void WebPPictureResetBufferYUVA(WebPPicture* const picture) {
+  picture->memory_ = NULL;
+  picture->y = picture->u = picture->v = picture->a = NULL;
+  picture->y_stride = picture->uv_stride = 0;
+  picture->a_stride = 0;
+}
+
+void WebPPictureResetBuffers(WebPPicture* const picture) {
+  WebPPictureResetBufferARGB(picture);
+  WebPPictureResetBufferYUVA(picture);
+}
+
+int WebPPictureAllocARGB(WebPPicture* const picture, int width, int height) {
+  void* memory;
+  const uint64_t argb_size = (uint64_t)width * height;
+
+  assert(picture != NULL);
+
+  WebPSafeFree(picture->memory_argb_);
+  WebPPictureResetBufferARGB(picture);
+
+  if (width <= 0 || height <= 0) {
+    return WebPEncodingSetError(picture, VP8_ENC_ERROR_BAD_DIMENSION);
+  }
+  // allocate a new buffer.
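+  // (WEBP_ALIGN_CST extra units are requested so that 'argb' can be rounded
+  // up to an aligned address by WEBP_ALIGN(); the raw pointer is kept in
+  // 'memory_argb_' so WebPSafeFree() releases the original allocation.)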
+ memory = WebPSafeMalloc(argb_size + WEBP_ALIGN_CST, sizeof(*picture->argb)); + if (memory == NULL) { + return WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY); + } + picture->memory_argb_ = memory; + picture->argb = (uint32_t*)WEBP_ALIGN(memory); + picture->argb_stride = width; + return 1; +} + +int WebPPictureAllocYUVA(WebPPicture* const picture, int width, int height) { + const WebPEncCSP uv_csp = + (WebPEncCSP)((int)picture->colorspace & WEBP_CSP_UV_MASK); + const int has_alpha = (int)picture->colorspace & WEBP_CSP_ALPHA_BIT; + const int y_stride = width; + const int uv_width = (int)(((int64_t)width + 1) >> 1); + const int uv_height = (int)(((int64_t)height + 1) >> 1); + const int uv_stride = uv_width; + int a_width, a_stride; + uint64_t y_size, uv_size, a_size, total_size; + uint8_t* mem; + + assert(picture != NULL); + + WebPSafeFree(picture->memory_); + WebPPictureResetBufferYUVA(picture); + + if (uv_csp != WEBP_YUV420) { + return WebPEncodingSetError(picture, VP8_ENC_ERROR_INVALID_CONFIGURATION); + } + + // alpha + a_width = has_alpha ? width : 0; + a_stride = a_width; + y_size = (uint64_t)y_stride * height; + uv_size = (uint64_t)uv_stride * uv_height; + a_size = (uint64_t)a_stride * height; + + total_size = y_size + a_size + 2 * uv_size; + + // Security and validation checks + if (width <= 0 || height <= 0 || // luma/alpha param error + uv_width <= 0 || uv_height <= 0) { // u/v param error + return WebPEncodingSetError(picture, VP8_ENC_ERROR_BAD_DIMENSION); + } + // allocate a new buffer. + mem = (uint8_t*)WebPSafeMalloc(total_size, sizeof(*mem)); + if (mem == NULL) { + return WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY); + } + + // From now on, we're in the clear, we can no longer fail... + picture->memory_ = (void*)mem; + picture->y_stride = y_stride; + picture->uv_stride = uv_stride; + picture->a_stride = a_stride; + + // TODO(skal): we could align the y/u/v planes and adjust stride. 
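+  // (The single allocation is carved in the order | Y | U | V | A(optional) |,
+  // with sizes y_size, uv_size, uv_size and a_size computed above.)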
+ picture->y = mem; + mem += y_size; + + picture->u = mem; + mem += uv_size; + picture->v = mem; + mem += uv_size; + + if (a_size > 0) { + picture->a = mem; + mem += a_size; + } + (void)mem; // makes the static analyzer happy + return 1; +} + +int WebPPictureAlloc(WebPPicture* picture) { + if (picture != NULL) { + const int width = picture->width; + const int height = picture->height; + + WebPPictureFree(picture); // erase previous buffer + + if (!picture->use_argb) { + return WebPPictureAllocYUVA(picture, width, height); + } else { + return WebPPictureAllocARGB(picture, width, height); + } + } + return 1; +} + +void WebPPictureFree(WebPPicture* picture) { + if (picture != NULL) { + WebPSafeFree(picture->memory_); + WebPSafeFree(picture->memory_argb_); + WebPPictureResetBuffers(picture); + } +} + +//------------------------------------------------------------------------------ +// WebPMemoryWriter: Write-to-memory + +void WebPMemoryWriterInit(WebPMemoryWriter* writer) { + writer->mem = NULL; + writer->size = 0; + writer->max_size = 0; +} + +int WebPMemoryWrite(const uint8_t* data, size_t data_size, + const WebPPicture* picture) { + WebPMemoryWriter* const w = (WebPMemoryWriter*)picture->custom_ptr; + uint64_t next_size; + if (w == NULL) { + return 1; + } + next_size = (uint64_t)w->size + data_size; + if (next_size > w->max_size) { + uint8_t* new_mem; + uint64_t next_max_size = 2ULL * w->max_size; + if (next_max_size < next_size) next_max_size = next_size; + if (next_max_size < 8192ULL) next_max_size = 8192ULL; + new_mem = (uint8_t*)WebPSafeMalloc(next_max_size, 1); + if (new_mem == NULL) { + return 0; + } + if (w->size > 0) { + memcpy(new_mem, w->mem, w->size); + } + WebPSafeFree(w->mem); + w->mem = new_mem; + // down-cast is ok, thanks to WebPSafeMalloc + w->max_size = (size_t)next_max_size; + } + if (data_size > 0) { + memcpy(w->mem + w->size, data, data_size); + w->size += data_size; + } + return 1; +} + +void WebPMemoryWriterClear(WebPMemoryWriter* writer) { + if (writer != NULL) { + WebPSafeFree(writer->mem); + writer->mem = NULL; + writer->size = 0; + writer->max_size = 0; + } +} + +//------------------------------------------------------------------------------ +// Simplest high-level calls: + +typedef int (*Importer)(WebPPicture* const, const uint8_t* const, int); + +static size_t Encode(const uint8_t* rgba, int width, int height, int stride, + Importer import, float quality_factor, int lossless, + uint8_t** output) { + WebPPicture pic; + WebPConfig config; + WebPMemoryWriter wrt; + int ok; + + if (output == NULL) return 0; + + if (!WebPConfigPreset(&config, WEBP_PRESET_DEFAULT, quality_factor) || + !WebPPictureInit(&pic)) { + return 0; // shouldn't happen, except if system installation is broken + } + + config.lossless = !!lossless; + pic.use_argb = !!lossless; + pic.width = width; + pic.height = height; + pic.writer = WebPMemoryWrite; + pic.custom_ptr = &wrt; + WebPMemoryWriterInit(&wrt); + + ok = import(&pic, rgba, stride) && WebPEncode(&config, &pic); + WebPPictureFree(&pic); + if (!ok) { + WebPMemoryWriterClear(&wrt); + *output = NULL; + return 0; + } + *output = wrt.mem; + return wrt.size; +} + +#define ENCODE_FUNC(NAME, IMPORTER) \ +size_t NAME(const uint8_t* in, int w, int h, int bps, float q, \ + uint8_t** out) { \ + return Encode(in, w, h, bps, IMPORTER, q, 0, out); \ +} + +ENCODE_FUNC(WebPEncodeRGB, WebPPictureImportRGB) +ENCODE_FUNC(WebPEncodeRGBA, WebPPictureImportRGBA) +#if !defined(WEBP_REDUCE_CSP) +ENCODE_FUNC(WebPEncodeBGR, WebPPictureImportBGR) 
+ENCODE_FUNC(WebPEncodeBGRA, WebPPictureImportBGRA)
+#endif  // WEBP_REDUCE_CSP
+
+#undef ENCODE_FUNC
+
+#define LOSSLESS_DEFAULT_QUALITY 70.
+#define LOSSLESS_ENCODE_FUNC(NAME, IMPORTER)                                 \
+size_t NAME(const uint8_t* in, int w, int h, int bps, uint8_t** out) {       \
+  return Encode(in, w, h, bps, IMPORTER, LOSSLESS_DEFAULT_QUALITY, 1, out);  \
+}
+
+LOSSLESS_ENCODE_FUNC(WebPEncodeLosslessRGB, WebPPictureImportRGB)
+LOSSLESS_ENCODE_FUNC(WebPEncodeLosslessRGBA, WebPPictureImportRGBA)
+#if !defined(WEBP_REDUCE_CSP)
+LOSSLESS_ENCODE_FUNC(WebPEncodeLosslessBGR, WebPPictureImportBGR)
+LOSSLESS_ENCODE_FUNC(WebPEncodeLosslessBGRA, WebPPictureImportBGRA)
+#endif  // WEBP_REDUCE_CSP
+
+#undef LOSSLESS_ENCODE_FUNC
+
+//------------------------------------------------------------------------------
diff --git a/vendor/github.com/sizeofint/webpanimation/enc_picture_psnr_enc.c b/vendor/github.com/sizeofint/webpanimation/enc_picture_psnr_enc.c
new file mode 100644
index 00000000..157dd18d
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/enc_picture_psnr_enc.c
@@ -0,0 +1,258 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// WebPPicture tools for measuring distortion
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include "webp_encode.h"
+
+#if !(defined(WEBP_DISABLE_STATS) || defined(WEBP_REDUCE_SIZE))
+
+#include <math.h>
+#include <stdlib.h>
+
+#include "dsp_dsp.h"
+#include "enc_vp8i_enc.h"
+#include "utils_utils.h"
+
+typedef double (*AccumulateFunc)(const uint8_t* src, int src_stride,
+                                 const uint8_t* ref, int ref_stride,
+                                 int w, int h);
+
+//------------------------------------------------------------------------------
+// local-min distortion
+//
+// For every pixel in the *reference* picture, we search for the local best
+// match in the compressed image. This is not a symmetrical measure.
+
+#define RADIUS 2  // search radius. Shouldn't be too large.
+
+static double AccumulateLSIM(const uint8_t* src, int src_stride,
+                             const uint8_t* ref, int ref_stride,
+                             int w, int h) {
+  int x, y;
+  double total_sse = 0.;
+  for (y = 0; y < h; ++y) {
+    const int y_0 = (y - RADIUS < 0) ? 0 : y - RADIUS;
+    const int y_1 = (y + RADIUS + 1 >= h) ? h : y + RADIUS + 1;
+    for (x = 0; x < w; ++x) {
+      const int x_0 = (x - RADIUS < 0) ? 0 : x - RADIUS;
+      const int x_1 = (x + RADIUS + 1 >= w) ? w : x + RADIUS + 1;
+      double best_sse = 255.
* 255.; + const double value = (double)ref[y * ref_stride + x]; + int i, j; + for (j = y_0; j < y_1; ++j) { + const uint8_t* const s = src + j * src_stride; + for (i = x_0; i < x_1; ++i) { + const double diff = s[i] - value; + const double sse = diff * diff; + if (sse < best_sse) best_sse = sse; + } + } + total_sse += best_sse; + } + } + return total_sse; +} +#undef RADIUS + +static double AccumulateSSE(const uint8_t* src, int src_stride, + const uint8_t* ref, int ref_stride, + int w, int h) { + int y; + double total_sse = 0.; + for (y = 0; y < h; ++y) { + total_sse += VP8AccumulateSSE(src, ref, w); + src += src_stride; + ref += ref_stride; + } + return total_sse; +} + +//------------------------------------------------------------------------------ + +static double AccumulateSSIM(const uint8_t* src, int src_stride, + const uint8_t* ref, int ref_stride, + int w, int h) { + const int w0 = (w < VP8_SSIM_KERNEL) ? w : VP8_SSIM_KERNEL; + const int w1 = w - VP8_SSIM_KERNEL - 1; + const int h0 = (h < VP8_SSIM_KERNEL) ? h : VP8_SSIM_KERNEL; + const int h1 = h - VP8_SSIM_KERNEL - 1; + int x, y; + double sum = 0.; + for (y = 0; y < h0; ++y) { + for (x = 0; x < w; ++x) { + sum += VP8SSIMGetClipped(src, src_stride, ref, ref_stride, x, y, w, h); + } + } + for (; y < h1; ++y) { + for (x = 0; x < w0; ++x) { + sum += VP8SSIMGetClipped(src, src_stride, ref, ref_stride, x, y, w, h); + } + for (; x < w1; ++x) { + const int off1 = x - VP8_SSIM_KERNEL + (y - VP8_SSIM_KERNEL) * src_stride; + const int off2 = x - VP8_SSIM_KERNEL + (y - VP8_SSIM_KERNEL) * ref_stride; + sum += VP8SSIMGet(src + off1, src_stride, ref + off2, ref_stride); + } + for (; x < w; ++x) { + sum += VP8SSIMGetClipped(src, src_stride, ref, ref_stride, x, y, w, h); + } + } + for (; y < h; ++y) { + for (x = 0; x < w; ++x) { + sum += VP8SSIMGetClipped(src, src_stride, ref, ref_stride, x, y, w, h); + } + } + return sum; +} + +//------------------------------------------------------------------------------ +// Distortion + +// Max value returned in case of exact similarity. +static const double kMinDistortion_dB = 99.; + +static double GetPSNR(double v, double size) { + return (v > 0. && size > 0.) ? -4.3429448 * log(v / (size * 255 * 255.)) + : kMinDistortion_dB; +} + +static double GetLogSSIM(double v, double size) { + v = (size > 0.) ? v / size : 1.; + return (v < 1.) ? -10.0 * log10(1. - v) : kMinDistortion_dB; +} + +int WebPPlaneDistortion(const uint8_t* src, size_t src_stride, + const uint8_t* ref, size_t ref_stride, + int width, int height, size_t x_step, + int type, float* distortion, float* result) { + uint8_t* allocated = NULL; + const AccumulateFunc metric = (type == 0) ? AccumulateSSE : + (type == 1) ? AccumulateSSIM : + AccumulateLSIM; + if (src == NULL || ref == NULL || + src_stride < x_step * width || ref_stride < x_step * width || + result == NULL || distortion == NULL) { + return 0; + } + + VP8SSIMDspInit(); + if (x_step != 1) { // extract a packed plane if needed + int x, y; + uint8_t* tmp1; + uint8_t* tmp2; + allocated = + (uint8_t*)WebPSafeMalloc(2ULL * width * height, sizeof(*allocated)); + if (allocated == NULL) return 0; + tmp1 = allocated; + tmp2 = tmp1 + (size_t)width * height; + for (y = 0; y < height; ++y) { + for (x = 0; x < width; ++x) { + tmp1[x + y * width] = src[x * x_step + y * src_stride]; + tmp2[x + y * width] = ref[x * x_step + y * ref_stride]; + } + } + src = tmp1; + ref = tmp2; + } + *distortion = (float)metric(src, width, ref, width, width, height); + WebPSafeFree(allocated); + + *result = (type == 1) ? 
(float)GetLogSSIM(*distortion, (double)width * height) + : (float)GetPSNR(*distortion, (double)width * height); + return 1; +} + +#ifdef WORDS_BIGENDIAN +#define BLUE_OFFSET 3 // uint32_t 0x000000ff is 0x00,00,00,ff in memory +#else +#define BLUE_OFFSET 0 // uint32_t 0x000000ff is 0xff,00,00,00 in memory +#endif + +int WebPPictureDistortion(const WebPPicture* src, const WebPPicture* ref, + int type, float results[5]) { + int w, h, c; + int ok = 0; + WebPPicture p0, p1; + double total_size = 0., total_distortion = 0.; + if (src == NULL || ref == NULL || + src->width != ref->width || src->height != ref->height || + results == NULL) { + return 0; + } + + VP8SSIMDspInit(); + if (!WebPPictureInit(&p0) || !WebPPictureInit(&p1)) return 0; + w = src->width; + h = src->height; + if (!WebPPictureView(src, 0, 0, w, h, &p0)) goto Error; + if (!WebPPictureView(ref, 0, 0, w, h, &p1)) goto Error; + + // We always measure distortion in ARGB space. + if (p0.use_argb == 0 && !WebPPictureYUVAToARGB(&p0)) goto Error; + if (p1.use_argb == 0 && !WebPPictureYUVAToARGB(&p1)) goto Error; + for (c = 0; c < 4; ++c) { + float distortion; + const size_t stride0 = 4 * (size_t)p0.argb_stride; + const size_t stride1 = 4 * (size_t)p1.argb_stride; + // results are reported as BGRA + const int offset = c ^ BLUE_OFFSET; + if (!WebPPlaneDistortion((const uint8_t*)p0.argb + offset, stride0, + (const uint8_t*)p1.argb + offset, stride1, + w, h, 4, type, &distortion, results + c)) { + goto Error; + } + total_distortion += distortion; + total_size += w * h; + } + + results[4] = (type == 1) ? (float)GetLogSSIM(total_distortion, total_size) + : (float)GetPSNR(total_distortion, total_size); + ok = 1; + + Error: + WebPPictureFree(&p0); + WebPPictureFree(&p1); + return ok; +} + +#undef BLUE_OFFSET + +#else // defined(WEBP_DISABLE_STATS) +int WebPPlaneDistortion(const uint8_t* src, size_t src_stride, + const uint8_t* ref, size_t ref_stride, + int width, int height, size_t x_step, + int type, float* distortion, float* result) { + (void)src; + (void)src_stride; + (void)ref; + (void)ref_stride; + (void)width; + (void)height; + (void)x_step; + (void)type; + if (distortion == NULL || result == NULL) return 0; + *distortion = 0.f; + *result = 0.f; + return 1; +} + +int WebPPictureDistortion(const WebPPicture* src, const WebPPicture* ref, + int type, float results[5]) { + int i; + (void)src; + (void)ref; + (void)type; + if (results == NULL) return 0; + for (i = 0; i < 5; ++i) results[i] = 0.f; + return 1; +} + +#endif // !defined(WEBP_DISABLE_STATS) diff --git a/vendor/github.com/sizeofint/webpanimation/enc_picture_rescale_enc.c b/vendor/github.com/sizeofint/webpanimation/enc_picture_rescale_enc.c new file mode 100644 index 00000000..a092e9df --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/enc_picture_rescale_enc.c @@ -0,0 +1,309 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// WebPPicture tools: copy, crop, rescaling and view. 
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include "webp_encode.h"
+
+#if !defined(WEBP_REDUCE_SIZE)
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include "enc_vp8i_enc.h"
+#include "utils_rescaler_utils.h"
+#include "utils_utils.h"
+
+#define HALVE(x) (((x) + 1) >> 1)
+
+// Grab the 'specs' (writer, *opaque, width, height...) from 'src' and copy them
+// into 'dst'. Mark 'dst' as not owning any memory.
+static void PictureGrabSpecs(const WebPPicture* const src,
+                             WebPPicture* const dst) {
+  assert(src != NULL && dst != NULL);
+  *dst = *src;
+  WebPPictureResetBuffers(dst);
+}
+
+//------------------------------------------------------------------------------
+
+// Adjust top-left corner to chroma sample position.
+static void SnapTopLeftPosition(const WebPPicture* const pic,
+                                int* const left, int* const top) {
+  if (!pic->use_argb) {
+    *left &= ~1;
+    *top &= ~1;
+  }
+}
+
+// Adjust top-left corner and verify that the sub-rectangle is valid.
+static int AdjustAndCheckRectangle(const WebPPicture* const pic,
+                                   int* const left, int* const top,
+                                   int width, int height) {
+  SnapTopLeftPosition(pic, left, top);
+  if ((*left) < 0 || (*top) < 0) return 0;
+  if (width <= 0 || height <= 0) return 0;
+  if ((*left) + width > pic->width) return 0;
+  if ((*top) + height > pic->height) return 0;
+  return 1;
+}
+
+int WebPPictureCopy(const WebPPicture* src, WebPPicture* dst) {
+  if (src == NULL || dst == NULL) return 0;
+  if (src == dst) return 1;
+
+  PictureGrabSpecs(src, dst);
+  if (!WebPPictureAlloc(dst)) return 0;
+
+  if (!src->use_argb) {
+    WebPCopyPlane(src->y, src->y_stride,
+                  dst->y, dst->y_stride, dst->width, dst->height);
+    WebPCopyPlane(src->u, src->uv_stride, dst->u, dst->uv_stride,
+                  HALVE(dst->width), HALVE(dst->height));
+    WebPCopyPlane(src->v, src->uv_stride, dst->v, dst->uv_stride,
+                  HALVE(dst->width), HALVE(dst->height));
+    if (dst->a != NULL) {
+      WebPCopyPlane(src->a, src->a_stride,
+                    dst->a, dst->a_stride, dst->width, dst->height);
+    }
+  } else {
+    WebPCopyPlane((const uint8_t*)src->argb, 4 * src->argb_stride,
+                  (uint8_t*)dst->argb, 4 * dst->argb_stride,
+                  4 * dst->width, dst->height);
+  }
+  return 1;
+}
+
+int WebPPictureIsView(const WebPPicture* picture) {
+  if (picture == NULL) return 0;
+  if (picture->use_argb) {
+    return (picture->memory_argb_ == NULL);
+  }
+  return (picture->memory_ == NULL);
+}
+
+int WebPPictureView(const WebPPicture* src,
+                    int left, int top, int width, int height,
+                    WebPPicture* dst) {
+  if (src == NULL || dst == NULL) return 0;
+
+  // verify rectangle position.
+  if (!AdjustAndCheckRectangle(src, &left, &top, width, height)) return 0;
+
+  if (src != dst) {  // beware of aliasing! We don't want to leak 'memory_'.
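+    // (PictureGrabSpecs() copies every field and then clears the buffer
+    // pointers; doing that when src == dst would drop the only reference to
+    // the original 'memory_' allocation, hence the guard.)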
+ PictureGrabSpecs(src, dst); + } + dst->width = width; + dst->height = height; + if (!src->use_argb) { + dst->y = src->y + top * src->y_stride + left; + dst->u = src->u + (top >> 1) * src->uv_stride + (left >> 1); + dst->v = src->v + (top >> 1) * src->uv_stride + (left >> 1); + dst->y_stride = src->y_stride; + dst->uv_stride = src->uv_stride; + if (src->a != NULL) { + dst->a = src->a + top * src->a_stride + left; + dst->a_stride = src->a_stride; + } + } else { + dst->argb = src->argb + top * src->argb_stride + left; + dst->argb_stride = src->argb_stride; + } + return 1; +} + +//------------------------------------------------------------------------------ +// Picture cropping + +int WebPPictureCrop(WebPPicture* pic, + int left, int top, int width, int height) { + WebPPicture tmp; + + if (pic == NULL) return 0; + if (!AdjustAndCheckRectangle(pic, &left, &top, width, height)) return 0; + + PictureGrabSpecs(pic, &tmp); + tmp.width = width; + tmp.height = height; + if (!WebPPictureAlloc(&tmp)) return 0; + + if (!pic->use_argb) { + const int y_offset = top * pic->y_stride + left; + const int uv_offset = (top / 2) * pic->uv_stride + left / 2; + WebPCopyPlane(pic->y + y_offset, pic->y_stride, + tmp.y, tmp.y_stride, width, height); + WebPCopyPlane(pic->u + uv_offset, pic->uv_stride, + tmp.u, tmp.uv_stride, HALVE(width), HALVE(height)); + WebPCopyPlane(pic->v + uv_offset, pic->uv_stride, + tmp.v, tmp.uv_stride, HALVE(width), HALVE(height)); + + if (tmp.a != NULL) { + const int a_offset = top * pic->a_stride + left; + WebPCopyPlane(pic->a + a_offset, pic->a_stride, + tmp.a, tmp.a_stride, width, height); + } + } else { + const uint8_t* const src = + (const uint8_t*)(pic->argb + top * pic->argb_stride + left); + WebPCopyPlane(src, pic->argb_stride * 4, (uint8_t*)tmp.argb, + tmp.argb_stride * 4, width * 4, height); + } + WebPPictureFree(pic); + *pic = tmp; + return 1; +} + +//------------------------------------------------------------------------------ +// Simple picture rescaler + +static void RescalePlane(const uint8_t* src, + int src_width, int src_height, int src_stride, + uint8_t* dst, + int dst_width, int dst_height, int dst_stride, + rescaler_t* const work, + int num_channels) { + WebPRescaler rescaler; + int y = 0; + WebPRescalerInit(&rescaler, src_width, src_height, + dst, dst_width, dst_height, dst_stride, + num_channels, work); + while (y < src_height) { + y += WebPRescalerImport(&rescaler, src_height - y, + src + y * src_stride, src_stride); + WebPRescalerExport(&rescaler); + } +} + +static void AlphaMultiplyARGB(WebPPicture* const pic, int inverse) { + assert(pic->argb != NULL); + WebPMultARGBRows((uint8_t*)pic->argb, pic->argb_stride * sizeof(*pic->argb), + pic->width, pic->height, inverse); +} + +static void AlphaMultiplyY(WebPPicture* const pic, int inverse) { + if (pic->a != NULL) { + WebPMultRows(pic->y, pic->y_stride, pic->a, pic->a_stride, + pic->width, pic->height, inverse); + } +} + +int WebPPictureRescale(WebPPicture* pic, int width, int height) { + WebPPicture tmp; + int prev_width, prev_height; + rescaler_t* work; + + if (pic == NULL) return 0; + prev_width = pic->width; + prev_height = pic->height; + if (!WebPRescalerGetScaledDimensions( + prev_width, prev_height, &width, &height)) { + return 0; + } + + PictureGrabSpecs(pic, &tmp); + tmp.width = width; + tmp.height = height; + if (!WebPPictureAlloc(&tmp)) return 0; + + if (!pic->use_argb) { + work = (rescaler_t*)WebPSafeMalloc(2ULL * width, sizeof(*work)); + if (work == NULL) { + WebPPictureFree(&tmp); + return 0; + } + // 
If present, we need to rescale alpha first (for AlphaMultiplyY).
+    if (pic->a != NULL) {
+      WebPInitAlphaProcessing();
+      RescalePlane(pic->a, prev_width, prev_height, pic->a_stride,
+                   tmp.a, width, height, tmp.a_stride, work, 1);
+    }
+
+    // We take transparency into account on the luma plane only. That's not
+    // totally exact blending, but still is a good approximation.
+    AlphaMultiplyY(pic, 0);
+    RescalePlane(pic->y, prev_width, prev_height, pic->y_stride,
+                 tmp.y, width, height, tmp.y_stride, work, 1);
+    AlphaMultiplyY(&tmp, 1);
+
+    RescalePlane(pic->u,
+                 HALVE(prev_width), HALVE(prev_height), pic->uv_stride,
+                 tmp.u,
+                 HALVE(width), HALVE(height), tmp.uv_stride, work, 1);
+    RescalePlane(pic->v,
+                 HALVE(prev_width), HALVE(prev_height), pic->uv_stride,
+                 tmp.v,
+                 HALVE(width), HALVE(height), tmp.uv_stride, work, 1);
+  } else {
+    work = (rescaler_t*)WebPSafeMalloc(2ULL * width * 4, sizeof(*work));
+    if (work == NULL) {
+      WebPPictureFree(&tmp);
+      return 0;
+    }
+    // In order to correctly interpolate colors, we need to apply the alpha
+    // weighting first (black-matting), scale the RGB values, and remove
+    // the premultiplication afterward (while preserving the alpha channel).
+    WebPInitAlphaProcessing();
+    AlphaMultiplyARGB(pic, 0);
+    RescalePlane((const uint8_t*)pic->argb, prev_width, prev_height,
+                 pic->argb_stride * 4,
+                 (uint8_t*)tmp.argb, width, height,
+                 tmp.argb_stride * 4,
+                 work, 4);
+    AlphaMultiplyARGB(&tmp, 1);
+  }
+  WebPPictureFree(pic);
+  WebPSafeFree(work);
+  *pic = tmp;
+  return 1;
+}
+
+#else  // defined(WEBP_REDUCE_SIZE)
+
+int WebPPictureCopy(const WebPPicture* src, WebPPicture* dst) {
+  (void)src;
+  (void)dst;
+  return 0;
+}
+
+int WebPPictureIsView(const WebPPicture* picture) {
+  (void)picture;
+  return 0;
+}
+
+int WebPPictureView(const WebPPicture* src,
+                    int left, int top, int width, int height,
+                    WebPPicture* dst) {
+  (void)src;
+  (void)left;
+  (void)top;
+  (void)width;
+  (void)height;
+  (void)dst;
+  return 0;
+}
+
+int WebPPictureCrop(WebPPicture* pic,
+                    int left, int top, int width, int height) {
+  (void)pic;
+  (void)left;
+  (void)top;
+  (void)width;
+  (void)height;
+  return 0;
+}
+
+int WebPPictureRescale(WebPPicture* pic, int width, int height) {
+  (void)pic;
+  (void)width;
+  (void)height;
+  return 0;
+}
+#endif  // !defined(WEBP_REDUCE_SIZE)
diff --git a/vendor/github.com/sizeofint/webpanimation/enc_picture_tools_enc.c b/vendor/github.com/sizeofint/webpanimation/enc_picture_tools_enc.c
new file mode 100644
index 00000000..32eec1ef
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/enc_picture_tools_enc.c
@@ -0,0 +1,273 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// WebPPicture tools: alpha handling, etc.
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <assert.h>
+
+#include "enc_vp8i_enc.h"
+#include "dsp_yuv.h"
+
+//------------------------------------------------------------------------------
+// Helper: clean up fully transparent area to help compressibility.
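+// (Strategy: scan the planes in SIZE x SIZE blocks -- SIZE2 x SIZE2 on the
+// subsampled U/V planes -- and overwrite each fully transparent block with a
+// single flat value, so that invisible RGB noise turns into long uniform
+// runs that compress much better.)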
+ +#define SIZE 8 +#define SIZE2 (SIZE / 2) +static int IsTransparentARGBArea(const uint32_t* ptr, int stride, int size) { + int y, x; + for (y = 0; y < size; ++y) { + for (x = 0; x < size; ++x) { + if (ptr[x] & 0xff000000u) { + return 0; + } + } + ptr += stride; + } + return 1; +} + +static void Flatten(uint8_t* ptr, int v, int stride, int size) { + int y; + for (y = 0; y < size; ++y) { + memset(ptr, v, size); + ptr += stride; + } +} + +static void FlattenARGB(uint32_t* ptr, uint32_t v, int stride, int size) { + int x, y; + for (y = 0; y < size; ++y) { + for (x = 0; x < size; ++x) ptr[x] = v; + ptr += stride; + } +} + +// Smoothen the luma components of transparent pixels. Return true if the whole +// block is transparent. +static int SmoothenBlock(const uint8_t* a_ptr, int a_stride, uint8_t* y_ptr, + int y_stride, int width, int height) { + int sum = 0, count = 0; + int x, y; + const uint8_t* alpha_ptr = a_ptr; + uint8_t* luma_ptr = y_ptr; + for (y = 0; y < height; ++y) { + for (x = 0; x < width; ++x) { + if (alpha_ptr[x] != 0) { + ++count; + sum += luma_ptr[x]; + } + } + alpha_ptr += a_stride; + luma_ptr += y_stride; + } + if (count > 0 && count < width * height) { + const uint8_t avg_u8 = (uint8_t)(sum / count); + alpha_ptr = a_ptr; + luma_ptr = y_ptr; + for (y = 0; y < height; ++y) { + for (x = 0; x < width; ++x) { + if (alpha_ptr[x] == 0) luma_ptr[x] = avg_u8; + } + alpha_ptr += a_stride; + luma_ptr += y_stride; + } + } + return (count == 0); +} + +void WebPReplaceTransparentPixels(WebPPicture* const pic, uint32_t color) { + if (pic != NULL && pic->use_argb) { + int y = pic->height; + uint32_t* argb = pic->argb; + color &= 0xffffffu; // force alpha=0 + WebPInitAlphaProcessing(); + while (y-- > 0) { + WebPAlphaReplace(argb, pic->width, color); + argb += pic->argb_stride; + } + } +} + +void WebPCleanupTransparentArea(WebPPicture* pic) { + int x, y, w, h; + if (pic == NULL) return; + w = pic->width / SIZE; + h = pic->height / SIZE; + + // note: we ignore the left-overs on right/bottom, except for SmoothenBlock(). 
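+  // (Editor's sketch of the scan below, illustrative only: walk the 8x8 grid
+  // left-to-right, top-to-bottom. The first transparent block of a run donates
+  // its top-left pixel as the fill value; any opaque block resets the run so
+  // the next transparent run picks a fresh, nearby color.)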
+ if (pic->use_argb) { + uint32_t argb_value = 0; + for (y = 0; y < h; ++y) { + int need_reset = 1; + for (x = 0; x < w; ++x) { + const int off = (y * pic->argb_stride + x) * SIZE; + if (IsTransparentARGBArea(pic->argb + off, pic->argb_stride, SIZE)) { + if (need_reset) { + argb_value = pic->argb[off]; + need_reset = 0; + } + FlattenARGB(pic->argb + off, argb_value, pic->argb_stride, SIZE); + } else { + need_reset = 1; + } + } + } + } else { + const int width = pic->width; + const int height = pic->height; + const int y_stride = pic->y_stride; + const int uv_stride = pic->uv_stride; + const int a_stride = pic->a_stride; + uint8_t* y_ptr = pic->y; + uint8_t* u_ptr = pic->u; + uint8_t* v_ptr = pic->v; + const uint8_t* a_ptr = pic->a; + int values[3] = { 0 }; + if (a_ptr == NULL || y_ptr == NULL || u_ptr == NULL || v_ptr == NULL) { + return; + } + for (y = 0; y + SIZE <= height; y += SIZE) { + int need_reset = 1; + for (x = 0; x + SIZE <= width; x += SIZE) { + if (SmoothenBlock(a_ptr + x, a_stride, y_ptr + x, y_stride, + SIZE, SIZE)) { + if (need_reset) { + values[0] = y_ptr[x]; + values[1] = u_ptr[x >> 1]; + values[2] = v_ptr[x >> 1]; + need_reset = 0; + } + Flatten(y_ptr + x, values[0], y_stride, SIZE); + Flatten(u_ptr + (x >> 1), values[1], uv_stride, SIZE2); + Flatten(v_ptr + (x >> 1), values[2], uv_stride, SIZE2); + } else { + need_reset = 1; + } + } + if (x < width) { + SmoothenBlock(a_ptr + x, a_stride, y_ptr + x, y_stride, + width - x, SIZE); + } + a_ptr += SIZE * a_stride; + y_ptr += SIZE * y_stride; + u_ptr += SIZE2 * uv_stride; + v_ptr += SIZE2 * uv_stride; + } + if (y < height) { + const int sub_height = height - y; + for (x = 0; x + SIZE <= width; x += SIZE) { + SmoothenBlock(a_ptr + x, a_stride, y_ptr + x, y_stride, + SIZE, sub_height); + } + if (x < width) { + SmoothenBlock(a_ptr + x, a_stride, y_ptr + x, y_stride, + width - x, sub_height); + } + } + } +} + +#undef SIZE +#undef SIZE2 + +//------------------------------------------------------------------------------ +// Blend color and remove transparency info + +#define BLEND(V0, V1, ALPHA) \ + ((((V0) * (255 - (ALPHA)) + (V1) * (ALPHA)) * 0x101 + 256) >> 16) +#define BLEND_10BIT(V0, V1, ALPHA) \ + ((((V0) * (1020 - (ALPHA)) + (V1) * (ALPHA)) * 0x101 + 1024) >> 18) + +static WEBP_INLINE uint32_t MakeARGB32(int r, int g, int b) { + return (0xff000000u | (r << 16) | (g << 8) | b); +} + +void WebPBlendAlpha(WebPPicture* pic, uint32_t background_rgb) { + const int red = (background_rgb >> 16) & 0xff; + const int green = (background_rgb >> 8) & 0xff; + const int blue = (background_rgb >> 0) & 0xff; + int x, y; + if (pic == NULL) return; + if (!pic->use_argb) { + const int uv_width = (pic->width >> 1); // omit last pixel during u/v loop + const int Y0 = VP8RGBToY(red, green, blue, YUV_HALF); + // VP8RGBToU/V expects the u/v values summed over four pixels + const int U0 = VP8RGBToU(4 * red, 4 * green, 4 * blue, 4 * YUV_HALF); + const int V0 = VP8RGBToV(4 * red, 4 * green, 4 * blue, 4 * YUV_HALF); + const int has_alpha = pic->colorspace & WEBP_CSP_ALPHA_BIT; + uint8_t* y_ptr = pic->y; + uint8_t* u_ptr = pic->u; + uint8_t* v_ptr = pic->v; + uint8_t* a_ptr = pic->a; + if (!has_alpha || a_ptr == NULL) return; // nothing to do + for (y = 0; y < pic->height; ++y) { + // Luma blending + for (x = 0; x < pic->width; ++x) { + const uint8_t alpha = a_ptr[x]; + if (alpha < 0xff) { + y_ptr[x] = BLEND(Y0, y_ptr[x], alpha); + } + } + // Chroma blending every even line + if ((y & 1) == 0) { + uint8_t* const a_ptr2 = + (y + 1 == pic->height) ? 
a_ptr : a_ptr + pic->a_stride; + for (x = 0; x < uv_width; ++x) { + // Average four alpha values into a single blending weight. + // TODO(skal): might lead to visible contouring. Can we do better? + const uint32_t alpha = + a_ptr[2 * x + 0] + a_ptr[2 * x + 1] + + a_ptr2[2 * x + 0] + a_ptr2[2 * x + 1]; + u_ptr[x] = BLEND_10BIT(U0, u_ptr[x], alpha); + v_ptr[x] = BLEND_10BIT(V0, v_ptr[x], alpha); + } + if (pic->width & 1) { // rightmost pixel + const uint32_t alpha = 2 * (a_ptr[2 * x + 0] + a_ptr2[2 * x + 0]); + u_ptr[x] = BLEND_10BIT(U0, u_ptr[x], alpha); + v_ptr[x] = BLEND_10BIT(V0, v_ptr[x], alpha); + } + } else { + u_ptr += pic->uv_stride; + v_ptr += pic->uv_stride; + } + memset(a_ptr, 0xff, pic->width); // reset alpha value to opaque + a_ptr += pic->a_stride; + y_ptr += pic->y_stride; + } + } else { + uint32_t* argb = pic->argb; + const uint32_t background = MakeARGB32(red, green, blue); + for (y = 0; y < pic->height; ++y) { + for (x = 0; x < pic->width; ++x) { + const int alpha = (argb[x] >> 24) & 0xff; + if (alpha != 0xff) { + if (alpha > 0) { + int r = (argb[x] >> 16) & 0xff; + int g = (argb[x] >> 8) & 0xff; + int b = (argb[x] >> 0) & 0xff; + r = BLEND(red, r, alpha); + g = BLEND(green, g, alpha); + b = BLEND(blue, b, alpha); + argb[x] = MakeARGB32(r, g, b); + } else { + argb[x] = background; + } + } + } + argb += pic->argb_stride; + } + } +} + +#undef BLEND +#undef BLEND_10BIT + +//------------------------------------------------------------------------------ diff --git a/vendor/github.com/sizeofint/webpanimation/enc_predictor_enc.c b/vendor/github.com/sizeofint/webpanimation/enc_predictor_enc.c new file mode 100644 index 00000000..d855dae7 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/enc_predictor_enc.c @@ -0,0 +1,772 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Image transform methods for lossless encoder. +// +// Authors: Vikas Arora (vikaas.arora@gmail.com) +// Jyrki Alakuijala (jyrki@google.com) +// Urvang Joshi (urvang@google.com) +// Vincent Rabaud (vrabaud@google.com) + +#include "dsp_lossless.h" +#include "dsp_lossless_common.h" +#include "enc_vp8li_enc.h" + +#define MAX_DIFF_COST (1e30f) + +static const float kSpatialPredictorBias = 15.f; +static const int kPredLowEffort = 11; +static const uint32_t kMaskAlpha = 0xff000000; + +// Mostly used to reduce code size + readability +static WEBP_INLINE int GetMin(int a, int b) { return (a > b) ? b : a; } + +//------------------------------------------------------------------------------ +// Methods to calculate Entropy (Shannon). 
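+//
+// (Editor's sketch, illustrative and not upstream text: the "costs" below are
+// heuristics rather than true entropies. VP8LCombinedShannonEntropy()
+// estimates the bits needed when a tile histogram is coded together with the
+// histograms accumulated from previously chosen tiles, while
+// PredictionCostSpatial() adds a negative, i.e. cheaper, term when the
+// residual histogram is concentrated near 0 and 255 -- small signed residuals
+// stored modulo 256. Hypothetical usage:)
+#if 0
+  int accumulated[4][256];   /* running per-channel residual histograms */
+  int tile[4][256];          /* histograms of the tile under test */
+  const float cost = PredictionCostSpatialHistogram(
+      (const int (*)[256])accumulated, (const int (*)[256])tile);
+  /* lower cost == the tile is cheaper to code with the tested predictor */
+#endif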
+ +static float PredictionCostSpatial(const int counts[256], int weight_0, + double exp_val) { + const int significant_symbols = 256 >> 4; + const double exp_decay_factor = 0.6; + double bits = weight_0 * counts[0]; + int i; + for (i = 1; i < significant_symbols; ++i) { + bits += exp_val * (counts[i] + counts[256 - i]); + exp_val *= exp_decay_factor; + } + return (float)(-0.1 * bits); +} + +static float PredictionCostSpatialHistogram(const int accumulated[4][256], + const int tile[4][256]) { + int i; + double retval = 0; + for (i = 0; i < 4; ++i) { + const double kExpValue = 0.94; + retval += PredictionCostSpatial(tile[i], 1, kExpValue); + retval += VP8LCombinedShannonEntropy(tile[i], accumulated[i]); + } + return (float)retval; +} + +static WEBP_INLINE void UpdateHisto(int histo_argb[4][256], uint32_t argb) { + ++histo_argb[0][argb >> 24]; + ++histo_argb[1][(argb >> 16) & 0xff]; + ++histo_argb[2][(argb >> 8) & 0xff]; + ++histo_argb[3][argb & 0xff]; +} + +//------------------------------------------------------------------------------ +// Spatial transform functions. + +static WEBP_INLINE void PredictBatch(int mode, int x_start, int y, + int num_pixels, const uint32_t* current, + const uint32_t* upper, uint32_t* out) { + if (x_start == 0) { + if (y == 0) { + // ARGB_BLACK. + VP8LPredictorsSub[0](current, NULL, 1, out); + } else { + // Top one. + VP8LPredictorsSub[2](current, upper, 1, out); + } + ++x_start; + ++out; + --num_pixels; + } + if (y == 0) { + // Left one. + VP8LPredictorsSub[1](current + x_start, NULL, num_pixels, out); + } else { + VP8LPredictorsSub[mode](current + x_start, upper + x_start, num_pixels, + out); + } +} + +#if (WEBP_NEAR_LOSSLESS == 1) +static WEBP_INLINE int GetMax(int a, int b) { return (a < b) ? b : a; } + +static int MaxDiffBetweenPixels(uint32_t p1, uint32_t p2) { + const int diff_a = abs((int)(p1 >> 24) - (int)(p2 >> 24)); + const int diff_r = abs((int)((p1 >> 16) & 0xff) - (int)((p2 >> 16) & 0xff)); + const int diff_g = abs((int)((p1 >> 8) & 0xff) - (int)((p2 >> 8) & 0xff)); + const int diff_b = abs((int)(p1 & 0xff) - (int)(p2 & 0xff)); + return GetMax(GetMax(diff_a, diff_r), GetMax(diff_g, diff_b)); +} + +static int MaxDiffAroundPixel(uint32_t current, uint32_t up, uint32_t down, + uint32_t left, uint32_t right) { + const int diff_up = MaxDiffBetweenPixels(current, up); + const int diff_down = MaxDiffBetweenPixels(current, down); + const int diff_left = MaxDiffBetweenPixels(current, left); + const int diff_right = MaxDiffBetweenPixels(current, right); + return GetMax(GetMax(diff_up, diff_down), GetMax(diff_left, diff_right)); +} + +static uint32_t AddGreenToBlueAndRed(uint32_t argb) { + const uint32_t green = (argb >> 8) & 0xff; + uint32_t red_blue = argb & 0x00ff00ffu; + red_blue += (green << 16) | green; + red_blue &= 0x00ff00ffu; + return (argb & 0xff00ff00u) | red_blue; +} + +static void MaxDiffsForRow(int width, int stride, const uint32_t* const argb, + uint8_t* const max_diffs, int used_subtract_green) { + uint32_t current, up, down, left, right; + int x; + if (width <= 2) return; + current = argb[0]; + right = argb[1]; + if (used_subtract_green) { + current = AddGreenToBlueAndRed(current); + right = AddGreenToBlueAndRed(right); + } + // max_diffs[0] and max_diffs[width - 1] are never used. 
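+  // (Editor's note: left/current/right are kept in a sliding window so the
+  // subtract-green correction of the horizontal neighbors is applied only
+  // once per pixel; the up/down neighbors are fetched and corrected fresh at
+  // each x.)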
+ for (x = 1; x < width - 1; ++x) { + up = argb[-stride + x]; + down = argb[stride + x]; + left = current; + current = right; + right = argb[x + 1]; + if (used_subtract_green) { + up = AddGreenToBlueAndRed(up); + down = AddGreenToBlueAndRed(down); + right = AddGreenToBlueAndRed(right); + } + max_diffs[x] = MaxDiffAroundPixel(current, up, down, left, right); + } +} + +// Quantize the difference between the actual component value and its prediction +// to a multiple of quantization, working modulo 256, taking care not to cross +// a boundary (inclusive upper limit). +static uint8_t NearLosslessComponent(uint8_t value, uint8_t predict, + uint8_t boundary, int quantization) { + const int residual = (value - predict) & 0xff; + const int boundary_residual = (boundary - predict) & 0xff; + const int lower = residual & ~(quantization - 1); + const int upper = lower + quantization; + // Resolve ties towards a value closer to the prediction (i.e. towards lower + // if value comes after prediction and towards upper otherwise). + const int bias = ((boundary - value) & 0xff) < boundary_residual; + if (residual - lower < upper - residual + bias) { + // lower is closer to residual than upper. + if (residual > boundary_residual && lower <= boundary_residual) { + // Halve quantization step to avoid crossing boundary. This midpoint is + // on the same side of boundary as residual because midpoint >= residual + // (since lower is closer than upper) and residual is above the boundary. + return lower + (quantization >> 1); + } + return lower; + } else { + // upper is closer to residual than lower. + if (residual <= boundary_residual && upper > boundary_residual) { + // Halve quantization step to avoid crossing boundary. This midpoint is + // on the same side of boundary as residual because midpoint <= residual + // (since upper is closer than lower) and residual is below the boundary. + return lower + (quantization >> 1); + } + return upper & 0xff; + } +} + +static WEBP_INLINE uint8_t NearLosslessDiff(uint8_t a, uint8_t b) { + return (uint8_t)((((int)(a) - (int)(b))) & 0xff); +} + +// Quantize every component of the difference between the actual pixel value and +// its prediction to a multiple of a quantization (a power of 2, not larger than +// max_quantization which is a power of 2, smaller than max_diff). Take care if +// value and predict have undergone subtract green, which means that red and +// blue are represented as offsets from green. +static uint32_t NearLossless(uint32_t value, uint32_t predict, + int max_quantization, int max_diff, + int used_subtract_green) { + int quantization; + uint8_t new_green = 0; + uint8_t green_diff = 0; + uint8_t a, r, g, b; + if (max_diff <= 2) { + return VP8LSubPixels(value, predict); + } + quantization = max_quantization; + while (quantization >= max_diff) { + quantization >>= 1; + } + if ((value >> 24) == 0 || (value >> 24) == 0xff) { + // Preserve transparency of fully transparent or fully opaque pixels. + a = NearLosslessDiff((value >> 24) & 0xff, (predict >> 24) & 0xff); + } else { + a = NearLosslessComponent(value >> 24, predict >> 24, 0xff, quantization); + } + g = NearLosslessComponent((value >> 8) & 0xff, (predict >> 8) & 0xff, 0xff, + quantization); + if (used_subtract_green) { + // The green offset will be added to red and blue components during decoding + // to obtain the actual red and blue values. + new_green = ((predict >> 8) + g) & 0xff; + // The amount by which green has been adjusted during quantization. 
It is + // subtracted from red and blue for compensation, to avoid accumulating two + // quantization errors in them. + green_diff = NearLosslessDiff(new_green, (value >> 8) & 0xff); + } + r = NearLosslessComponent(NearLosslessDiff((value >> 16) & 0xff, green_diff), + (predict >> 16) & 0xff, 0xff - new_green, + quantization); + b = NearLosslessComponent(NearLosslessDiff(value & 0xff, green_diff), + predict & 0xff, 0xff - new_green, quantization); + return ((uint32_t)a << 24) | ((uint32_t)r << 16) | ((uint32_t)g << 8) | b; +} +#endif // (WEBP_NEAR_LOSSLESS == 1) + +// Stores the difference between the pixel and its prediction in "out". +// In case of a lossy encoding, updates the source image to avoid propagating +// the deviation further to pixels which depend on the current pixel for their +// predictions. +static WEBP_INLINE void GetResidual( + int width, int height, uint32_t* const upper_row, + uint32_t* const current_row, const uint8_t* const max_diffs, int mode, + int x_start, int x_end, int y, int max_quantization, int exact, + int used_subtract_green, uint32_t* const out) { + if (exact) { + PredictBatch(mode, x_start, y, x_end - x_start, current_row, upper_row, + out); + } else { + const VP8LPredictorFunc pred_func = VP8LPredictors[mode]; + int x; + for (x = x_start; x < x_end; ++x) { + uint32_t predict; + uint32_t residual; + if (y == 0) { + predict = (x == 0) ? ARGB_BLACK : current_row[x - 1]; // Left. + } else if (x == 0) { + predict = upper_row[x]; // Top. + } else { + predict = pred_func(current_row[x - 1], upper_row + x); + } +#if (WEBP_NEAR_LOSSLESS == 1) + if (max_quantization == 1 || mode == 0 || y == 0 || y == height - 1 || + x == 0 || x == width - 1) { + residual = VP8LSubPixels(current_row[x], predict); + } else { + residual = NearLossless(current_row[x], predict, max_quantization, + max_diffs[x], used_subtract_green); + // Update the source image. + current_row[x] = VP8LAddPixels(predict, residual); + // x is never 0 here so we do not need to update upper_row like below. + } +#else + (void)max_diffs; + (void)height; + (void)max_quantization; + (void)used_subtract_green; + residual = VP8LSubPixels(current_row[x], predict); +#endif + if ((current_row[x] & kMaskAlpha) == 0) { + // If alpha is 0, cleanup RGB. We can choose the RGB values of the + // residual for best compression. The prediction of alpha itself can be + // non-zero and must be kept though. We choose RGB of the residual to be + // 0. + residual &= kMaskAlpha; + // Update the source image. + current_row[x] = predict & ~kMaskAlpha; + // The prediction for the rightmost pixel in a row uses the leftmost + // pixel + // in that row as its top-right context pixel. Hence if we change the + // leftmost pixel of current_row, the corresponding change must be + // applied + // to upper_row as well where top-right context is being read from. + if (x == 0 && y != 0) upper_row[width] = current_row[0]; + } + out[x - x_start] = residual; + } + } +} + +// Returns best predictor and updates the accumulated histogram. +// If max_quantization > 1, assumes that near lossless processing will be +// applied, quantizing residuals to multiples of quantization levels up to +// max_quantization (the actual quantization level depends on smoothness near +// the given pixel). 
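+// (Editor's summary of the loop below, illustrative only: for each of the 14
+// VP8L predictor modes, build the tile's residual histogram, price it against
+// the histograms accumulated from already-chosen tiles, subtract a small
+// kSpatialPredictorBias when the mode matches the left or above tile, and
+// keep the cheapest mode.)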
+static int GetBestPredictorForTile(int width, int height, + int tile_x, int tile_y, int bits, + int accumulated[4][256], + uint32_t* const argb_scratch, + const uint32_t* const argb, + int max_quantization, + int exact, int used_subtract_green, + const uint32_t* const modes) { + const int kNumPredModes = 14; + const int start_x = tile_x << bits; + const int start_y = tile_y << bits; + const int tile_size = 1 << bits; + const int max_y = GetMin(tile_size, height - start_y); + const int max_x = GetMin(tile_size, width - start_x); + // Whether there exist columns just outside the tile. + const int have_left = (start_x > 0); + // Position and size of the strip covering the tile and adjacent columns if + // they exist. + const int context_start_x = start_x - have_left; +#if (WEBP_NEAR_LOSSLESS == 1) + const int context_width = max_x + have_left + (max_x < width - start_x); +#endif + const int tiles_per_row = VP8LSubSampleSize(width, bits); + // Prediction modes of the left and above neighbor tiles. + const int left_mode = (tile_x > 0) ? + (modes[tile_y * tiles_per_row + tile_x - 1] >> 8) & 0xff : 0xff; + const int above_mode = (tile_y > 0) ? + (modes[(tile_y - 1) * tiles_per_row + tile_x] >> 8) & 0xff : 0xff; + // The width of upper_row and current_row is one pixel larger than image width + // to allow the top right pixel to point to the leftmost pixel of the next row + // when at the right edge. + uint32_t* upper_row = argb_scratch; + uint32_t* current_row = upper_row + width + 1; + uint8_t* const max_diffs = (uint8_t*)(current_row + width + 1); + float best_diff = MAX_DIFF_COST; + int best_mode = 0; + int mode; + int histo_stack_1[4][256]; + int histo_stack_2[4][256]; + // Need pointers to be able to swap arrays. + int (*histo_argb)[256] = histo_stack_1; + int (*best_histo)[256] = histo_stack_2; + int i, j; + uint32_t residuals[1 << MAX_TRANSFORM_BITS]; + assert(bits <= MAX_TRANSFORM_BITS); + assert(max_x <= (1 << MAX_TRANSFORM_BITS)); + + for (mode = 0; mode < kNumPredModes; ++mode) { + float cur_diff; + int relative_y; + memset(histo_argb, 0, sizeof(histo_stack_1)); + if (start_y > 0) { + // Read the row above the tile which will become the first upper_row. + // Include a pixel to the left if it exists; include a pixel to the right + // in all cases (wrapping to the leftmost pixel of the next row if it does + // not exist). + memcpy(current_row + context_start_x, + argb + (start_y - 1) * width + context_start_x, + sizeof(*argb) * (max_x + have_left + 1)); + } + for (relative_y = 0; relative_y < max_y; ++relative_y) { + const int y = start_y + relative_y; + int relative_x; + uint32_t* tmp = upper_row; + upper_row = current_row; + current_row = tmp; + // Read current_row. Include a pixel to the left if it exists; include a + // pixel to the right in all cases except at the bottom right corner of + // the image (wrapping to the leftmost pixel of the next row if it does + // not exist in the current row). 
+ memcpy(current_row + context_start_x, + argb + y * width + context_start_x, + sizeof(*argb) * (max_x + have_left + (y + 1 < height))); +#if (WEBP_NEAR_LOSSLESS == 1) + if (max_quantization > 1 && y >= 1 && y + 1 < height) { + MaxDiffsForRow(context_width, width, argb + y * width + context_start_x, + max_diffs + context_start_x, used_subtract_green); + } +#endif + + GetResidual(width, height, upper_row, current_row, max_diffs, mode, + start_x, start_x + max_x, y, max_quantization, exact, + used_subtract_green, residuals); + for (relative_x = 0; relative_x < max_x; ++relative_x) { + UpdateHisto(histo_argb, residuals[relative_x]); + } + } + cur_diff = PredictionCostSpatialHistogram( + (const int (*)[256])accumulated, (const int (*)[256])histo_argb); + // Favor keeping the areas locally similar. + if (mode == left_mode) cur_diff -= kSpatialPredictorBias; + if (mode == above_mode) cur_diff -= kSpatialPredictorBias; + + if (cur_diff < best_diff) { + int (*tmp)[256] = histo_argb; + histo_argb = best_histo; + best_histo = tmp; + best_diff = cur_diff; + best_mode = mode; + } + } + + for (i = 0; i < 4; i++) { + for (j = 0; j < 256; j++) { + accumulated[i][j] += best_histo[i][j]; + } + } + + return best_mode; +} + +// Converts pixels of the image to residuals with respect to predictions. +// If max_quantization > 1, applies near lossless processing, quantizing +// residuals to multiples of quantization levels up to max_quantization +// (the actual quantization level depends on smoothness near the given pixel). +static void CopyImageWithPrediction(int width, int height, + int bits, uint32_t* const modes, + uint32_t* const argb_scratch, + uint32_t* const argb, + int low_effort, int max_quantization, + int exact, int used_subtract_green) { + const int tiles_per_row = VP8LSubSampleSize(width, bits); + // The width of upper_row and current_row is one pixel larger than image width + // to allow the top right pixel to point to the leftmost pixel of the next row + // when at the right edge. + uint32_t* upper_row = argb_scratch; + uint32_t* current_row = upper_row + width + 1; + uint8_t* current_max_diffs = (uint8_t*)(current_row + width + 1); +#if (WEBP_NEAR_LOSSLESS == 1) + uint8_t* lower_max_diffs = current_max_diffs + width; +#endif + int y; + + for (y = 0; y < height; ++y) { + int x; + uint32_t* const tmp32 = upper_row; + upper_row = current_row; + current_row = tmp32; + memcpy(current_row, argb + y * width, + sizeof(*argb) * (width + (y + 1 < height))); + + if (low_effort) { + PredictBatch(kPredLowEffort, 0, y, width, current_row, upper_row, + argb + y * width); + } else { +#if (WEBP_NEAR_LOSSLESS == 1) + if (max_quantization > 1) { + // Compute max_diffs for the lower row now, because that needs the + // contents of argb for the current row, which we will overwrite with + // residuals before proceeding with the next row. 
+ uint8_t* const tmp8 = current_max_diffs; + current_max_diffs = lower_max_diffs; + lower_max_diffs = tmp8; + if (y + 2 < height) { + MaxDiffsForRow(width, width, argb + (y + 1) * width, lower_max_diffs, + used_subtract_green); + } + } +#endif + for (x = 0; x < width;) { + const int mode = + (modes[(y >> bits) * tiles_per_row + (x >> bits)] >> 8) & 0xff; + int x_end = x + (1 << bits); + if (x_end > width) x_end = width; + GetResidual(width, height, upper_row, current_row, current_max_diffs, + mode, x, x_end, y, max_quantization, exact, + used_subtract_green, argb + y * width + x); + x = x_end; + } + } + } +} + +// Finds the best predictor for each tile, and converts the image to residuals +// with respect to predictions. If near_lossless_quality < 100, applies +// near lossless processing, shaving off more bits of residuals for lower +// qualities. +void VP8LResidualImage(int width, int height, int bits, int low_effort, + uint32_t* const argb, uint32_t* const argb_scratch, + uint32_t* const image, int near_lossless_quality, + int exact, int used_subtract_green) { + const int tiles_per_row = VP8LSubSampleSize(width, bits); + const int tiles_per_col = VP8LSubSampleSize(height, bits); + int tile_y; + int histo[4][256]; + const int max_quantization = 1 << VP8LNearLosslessBits(near_lossless_quality); + if (low_effort) { + int i; + for (i = 0; i < tiles_per_row * tiles_per_col; ++i) { + image[i] = ARGB_BLACK | (kPredLowEffort << 8); + } + } else { + memset(histo, 0, sizeof(histo)); + for (tile_y = 0; tile_y < tiles_per_col; ++tile_y) { + int tile_x; + for (tile_x = 0; tile_x < tiles_per_row; ++tile_x) { + const int pred = GetBestPredictorForTile(width, height, tile_x, tile_y, + bits, histo, argb_scratch, argb, max_quantization, exact, + used_subtract_green, image); + image[tile_y * tiles_per_row + tile_x] = ARGB_BLACK | (pred << 8); + } + } + } + + CopyImageWithPrediction(width, height, bits, image, argb_scratch, argb, + low_effort, max_quantization, exact, + used_subtract_green); +} + +//------------------------------------------------------------------------------ +// Color transform functions. + +static WEBP_INLINE void MultipliersClear(VP8LMultipliers* const m) { + m->green_to_red_ = 0; + m->green_to_blue_ = 0; + m->red_to_blue_ = 0; +} + +static WEBP_INLINE void ColorCodeToMultipliers(uint32_t color_code, + VP8LMultipliers* const m) { + m->green_to_red_ = (color_code >> 0) & 0xff; + m->green_to_blue_ = (color_code >> 8) & 0xff; + m->red_to_blue_ = (color_code >> 16) & 0xff; +} + +static WEBP_INLINE uint32_t MultipliersToColorCode( + const VP8LMultipliers* const m) { + return 0xff000000u | + ((uint32_t)(m->red_to_blue_) << 16) | + ((uint32_t)(m->green_to_blue_) << 8) | + m->green_to_red_; +} + +static float PredictionCostCrossColor(const int accumulated[256], + const int counts[256]) { + // Favor low entropy, locally and globally. 
+ // Favor small absolute values for PredictionCostSpatial + static const double kExpValue = 2.4; + return VP8LCombinedShannonEntropy(counts, accumulated) + + PredictionCostSpatial(counts, 3, kExpValue); +} + +static float GetPredictionCostCrossColorRed( + const uint32_t* argb, int stride, int tile_width, int tile_height, + VP8LMultipliers prev_x, VP8LMultipliers prev_y, int green_to_red, + const int accumulated_red_histo[256]) { + int histo[256] = { 0 }; + float cur_diff; + + VP8LCollectColorRedTransforms(argb, stride, tile_width, tile_height, + green_to_red, histo); + + cur_diff = PredictionCostCrossColor(accumulated_red_histo, histo); + if ((uint8_t)green_to_red == prev_x.green_to_red_) { + cur_diff -= 3; // favor keeping the areas locally similar + } + if ((uint8_t)green_to_red == prev_y.green_to_red_) { + cur_diff -= 3; // favor keeping the areas locally similar + } + if (green_to_red == 0) { + cur_diff -= 3; + } + return cur_diff; +} + +static void GetBestGreenToRed( + const uint32_t* argb, int stride, int tile_width, int tile_height, + VP8LMultipliers prev_x, VP8LMultipliers prev_y, int quality, + const int accumulated_red_histo[256], VP8LMultipliers* const best_tx) { + const int kMaxIters = 4 + ((7 * quality) >> 8); // in range [4..6] + int green_to_red_best = 0; + int iter, offset; + float best_diff = GetPredictionCostCrossColorRed( + argb, stride, tile_width, tile_height, prev_x, prev_y, + green_to_red_best, accumulated_red_histo); + for (iter = 0; iter < kMaxIters; ++iter) { + // ColorTransformDelta is a 3.5 bit fixed point, so 32 is equal to + // one in color computation. Having initial delta here as 1 is sufficient + // to explore the range of (-2, 2). + const int delta = 32 >> iter; + // Try a negative and a positive delta from the best known value. 
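+    // (Editor's worked example, illustrative: with kMaxIters == 6 the deltas
+    // tried are 32, 16, 8, 4, 2, 1, so green_to_red_best can drift by at most
+    // +/-63 from its starting value of 0 -- about the (-2, 2) range in the
+    // fixed point mentioned above, since 32 represents 1.0.)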
+ for (offset = -delta; offset <= delta; offset += 2 * delta) { + const int green_to_red_cur = offset + green_to_red_best; + const float cur_diff = GetPredictionCostCrossColorRed( + argb, stride, tile_width, tile_height, prev_x, prev_y, + green_to_red_cur, accumulated_red_histo); + if (cur_diff < best_diff) { + best_diff = cur_diff; + green_to_red_best = green_to_red_cur; + } + } + } + best_tx->green_to_red_ = (green_to_red_best & 0xff); +} + +static float GetPredictionCostCrossColorBlue( + const uint32_t* argb, int stride, int tile_width, int tile_height, + VP8LMultipliers prev_x, VP8LMultipliers prev_y, + int green_to_blue, int red_to_blue, const int accumulated_blue_histo[256]) { + int histo[256] = { 0 }; + float cur_diff; + + VP8LCollectColorBlueTransforms(argb, stride, tile_width, tile_height, + green_to_blue, red_to_blue, histo); + + cur_diff = PredictionCostCrossColor(accumulated_blue_histo, histo); + if ((uint8_t)green_to_blue == prev_x.green_to_blue_) { + cur_diff -= 3; // favor keeping the areas locally similar + } + if ((uint8_t)green_to_blue == prev_y.green_to_blue_) { + cur_diff -= 3; // favor keeping the areas locally similar + } + if ((uint8_t)red_to_blue == prev_x.red_to_blue_) { + cur_diff -= 3; // favor keeping the areas locally similar + } + if ((uint8_t)red_to_blue == prev_y.red_to_blue_) { + cur_diff -= 3; // favor keeping the areas locally similar + } + if (green_to_blue == 0) { + cur_diff -= 3; + } + if (red_to_blue == 0) { + cur_diff -= 3; + } + return cur_diff; +} + +#define kGreenRedToBlueNumAxis 8 +#define kGreenRedToBlueMaxIters 7 +static void GetBestGreenRedToBlue( + const uint32_t* argb, int stride, int tile_width, int tile_height, + VP8LMultipliers prev_x, VP8LMultipliers prev_y, int quality, + const int accumulated_blue_histo[256], + VP8LMultipliers* const best_tx) { + const int8_t offset[kGreenRedToBlueNumAxis][2] = + {{0, -1}, {0, 1}, {-1, 0}, {1, 0}, {-1, -1}, {-1, 1}, {1, -1}, {1, 1}}; + const int8_t delta_lut[kGreenRedToBlueMaxIters] = { 16, 16, 8, 4, 2, 2, 2 }; + const int iters = + (quality < 25) ? 1 : (quality > 50) ? kGreenRedToBlueMaxIters : 4; + int green_to_blue_best = 0; + int red_to_blue_best = 0; + int iter; + // Initial value at origin: + float best_diff = GetPredictionCostCrossColorBlue( + argb, stride, tile_width, tile_height, prev_x, prev_y, + green_to_blue_best, red_to_blue_best, accumulated_blue_histo); + for (iter = 0; iter < iters; ++iter) { + const int delta = delta_lut[iter]; + int axis; + for (axis = 0; axis < kGreenRedToBlueNumAxis; ++axis) { + const int green_to_blue_cur = + offset[axis][0] * delta + green_to_blue_best; + const int red_to_blue_cur = offset[axis][1] * delta + red_to_blue_best; + const float cur_diff = GetPredictionCostCrossColorBlue( + argb, stride, tile_width, tile_height, prev_x, prev_y, + green_to_blue_cur, red_to_blue_cur, accumulated_blue_histo); + if (cur_diff < best_diff) { + best_diff = cur_diff; + green_to_blue_best = green_to_blue_cur; + red_to_blue_best = red_to_blue_cur; + } + if (quality < 25 && iter == 4) { + // Only axis aligned diffs for lower quality. + break; // next iter. + } + } + if (delta == 2 && green_to_blue_best == 0 && red_to_blue_best == 0) { + // Further iterations would not help. + break; // out of iter-loop. 
+ } + } + best_tx->green_to_blue_ = green_to_blue_best & 0xff; + best_tx->red_to_blue_ = red_to_blue_best & 0xff; +} +#undef kGreenRedToBlueMaxIters +#undef kGreenRedToBlueNumAxis + +static VP8LMultipliers GetBestColorTransformForTile( + int tile_x, int tile_y, int bits, + VP8LMultipliers prev_x, + VP8LMultipliers prev_y, + int quality, int xsize, int ysize, + const int accumulated_red_histo[256], + const int accumulated_blue_histo[256], + const uint32_t* const argb) { + const int max_tile_size = 1 << bits; + const int tile_y_offset = tile_y * max_tile_size; + const int tile_x_offset = tile_x * max_tile_size; + const int all_x_max = GetMin(tile_x_offset + max_tile_size, xsize); + const int all_y_max = GetMin(tile_y_offset + max_tile_size, ysize); + const int tile_width = all_x_max - tile_x_offset; + const int tile_height = all_y_max - tile_y_offset; + const uint32_t* const tile_argb = argb + tile_y_offset * xsize + + tile_x_offset; + VP8LMultipliers best_tx; + MultipliersClear(&best_tx); + + GetBestGreenToRed(tile_argb, xsize, tile_width, tile_height, + prev_x, prev_y, quality, accumulated_red_histo, &best_tx); + GetBestGreenRedToBlue(tile_argb, xsize, tile_width, tile_height, + prev_x, prev_y, quality, accumulated_blue_histo, + &best_tx); + return best_tx; +} + +static void CopyTileWithColorTransform(int xsize, int ysize, + int tile_x, int tile_y, + int max_tile_size, + VP8LMultipliers color_transform, + uint32_t* argb) { + const int xscan = GetMin(max_tile_size, xsize - tile_x); + int yscan = GetMin(max_tile_size, ysize - tile_y); + argb += tile_y * xsize + tile_x; + while (yscan-- > 0) { + VP8LTransformColor(&color_transform, argb, xscan); + argb += xsize; + } +} + +void VP8LColorSpaceTransform(int width, int height, int bits, int quality, + uint32_t* const argb, uint32_t* image) { + const int max_tile_size = 1 << bits; + const int tile_xsize = VP8LSubSampleSize(width, bits); + const int tile_ysize = VP8LSubSampleSize(height, bits); + int accumulated_red_histo[256] = { 0 }; + int accumulated_blue_histo[256] = { 0 }; + int tile_x, tile_y; + VP8LMultipliers prev_x, prev_y; + MultipliersClear(&prev_y); + MultipliersClear(&prev_x); + for (tile_y = 0; tile_y < tile_ysize; ++tile_y) { + for (tile_x = 0; tile_x < tile_xsize; ++tile_x) { + int y; + const int tile_x_offset = tile_x * max_tile_size; + const int tile_y_offset = tile_y * max_tile_size; + const int all_x_max = GetMin(tile_x_offset + max_tile_size, width); + const int all_y_max = GetMin(tile_y_offset + max_tile_size, height); + const int offset = tile_y * tile_xsize + tile_x; + if (tile_y != 0) { + ColorCodeToMultipliers(image[offset - tile_xsize], &prev_y); + } + prev_x = GetBestColorTransformForTile(tile_x, tile_y, bits, + prev_x, prev_y, + quality, width, height, + accumulated_red_histo, + accumulated_blue_histo, + argb); + image[offset] = MultipliersToColorCode(&prev_x); + CopyTileWithColorTransform(width, height, tile_x_offset, tile_y_offset, + max_tile_size, prev_x, argb); + + // Gather accumulated histogram data. 
+      for (y = tile_y_offset; y < all_y_max; ++y) {
+        int ix = y * width + tile_x_offset;
+        const int ix_end = ix + all_x_max - tile_x_offset;
+        for (; ix < ix_end; ++ix) {
+          const uint32_t pix = argb[ix];
+          if (ix >= 2 &&
+              pix == argb[ix - 2] &&
+              pix == argb[ix - 1]) {
+            continue;  // repeated pixels are handled by backward references
+          }
+          if (ix >= width + 2 &&
+              argb[ix - 2] == argb[ix - width - 2] &&
+              argb[ix - 1] == argb[ix - width - 1] &&
+              pix == argb[ix - width]) {
+            continue;  // repeated pixels are handled by backward references
+          }
+          ++accumulated_red_histo[(pix >> 16) & 0xff];
+          ++accumulated_blue_histo[(pix >> 0) & 0xff];
+        }
+      }
+    }
+  }
+}
diff --git a/vendor/github.com/sizeofint/webpanimation/enc_quant_enc.c b/vendor/github.com/sizeofint/webpanimation/enc_quant_enc.c
new file mode 100644
index 00000000..911a67fb
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/enc_quant_enc.c
@@ -0,0 +1,1372 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Quantization
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <assert.h>
+#include <math.h>
+#include <stdlib.h>  // for abs()
+
+#include "dsp_quant.h"
+#include "enc_vp8i_enc.h"
+#include "enc_cost_enc.h"
+
+#define DO_TRELLIS_I4  1
+#define DO_TRELLIS_I16 1   // not a huge gain, but ok at low bitrate.
+#define DO_TRELLIS_UV  0   // disable trellis for UV. Risky. Not worth.
+#define USE_TDISTO 1
+
+#define MID_ALPHA 64      // neutral value for susceptibility
+#define MIN_ALPHA 30      // lowest usable value for susceptibility
+#define MAX_ALPHA 100     // higher meaningful value for susceptibility
+
+#define SNS_TO_DQ 0.9     // Scaling constant between the sns value and the QP
+                          // power-law modulation. Must be strictly less than 1.
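+
+// Editor's worked example (illustrative only; the helper is hypothetical, but
+// the constants and formulas are those of QualityToCompression() and
+// VP8SetSegmentParams() further below, and <math.h> is already included):
+#if 0
+static int ExampleBaseQuantAtQuality50(void) {
+  const double c = 50. / 100.;                        // quality 50, as [0, 1]
+  const double linear_c = c * (2. / 3.);              // low branch (c < 0.75)
+  const double compression = pow(linear_c, 1. / 3.);  // ~= 0.693
+  return (int)(127. * (1. - compression));            // 38 (38.94 truncated)
+}
+#endif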
+
+// number of non-zero coeffs below which we consider the block very flat
+// (and apply a penalty to complex predictions)
+#define FLATNESS_LIMIT_I16 0      // I16 mode (special case)
+#define FLATNESS_LIMIT_I4  3      // I4 mode
+#define FLATNESS_LIMIT_UV  2      // UV mode
+#define FLATNESS_PENALTY   140    // roughly ~1bit per block
+
+#define MULT_8B(a, b) (((a) * (b) + 128) >> 8)
+
+#define RD_DISTO_MULT      256  // distortion multiplier (equivalent of lambda)
+
+// #define DEBUG_BLOCK
+
+//------------------------------------------------------------------------------
+
+#if defined(DEBUG_BLOCK)
+
+#include <stdio.h>
+#include <stdlib.h>
+
+static void PrintBlockInfo(const VP8EncIterator* const it,
+                           const VP8ModeScore* const rd) {
+  int i, j;
+  const int is_i16 = (it->mb_->type_ == 1);
+  const uint8_t* const y_in = it->yuv_in_ + Y_OFF_ENC;
+  const uint8_t* const y_out = it->yuv_out_ + Y_OFF_ENC;
+  const uint8_t* const uv_in = it->yuv_in_ + U_OFF_ENC;
+  const uint8_t* const uv_out = it->yuv_out_ + U_OFF_ENC;
+  printf("SOURCE / OUTPUT / ABS DELTA\n");
+  for (j = 0; j < 16; ++j) {
+    for (i = 0; i < 16; ++i) printf("%3d ", y_in[i + j * BPS]);
+    printf(" ");
+    for (i = 0; i < 16; ++i) printf("%3d ", y_out[i + j * BPS]);
+    printf(" ");
+    for (i = 0; i < 16; ++i) {
+      printf("%1d ", abs(y_in[i + j * BPS] - y_out[i + j * BPS]));
+    }
+    printf("\n");
+  }
+  printf("\n");  // newline before the U/V block
+  for (j = 0; j < 8; ++j) {
+    for (i = 0; i < 8; ++i) printf("%3d ", uv_in[i + j * BPS]);
+    printf(" ");
+    for (i = 8; i < 16; ++i) printf("%3d ", uv_in[i + j * BPS]);
+    printf(" ");
+    for (i = 0; i < 8; ++i) printf("%3d ", uv_out[i + j * BPS]);
+    printf(" ");
+    for (i = 8; i < 16; ++i) printf("%3d ", uv_out[i + j * BPS]);
+    printf(" ");
+    for (i = 0; i < 8; ++i) {
+      printf("%1d ", abs(uv_out[i + j * BPS] - uv_in[i + j * BPS]));
+    }
+    printf(" ");
+    for (i = 8; i < 16; ++i) {
+      printf("%1d ", abs(uv_out[i + j * BPS] - uv_in[i + j * BPS]));
+    }
+    printf("\n");
+  }
+  printf("\nD:%d SD:%d R:%d H:%d nz:0x%x score:%d\n",
+         (int)rd->D, (int)rd->SD, (int)rd->R, (int)rd->H, (int)rd->nz,
+         (int)rd->score);
+  if (is_i16) {
+    printf("Mode: %d\n", rd->mode_i16);
+    printf("y_dc_levels:");
+    for (i = 0; i < 16; ++i) printf("%3d ", rd->y_dc_levels[i]);
+    printf("\n");
+  } else {
+    printf("Modes[16]: ");
+    for (i = 0; i < 16; ++i) printf("%d ", rd->modes_i4[i]);
+    printf("\n");
+  }
+  printf("y_ac_levels:\n");
+  for (j = 0; j < 16; ++j) {
+    for (i = is_i16 ? 1 : 0; i < 16; ++i) {
+      printf("%4d ", rd->y_ac_levels[j][i]);
+    }
+    printf("\n");
+  }
+  printf("\n");
+  printf("uv_levels (mode=%d):\n", rd->mode_uv);
+  for (j = 0; j < 8; ++j) {
+    for (i = 0; i < 16; ++i) {
+      printf("%4d ", rd->uv_levels[j][i]);
+    }
+    printf("\n");
+  }
+}
+
+#endif  // DEBUG_BLOCK
+
+//------------------------------------------------------------------------------
+
+static WEBP_INLINE int clip(int v, int m, int M) {
+  return v < m ? m : v > M ? 
M : v; +} + +static const uint8_t kZigzag[16] = { + 0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15 +}; + +static const uint8_t kDcTable[128] = { + 4, 5, 6, 7, 8, 9, 10, 10, + 11, 12, 13, 14, 15, 16, 17, 17, + 18, 19, 20, 20, 21, 21, 22, 22, + 23, 23, 24, 25, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, + 37, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, + 59, 60, 61, 62, 63, 64, 65, 66, + 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, + 91, 93, 95, 96, 98, 100, 101, 102, + 104, 106, 108, 110, 112, 114, 116, 118, + 122, 124, 126, 128, 130, 132, 134, 136, + 138, 140, 143, 145, 148, 151, 154, 157 +}; + +static const uint16_t kAcTable[128] = { + 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 60, + 62, 64, 66, 68, 70, 72, 74, 76, + 78, 80, 82, 84, 86, 88, 90, 92, + 94, 96, 98, 100, 102, 104, 106, 108, + 110, 112, 114, 116, 119, 122, 125, 128, + 131, 134, 137, 140, 143, 146, 149, 152, + 155, 158, 161, 164, 167, 170, 173, 177, + 181, 185, 189, 193, 197, 201, 205, 209, + 213, 217, 221, 225, 229, 234, 239, 245, + 249, 254, 259, 264, 269, 274, 279, 284 +}; + +static const uint16_t kAcTable2[128] = { + 8, 8, 9, 10, 12, 13, 15, 17, + 18, 20, 21, 23, 24, 26, 27, 29, + 31, 32, 34, 35, 37, 38, 40, 41, + 43, 44, 46, 48, 49, 51, 52, 54, + 55, 57, 58, 60, 62, 63, 65, 66, + 68, 69, 71, 72, 74, 75, 77, 79, + 80, 82, 83, 85, 86, 88, 89, 93, + 96, 99, 102, 105, 108, 111, 114, 117, + 120, 124, 127, 130, 133, 136, 139, 142, + 145, 148, 151, 155, 158, 161, 164, 167, + 170, 173, 176, 179, 184, 189, 193, 198, + 203, 207, 212, 217, 221, 226, 230, 235, + 240, 244, 249, 254, 258, 263, 268, 274, + 280, 286, 292, 299, 305, 311, 317, 323, + 330, 336, 342, 348, 354, 362, 370, 379, + 385, 393, 401, 409, 416, 424, 432, 440 +}; + +static const uint8_t kBiasMatrices[3][2] = { // [luma-ac,luma-dc,chroma][dc,ac] + { 96, 110 }, { 96, 108 }, { 110, 115 } +}; + +// Sharpening by (slightly) raising the hi-frequency coeffs. +// Hack-ish but helpful for mid-bitrate range. Use with care. 
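+// (Editor's worked example, illustrative: with an AC quantizer q_[15] = 64,
+// ExpandMatrix() below computes sharpen_[15] = (90 * 64) >> 11 = 2, so the
+// highest-frequency coefficient is nudged up by 2 before quantization.)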
+#define SHARPEN_BITS 11 // number of descaling bits for sharpening bias +static const uint8_t kFreqSharpening[16] = { + 0, 30, 60, 90, + 30, 60, 90, 90, + 60, 90, 90, 90, + 90, 90, 90, 90 +}; + +//------------------------------------------------------------------------------ +// Initialize quantization parameters in VP8Matrix + +// Returns the average quantizer +static int ExpandMatrix(VP8Matrix* const m, int type) { + int i, sum; + for (i = 0; i < 2; ++i) { + const int is_ac_coeff = (i > 0); + const int bias = kBiasMatrices[type][is_ac_coeff]; + m->iq_[i] = (1 << QFIX) / m->q_[i]; + m->bias_[i] = BIAS(bias); + // zthresh_ is the exact value such that QUANTDIV(coeff, iQ, B) is: + // * zero if coeff <= zthresh + // * non-zero if coeff > zthresh + m->zthresh_[i] = ((1 << QFIX) - 1 - m->bias_[i]) / m->iq_[i]; + } + for (i = 2; i < 16; ++i) { + m->q_[i] = m->q_[1]; + m->iq_[i] = m->iq_[1]; + m->bias_[i] = m->bias_[1]; + m->zthresh_[i] = m->zthresh_[1]; + } + for (sum = 0, i = 0; i < 16; ++i) { + if (type == 0) { // we only use sharpening for AC luma coeffs + m->sharpen_[i] = (kFreqSharpening[i] * m->q_[i]) >> SHARPEN_BITS; + } else { + m->sharpen_[i] = 0; + } + sum += m->q_[i]; + } + return (sum + 8) >> 4; +} + +static void CheckLambdaValue(int* const v) { if (*v < 1) *v = 1; } + +static void SetupMatrices(VP8Encoder* enc) { + int i; + const int tlambda_scale = + (enc->method_ >= 4) ? enc->config_->sns_strength + : 0; + const int num_segments = enc->segment_hdr_.num_segments_; + for (i = 0; i < num_segments; ++i) { + VP8SegmentInfo* const m = &enc->dqm_[i]; + const int q = m->quant_; + int q_i4, q_i16, q_uv; + m->y1_.q_[0] = kDcTable[clip(q + enc->dq_y1_dc_, 0, 127)]; + m->y1_.q_[1] = kAcTable[clip(q, 0, 127)]; + + m->y2_.q_[0] = kDcTable[ clip(q + enc->dq_y2_dc_, 0, 127)] * 2; + m->y2_.q_[1] = kAcTable2[clip(q + enc->dq_y2_ac_, 0, 127)]; + + m->uv_.q_[0] = kDcTable[clip(q + enc->dq_uv_dc_, 0, 117)]; + m->uv_.q_[1] = kAcTable[clip(q + enc->dq_uv_ac_, 0, 127)]; + + q_i4 = ExpandMatrix(&m->y1_, 0); + q_i16 = ExpandMatrix(&m->y2_, 1); + q_uv = ExpandMatrix(&m->uv_, 2); + + m->lambda_i4_ = (3 * q_i4 * q_i4) >> 7; + m->lambda_i16_ = (3 * q_i16 * q_i16); + m->lambda_uv_ = (3 * q_uv * q_uv) >> 6; + m->lambda_mode_ = (1 * q_i4 * q_i4) >> 7; + m->lambda_trellis_i4_ = (7 * q_i4 * q_i4) >> 3; + m->lambda_trellis_i16_ = (q_i16 * q_i16) >> 2; + m->lambda_trellis_uv_ = (q_uv * q_uv) << 1; + m->tlambda_ = (tlambda_scale * q_i4) >> 5; + + // none of these constants should be < 1 + CheckLambdaValue(&m->lambda_i4_); + CheckLambdaValue(&m->lambda_i16_); + CheckLambdaValue(&m->lambda_uv_); + CheckLambdaValue(&m->lambda_mode_); + CheckLambdaValue(&m->lambda_trellis_i4_); + CheckLambdaValue(&m->lambda_trellis_i16_); + CheckLambdaValue(&m->lambda_trellis_uv_); + CheckLambdaValue(&m->tlambda_); + + m->min_disto_ = 20 * m->y1_.q_[0]; // quantization-aware min disto + m->max_edge_ = 0; + + m->i4_penalty_ = 1000 * q_i4 * q_i4; + } +} + +//------------------------------------------------------------------------------ +// Initialize filtering parameters + +// Very small filter-strength values have close to no visual effect. So we can +// save a little decoding-CPU by turning filtering off for these. +#define FSTRENGTH_CUTOFF 2 + +static void SetupFilterStrength(VP8Encoder* const enc) { + int i; + // level0 is in [0..500]. Using '-f 50' as filter_strength is mid-filtering. 
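+  // (Editor's note: config filter_strength lives in [0..100], hence the
+  // [0..500] span of level0 below.)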
+ const int level0 = 5 * enc->config_->filter_strength; + for (i = 0; i < NUM_MB_SEGMENTS; ++i) { + VP8SegmentInfo* const m = &enc->dqm_[i]; + // We focus on the quantization of AC coeffs. + const int qstep = kAcTable[clip(m->quant_, 0, 127)] >> 2; + const int base_strength = + VP8FilterStrengthFromDelta(enc->filter_hdr_.sharpness_, qstep); + // Segments with lower complexity ('beta') will be less filtered. + const int f = base_strength * level0 / (256 + m->beta_); + m->fstrength_ = (f < FSTRENGTH_CUTOFF) ? 0 : (f > 63) ? 63 : f; + } + // We record the initial strength (mainly for the case of 1-segment only). + enc->filter_hdr_.level_ = enc->dqm_[0].fstrength_; + enc->filter_hdr_.simple_ = (enc->config_->filter_type == 0); + enc->filter_hdr_.sharpness_ = enc->config_->filter_sharpness; +} + +//------------------------------------------------------------------------------ + +// Note: if you change the values below, remember that the max range +// allowed by the syntax for DQ_UV is [-16,16]. +#define MAX_DQ_UV (6) +#define MIN_DQ_UV (-4) + +// We want to emulate jpeg-like behaviour where the expected "good" quality +// is around q=75. Internally, our "good" middle is around c=50. So we +// map accordingly using linear piece-wise function +static double QualityToCompression(double c) { + const double linear_c = (c < 0.75) ? c * (2. / 3.) : 2. * c - 1.; + // The file size roughly scales as pow(quantizer, 3.). Actually, the + // exponent is somewhere between 2.8 and 3.2, but we're mostly interested + // in the mid-quant range. So we scale the compressibility inversely to + // this power-law: quant ~= compression ^ 1/3. This law holds well for + // low quant. Finer modeling for high-quant would make use of kAcTable[] + // more explicitly. + const double v = pow(linear_c, 1 / 3.); + return v; +} + +static double QualityToJPEGCompression(double c, double alpha) { + // We map the complexity 'alpha' and quality setting 'c' to a compression + // exponent empirically matched to the compression curve of libjpeg6b. + // On average, the WebP output size will be roughly similar to that of a + // JPEG file compressed with same quality factor. + const double amin = 0.30; + const double amax = 0.85; + const double exp_min = 0.4; + const double exp_max = 0.9; + const double slope = (exp_min - exp_max) / (amax - amin); + // Linearly interpolate 'expn' from exp_min to exp_max + // in the [amin, amax] range. + const double expn = (alpha > amax) ? exp_min + : (alpha < amin) ? exp_max + : exp_max + slope * (alpha - amin); + const double v = pow(c, expn); + return v; +} + +static int SegmentsAreEquivalent(const VP8SegmentInfo* const S1, + const VP8SegmentInfo* const S2) { + return (S1->quant_ == S2->quant_) && (S1->fstrength_ == S2->fstrength_); +} + +static void SimplifySegments(VP8Encoder* const enc) { + int map[NUM_MB_SEGMENTS] = { 0, 1, 2, 3 }; + // 'num_segments_' is previously validated and <= NUM_MB_SEGMENTS, but an + // explicit check is needed to avoid a spurious warning about 'i' exceeding + // array bounds of 'dqm_' with some compilers (noticed with gcc-4.9). + const int num_segments = (enc->segment_hdr_.num_segments_ < NUM_MB_SEGMENTS) + ? 
enc->segment_hdr_.num_segments_ + : NUM_MB_SEGMENTS; + int num_final_segments = 1; + int s1, s2; + for (s1 = 1; s1 < num_segments; ++s1) { // find similar segments + const VP8SegmentInfo* const S1 = &enc->dqm_[s1]; + int found = 0; + // check if we already have similar segment + for (s2 = 0; s2 < num_final_segments; ++s2) { + const VP8SegmentInfo* const S2 = &enc->dqm_[s2]; + if (SegmentsAreEquivalent(S1, S2)) { + found = 1; + break; + } + } + map[s1] = s2; + if (!found) { + if (num_final_segments != s1) { + enc->dqm_[num_final_segments] = enc->dqm_[s1]; + } + ++num_final_segments; + } + } + if (num_final_segments < num_segments) { // Remap + int i = enc->mb_w_ * enc->mb_h_; + while (i-- > 0) enc->mb_info_[i].segment_ = map[enc->mb_info_[i].segment_]; + enc->segment_hdr_.num_segments_ = num_final_segments; + // Replicate the trailing segment infos (it's mostly cosmetics) + for (i = num_final_segments; i < num_segments; ++i) { + enc->dqm_[i] = enc->dqm_[num_final_segments - 1]; + } + } +} + +void VP8SetSegmentParams(VP8Encoder* const enc, float quality) { + int i; + int dq_uv_ac, dq_uv_dc; + const int num_segments = enc->segment_hdr_.num_segments_; + const double amp = SNS_TO_DQ * enc->config_->sns_strength / 100. / 128.; + const double Q = quality / 100.; + const double c_base = enc->config_->emulate_jpeg_size ? + QualityToJPEGCompression(Q, enc->alpha_ / 255.) : + QualityToCompression(Q); + for (i = 0; i < num_segments; ++i) { + // We modulate the base coefficient to accommodate for the quantization + // susceptibility and allow denser segments to be quantized more. + const double expn = 1. - amp * enc->dqm_[i].alpha_; + const double c = pow(c_base, expn); + const int q = (int)(127. * (1. - c)); + assert(expn > 0.); + enc->dqm_[i].quant_ = clip(q, 0, 127); + } + + // purely indicative in the bitstream (except for the 1-segment case) + enc->base_quant_ = enc->dqm_[0].quant_; + + // fill-in values for the unused segments (required by the syntax) + for (i = num_segments; i < NUM_MB_SEGMENTS; ++i) { + enc->dqm_[i].quant_ = enc->base_quant_; + } + + // uv_alpha_ is normally spread around ~60. The useful range is + // typically ~30 (quite bad) to ~100 (ok to decimate UV more). + // We map it to the safe maximal range of MAX/MIN_DQ_UV for dq_uv. + dq_uv_ac = (enc->uv_alpha_ - MID_ALPHA) * (MAX_DQ_UV - MIN_DQ_UV) + / (MAX_ALPHA - MIN_ALPHA); + // we rescale by the user-defined strength of adaptation + dq_uv_ac = dq_uv_ac * enc->config_->sns_strength / 100; + // and make it safe. + dq_uv_ac = clip(dq_uv_ac, MIN_DQ_UV, MAX_DQ_UV); + // We also boost the dc-uv-quant a little, based on sns-strength, since + // U/V channels are quite more reactive to high quants (flat DC-blocks + // tend to appear, and are unpleasant). 
+ dq_uv_dc = -4 * enc->config_->sns_strength / 100; + dq_uv_dc = clip(dq_uv_dc, -15, 15); // 4bit-signed max allowed + + enc->dq_y1_dc_ = 0; // TODO(skal): dq-lum + enc->dq_y2_dc_ = 0; + enc->dq_y2_ac_ = 0; + enc->dq_uv_dc_ = dq_uv_dc; + enc->dq_uv_ac_ = dq_uv_ac; + + SetupFilterStrength(enc); // initialize segments' filtering, eventually + + if (num_segments > 1) SimplifySegments(enc); + + SetupMatrices(enc); // finalize quantization matrices +} + +//------------------------------------------------------------------------------ +// Form the predictions in cache + +// Must be ordered using {DC_PRED, TM_PRED, V_PRED, H_PRED} as index +const uint16_t VP8I16ModeOffsets[4] = { I16DC16, I16TM16, I16VE16, I16HE16 }; +const uint16_t VP8UVModeOffsets[4] = { C8DC8, C8TM8, C8VE8, C8HE8 }; + +// Must be indexed using {B_DC_PRED -> B_HU_PRED} as index +const uint16_t VP8I4ModeOffsets[NUM_BMODES] = { + I4DC4, I4TM4, I4VE4, I4HE4, I4RD4, I4VR4, I4LD4, I4VL4, I4HD4, I4HU4 +}; + +void VP8MakeLuma16Preds(const VP8EncIterator* const it) { + const uint8_t* const left = it->x_ ? it->y_left_ : NULL; + const uint8_t* const top = it->y_ ? it->y_top_ : NULL; + VP8EncPredLuma16(it->yuv_p_, left, top); +} + +void VP8MakeChroma8Preds(const VP8EncIterator* const it) { + const uint8_t* const left = it->x_ ? it->u_left_ : NULL; + const uint8_t* const top = it->y_ ? it->uv_top_ : NULL; + VP8EncPredChroma8(it->yuv_p_, left, top); +} + +void VP8MakeIntra4Preds(const VP8EncIterator* const it) { + VP8EncPredLuma4(it->yuv_p_, it->i4_top_); +} + +//------------------------------------------------------------------------------ +// Quantize + +// Layout: +// +----+----+ +// |YYYY|UUVV| 0 +// |YYYY|UUVV| 4 +// |YYYY|....| 8 +// |YYYY|....| 12 +// +----+----+ + +const uint16_t VP8Scan[16] = { // Luma + 0 + 0 * BPS, 4 + 0 * BPS, 8 + 0 * BPS, 12 + 0 * BPS, + 0 + 4 * BPS, 4 + 4 * BPS, 8 + 4 * BPS, 12 + 4 * BPS, + 0 + 8 * BPS, 4 + 8 * BPS, 8 + 8 * BPS, 12 + 8 * BPS, + 0 + 12 * BPS, 4 + 12 * BPS, 8 + 12 * BPS, 12 + 12 * BPS, +}; + +static const uint16_t VP8ScanUV[4 + 4] = { + 0 + 0 * BPS, 4 + 0 * BPS, 0 + 4 * BPS, 4 + 4 * BPS, // U + 8 + 0 * BPS, 12 + 0 * BPS, 8 + 4 * BPS, 12 + 4 * BPS // V +}; + +//------------------------------------------------------------------------------ +// Distortion measurement + +static const uint16_t kWeightY[16] = { + 38, 32, 20, 9, 32, 28, 17, 7, 20, 17, 10, 4, 9, 7, 4, 2 +}; + +static const uint16_t kWeightTrellis[16] = { +#if USE_TDISTO == 0 + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16 +#else + 30, 27, 19, 11, + 27, 24, 17, 10, + 19, 17, 12, 8, + 11, 10, 8, 6 +#endif +}; + +// Init/Copy the common fields in score. +static void InitScore(VP8ModeScore* const rd) { + rd->D = 0; + rd->SD = 0; + rd->R = 0; + rd->H = 0; + rd->nz = 0; + rd->score = MAX_COST; +} + +static void CopyScore(VP8ModeScore* const dst, const VP8ModeScore* const src) { + dst->D = src->D; + dst->SD = src->SD; + dst->R = src->R; + dst->H = src->H; + dst->nz = src->nz; // note that nz is not accumulated, but just copied. + dst->score = src->score; +} + +static void AddScore(VP8ModeScore* const dst, const VP8ModeScore* const src) { + dst->D += src->D; + dst->SD += src->SD; + dst->R += src->R; + dst->H += src->H; + dst->nz |= src->nz; // here, new nz bits are accumulated. + dst->score += src->score; +} + +//------------------------------------------------------------------------------ +// Performs trellis-optimized quantization. 
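+//
+// (Editor's sketch, illustrative and not upstream text: the trellis below is a
+// small dynamic program over the 16 zigzag positions. Each position keeps two
+// candidate levels -- the neutral-bias quantization and that level plus one --
+// and each edge prices the rate of coding the level in its context together
+// with the kWeightTrellis-weighted distortion delta; the cheapest terminating
+// path through the graph is kept.)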
+ +// Trellis node +typedef struct { + int8_t prev; // best previous node + int8_t sign; // sign of coeff_i + int16_t level; // level +} Node; + +// Score state +typedef struct { + score_t score; // partial RD score + const uint16_t* costs; // shortcut to cost tables +} ScoreState; + +// If a coefficient was quantized to a value Q (using a neutral bias), +// we test all alternate possibilities between [Q-MIN_DELTA, Q+MAX_DELTA] +// We don't test negative values though. +#define MIN_DELTA 0 // how much lower level to try +#define MAX_DELTA 1 // how much higher +#define NUM_NODES (MIN_DELTA + 1 + MAX_DELTA) +#define NODE(n, l) (nodes[(n)][(l) + MIN_DELTA]) +#define SCORE_STATE(n, l) (score_states[n][(l) + MIN_DELTA]) + +static WEBP_INLINE void SetRDScore(int lambda, VP8ModeScore* const rd) { + rd->score = (rd->R + rd->H) * lambda + RD_DISTO_MULT * (rd->D + rd->SD); +} + +static WEBP_INLINE score_t RDScoreTrellis(int lambda, score_t rate, + score_t distortion) { + return rate * lambda + RD_DISTO_MULT * distortion; +} + +static int TrellisQuantizeBlock(const VP8Encoder* const enc, + int16_t in[16], int16_t out[16], + int ctx0, int coeff_type, + const VP8Matrix* const mtx, + int lambda) { + const ProbaArray* const probas = enc->proba_.coeffs_[coeff_type]; + CostArrayPtr const costs = + (CostArrayPtr)enc->proba_.remapped_costs_[coeff_type]; + const int first = (coeff_type == 0) ? 1 : 0; + Node nodes[16][NUM_NODES]; + ScoreState score_states[2][NUM_NODES]; + ScoreState* ss_cur = &SCORE_STATE(0, MIN_DELTA); + ScoreState* ss_prev = &SCORE_STATE(1, MIN_DELTA); + int best_path[3] = {-1, -1, -1}; // store best-last/best-level/best-previous + score_t best_score; + int n, m, p, last; + + { + score_t cost; + const int thresh = mtx->q_[1] * mtx->q_[1] / 4; + const int last_proba = probas[VP8EncBands[first]][ctx0][0]; + + // compute the position of the last interesting coefficient + last = first - 1; + for (n = 15; n >= first; --n) { + const int j = kZigzag[n]; + const int err = in[j] * in[j]; + if (err > thresh) { + last = n; + break; + } + } + // we don't need to go inspect up to n = 16 coeffs. We can just go up + // to last + 1 (inclusive) without losing much. + if (last < 15) ++last; + + // compute 'skip' score. This is the max score one can do. + cost = VP8BitCost(0, last_proba); + best_score = RDScoreTrellis(lambda, cost, 0); + + // initialize source node. + for (m = -MIN_DELTA; m <= MAX_DELTA; ++m) { + const score_t rate = (ctx0 == 0) ? VP8BitCost(1, last_proba) : 0; + ss_cur[m].score = RDScoreTrellis(lambda, rate, 0); + ss_cur[m].costs = costs[first][ctx0]; + } + } + + // traverse trellis. + for (n = first; n <= last; ++n) { + const int j = kZigzag[n]; + const uint32_t Q = mtx->q_[j]; + const uint32_t iQ = mtx->iq_[j]; + const uint32_t B = BIAS(0x00); // neutral bias + // note: it's important to take sign of the _original_ coeff, + // so we don't have to consider level < 0 afterward. + const int sign = (in[j] < 0); + const uint32_t coeff0 = (sign ? -in[j] : in[j]) + mtx->sharpen_[j]; + int level0 = QUANTDIV(coeff0, iQ, B); + int thresh_level = QUANTDIV(coeff0, iQ, BIAS(0x80)); + if (thresh_level > MAX_LEVEL) thresh_level = MAX_LEVEL; + if (level0 > MAX_LEVEL) level0 = MAX_LEVEL; + + { // Swap current and previous score states + ScoreState* const tmp = ss_cur; + ss_cur = ss_prev; + ss_prev = tmp; + } + + // test all alternate level values around level0. + for (m = -MIN_DELTA; m <= MAX_DELTA; ++m) { + Node* const cur = &NODE(n, m); + int level = level0 + m; + const int ctx = (level > 2) ? 
2 : level; + const int band = VP8EncBands[n + 1]; + score_t base_score; + score_t best_cur_score = MAX_COST; + int best_prev = 0; // default, in case + + ss_cur[m].score = MAX_COST; + ss_cur[m].costs = costs[n + 1][ctx]; + if (level < 0 || level > thresh_level) { + // Node is dead. + continue; + } + + { + // Compute delta_error = how much coding this level will + // subtract to max_error as distortion. + // Here, distortion = sum of (|coeff_i| - level_i * Q_i)^2 + const int new_error = coeff0 - level * Q; + const int delta_error = + kWeightTrellis[j] * (new_error * new_error - coeff0 * coeff0); + base_score = RDScoreTrellis(lambda, 0, delta_error); + } + + // Inspect all possible non-dead predecessors. Retain only the best one. + for (p = -MIN_DELTA; p <= MAX_DELTA; ++p) { + // Dead nodes (with ss_prev[p].score >= MAX_COST) are automatically + // eliminated since their score can't be better than the current best. + const score_t cost = VP8LevelCost(ss_prev[p].costs, level); + // Examine node assuming it's a non-terminal one. + const score_t score = + base_score + ss_prev[p].score + RDScoreTrellis(lambda, cost, 0); + if (score < best_cur_score) { + best_cur_score = score; + best_prev = p; + } + } + // Store best finding in current node. + cur->sign = sign; + cur->level = level; + cur->prev = best_prev; + ss_cur[m].score = best_cur_score; + + // Now, record best terminal node (and thus best entry in the graph). + if (level != 0) { + const score_t last_pos_cost = + (n < 15) ? VP8BitCost(0, probas[band][ctx][0]) : 0; + const score_t last_pos_score = RDScoreTrellis(lambda, last_pos_cost, 0); + const score_t score = best_cur_score + last_pos_score; + if (score < best_score) { + best_score = score; + best_path[0] = n; // best eob position + best_path[1] = m; // best node index + best_path[2] = best_prev; // best predecessor + } + } + } + } + + // Fresh start + memset(in + first, 0, (16 - first) * sizeof(*in)); + memset(out + first, 0, (16 - first) * sizeof(*out)); + if (best_path[0] == -1) { + return 0; // skip! + } + + { + // Unwind the best path. + // Note: best-prev on terminal node is not necessarily equal to the + // best_prev for non-terminal. So we patch best_path[2] in. + int nz = 0; + int best_node = best_path[1]; + n = best_path[0]; + NODE(n, best_node).prev = best_path[2]; // force best-prev for terminal + + for (; n >= first; --n) { + const Node* const node = &NODE(n, best_node); + const int j = kZigzag[n]; + out[n] = node->sign ? -node->level : node->level; + nz |= node->level; + in[j] = out[n] * mtx->q_[j]; + best_node = node->prev; + } + return (nz != 0); + } +} + +#undef NODE + +//------------------------------------------------------------------------------ +// Performs: difference, transform, quantize, back-transform, add +// all at once. Output is the reconstructed block in *yuv_out, and the +// quantized levels in *levels. 
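+//
+// Editor's note: a minimal sketch of that pipeline for a single 4x4 block,
+// mirroring what ReconstructIntra4() below does (illustrative only,
+// deliberately compiled out; the helper name is hypothetical):
+#if 0
+static void ExampleReconstruct(const uint8_t* const src,
+                               const uint8_t* const ref,
+                               uint8_t* const yuv_out, int16_t levels[16],
+                               const VP8Matrix* const mtx) {
+  int16_t coeffs[16];
+  VP8FTransform(src, ref, coeffs);           // difference + forward transform
+  VP8EncQuantizeBlock(coeffs, levels, mtx);  // quantize; 'coeffs' now holds
+                                             // the dequantized values
+  VP8ITransform(ref, coeffs, yuv_out, 0);    // inverse transform, prediction
+                                             // added back
+}
+#endif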
+ +static int ReconstructIntra16(VP8EncIterator* const it, + VP8ModeScore* const rd, + uint8_t* const yuv_out, + int mode) { + const VP8Encoder* const enc = it->enc_; + const uint8_t* const ref = it->yuv_p_ + VP8I16ModeOffsets[mode]; + const uint8_t* const src = it->yuv_in_ + Y_OFF_ENC; + const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_]; + int nz = 0; + int n; + int16_t tmp[16][16], dc_tmp[16]; + + for (n = 0; n < 16; n += 2) { + VP8FTransform2(src + VP8Scan[n], ref + VP8Scan[n], tmp[n]); + } + VP8FTransformWHT(tmp[0], dc_tmp); + nz |= VP8EncQuantizeBlockWHT(dc_tmp, rd->y_dc_levels, &dqm->y2_) << 24; + + if (DO_TRELLIS_I16 && it->do_trellis_) { + int x, y; + VP8IteratorNzToBytes(it); + for (y = 0, n = 0; y < 4; ++y) { + for (x = 0; x < 4; ++x, ++n) { + const int ctx = it->top_nz_[x] + it->left_nz_[y]; + const int non_zero = + TrellisQuantizeBlock(enc, tmp[n], rd->y_ac_levels[n], ctx, 0, + &dqm->y1_, dqm->lambda_trellis_i16_); + it->top_nz_[x] = it->left_nz_[y] = non_zero; + rd->y_ac_levels[n][0] = 0; + nz |= non_zero << n; + } + } + } else { + for (n = 0; n < 16; n += 2) { + // Zero-out the first coeff, so that: a) nz is correct below, and + // b) finding 'last' non-zero coeffs in SetResidualCoeffs() is simplified. + tmp[n][0] = tmp[n + 1][0] = 0; + nz |= VP8EncQuantize2Blocks(tmp[n], rd->y_ac_levels[n], &dqm->y1_) << n; + assert(rd->y_ac_levels[n + 0][0] == 0); + assert(rd->y_ac_levels[n + 1][0] == 0); + } + } + + // Transform back + VP8TransformWHT(dc_tmp, tmp[0]); + for (n = 0; n < 16; n += 2) { + VP8ITransform(ref + VP8Scan[n], tmp[n], yuv_out + VP8Scan[n], 1); + } + + return nz; +} + +static int ReconstructIntra4(VP8EncIterator* const it, + int16_t levels[16], + const uint8_t* const src, + uint8_t* const yuv_out, + int mode) { + const VP8Encoder* const enc = it->enc_; + const uint8_t* const ref = it->yuv_p_ + VP8I4ModeOffsets[mode]; + const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_]; + int nz = 0; + int16_t tmp[16]; + + VP8FTransform(src, ref, tmp); + if (DO_TRELLIS_I4 && it->do_trellis_) { + const int x = it->i4_ & 3, y = it->i4_ >> 2; + const int ctx = it->top_nz_[x] + it->left_nz_[y]; + nz = TrellisQuantizeBlock(enc, tmp, levels, ctx, 3, &dqm->y1_, + dqm->lambda_trellis_i4_); + } else { + nz = VP8EncQuantizeBlock(tmp, levels, &dqm->y1_); + } + VP8ITransform(ref, tmp, yuv_out, 0); + return nz; +} + +//------------------------------------------------------------------------------ +// DC-error diffusion + +// Diffusion weights. We under-correct a bit (15/16th of the error is actually +// diffused) to avoid 'rainbow' chessboard pattern of blocks at q~=0. +#define C1 7 // fraction of error sent to the 4x4 block below +#define C2 8 // fraction of error sent to the 4x4 block on the right +#define DSHIFT 4 +#define DSCALE 1 // storage descaling, needed to make the error fit int8_t + +// Quantize as usual, but also compute and return the quantization error. +// Error is already divided by DSHIFT. +static int QuantizeSingle(int16_t* const v, const VP8Matrix* const mtx) { + int V = *v; + const int sign = (V < 0); + if (sign) V = -V; + if (V > (int)mtx->zthresh_[0]) { + const int qV = QUANTDIV(V, mtx->iq_[0], mtx->bias_[0]) * mtx->q_[0]; + const int err = (V - qV); + *v = sign ? -qV : qV; + return (sign ? -err : err) >> DSCALE; + } + *v = 0; + return (sign ? 
-V : V) >> DSCALE;
+}
+
+static void CorrectDCValues(const VP8EncIterator* const it,
+                            const VP8Matrix* const mtx,
+                            int16_t tmp[][16], VP8ModeScore* const rd) {
+  //         | top[0] | top[1]
+  // --------+--------+---------
+  // left[0] | tmp[0]   tmp[1]  <->  err0 err1
+  // left[1] | tmp[2]   tmp[3]       err2 err3
+  //
+  // Final errors {err1,err2,err3} are preserved and later restored
+  // as top[]/left[] on the next block.
+  int ch;
+  for (ch = 0; ch <= 1; ++ch) {
+    const int8_t* const top = it->top_derr_[it->x_][ch];
+    const int8_t* const left = it->left_derr_[ch];
+    int16_t (* const c)[16] = &tmp[ch * 4];
+    int err0, err1, err2, err3;
+    c[0][0] += (C1 * top[0] + C2 * left[0]) >> (DSHIFT - DSCALE);
+    err0 = QuantizeSingle(&c[0][0], mtx);
+    c[1][0] += (C1 * top[1] + C2 * err0) >> (DSHIFT - DSCALE);
+    err1 = QuantizeSingle(&c[1][0], mtx);
+    c[2][0] += (C1 * err0 + C2 * left[1]) >> (DSHIFT - DSCALE);
+    err2 = QuantizeSingle(&c[2][0], mtx);
+    c[3][0] += (C1 * err1 + C2 * err2) >> (DSHIFT - DSCALE);
+    err3 = QuantizeSingle(&c[3][0], mtx);
+    // error 'err' is bounded by mtx->q_[0] which is 132 at max. Hence
+    // err >> DSCALE will fit in an int8_t type if DSCALE >= 1.
+    assert(abs(err1) <= 127 && abs(err2) <= 127 && abs(err3) <= 127);
+    rd->derr[ch][0] = (int8_t)err1;
+    rd->derr[ch][1] = (int8_t)err2;
+    rd->derr[ch][2] = (int8_t)err3;
+  }
+}
+
+static void StoreDiffusionErrors(VP8EncIterator* const it,
+                                 const VP8ModeScore* const rd) {
+  int ch;
+  for (ch = 0; ch <= 1; ++ch) {
+    int8_t* const top = it->top_derr_[it->x_][ch];
+    int8_t* const left = it->left_derr_[ch];
+    left[0] = rd->derr[ch][0];            // restore err1
+    left[1] = 3 * rd->derr[ch][2] >> 2;   //     ... 3/4th of err3
+    top[0] = rd->derr[ch][1];             //     ... err2
+    top[1] = rd->derr[ch][2] - left[1];   //     ... 1/4th of err3.
+  }
+}
+
+#undef C1
+#undef C2
+#undef DSHIFT
+#undef DSCALE
+
+//------------------------------------------------------------------------------
+
+static int ReconstructUV(VP8EncIterator* const it, VP8ModeScore* const rd,
+                         uint8_t* const yuv_out, int mode) {
+  const VP8Encoder* const enc = it->enc_;
+  const uint8_t* const ref = it->yuv_p_ + VP8UVModeOffsets[mode];
+  const uint8_t* const src = it->yuv_in_ + U_OFF_ENC;
+  const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_];
+  int nz = 0;
+  int n;
+  int16_t tmp[8][16];
+
+  for (n = 0; n < 8; n += 2) {
+    VP8FTransform2(src + VP8ScanUV[n], ref + VP8ScanUV[n], tmp[n]);
+  }
+  if (it->top_derr_ != NULL) CorrectDCValues(it, &dqm->uv_, tmp, rd);
+
+  if (DO_TRELLIS_UV && it->do_trellis_) {
+    int ch, x, y;
+    for (ch = 0, n = 0; ch <= 2; ch += 2) {
+      for (y = 0; y < 2; ++y) {
+        for (x = 0; x < 2; ++x, ++n) {
+          const int ctx = it->top_nz_[4 + ch + x] + it->left_nz_[4 + ch + y];
+          const int non_zero =
+              TrellisQuantizeBlock(enc, tmp[n], rd->uv_levels[n], ctx, 2,
+                                   &dqm->uv_, dqm->lambda_trellis_uv_);
+          it->top_nz_[4 + ch + x] = it->left_nz_[4 + ch + y] = non_zero;
+          nz |= non_zero << n;
+        }
+      }
+    }
+  } else {
+    for (n = 0; n < 8; n += 2) {
+      nz |= VP8EncQuantize2Blocks(tmp[n], rd->uv_levels[n], &dqm->uv_) << n;
+    }
+  }
+
+  for (n = 0; n < 8; n += 2) {
+    VP8ITransform(ref + VP8ScanUV[n], tmp[n], yuv_out + VP8ScanUV[n], 1);
+  }
+  return (nz << 16);
+}
+
+//------------------------------------------------------------------------------
+// RD-opt decision. Reconstruct each mode, evaluate its distortion and
+// bit-cost, and pick the mode with the lower RD-cost, where
+// RD-cost = Rate + lambda * Distortion.
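+//
+// Editor's note on the 'nz' bitfield used by this pass (layout follows from
+// the Reconstruct*() helpers above): bits 0..15 flag non-zero AC coefficients
+// in the sixteen luma 4x4 blocks, bits 16..23 flag the eight chroma blocks,
+// and bit 24 flags the luma DC (WHT) block. For instance, the test
+//   (rd->nz & 0x100ffff) == 0x1000000
+// in PickBestIntra16() below reads: "no luma AC block is non-zero, only the
+// DC block is" (the chroma bits are outside the mask).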
+ +static void StoreMaxDelta(VP8SegmentInfo* const dqm, const int16_t DCs[16]) { + // We look at the first three AC coefficients to determine what is the average + // delta between each sub-4x4 block. + const int v0 = abs(DCs[1]); + const int v1 = abs(DCs[2]); + const int v2 = abs(DCs[4]); + int max_v = (v1 > v0) ? v1 : v0; + max_v = (v2 > max_v) ? v2 : max_v; + if (max_v > dqm->max_edge_) dqm->max_edge_ = max_v; +} + +static void SwapModeScore(VP8ModeScore** a, VP8ModeScore** b) { + VP8ModeScore* const tmp = *a; + *a = *b; + *b = tmp; +} + +static void SwapPtr(uint8_t** a, uint8_t** b) { + uint8_t* const tmp = *a; + *a = *b; + *b = tmp; +} + +static void SwapOut(VP8EncIterator* const it) { + SwapPtr(&it->yuv_out_, &it->yuv_out2_); +} + +static void PickBestIntra16(VP8EncIterator* const it, VP8ModeScore* rd) { + const int kNumBlocks = 16; + VP8SegmentInfo* const dqm = &it->enc_->dqm_[it->mb_->segment_]; + const int lambda = dqm->lambda_i16_; + const int tlambda = dqm->tlambda_; + const uint8_t* const src = it->yuv_in_ + Y_OFF_ENC; + VP8ModeScore rd_tmp; + VP8ModeScore* rd_cur = &rd_tmp; + VP8ModeScore* rd_best = rd; + int mode; + int is_flat = IsFlatSource16(it->yuv_in_ + Y_OFF_ENC); + + rd->mode_i16 = -1; + for (mode = 0; mode < NUM_PRED_MODES; ++mode) { + uint8_t* const tmp_dst = it->yuv_out2_ + Y_OFF_ENC; // scratch buffer + rd_cur->mode_i16 = mode; + + // Reconstruct + rd_cur->nz = ReconstructIntra16(it, rd_cur, tmp_dst, mode); + + // Measure RD-score + rd_cur->D = VP8SSE16x16(src, tmp_dst); + rd_cur->SD = + tlambda ? MULT_8B(tlambda, VP8TDisto16x16(src, tmp_dst, kWeightY)) : 0; + rd_cur->H = VP8FixedCostsI16[mode]; + rd_cur->R = VP8GetCostLuma16(it, rd_cur); + if (is_flat) { + // refine the first impression (which was in pixel space) + is_flat = IsFlat(rd_cur->y_ac_levels[0], kNumBlocks, FLATNESS_LIMIT_I16); + if (is_flat) { + // Block is very flat. We put emphasis on the distortion being very low! + rd_cur->D *= 2; + rd_cur->SD *= 2; + } + } + + // Since we always examine Intra16 first, we can overwrite *rd directly. + SetRDScore(lambda, rd_cur); + if (mode == 0 || rd_cur->score < rd_best->score) { + SwapModeScore(&rd_cur, &rd_best); + SwapOut(it); + } + } + if (rd_best != rd) { + memcpy(rd, rd_best, sizeof(*rd)); + } + SetRDScore(dqm->lambda_mode_, rd); // finalize score for mode decision. + VP8SetIntra16Mode(it, rd->mode_i16); + + // we have a blocky macroblock (only DCs are non-zero) with fairly high + // distortion, record max delta so we can later adjust the minimal filtering + // strength needed to smooth these blocks out. + if ((rd->nz & 0x100ffff) == 0x1000000 && rd->D > dqm->min_disto_) { + StoreMaxDelta(dqm, rd->y_dc_levels); + } +} + +//------------------------------------------------------------------------------ + +// return the cost array corresponding to the surrounding prediction modes. +static const uint16_t* GetCostModeI4(VP8EncIterator* const it, + const uint8_t modes[16]) { + const int preds_w = it->enc_->preds_w_; + const int x = (it->i4_ & 3), y = it->i4_ >> 2; + const int left = (x == 0) ? it->preds_[y * preds_w - 1] : modes[it->i4_ - 1]; + const int top = (y == 0) ? 
it->preds_[-preds_w + x] : modes[it->i4_ - 4]; + return VP8FixedCostsI4[top][left]; +} + +static int PickBestIntra4(VP8EncIterator* const it, VP8ModeScore* const rd) { + const VP8Encoder* const enc = it->enc_; + const VP8SegmentInfo* const dqm = &enc->dqm_[it->mb_->segment_]; + const int lambda = dqm->lambda_i4_; + const int tlambda = dqm->tlambda_; + const uint8_t* const src0 = it->yuv_in_ + Y_OFF_ENC; + uint8_t* const best_blocks = it->yuv_out2_ + Y_OFF_ENC; + int total_header_bits = 0; + VP8ModeScore rd_best; + + if (enc->max_i4_header_bits_ == 0) { + return 0; + } + + InitScore(&rd_best); + rd_best.H = 211; // '211' is the value of VP8BitCost(0, 145) + SetRDScore(dqm->lambda_mode_, &rd_best); + VP8IteratorStartI4(it); + do { + const int kNumBlocks = 1; + VP8ModeScore rd_i4; + int mode; + int best_mode = -1; + const uint8_t* const src = src0 + VP8Scan[it->i4_]; + const uint16_t* const mode_costs = GetCostModeI4(it, rd->modes_i4); + uint8_t* best_block = best_blocks + VP8Scan[it->i4_]; + uint8_t* tmp_dst = it->yuv_p_ + I4TMP; // scratch buffer. + + InitScore(&rd_i4); + VP8MakeIntra4Preds(it); + for (mode = 0; mode < NUM_BMODES; ++mode) { + VP8ModeScore rd_tmp; + int16_t tmp_levels[16]; + + // Reconstruct + rd_tmp.nz = + ReconstructIntra4(it, tmp_levels, src, tmp_dst, mode) << it->i4_; + + // Compute RD-score + rd_tmp.D = VP8SSE4x4(src, tmp_dst); + rd_tmp.SD = + tlambda ? MULT_8B(tlambda, VP8TDisto4x4(src, tmp_dst, kWeightY)) + : 0; + rd_tmp.H = mode_costs[mode]; + + // Add flatness penalty, to avoid flat area to be mispredicted + // by a complex mode. + if (mode > 0 && IsFlat(tmp_levels, kNumBlocks, FLATNESS_LIMIT_I4)) { + rd_tmp.R = FLATNESS_PENALTY * kNumBlocks; + } else { + rd_tmp.R = 0; + } + + // early-out check + SetRDScore(lambda, &rd_tmp); + if (best_mode >= 0 && rd_tmp.score >= rd_i4.score) continue; + + // finish computing score + rd_tmp.R += VP8GetCostLuma4(it, tmp_levels); + SetRDScore(lambda, &rd_tmp); + + if (best_mode < 0 || rd_tmp.score < rd_i4.score) { + CopyScore(&rd_i4, &rd_tmp); + best_mode = mode; + SwapPtr(&tmp_dst, &best_block); + memcpy(rd_best.y_ac_levels[it->i4_], tmp_levels, + sizeof(rd_best.y_ac_levels[it->i4_])); + } + } + SetRDScore(dqm->lambda_mode_, &rd_i4); + AddScore(&rd_best, &rd_i4); + if (rd_best.score >= rd->score) { + return 0; + } + total_header_bits += (int)rd_i4.H; // <- equal to mode_costs[best_mode]; + if (total_header_bits > enc->max_i4_header_bits_) { + return 0; + } + // Copy selected samples if not in the right place already. + if (best_block != best_blocks + VP8Scan[it->i4_]) { + VP8Copy4x4(best_block, best_blocks + VP8Scan[it->i4_]); + } + rd->modes_i4[it->i4_] = best_mode; + it->top_nz_[it->i4_ & 3] = it->left_nz_[it->i4_ >> 2] = (rd_i4.nz ? 
1 : 0); + } while (VP8IteratorRotateI4(it, best_blocks)); + + // finalize state + CopyScore(rd, &rd_best); + VP8SetIntra4Mode(it, rd->modes_i4); + SwapOut(it); + memcpy(rd->y_ac_levels, rd_best.y_ac_levels, sizeof(rd->y_ac_levels)); + return 1; // select intra4x4 over intra16x16 +} + +//------------------------------------------------------------------------------ + +static void PickBestUV(VP8EncIterator* const it, VP8ModeScore* const rd) { + const int kNumBlocks = 8; + const VP8SegmentInfo* const dqm = &it->enc_->dqm_[it->mb_->segment_]; + const int lambda = dqm->lambda_uv_; + const uint8_t* const src = it->yuv_in_ + U_OFF_ENC; + uint8_t* tmp_dst = it->yuv_out2_ + U_OFF_ENC; // scratch buffer + uint8_t* dst0 = it->yuv_out_ + U_OFF_ENC; + uint8_t* dst = dst0; + VP8ModeScore rd_best; + int mode; + + rd->mode_uv = -1; + InitScore(&rd_best); + for (mode = 0; mode < NUM_PRED_MODES; ++mode) { + VP8ModeScore rd_uv; + + // Reconstruct + rd_uv.nz = ReconstructUV(it, &rd_uv, tmp_dst, mode); + + // Compute RD-score + rd_uv.D = VP8SSE16x8(src, tmp_dst); + rd_uv.SD = 0; // not calling TDisto here: it tends to flatten areas. + rd_uv.H = VP8FixedCostsUV[mode]; + rd_uv.R = VP8GetCostUV(it, &rd_uv); + if (mode > 0 && IsFlat(rd_uv.uv_levels[0], kNumBlocks, FLATNESS_LIMIT_UV)) { + rd_uv.R += FLATNESS_PENALTY * kNumBlocks; + } + + SetRDScore(lambda, &rd_uv); + if (mode == 0 || rd_uv.score < rd_best.score) { + CopyScore(&rd_best, &rd_uv); + rd->mode_uv = mode; + memcpy(rd->uv_levels, rd_uv.uv_levels, sizeof(rd->uv_levels)); + if (it->top_derr_ != NULL) { + memcpy(rd->derr, rd_uv.derr, sizeof(rd_uv.derr)); + } + SwapPtr(&dst, &tmp_dst); + } + } + VP8SetIntraUVMode(it, rd->mode_uv); + AddScore(rd, &rd_best); + if (dst != dst0) { // copy 16x8 block if needed + VP8Copy16x8(dst, dst0); + } + if (it->top_derr_ != NULL) { // store diffusion errors for next block + StoreDiffusionErrors(it, rd); + } +} + +//------------------------------------------------------------------------------ +// Final reconstruction and quantization. + +static void SimpleQuantize(VP8EncIterator* const it, VP8ModeScore* const rd) { + const VP8Encoder* const enc = it->enc_; + const int is_i16 = (it->mb_->type_ == 1); + int nz = 0; + + if (is_i16) { + nz = ReconstructIntra16(it, rd, it->yuv_out_ + Y_OFF_ENC, it->preds_[0]); + } else { + VP8IteratorStartI4(it); + do { + const int mode = + it->preds_[(it->i4_ & 3) + (it->i4_ >> 2) * enc->preds_w_]; + const uint8_t* const src = it->yuv_in_ + Y_OFF_ENC + VP8Scan[it->i4_]; + uint8_t* const dst = it->yuv_out_ + Y_OFF_ENC + VP8Scan[it->i4_]; + VP8MakeIntra4Preds(it); + nz |= ReconstructIntra4(it, rd->y_ac_levels[it->i4_], + src, dst, mode) << it->i4_; + } while (VP8IteratorRotateI4(it, it->yuv_out_ + Y_OFF_ENC)); + } + + nz |= ReconstructUV(it, rd, it->yuv_out_ + U_OFF_ENC, it->mb_->uv_mode_); + rd->nz = nz; +} + +// Refine intra16/intra4 sub-modes based on distortion only (not rate). +static void RefineUsingDistortion(VP8EncIterator* const it, + int try_both_modes, int refine_uv_mode, + VP8ModeScore* const rd) { + score_t best_score = MAX_COST; + int nz = 0; + int mode; + int is_i16 = try_both_modes || (it->mb_->type_ == 1); + + const VP8SegmentInfo* const dqm = &it->enc_->dqm_[it->mb_->segment_]; + // Some empiric constants, of approximate order of magnitude. + const int lambda_d_i16 = 106; + const int lambda_d_i4 = 11; + const int lambda_d_uv = 120; + score_t score_i4 = dqm->i4_penalty_; + score_t i4_bit_sum = 0; + const score_t bit_limit = try_both_modes ? 
it->enc_->mb_header_limit_
+                                           : MAX_COST;  // no early-out allowed
+
+  if (is_i16) {   // First, evaluate Intra16 distortion
+    int best_mode = -1;
+    const uint8_t* const src = it->yuv_in_ + Y_OFF_ENC;
+    for (mode = 0; mode < NUM_PRED_MODES; ++mode) {
+      const uint8_t* const ref = it->yuv_p_ + VP8I16ModeOffsets[mode];
+      const score_t score = (score_t)VP8SSE16x16(src, ref) * RD_DISTO_MULT
+                          + VP8FixedCostsI16[mode] * lambda_d_i16;
+      if (mode > 0 && VP8FixedCostsI16[mode] > bit_limit) {
+        continue;
+      }
+
+      if (score < best_score) {
+        best_mode = mode;
+        best_score = score;
+      }
+    }
+    if (it->x_ == 0 || it->y_ == 0) {
+      // avoid starting a checkerboard resonance from the border. See bug #432.
+      if (IsFlatSource16(src)) {
+        best_mode = (it->x_ == 0) ? 0 : 2;
+        try_both_modes = 0;  // stick to i16
+      }
+    }
+    VP8SetIntra16Mode(it, best_mode);
+    // we'll reconstruct later, if i16 mode actually gets selected
+  }
+
+  // Next, evaluate Intra4
+  if (try_both_modes || !is_i16) {
+    // We don't evaluate the rate here, but just account for it through a
+    // constant penalty (i4 mode usually needs more bits compared to i16).
+    is_i16 = 0;
+    VP8IteratorStartI4(it);
+    do {
+      int best_i4_mode = -1;
+      score_t best_i4_score = MAX_COST;
+      const uint8_t* const src = it->yuv_in_ + Y_OFF_ENC + VP8Scan[it->i4_];
+      const uint16_t* const mode_costs = GetCostModeI4(it, rd->modes_i4);
+
+      VP8MakeIntra4Preds(it);
+      for (mode = 0; mode < NUM_BMODES; ++mode) {
+        const uint8_t* const ref = it->yuv_p_ + VP8I4ModeOffsets[mode];
+        const score_t score = VP8SSE4x4(src, ref) * RD_DISTO_MULT
+                            + mode_costs[mode] * lambda_d_i4;
+        if (score < best_i4_score) {
+          best_i4_mode = mode;
+          best_i4_score = score;
+        }
+      }
+      i4_bit_sum += mode_costs[best_i4_mode];
+      rd->modes_i4[it->i4_] = best_i4_mode;
+      score_i4 += best_i4_score;
+      if (score_i4 >= best_score || i4_bit_sum > bit_limit) {
+        // Intra4 won't be better than Intra16. Bail out and pick Intra16.
+        is_i16 = 1;
+        break;
+      } else {  // reconstruct partial block inside yuv_out2_ buffer
+        uint8_t* const tmp_dst = it->yuv_out2_ + Y_OFF_ENC + VP8Scan[it->i4_];
+        nz |= ReconstructIntra4(it, rd->y_ac_levels[it->i4_],
+                                src, tmp_dst, best_i4_mode) << it->i4_;
+      }
+    } while (VP8IteratorRotateI4(it, it->yuv_out2_ + Y_OFF_ENC));
+  }
+
+  // Final reconstruction, depending on which mode is selected.
+  if (!is_i16) {
+    VP8SetIntra4Mode(it, rd->modes_i4);
+    SwapOut(it);
+    best_score = score_i4;
+  } else {
+    nz = ReconstructIntra16(it, rd, it->yuv_out_ + Y_OFF_ENC, it->preds_[0]);
+  }
+
+  // ... and UV!
+  if (refine_uv_mode) {
+    int best_mode = -1;
+    score_t best_uv_score = MAX_COST;
+    const uint8_t* const src = it->yuv_in_ + U_OFF_ENC;
+    for (mode = 0; mode < NUM_PRED_MODES; ++mode) {
+      const uint8_t* const ref = it->yuv_p_ + VP8UVModeOffsets[mode];
+      const score_t score = VP8SSE16x8(src, ref) * RD_DISTO_MULT
+                          + VP8FixedCostsUV[mode] * lambda_d_uv;
+      if (score < best_uv_score) {
+        best_mode = mode;
+        best_uv_score = score;
+      }
+    }
+    VP8SetIntraUVMode(it, best_mode);
+  }
+  nz |= ReconstructUV(it, rd, it->yuv_out_ + U_OFF_ENC, it->mb_->uv_mode_);
+
+  rd->nz = nz;
+  rd->score = best_score;
+}
+
+//------------------------------------------------------------------------------
+// Entry point
+
+int VP8Decimate(VP8EncIterator* const it, VP8ModeScore* const rd,
+                VP8RDLevel rd_opt) {
+  int is_skipped;
+  const int method = it->enc_->method_;
+
+  InitScore(rd);
+
+  // We can perform predictions for Luma16x16 and Chroma8x8 already.
+  // Luma4x4 predictions need to be done as-we-go.
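+  //
+  // Editor's note, summarizing the decision flow coded below:
+  //   rd_opt > RD_OPT_NONE  -> full rate-distortion search (PickBestIntra16,
+  //                            PickBestIntra4 for method >= 2, PickBestUV),
+  //                            plus a final trellis pass for RD_OPT_TRELLIS.
+  //   rd_opt == RD_OPT_NONE -> distortion-only refinement of the heuristic
+  //                            mode choice (RefineUsingDistortion).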
+  VP8MakeLuma16Preds(it);
+  VP8MakeChroma8Preds(it);
+
+  if (rd_opt > RD_OPT_NONE) {
+    it->do_trellis_ = (rd_opt >= RD_OPT_TRELLIS_ALL);
+    PickBestIntra16(it, rd);
+    if (method >= 2) {
+      PickBestIntra4(it, rd);
+    }
+    PickBestUV(it, rd);
+    if (rd_opt == RD_OPT_TRELLIS) {   // finish off with trellis-optim now
+      it->do_trellis_ = 1;
+      SimpleQuantize(it, rd);
+    }
+  } else {
+    // At this point we have heuristically decided intra16 / intra4.
+    // For method >= 2, pick the best intra4/intra16 based on SSE (a tad
+    // slower). For method <= 1, we don't re-examine the decision but just go
+    // ahead with quantization/reconstruction.
+    RefineUsingDistortion(it, (method >= 2), (method >= 1), rd);
+  }
+  is_skipped = (rd->nz == 0);
+  VP8SetSkip(it, is_skipped);
+  return is_skipped;
+}
diff --git a/vendor/github.com/sizeofint/webpanimation/enc_syntax_enc.c b/vendor/github.com/sizeofint/webpanimation/enc_syntax_enc.c
new file mode 100644
index 00000000..03dd1655
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/enc_syntax_enc.c
@@ -0,0 +1,388 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Header syntax writing
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <assert.h>
+
+#include "utils_utils.h"
+#include "webp_format_constants.h"
+#include "webp_mux_types.h"
+#include "enc_vp8i_enc.h"
+
+//------------------------------------------------------------------------------
+// Helper functions
+
+static int IsVP8XNeeded(const VP8Encoder* const enc) {
+  return !!enc->has_alpha_;  // Currently the only case when VP8X is needed.
+                             // This could change in the future.
+} + +static int PutPaddingByte(const WebPPicture* const pic) { + const uint8_t pad_byte[1] = { 0 }; + return !!pic->writer(pad_byte, 1, pic); +} + +//------------------------------------------------------------------------------ +// Writers for header's various pieces (in order of appearance) + +static WebPEncodingError PutRIFFHeader(const VP8Encoder* const enc, + size_t riff_size) { + const WebPPicture* const pic = enc->pic_; + uint8_t riff[RIFF_HEADER_SIZE] = { + 'R', 'I', 'F', 'F', 0, 0, 0, 0, 'W', 'E', 'B', 'P' + }; + assert(riff_size == (uint32_t)riff_size); + PutLE32(riff + TAG_SIZE, (uint32_t)riff_size); + if (!pic->writer(riff, sizeof(riff), pic)) { + return VP8_ENC_ERROR_BAD_WRITE; + } + return VP8_ENC_OK; +} + +static WebPEncodingError PutVP8XHeader(const VP8Encoder* const enc) { + const WebPPicture* const pic = enc->pic_; + uint8_t vp8x[CHUNK_HEADER_SIZE + VP8X_CHUNK_SIZE] = { + 'V', 'P', '8', 'X' + }; + uint32_t flags = 0; + + assert(IsVP8XNeeded(enc)); + assert(pic->width >= 1 && pic->height >= 1); + assert(pic->width <= MAX_CANVAS_SIZE && pic->height <= MAX_CANVAS_SIZE); + + if (enc->has_alpha_) { + flags |= ALPHA_FLAG; + } + + PutLE32(vp8x + TAG_SIZE, VP8X_CHUNK_SIZE); + PutLE32(vp8x + CHUNK_HEADER_SIZE, flags); + PutLE24(vp8x + CHUNK_HEADER_SIZE + 4, pic->width - 1); + PutLE24(vp8x + CHUNK_HEADER_SIZE + 7, pic->height - 1); + if (!pic->writer(vp8x, sizeof(vp8x), pic)) { + return VP8_ENC_ERROR_BAD_WRITE; + } + return VP8_ENC_OK; +} + +static WebPEncodingError PutAlphaChunk(const VP8Encoder* const enc) { + const WebPPicture* const pic = enc->pic_; + uint8_t alpha_chunk_hdr[CHUNK_HEADER_SIZE] = { + 'A', 'L', 'P', 'H' + }; + + assert(enc->has_alpha_); + + // Alpha chunk header. + PutLE32(alpha_chunk_hdr + TAG_SIZE, enc->alpha_data_size_); + if (!pic->writer(alpha_chunk_hdr, sizeof(alpha_chunk_hdr), pic)) { + return VP8_ENC_ERROR_BAD_WRITE; + } + + // Alpha chunk data. + if (!pic->writer(enc->alpha_data_, enc->alpha_data_size_, pic)) { + return VP8_ENC_ERROR_BAD_WRITE; + } + + // Padding. + if ((enc->alpha_data_size_ & 1) && !PutPaddingByte(pic)) { + return VP8_ENC_ERROR_BAD_WRITE; + } + return VP8_ENC_OK; +} + +static WebPEncodingError PutVP8Header(const WebPPicture* const pic, + size_t vp8_size) { + uint8_t vp8_chunk_hdr[CHUNK_HEADER_SIZE] = { + 'V', 'P', '8', ' ' + }; + assert(vp8_size == (uint32_t)vp8_size); + PutLE32(vp8_chunk_hdr + TAG_SIZE, (uint32_t)vp8_size); + if (!pic->writer(vp8_chunk_hdr, sizeof(vp8_chunk_hdr), pic)) { + return VP8_ENC_ERROR_BAD_WRITE; + } + return VP8_ENC_OK; +} + +static WebPEncodingError PutVP8FrameHeader(const WebPPicture* const pic, + int profile, size_t size0) { + uint8_t vp8_frm_hdr[VP8_FRAME_HEADER_SIZE]; + uint32_t bits; + + if (size0 >= VP8_MAX_PARTITION0_SIZE) { // partition #0 is too big to fit + return VP8_ENC_ERROR_PARTITION0_OVERFLOW; + } + + // Paragraph 9.1. 
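+  // Editor's note: layout of the 3-byte frame tag assembled below, for
+  // reference (a keyframe is signalled by a 0 in the frame-type bit):
+  //   bit  0     : frame type (0 = keyframe)
+  //   bits 1..3  : profile
+  //   bit  4     : show_frame flag
+  //   bits 5..23 : size of partition #0, in bytes (19 bits)
+  // e.g. profile 0, visible frame, size0 = 1000 gives
+  // bits = (1000 << 5) | (1 << 4) = 32016.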
+ bits = 0 // keyframe (1b) + | (profile << 1) // profile (3b) + | (1 << 4) // visible (1b) + | ((uint32_t)size0 << 5); // partition length (19b) + vp8_frm_hdr[0] = (bits >> 0) & 0xff; + vp8_frm_hdr[1] = (bits >> 8) & 0xff; + vp8_frm_hdr[2] = (bits >> 16) & 0xff; + // signature + vp8_frm_hdr[3] = (VP8_SIGNATURE >> 16) & 0xff; + vp8_frm_hdr[4] = (VP8_SIGNATURE >> 8) & 0xff; + vp8_frm_hdr[5] = (VP8_SIGNATURE >> 0) & 0xff; + // dimensions + vp8_frm_hdr[6] = pic->width & 0xff; + vp8_frm_hdr[7] = pic->width >> 8; + vp8_frm_hdr[8] = pic->height & 0xff; + vp8_frm_hdr[9] = pic->height >> 8; + + if (!pic->writer(vp8_frm_hdr, sizeof(vp8_frm_hdr), pic)) { + return VP8_ENC_ERROR_BAD_WRITE; + } + return VP8_ENC_OK; +} + +// WebP Headers. +static int PutWebPHeaders(const VP8Encoder* const enc, size_t size0, + size_t vp8_size, size_t riff_size) { + WebPPicture* const pic = enc->pic_; + WebPEncodingError err = VP8_ENC_OK; + + // RIFF header. + err = PutRIFFHeader(enc, riff_size); + if (err != VP8_ENC_OK) goto Error; + + // VP8X. + if (IsVP8XNeeded(enc)) { + err = PutVP8XHeader(enc); + if (err != VP8_ENC_OK) goto Error; + } + + // Alpha. + if (enc->has_alpha_) { + err = PutAlphaChunk(enc); + if (err != VP8_ENC_OK) goto Error; + } + + // VP8 header. + err = PutVP8Header(pic, vp8_size); + if (err != VP8_ENC_OK) goto Error; + + // VP8 frame header. + err = PutVP8FrameHeader(pic, enc->profile_, size0); + if (err != VP8_ENC_OK) goto Error; + + // All OK. + return 1; + + // Error. + Error: + return WebPEncodingSetError(pic, err); +} + +// Segmentation header +static void PutSegmentHeader(VP8BitWriter* const bw, + const VP8Encoder* const enc) { + const VP8EncSegmentHeader* const hdr = &enc->segment_hdr_; + const VP8EncProba* const proba = &enc->proba_; + if (VP8PutBitUniform(bw, (hdr->num_segments_ > 1))) { + // We always 'update' the quant and filter strength values + const int update_data = 1; + int s; + VP8PutBitUniform(bw, hdr->update_map_); + if (VP8PutBitUniform(bw, update_data)) { + // we always use absolute values, not relative ones + VP8PutBitUniform(bw, 1); // (segment_feature_mode = 1. Paragraph 9.3.) + for (s = 0; s < NUM_MB_SEGMENTS; ++s) { + VP8PutSignedBits(bw, enc->dqm_[s].quant_, 7); + } + for (s = 0; s < NUM_MB_SEGMENTS; ++s) { + VP8PutSignedBits(bw, enc->dqm_[s].fstrength_, 6); + } + } + if (hdr->update_map_) { + for (s = 0; s < 3; ++s) { + if (VP8PutBitUniform(bw, (proba->segments_[s] != 255u))) { + VP8PutBits(bw, proba->segments_[s], 8); + } + } + } + } +} + +// Filtering parameters header +static void PutFilterHeader(VP8BitWriter* const bw, + const VP8EncFilterHeader* const hdr) { + const int use_lf_delta = (hdr->i4x4_lf_delta_ != 0); + VP8PutBitUniform(bw, hdr->simple_); + VP8PutBits(bw, hdr->level_, 6); + VP8PutBits(bw, hdr->sharpness_, 3); + if (VP8PutBitUniform(bw, use_lf_delta)) { + // '0' is the default value for i4x4_lf_delta_ at frame #0. 
+ const int need_update = (hdr->i4x4_lf_delta_ != 0); + if (VP8PutBitUniform(bw, need_update)) { + // we don't use ref_lf_delta => emit four 0 bits + VP8PutBits(bw, 0, 4); + // we use mode_lf_delta for i4x4 + VP8PutSignedBits(bw, hdr->i4x4_lf_delta_, 6); + VP8PutBits(bw, 0, 3); // all others unused + } + } +} + +// Nominal quantization parameters +static void PutQuant(VP8BitWriter* const bw, + const VP8Encoder* const enc) { + VP8PutBits(bw, enc->base_quant_, 7); + VP8PutSignedBits(bw, enc->dq_y1_dc_, 4); + VP8PutSignedBits(bw, enc->dq_y2_dc_, 4); + VP8PutSignedBits(bw, enc->dq_y2_ac_, 4); + VP8PutSignedBits(bw, enc->dq_uv_dc_, 4); + VP8PutSignedBits(bw, enc->dq_uv_ac_, 4); +} + +// Partition sizes +static int EmitPartitionsSize(const VP8Encoder* const enc, + WebPPicture* const pic) { + uint8_t buf[3 * (MAX_NUM_PARTITIONS - 1)]; + int p; + for (p = 0; p < enc->num_parts_ - 1; ++p) { + const size_t part_size = VP8BitWriterSize(enc->parts_ + p); + if (part_size >= VP8_MAX_PARTITION_SIZE) { + return WebPEncodingSetError(pic, VP8_ENC_ERROR_PARTITION_OVERFLOW); + } + buf[3 * p + 0] = (part_size >> 0) & 0xff; + buf[3 * p + 1] = (part_size >> 8) & 0xff; + buf[3 * p + 2] = (part_size >> 16) & 0xff; + } + return p ? pic->writer(buf, 3 * p, pic) : 1; +} + +//------------------------------------------------------------------------------ + +static int GeneratePartition0(VP8Encoder* const enc) { + VP8BitWriter* const bw = &enc->bw_; + const int mb_size = enc->mb_w_ * enc->mb_h_; + uint64_t pos1, pos2, pos3; + + pos1 = VP8BitWriterPos(bw); + if (!VP8BitWriterInit(bw, mb_size * 7 / 8)) { // ~7 bits per macroblock + return WebPEncodingSetError(enc->pic_, VP8_ENC_ERROR_OUT_OF_MEMORY); + } + VP8PutBitUniform(bw, 0); // colorspace + VP8PutBitUniform(bw, 0); // clamp type + + PutSegmentHeader(bw, enc); + PutFilterHeader(bw, &enc->filter_hdr_); + VP8PutBits(bw, enc->num_parts_ == 8 ? 3 : + enc->num_parts_ == 4 ? 2 : + enc->num_parts_ == 2 ? 
1 : 0, 2); + PutQuant(bw, enc); + VP8PutBitUniform(bw, 0); // no proba update + VP8WriteProbas(bw, &enc->proba_); + pos2 = VP8BitWriterPos(bw); + VP8CodeIntraModes(enc); + VP8BitWriterFinish(bw); + + pos3 = VP8BitWriterPos(bw); + +#if !defined(WEBP_DISABLE_STATS) + if (enc->pic_->stats) { + enc->pic_->stats->header_bytes[0] = (int)((pos2 - pos1 + 7) >> 3); + enc->pic_->stats->header_bytes[1] = (int)((pos3 - pos2 + 7) >> 3); + enc->pic_->stats->alpha_data_size = (int)enc->alpha_data_size_; + } +#else + (void)pos1; + (void)pos2; + (void)pos3; +#endif + if (bw->error_) { + return WebPEncodingSetError(enc->pic_, VP8_ENC_ERROR_OUT_OF_MEMORY); + } + return 1; +} + +void VP8EncFreeBitWriters(VP8Encoder* const enc) { + int p; + VP8BitWriterWipeOut(&enc->bw_); + for (p = 0; p < enc->num_parts_; ++p) { + VP8BitWriterWipeOut(enc->parts_ + p); + } +} + +int VP8EncWrite(VP8Encoder* const enc) { + WebPPicture* const pic = enc->pic_; + VP8BitWriter* const bw = &enc->bw_; + const int task_percent = 19; + const int percent_per_part = task_percent / enc->num_parts_; + const int final_percent = enc->percent_ + task_percent; + int ok = 0; + size_t vp8_size, pad, riff_size; + int p; + + // Partition #0 with header and partition sizes + ok = GeneratePartition0(enc); + if (!ok) return 0; + + // Compute VP8 size + vp8_size = VP8_FRAME_HEADER_SIZE + + VP8BitWriterSize(bw) + + 3 * (enc->num_parts_ - 1); + for (p = 0; p < enc->num_parts_; ++p) { + vp8_size += VP8BitWriterSize(enc->parts_ + p); + } + pad = vp8_size & 1; + vp8_size += pad; + + // Compute RIFF size + // At the minimum it is: "WEBPVP8 nnnn" + VP8 data size. + riff_size = TAG_SIZE + CHUNK_HEADER_SIZE + vp8_size; + if (IsVP8XNeeded(enc)) { // Add size for: VP8X header + data. + riff_size += CHUNK_HEADER_SIZE + VP8X_CHUNK_SIZE; + } + if (enc->has_alpha_) { // Add size for: ALPH header + data. + const uint32_t padded_alpha_size = enc->alpha_data_size_ + + (enc->alpha_data_size_ & 1); + riff_size += CHUNK_HEADER_SIZE + padded_alpha_size; + } + // Sanity check. + if (riff_size > 0xfffffffeU) { + return WebPEncodingSetError(pic, VP8_ENC_ERROR_FILE_TOO_BIG); + } + + // Emit headers and partition #0 + { + const uint8_t* const part0 = VP8BitWriterBuf(bw); + const size_t size0 = VP8BitWriterSize(bw); + ok = ok && PutWebPHeaders(enc, size0, vp8_size, riff_size) + && pic->writer(part0, size0, pic) + && EmitPartitionsSize(enc, pic); + VP8BitWriterWipeOut(bw); // will free the internal buffer. + } + + // Token partitions + for (p = 0; p < enc->num_parts_; ++p) { + const uint8_t* const buf = VP8BitWriterBuf(enc->parts_ + p); + const size_t size = VP8BitWriterSize(enc->parts_ + p); + if (size) ok = ok && pic->writer(buf, size, pic); + VP8BitWriterWipeOut(enc->parts_ + p); // will free the internal buffer. + ok = ok && WebPReportProgress(pic, enc->percent_ + percent_per_part, + &enc->percent_); + } + + // Padding byte + if (ok && pad) { + ok = PutPaddingByte(pic); + } + + enc->coded_size_ = (int)(CHUNK_HEADER_SIZE + riff_size); + ok = ok && WebPReportProgress(pic, final_percent, &enc->percent_); + return ok; +} + +//------------------------------------------------------------------------------ + diff --git a/vendor/github.com/sizeofint/webpanimation/enc_token_enc.c b/vendor/github.com/sizeofint/webpanimation/enc_token_enc.c new file mode 100644 index 00000000..0908201b --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/enc_token_enc.c @@ -0,0 +1,262 @@ +// Copyright 2011 Google Inc. All Rights Reserved. 
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Paginated token buffer
+//
+// A 'token' is a bit value associated with a probability, either fixed
+// or determined later, once statistics have been collected.
+// For dynamic probability, we just record the slot id (idx) for the probability
+// value in the final probability array (uint8_t* probas in VP8EmitTokens).
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "enc_cost_enc.h"
+#include "enc_vp8i_enc.h"
+#include "utils_utils.h"
+
+#if !defined(DISABLE_TOKEN_BUFFER)
+
+// we use pages to reduce the number of memcpy()
+#define MIN_PAGE_SIZE 8192          // minimum number of tokens per page
+#define FIXED_PROBA_BIT (1u << 14)
+
+typedef uint16_t token_t;  // bit #15: bit value
+                           // bit #14: flags for constant proba or idx
+                           // bits #0..13: slot or constant proba
+struct VP8Tokens {
+  VP8Tokens* next_;        // pointer to next page
+};
+// Token data is located in memory just after the next_ field.
+// This macro is used to return their address and hide the trick.
+#define TOKEN_DATA(p) ((const token_t*)&(p)[1])
+
+//------------------------------------------------------------------------------
+
+void VP8TBufferInit(VP8TBuffer* const b, int page_size) {
+  b->tokens_ = NULL;
+  b->pages_ = NULL;
+  b->last_page_ = &b->pages_;
+  b->left_ = 0;
+  b->page_size_ = (page_size < MIN_PAGE_SIZE) ? MIN_PAGE_SIZE : page_size;
+  b->error_ = 0;
+}
+
+void VP8TBufferClear(VP8TBuffer* const b) {
+  if (b != NULL) {
+    VP8Tokens* p = b->pages_;
+    while (p != NULL) {
+      VP8Tokens* const next = p->next_;
+      WebPSafeFree(p);
+      p = next;
+    }
+    VP8TBufferInit(b, b->page_size_);
+  }
+}
+
+static int TBufferNewPage(VP8TBuffer* const b) {
+  VP8Tokens* page = NULL;
+  if (!b->error_) {
+    const size_t size = sizeof(*page) + b->page_size_ * sizeof(token_t);
+    page = (VP8Tokens*)WebPSafeMalloc(1ULL, size);
+  }
+  if (page == NULL) {
+    b->error_ = 1;
+    return 0;
+  }
+  page->next_ = NULL;
+
+  *b->last_page_ = page;
+  b->last_page_ = &page->next_;
+  b->left_ = b->page_size_;
+  b->tokens_ = (token_t*)TOKEN_DATA(page);
+  return 1;
+}
+
+//------------------------------------------------------------------------------
+
+#define TOKEN_ID(t, b, ctx) \
+    (NUM_PROBAS * ((ctx) + NUM_CTX * ((b) + NUM_BANDS * (t))))
+
+static WEBP_INLINE uint32_t AddToken(VP8TBuffer* const b, uint32_t bit,
+                                     uint32_t proba_idx,
+                                     proba_t* const stats) {
+  assert(proba_idx < FIXED_PROBA_BIT);
+  assert(bit <= 1);
+  if (b->left_ > 0 || TBufferNewPage(b)) {
+    const int slot = --b->left_;
+    b->tokens_[slot] = (bit << 15) | proba_idx;
+  }
+  VP8RecordStats(bit, stats);
+  return bit;
+}
+
+static WEBP_INLINE void AddConstantToken(VP8TBuffer* const b,
+                                         uint32_t bit, uint32_t proba) {
+  assert(proba < 256);
+  assert(bit <= 1);
+  if (b->left_ > 0 || TBufferNewPage(b)) {
+    const int slot = --b->left_;
+    b->tokens_[slot] = (bit << 15) | FIXED_PROBA_BIT | proba;
+  }
+}
+
+int VP8RecordCoeffTokens(int ctx, const struct VP8Residual* const res,
+                         VP8TBuffer* const tokens) {
+  const int16_t* const coeffs = res->coeffs;
+  const int coeff_type = res->coeff_type;
+  const int last = res->last;
+  int n = res->first;
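+  // Editor's note: each recorded token_t packs (cf. the typedef above)
+  //   bit 15     -> the coded bit value,
+  //   bit 14     -> FIXED_PROBA_BIT, set when the probability is a constant,
+  //   bits 0..13 -> either the slot into the final probability array, or the
+  //                 constant probability itself.
+  // TOKEN_ID(t, b, ctx) computes the base slot for (type, band, context):
+  // NUM_PROBAS * ((ctx) + NUM_CTX * ((b) + NUM_BANDS * (t))).
+ 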
uint32_t base_id = TOKEN_ID(coeff_type, n, ctx); + // should be stats[VP8EncBands[n]], but it's equivalent for n=0 or 1 + proba_t* s = res->stats[n][ctx]; + if (!AddToken(tokens, last >= 0, base_id + 0, s + 0)) { + return 0; + } + + while (n < 16) { + const int c = coeffs[n++]; + const int sign = c < 0; + const uint32_t v = sign ? -c : c; + if (!AddToken(tokens, v != 0, base_id + 1, s + 1)) { + base_id = TOKEN_ID(coeff_type, VP8EncBands[n], 0); // ctx=0 + s = res->stats[VP8EncBands[n]][0]; + continue; + } + if (!AddToken(tokens, v > 1, base_id + 2, s + 2)) { + base_id = TOKEN_ID(coeff_type, VP8EncBands[n], 1); // ctx=1 + s = res->stats[VP8EncBands[n]][1]; + } else { + if (!AddToken(tokens, v > 4, base_id + 3, s + 3)) { + if (AddToken(tokens, v != 2, base_id + 4, s + 4)) { + AddToken(tokens, v == 4, base_id + 5, s + 5); + } + } else if (!AddToken(tokens, v > 10, base_id + 6, s + 6)) { + if (!AddToken(tokens, v > 6, base_id + 7, s + 7)) { + AddConstantToken(tokens, v == 6, 159); + } else { + AddConstantToken(tokens, v >= 9, 165); + AddConstantToken(tokens, !(v & 1), 145); + } + } else { + int mask; + const uint8_t* tab; + uint32_t residue = v - 3; + if (residue < (8 << 1)) { // VP8Cat3 (3b) + AddToken(tokens, 0, base_id + 8, s + 8); + AddToken(tokens, 0, base_id + 9, s + 9); + residue -= (8 << 0); + mask = 1 << 2; + tab = VP8Cat3; + } else if (residue < (8 << 2)) { // VP8Cat4 (4b) + AddToken(tokens, 0, base_id + 8, s + 8); + AddToken(tokens, 1, base_id + 9, s + 9); + residue -= (8 << 1); + mask = 1 << 3; + tab = VP8Cat4; + } else if (residue < (8 << 3)) { // VP8Cat5 (5b) + AddToken(tokens, 1, base_id + 8, s + 8); + AddToken(tokens, 0, base_id + 10, s + 9); + residue -= (8 << 2); + mask = 1 << 4; + tab = VP8Cat5; + } else { // VP8Cat6 (11b) + AddToken(tokens, 1, base_id + 8, s + 8); + AddToken(tokens, 1, base_id + 10, s + 9); + residue -= (8 << 3); + mask = 1 << 10; + tab = VP8Cat6; + } + while (mask) { + AddConstantToken(tokens, !!(residue & mask), *tab++); + mask >>= 1; + } + } + base_id = TOKEN_ID(coeff_type, VP8EncBands[n], 2); // ctx=2 + s = res->stats[VP8EncBands[n]][2]; + } + AddConstantToken(tokens, sign, 128); + if (n == 16 || !AddToken(tokens, n <= last, base_id + 0, s + 0)) { + return 1; // EOB + } + } + return 1; +} + +#undef TOKEN_ID + +//------------------------------------------------------------------------------ +// Final coding pass, with known probabilities + +int VP8EmitTokens(VP8TBuffer* const b, VP8BitWriter* const bw, + const uint8_t* const probas, int final_pass) { + const VP8Tokens* p = b->pages_; + assert(!b->error_); + while (p != NULL) { + const VP8Tokens* const next = p->next_; + const int N = (next == NULL) ? b->left_ : 0; + int n = b->page_size_; + const token_t* const tokens = TOKEN_DATA(p); + while (n-- > N) { + const token_t token = tokens[n]; + const int bit = (token >> 15) & 1; + if (token & FIXED_PROBA_BIT) { + VP8PutBit(bw, bit, token & 0xffu); // constant proba + } else { + VP8PutBit(bw, bit, probas[token & 0x3fffu]); + } + } + if (final_pass) WebPSafeFree((void*)p); + p = next; + } + if (final_pass) b->pages_ = NULL; + return 1; +} + +// Size estimation +size_t VP8EstimateTokenSize(VP8TBuffer* const b, const uint8_t* const probas) { + size_t size = 0; + const VP8Tokens* p = b->pages_; + assert(!b->error_); + while (p != NULL) { + const VP8Tokens* const next = p->next_; + const int N = (next == NULL) ? 
b->left_ : 0; + int n = b->page_size_; + const token_t* const tokens = TOKEN_DATA(p); + while (n-- > N) { + const token_t token = tokens[n]; + const int bit = token & (1 << 15); + if (token & FIXED_PROBA_BIT) { + size += VP8BitCost(bit, token & 0xffu); + } else { + size += VP8BitCost(bit, probas[token & 0x3fffu]); + } + } + p = next; + } + return size; +} + +//------------------------------------------------------------------------------ + +#else // DISABLE_TOKEN_BUFFER + +void VP8TBufferInit(VP8TBuffer* const b, int page_size) { + (void)b; + (void)page_size; +} +void VP8TBufferClear(VP8TBuffer* const b) { + (void)b; +} + +#endif // !DISABLE_TOKEN_BUFFER + diff --git a/vendor/github.com/sizeofint/webpanimation/enc_tree_enc.c b/vendor/github.com/sizeofint/webpanimation/enc_tree_enc.c new file mode 100644 index 00000000..fdc1f53e --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/enc_tree_enc.c @@ -0,0 +1,504 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Coding of token probabilities, intra modes and segments. +// +// Author: Skal (pascal.massimino@gmail.com) + +#include "enc_vp8i_enc.h" + +//------------------------------------------------------------------------------ +// Default probabilities + +// Paragraph 13.5 +const uint8_t + VP8CoeffsProba0[NUM_TYPES][NUM_BANDS][NUM_CTX][NUM_PROBAS] = { + { { { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 } + }, + { { 253, 136, 254, 255, 228, 219, 128, 128, 128, 128, 128 }, + { 189, 129, 242, 255, 227, 213, 255, 219, 128, 128, 128 }, + { 106, 126, 227, 252, 214, 209, 255, 255, 128, 128, 128 } + }, + { { 1, 98, 248, 255, 236, 226, 255, 255, 128, 128, 128 }, + { 181, 133, 238, 254, 221, 234, 255, 154, 128, 128, 128 }, + { 78, 134, 202, 247, 198, 180, 255, 219, 128, 128, 128 }, + }, + { { 1, 185, 249, 255, 243, 255, 128, 128, 128, 128, 128 }, + { 184, 150, 247, 255, 236, 224, 128, 128, 128, 128, 128 }, + { 77, 110, 216, 255, 236, 230, 128, 128, 128, 128, 128 }, + }, + { { 1, 101, 251, 255, 241, 255, 128, 128, 128, 128, 128 }, + { 170, 139, 241, 252, 236, 209, 255, 255, 128, 128, 128 }, + { 37, 116, 196, 243, 228, 255, 255, 255, 128, 128, 128 } + }, + { { 1, 204, 254, 255, 245, 255, 128, 128, 128, 128, 128 }, + { 207, 160, 250, 255, 238, 128, 128, 128, 128, 128, 128 }, + { 102, 103, 231, 255, 211, 171, 128, 128, 128, 128, 128 } + }, + { { 1, 152, 252, 255, 240, 255, 128, 128, 128, 128, 128 }, + { 177, 135, 243, 255, 234, 225, 128, 128, 128, 128, 128 }, + { 80, 129, 211, 255, 194, 224, 128, 128, 128, 128, 128 } + }, + { { 1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 246, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 } + } + }, + { { { 198, 35, 237, 223, 193, 187, 162, 160, 145, 155, 62 }, + { 131, 45, 198, 221, 172, 176, 220, 157, 252, 221, 1 }, + { 68, 47, 146, 208, 149, 167, 221, 162, 255, 223, 128 } + }, + { { 1, 149, 241, 255, 221, 224, 255, 255, 128, 128, 128 }, + { 184, 141, 234, 253, 222, 220, 255, 199, 128, 
128, 128 }, + { 81, 99, 181, 242, 176, 190, 249, 202, 255, 255, 128 } + }, + { { 1, 129, 232, 253, 214, 197, 242, 196, 255, 255, 128 }, + { 99, 121, 210, 250, 201, 198, 255, 202, 128, 128, 128 }, + { 23, 91, 163, 242, 170, 187, 247, 210, 255, 255, 128 } + }, + { { 1, 200, 246, 255, 234, 255, 128, 128, 128, 128, 128 }, + { 109, 178, 241, 255, 231, 245, 255, 255, 128, 128, 128 }, + { 44, 130, 201, 253, 205, 192, 255, 255, 128, 128, 128 } + }, + { { 1, 132, 239, 251, 219, 209, 255, 165, 128, 128, 128 }, + { 94, 136, 225, 251, 218, 190, 255, 255, 128, 128, 128 }, + { 22, 100, 174, 245, 186, 161, 255, 199, 128, 128, 128 } + }, + { { 1, 182, 249, 255, 232, 235, 128, 128, 128, 128, 128 }, + { 124, 143, 241, 255, 227, 234, 128, 128, 128, 128, 128 }, + { 35, 77, 181, 251, 193, 211, 255, 205, 128, 128, 128 } + }, + { { 1, 157, 247, 255, 236, 231, 255, 255, 128, 128, 128 }, + { 121, 141, 235, 255, 225, 227, 255, 255, 128, 128, 128 }, + { 45, 99, 188, 251, 195, 217, 255, 224, 128, 128, 128 } + }, + { { 1, 1, 251, 255, 213, 255, 128, 128, 128, 128, 128 }, + { 203, 1, 248, 255, 255, 128, 128, 128, 128, 128, 128 }, + { 137, 1, 177, 255, 224, 255, 128, 128, 128, 128, 128 } + } + }, + { { { 253, 9, 248, 251, 207, 208, 255, 192, 128, 128, 128 }, + { 175, 13, 224, 243, 193, 185, 249, 198, 255, 255, 128 }, + { 73, 17, 171, 221, 161, 179, 236, 167, 255, 234, 128 } + }, + { { 1, 95, 247, 253, 212, 183, 255, 255, 128, 128, 128 }, + { 239, 90, 244, 250, 211, 209, 255, 255, 128, 128, 128 }, + { 155, 77, 195, 248, 188, 195, 255, 255, 128, 128, 128 } + }, + { { 1, 24, 239, 251, 218, 219, 255, 205, 128, 128, 128 }, + { 201, 51, 219, 255, 196, 186, 128, 128, 128, 128, 128 }, + { 69, 46, 190, 239, 201, 218, 255, 228, 128, 128, 128 } + }, + { { 1, 191, 251, 255, 255, 128, 128, 128, 128, 128, 128 }, + { 223, 165, 249, 255, 213, 255, 128, 128, 128, 128, 128 }, + { 141, 124, 248, 255, 255, 128, 128, 128, 128, 128, 128 } + }, + { { 1, 16, 248, 255, 255, 128, 128, 128, 128, 128, 128 }, + { 190, 36, 230, 255, 236, 255, 128, 128, 128, 128, 128 }, + { 149, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 } + }, + { { 1, 226, 255, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 247, 192, 255, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 240, 128, 255, 128, 128, 128, 128, 128, 128, 128, 128 } + }, + { { 1, 134, 252, 255, 255, 128, 128, 128, 128, 128, 128 }, + { 213, 62, 250, 255, 255, 128, 128, 128, 128, 128, 128 }, + { 55, 93, 255, 128, 128, 128, 128, 128, 128, 128, 128 } + }, + { { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 } + } + }, + { { { 202, 24, 213, 235, 186, 191, 220, 160, 240, 175, 255 }, + { 126, 38, 182, 232, 169, 184, 228, 174, 255, 187, 128 }, + { 61, 46, 138, 219, 151, 178, 240, 170, 255, 216, 128 } + }, + { { 1, 112, 230, 250, 199, 191, 247, 159, 255, 255, 128 }, + { 166, 109, 228, 252, 211, 215, 255, 174, 128, 128, 128 }, + { 39, 77, 162, 232, 172, 180, 245, 178, 255, 255, 128 } + }, + { { 1, 52, 220, 246, 198, 199, 249, 220, 255, 255, 128 }, + { 124, 74, 191, 243, 183, 193, 250, 221, 255, 255, 128 }, + { 24, 71, 130, 219, 154, 170, 243, 182, 255, 255, 128 } + }, + { { 1, 182, 225, 249, 219, 240, 255, 224, 128, 128, 128 }, + { 149, 150, 226, 252, 216, 205, 255, 171, 128, 128, 128 }, + { 28, 108, 170, 242, 183, 194, 254, 223, 255, 255, 128 } + }, + { { 1, 81, 230, 252, 204, 203, 255, 192, 128, 128, 128 }, + { 123, 102, 209, 247, 188, 196, 255, 233, 128, 128, 128 }, + { 20, 95, 153, 
243, 164, 173, 255, 203, 128, 128, 128 } + }, + { { 1, 222, 248, 255, 216, 213, 128, 128, 128, 128, 128 }, + { 168, 175, 246, 252, 235, 205, 255, 255, 128, 128, 128 }, + { 47, 116, 215, 255, 211, 212, 255, 255, 128, 128, 128 } + }, + { { 1, 121, 236, 253, 212, 214, 255, 255, 128, 128, 128 }, + { 141, 84, 213, 252, 201, 202, 255, 219, 128, 128, 128 }, + { 42, 80, 160, 240, 162, 185, 255, 205, 128, 128, 128 } + }, + { { 1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 244, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 }, + { 238, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128 } + } + } +}; + +void VP8DefaultProbas(VP8Encoder* const enc) { + VP8EncProba* const probas = &enc->proba_; + probas->use_skip_proba_ = 0; + memset(probas->segments_, 255u, sizeof(probas->segments_)); + memcpy(probas->coeffs_, VP8CoeffsProba0, sizeof(VP8CoeffsProba0)); + // Note: we could hard-code the level_costs_ corresponding to VP8CoeffsProba0, + // but that's ~11k of static data. Better call VP8CalculateLevelCosts() later. + probas->dirty_ = 1; +} + +// Paragraph 11.5. 900bytes. +static const uint8_t kBModesProba[NUM_BMODES][NUM_BMODES][NUM_BMODES - 1] = { + { { 231, 120, 48, 89, 115, 113, 120, 152, 112 }, + { 152, 179, 64, 126, 170, 118, 46, 70, 95 }, + { 175, 69, 143, 80, 85, 82, 72, 155, 103 }, + { 56, 58, 10, 171, 218, 189, 17, 13, 152 }, + { 114, 26, 17, 163, 44, 195, 21, 10, 173 }, + { 121, 24, 80, 195, 26, 62, 44, 64, 85 }, + { 144, 71, 10, 38, 171, 213, 144, 34, 26 }, + { 170, 46, 55, 19, 136, 160, 33, 206, 71 }, + { 63, 20, 8, 114, 114, 208, 12, 9, 226 }, + { 81, 40, 11, 96, 182, 84, 29, 16, 36 } }, + { { 134, 183, 89, 137, 98, 101, 106, 165, 148 }, + { 72, 187, 100, 130, 157, 111, 32, 75, 80 }, + { 66, 102, 167, 99, 74, 62, 40, 234, 128 }, + { 41, 53, 9, 178, 241, 141, 26, 8, 107 }, + { 74, 43, 26, 146, 73, 166, 49, 23, 157 }, + { 65, 38, 105, 160, 51, 52, 31, 115, 128 }, + { 104, 79, 12, 27, 217, 255, 87, 17, 7 }, + { 87, 68, 71, 44, 114, 51, 15, 186, 23 }, + { 47, 41, 14, 110, 182, 183, 21, 17, 194 }, + { 66, 45, 25, 102, 197, 189, 23, 18, 22 } }, + { { 88, 88, 147, 150, 42, 46, 45, 196, 205 }, + { 43, 97, 183, 117, 85, 38, 35, 179, 61 }, + { 39, 53, 200, 87, 26, 21, 43, 232, 171 }, + { 56, 34, 51, 104, 114, 102, 29, 93, 77 }, + { 39, 28, 85, 171, 58, 165, 90, 98, 64 }, + { 34, 22, 116, 206, 23, 34, 43, 166, 73 }, + { 107, 54, 32, 26, 51, 1, 81, 43, 31 }, + { 68, 25, 106, 22, 64, 171, 36, 225, 114 }, + { 34, 19, 21, 102, 132, 188, 16, 76, 124 }, + { 62, 18, 78, 95, 85, 57, 50, 48, 51 } }, + { { 193, 101, 35, 159, 215, 111, 89, 46, 111 }, + { 60, 148, 31, 172, 219, 228, 21, 18, 111 }, + { 112, 113, 77, 85, 179, 255, 38, 120, 114 }, + { 40, 42, 1, 196, 245, 209, 10, 25, 109 }, + { 88, 43, 29, 140, 166, 213, 37, 43, 154 }, + { 61, 63, 30, 155, 67, 45, 68, 1, 209 }, + { 100, 80, 8, 43, 154, 1, 51, 26, 71 }, + { 142, 78, 78, 16, 255, 128, 34, 197, 171 }, + { 41, 40, 5, 102, 211, 183, 4, 1, 221 }, + { 51, 50, 17, 168, 209, 192, 23, 25, 82 } }, + { { 138, 31, 36, 171, 27, 166, 38, 44, 229 }, + { 67, 87, 58, 169, 82, 115, 26, 59, 179 }, + { 63, 59, 90, 180, 59, 166, 93, 73, 154 }, + { 40, 40, 21, 116, 143, 209, 34, 39, 175 }, + { 47, 15, 16, 183, 34, 223, 49, 45, 183 }, + { 46, 17, 33, 183, 6, 98, 15, 32, 183 }, + { 57, 46, 22, 24, 128, 1, 54, 17, 37 }, + { 65, 32, 73, 115, 28, 128, 23, 128, 205 }, + { 40, 3, 9, 115, 51, 192, 18, 6, 223 }, + { 87, 37, 9, 115, 59, 77, 64, 21, 47 } }, + { { 104, 55, 44, 218, 9, 54, 53, 130, 226 }, + { 64, 90, 70, 205, 40, 41, 23, 26, 57 }, + { 54, 57, 112, 184, 5, 41, 
38, 166, 213 }, + { 30, 34, 26, 133, 152, 116, 10, 32, 134 }, + { 39, 19, 53, 221, 26, 114, 32, 73, 255 }, + { 31, 9, 65, 234, 2, 15, 1, 118, 73 }, + { 75, 32, 12, 51, 192, 255, 160, 43, 51 }, + { 88, 31, 35, 67, 102, 85, 55, 186, 85 }, + { 56, 21, 23, 111, 59, 205, 45, 37, 192 }, + { 55, 38, 70, 124, 73, 102, 1, 34, 98 } }, + { { 125, 98, 42, 88, 104, 85, 117, 175, 82 }, + { 95, 84, 53, 89, 128, 100, 113, 101, 45 }, + { 75, 79, 123, 47, 51, 128, 81, 171, 1 }, + { 57, 17, 5, 71, 102, 57, 53, 41, 49 }, + { 38, 33, 13, 121, 57, 73, 26, 1, 85 }, + { 41, 10, 67, 138, 77, 110, 90, 47, 114 }, + { 115, 21, 2, 10, 102, 255, 166, 23, 6 }, + { 101, 29, 16, 10, 85, 128, 101, 196, 26 }, + { 57, 18, 10, 102, 102, 213, 34, 20, 43 }, + { 117, 20, 15, 36, 163, 128, 68, 1, 26 } }, + { { 102, 61, 71, 37, 34, 53, 31, 243, 192 }, + { 69, 60, 71, 38, 73, 119, 28, 222, 37 }, + { 68, 45, 128, 34, 1, 47, 11, 245, 171 }, + { 62, 17, 19, 70, 146, 85, 55, 62, 70 }, + { 37, 43, 37, 154, 100, 163, 85, 160, 1 }, + { 63, 9, 92, 136, 28, 64, 32, 201, 85 }, + { 75, 15, 9, 9, 64, 255, 184, 119, 16 }, + { 86, 6, 28, 5, 64, 255, 25, 248, 1 }, + { 56, 8, 17, 132, 137, 255, 55, 116, 128 }, + { 58, 15, 20, 82, 135, 57, 26, 121, 40 } }, + { { 164, 50, 31, 137, 154, 133, 25, 35, 218 }, + { 51, 103, 44, 131, 131, 123, 31, 6, 158 }, + { 86, 40, 64, 135, 148, 224, 45, 183, 128 }, + { 22, 26, 17, 131, 240, 154, 14, 1, 209 }, + { 45, 16, 21, 91, 64, 222, 7, 1, 197 }, + { 56, 21, 39, 155, 60, 138, 23, 102, 213 }, + { 83, 12, 13, 54, 192, 255, 68, 47, 28 }, + { 85, 26, 85, 85, 128, 128, 32, 146, 171 }, + { 18, 11, 7, 63, 144, 171, 4, 4, 246 }, + { 35, 27, 10, 146, 174, 171, 12, 26, 128 } }, + { { 190, 80, 35, 99, 180, 80, 126, 54, 45 }, + { 85, 126, 47, 87, 176, 51, 41, 20, 32 }, + { 101, 75, 128, 139, 118, 146, 116, 128, 85 }, + { 56, 41, 15, 176, 236, 85, 37, 9, 62 }, + { 71, 30, 17, 119, 118, 255, 17, 18, 138 }, + { 101, 38, 60, 138, 55, 70, 43, 26, 142 }, + { 146, 36, 19, 30, 171, 255, 97, 27, 20 }, + { 138, 45, 61, 62, 219, 1, 81, 188, 64 }, + { 32, 41, 20, 117, 151, 142, 20, 21, 163 }, + { 112, 19, 12, 61, 195, 128, 48, 4, 24 } } +}; + +static int PutI4Mode(VP8BitWriter* const bw, int mode, + const uint8_t* const prob) { + if (VP8PutBit(bw, mode != B_DC_PRED, prob[0])) { + if (VP8PutBit(bw, mode != B_TM_PRED, prob[1])) { + if (VP8PutBit(bw, mode != B_VE_PRED, prob[2])) { + if (!VP8PutBit(bw, mode >= B_LD_PRED, prob[3])) { + if (VP8PutBit(bw, mode != B_HE_PRED, prob[4])) { + VP8PutBit(bw, mode != B_RD_PRED, prob[5]); + } + } else { + if (VP8PutBit(bw, mode != B_LD_PRED, prob[6])) { + if (VP8PutBit(bw, mode != B_VL_PRED, prob[7])) { + VP8PutBit(bw, mode != B_HD_PRED, prob[8]); + } + } + } + } + } + } + return mode; +} + +static void PutI16Mode(VP8BitWriter* const bw, int mode) { + if (VP8PutBit(bw, (mode == TM_PRED || mode == H_PRED), 156)) { + VP8PutBit(bw, mode == TM_PRED, 128); // TM or HE + } else { + VP8PutBit(bw, mode == V_PRED, 163); // VE or DC + } +} + +static void PutUVMode(VP8BitWriter* const bw, int uv_mode) { + if (VP8PutBit(bw, uv_mode != DC_PRED, 142)) { + if (VP8PutBit(bw, uv_mode != V_PRED, 114)) { + VP8PutBit(bw, uv_mode != H_PRED, 183); // else: TM_PRED + } + } +} + +static void PutSegment(VP8BitWriter* const bw, int s, const uint8_t* p) { + if (VP8PutBit(bw, s >= 2, p[0])) p += 1; + VP8PutBit(bw, s & 1, p[1]); +} + +void VP8CodeIntraModes(VP8Encoder* const enc) { + VP8BitWriter* const bw = &enc->bw_; + VP8EncIterator it; + VP8IteratorInit(enc, &it); + do { + const VP8MBInfo* const mb = it.mb_; + const uint8_t* 
preds = it.preds_; + if (enc->segment_hdr_.update_map_) { + PutSegment(bw, mb->segment_, enc->proba_.segments_); + } + if (enc->proba_.use_skip_proba_) { + VP8PutBit(bw, mb->skip_, enc->proba_.skip_proba_); + } + if (VP8PutBit(bw, (mb->type_ != 0), 145)) { // i16x16 + PutI16Mode(bw, preds[0]); + } else { + const int preds_w = enc->preds_w_; + const uint8_t* top_pred = preds - preds_w; + int x, y; + for (y = 0; y < 4; ++y) { + int left = preds[-1]; + for (x = 0; x < 4; ++x) { + const uint8_t* const probas = kBModesProba[top_pred[x]][left]; + left = PutI4Mode(bw, preds[x], probas); + } + top_pred = preds; + preds += preds_w; + } + } + PutUVMode(bw, mb->uv_mode_); + } while (VP8IteratorNext(&it)); +} + +//------------------------------------------------------------------------------ +// Paragraph 13 + +const uint8_t + VP8CoeffsUpdateProba[NUM_TYPES][NUM_BANDS][NUM_CTX][NUM_PROBAS] = { + { { { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 176, 246, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 223, 241, 252, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 249, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 244, 252, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 234, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 246, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 239, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 251, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 251, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 254, 253, 255, 254, 255, 255, 255, 255, 255, 255 }, + { 250, 255, 254, 255, 254, 255, 255, 255, 255, 255, 255 }, + { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + } + }, + { { { 217, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 225, 252, 241, 253, 255, 255, 254, 255, 255, 255, 255 }, + { 234, 250, 241, 250, 253, 255, 253, 254, 255, 255, 255 } + }, + { { 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 223, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 238, 253, 254, 254, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 249, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 253, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 247, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 252, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 
255, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + } + }, + { { { 186, 251, 250, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 234, 251, 244, 254, 255, 255, 255, 255, 255, 255, 255 }, + { 251, 251, 243, 253, 254, 255, 254, 255, 255, 255, 255 } + }, + { { 255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 236, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 251, 253, 253, 254, 254, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + } + }, + { { { 248, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 250, 254, 252, 254, 255, 255, 255, 255, 255, 255, 255 }, + { 248, 254, 249, 253, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 246, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 252, 254, 251, 254, 254, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 254, 252, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 248, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 253, 255, 254, 254, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 245, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 253, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 251, 253, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 252, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 252, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 249, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 255, 253, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + }, + { { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }, + { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 } + } + } +}; + +void VP8WriteProbas(VP8BitWriter* const bw, const VP8EncProba* const probas) { + int t, b, c, p; + for (t = 0; t < NUM_TYPES; ++t) { + 
for (b = 0; b < NUM_BANDS; ++b) { + for (c = 0; c < NUM_CTX; ++c) { + for (p = 0; p < NUM_PROBAS; ++p) { + const uint8_t p0 = probas->coeffs_[t][b][c][p]; + const int update = (p0 != VP8CoeffsProba0[t][b][c][p]); + if (VP8PutBit(bw, update, VP8CoeffsUpdateProba[t][b][c][p])) { + VP8PutBits(bw, p0, 8); + } + } + } + } + } + if (VP8PutBitUniform(bw, probas->use_skip_proba_)) { + VP8PutBits(bw, probas->skip_proba_, 8); + } +} + diff --git a/vendor/github.com/sizeofint/webpanimation/enc_vp8i_enc.h b/vendor/github.com/sizeofint/webpanimation/enc_vp8i_enc.h new file mode 100644 index 00000000..49526d17 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/enc_vp8i_enc.h @@ -0,0 +1,518 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// WebP encoder: internal header. +// +// Author: Skal (pascal.massimino@gmail.com) + +#ifndef WEBP_ENC_VP8I_ENC_H_ +#define WEBP_ENC_VP8I_ENC_H_ + +#include <string.h> // for memcpy() +#include "dec_common_dec.h" +#include "dsp_dsp.h" +#include "utils_bit_writer_utils.h" +#include "utils_thread_utils.h" +#include "utils_utils.h" +#include "webp_encode.h" + +#ifdef __cplusplus +extern "C" { +#endif + +//------------------------------------------------------------------------------ +// Various defines and enums + +// version numbers +#define ENC_MAJ_VERSION 1 +#define ENC_MIN_VERSION 2 +#define ENC_REV_VERSION 0 + +enum { MAX_LF_LEVELS = 64, // Maximum loop filter level + MAX_VARIABLE_LEVEL = 67, // last (inclusive) level with variable cost + MAX_LEVEL = 2047 // max level (note: max codable is 2047 + 67) + }; + +typedef enum { // Rate-distortion optimization levels + RD_OPT_NONE = 0, // no rd-opt + RD_OPT_BASIC = 1, // basic scoring (no trellis) + RD_OPT_TRELLIS = 2, // perform trellis-quant on the final decision only + RD_OPT_TRELLIS_ALL = 3 // trellis-quant for every scoring (much slower) +} VP8RDLevel; + +// YUV-cache parameters. Cache is 32-bytes wide (= one cacheline). +// The original or reconstructed samples can be accessed using VP8Scan[]. +// The predicted blocks can be accessed using offsets to yuv_p_ and +// the arrays VP8*ModeOffsets[].
+// * YUV Samples area (yuv_in_/yuv_out_/yuv_out2_) +// (see VP8Scan[] for accessing the blocks, along with +// Y_OFF_ENC/U_OFF_ENC/V_OFF_ENC): +// +----+----+ +// Y_OFF_ENC |YYYY|UUVV| +// U_OFF_ENC |YYYY|UUVV| +// V_OFF_ENC |YYYY|....| <- 25% wasted U/V area +// |YYYY|....| +// +----+----+ +// * Prediction area ('yuv_p_', size = PRED_SIZE_ENC) +// Intra16 predictions (16x16 block each, two per row): +// |I16DC16|I16TM16| +// |I16VE16|I16HE16| +// Chroma U/V predictions (16x8 block each, two per row): +// |C8DC8|C8TM8| +// |C8VE8|C8HE8| +// Intra 4x4 predictions (4x4 block each) +// |I4DC4 I4TM4 I4VE4 I4HE4|I4RD4 I4VR4 I4LD4 I4VL4| +// |I4HD4 I4HU4 I4TMP .....|.......................| <- ~31% wasted +#define YUV_SIZE_ENC (BPS * 16) +#define PRED_SIZE_ENC (32 * BPS + 16 * BPS + 8 * BPS) // I16+Chroma+I4 preds +#define Y_OFF_ENC (0) +#define U_OFF_ENC (16) +#define V_OFF_ENC (16 + 8) + +extern const uint16_t VP8Scan[16]; +extern const uint16_t VP8UVModeOffsets[4]; +extern const uint16_t VP8I16ModeOffsets[4]; +extern const uint16_t VP8I4ModeOffsets[NUM_BMODES]; + +// Layout of prediction blocks +// intra 16x16 +#define I16DC16 (0 * 16 * BPS) +#define I16TM16 (I16DC16 + 16) +#define I16VE16 (1 * 16 * BPS) +#define I16HE16 (I16VE16 + 16) +// chroma 8x8, two U/V blocks side by side (hence: 16x8 each) +#define C8DC8 (2 * 16 * BPS) +#define C8TM8 (C8DC8 + 1 * 16) +#define C8VE8 (2 * 16 * BPS + 8 * BPS) +#define C8HE8 (C8VE8 + 1 * 16) +// intra 4x4 +#define I4DC4 (3 * 16 * BPS + 0) +#define I4TM4 (I4DC4 + 4) +#define I4VE4 (I4DC4 + 8) +#define I4HE4 (I4DC4 + 12) +#define I4RD4 (I4DC4 + 16) +#define I4VR4 (I4DC4 + 20) +#define I4LD4 (I4DC4 + 24) +#define I4VL4 (I4DC4 + 28) +#define I4HD4 (3 * 16 * BPS + 4 * BPS) +#define I4HU4 (I4HD4 + 4) +#define I4TMP (I4HD4 + 8) + +typedef int64_t score_t; // type used for scores, rate, distortion +// Note that MAX_COST is not the maximum allowed by sizeof(score_t), +// in order to allow overflowing computations. +#define MAX_COST ((score_t)0x7fffffffffffffLL) + +#define QFIX 17 +#define BIAS(b) ((b) << (QFIX - 8)) +// Fun fact: this is the _only_ line where we're actually being lossy and +// discarding bits. +static WEBP_INLINE int QUANTDIV(uint32_t n, uint32_t iQ, uint32_t B) { + return (int)((n * iQ + B) >> QFIX); +} + +// Uncomment the following to remove token-buffer code: +// #define DISABLE_TOKEN_BUFFER + +// quality below which error-diffusion is enabled +#define ERROR_DIFFUSION_QUALITY 98 + +//------------------------------------------------------------------------------ +// Headers + +typedef uint32_t proba_t; // 16b + 16b +typedef uint8_t ProbaArray[NUM_CTX][NUM_PROBAS]; +typedef proba_t StatsArray[NUM_CTX][NUM_PROBAS]; +typedef uint16_t CostArray[NUM_CTX][MAX_VARIABLE_LEVEL + 1]; +typedef const uint16_t* (*CostArrayPtr)[NUM_CTX]; // for easy casting +typedef const uint16_t* CostArrayMap[16][NUM_CTX]; +typedef double LFStats[NUM_MB_SEGMENTS][MAX_LF_LEVELS]; // filter stats + +typedef struct VP8Encoder VP8Encoder; + +// segment features +typedef struct { + int num_segments_; // Actual number of segments. 1 segment only = unused. + int update_map_; // whether to update the segment map or not. + // must be 0 if there's only 1 segment. + int size_; // bit-cost for transmitting the segment map +} VP8EncSegmentHeader; + +// Struct collecting all frame-persistent probabilities. +typedef struct { + uint8_t segments_[3]; // probabilities for segment tree + uint8_t skip_proba_; // final probability of being skipped. 
+ ProbaArray coeffs_[NUM_TYPES][NUM_BANDS]; // 1056 bytes + StatsArray stats_[NUM_TYPES][NUM_BANDS]; // 4224 bytes + CostArray level_cost_[NUM_TYPES][NUM_BANDS]; // 13056 bytes + CostArrayMap remapped_costs_[NUM_TYPES]; // 1536 bytes + int dirty_; // if true, need to call VP8CalculateLevelCosts() + int use_skip_proba_; // Note: we always use skip_proba for now. + int nb_skip_; // number of skipped blocks +} VP8EncProba; + +// Filter parameters. Not actually used in the code (we don't perform +// the in-loop filtering), but filled from user's config +typedef struct { + int simple_; // filtering type: 0=complex, 1=simple + int level_; // base filter level [0..63] + int sharpness_; // [0..7] + int i4x4_lf_delta_; // delta filter level for i4x4 relative to i16x16 +} VP8EncFilterHeader; + +//------------------------------------------------------------------------------ +// Information about the macroblocks. + +typedef struct { + // block type + unsigned int type_:2; // 0=i4x4, 1=i16x16 + unsigned int uv_mode_:2; + unsigned int skip_:1; + unsigned int segment_:2; + uint8_t alpha_; // quantization-susceptibility +} VP8MBInfo; + +typedef struct VP8Matrix { + uint16_t q_[16]; // quantizer steps + uint16_t iq_[16]; // reciprocals, fixed point. + uint32_t bias_[16]; // rounding bias + uint32_t zthresh_[16]; // value below which a coefficient is zeroed + uint16_t sharpen_[16]; // frequency boosters for slight sharpening +} VP8Matrix; + +typedef struct { + VP8Matrix y1_, y2_, uv_; // quantization matrices + int alpha_; // quant-susceptibility, range [-127,127]. Zero is neutral. + // Lower values indicate a lower risk of blurriness. + int beta_; // filter-susceptibility, range [0,255]. + int quant_; // final segment quantizer. + int fstrength_; // final in-loop filtering strength + int max_edge_; // max edge delta (for filtering strength) + int min_disto_; // minimum distortion required to trigger filtering record + // reactivities + int lambda_i16_, lambda_i4_, lambda_uv_; + int lambda_mode_, lambda_trellis_, tlambda_; + int lambda_trellis_i16_, lambda_trellis_i4_, lambda_trellis_uv_; + + // lambda values for distortion-based evaluation + score_t i4_penalty_; // penalty for using Intra4 +} VP8SegmentInfo; + +typedef int8_t DError[2 /* u/v */][2 /* top or left */]; + +// Handy transient struct to accumulate score and info during RD-optimization +// and mode evaluation. +typedef struct { + score_t D, SD; // Distortion, spectral distortion + score_t H, R, score; // header bits, rate, score. + int16_t y_dc_levels[16]; // Quantized levels for luma-DC, luma-AC, chroma. + int16_t y_ac_levels[16][16]; + int16_t uv_levels[4 + 4][16]; + int mode_i16; // mode number for intra16 prediction + uint8_t modes_i4[16]; // mode numbers for intra4 predictions + int mode_uv; // mode number of chroma prediction + uint32_t nz; // non-zero blocks + int8_t derr[2][3]; // DC diffusion errors for U/V for blocks #1/2/3 +} VP8ModeScore; + +// Iterator structure to iterate through macroblocks, pointing to the +// right neighbouring data (samples, predictions, contexts, ...) +typedef struct { + int x_, y_; // current macroblock + uint8_t* yuv_in_; // input samples + uint8_t* yuv_out_; // output samples + uint8_t* yuv_out2_; // secondary buffer swapped with yuv_out_.
+ uint8_t* yuv_p_; // scratch buffer for prediction + VP8Encoder* enc_; // back-pointer + VP8MBInfo* mb_; // current macroblock + VP8BitWriter* bw_; // current bit-writer + uint8_t* preds_; // intra mode predictors (4x4 blocks) + uint32_t* nz_; // non-zero pattern + uint8_t i4_boundary_[37]; // 32+5 boundary samples needed by intra4x4 + uint8_t* i4_top_; // pointer to the current top boundary sample + int i4_; // current intra4x4 mode being tested + int top_nz_[9]; // top-non-zero context. + int left_nz_[9]; // left-non-zero. left_nz[8] is independent. + uint64_t bit_count_[4][3]; // bit counters for coded levels. + uint64_t luma_bits_; // macroblock bit-cost for luma + uint64_t uv_bits_; // macroblock bit-cost for chroma + LFStats* lf_stats_; // filter stats (borrowed from enc_) + int do_trellis_; // if true, perform extra level optimisation + int count_down_; // number of mb still to be processed + int count_down0_; // starting counter value (for progress) + int percent0_; // saved initial progress percent + + DError left_derr_; // left error diffusion (u/v) + DError* top_derr_; // top diffusion error - NULL if disabled + + uint8_t* y_left_; // left luma samples (addressable from index -1 to 15). + uint8_t* u_left_; // left u samples (addressable from index -1 to 7) + uint8_t* v_left_; // left v samples (addressable from index -1 to 7) + + uint8_t* y_top_; // top luma samples at position 'x_' + uint8_t* uv_top_; // top u/v samples at position 'x_', packed as 16 bytes + + // memory for storing y/u/v_left_ + uint8_t yuv_left_mem_[17 + 16 + 16 + 8 + WEBP_ALIGN_CST]; + // memory for yuv_* + uint8_t yuv_mem_[3 * YUV_SIZE_ENC + PRED_SIZE_ENC + WEBP_ALIGN_CST]; +} VP8EncIterator; + + // in iterator.c +// must be called first +void VP8IteratorInit(VP8Encoder* const enc, VP8EncIterator* const it); +// restart a scan +void VP8IteratorReset(VP8EncIterator* const it); +// reset iterator position to row 'y' +void VP8IteratorSetRow(VP8EncIterator* const it, int y); +// set count down (=number of iterations to go) +void VP8IteratorSetCountDown(VP8EncIterator* const it, int count_down); +// return true if iteration is finished +int VP8IteratorIsDone(const VP8EncIterator* const it); +// Import uncompressed samples from source. +// If tmp_32 is not NULL, import boundary samples too. +// tmp_32 is a 32-bytes scratch buffer that must be aligned in memory. +void VP8IteratorImport(VP8EncIterator* const it, uint8_t* const tmp_32); +// export decimated samples +void VP8IteratorExport(const VP8EncIterator* const it); +// go to next macroblock. Returns false if not finished. +int VP8IteratorNext(VP8EncIterator* const it); +// save the yuv_out_ boundary values to top_/left_ arrays for next iterations. +void VP8IteratorSaveBoundary(VP8EncIterator* const it); +// Report progression based on macroblock rows. Return 0 for user-abort request. +int VP8IteratorProgress(const VP8EncIterator* const it, + int final_delta_percent); +// Intra4x4 iterations +void VP8IteratorStartI4(VP8EncIterator* const it); +// returns true if not done. 
+int VP8IteratorRotateI4(VP8EncIterator* const it, + const uint8_t* const yuv_out); + +// Non-zero context setup/teardown +void VP8IteratorNzToBytes(VP8EncIterator* const it); +void VP8IteratorBytesToNz(VP8EncIterator* const it); + +// Helper functions to set mode properties +void VP8SetIntra16Mode(const VP8EncIterator* const it, int mode); +void VP8SetIntra4Mode(const VP8EncIterator* const it, const uint8_t* modes); +void VP8SetIntraUVMode(const VP8EncIterator* const it, int mode); +void VP8SetSkip(const VP8EncIterator* const it, int skip); +void VP8SetSegment(const VP8EncIterator* const it, int segment); + +//------------------------------------------------------------------------------ +// Paginated token buffer + +typedef struct VP8Tokens VP8Tokens; // struct details in token.c + +typedef struct { +#if !defined(DISABLE_TOKEN_BUFFER) + VP8Tokens* pages_; // first page + VP8Tokens** last_page_; // last page + uint16_t* tokens_; // set to (*last_page_)->tokens_ + int left_; // how many free tokens left before the page is full + int page_size_; // number of tokens per page +#endif + int error_; // true in case of malloc error +} VP8TBuffer; + +// initialize an empty buffer +void VP8TBufferInit(VP8TBuffer* const b, int page_size); +void VP8TBufferClear(VP8TBuffer* const b); // de-allocate pages memory + +#if !defined(DISABLE_TOKEN_BUFFER) + +// Finalizes bitstream when probabilities are known. +// Deletes the allocated token memory if final_pass is true. +int VP8EmitTokens(VP8TBuffer* const b, VP8BitWriter* const bw, + const uint8_t* const probas, int final_pass); + +// record the coding of coefficients without knowing the probabilities yet +int VP8RecordCoeffTokens(int ctx, const struct VP8Residual* const res, + VP8TBuffer* const tokens); + +// Estimate the final coded size given a set of 'probas'. +size_t VP8EstimateTokenSize(VP8TBuffer* const b, const uint8_t* const probas); + +#endif // !DISABLE_TOKEN_BUFFER + +//------------------------------------------------------------------------------ +// VP8Encoder + +struct VP8Encoder { + const WebPConfig* config_; // user configuration and parameters + WebPPicture* pic_; // input / output picture + + // headers + VP8EncFilterHeader filter_hdr_; // filtering information + VP8EncSegmentHeader segment_hdr_; // segment information + + int profile_; // VP8's profile, deduced from Config. + + // dimension, in macroblock units. + int mb_w_, mb_h_; + int preds_w_; // stride of the *preds_ prediction plane (=4*mb_w + 1) + + // number of partitions (1, 2, 4 or 8 = MAX_NUM_PARTITIONS) + int num_parts_; + + // per-partition boolean decoders. + VP8BitWriter bw_; // part0 + VP8BitWriter parts_[MAX_NUM_PARTITIONS]; // token partitions + VP8TBuffer tokens_; // token buffer + + int percent_; // for progress + + // transparency blob + int has_alpha_; + uint8_t* alpha_data_; // non-NULL if transparency is present + uint32_t alpha_data_size_; + WebPWorker alpha_worker_; + + // quantization info (one set of DC/AC dequant factor per segment) + VP8SegmentInfo dqm_[NUM_MB_SEGMENTS]; + int base_quant_; // nominal quantizer value. Only used + // for relative coding of segments' quant. 
+ int alpha_; // global susceptibility (<=> complexity) + int uv_alpha_; // U/V quantization susceptibility + // global offset of quantizers, shared by all segments + int dq_y1_dc_; + int dq_y2_dc_, dq_y2_ac_; + int dq_uv_dc_, dq_uv_ac_; + + // probabilities and statistics + VP8EncProba proba_; + uint64_t sse_[4]; // sum of Y/U/V/A squared errors for all macroblocks + uint64_t sse_count_; // pixel count for the sse_[] stats + int coded_size_; + int residual_bytes_[3][4]; + int block_count_[3]; + + // quality/speed settings + int method_; // 0=fastest, 6=best/slowest. + VP8RDLevel rd_opt_level_; // Deduced from method_. + int max_i4_header_bits_; // partition #0 safeness factor + int mb_header_limit_; // rough limit for header bits per MB + int thread_level_; // derived from config->thread_level + int do_search_; // derived from config->target_XXX + int use_tokens_; // if true, use token buffer + + // Memory + VP8MBInfo* mb_info_; // contextual macroblock infos (mb_w_ + 1) + uint8_t* preds_; // predictions modes: (4*mb_w+1) * (4*mb_h+1) + uint32_t* nz_; // non-zero bit context: mb_w+1 + uint8_t* y_top_; // top luma samples. + uint8_t* uv_top_; // top u/v samples. + // U and V are packed into 16 bytes (8 U + 8 V) + LFStats* lf_stats_; // autofilter stats (if NULL, autofilter is off) + DError* top_derr_; // diffusion error (NULL if disabled) +}; + +//------------------------------------------------------------------------------ +// internal functions. Not public. + + // in tree.c +extern const uint8_t VP8CoeffsProba0[NUM_TYPES][NUM_BANDS][NUM_CTX][NUM_PROBAS]; +extern const uint8_t + VP8CoeffsUpdateProba[NUM_TYPES][NUM_BANDS][NUM_CTX][NUM_PROBAS]; +// Reset the token probabilities to their initial (default) values +void VP8DefaultProbas(VP8Encoder* const enc); +// Write the token probabilities +void VP8WriteProbas(VP8BitWriter* const bw, const VP8EncProba* const probas); +// Writes the partition #0 modes (that is: all intra modes) +void VP8CodeIntraModes(VP8Encoder* const enc); + + // in syntax.c +// Generates the final bitstream by coding the partition0 and headers, +// and appending an assembly of all the pre-coded token partitions. +// Return true if everything is ok. +int VP8EncWrite(VP8Encoder* const enc); +// Release memory allocated for bit-writing in VP8EncLoop & seq. +void VP8EncFreeBitWriters(VP8Encoder* const enc); + + // in frame.c +extern const uint8_t VP8Cat3[]; +extern const uint8_t VP8Cat4[]; +extern const uint8_t VP8Cat5[]; +extern const uint8_t VP8Cat6[]; + +// Form all the four Intra16x16 predictions in the yuv_p_ cache +void VP8MakeLuma16Preds(const VP8EncIterator* const it); +// Form all the four Chroma8x8 predictions in the yuv_p_ cache +void VP8MakeChroma8Preds(const VP8EncIterator* const it); +// Form all the ten Intra4x4 predictions in the yuv_p_ cache +// for the 4x4 block it->i4_ +void VP8MakeIntra4Preds(const VP8EncIterator* const it); +// Rate calculation +int VP8GetCostLuma16(VP8EncIterator* const it, const VP8ModeScore* const rd); +int VP8GetCostLuma4(VP8EncIterator* const it, const int16_t levels[16]); +int VP8GetCostUV(VP8EncIterator* const it, const VP8ModeScore* const rd); +// Main coding calls +int VP8EncLoop(VP8Encoder* const enc); +int VP8EncTokenLoop(VP8Encoder* const enc); + + // in webpenc.c +// Assign an error code to a picture. Return false for convenience. 
+int WebPEncodingSetError(const WebPPicture* const pic, WebPEncodingError error); +int WebPReportProgress(const WebPPicture* const pic, + int percent, int* const percent_store); + + // in analysis.c +// Main analysis loop. Decides the segmentations and complexity. +// Assigns a first guess for Intra16 and uvmode_ prediction modes. +int VP8EncAnalyze(VP8Encoder* const enc); + + // in quant.c +// Sets up segment's quantization values, base_quant_ and filter strengths. +void VP8SetSegmentParams(VP8Encoder* const enc, float quality); +// Picks the best modes and fills the levels. Returns true if skipped. +int VP8Decimate(VP8EncIterator* const it, VP8ModeScore* const rd, + VP8RDLevel rd_opt); + + // in alpha.c +void VP8EncInitAlpha(VP8Encoder* const enc); // initialize alpha compression +int VP8EncStartAlpha(VP8Encoder* const enc); // start alpha coding process +int VP8EncFinishAlpha(VP8Encoder* const enc); // finalize compressed data +int VP8EncDeleteAlpha(VP8Encoder* const enc); // delete compressed data + +// autofilter +void VP8InitFilter(VP8EncIterator* const it); +void VP8StoreFilterStats(VP8EncIterator* const it); +void VP8AdjustFilterStrength(VP8EncIterator* const it); + +// returns the approximate filtering strength needed to smooth an edge +// step of 'delta', given a sharpness parameter 'sharpness'. +int VP8FilterStrengthFromDelta(int sharpness, int delta); + + // misc utils for picture_*.c: + +// Remove reference to the ARGB/YUVA buffer (doesn't free anything). +void WebPPictureResetBuffers(WebPPicture* const picture); + +// Allocates ARGB buffer of given dimension (previous one is always free'd). +// Preserves the YUV(A) buffer. Returns false in case of error (invalid param, +// out-of-memory). +int WebPPictureAllocARGB(WebPPicture* const picture, int width, int height); + +// Allocates YUVA buffer of given dimension (previous one is always free'd). +// Uses picture->csp to determine whether an alpha buffer is needed. +// Preserves the ARGB buffer. +// Returns false in case of error (invalid param, out-of-memory). +int WebPPictureAllocYUVA(WebPPicture* const picture, int width, int height); + +// Replace samples that are fully transparent by 'color' to help compressibility +// (no guarantee, though). Assumes pic->use_argb is true. +void WebPReplaceTransparentPixels(WebPPicture* const pic, uint32_t color); + +//------------------------------------------------------------------------------ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // WEBP_ENC_VP8I_ENC_H_
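To make the QFIX fixed-point arithmetic in the header above concrete, here is a minimal, self-contained sketch of how QUANTDIV() approximates n / Q using a reciprocal like the iq_[] values VP8Matrix describes. This is an editorial illustration, not code from the patch: deriving iQ as (1 << QFIX) / Q and using BIAS(0x80) as a round-to-nearest bias are assumptions based on the header's comments, not values read out of libwebp.

#include <stdint.h>
#include <stdio.h>

#define QFIX 17
#define BIAS(b) ((b) << (QFIX - 8))

// Same expression as the QUANTDIV() helper in the header above.
static int quantdiv(uint32_t n, uint32_t iQ, uint32_t B) {
  return (int)((n * iQ + B) >> QFIX);
}

int main(void) {
  const uint32_t Q = 20;                 // quantizer step (example value)
  const uint32_t iQ = (1u << QFIX) / Q;  // assumed fixed-point reciprocal: 6553
  const uint32_t B = BIAS(0x80);         // 0.5 in QFIX units -> round to nearest
  // 137 / 20 = 6.85, so the rounded quotient is 7.
  printf("QUANTDIV(137) = %d\n", quantdiv(137, iQ, B));
  return 0;
}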
diff --git a/vendor/github.com/sizeofint/webpanimation/enc_vp8l_enc.c b/vendor/github.com/sizeofint/webpanimation/enc_vp8l_enc.c new file mode 100644 index 00000000..cfd4e212 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/enc_vp8l_enc.c @@ -0,0 +1,1930 @@ +// Copyright 2012 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// main entry for the lossless encoder. +// +// Author: Vikas Arora (vikaas.arora@gmail.com) +// + +#include <assert.h> +#include <stdlib.h> + +#include "enc_backward_references_enc.h" +#include "enc_histogram_enc.h" +#include "enc_vp8i_enc.h" +#include "enc_vp8li_enc.h" +#include "dsp_lossless.h" +#include "dsp_lossless_common.h" +#include "utils_bit_writer_utils.h" +#include "utils_huffman_encode_utils.h" +#include "utils_utils.h" +#include "webp_format_constants.h" + +// Maximum number of histogram images (sub-blocks). +#define MAX_HUFF_IMAGE_SIZE 2600 + +// Palette reordering for smaller sum of deltas (and for smaller storage). + +static int PaletteCompareColorsForQsort(const void* p1, const void* p2) { + const uint32_t a = WebPMemToUint32((uint8_t*)p1); + const uint32_t b = WebPMemToUint32((uint8_t*)p2); + assert(a != b); + return (a < b) ? -1 : 1; +} + +static WEBP_INLINE uint32_t PaletteComponentDistance(uint32_t v) { + return (v <= 128) ? v : (256 - v); +} + +// Computes a value that is related to the entropy created by the +// palette entry diff. +// +// Note that the last & 0xff is a no-operation in the next statement, but +// is removed by most compilers and is here only for regularity of the code. +static WEBP_INLINE uint32_t PaletteColorDistance(uint32_t col1, uint32_t col2) { + const uint32_t diff = VP8LSubPixels(col1, col2); + const int kMoreWeightForRGBThanForAlpha = 9; + uint32_t score; + score = PaletteComponentDistance((diff >> 0) & 0xff); + score += PaletteComponentDistance((diff >> 8) & 0xff); + score += PaletteComponentDistance((diff >> 16) & 0xff); + score *= kMoreWeightForRGBThanForAlpha; + score += PaletteComponentDistance((diff >> 24) & 0xff); + return score; +} + +static WEBP_INLINE void SwapColor(uint32_t* const col1, uint32_t* const col2) { + const uint32_t tmp = *col1; + *col1 = *col2; + *col2 = tmp; +} + +static void GreedyMinimizeDeltas(uint32_t palette[], int num_colors) { + // Greedily find the color closest to the predicted color, to minimize + // deltas in the palette. This reduces storage needs since the + // palette is stored with delta encoding. + uint32_t predict = 0x00000000; + int i, k; + for (i = 0; i < num_colors; ++i) { + int best_ix = i; + uint32_t best_score = ~0U; + for (k = i; k < num_colors; ++k) { + const uint32_t cur_score = PaletteColorDistance(palette[k], predict); + if (best_score > cur_score) { + best_score = cur_score; + best_ix = k; + } + } + SwapColor(&palette[best_ix], &palette[i]); + predict = palette[i]; + } +} + +// The palette has been sorted by alpha. This function checks if the other +// components of the palette have a monotonic development with regard to +// position in the palette. If all have monotonic development, there is +// no benefit to re-organize them greedily. A monotonic development +// would be spotted in green-only situations (like lossy alpha) or gray-scale +// images. +static int PaletteHasNonMonotonousDeltas(uint32_t palette[], int num_colors) { + uint32_t predict = 0x000000; + int i; + uint8_t sign_found = 0x00; + for (i = 0; i < num_colors; ++i) { + const uint32_t diff = VP8LSubPixels(palette[i], predict); + const uint8_t rd = (diff >> 16) & 0xff; + const uint8_t gd = (diff >> 8) & 0xff; + const uint8_t bd = (diff >> 0) & 0xff; + if (rd != 0x00) { + sign_found |= (rd < 0x80) ? 1 : 2; + } + if (gd != 0x00) { + sign_found |= (gd < 0x80) ? 8 : 16; + } + if (bd != 0x00) { + sign_found |= (bd < 0x80) ? 64 : 128; + } + predict = palette[i]; + } + return (sign_found & (sign_found << 1)) != 0; // two consecutive signs.
+} + +// ----------------------------------------------------------------------------- +// Palette + +// If number of colors in the image is less than or equal to MAX_PALETTE_SIZE, +// creates a palette and returns true, else returns false. +static int AnalyzeAndCreatePalette(const WebPPicture* const pic, + int low_effort, + uint32_t palette[MAX_PALETTE_SIZE], + int* const palette_size) { + const int num_colors = WebPGetColorPalette(pic, palette); + if (num_colors > MAX_PALETTE_SIZE) { + *palette_size = 0; + return 0; + } + *palette_size = num_colors; + qsort(palette, num_colors, sizeof(*palette), PaletteCompareColorsForQsort); + if (!low_effort && PaletteHasNonMonotonousDeltas(palette, num_colors)) { + GreedyMinimizeDeltas(palette, num_colors); + } + return 1; +} + +// These five modes are evaluated and their respective entropy is computed. +typedef enum { + kDirect = 0, + kSpatial = 1, + kSubGreen = 2, + kSpatialSubGreen = 3, + kPalette = 4, + kPaletteAndSpatial = 5, + kNumEntropyIx = 6 +} EntropyIx; + +typedef enum { + kHistoAlpha = 0, + kHistoAlphaPred, + kHistoGreen, + kHistoGreenPred, + kHistoRed, + kHistoRedPred, + kHistoBlue, + kHistoBluePred, + kHistoRedSubGreen, + kHistoRedPredSubGreen, + kHistoBlueSubGreen, + kHistoBluePredSubGreen, + kHistoPalette, + kHistoTotal // Must be last. +} HistoIx; + +static void AddSingleSubGreen(int p, uint32_t* const r, uint32_t* const b) { + const int green = p >> 8; // The upper bits are masked away later. + ++r[((p >> 16) - green) & 0xff]; + ++b[((p >> 0) - green) & 0xff]; +} + +static void AddSingle(uint32_t p, + uint32_t* const a, uint32_t* const r, + uint32_t* const g, uint32_t* const b) { + ++a[(p >> 24) & 0xff]; + ++r[(p >> 16) & 0xff]; + ++g[(p >> 8) & 0xff]; + ++b[(p >> 0) & 0xff]; +} + +static WEBP_INLINE uint32_t HashPix(uint32_t pix) { + // Note that masking with 0xffffffffu is for preventing an + // 'unsigned int overflow' warning. Doesn't impact the compiled code. + return ((((uint64_t)pix + (pix >> 19)) * 0x39c5fba7ull) & 0xffffffffu) >> 24; +} + +static int AnalyzeEntropy(const uint32_t* argb, + int width, int height, int argb_stride, + int use_palette, + int palette_size, int transform_bits, + EntropyIx* const min_entropy_ix, + int* const red_and_blue_always_zero) { + // Allocate histogram set with cache_bits = 0. + uint32_t* histo; + + if (use_palette && palette_size <= 16) { + // In the case of small palettes, we pack 2, 4 or 8 pixels together. In + // practice, small palettes are better than any other transform. + *min_entropy_ix = kPalette; + *red_and_blue_always_zero = 1; + return 1; + } + histo = (uint32_t*)WebPSafeCalloc(kHistoTotal, sizeof(*histo) * 256); + if (histo != NULL) { + int i, x, y; + const uint32_t* prev_row = NULL; + const uint32_t* curr_row = argb; + uint32_t pix_prev = argb[0]; // Skip the first pixel. 
+ for (y = 0; y < height; ++y) { + for (x = 0; x < width; ++x) { + const uint32_t pix = curr_row[x]; + const uint32_t pix_diff = VP8LSubPixels(pix, pix_prev); + pix_prev = pix; + if ((pix_diff == 0) || (prev_row != NULL && pix == prev_row[x])) { + continue; + } + AddSingle(pix, + &histo[kHistoAlpha * 256], + &histo[kHistoRed * 256], + &histo[kHistoGreen * 256], + &histo[kHistoBlue * 256]); + AddSingle(pix_diff, + &histo[kHistoAlphaPred * 256], + &histo[kHistoRedPred * 256], + &histo[kHistoGreenPred * 256], + &histo[kHistoBluePred * 256]); + AddSingleSubGreen(pix, + &histo[kHistoRedSubGreen * 256], + &histo[kHistoBlueSubGreen * 256]); + AddSingleSubGreen(pix_diff, + &histo[kHistoRedPredSubGreen * 256], + &histo[kHistoBluePredSubGreen * 256]); + { + // Approximate the palette by the entropy of the multiplicative hash. + const uint32_t hash = HashPix(pix); + ++histo[kHistoPalette * 256 + hash]; + } + } + prev_row = curr_row; + curr_row += argb_stride; + } + { + double entropy_comp[kHistoTotal]; + double entropy[kNumEntropyIx]; + int k; + int last_mode_to_analyze = use_palette ? kPalette : kSpatialSubGreen; + int j; + // Let's add one zero to the predicted histograms. The zeros are removed + // too efficiently by the pix_diff == 0 comparison; at least one of the + // zeros is likely to exist. + ++histo[kHistoRedPredSubGreen * 256]; + ++histo[kHistoBluePredSubGreen * 256]; + ++histo[kHistoRedPred * 256]; + ++histo[kHistoGreenPred * 256]; + ++histo[kHistoBluePred * 256]; + ++histo[kHistoAlphaPred * 256]; + + for (j = 0; j < kHistoTotal; ++j) { + entropy_comp[j] = VP8LBitsEntropy(&histo[j * 256], 256); + } + entropy[kDirect] = entropy_comp[kHistoAlpha] + + entropy_comp[kHistoRed] + + entropy_comp[kHistoGreen] + + entropy_comp[kHistoBlue]; + entropy[kSpatial] = entropy_comp[kHistoAlphaPred] + + entropy_comp[kHistoRedPred] + + entropy_comp[kHistoGreenPred] + + entropy_comp[kHistoBluePred]; + entropy[kSubGreen] = entropy_comp[kHistoAlpha] + + entropy_comp[kHistoRedSubGreen] + + entropy_comp[kHistoGreen] + + entropy_comp[kHistoBlueSubGreen]; + entropy[kSpatialSubGreen] = entropy_comp[kHistoAlphaPred] + + entropy_comp[kHistoRedPredSubGreen] + + entropy_comp[kHistoGreenPred] + + entropy_comp[kHistoBluePredSubGreen]; + entropy[kPalette] = entropy_comp[kHistoPalette]; + + // When including transforms, there is an overhead in bits from + // storing them. This overhead is small but matters for small images. + // For spatial, there are 14 transformations. + entropy[kSpatial] += VP8LSubSampleSize(width, transform_bits) * + VP8LSubSampleSize(height, transform_bits) * + VP8LFastLog2(14); + // For color transforms: 24 as only 3 channels are considered in a + // ColorTransformElement. + entropy[kSpatialSubGreen] += VP8LSubSampleSize(width, transform_bits) * + VP8LSubSampleSize(height, transform_bits) * + VP8LFastLog2(24); + // For palettes, add the cost of storing the palette. + // We empirically estimate the cost of a compressed entry as 8 bits. + // The palette is differential-coded when compressed, hence a much + // lower cost than sizeof(uint32_t)*8. + entropy[kPalette] += palette_size * 8; + + *min_entropy_ix = kDirect; + for (k = kDirect + 1; k <= last_mode_to_analyze; ++k) { + if (entropy[*min_entropy_ix] > entropy[k]) { + *min_entropy_ix = (EntropyIx)k; + } + } + assert((int)*min_entropy_ix <= last_mode_to_analyze); + *red_and_blue_always_zero = 1; + // Let's check if the histogram of the chosen entropy mode has + // non-zero red and blue values.
If all are zero, we can later skip + // the cross color optimization. + { + static const uint8_t kHistoPairs[5][2] = { + { kHistoRed, kHistoBlue }, + { kHistoRedPred, kHistoBluePred }, + { kHistoRedSubGreen, kHistoBlueSubGreen }, + { kHistoRedPredSubGreen, kHistoBluePredSubGreen }, + { kHistoRed, kHistoBlue } + }; + const uint32_t* const red_histo = + &histo[256 * kHistoPairs[*min_entropy_ix][0]]; + const uint32_t* const blue_histo = + &histo[256 * kHistoPairs[*min_entropy_ix][1]]; + for (i = 1; i < 256; ++i) { + if ((red_histo[i] | blue_histo[i]) != 0) { + *red_and_blue_always_zero = 0; + break; + } + } + } + } + WebPSafeFree(histo); + return 1; + } else { + return 0; + } +} + +static int GetHistoBits(int method, int use_palette, int width, int height) { + // Make tile size a function of encoding method (Range: 0 to 6). + int histo_bits = (use_palette ? 9 : 7) - method; + while (1) { + const int huff_image_size = VP8LSubSampleSize(width, histo_bits) * + VP8LSubSampleSize(height, histo_bits); + if (huff_image_size <= MAX_HUFF_IMAGE_SIZE) break; + ++histo_bits; + } + return (histo_bits < MIN_HUFFMAN_BITS) ? MIN_HUFFMAN_BITS : + (histo_bits > MAX_HUFFMAN_BITS) ? MAX_HUFFMAN_BITS : histo_bits; +} + +static int GetTransformBits(int method, int histo_bits) { + const int max_transform_bits = (method < 4) ? 6 : (method > 4) ? 4 : 5; + const int res = + (histo_bits > max_transform_bits) ? max_transform_bits : histo_bits; + assert(res <= MAX_TRANSFORM_BITS); + return res; +} + +// Set of parameters to be used in each iteration of the cruncher. +#define CRUNCH_SUBCONFIGS_MAX 2 +typedef struct { + int lz77_; + int do_no_cache_; +} CrunchSubConfig; +typedef struct { + int entropy_idx_; + CrunchSubConfig sub_configs_[CRUNCH_SUBCONFIGS_MAX]; + int sub_configs_size_; +} CrunchConfig; + +#define CRUNCH_CONFIGS_MAX kNumEntropyIx + +static int EncoderAnalyze(VP8LEncoder* const enc, + CrunchConfig crunch_configs[CRUNCH_CONFIGS_MAX], + int* const crunch_configs_size, + int* const red_and_blue_always_zero) { + const WebPPicture* const pic = enc->pic_; + const int width = pic->width; + const int height = pic->height; + const WebPConfig* const config = enc->config_; + const int method = config->method; + const int low_effort = (config->method == 0); + int i; + int use_palette; + int n_lz77s; + // If set to 0, analyze the cache with the computed cache value. If 1, also + // analyze with no-cache. + int do_no_cache = 0; + assert(pic != NULL && pic->argb != NULL); + + use_palette = + AnalyzeAndCreatePalette(pic, low_effort, + enc->palette_, &enc->palette_size_); + + // Empirical bit sizes. + enc->histo_bits_ = GetHistoBits(method, use_palette, + pic->width, pic->height); + enc->transform_bits_ = GetTransformBits(method, enc->histo_bits_); + + if (low_effort) { + // AnalyzeEntropy is somewhat slow. + crunch_configs[0].entropy_idx_ = use_palette ? kPalette : kSpatialSubGreen; + n_lz77s = 1; + *crunch_configs_size = 1; + } else { + EntropyIx min_entropy_ix; + // Try out multiple LZ77 on images with few colors. + n_lz77s = (enc->palette_size_ > 0 && enc->palette_size_ <= 16) ? 2 : 1; + if (!AnalyzeEntropy(pic->argb, width, height, pic->argb_stride, use_palette, + enc->palette_size_, enc->transform_bits_, + &min_entropy_ix, red_and_blue_always_zero)) { + return 0; + } + if (method == 6 && config->quality == 100) { + do_no_cache = 1; + // Go brute force on all transforms. 
+ *crunch_configs_size = 0; + for (i = 0; i < kNumEntropyIx; ++i) { + // We can only apply kPalette or kPaletteAndSpatial if we can indeed use + // a palette. + if ((i != kPalette && i != kPaletteAndSpatial) || use_palette) { + assert(*crunch_configs_size < CRUNCH_CONFIGS_MAX); + crunch_configs[(*crunch_configs_size)++].entropy_idx_ = i; + } + } + } else { + // Only choose the guessed best transform. + *crunch_configs_size = 1; + crunch_configs[0].entropy_idx_ = min_entropy_ix; + if (config->quality >= 75 && method == 5) { + // Test with and without color cache. + do_no_cache = 1; + // If we have a palette, also check in combination with spatial. + if (min_entropy_ix == kPalette) { + *crunch_configs_size = 2; + crunch_configs[1].entropy_idx_ = kPaletteAndSpatial; + } + } + } + } + // Fill in the different LZ77s. + assert(n_lz77s <= CRUNCH_SUBCONFIGS_MAX); + for (i = 0; i < *crunch_configs_size; ++i) { + int j; + for (j = 0; j < n_lz77s; ++j) { + assert(j < CRUNCH_SUBCONFIGS_MAX); + crunch_configs[i].sub_configs_[j].lz77_ = + (j == 0) ? kLZ77Standard | kLZ77RLE : kLZ77Box; + crunch_configs[i].sub_configs_[j].do_no_cache_ = do_no_cache; + } + crunch_configs[i].sub_configs_size_ = n_lz77s; + } + return 1; +} + +static int EncoderInit(VP8LEncoder* const enc) { + const WebPPicture* const pic = enc->pic_; + const int width = pic->width; + const int height = pic->height; + const int pix_cnt = width * height; + // we round the block size up, so we're guaranteed to have + // at most MAX_REFS_BLOCK_PER_IMAGE blocks used: + const int refs_block_size = (pix_cnt - 1) / MAX_REFS_BLOCK_PER_IMAGE + 1; + int i; + if (!VP8LHashChainInit(&enc->hash_chain_, pix_cnt)) return 0; + + for (i = 0; i < 4; ++i) VP8LBackwardRefsInit(&enc->refs_[i], refs_block_size); + + return 1; +} + +// Returns false in case of memory error. +static int GetHuffBitLengthsAndCodes( + const VP8LHistogramSet* const histogram_image, + HuffmanTreeCode* const huffman_codes) { + int i, k; + int ok = 0; + uint64_t total_length_size = 0; + uint8_t* mem_buf = NULL; + const int histogram_image_size = histogram_image->size; + int max_num_symbols = 0; + uint8_t* buf_rle = NULL; + HuffmanTree* huff_tree = NULL; + + // Iterate over all histograms and get the aggregate number of codes used. + for (i = 0; i < histogram_image_size; ++i) { + const VP8LHistogram* const histo = histogram_image->histograms[i]; + HuffmanTreeCode* const codes = &huffman_codes[5 * i]; + assert(histo != NULL); + for (k = 0; k < 5; ++k) { + const int num_symbols = + (k == 0) ? VP8LHistogramNumCodes(histo->palette_code_bits_) : + (k == 4) ? NUM_DISTANCE_CODES : 256; + codes[k].num_symbols = num_symbols; + total_length_size += num_symbols; + } + } + + // Allocate and Set Huffman codes. + { + uint16_t* codes; + uint8_t* lengths; + mem_buf = (uint8_t*)WebPSafeCalloc(total_length_size, + sizeof(*lengths) + sizeof(*codes)); + if (mem_buf == NULL) goto End; + + codes = (uint16_t*)mem_buf; + lengths = (uint8_t*)&codes[total_length_size]; + for (i = 0; i < 5 * histogram_image_size; ++i) { + const int bit_length = huffman_codes[i].num_symbols; + huffman_codes[i].codes = codes; + huffman_codes[i].code_lengths = lengths; + codes += bit_length; + lengths += bit_length; + if (max_num_symbols < bit_length) { + max_num_symbols = bit_length; + } + } + } + + buf_rle = (uint8_t*)WebPSafeMalloc(1ULL, max_num_symbols); + huff_tree = (HuffmanTree*)WebPSafeMalloc(3ULL * max_num_symbols, + sizeof(*huff_tree)); + if (buf_rle == NULL || huff_tree == NULL) goto End; + + // Create Huffman trees. 
+ for (i = 0; i < histogram_image_size; ++i) { + HuffmanTreeCode* const codes = &huffman_codes[5 * i]; + VP8LHistogram* const histo = histogram_image->histograms[i]; + VP8LCreateHuffmanTree(histo->literal_, 15, buf_rle, huff_tree, codes + 0); + VP8LCreateHuffmanTree(histo->red_, 15, buf_rle, huff_tree, codes + 1); + VP8LCreateHuffmanTree(histo->blue_, 15, buf_rle, huff_tree, codes + 2); + VP8LCreateHuffmanTree(histo->alpha_, 15, buf_rle, huff_tree, codes + 3); + VP8LCreateHuffmanTree(histo->distance_, 15, buf_rle, huff_tree, codes + 4); + } + ok = 1; + End: + WebPSafeFree(huff_tree); + WebPSafeFree(buf_rle); + if (!ok) { + WebPSafeFree(mem_buf); + memset(huffman_codes, 0, 5 * histogram_image_size * sizeof(*huffman_codes)); + } + return ok; +} + +static void StoreHuffmanTreeOfHuffmanTreeToBitMask( + VP8LBitWriter* const bw, const uint8_t* code_length_bitdepth) { + // RFC 1951 will calm you down if you are worried about this funny sequence. + // This sequence is tuned from that, but more weighted for lower symbol count, + // and more spiking histograms. + static const uint8_t kStorageOrder[CODE_LENGTH_CODES] = { + 17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 + }; + int i; + // Throw away trailing zeros: + int codes_to_store = CODE_LENGTH_CODES; + for (; codes_to_store > 4; --codes_to_store) { + if (code_length_bitdepth[kStorageOrder[codes_to_store - 1]] != 0) { + break; + } + } + VP8LPutBits(bw, codes_to_store - 4, 4); + for (i = 0; i < codes_to_store; ++i) { + VP8LPutBits(bw, code_length_bitdepth[kStorageOrder[i]], 3); + } +} + +static void ClearHuffmanTreeIfOnlyOneSymbol( + HuffmanTreeCode* const huffman_code) { + int k; + int count = 0; + for (k = 0; k < huffman_code->num_symbols; ++k) { + if (huffman_code->code_lengths[k] != 0) { + ++count; + if (count > 1) return; + } + } + for (k = 0; k < huffman_code->num_symbols; ++k) { + huffman_code->code_lengths[k] = 0; + huffman_code->codes[k] = 0; + } +} + +static void StoreHuffmanTreeToBitMask( + VP8LBitWriter* const bw, + const HuffmanTreeToken* const tokens, const int num_tokens, + const HuffmanTreeCode* const huffman_code) { + int i; + for (i = 0; i < num_tokens; ++i) { + const int ix = tokens[i].code; + const int extra_bits = tokens[i].extra_bits; + VP8LPutBits(bw, huffman_code->codes[ix], huffman_code->code_lengths[ix]); + switch (ix) { + case 16: + VP8LPutBits(bw, extra_bits, 2); + break; + case 17: + VP8LPutBits(bw, extra_bits, 3); + break; + case 18: + VP8LPutBits(bw, extra_bits, 7); + break; + } + } +} +
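For readers tracing the token stream written by StoreHuffmanTreeToBitMask() above, this sketch shows the inverse operation: expanding the (code, extra_bits) pairs back into a list of code lengths. It is illustrative only and not from the patch; the run bases 3/3/11 and the initial previous length of 8 follow my reading of the DEFLATE-style convention (RFC 1951) that the comments reference, and the Token struct merely mirrors HuffmanTreeToken.

#include <stdint.h>

typedef struct { int code; int extra_bits; } Token; // mirrors HuffmanTreeToken

// Expands RLE tokens into 'lengths'; returns how many lengths were written.
static int ExpandTokens(const Token* tokens, int num_tokens, uint8_t* lengths) {
  int i, k, n = 0;
  int prev = 8;  // assumed initial "previous" code length
  for (i = 0; i < num_tokens; ++i) {
    const int code = tokens[i].code;
    const int extra = tokens[i].extra_bits;
    if (code < 16) {          // literal code length 0..15
      lengths[n++] = (uint8_t)code;
      if (code != 0) prev = code;
    } else if (code == 16) {  // repeat previous length 3..6 times (2 extra bits)
      for (k = 0; k < 3 + extra; ++k) lengths[n++] = (uint8_t)prev;
    } else if (code == 17) {  // run of 3..10 zeros (3 extra bits)
      for (k = 0; k < 3 + extra; ++k) lengths[n++] = 0;
    } else {                  // code 18: run of 11..138 zeros (7 extra bits)
      for (k = 0; k < 11 + extra; ++k) lengths[n++] = 0;
    }
  }
  return n;
}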
+// 'huff_tree' and 'tokens' are pre-allocated buffers. +static void StoreFullHuffmanCode(VP8LBitWriter* const bw, + HuffmanTree* const huff_tree, + HuffmanTreeToken* const tokens, + const HuffmanTreeCode* const tree) { + uint8_t code_length_bitdepth[CODE_LENGTH_CODES] = { 0 }; + uint16_t code_length_bitdepth_symbols[CODE_LENGTH_CODES] = { 0 }; + const int max_tokens = tree->num_symbols; + int num_tokens; + HuffmanTreeCode huffman_code; + huffman_code.num_symbols = CODE_LENGTH_CODES; + huffman_code.code_lengths = code_length_bitdepth; + huffman_code.codes = code_length_bitdepth_symbols; + + VP8LPutBits(bw, 0, 1); + num_tokens = VP8LCreateCompressedHuffmanTree(tree, tokens, max_tokens); + { + uint32_t histogram[CODE_LENGTH_CODES] = { 0 }; + uint8_t buf_rle[CODE_LENGTH_CODES] = { 0 }; + int i; + for (i = 0; i < num_tokens; ++i) { + ++histogram[tokens[i].code]; + } + + VP8LCreateHuffmanTree(histogram, 7, buf_rle, huff_tree, &huffman_code); + } + + StoreHuffmanTreeOfHuffmanTreeToBitMask(bw, code_length_bitdepth); + ClearHuffmanTreeIfOnlyOneSymbol(&huffman_code); + { + int trailing_zero_bits = 0; + int trimmed_length = num_tokens; + int write_trimmed_length; + int length; + int i = num_tokens; + while (i-- > 0) { + const int ix = tokens[i].code; + if (ix == 0 || ix == 17 || ix == 18) { + --trimmed_length; // discount trailing zeros + trailing_zero_bits += code_length_bitdepth[ix]; + if (ix == 17) { + trailing_zero_bits += 3; + } else if (ix == 18) { + trailing_zero_bits += 7; + } + } else { + break; + } + } + write_trimmed_length = (trimmed_length > 1 && trailing_zero_bits > 12); + length = write_trimmed_length ? trimmed_length : num_tokens; + VP8LPutBits(bw, write_trimmed_length, 1); + if (write_trimmed_length) { + if (trimmed_length == 2) { + VP8LPutBits(bw, 0, 3 + 2); // nbitpairs=1, trimmed_length=2 + } else { + const int nbits = BitsLog2Floor(trimmed_length - 2); + const int nbitpairs = nbits / 2 + 1; + assert(trimmed_length > 2); + assert(nbitpairs - 1 < 8); + VP8LPutBits(bw, nbitpairs - 1, 3); + VP8LPutBits(bw, trimmed_length - 2, nbitpairs * 2); + } + } + StoreHuffmanTreeToBitMask(bw, tokens, length, &huffman_code); + } +} + +// 'huff_tree' and 'tokens' are pre-allocated buffers. +static void StoreHuffmanCode(VP8LBitWriter* const bw, + HuffmanTree* const huff_tree, + HuffmanTreeToken* const tokens, + const HuffmanTreeCode* const huffman_code) { + int i; + int count = 0; + int symbols[2] = { 0, 0 }; + const int kMaxBits = 8; + const int kMaxSymbol = 1 << kMaxBits; + + // Check whether it's a small tree. + for (i = 0; i < huffman_code->num_symbols && count < 3; ++i) { + if (huffman_code->code_lengths[i] != 0) { + if (count < 2) symbols[count] = i; + ++count; + } + } + + if (count == 0) { // emit minimal tree for empty cases + // bits: small tree marker: 1, count-1: 0, large 8-bit code: 0, code: 0 + VP8LPutBits(bw, 0x01, 4); + } else if (count <= 2 && symbols[0] < kMaxSymbol && symbols[1] < kMaxSymbol) { + VP8LPutBits(bw, 1, 1); // Small tree marker to encode 1 or 2 symbols. + VP8LPutBits(bw, count - 1, 1); + if (symbols[0] <= 1) { + VP8LPutBits(bw, 0, 1); // Code bit for small (1 bit) symbol value.
+ VP8LPutBits(bw, symbols[0], 1); + } else { + VP8LPutBits(bw, 1, 1); + VP8LPutBits(bw, symbols[0], 8); + } + if (count == 2) { + VP8LPutBits(bw, symbols[1], 8); + } + } else { + StoreFullHuffmanCode(bw, huff_tree, tokens, huffman_code); + } +} + +static WEBP_INLINE void WriteHuffmanCode(VP8LBitWriter* const bw, + const HuffmanTreeCode* const code, + int code_index) { + const int depth = code->code_lengths[code_index]; + const int symbol = code->codes[code_index]; + VP8LPutBits(bw, symbol, depth); +} + +static WEBP_INLINE void WriteHuffmanCodeWithExtraBits( + VP8LBitWriter* const bw, + const HuffmanTreeCode* const code, + int code_index, + int bits, + int n_bits) { + const int depth = code->code_lengths[code_index]; + const int symbol = code->codes[code_index]; + VP8LPutBits(bw, (bits << depth) | symbol, depth + n_bits); +} + +static WebPEncodingError StoreImageToBitMask( + VP8LBitWriter* const bw, int width, int histo_bits, + const VP8LBackwardRefs* const refs, + const uint16_t* histogram_symbols, + const HuffmanTreeCode* const huffman_codes) { + const int histo_xsize = histo_bits ? VP8LSubSampleSize(width, histo_bits) : 1; + const int tile_mask = (histo_bits == 0) ? 0 : -(1 << histo_bits); + // x and y trace the position in the image. + int x = 0; + int y = 0; + int tile_x = x & tile_mask; + int tile_y = y & tile_mask; + int histogram_ix = histogram_symbols[0]; + const HuffmanTreeCode* codes = huffman_codes + 5 * histogram_ix; + VP8LRefsCursor c = VP8LRefsCursorInit(refs); + while (VP8LRefsCursorOk(&c)) { + const PixOrCopy* const v = c.cur_pos; + if ((tile_x != (x & tile_mask)) || (tile_y != (y & tile_mask))) { + tile_x = x & tile_mask; + tile_y = y & tile_mask; + histogram_ix = histogram_symbols[(y >> histo_bits) * histo_xsize + + (x >> histo_bits)]; + codes = huffman_codes + 5 * histogram_ix; + } + if (PixOrCopyIsLiteral(v)) { + static const uint8_t order[] = { 1, 2, 0, 3 }; + int k; + for (k = 0; k < 4; ++k) { + const int code = PixOrCopyLiteral(v, order[k]); + WriteHuffmanCode(bw, codes + k, code); + } + } else if (PixOrCopyIsCacheIdx(v)) { + const int code = PixOrCopyCacheIdx(v); + const int literal_ix = 256 + NUM_LENGTH_CODES + code; + WriteHuffmanCode(bw, codes, literal_ix); + } else { + int bits, n_bits; + int code; + + const int distance = PixOrCopyDistance(v); + VP8LPrefixEncode(v->len, &code, &n_bits, &bits); + WriteHuffmanCodeWithExtraBits(bw, codes, 256 + code, bits, n_bits); + + // Don't write the distance with the extra bits code since + // the distance can be up to 18 bits of extra bits, and the prefix + // 15 bits, totaling to 33, and our PutBits only supports up to 32 bits. + VP8LPrefixEncode(distance, &code, &n_bits, &bits); + WriteHuffmanCode(bw, codes + 4, code); + VP8LPutBits(bw, bits, n_bits); + } + x += PixOrCopyLength(v); + while (x >= width) { + x -= width; + ++y; + } + VP8LRefsCursorNext(&c); + } + return bw->error_ ? 
VP8_ENC_ERROR_OUT_OF_MEMORY : VP8_ENC_OK; +} + +// Special case of EncodeImageInternal() for cache-bits=0, histo_bits=31 +static WebPEncodingError EncodeImageNoHuffman( + VP8LBitWriter* const bw, const uint32_t* const argb, + VP8LHashChain* const hash_chain, VP8LBackwardRefs* const refs_array, + int width, int height, int quality, int low_effort) { + int i; + int max_tokens = 0; + WebPEncodingError err = VP8_ENC_OK; + VP8LBackwardRefs* refs; + HuffmanTreeToken* tokens = NULL; + HuffmanTreeCode huffman_codes[5] = { { 0, NULL, NULL } }; + const uint16_t histogram_symbols[1] = { 0 }; // only one tree, one symbol + int cache_bits = 0; + VP8LHistogramSet* histogram_image = NULL; + HuffmanTree* const huff_tree = (HuffmanTree*)WebPSafeMalloc( + 3ULL * CODE_LENGTH_CODES, sizeof(*huff_tree)); + if (huff_tree == NULL) { + err = VP8_ENC_ERROR_OUT_OF_MEMORY; + goto Error; + } + + // Calculate backward references from ARGB image. + if (!VP8LHashChainFill(hash_chain, quality, argb, width, height, + low_effort)) { + err = VP8_ENC_ERROR_OUT_OF_MEMORY; + goto Error; + } + err = VP8LGetBackwardReferences( + width, height, argb, quality, /*low_effort=*/0, kLZ77Standard | kLZ77RLE, + cache_bits, /*do_no_cache=*/0, hash_chain, refs_array, &cache_bits); + if (err != VP8_ENC_OK) goto Error; + refs = &refs_array[0]; + histogram_image = VP8LAllocateHistogramSet(1, cache_bits); + if (histogram_image == NULL) { + err = VP8_ENC_ERROR_OUT_OF_MEMORY; + goto Error; + } + VP8LHistogramSetClear(histogram_image); + + // Build histogram image and symbols from backward references. + VP8LHistogramStoreRefs(refs, histogram_image->histograms[0]); + + // Create Huffman bit lengths and codes for each histogram image. + assert(histogram_image->size == 1); + if (!GetHuffBitLengthsAndCodes(histogram_image, huffman_codes)) { + err = VP8_ENC_ERROR_OUT_OF_MEMORY; + goto Error; + } + + // No color cache, no Huffman image. + VP8LPutBits(bw, 0, 1); + + // Find maximum number of symbols for the huffman tree-set. + for (i = 0; i < 5; ++i) { + HuffmanTreeCode* const codes = &huffman_codes[i]; + if (max_tokens < codes->num_symbols) { + max_tokens = codes->num_symbols; + } + } + + tokens = (HuffmanTreeToken*)WebPSafeMalloc(max_tokens, sizeof(*tokens)); + if (tokens == NULL) { + err = VP8_ENC_ERROR_OUT_OF_MEMORY; + goto Error; + } + + // Store Huffman codes. + for (i = 0; i < 5; ++i) { + HuffmanTreeCode* const codes = &huffman_codes[i]; + StoreHuffmanCode(bw, huff_tree, tokens, codes); + ClearHuffmanTreeIfOnlyOneSymbol(codes); + } + + // Store actual literals. 
+ err = StoreImageToBitMask(bw, width, 0, refs, histogram_symbols, + huffman_codes); + + Error: + WebPSafeFree(tokens); + WebPSafeFree(huff_tree); + VP8LFreeHistogramSet(histogram_image); + WebPSafeFree(huffman_codes[0].codes); + return err; +} + +static WebPEncodingError EncodeImageInternal( + VP8LBitWriter* const bw, const uint32_t* const argb, + VP8LHashChain* const hash_chain, VP8LBackwardRefs refs_array[4], int width, + int height, int quality, int low_effort, int use_cache, + const CrunchConfig* const config, int* cache_bits, int histogram_bits, + size_t init_byte_position, int* const hdr_size, int* const data_size) { + WebPEncodingError err = VP8_ENC_ERROR_OUT_OF_MEMORY; + const uint32_t histogram_image_xysize = + VP8LSubSampleSize(width, histogram_bits) * + VP8LSubSampleSize(height, histogram_bits); + VP8LHistogramSet* histogram_image = NULL; + VP8LHistogram* tmp_histo = NULL; + int histogram_image_size = 0; + size_t bit_array_size = 0; + HuffmanTree* const huff_tree = (HuffmanTree*)WebPSafeMalloc( + 3ULL * CODE_LENGTH_CODES, sizeof(*huff_tree)); + HuffmanTreeToken* tokens = NULL; + HuffmanTreeCode* huffman_codes = NULL; + uint16_t* const histogram_symbols = + (uint16_t*)WebPSafeMalloc(histogram_image_xysize, + sizeof(*histogram_symbols)); + int sub_configs_idx; + int cache_bits_init, write_histogram_image; + VP8LBitWriter bw_init = *bw, bw_best; + int hdr_size_tmp; + VP8LHashChain hash_chain_histogram; // histogram image hash chain + size_t bw_size_best = ~(size_t)0; + assert(histogram_bits >= MIN_HUFFMAN_BITS); + assert(histogram_bits <= MAX_HUFFMAN_BITS); + assert(hdr_size != NULL); + assert(data_size != NULL); + + // Make sure we can allocate the different objects. + memset(&hash_chain_histogram, 0, sizeof(hash_chain_histogram)); + if (huff_tree == NULL || histogram_symbols == NULL || + !VP8LHashChainInit(&hash_chain_histogram, histogram_image_xysize) || + !VP8LHashChainFill(hash_chain, quality, argb, width, height, + low_effort)) { + goto Error; + } + if (use_cache) { + // If the value is different from zero, it has been set during the + // palette analysis. + cache_bits_init = (*cache_bits == 0) ? MAX_COLOR_CACHE_BITS : *cache_bits; + } else { + cache_bits_init = 0; + } + // If several iterations will happen, clone into bw_best. + if (!VP8LBitWriterInit(&bw_best, 0) || + ((config->sub_configs_size_ > 1 || + config->sub_configs_[0].do_no_cache_) && + !VP8LBitWriterClone(bw, &bw_best))) { + goto Error; + } + for (sub_configs_idx = 0; sub_configs_idx < config->sub_configs_size_; + ++sub_configs_idx) { + const CrunchSubConfig* const sub_config = + &config->sub_configs_[sub_configs_idx]; + int cache_bits_best, i_cache; + err = VP8LGetBackwardReferences(width, height, argb, quality, low_effort, + sub_config->lz77_, cache_bits_init, + sub_config->do_no_cache_, hash_chain, + &refs_array[0], &cache_bits_best); + if (err != VP8_ENC_OK) goto Error; + + for (i_cache = 0; i_cache < (sub_config->do_no_cache_ ? 2 : 1); ++i_cache) { + const int cache_bits_tmp = (i_cache == 0) ? cache_bits_best : 0; + // Speed-up: no need to study the no-cache case if it was already studied + // in i_cache == 0. + if (i_cache == 1 && cache_bits_best == 0) break; + + // Reset the bit writer for this iteration. + VP8LBitWriterReset(&bw_init, bw); + + // Build histogram image and symbols from backward references. 
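+      // The "histogram image" splits the picture into tiles of side
+      // (1 << histogram_bits); histogram_symbols[] stores, per tile, the
+      // index of the clustered histogram its pixels use. Worked example
+      // (illustrative numbers only): with histogram_bits == 3 and
+      // width == 100, histo_xsize = VP8LSubSampleSize(100, 3) = 13, and the
+      // pixel at (x, y) = (20, 9) selects
+      // histogram_symbols[(9 >> 3) * 13 + (20 >> 3)] = histogram_symbols[15].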
+ histogram_image = + VP8LAllocateHistogramSet(histogram_image_xysize, cache_bits_tmp); + tmp_histo = VP8LAllocateHistogram(cache_bits_tmp); + if (histogram_image == NULL || tmp_histo == NULL || + !VP8LGetHistoImageSymbols(width, height, &refs_array[i_cache], + quality, low_effort, histogram_bits, + cache_bits_tmp, histogram_image, tmp_histo, + histogram_symbols)) { + goto Error; + } + // Create Huffman bit lengths and codes for each histogram image. + histogram_image_size = histogram_image->size; + bit_array_size = 5 * histogram_image_size; + huffman_codes = (HuffmanTreeCode*)WebPSafeCalloc(bit_array_size, + sizeof(*huffman_codes)); + // Note: some histogram_image entries may point to tmp_histos[], so the + // latter need to outlive the following call to + // GetHuffBitLengthsAndCodes(). + if (huffman_codes == NULL || + !GetHuffBitLengthsAndCodes(histogram_image, huffman_codes)) { + goto Error; + } + // Free combined histograms. + VP8LFreeHistogramSet(histogram_image); + histogram_image = NULL; + + // Free scratch histograms. + VP8LFreeHistogram(tmp_histo); + tmp_histo = NULL; + + // Color Cache parameters. + if (cache_bits_tmp > 0) { + VP8LPutBits(bw, 1, 1); + VP8LPutBits(bw, cache_bits_tmp, 4); + } else { + VP8LPutBits(bw, 0, 1); + } + + // Huffman image + meta huffman. + write_histogram_image = (histogram_image_size > 1); + VP8LPutBits(bw, write_histogram_image, 1); + if (write_histogram_image) { + uint32_t* const histogram_argb = + (uint32_t*)WebPSafeMalloc(histogram_image_xysize, + sizeof(*histogram_argb)); + int max_index = 0; + uint32_t i; + if (histogram_argb == NULL) goto Error; + for (i = 0; i < histogram_image_xysize; ++i) { + const int symbol_index = histogram_symbols[i] & 0xffff; + histogram_argb[i] = (symbol_index << 8); + if (symbol_index >= max_index) { + max_index = symbol_index + 1; + } + } + histogram_image_size = max_index; + + VP8LPutBits(bw, histogram_bits - 2, 3); + err = EncodeImageNoHuffman( + bw, histogram_argb, &hash_chain_histogram, &refs_array[2], + VP8LSubSampleSize(width, histogram_bits), + VP8LSubSampleSize(height, histogram_bits), quality, low_effort); + WebPSafeFree(histogram_argb); + if (err != VP8_ENC_OK) goto Error; + } + + // Store Huffman codes. + { + int i; + int max_tokens = 0; + // Find maximum number of symbols for the huffman tree-set. + for (i = 0; i < 5 * histogram_image_size; ++i) { + HuffmanTreeCode* const codes = &huffman_codes[i]; + if (max_tokens < codes->num_symbols) { + max_tokens = codes->num_symbols; + } + } + tokens = (HuffmanTreeToken*)WebPSafeMalloc(max_tokens, sizeof(*tokens)); + if (tokens == NULL) goto Error; + for (i = 0; i < 5 * histogram_image_size; ++i) { + HuffmanTreeCode* const codes = &huffman_codes[i]; + StoreHuffmanCode(bw, huff_tree, tokens, codes); + ClearHuffmanTreeIfOnlyOneSymbol(codes); + } + } + // Store actual literals. + hdr_size_tmp = (int)(VP8LBitWriterNumBytes(bw) - init_byte_position); + err = StoreImageToBitMask(bw, width, histogram_bits, &refs_array[i_cache], + histogram_symbols, huffman_codes); + if (err != VP8_ENC_OK) goto Error; + // Keep track of the smallest image so far. 
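+      // Of all the (lz77 variant, color-cache) passes tried by this loop,
+      // 'bw_best' retains the shortest bitstream: a winning pass swaps its
+      // writer into 'bw_best', and the final swap after the loop moves the
+      // winner back into 'bw'. A minimal sketch of the idiom (hypothetical
+      // names, not part of this file):
+      //
+      //   size_t best = ~(size_t)0;
+      //   for (each candidate) {
+      //     Reset(bw); Encode(bw);
+      //     if (Size(bw) < best) { best = Size(bw); Swap(bw, bw_best); }
+      //   }
+      //   Swap(bw, bw_best);   // 'bw' now holds the smallest stream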
+ if (VP8LBitWriterNumBytes(bw) < bw_size_best) { + bw_size_best = VP8LBitWriterNumBytes(bw); + *cache_bits = cache_bits_tmp; + *hdr_size = hdr_size_tmp; + *data_size = + (int)(VP8LBitWriterNumBytes(bw) - init_byte_position - *hdr_size); + VP8LBitWriterSwap(bw, &bw_best); + } + WebPSafeFree(tokens); + tokens = NULL; + if (huffman_codes != NULL) { + WebPSafeFree(huffman_codes->codes); + WebPSafeFree(huffman_codes); + huffman_codes = NULL; + } + } + } + VP8LBitWriterSwap(bw, &bw_best); + err = VP8_ENC_OK; + + Error: + WebPSafeFree(tokens); + WebPSafeFree(huff_tree); + VP8LFreeHistogramSet(histogram_image); + VP8LFreeHistogram(tmp_histo); + VP8LHashChainClear(&hash_chain_histogram); + if (huffman_codes != NULL) { + WebPSafeFree(huffman_codes->codes); + WebPSafeFree(huffman_codes); + } + WebPSafeFree(histogram_symbols); + VP8LBitWriterWipeOut(&bw_best); + return err; +} + +// ----------------------------------------------------------------------------- +// Transforms + +static void ApplySubtractGreen(VP8LEncoder* const enc, int width, int height, + VP8LBitWriter* const bw) { + VP8LPutBits(bw, TRANSFORM_PRESENT, 1); + VP8LPutBits(bw, SUBTRACT_GREEN, 2); + VP8LSubtractGreenFromBlueAndRed(enc->argb_, width * height); +} + +static WebPEncodingError ApplyPredictFilter(const VP8LEncoder* const enc, + int width, int height, + int quality, int low_effort, + int used_subtract_green, + VP8LBitWriter* const bw) { + const int pred_bits = enc->transform_bits_; + const int transform_width = VP8LSubSampleSize(width, pred_bits); + const int transform_height = VP8LSubSampleSize(height, pred_bits); + // we disable near-lossless quantization if palette is used. + const int near_lossless_strength = enc->use_palette_ ? 100 + : enc->config_->near_lossless; + + VP8LResidualImage(width, height, pred_bits, low_effort, enc->argb_, + enc->argb_scratch_, enc->transform_data_, + near_lossless_strength, enc->config_->exact, + used_subtract_green); + VP8LPutBits(bw, TRANSFORM_PRESENT, 1); + VP8LPutBits(bw, PREDICTOR_TRANSFORM, 2); + assert(pred_bits >= 2); + VP8LPutBits(bw, pred_bits - 2, 3); + return EncodeImageNoHuffman( + bw, enc->transform_data_, (VP8LHashChain*)&enc->hash_chain_, + (VP8LBackwardRefs*)&enc->refs_[0], transform_width, transform_height, + quality, low_effort); +} + +static WebPEncodingError ApplyCrossColorFilter(const VP8LEncoder* const enc, + int width, int height, + int quality, int low_effort, + VP8LBitWriter* const bw) { + const int ccolor_transform_bits = enc->transform_bits_; + const int transform_width = VP8LSubSampleSize(width, ccolor_transform_bits); + const int transform_height = VP8LSubSampleSize(height, ccolor_transform_bits); + + VP8LColorSpaceTransform(width, height, ccolor_transform_bits, quality, + enc->argb_, enc->transform_data_); + VP8LPutBits(bw, TRANSFORM_PRESENT, 1); + VP8LPutBits(bw, CROSS_COLOR_TRANSFORM, 2); + assert(ccolor_transform_bits >= 2); + VP8LPutBits(bw, ccolor_transform_bits - 2, 3); + return EncodeImageNoHuffman( + bw, enc->transform_data_, (VP8LHashChain*)&enc->hash_chain_, + (VP8LBackwardRefs*)&enc->refs_[0], transform_width, transform_height, + quality, low_effort); +} + +// ----------------------------------------------------------------------------- + +static WebPEncodingError WriteRiffHeader(const WebPPicture* const pic, + size_t riff_size, size_t vp8l_size) { + uint8_t riff[RIFF_HEADER_SIZE + CHUNK_HEADER_SIZE + VP8L_SIGNATURE_SIZE] = { + 'R', 'I', 'F', 'F', 0, 0, 0, 0, 'W', 'E', 'B', 'P', + 'V', 'P', '8', 'L', 0, 0, 0, 0, VP8L_MAGIC_BYTE, + }; + PutLE32(riff + 
TAG_SIZE, (uint32_t)riff_size); + PutLE32(riff + RIFF_HEADER_SIZE + TAG_SIZE, (uint32_t)vp8l_size); + if (!pic->writer(riff, sizeof(riff), pic)) { + return VP8_ENC_ERROR_BAD_WRITE; + } + return VP8_ENC_OK; +} + +static int WriteImageSize(const WebPPicture* const pic, + VP8LBitWriter* const bw) { + const int width = pic->width - 1; + const int height = pic->height - 1; + assert(width < WEBP_MAX_DIMENSION && height < WEBP_MAX_DIMENSION); + + VP8LPutBits(bw, width, VP8L_IMAGE_SIZE_BITS); + VP8LPutBits(bw, height, VP8L_IMAGE_SIZE_BITS); + return !bw->error_; +} + +static int WriteRealAlphaAndVersion(VP8LBitWriter* const bw, int has_alpha) { + VP8LPutBits(bw, has_alpha, 1); + VP8LPutBits(bw, VP8L_VERSION, VP8L_VERSION_BITS); + return !bw->error_; +} + +static WebPEncodingError WriteImage(const WebPPicture* const pic, + VP8LBitWriter* const bw, + size_t* const coded_size) { + WebPEncodingError err = VP8_ENC_OK; + const uint8_t* const webpll_data = VP8LBitWriterFinish(bw); + const size_t webpll_size = VP8LBitWriterNumBytes(bw); + const size_t vp8l_size = VP8L_SIGNATURE_SIZE + webpll_size; + const size_t pad = vp8l_size & 1; + const size_t riff_size = TAG_SIZE + CHUNK_HEADER_SIZE + vp8l_size + pad; + + err = WriteRiffHeader(pic, riff_size, vp8l_size); + if (err != VP8_ENC_OK) goto Error; + + if (!pic->writer(webpll_data, webpll_size, pic)) { + err = VP8_ENC_ERROR_BAD_WRITE; + goto Error; + } + + if (pad) { + const uint8_t pad_byte[1] = { 0 }; + if (!pic->writer(pad_byte, 1, pic)) { + err = VP8_ENC_ERROR_BAD_WRITE; + goto Error; + } + } + *coded_size = CHUNK_HEADER_SIZE + riff_size; + return VP8_ENC_OK; + + Error: + return err; +} + +// ----------------------------------------------------------------------------- + +static void ClearTransformBuffer(VP8LEncoder* const enc) { + WebPSafeFree(enc->transform_mem_); + enc->transform_mem_ = NULL; + enc->transform_mem_size_ = 0; +} + +// Allocates the memory for argb (W x H) buffer, 2 rows of context for +// prediction and transform data. +// Flags influencing the memory allocated: +// enc->transform_bits_ +// enc->use_predict_, enc->use_cross_color_ +static WebPEncodingError AllocateTransformBuffer(VP8LEncoder* const enc, + int width, int height) { + WebPEncodingError err = VP8_ENC_OK; + const uint64_t image_size = width * height; + // VP8LResidualImage needs room for 2 scanlines of uint32 pixels with an extra + // pixel in each, plus 2 regular scanlines of bytes. + // TODO(skal): Clean up by using arithmetic in bytes instead of words. + const uint64_t argb_scratch_size = + enc->use_predict_ + ? (width + 1) * 2 + + (width * 2 + sizeof(uint32_t) - 1) / sizeof(uint32_t) + : 0; + const uint64_t transform_data_size = + (enc->use_predict_ || enc->use_cross_color_) + ? 
VP8LSubSampleSize(width, enc->transform_bits_) * + VP8LSubSampleSize(height, enc->transform_bits_) + : 0; + const uint64_t max_alignment_in_words = + (WEBP_ALIGN_CST + sizeof(uint32_t) - 1) / sizeof(uint32_t); + const uint64_t mem_size = + image_size + max_alignment_in_words + + argb_scratch_size + max_alignment_in_words + + transform_data_size; + uint32_t* mem = enc->transform_mem_; + if (mem == NULL || mem_size > enc->transform_mem_size_) { + ClearTransformBuffer(enc); + mem = (uint32_t*)WebPSafeMalloc(mem_size, sizeof(*mem)); + if (mem == NULL) { + err = VP8_ENC_ERROR_OUT_OF_MEMORY; + goto Error; + } + enc->transform_mem_ = mem; + enc->transform_mem_size_ = (size_t)mem_size; + enc->argb_content_ = kEncoderNone; + } + enc->argb_ = mem; + mem = (uint32_t*)WEBP_ALIGN(mem + image_size); + enc->argb_scratch_ = mem; + mem = (uint32_t*)WEBP_ALIGN(mem + argb_scratch_size); + enc->transform_data_ = mem; + + enc->current_width_ = width; + Error: + return err; +} + +static WebPEncodingError MakeInputImageCopy(VP8LEncoder* const enc) { + WebPEncodingError err = VP8_ENC_OK; + const WebPPicture* const picture = enc->pic_; + const int width = picture->width; + const int height = picture->height; + + err = AllocateTransformBuffer(enc, width, height); + if (err != VP8_ENC_OK) return err; + if (enc->argb_content_ == kEncoderARGB) return VP8_ENC_OK; + + { + uint32_t* dst = enc->argb_; + const uint32_t* src = picture->argb; + int y; + for (y = 0; y < height; ++y) { + memcpy(dst, src, width * sizeof(*dst)); + dst += width; + src += picture->argb_stride; + } + } + enc->argb_content_ = kEncoderARGB; + assert(enc->current_width_ == width); + return VP8_ENC_OK; +} + +// ----------------------------------------------------------------------------- + +static WEBP_INLINE int SearchColorNoIdx(const uint32_t sorted[], uint32_t color, + int hi) { + int low = 0; + if (sorted[low] == color) return low; // loop invariant: sorted[low] != color + while (1) { + const int mid = (low + hi) >> 1; + if (sorted[mid] == color) { + return mid; + } else if (sorted[mid] < color) { + low = mid; + } else { + hi = mid; + } + } +} + +#define APPLY_PALETTE_GREEDY_MAX 4 + +static WEBP_INLINE uint32_t SearchColorGreedy(const uint32_t palette[], + int palette_size, + uint32_t color) { + (void)palette_size; + assert(palette_size < APPLY_PALETTE_GREEDY_MAX); + assert(3 == APPLY_PALETTE_GREEDY_MAX - 1); + if (color == palette[0]) return 0; + if (color == palette[1]) return 1; + if (color == palette[2]) return 2; + return 3; +} + +static WEBP_INLINE uint32_t ApplyPaletteHash0(uint32_t color) { + // Focus on the green color. + return (color >> 8) & 0xff; +} + +#define PALETTE_INV_SIZE_BITS 11 +#define PALETTE_INV_SIZE (1 << PALETTE_INV_SIZE_BITS) + +static WEBP_INLINE uint32_t ApplyPaletteHash1(uint32_t color) { + // Forget about alpha. + return ((uint32_t)((color & 0x00ffffffu) * 4222244071ull)) >> + (32 - PALETTE_INV_SIZE_BITS); +} + +static WEBP_INLINE uint32_t ApplyPaletteHash2(uint32_t color) { + // Forget about alpha. + return ((uint32_t)((color & 0x00ffffffu) * ((1ull << 31) - 1))) >> + (32 - PALETTE_INV_SIZE_BITS); +} + +// Sort palette in increasing order and prepare an inverse mapping array. 
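+// E.g. (illustrative values) palette = {20, 5, 9} yields sorted = {5, 9, 20}
+// and idx_map = {1, 2, 0}: a color is first located in sorted[] by binary
+// search (SearchColorNoIdx), then idx_map[] translates that position back to
+// the color's slot in the original, unsorted palette.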
+static void PrepareMapToPalette(const uint32_t palette[], int num_colors, + uint32_t sorted[], uint32_t idx_map[]) { + int i; + memcpy(sorted, palette, num_colors * sizeof(*sorted)); + qsort(sorted, num_colors, sizeof(*sorted), PaletteCompareColorsForQsort); + for (i = 0; i < num_colors; ++i) { + idx_map[SearchColorNoIdx(sorted, palette[i], num_colors)] = i; + } +} + +// Use 1 pixel cache for ARGB pixels. +#define APPLY_PALETTE_FOR(COLOR_INDEX) do { \ + uint32_t prev_pix = palette[0]; \ + uint32_t prev_idx = 0; \ + for (y = 0; y < height; ++y) { \ + for (x = 0; x < width; ++x) { \ + const uint32_t pix = src[x]; \ + if (pix != prev_pix) { \ + prev_idx = COLOR_INDEX; \ + prev_pix = pix; \ + } \ + tmp_row[x] = prev_idx; \ + } \ + VP8LBundleColorMap(tmp_row, width, xbits, dst); \ + src += src_stride; \ + dst += dst_stride; \ + } \ +} while (0) + +// Remap argb values in src[] to packed palettes entries in dst[] +// using 'row' as a temporary buffer of size 'width'. +// We assume that all src[] values have a corresponding entry in the palette. +// Note: src[] can be the same as dst[] +static WebPEncodingError ApplyPalette(const uint32_t* src, uint32_t src_stride, + uint32_t* dst, uint32_t dst_stride, + const uint32_t* palette, int palette_size, + int width, int height, int xbits) { + // TODO(skal): this tmp buffer is not needed if VP8LBundleColorMap() can be + // made to work in-place. + uint8_t* const tmp_row = (uint8_t*)WebPSafeMalloc(width, sizeof(*tmp_row)); + int x, y; + + if (tmp_row == NULL) return VP8_ENC_ERROR_OUT_OF_MEMORY; + + if (palette_size < APPLY_PALETTE_GREEDY_MAX) { + APPLY_PALETTE_FOR(SearchColorGreedy(palette, palette_size, pix)); + } else { + int i, j; + uint16_t buffer[PALETTE_INV_SIZE]; + uint32_t (*const hash_functions[])(uint32_t) = { + ApplyPaletteHash0, ApplyPaletteHash1, ApplyPaletteHash2 + }; + + // Try to find a perfect hash function able to go from a color to an index + // within 1 << PALETTE_INV_SIZE_BITS in order to build a hash map to go + // from color to index in palette. + for (i = 0; i < 3; ++i) { + int use_LUT = 1; + // Set each element in buffer to max uint16_t. + memset(buffer, 0xff, sizeof(buffer)); + for (j = 0; j < palette_size; ++j) { + const uint32_t ind = hash_functions[i](palette[j]); + if (buffer[ind] != 0xffffu) { + use_LUT = 0; + break; + } else { + buffer[ind] = j; + } + } + if (use_LUT) break; + } + + if (i == 0) { + APPLY_PALETTE_FOR(buffer[ApplyPaletteHash0(pix)]); + } else if (i == 1) { + APPLY_PALETTE_FOR(buffer[ApplyPaletteHash1(pix)]); + } else if (i == 2) { + APPLY_PALETTE_FOR(buffer[ApplyPaletteHash2(pix)]); + } else { + uint32_t idx_map[MAX_PALETTE_SIZE]; + uint32_t palette_sorted[MAX_PALETTE_SIZE]; + PrepareMapToPalette(palette, palette_size, palette_sorted, idx_map); + APPLY_PALETTE_FOR( + idx_map[SearchColorNoIdx(palette_sorted, pix, palette_size)]); + } + } + WebPSafeFree(tmp_row); + return VP8_ENC_OK; +} +#undef APPLY_PALETTE_FOR +#undef PALETTE_INV_SIZE_BITS +#undef PALETTE_INV_SIZE +#undef APPLY_PALETTE_GREEDY_MAX + +// Note: Expects "enc->palette_" to be set properly. +static WebPEncodingError MapImageFromPalette(VP8LEncoder* const enc, + int in_place) { + WebPEncodingError err = VP8_ENC_OK; + const WebPPicture* const pic = enc->pic_; + const int width = pic->width; + const int height = pic->height; + const uint32_t* const palette = enc->palette_; + const uint32_t* src = in_place ? enc->argb_ : pic->argb; + const int src_stride = in_place ? 
enc->current_width_ : pic->argb_stride; + const int palette_size = enc->palette_size_; + int xbits; + + // Replace each input pixel by corresponding palette index. + // This is done line by line. + if (palette_size <= 4) { + xbits = (palette_size <= 2) ? 3 : 2; + } else { + xbits = (palette_size <= 16) ? 1 : 0; + } + + err = AllocateTransformBuffer(enc, VP8LSubSampleSize(width, xbits), height); + if (err != VP8_ENC_OK) return err; + + err = ApplyPalette(src, src_stride, + enc->argb_, enc->current_width_, + palette, palette_size, width, height, xbits); + enc->argb_content_ = kEncoderPalette; + return err; +} + +// Save palette_[] to bitstream. +static WebPEncodingError EncodePalette(VP8LBitWriter* const bw, int low_effort, + VP8LEncoder* const enc) { + int i; + uint32_t tmp_palette[MAX_PALETTE_SIZE]; + const int palette_size = enc->palette_size_; + const uint32_t* const palette = enc->palette_; + VP8LPutBits(bw, TRANSFORM_PRESENT, 1); + VP8LPutBits(bw, COLOR_INDEXING_TRANSFORM, 2); + assert(palette_size >= 1 && palette_size <= MAX_PALETTE_SIZE); + VP8LPutBits(bw, palette_size - 1, 8); + for (i = palette_size - 1; i >= 1; --i) { + tmp_palette[i] = VP8LSubPixels(palette[i], palette[i - 1]); + } + tmp_palette[0] = palette[0]; + return EncodeImageNoHuffman(bw, tmp_palette, &enc->hash_chain_, + &enc->refs_[0], palette_size, 1, /*quality=*/20, + low_effort); +} + +// ----------------------------------------------------------------------------- +// VP8LEncoder + +static VP8LEncoder* VP8LEncoderNew(const WebPConfig* const config, + const WebPPicture* const picture) { + VP8LEncoder* const enc = (VP8LEncoder*)WebPSafeCalloc(1ULL, sizeof(*enc)); + if (enc == NULL) { + WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY); + return NULL; + } + enc->config_ = config; + enc->pic_ = picture; + enc->argb_content_ = kEncoderNone; + + VP8LEncDspInit(); + + return enc; +} + +static void VP8LEncoderDelete(VP8LEncoder* enc) { + if (enc != NULL) { + int i; + VP8LHashChainClear(&enc->hash_chain_); + for (i = 0; i < 4; ++i) VP8LBackwardRefsClear(&enc->refs_[i]); + ClearTransformBuffer(enc); + WebPSafeFree(enc); + } +} + +// ----------------------------------------------------------------------------- +// Main call + +typedef struct { + const WebPConfig* config_; + const WebPPicture* picture_; + VP8LBitWriter* bw_; + VP8LEncoder* enc_; + int use_cache_; + CrunchConfig crunch_configs_[CRUNCH_CONFIGS_MAX]; + int num_crunch_configs_; + int red_and_blue_always_zero_; + WebPEncodingError err_; + WebPAuxStats* stats_; +} StreamEncodeContext; + +static int EncodeStreamHook(void* input, void* data2) { + StreamEncodeContext* const params = (StreamEncodeContext*)input; + const WebPConfig* const config = params->config_; + const WebPPicture* const picture = params->picture_; + VP8LBitWriter* const bw = params->bw_; + VP8LEncoder* const enc = params->enc_; + const int use_cache = params->use_cache_; + const CrunchConfig* const crunch_configs = params->crunch_configs_; + const int num_crunch_configs = params->num_crunch_configs_; + const int red_and_blue_always_zero = params->red_and_blue_always_zero_; +#if !defined(WEBP_DISABLE_STATS) + WebPAuxStats* const stats = params->stats_; +#endif + WebPEncodingError err = VP8_ENC_OK; + const int quality = (int)config->quality; + const int low_effort = (config->method == 0); +#if (WEBP_NEAR_LOSSLESS == 1) + const int width = picture->width; +#endif + const int height = picture->height; + const size_t byte_position = VP8LBitWriterNumBytes(bw); +#if (WEBP_NEAR_LOSSLESS == 1) + int 
use_near_lossless = 0; +#endif + int hdr_size = 0; + int data_size = 0; + int use_delta_palette = 0; + int idx; + size_t best_size = ~(size_t)0; + VP8LBitWriter bw_init = *bw, bw_best; + (void)data2; + + if (!VP8LBitWriterInit(&bw_best, 0) || + (num_crunch_configs > 1 && !VP8LBitWriterClone(bw, &bw_best))) { + err = VP8_ENC_ERROR_OUT_OF_MEMORY; + goto Error; + } + + for (idx = 0; idx < num_crunch_configs; ++idx) { + const int entropy_idx = crunch_configs[idx].entropy_idx_; + enc->use_palette_ = + (entropy_idx == kPalette) || (entropy_idx == kPaletteAndSpatial); + enc->use_subtract_green_ = + (entropy_idx == kSubGreen) || (entropy_idx == kSpatialSubGreen); + enc->use_predict_ = (entropy_idx == kSpatial) || + (entropy_idx == kSpatialSubGreen) || + (entropy_idx == kPaletteAndSpatial); + if (low_effort) { + enc->use_cross_color_ = 0; + } else { + enc->use_cross_color_ = red_and_blue_always_zero ? 0 : enc->use_predict_; + } + // Reset any parameter in the encoder that is set in the previous iteration. + enc->cache_bits_ = 0; + VP8LBackwardRefsClear(&enc->refs_[0]); + VP8LBackwardRefsClear(&enc->refs_[1]); + +#if (WEBP_NEAR_LOSSLESS == 1) + // Apply near-lossless preprocessing. + use_near_lossless = (config->near_lossless < 100) && !enc->use_palette_ && + !enc->use_predict_; + if (use_near_lossless) { + err = AllocateTransformBuffer(enc, width, height); + if (err != VP8_ENC_OK) goto Error; + if ((enc->argb_content_ != kEncoderNearLossless) && + !VP8ApplyNearLossless(picture, config->near_lossless, enc->argb_)) { + err = VP8_ENC_ERROR_OUT_OF_MEMORY; + goto Error; + } + enc->argb_content_ = kEncoderNearLossless; + } else { + enc->argb_content_ = kEncoderNone; + } +#else + enc->argb_content_ = kEncoderNone; +#endif + + // Encode palette + if (enc->use_palette_) { + err = EncodePalette(bw, low_effort, enc); + if (err != VP8_ENC_OK) goto Error; + err = MapImageFromPalette(enc, use_delta_palette); + if (err != VP8_ENC_OK) goto Error; + // If using a color cache, do not have it bigger than the number of + // colors. + if (use_cache && enc->palette_size_ < (1 << MAX_COLOR_CACHE_BITS)) { + enc->cache_bits_ = BitsLog2Floor(enc->palette_size_) + 1; + } + } + if (!use_delta_palette) { + // In case image is not packed. + if (enc->argb_content_ != kEncoderNearLossless && + enc->argb_content_ != kEncoderPalette) { + err = MakeInputImageCopy(enc); + if (err != VP8_ENC_OK) goto Error; + } + + // ----------------------------------------------------------------------- + // Apply transforms and write transform data. + + if (enc->use_subtract_green_) { + ApplySubtractGreen(enc, enc->current_width_, height, bw); + } + + if (enc->use_predict_) { + err = ApplyPredictFilter(enc, enc->current_width_, height, quality, + low_effort, enc->use_subtract_green_, bw); + if (err != VP8_ENC_OK) goto Error; + } + + if (enc->use_cross_color_) { + err = ApplyCrossColorFilter(enc, enc->current_width_, height, quality, + low_effort, bw); + if (err != VP8_ENC_OK) goto Error; + } + } + + VP8LPutBits(bw, !TRANSFORM_PRESENT, 1); // No more transforms. + + // ------------------------------------------------------------------------- + // Encode and write the transformed image. + err = EncodeImageInternal(bw, enc->argb_, &enc->hash_chain_, enc->refs_, + enc->current_width_, height, quality, low_effort, + use_cache, &crunch_configs[idx], + &enc->cache_bits_, enc->histo_bits_, + byte_position, &hdr_size, &data_size); + if (err != VP8_ENC_OK) goto Error; + + // If we are better than what we already have. 
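+    // As in EncodeImageInternal(), 'bw_best' keeps the shortest stream, here
+    // across whole crunch configs. The stats snapshot below records the
+    // active transforms as a bitfield: bit 0 = prediction, bit 1 =
+    // cross-color, bit 2 = subtract-green, bit 3 = palette.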
+    if (VP8LBitWriterNumBytes(bw) < best_size) {
+      best_size = VP8LBitWriterNumBytes(bw);
+      // Store the BitWriter.
+      VP8LBitWriterSwap(bw, &bw_best);
+#if !defined(WEBP_DISABLE_STATS)
+      // Update the stats.
+      if (stats != NULL) {
+        stats->lossless_features = 0;
+        if (enc->use_predict_) stats->lossless_features |= 1;
+        if (enc->use_cross_color_) stats->lossless_features |= 2;
+        if (enc->use_subtract_green_) stats->lossless_features |= 4;
+        if (enc->use_palette_) stats->lossless_features |= 8;
+        stats->histogram_bits = enc->histo_bits_;
+        stats->transform_bits = enc->transform_bits_;
+        stats->cache_bits = enc->cache_bits_;
+        stats->palette_size = enc->palette_size_;
+        stats->lossless_size = (int)(best_size - byte_position);
+        stats->lossless_hdr_size = hdr_size;
+        stats->lossless_data_size = data_size;
+      }
+#endif
+    }
+    // Reset the bit writer for the following iteration if any.
+    if (num_crunch_configs > 1) VP8LBitWriterReset(&bw_init, bw);
+  }
+  VP8LBitWriterSwap(&bw_best, bw);
+
+Error:
+  VP8LBitWriterWipeOut(&bw_best);
+  params->err_ = err;
+  // The hook should return false in case of error.
+  return (err == VP8_ENC_OK);
+}
+
+WebPEncodingError VP8LEncodeStream(const WebPConfig* const config,
+                                   const WebPPicture* const picture,
+                                   VP8LBitWriter* const bw_main,
+                                   int use_cache) {
+  WebPEncodingError err = VP8_ENC_OK;
+  VP8LEncoder* const enc_main = VP8LEncoderNew(config, picture);
+  VP8LEncoder* enc_side = NULL;
+  CrunchConfig crunch_configs[CRUNCH_CONFIGS_MAX];
+  int num_crunch_configs_main, num_crunch_configs_side = 0;
+  int idx;
+  int red_and_blue_always_zero = 0;
+  WebPWorker worker_main, worker_side;
+  StreamEncodeContext params_main, params_side;
+  // The main thread uses picture->stats, the side thread uses stats_side.
+  WebPAuxStats stats_side;
+  VP8LBitWriter bw_side;
+  const WebPWorkerInterface* const worker_interface = WebPGetWorkerInterface();
+  int ok_main;
+
+  // Analyze image (entropy, num_palettes etc)
+  if (enc_main == NULL ||
+      !EncoderAnalyze(enc_main, crunch_configs, &num_crunch_configs_main,
+                      &red_and_blue_always_zero) ||
+      !EncoderInit(enc_main) || !VP8LBitWriterInit(&bw_side, 0)) {
+    err = VP8_ENC_ERROR_OUT_OF_MEMORY;
+    goto Error;
+  }
+
+  // Split the configs between the main and side threads (if any).
+  if (config->thread_level > 0) {
+    num_crunch_configs_side = num_crunch_configs_main / 2;
+    for (idx = 0; idx < num_crunch_configs_side; ++idx) {
+      params_side.crunch_configs_[idx] =
+          crunch_configs[num_crunch_configs_main - num_crunch_configs_side +
+                         idx];
+    }
+    params_side.num_crunch_configs_ = num_crunch_configs_side;
+  }
+  num_crunch_configs_main -= num_crunch_configs_side;
+  for (idx = 0; idx < num_crunch_configs_main; ++idx) {
+    params_main.crunch_configs_[idx] = crunch_configs[idx];
+  }
+  params_main.num_crunch_configs_ = num_crunch_configs_main;
+
+  // Fill in the parameters for the thread workers.
+  {
+    const int params_size = (num_crunch_configs_side > 0) ? 2 : 1;
+    for (idx = 0; idx < params_size; ++idx) {
+      // Create the parameters for each worker.
+      WebPWorker* const worker = (idx == 0) ? &worker_main : &worker_side;
+      StreamEncodeContext* const param =
+          (idx == 0) ? &params_main : &params_side;
+      param->config_ = config;
+      param->picture_ = picture;
+      param->use_cache_ = use_cache;
+      param->red_and_blue_always_zero_ = red_and_blue_always_zero;
+      if (idx == 0) {
+        param->stats_ = picture->stats;
+        param->bw_ = bw_main;
+        param->enc_ = enc_main;
+      } else {
+        param->stats_ = (picture->stats == NULL) ? NULL : &stats_side;
+        // Create a side bit writer.
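+        // The side worker gets a clone of the main bit writer (sharing the
+        // header bytes already emitted) and its own encoder, so both halves
+        // of the crunch-config list can run in parallel; whichever thread
+        // produces the smaller stream wins after Sync() below.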
+ if (!VP8LBitWriterClone(bw_main, &bw_side)) { + err = VP8_ENC_ERROR_OUT_OF_MEMORY; + goto Error; + } + param->bw_ = &bw_side; + // Create a side encoder. + enc_side = VP8LEncoderNew(config, picture); + if (enc_side == NULL || !EncoderInit(enc_side)) { + err = VP8_ENC_ERROR_OUT_OF_MEMORY; + goto Error; + } + // Copy the values that were computed for the main encoder. + enc_side->histo_bits_ = enc_main->histo_bits_; + enc_side->transform_bits_ = enc_main->transform_bits_; + enc_side->palette_size_ = enc_main->palette_size_; + memcpy(enc_side->palette_, enc_main->palette_, + sizeof(enc_main->palette_)); + param->enc_ = enc_side; + } + // Create the workers. + worker_interface->Init(worker); + worker->data1 = param; + worker->data2 = NULL; + worker->hook = EncodeStreamHook; + } + } + + // Start the second thread if needed. + if (num_crunch_configs_side != 0) { + if (!worker_interface->Reset(&worker_side)) { + err = VP8_ENC_ERROR_OUT_OF_MEMORY; + goto Error; + } +#if !defined(WEBP_DISABLE_STATS) + // This line is here and not in the param initialization above to remove a + // Clang static analyzer warning. + if (picture->stats != NULL) { + memcpy(&stats_side, picture->stats, sizeof(stats_side)); + } +#endif + // This line is only useful to remove a Clang static analyzer warning. + params_side.err_ = VP8_ENC_OK; + worker_interface->Launch(&worker_side); + } + // Execute the main thread. + worker_interface->Execute(&worker_main); + ok_main = worker_interface->Sync(&worker_main); + worker_interface->End(&worker_main); + if (num_crunch_configs_side != 0) { + // Wait for the second thread. + const int ok_side = worker_interface->Sync(&worker_side); + worker_interface->End(&worker_side); + if (!ok_main || !ok_side) { + err = ok_main ? params_side.err_ : params_main.err_; + goto Error; + } + if (VP8LBitWriterNumBytes(&bw_side) < VP8LBitWriterNumBytes(bw_main)) { + VP8LBitWriterSwap(bw_main, &bw_side); +#if !defined(WEBP_DISABLE_STATS) + if (picture->stats != NULL) { + memcpy(picture->stats, &stats_side, sizeof(*picture->stats)); + } +#endif + } + } else { + if (!ok_main) { + err = params_main.err_; + goto Error; + } + } + +Error: + VP8LBitWriterWipeOut(&bw_side); + VP8LEncoderDelete(enc_main); + VP8LEncoderDelete(enc_side); + return err; +} + +#undef CRUNCH_CONFIGS_MAX +#undef CRUNCH_SUBCONFIGS_MAX + +int VP8LEncodeImage(const WebPConfig* const config, + const WebPPicture* const picture) { + int width, height; + int has_alpha; + size_t coded_size; + int percent = 0; + int initial_size; + WebPEncodingError err = VP8_ENC_OK; + VP8LBitWriter bw; + + if (picture == NULL) return 0; + + if (config == NULL || picture->argb == NULL) { + err = VP8_ENC_ERROR_NULL_PARAMETER; + WebPEncodingSetError(picture, err); + return 0; + } + + width = picture->width; + height = picture->height; + // Initialize BitWriter with size corresponding to 16 bpp to photo images and + // 8 bpp for graphical images. + initial_size = (config->image_hint == WEBP_HINT_GRAPH) ? 
+ width * height : width * height * 2; + if (!VP8LBitWriterInit(&bw, initial_size)) { + err = VP8_ENC_ERROR_OUT_OF_MEMORY; + goto Error; + } + + if (!WebPReportProgress(picture, 1, &percent)) { + UserAbort: + err = VP8_ENC_ERROR_USER_ABORT; + goto Error; + } + // Reset stats (for pure lossless coding) + if (picture->stats != NULL) { + WebPAuxStats* const stats = picture->stats; + memset(stats, 0, sizeof(*stats)); + stats->PSNR[0] = 99.f; + stats->PSNR[1] = 99.f; + stats->PSNR[2] = 99.f; + stats->PSNR[3] = 99.f; + stats->PSNR[4] = 99.f; + } + + // Write image size. + if (!WriteImageSize(picture, &bw)) { + err = VP8_ENC_ERROR_OUT_OF_MEMORY; + goto Error; + } + + has_alpha = WebPPictureHasTransparency(picture); + // Write the non-trivial Alpha flag and lossless version. + if (!WriteRealAlphaAndVersion(&bw, has_alpha)) { + err = VP8_ENC_ERROR_OUT_OF_MEMORY; + goto Error; + } + + if (!WebPReportProgress(picture, 5, &percent)) goto UserAbort; + + // Encode main image stream. + err = VP8LEncodeStream(config, picture, &bw, 1 /*use_cache*/); + if (err != VP8_ENC_OK) goto Error; + + if (!WebPReportProgress(picture, 90, &percent)) goto UserAbort; + + // Finish the RIFF chunk. + err = WriteImage(picture, &bw, &coded_size); + if (err != VP8_ENC_OK) goto Error; + + if (!WebPReportProgress(picture, 100, &percent)) goto UserAbort; + +#if !defined(WEBP_DISABLE_STATS) + // Save size. + if (picture->stats != NULL) { + picture->stats->coded_size += (int)coded_size; + picture->stats->lossless_size = (int)coded_size; + } +#endif + + if (picture->extra_info != NULL) { + const int mb_w = (width + 15) >> 4; + const int mb_h = (height + 15) >> 4; + memset(picture->extra_info, 0, mb_w * mb_h * sizeof(*picture->extra_info)); + } + + Error: + if (bw.error_) err = VP8_ENC_ERROR_OUT_OF_MEMORY; + VP8LBitWriterWipeOut(&bw); + if (err != VP8_ENC_OK) { + WebPEncodingSetError(picture, err); + return 0; + } + return 1; +} + +//------------------------------------------------------------------------------ diff --git a/vendor/github.com/sizeofint/webpanimation/enc_vp8li_enc.h b/vendor/github.com/sizeofint/webpanimation/enc_vp8li_enc.h new file mode 100644 index 00000000..28b2f9f4 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/enc_vp8li_enc.h @@ -0,0 +1,118 @@ +// Copyright 2012 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Lossless encoder: internal header. +// +// Author: Vikas Arora (vikaas.arora@gmail.com) + +#ifndef WEBP_ENC_VP8LI_ENC_H_ +#define WEBP_ENC_VP8LI_ENC_H_ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif +// Either WEBP_NEAR_LOSSLESS is defined as 0 in config.h when compiling to +// disable near-lossless, or it is enabled by default. +#ifndef WEBP_NEAR_LOSSLESS +#define WEBP_NEAR_LOSSLESS 1 +#endif + +#include "enc_backward_references_enc.h" +#include "enc_histogram_enc.h" +#include "utils_bit_writer_utils.h" +#include "webp_encode.h" +#include "webp_format_constants.h" + +#ifdef __cplusplus +extern "C" { +#endif + +// maximum value of transform_bits_ in VP8LEncoder. 
+#define MAX_TRANSFORM_BITS 6 + +typedef enum { + kEncoderNone = 0, + kEncoderARGB, + kEncoderNearLossless, + kEncoderPalette +} VP8LEncoderARGBContent; + +typedef struct { + const WebPConfig* config_; // user configuration and parameters + const WebPPicture* pic_; // input picture. + + uint32_t* argb_; // Transformed argb image data. + VP8LEncoderARGBContent argb_content_; // Content type of the argb buffer. + uint32_t* argb_scratch_; // Scratch memory for argb rows + // (used for prediction). + uint32_t* transform_data_; // Scratch memory for transform data. + uint32_t* transform_mem_; // Currently allocated memory. + size_t transform_mem_size_; // Currently allocated memory size. + + int current_width_; // Corresponds to packed image width. + + // Encoding parameters derived from quality parameter. + int histo_bits_; + int transform_bits_; // <= MAX_TRANSFORM_BITS. + int cache_bits_; // If equal to 0, don't use color cache. + + // Encoding parameters derived from image characteristics. + int use_cross_color_; + int use_subtract_green_; + int use_predict_; + int use_palette_; + int palette_size_; + uint32_t palette_[MAX_PALETTE_SIZE]; + + // Some 'scratch' (potentially large) objects. + struct VP8LBackwardRefs refs_[4]; // Backward Refs array for temporaries. + VP8LHashChain hash_chain_; // HashChain data for constructing + // backward references. +} VP8LEncoder; + +//------------------------------------------------------------------------------ +// internal functions. Not public. + +// Encodes the picture. +// Returns 0 if config or picture is NULL or picture doesn't have valid argb +// input. +int VP8LEncodeImage(const WebPConfig* const config, + const WebPPicture* const picture); + +// Encodes the main image stream using the supplied bit writer. +// If 'use_cache' is false, disables the use of color cache. +WebPEncodingError VP8LEncodeStream(const WebPConfig* const config, + const WebPPicture* const picture, + VP8LBitWriter* const bw, int use_cache); + +#if (WEBP_NEAR_LOSSLESS == 1) +// in near_lossless.c +// Near lossless preprocessing in RGB color-space. +int VP8ApplyNearLossless(const WebPPicture* const picture, int quality, + uint32_t* const argb_dst); +#endif + +//------------------------------------------------------------------------------ +// Image transforms in predictor.c. + +void VP8LResidualImage(int width, int height, int bits, int low_effort, + uint32_t* const argb, uint32_t* const argb_scratch, + uint32_t* const image, int near_lossless, int exact, + int used_subtract_green); + +void VP8LColorSpaceTransform(int width, int height, int bits, int quality, + uint32_t* const argb, uint32_t* image); + +//------------------------------------------------------------------------------ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // WEBP_ENC_VP8LI_ENC_H_ diff --git a/vendor/github.com/sizeofint/webpanimation/enc_webp_enc.c b/vendor/github.com/sizeofint/webpanimation/enc_webp_enc.c new file mode 100644 index 00000000..8151fc1f --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/enc_webp_enc.c @@ -0,0 +1,410 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. 
+// -----------------------------------------------------------------------------
+//
+// WebP encoder: main entry point
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <assert.h>
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "enc_cost_enc.h"
+#include "enc_vp8i_enc.h"
+#include "enc_vp8li_enc.h"
+#include "utils_utils.h"
+
+// #define PRINT_MEMORY_INFO
+
+#ifdef PRINT_MEMORY_INFO
+#include <stdio.h>
+#endif
+
+//------------------------------------------------------------------------------
+
+int WebPGetEncoderVersion(void) {
+  return (ENC_MAJ_VERSION << 16) | (ENC_MIN_VERSION << 8) | ENC_REV_VERSION;
+}
+
+//------------------------------------------------------------------------------
+// VP8Encoder
+//------------------------------------------------------------------------------
+
+static void ResetSegmentHeader(VP8Encoder* const enc) {
+  VP8EncSegmentHeader* const hdr = &enc->segment_hdr_;
+  hdr->num_segments_ = enc->config_->segments;
+  hdr->update_map_ = (hdr->num_segments_ > 1);
+  hdr->size_ = 0;
+}
+
+static void ResetFilterHeader(VP8Encoder* const enc) {
+  VP8EncFilterHeader* const hdr = &enc->filter_hdr_;
+  hdr->simple_ = 1;
+  hdr->level_ = 0;
+  hdr->sharpness_ = 0;
+  hdr->i4x4_lf_delta_ = 0;
+}
+
+static void ResetBoundaryPredictions(VP8Encoder* const enc) {
+  // init boundary values once for all
+  // Note: actually, initializing the preds_[] is only needed for intra4.
+  int i;
+  uint8_t* const top = enc->preds_ - enc->preds_w_;
+  uint8_t* const left = enc->preds_ - 1;
+  for (i = -1; i < 4 * enc->mb_w_; ++i) {
+    top[i] = B_DC_PRED;
+  }
+  for (i = 0; i < 4 * enc->mb_h_; ++i) {
+    left[i * enc->preds_w_] = B_DC_PRED;
+  }
+  enc->nz_[-1] = 0;   // constant
+}
+
+// Mapping from config->method_ to coding tools used.
+//-------------------+---+---+---+---+---+---+---+
+//   Method          | 0 | 1 | 2 | 3 |(4)| 5 | 6 |
+//-------------------+---+---+---+---+---+---+---+
+// fast probe        | x |   |   | x |   |   |   |
+//-------------------+---+---+---+---+---+---+---+
+// dynamic proba     | ~ | x | x | x | x | x | x |
+//-------------------+---+---+---+---+---+---+---+
+// fast mode analysis|[x]|[x]|   |   | x | x | x |
+//-------------------+---+---+---+---+---+---+---+
+// basic rd-opt      |   |   |   | x | x | x | x |
+//-------------------+---+---+---+---+---+---+---+
+// disto-refine i4/16| x | x | x |   |   |   |   |
+//-------------------+---+---+---+---+---+---+---+
+// disto-refine uv   |   | x | x |   |   |   |   |
+//-------------------+---+---+---+---+---+---+---+
+// rd-opt i4/16      |   |   | ~ | x | x | x | x |
+//-------------------+---+---+---+---+---+---+---+
+// token buffer (opt)|   |   |   | x | x | x | x |
+//-------------------+---+---+---+---+---+---+---+
+// Trellis           |   |   |   |   |   | x |Ful|
+//-------------------+---+---+---+---+---+---+---+
+// full-SNS          |   |   |   |   | x | x | x |
+//-------------------+---+---+---+---+---+---+---+
+
+static void MapConfigToTools(VP8Encoder* const enc) {
+  const WebPConfig* const config = enc->config_;
+  const int method = config->method;
+  const int limit = 100 - config->partition_limit;
+  enc->method_ = method;
+  enc->rd_opt_level_ = (method >= 6) ? RD_OPT_TRELLIS_ALL
+                       : (method >= 5) ? RD_OPT_TRELLIS
+                       : (method >= 3) ? RD_OPT_BASIC
+                       : RD_OPT_NONE;
+  enc->max_i4_header_bits_ =
+      256 * 16 * 16 *                 // upper bound: up to 16bit per 4x4 block
+      (limit * limit) / (100 * 100);  // ... modulated with a quadratic curve.
+
+  // partition0 = 512k max.
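+  // The budget below spreads 510 * 8 * 1024 bits (just under the 512KB cap,
+  // with a little margin) evenly over all macroblocks; the extra factor of
+  // 256 matches the 1/256-bit fixed-point scale in which score_t mode costs
+  // are accumulated.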
+ enc->mb_header_limit_ = + (score_t)256 * 510 * 8 * 1024 / (enc->mb_w_ * enc->mb_h_); + + enc->thread_level_ = config->thread_level; + + enc->do_search_ = (config->target_size > 0 || config->target_PSNR > 0); + if (!config->low_memory) { +#if !defined(DISABLE_TOKEN_BUFFER) + enc->use_tokens_ = (enc->rd_opt_level_ >= RD_OPT_BASIC); // need rd stats +#endif + if (enc->use_tokens_) { + enc->num_parts_ = 1; // doesn't work with multi-partition + } + } +} + +// Memory scaling with dimensions: +// memory (bytes) ~= 2.25 * w + 0.0625 * w * h +// +// Typical memory footprint (614x440 picture) +// encoder: 22111 +// info: 4368 +// preds: 17741 +// top samples: 1263 +// non-zero: 175 +// lf-stats: 0 +// total: 45658 +// Transient object sizes: +// VP8EncIterator: 3360 +// VP8ModeScore: 872 +// VP8SegmentInfo: 732 +// VP8EncProba: 18352 +// LFStats: 2048 +// Picture size (yuv): 419328 + +static VP8Encoder* InitVP8Encoder(const WebPConfig* const config, + WebPPicture* const picture) { + VP8Encoder* enc; + const int use_filter = + (config->filter_strength > 0) || (config->autofilter > 0); + const int mb_w = (picture->width + 15) >> 4; + const int mb_h = (picture->height + 15) >> 4; + const int preds_w = 4 * mb_w + 1; + const int preds_h = 4 * mb_h + 1; + const size_t preds_size = preds_w * preds_h * sizeof(*enc->preds_); + const int top_stride = mb_w * 16; + const size_t nz_size = (mb_w + 1) * sizeof(*enc->nz_) + WEBP_ALIGN_CST; + const size_t info_size = mb_w * mb_h * sizeof(*enc->mb_info_); + const size_t samples_size = + 2 * top_stride * sizeof(*enc->y_top_) // top-luma/u/v + + WEBP_ALIGN_CST; // align all + const size_t lf_stats_size = + config->autofilter ? sizeof(*enc->lf_stats_) + WEBP_ALIGN_CST : 0; + const size_t top_derr_size = + (config->quality <= ERROR_DIFFUSION_QUALITY || config->pass > 1) ? 
+ mb_w * sizeof(*enc->top_derr_) : 0; + uint8_t* mem; + const uint64_t size = (uint64_t)sizeof(*enc) // main struct + + WEBP_ALIGN_CST // cache alignment + + info_size // modes info + + preds_size // prediction modes + + samples_size // top/left samples + + top_derr_size // top diffusion error + + nz_size // coeff context bits + + lf_stats_size; // autofilter stats + +#ifdef PRINT_MEMORY_INFO + printf("===================================\n"); + printf("Memory used:\n" + " encoder: %ld\n" + " info: %ld\n" + " preds: %ld\n" + " top samples: %ld\n" + " top diffusion: %ld\n" + " non-zero: %ld\n" + " lf-stats: %ld\n" + " total: %ld\n", + sizeof(*enc) + WEBP_ALIGN_CST, info_size, + preds_size, samples_size, top_derr_size, nz_size, lf_stats_size, size); + printf("Transient object sizes:\n" + " VP8EncIterator: %ld\n" + " VP8ModeScore: %ld\n" + " VP8SegmentInfo: %ld\n" + " VP8EncProba: %ld\n" + " LFStats: %ld\n", + sizeof(VP8EncIterator), sizeof(VP8ModeScore), + sizeof(VP8SegmentInfo), sizeof(VP8EncProba), + sizeof(LFStats)); + printf("Picture size (yuv): %ld\n", + mb_w * mb_h * 384 * sizeof(uint8_t)); + printf("===================================\n"); +#endif + mem = (uint8_t*)WebPSafeMalloc(size, sizeof(*mem)); + if (mem == NULL) { + WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY); + return NULL; + } + enc = (VP8Encoder*)mem; + mem = (uint8_t*)WEBP_ALIGN(mem + sizeof(*enc)); + memset(enc, 0, sizeof(*enc)); + enc->num_parts_ = 1 << config->partitions; + enc->mb_w_ = mb_w; + enc->mb_h_ = mb_h; + enc->preds_w_ = preds_w; + enc->mb_info_ = (VP8MBInfo*)mem; + mem += info_size; + enc->preds_ = mem + 1 + enc->preds_w_; + mem += preds_size; + enc->nz_ = 1 + (uint32_t*)WEBP_ALIGN(mem); + mem += nz_size; + enc->lf_stats_ = lf_stats_size ? (LFStats*)WEBP_ALIGN(mem) : NULL; + mem += lf_stats_size; + + // top samples (all 16-aligned) + mem = (uint8_t*)WEBP_ALIGN(mem); + enc->y_top_ = mem; + enc->uv_top_ = enc->y_top_ + top_stride; + mem += 2 * top_stride; + enc->top_derr_ = top_derr_size ? (DError*)mem : NULL; + mem += top_derr_size; + assert(mem <= (uint8_t*)enc + size); + + enc->config_ = config; + enc->profile_ = use_filter ? ((config->filter_type == 1) ? 0 : 1) : 2; + enc->pic_ = picture; + enc->percent_ = 0; + + MapConfigToTools(enc); + VP8EncDspInit(); + VP8DefaultProbas(enc); + ResetSegmentHeader(enc); + ResetFilterHeader(enc); + ResetBoundaryPredictions(enc); + VP8EncDspCostInit(); + VP8EncInitAlpha(enc); + + // lower quality means smaller output -> we modulate a little the page + // size based on quality. This is just a crude 1rst-order prediction. + { + const float scale = 1.f + config->quality * 5.f / 100.f; // in [1,6] + VP8TBufferInit(&enc->tokens_, (int)(mb_w * mb_h * 4 * scale)); + } + return enc; +} + +static int DeleteVP8Encoder(VP8Encoder* enc) { + int ok = 1; + if (enc != NULL) { + ok = VP8EncDeleteAlpha(enc); + VP8TBufferClear(&enc->tokens_); + WebPSafeFree(enc); + } + return ok; +} + +//------------------------------------------------------------------------------ + +#if !defined(WEBP_DISABLE_STATS) +static double GetPSNR(uint64_t err, uint64_t size) { + return (err > 0 && size > 0) ? 10. * log10(255. * 255. 
* size / err) : 99.; +} + +static void FinalizePSNR(const VP8Encoder* const enc) { + WebPAuxStats* stats = enc->pic_->stats; + const uint64_t size = enc->sse_count_; + const uint64_t* const sse = enc->sse_; + stats->PSNR[0] = (float)GetPSNR(sse[0], size); + stats->PSNR[1] = (float)GetPSNR(sse[1], size / 4); + stats->PSNR[2] = (float)GetPSNR(sse[2], size / 4); + stats->PSNR[3] = (float)GetPSNR(sse[0] + sse[1] + sse[2], size * 3 / 2); + stats->PSNR[4] = (float)GetPSNR(sse[3], size); +} +#endif // !defined(WEBP_DISABLE_STATS) + +static void StoreStats(VP8Encoder* const enc) { +#if !defined(WEBP_DISABLE_STATS) + WebPAuxStats* const stats = enc->pic_->stats; + if (stats != NULL) { + int i, s; + for (i = 0; i < NUM_MB_SEGMENTS; ++i) { + stats->segment_level[i] = enc->dqm_[i].fstrength_; + stats->segment_quant[i] = enc->dqm_[i].quant_; + for (s = 0; s <= 2; ++s) { + stats->residual_bytes[s][i] = enc->residual_bytes_[s][i]; + } + } + FinalizePSNR(enc); + stats->coded_size = enc->coded_size_; + for (i = 0; i < 3; ++i) { + stats->block_count[i] = enc->block_count_[i]; + } + } +#else // defined(WEBP_DISABLE_STATS) + WebPReportProgress(enc->pic_, 100, &enc->percent_); // done! +#endif // !defined(WEBP_DISABLE_STATS) +} + +int WebPEncodingSetError(const WebPPicture* const pic, + WebPEncodingError error) { + assert((int)error < VP8_ENC_ERROR_LAST); + assert((int)error >= VP8_ENC_OK); + ((WebPPicture*)pic)->error_code = error; + return 0; +} + +int WebPReportProgress(const WebPPicture* const pic, + int percent, int* const percent_store) { + if (percent_store != NULL && percent != *percent_store) { + *percent_store = percent; + if (pic->progress_hook && !pic->progress_hook(percent, pic)) { + // user abort requested + WebPEncodingSetError(pic, VP8_ENC_ERROR_USER_ABORT); + return 0; + } + } + return 1; // ok +} +//------------------------------------------------------------------------------ + +int WebPEncode(const WebPConfig* config, WebPPicture* pic) { + int ok = 0; + if (pic == NULL) return 0; + + WebPEncodingSetError(pic, VP8_ENC_OK); // all ok so far + if (config == NULL) { // bad params + return WebPEncodingSetError(pic, VP8_ENC_ERROR_NULL_PARAMETER); + } + if (!WebPValidateConfig(config)) { + return WebPEncodingSetError(pic, VP8_ENC_ERROR_INVALID_CONFIGURATION); + } + if (pic->width <= 0 || pic->height <= 0) { + return WebPEncodingSetError(pic, VP8_ENC_ERROR_BAD_DIMENSION); + } + if (pic->width > WEBP_MAX_DIMENSION || pic->height > WEBP_MAX_DIMENSION) { + return WebPEncodingSetError(pic, VP8_ENC_ERROR_BAD_DIMENSION); + } + + if (pic->stats != NULL) memset(pic->stats, 0, sizeof(*pic->stats)); + + if (!config->lossless) { + VP8Encoder* enc = NULL; + + if (pic->use_argb || pic->y == NULL || pic->u == NULL || pic->v == NULL) { + // Make sure we have YUVA samples. + if (config->use_sharp_yuv || (config->preprocessing & 4)) { + if (!WebPPictureSharpARGBToYUVA(pic)) { + return 0; + } + } else { + float dithering = 0.f; + if (config->preprocessing & 2) { + const float x = config->quality / 100.f; + const float x2 = x * x; + // slowly decreasing from max dithering at low quality (q->0) + // to 0.5 dithering amplitude at high quality (q->100) + dithering = 1.0f + (0.5f - 1.0f) * x2 * x2; + } + if (!WebPPictureARGBToYUVADithered(pic, WEBP_YUV420, dithering)) { + return 0; + } + } + } + + if (!config->exact) { + WebPCleanupTransparentArea(pic); + } + + enc = InitVP8Encoder(config, pic); + if (enc == NULL) return 0; // pic->error is already set. 
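+    // For reference, this lossy path (and the lossless one below) is driven
+    // from the caller side roughly as follows -- a sketch using the public
+    // API from webp_encode.h, error handling elided:
+    //
+    //   WebPConfig config;
+    //   WebPPicture pic;
+    //   WebPMemoryWriter wrt;
+    //   WebPConfigInit(&config);        // defaults; then tweak quality etc.
+    //   WebPPictureInit(&pic);
+    //   pic.width = w; pic.height = h; pic.use_argb = 1;
+    //   WebPPictureImportRGBA(&pic, rgba, stride);
+    //   WebPMemoryWriterInit(&wrt);
+    //   pic.writer = WebPMemoryWrite;
+    //   pic.custom_ptr = &wrt;
+    //   WebPEncode(&config, &pic);      // this function
+    //   WebPPictureFree(&pic);          // result is in wrt.mem / wrt.size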
+ // Note: each of the tasks below account for 20% in the progress report. + ok = VP8EncAnalyze(enc); + + // Analysis is done, proceed to actual coding. + ok = ok && VP8EncStartAlpha(enc); // possibly done in parallel + if (!enc->use_tokens_) { + ok = ok && VP8EncLoop(enc); + } else { + ok = ok && VP8EncTokenLoop(enc); + } + ok = ok && VP8EncFinishAlpha(enc); + + ok = ok && VP8EncWrite(enc); + StoreStats(enc); + if (!ok) { + VP8EncFreeBitWriters(enc); + } + ok &= DeleteVP8Encoder(enc); // must always be called, even if !ok + } else { + // Make sure we have ARGB samples. + if (pic->argb == NULL && !WebPPictureYUVAToARGB(pic)) { + return 0; + } + + if (!config->exact) { + WebPReplaceTransparentPixels(pic, 0x000000); + } + + ok = VP8LEncodeImage(config, pic); // Sets pic->error in case of problem. + } + + return ok; +} diff --git a/vendor/github.com/sizeofint/webpanimation/generate_from_libwebp.py b/vendor/github.com/sizeofint/webpanimation/generate_from_libwebp.py new file mode 100644 index 00000000..4ed29e8d --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/generate_from_libwebp.py @@ -0,0 +1,73 @@ +#!/usr/bin/python3 +# ./generate_from_libwebp.py /path/to/clean/libwebp/src/ +import glob +import os +import re +import sys + +FILE_KEYS = {} + +def fix_headers(libwebp_dir, code_text): + out = '' + for line in code_text: + # Special config.h + if 'src/webp/config.h' in line: + out += '#include "config.h"\n' + continue + line = line.replace('#include "src/', '#include "' + libwebp_dir + '/') + header_file = re.match('#include\s+["]([^"]+)["].*', line) + # regex to search for <, > too + #header_file = re.match('#include\s+[<"]([^>"]+)[>"].*', line) + if header_file: + header = header_file.groups()[0] + abs_header = os.path.abspath(header) + header_exists = os.path.exists(abs_header) + if header_exists and abs_header in FILE_KEYS: + out += '#include "' + FILE_KEYS[abs_header] + '"\n' + else: + out += line + '\n' + else: + out += line + '\n' + return out + +if len(sys.argv) != 2: + print('usage: ./generate_from_libwebp.py /path/to/clean/libwebp/src/') + os._exit(1) + +code = ['.c', '.s', '.S', '.sx', 'cc', 'cpp', 'cpp' ] +header = ['.h', '.hh', '.hpp', '.hxx' ] + +# Remove old files +files = os.listdir('.') +for file in files: + if file.endswith(tuple(code)) or file.endswith(tuple(header)): + os.remove(os.path.join('.', file)) + +path = sys.argv[1] + +for file in glob.iglob(path + '/**', recursive=True): + if file.endswith(tuple(code)) or file.endswith(tuple(header)): + key = os.path.abspath(file) + val = file.replace(path, '').replace('/', '_') + FILE_KEYS[key] = val + +root_dir = os.path.abspath('.') +libwebp_dir = os.path.abspath(path) +for full_path, local in FILE_KEYS.items(): + os.chdir(os.path.dirname(full_path)) + with open(full_path) as code: + code_text = code.read().splitlines() + code.close() + fixed = fix_headers(libwebp_dir, code_text) + os.chdir(root_dir) + local_file = open(local, "w") + local_file.write(fixed) + local_file.close() + +# Write config.h +config = '#ifndef WEBANIMATION_HPP\n#define WEBANIMATION_HPP\n' +config += '#define WEBP_USE_THREAD\n' +config += '#endif\n' +config_file = open('config.h', "w") +config_file.write(config) +config_file.close() diff --git a/vendor/github.com/sizeofint/webpanimation/go.mod b/vendor/github.com/sizeofint/webpanimation/go.mod new file mode 100644 index 00000000..00805651 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/go.mod @@ -0,0 +1,3 @@ +module github.com/sizeofint/webpanimation + +go 1.16 diff --git 
a/vendor/github.com/sizeofint/webpanimation/mux_anim_encode.c b/vendor/github.com/sizeofint/webpanimation/mux_anim_encode.c
new file mode 100644
index 00000000..3d250d34
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/mux_anim_encode.c
@@ -0,0 +1,1581 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// AnimEncoder implementation.
+//
+
+#include <assert.h>
+#include <limits.h>
+#include <math.h>    // for pow()
+#include <stdio.h>
+#include <stdlib.h>  // for abs()
+
+#include "mux_animi.h"
+#include "utils_utils.h"
+#include "webp_decode.h"
+#include "webp_encode.h"
+#include "webp_format_constants.h"
+#include "webp_mux.h"
+
+#if defined(_MSC_VER) && _MSC_VER < 1900
+#define snprintf _snprintf
+#endif
+
+#define ERROR_STR_MAX_LENGTH 100
+
+//------------------------------------------------------------------------------
+// Internal structs.
+
+// Stores frame rectangle dimensions.
+typedef struct {
+  int x_offset_, y_offset_, width_, height_;
+} FrameRectangle;
+
+// Used to store two candidates of encoded data for an animation frame. One of
+// the two will be chosen later.
+typedef struct {
+  WebPMuxFrameInfo sub_frame_;  // Encoded frame rectangle.
+  WebPMuxFrameInfo key_frame_;  // Encoded frame if it is a key-frame.
+  int is_key_frame_;            // True if 'key_frame' has been chosen.
+} EncodedFrame;
+
+struct WebPAnimEncoder {
+  const int canvas_width_;                // Canvas width.
+  const int canvas_height_;               // Canvas height.
+  const WebPAnimEncoderOptions options_;  // Global encoding options.
+
+  FrameRectangle prev_rect_;         // Previous WebP frame rectangle.
+  WebPConfig last_config_;           // Cached in case a re-encode is needed.
+  WebPConfig last_config_reversed_;  // If 'last_config_' uses lossless, then
+                                     // this config uses lossy and vice versa;
+                                     // only valid if 'options_.allow_mixed'
+                                     // is true.
+
+  WebPPicture* curr_canvas_;  // Only pointer; we don't own memory.
+
+  // Canvas buffers.
+  WebPPicture curr_canvas_copy_;   // Possibly modified current canvas.
+  int curr_canvas_copy_modified_;  // True if pixels in 'curr_canvas_copy_'
+                                   // differ from those in 'curr_canvas_'.
+
+  WebPPicture prev_canvas_;           // Previous canvas.
+  WebPPicture prev_canvas_disposed_;  // Previous canvas disposed to background.
+
+  // Encoded data.
+  EncodedFrame* encoded_frames_;  // Array of encoded frames.
+  size_t size_;                   // Number of allocated frames.
+  size_t start_;                  // Frame start index.
+  size_t count_;                  // Number of valid frames.
+  size_t flush_count_;            // If >0, 'flush_count' frames starting from
+                                  // 'start' are ready to be added to mux.
+
+  // key-frame related.
+  int64_t best_delta_;         // min(canvas size - frame size) over the frames.
+                               // Can be negative in certain cases due to
+                               // transparent pixels in a frame.
+  int keyframe_;               // Index of selected key-frame relative to 'start_'.
+  int count_since_key_frame_;  // Frames seen since the last key-frame.
+
+  int first_timestamp_;           // Timestamp of the first frame.
+  int prev_timestamp_;            // Timestamp of the last added frame.
+  int prev_candidate_undecided_;  // True if it's not yet decided if previous
+                                  // frame would be a sub-frame or a key-frame.
+
+  // Misc.
+ int is_first_frame_; // True if first frame is yet to be added/being added. + int got_null_frame_; // True if WebPAnimEncoderAdd() has already been called + // with a NULL frame. + + size_t in_frame_count_; // Number of input frames processed so far. + size_t out_frame_count_; // Number of frames added to mux so far. This may be + // different from 'in_frame_count_' due to merging. + + WebPMux* mux_; // Muxer to assemble the WebP bitstream. + char error_str_[ERROR_STR_MAX_LENGTH]; // Error string. Empty if no error. +}; + +// ----------------------------------------------------------------------------- +// Life of WebPAnimEncoder object. + +#define DELTA_INFINITY (1ULL << 32) +#define KEYFRAME_NONE (-1) + +// Reset the counters in the WebPAnimEncoder. +static void ResetCounters(WebPAnimEncoder* const enc) { + enc->start_ = 0; + enc->count_ = 0; + enc->flush_count_ = 0; + enc->best_delta_ = DELTA_INFINITY; + enc->keyframe_ = KEYFRAME_NONE; +} + +static void DisableKeyframes(WebPAnimEncoderOptions* const enc_options) { + enc_options->kmax = INT_MAX; + enc_options->kmin = enc_options->kmax - 1; +} + +#define MAX_CACHED_FRAMES 30 + +static void SanitizeEncoderOptions(WebPAnimEncoderOptions* const enc_options) { + int print_warning = enc_options->verbose; + + if (enc_options->minimize_size) { + DisableKeyframes(enc_options); + } + + if (enc_options->kmax == 1) { // All frames will be key-frames. + enc_options->kmin = 0; + enc_options->kmax = 0; + return; + } else if (enc_options->kmax <= 0) { + DisableKeyframes(enc_options); + print_warning = 0; + } + + if (enc_options->kmin >= enc_options->kmax) { + enc_options->kmin = enc_options->kmax - 1; + if (print_warning) { + fprintf(stderr, "WARNING: Setting kmin = %d, so that kmin < kmax.\n", + enc_options->kmin); + } + } else { + const int kmin_limit = enc_options->kmax / 2 + 1; + if (enc_options->kmin < kmin_limit && kmin_limit < enc_options->kmax) { + // This ensures that enc.keyframe + kmin >= kmax is always true. So, we + // can flush all the frames in the 'count_since_key_frame == kmax' case. + enc_options->kmin = kmin_limit; + if (print_warning) { + fprintf(stderr, + "WARNING: Setting kmin = %d, so that kmin >= kmax / 2 + 1.\n", + enc_options->kmin); + } + } + } + // Limit the max number of frames that are allocated. + if (enc_options->kmax - enc_options->kmin > MAX_CACHED_FRAMES) { + enc_options->kmin = enc_options->kmax - MAX_CACHED_FRAMES; + if (print_warning) { + fprintf(stderr, + "WARNING: Setting kmin = %d, so that kmax - kmin <= %d.\n", + enc_options->kmin, MAX_CACHED_FRAMES); + } + } + assert(enc_options->kmin < enc_options->kmax); +} + +#undef MAX_CACHED_FRAMES + +static void DefaultEncoderOptions(WebPAnimEncoderOptions* const enc_options) { + enc_options->anim_params.loop_count = 0; + enc_options->anim_params.bgcolor = 0xffffffff; // White. + enc_options->minimize_size = 0; + DisableKeyframes(enc_options); + enc_options->allow_mixed = 0; + enc_options->verbose = 0; +} + +int WebPAnimEncoderOptionsInitInternal(WebPAnimEncoderOptions* enc_options, + int abi_version) { + if (enc_options == NULL || + WEBP_ABI_IS_INCOMPATIBLE(abi_version, WEBP_MUX_ABI_VERSION)) { + return 0; + } + DefaultEncoderOptions(enc_options); + return 1; +} + +// This starting value is more fit to WebPCleanupTransparentAreaLossless(). 
+#define TRANSPARENT_COLOR 0x00000000 + +static void ClearRectangle(WebPPicture* const picture, + int left, int top, int width, int height) { + int j; + for (j = top; j < top + height; ++j) { + uint32_t* const dst = picture->argb + j * picture->argb_stride; + int i; + for (i = left; i < left + width; ++i) { + dst[i] = TRANSPARENT_COLOR; + } + } +} + +static void WebPUtilClearPic(WebPPicture* const picture, + const FrameRectangle* const rect) { + if (rect != NULL) { + ClearRectangle(picture, rect->x_offset_, rect->y_offset_, + rect->width_, rect->height_); + } else { + ClearRectangle(picture, 0, 0, picture->width, picture->height); + } +} + +static void MarkNoError(WebPAnimEncoder* const enc) { + enc->error_str_[0] = '\0'; // Empty string. +} + +static void MarkError(WebPAnimEncoder* const enc, const char* str) { + if (snprintf(enc->error_str_, ERROR_STR_MAX_LENGTH, "%s.", str) < 0) { + assert(0); // FIX ME! + } +} + +static void MarkError2(WebPAnimEncoder* const enc, + const char* str, int error_code) { + if (snprintf(enc->error_str_, ERROR_STR_MAX_LENGTH, "%s: %d.", str, + error_code) < 0) { + assert(0); // FIX ME! + } +} + +WebPAnimEncoder* WebPAnimEncoderNewInternal( + int width, int height, const WebPAnimEncoderOptions* enc_options, + int abi_version) { + WebPAnimEncoder* enc; + + if (WEBP_ABI_IS_INCOMPATIBLE(abi_version, WEBP_MUX_ABI_VERSION)) { + return NULL; + } + if (width <= 0 || height <= 0 || + (width * (uint64_t)height) >= MAX_IMAGE_AREA) { + return NULL; + } + + enc = (WebPAnimEncoder*)WebPSafeCalloc(1, sizeof(*enc)); + if (enc == NULL) return NULL; + // sanity inits, so we can call WebPAnimEncoderDelete(): + enc->encoded_frames_ = NULL; + enc->mux_ = NULL; + MarkNoError(enc); + + // Dimensions and options. + *(int*)&enc->canvas_width_ = width; + *(int*)&enc->canvas_height_ = height; + if (enc_options != NULL) { + *(WebPAnimEncoderOptions*)&enc->options_ = *enc_options; + SanitizeEncoderOptions((WebPAnimEncoderOptions*)&enc->options_); + } else { + DefaultEncoderOptions((WebPAnimEncoderOptions*)&enc->options_); + } + + // Canvas buffers. + if (!WebPPictureInit(&enc->curr_canvas_copy_) || + !WebPPictureInit(&enc->prev_canvas_) || + !WebPPictureInit(&enc->prev_canvas_disposed_)) { + goto Err; + } + enc->curr_canvas_copy_.width = width; + enc->curr_canvas_copy_.height = height; + enc->curr_canvas_copy_.use_argb = 1; + if (!WebPPictureAlloc(&enc->curr_canvas_copy_) || + !WebPPictureCopy(&enc->curr_canvas_copy_, &enc->prev_canvas_) || + !WebPPictureCopy(&enc->curr_canvas_copy_, &enc->prev_canvas_disposed_)) { + goto Err; + } + WebPUtilClearPic(&enc->prev_canvas_, NULL); + enc->curr_canvas_copy_modified_ = 1; + + // Encoded frames. + ResetCounters(enc); + // Note: one extra storage is for the previous frame. + enc->size_ = enc->options_.kmax - enc->options_.kmin + 1; + // We need space for at least 2 frames. But when kmin, kmax are both zero, + // enc->size_ will be 1. So we handle that special case below. + if (enc->size_ < 2) enc->size_ = 2; + enc->encoded_frames_ = + (EncodedFrame*)WebPSafeCalloc(enc->size_, sizeof(*enc->encoded_frames_)); + if (enc->encoded_frames_ == NULL) goto Err; + + enc->mux_ = WebPMuxNew(); + if (enc->mux_ == NULL) goto Err; + + enc->count_since_key_frame_ = 0; + enc->first_timestamp_ = 0; + enc->prev_timestamp_ = 0; + enc->prev_candidate_undecided_ = 0; + enc->is_first_frame_ = 1; + enc->got_null_frame_ = 0; + + return enc; // All OK. + + Err: + WebPAnimEncoderDelete(enc); + return NULL; +} + +// Release the data contained by 'encoded_frame'. 
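+// Note: a cached frame may hold two encoded bitstreams at once (the
+// sub-frame candidate and the key-frame candidate), so both WebPData
+// buffers are cleared here.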
+static void FrameRelease(EncodedFrame* const encoded_frame) { + if (encoded_frame != NULL) { + WebPDataClear(&encoded_frame->sub_frame_.bitstream); + WebPDataClear(&encoded_frame->key_frame_.bitstream); + memset(encoded_frame, 0, sizeof(*encoded_frame)); + } +} + +void WebPAnimEncoderDelete(WebPAnimEncoder* enc) { + if (enc != NULL) { + WebPPictureFree(&enc->curr_canvas_copy_); + WebPPictureFree(&enc->prev_canvas_); + WebPPictureFree(&enc->prev_canvas_disposed_); + if (enc->encoded_frames_ != NULL) { + size_t i; + for (i = 0; i < enc->size_; ++i) { + FrameRelease(&enc->encoded_frames_[i]); + } + WebPSafeFree(enc->encoded_frames_); + } + WebPMuxDelete(enc->mux_); + WebPSafeFree(enc); + } +} + +// ----------------------------------------------------------------------------- +// Frame addition. + +// Returns cached frame at the given 'position'. +static EncodedFrame* GetFrame(const WebPAnimEncoder* const enc, + size_t position) { + assert(enc->start_ + position < enc->size_); + return &enc->encoded_frames_[enc->start_ + position]; +} + +typedef int (*ComparePixelsFunc)(const uint32_t*, int, const uint32_t*, int, + int, int); + +// Returns true if 'length' number of pixels in 'src' and 'dst' are equal, +// assuming the given step sizes between pixels. +// 'max_allowed_diff' is unused and only there to allow function pointer use. +static WEBP_INLINE int ComparePixelsLossless(const uint32_t* src, int src_step, + const uint32_t* dst, int dst_step, + int length, int max_allowed_diff) { + (void)max_allowed_diff; + assert(length > 0); + while (length-- > 0) { + if (*src != *dst) { + return 0; + } + src += src_step; + dst += dst_step; + } + return 1; +} + +// Helper to check if each channel in 'src' and 'dst' is at most off by +// 'max_allowed_diff'. +static WEBP_INLINE int PixelsAreSimilar(uint32_t src, uint32_t dst, + int max_allowed_diff) { + const int src_a = (src >> 24) & 0xff; + const int src_r = (src >> 16) & 0xff; + const int src_g = (src >> 8) & 0xff; + const int src_b = (src >> 0) & 0xff; + const int dst_a = (dst >> 24) & 0xff; + const int dst_r = (dst >> 16) & 0xff; + const int dst_g = (dst >> 8) & 0xff; + const int dst_b = (dst >> 0) & 0xff; + + return (src_a == dst_a) && + (abs(src_r - dst_r) * dst_a <= (max_allowed_diff * 255)) && + (abs(src_g - dst_g) * dst_a <= (max_allowed_diff * 255)) && + (abs(src_b - dst_b) * dst_a <= (max_allowed_diff * 255)); +} + +// Returns true if 'length' number of pixels in 'src' and 'dst' are within an +// error bound, assuming the given step sizes between pixels. +static WEBP_INLINE int ComparePixelsLossy(const uint32_t* src, int src_step, + const uint32_t* dst, int dst_step, + int length, int max_allowed_diff) { + assert(length > 0); + while (length-- > 0) { + if (!PixelsAreSimilar(*src, *dst, max_allowed_diff)) { + return 0; + } + src += src_step; + dst += dst_step; + } + return 1; +} + +static int IsEmptyRect(const FrameRectangle* const rect) { + return (rect->width_ == 0) || (rect->height_ == 0); +} + +static int QualityToMaxDiff(float quality) { + const double val = pow(quality / 100., 0.5); + const double max_diff = 31 * (1 - val) + 1 * val; + return (int)(max_diff + 0.5); +} + +// Assumes that an initial valid guess of change rectangle 'rect' is passed. +static void MinimizeChangeRectangle(const WebPPicture* const src, + const WebPPicture* const dst, + FrameRectangle* const rect, + int is_lossless, float quality) { + int i, j; + const ComparePixelsFunc compare_pixels = + is_lossless ? 
ComparePixelsLossless : ComparePixelsLossy;
+  const int max_allowed_diff_lossy = QualityToMaxDiff(quality);
+  const int max_allowed_diff = is_lossless ? 0 : max_allowed_diff_lossy;
+
+  // Sanity checks.
+  assert(src->width == dst->width && src->height == dst->height);
+  assert(rect->x_offset_ + rect->width_ <= dst->width);
+  assert(rect->y_offset_ + rect->height_ <= dst->height);
+
+  // Left boundary.
+  for (i = rect->x_offset_; i < rect->x_offset_ + rect->width_; ++i) {
+    const uint32_t* const src_argb =
+        &src->argb[rect->y_offset_ * src->argb_stride + i];
+    const uint32_t* const dst_argb =
+        &dst->argb[rect->y_offset_ * dst->argb_stride + i];
+    if (compare_pixels(src_argb, src->argb_stride, dst_argb, dst->argb_stride,
+                       rect->height_, max_allowed_diff)) {
+      --rect->width_;  // Redundant column.
+      ++rect->x_offset_;
+    } else {
+      break;
+    }
+  }
+  if (rect->width_ == 0) goto NoChange;
+
+  // Right boundary.
+  for (i = rect->x_offset_ + rect->width_ - 1; i >= rect->x_offset_; --i) {
+    const uint32_t* const src_argb =
+        &src->argb[rect->y_offset_ * src->argb_stride + i];
+    const uint32_t* const dst_argb =
+        &dst->argb[rect->y_offset_ * dst->argb_stride + i];
+    if (compare_pixels(src_argb, src->argb_stride, dst_argb, dst->argb_stride,
+                       rect->height_, max_allowed_diff)) {
+      --rect->width_;  // Redundant column.
+    } else {
+      break;
+    }
+  }
+  if (rect->width_ == 0) goto NoChange;
+
+  // Top boundary.
+  for (j = rect->y_offset_; j < rect->y_offset_ + rect->height_; ++j) {
+    const uint32_t* const src_argb =
+        &src->argb[j * src->argb_stride + rect->x_offset_];
+    const uint32_t* const dst_argb =
+        &dst->argb[j * dst->argb_stride + rect->x_offset_];
+    if (compare_pixels(src_argb, 1, dst_argb, 1, rect->width_,
+                       max_allowed_diff)) {
+      --rect->height_;  // Redundant row.
+      ++rect->y_offset_;
+    } else {
+      break;
+    }
+  }
+  if (rect->height_ == 0) goto NoChange;
+
+  // Bottom boundary.
+  for (j = rect->y_offset_ + rect->height_ - 1; j >= rect->y_offset_; --j) {
+    const uint32_t* const src_argb =
+        &src->argb[j * src->argb_stride + rect->x_offset_];
+    const uint32_t* const dst_argb =
+        &dst->argb[j * dst->argb_stride + rect->x_offset_];
+    if (compare_pixels(src_argb, 1, dst_argb, 1, rect->width_,
+                       max_allowed_diff)) {
+      --rect->height_;  // Redundant row.
+    } else {
+      break;
+    }
+  }
+  if (rect->height_ == 0) goto NoChange;
+
+  if (IsEmptyRect(rect)) {
+ NoChange:
+    rect->x_offset_ = 0;
+    rect->y_offset_ = 0;
+    rect->width_ = 0;
+    rect->height_ = 0;
+  }
+}
+
+// Snap rectangle to even offsets (and adjust dimensions if needed).
+static WEBP_INLINE void SnapToEvenOffsets(FrameRectangle* const rect) {
+  rect->width_ += (rect->x_offset_ & 1);
+  rect->height_ += (rect->y_offset_ & 1);
+  rect->x_offset_ &= ~1;
+  rect->y_offset_ &= ~1;
+}
+
+typedef struct {
+  int should_try_;               // Should try this set of parameters.
+  int empty_rect_allowed_;       // Frame with empty rectangle can be skipped.
+  FrameRectangle rect_ll_;       // Frame rectangle for lossless compression.
+  WebPPicture sub_frame_ll_;     // Sub-frame pic for lossless compression.
+  FrameRectangle rect_lossy_;    // Frame rectangle for lossy compression.
+                                 // Could be smaller than rect_ll_ as pixels
+                                 // with small diffs can be ignored.
+  WebPPicture sub_frame_lossy_;  // Sub-frame pic for lossy compression.
+} SubFrameParams;
+
+static int SubFrameParamsInit(SubFrameParams* const params,
+                              int should_try, int empty_rect_allowed) {
+  params->should_try_ = should_try;
+  params->empty_rect_allowed_ = empty_rect_allowed;
+  if (!WebPPictureInit(&params->sub_frame_ll_) ||
+      !WebPPictureInit(&params->sub_frame_lossy_)) {
+    return 0;
+  }
+  return 1;
+}
+
+static void SubFrameParamsFree(SubFrameParams* const params) {
+  WebPPictureFree(&params->sub_frame_ll_);
+  WebPPictureFree(&params->sub_frame_lossy_);
+}
+
+// Given previous and current canvas, picks the optimal rectangle for the
+// current frame based on 'is_lossless' and other parameters. Assumes that the
+// initial guess 'rect' is valid.
+static int GetSubRect(const WebPPicture* const prev_canvas,
+                      const WebPPicture* const curr_canvas, int is_key_frame,
+                      int is_first_frame, int empty_rect_allowed,
+                      int is_lossless, float quality,
+                      FrameRectangle* const rect,
+                      WebPPicture* const sub_frame) {
+  if (!is_key_frame || is_first_frame) {  // Optimize frame rectangle.
+    // Note: This behaves as expected for first frame, as 'prev_canvas' is
+    // initialized to a fully transparent canvas in the beginning.
+    MinimizeChangeRectangle(prev_canvas, curr_canvas, rect,
+                            is_lossless, quality);
+  }
+
+  if (IsEmptyRect(rect)) {
+    if (empty_rect_allowed) {  // No need to get 'sub_frame'.
+      return 1;
+    } else {                   // Force a 1x1 rectangle.
+      rect->width_ = 1;
+      rect->height_ = 1;
+      assert(rect->x_offset_ == 0);
+      assert(rect->y_offset_ == 0);
+    }
+  }
+
+  SnapToEvenOffsets(rect);
+  return WebPPictureView(curr_canvas, rect->x_offset_, rect->y_offset_,
+                         rect->width_, rect->height_, sub_frame);
+}
+
+// Picks optimal frame rectangle for both lossless and lossy compression. The
+// initial guess for frame rectangles will be the full canvas.
+static int GetSubRects(const WebPPicture* const prev_canvas,
+                       const WebPPicture* const curr_canvas, int is_key_frame,
+                       int is_first_frame, float quality,
+                       SubFrameParams* const params) {
+  // Lossless frame rectangle.
+  params->rect_ll_.x_offset_ = 0;
+  params->rect_ll_.y_offset_ = 0;
+  params->rect_ll_.width_ = curr_canvas->width;
+  params->rect_ll_.height_ = curr_canvas->height;
+  if (!GetSubRect(prev_canvas, curr_canvas, is_key_frame, is_first_frame,
+                  params->empty_rect_allowed_, 1, quality,
+                  &params->rect_ll_, &params->sub_frame_ll_)) {
+    return 0;
+  }
+  // Lossy frame rectangle.
+  params->rect_lossy_ = params->rect_ll_;  // seed with lossless rect.
+  return GetSubRect(prev_canvas, curr_canvas, is_key_frame, is_first_frame,
+                    params->empty_rect_allowed_, 0, quality,
+                    &params->rect_lossy_, &params->sub_frame_lossy_);
+}
+
+static WEBP_INLINE int clip(int v, int min_v, int max_v) {
+  return (v < min_v) ? min_v : (v > max_v) ?
max_v : v; +} + +int WebPAnimEncoderRefineRect( + const WebPPicture* const prev_canvas, const WebPPicture* const curr_canvas, + int is_lossless, float quality, int* const x_offset, int* const y_offset, + int* const width, int* const height) { + FrameRectangle rect; + const int right = clip(*x_offset + *width, 0, curr_canvas->width); + const int left = clip(*x_offset, 0, curr_canvas->width - 1); + const int bottom = clip(*y_offset + *height, 0, curr_canvas->height); + const int top = clip(*y_offset, 0, curr_canvas->height - 1); + if (prev_canvas == NULL || curr_canvas == NULL || + prev_canvas->width != curr_canvas->width || + prev_canvas->height != curr_canvas->height || + !prev_canvas->use_argb || !curr_canvas->use_argb) { + return 0; + } + rect.x_offset_ = left; + rect.y_offset_ = top; + rect.width_ = clip(right - left, 0, curr_canvas->width - rect.x_offset_); + rect.height_ = clip(bottom - top, 0, curr_canvas->height - rect.y_offset_); + MinimizeChangeRectangle(prev_canvas, curr_canvas, &rect, is_lossless, + quality); + SnapToEvenOffsets(&rect); + *x_offset = rect.x_offset_; + *y_offset = rect.y_offset_; + *width = rect.width_; + *height = rect.height_; + return 1; +} + +static void DisposeFrameRectangle(int dispose_method, + const FrameRectangle* const rect, + WebPPicture* const curr_canvas) { + assert(rect != NULL); + if (dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) { + WebPUtilClearPic(curr_canvas, rect); + } +} + +static uint32_t RectArea(const FrameRectangle* const rect) { + return (uint32_t)rect->width_ * rect->height_; +} + +static int IsLosslessBlendingPossible(const WebPPicture* const src, + const WebPPicture* const dst, + const FrameRectangle* const rect) { + int i, j; + assert(src->width == dst->width && src->height == dst->height); + assert(rect->x_offset_ + rect->width_ <= dst->width); + assert(rect->y_offset_ + rect->height_ <= dst->height); + for (j = rect->y_offset_; j < rect->y_offset_ + rect->height_; ++j) { + for (i = rect->x_offset_; i < rect->x_offset_ + rect->width_; ++i) { + const uint32_t src_pixel = src->argb[j * src->argb_stride + i]; + const uint32_t dst_pixel = dst->argb[j * dst->argb_stride + i]; + const uint32_t dst_alpha = dst_pixel >> 24; + if (dst_alpha != 0xff && src_pixel != dst_pixel) { + // In this case, if we use blending, we can't attain the desired + // 'dst_pixel' value for this pixel. So, blending is not possible. + return 0; + } + } + } + return 1; +} + +static int IsLossyBlendingPossible(const WebPPicture* const src, + const WebPPicture* const dst, + const FrameRectangle* const rect, + float quality) { + const int max_allowed_diff_lossy = QualityToMaxDiff(quality); + int i, j; + assert(src->width == dst->width && src->height == dst->height); + assert(rect->x_offset_ + rect->width_ <= dst->width); + assert(rect->y_offset_ + rect->height_ <= dst->height); + for (j = rect->y_offset_; j < rect->y_offset_ + rect->height_; ++j) { + for (i = rect->x_offset_; i < rect->x_offset_ + rect->width_; ++i) { + const uint32_t src_pixel = src->argb[j * src->argb_stride + i]; + const uint32_t dst_pixel = dst->argb[j * dst->argb_stride + i]; + const uint32_t dst_alpha = dst_pixel >> 24; + if (dst_alpha != 0xff && + !PixelsAreSimilar(src_pixel, dst_pixel, max_allowed_diff_lossy)) { + // In this case, if we use blending, we can't attain the desired + // 'dst_pixel' value for this pixel. So, blending is not possible. + return 0; + } + } + } + return 1; +} + +// For pixels in 'rect', replace those pixels in 'dst' that are same as 'src' by +// transparent pixels. 
+// Returns true if at least one pixel gets modified. +static int IncreaseTransparency(const WebPPicture* const src, + const FrameRectangle* const rect, + WebPPicture* const dst) { + int i, j; + int modified = 0; + assert(src != NULL && dst != NULL && rect != NULL); + assert(src->width == dst->width && src->height == dst->height); + for (j = rect->y_offset_; j < rect->y_offset_ + rect->height_; ++j) { + const uint32_t* const psrc = src->argb + j * src->argb_stride; + uint32_t* const pdst = dst->argb + j * dst->argb_stride; + for (i = rect->x_offset_; i < rect->x_offset_ + rect->width_; ++i) { + if (psrc[i] == pdst[i] && pdst[i] != TRANSPARENT_COLOR) { + pdst[i] = TRANSPARENT_COLOR; + modified = 1; + } + } + } + return modified; +} + +#undef TRANSPARENT_COLOR + +// Replace similar blocks of pixels by a 'see-through' transparent block +// with uniform average color. +// Assumes lossy compression is being used. +// Returns true if at least one pixel gets modified. +static int FlattenSimilarBlocks(const WebPPicture* const src, + const FrameRectangle* const rect, + WebPPicture* const dst, float quality) { + const int max_allowed_diff_lossy = QualityToMaxDiff(quality); + int i, j; + int modified = 0; + const int block_size = 8; + const int y_start = (rect->y_offset_ + block_size) & ~(block_size - 1); + const int y_end = (rect->y_offset_ + rect->height_) & ~(block_size - 1); + const int x_start = (rect->x_offset_ + block_size) & ~(block_size - 1); + const int x_end = (rect->x_offset_ + rect->width_) & ~(block_size - 1); + assert(src != NULL && dst != NULL && rect != NULL); + assert(src->width == dst->width && src->height == dst->height); + assert((block_size & (block_size - 1)) == 0); // must be a power of 2 + // Iterate over each block and count similar pixels. + for (j = y_start; j < y_end; j += block_size) { + for (i = x_start; i < x_end; i += block_size) { + int cnt = 0; + int avg_r = 0, avg_g = 0, avg_b = 0; + int x, y; + const uint32_t* const psrc = src->argb + j * src->argb_stride + i; + uint32_t* const pdst = dst->argb + j * dst->argb_stride + i; + for (y = 0; y < block_size; ++y) { + for (x = 0; x < block_size; ++x) { + const uint32_t src_pixel = psrc[x + y * src->argb_stride]; + const int alpha = src_pixel >> 24; + if (alpha == 0xff && + PixelsAreSimilar(src_pixel, pdst[x + y * dst->argb_stride], + max_allowed_diff_lossy)) { + ++cnt; + avg_r += (src_pixel >> 16) & 0xff; + avg_g += (src_pixel >> 8) & 0xff; + avg_b += (src_pixel >> 0) & 0xff; + } + } + } + // If we have a fully similar block, we replace it with an + // average transparent block. This compresses better in lossy mode. + if (cnt == block_size * block_size) { + const uint32_t color = (0x00 << 24) | + ((avg_r / cnt) << 16) | + ((avg_g / cnt) << 8) | + ((avg_b / cnt) << 0); + for (y = 0; y < block_size; ++y) { + for (x = 0; x < block_size; ++x) { + pdst[x + y * dst->argb_stride] = color; + } + } + modified = 1; + } + } + } + return modified; +} + +static int EncodeFrame(const WebPConfig* const config, WebPPicture* const pic, + WebPMemoryWriter* const memory) { + pic->use_argb = 1; + pic->writer = WebPMemoryWrite; + pic->custom_ptr = memory; + if (!WebPEncode(config, pic)) { + return 0; + } + return 1; +} + +// Struct representing a candidate encoded frame including its metadata. +typedef struct { + WebPMemoryWriter mem_; + WebPMuxFrameInfo info_; + FrameRectangle rect_; + int evaluate_; // True if this candidate should be evaluated. +} Candidate; + +// Generates a candidate encoded frame given a picture and metadata. 
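+// Note: for each frame, up to four such candidates can be produced
+// (lossless or lossy, each with dispose-none or dispose-to-background;
+// see the CANDIDATE_COUNT enum below). PickBestCandidate() later keeps
+// the one with the smallest encoded size.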
+static WebPEncodingError EncodeCandidate(WebPPicture* const sub_frame,
+                                         const FrameRectangle* const rect,
+                                         const WebPConfig* const encoder_config,
+                                         int use_blending,
+                                         Candidate* const candidate) {
+  WebPConfig config = *encoder_config;
+  WebPEncodingError error_code = VP8_ENC_OK;
+  assert(candidate != NULL);
+  memset(candidate, 0, sizeof(*candidate));
+
+  // Set frame rect and info.
+  candidate->rect_ = *rect;
+  candidate->info_.id = WEBP_CHUNK_ANMF;
+  candidate->info_.x_offset = rect->x_offset_;
+  candidate->info_.y_offset = rect->y_offset_;
+  candidate->info_.dispose_method = WEBP_MUX_DISPOSE_NONE;  // Set later.
+  candidate->info_.blend_method =
+      use_blending ? WEBP_MUX_BLEND : WEBP_MUX_NO_BLEND;
+  candidate->info_.duration = 0;  // Set in next call to WebPAnimEncoderAdd().
+
+  // Encode picture.
+  WebPMemoryWriterInit(&candidate->mem_);
+
+  if (!config.lossless && use_blending) {
+    // Disable filtering to avoid blockiness in reconstructed frames at the
+    // time of decoding.
+    config.autofilter = 0;
+    config.filter_strength = 0;
+  }
+  if (!EncodeFrame(&config, sub_frame, &candidate->mem_)) {
+    error_code = sub_frame->error_code;
+    goto Err;
+  }
+
+  candidate->evaluate_ = 1;
+  return error_code;
+
+ Err:
+  WebPMemoryWriterClear(&candidate->mem_);
+  return error_code;
+}
+
+static void CopyCurrentCanvas(WebPAnimEncoder* const enc) {
+  if (enc->curr_canvas_copy_modified_) {
+    WebPCopyPixels(enc->curr_canvas_, &enc->curr_canvas_copy_);
+    enc->curr_canvas_copy_.progress_hook = enc->curr_canvas_->progress_hook;
+    enc->curr_canvas_copy_.user_data = enc->curr_canvas_->user_data;
+    enc->curr_canvas_copy_modified_ = 0;
+  }
+}
+
+enum {
+  LL_DISP_NONE = 0,
+  LL_DISP_BG,
+  LOSSY_DISP_NONE,
+  LOSSY_DISP_BG,
+  CANDIDATE_COUNT
+};
+
+#define MIN_COLORS_LOSSY     31  // Don't try lossy below this threshold.
+#define MAX_COLORS_LOSSLESS 194  // Don't try lossless above this threshold.
+
+// Generates candidates for a given dispose method given pre-filled sub-frame
+// 'params'.
+static WebPEncodingError GenerateCandidates(
+    WebPAnimEncoder* const enc, Candidate candidates[CANDIDATE_COUNT],
+    WebPMuxAnimDispose dispose_method, int is_lossless, int is_key_frame,
+    SubFrameParams* const params,
+    const WebPConfig* const config_ll, const WebPConfig* const config_lossy) {
+  WebPEncodingError error_code = VP8_ENC_OK;
+  const int is_dispose_none = (dispose_method == WEBP_MUX_DISPOSE_NONE);
+  Candidate* const candidate_ll =
+      is_dispose_none ? &candidates[LL_DISP_NONE] : &candidates[LL_DISP_BG];
+  Candidate* const candidate_lossy = is_dispose_none
+                                     ? &candidates[LOSSY_DISP_NONE]
+                                     : &candidates[LOSSY_DISP_BG];
+  WebPPicture* const curr_canvas = &enc->curr_canvas_copy_;
+  const WebPPicture* const prev_canvas =
+      is_dispose_none ? &enc->prev_canvas_ : &enc->prev_canvas_disposed_;
+  int use_blending_ll, use_blending_lossy;
+  int evaluate_ll, evaluate_lossy;
+
+  CopyCurrentCanvas(enc);
+  use_blending_ll =
+      !is_key_frame &&
+      IsLosslessBlendingPossible(prev_canvas, curr_canvas, &params->rect_ll_);
+  use_blending_lossy =
+      !is_key_frame &&
+      IsLossyBlendingPossible(prev_canvas, curr_canvas, &params->rect_lossy_,
+                              config_lossy->quality);
+
+  // Pick candidates to be tried.
+  if (!enc->options_.allow_mixed) {
+    evaluate_ll = is_lossless;
+    evaluate_lossy = !is_lossless;
+  } else if (enc->options_.minimize_size) {
+    evaluate_ll = 1;
+    evaluate_lossy = 1;
+  } else {  // Use a heuristic for trying lossless and/or lossy compression.
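+    // A small palette usually favors lossless coding and a large one lossy
+    // coding; the thresholds below overlap, so frames with a mid-sized
+    // palette are tried both ways and the smaller bitstream wins.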
+    const int num_colors = WebPGetColorPalette(&params->sub_frame_ll_, NULL);
+    evaluate_ll = (num_colors < MAX_COLORS_LOSSLESS);
+    evaluate_lossy = (num_colors >= MIN_COLORS_LOSSY);
+  }
+
+  // Generate candidates.
+  if (evaluate_ll) {
+    CopyCurrentCanvas(enc);
+    if (use_blending_ll) {
+      enc->curr_canvas_copy_modified_ =
+          IncreaseTransparency(prev_canvas, &params->rect_ll_, curr_canvas);
+    }
+    error_code = EncodeCandidate(&params->sub_frame_ll_, &params->rect_ll_,
+                                 config_ll, use_blending_ll, candidate_ll);
+    if (error_code != VP8_ENC_OK) return error_code;
+  }
+  if (evaluate_lossy) {
+    CopyCurrentCanvas(enc);
+    if (use_blending_lossy) {
+      enc->curr_canvas_copy_modified_ =
+          FlattenSimilarBlocks(prev_canvas, &params->rect_lossy_, curr_canvas,
+                               config_lossy->quality);
+    }
+    error_code =
+        EncodeCandidate(&params->sub_frame_lossy_, &params->rect_lossy_,
+                        config_lossy, use_blending_lossy, candidate_lossy);
+    if (error_code != VP8_ENC_OK) return error_code;
+    enc->curr_canvas_copy_modified_ = 1;
+  }
+  return error_code;
+}
+
+#undef MIN_COLORS_LOSSY
+#undef MAX_COLORS_LOSSLESS
+
+static void GetEncodedData(const WebPMemoryWriter* const memory,
+                           WebPData* const encoded_data) {
+  encoded_data->bytes = memory->mem;
+  encoded_data->size = memory->size;
+}
+
+// Sets dispose method of the previous frame to be 'dispose_method'.
+static void SetPreviousDisposeMethod(WebPAnimEncoder* const enc,
+                                     WebPMuxAnimDispose dispose_method) {
+  const size_t position = enc->count_ - 2;
+  EncodedFrame* const prev_enc_frame = GetFrame(enc, position);
+  assert(enc->count_ >= 2);  // As current and previous frames are in enc.
+
+  if (enc->prev_candidate_undecided_) {
+    assert(dispose_method == WEBP_MUX_DISPOSE_NONE);
+    prev_enc_frame->sub_frame_.dispose_method = dispose_method;
+    prev_enc_frame->key_frame_.dispose_method = dispose_method;
+  } else {
+    WebPMuxFrameInfo* const prev_info = prev_enc_frame->is_key_frame_
+                                        ? &prev_enc_frame->key_frame_
+                                        : &prev_enc_frame->sub_frame_;
+    prev_info->dispose_method = dispose_method;
+  }
+}
+
+static int IncreasePreviousDuration(WebPAnimEncoder* const enc, int duration) {
+  const size_t position = enc->count_ - 1;
+  EncodedFrame* const prev_enc_frame = GetFrame(enc, position);
+  int new_duration;
+
+  assert(enc->count_ >= 1);
+  assert(prev_enc_frame->sub_frame_.duration ==
+         prev_enc_frame->key_frame_.duration);
+  assert(prev_enc_frame->sub_frame_.duration ==
+         (prev_enc_frame->sub_frame_.duration & (MAX_DURATION - 1)));
+  assert(duration == (duration & (MAX_DURATION - 1)));
+
+  new_duration = prev_enc_frame->sub_frame_.duration + duration;
+  if (new_duration >= MAX_DURATION) {  // Special case.
+    // Separate out previous frame from earlier merged frames to avoid overflow.
+    // We add a 1x1 transparent frame for the previous frame, with blending on.
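+    // (The ANMF duration field is 24 bits, so merged durations must stay
+    // below 2^24 ms, roughly 4.6 hours; instead of overflowing, the encoder
+    // emits this placeholder frame to carry the extra duration.)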
+ const FrameRectangle rect = { 0, 0, 1, 1 }; + const uint8_t lossless_1x1_bytes[] = { + 0x52, 0x49, 0x46, 0x46, 0x14, 0x00, 0x00, 0x00, 0x57, 0x45, 0x42, 0x50, + 0x56, 0x50, 0x38, 0x4c, 0x08, 0x00, 0x00, 0x00, 0x2f, 0x00, 0x00, 0x00, + 0x10, 0x88, 0x88, 0x08 + }; + const WebPData lossless_1x1 = { + lossless_1x1_bytes, sizeof(lossless_1x1_bytes) + }; + const uint8_t lossy_1x1_bytes[] = { + 0x52, 0x49, 0x46, 0x46, 0x40, 0x00, 0x00, 0x00, 0x57, 0x45, 0x42, 0x50, + 0x56, 0x50, 0x38, 0x58, 0x0a, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, 0x4c, 0x50, 0x48, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x56, 0x50, 0x38, 0x20, 0x18, 0x00, 0x00, 0x00, + 0x30, 0x01, 0x00, 0x9d, 0x01, 0x2a, 0x01, 0x00, 0x01, 0x00, 0x02, 0x00, + 0x34, 0x25, 0xa4, 0x00, 0x03, 0x70, 0x00, 0xfe, 0xfb, 0xfd, 0x50, 0x00 + }; + const WebPData lossy_1x1 = { lossy_1x1_bytes, sizeof(lossy_1x1_bytes) }; + const int can_use_lossless = + (enc->last_config_.lossless || enc->options_.allow_mixed); + EncodedFrame* const curr_enc_frame = GetFrame(enc, enc->count_); + curr_enc_frame->is_key_frame_ = 0; + curr_enc_frame->sub_frame_.id = WEBP_CHUNK_ANMF; + curr_enc_frame->sub_frame_.x_offset = 0; + curr_enc_frame->sub_frame_.y_offset = 0; + curr_enc_frame->sub_frame_.dispose_method = WEBP_MUX_DISPOSE_NONE; + curr_enc_frame->sub_frame_.blend_method = WEBP_MUX_BLEND; + curr_enc_frame->sub_frame_.duration = duration; + if (!WebPDataCopy(can_use_lossless ? &lossless_1x1 : &lossy_1x1, + &curr_enc_frame->sub_frame_.bitstream)) { + return 0; + } + ++enc->count_; + ++enc->count_since_key_frame_; + enc->flush_count_ = enc->count_ - 1; + enc->prev_candidate_undecided_ = 0; + enc->prev_rect_ = rect; + } else { // Regular case. + // Increase duration of the previous frame by 'duration'. + prev_enc_frame->sub_frame_.duration = new_duration; + prev_enc_frame->key_frame_.duration = new_duration; + } + return 1; +} + +// Pick the candidate encoded frame with smallest size and release other +// candidates. +// TODO(later): Perhaps a rough SSIM/PSNR produced by the encoder should +// also be a criteria, in addition to sizes. +static void PickBestCandidate(WebPAnimEncoder* const enc, + Candidate* const candidates, int is_key_frame, + EncodedFrame* const encoded_frame) { + int i; + int best_idx = -1; + size_t best_size = ~0; + for (i = 0; i < CANDIDATE_COUNT; ++i) { + if (candidates[i].evaluate_) { + const size_t candidate_size = candidates[i].mem_.size; + if (candidate_size < best_size) { + best_idx = i; + best_size = candidate_size; + } + } + } + assert(best_idx != -1); + for (i = 0; i < CANDIDATE_COUNT; ++i) { + if (candidates[i].evaluate_) { + if (i == best_idx) { + WebPMuxFrameInfo* const dst = is_key_frame + ? &encoded_frame->key_frame_ + : &encoded_frame->sub_frame_; + *dst = candidates[i].info_; + GetEncodedData(&candidates[i].mem_, &dst->bitstream); + if (!is_key_frame) { + // Note: Previous dispose method only matters for non-keyframes. + // Also, we don't want to modify previous dispose method that was + // selected when a non key-frame was assumed. + const WebPMuxAnimDispose prev_dispose_method = + (best_idx == LL_DISP_NONE || best_idx == LOSSY_DISP_NONE) + ? WEBP_MUX_DISPOSE_NONE + : WEBP_MUX_DISPOSE_BACKGROUND; + SetPreviousDisposeMethod(enc, prev_dispose_method); + } + enc->prev_rect_ = candidates[i].rect_; // save for next frame. 
+ } else { + WebPMemoryWriterClear(&candidates[i].mem_); + candidates[i].evaluate_ = 0; + } + } + } +} + +// Depending on the configuration, tries different compressions +// (lossy/lossless), dispose methods, blending methods etc to encode the current +// frame and outputs the best one in 'encoded_frame'. +// 'frame_skipped' will be set to true if this frame should actually be skipped. +static WebPEncodingError SetFrame(WebPAnimEncoder* const enc, + const WebPConfig* const config, + int is_key_frame, + EncodedFrame* const encoded_frame, + int* const frame_skipped) { + int i; + WebPEncodingError error_code = VP8_ENC_OK; + const WebPPicture* const curr_canvas = &enc->curr_canvas_copy_; + const WebPPicture* const prev_canvas = &enc->prev_canvas_; + Candidate candidates[CANDIDATE_COUNT]; + const int is_lossless = config->lossless; + const int consider_lossless = is_lossless || enc->options_.allow_mixed; + const int consider_lossy = !is_lossless || enc->options_.allow_mixed; + const int is_first_frame = enc->is_first_frame_; + + // First frame cannot be skipped as there is no 'previous frame' to merge it + // to. So, empty rectangle is not allowed for the first frame. + const int empty_rect_allowed_none = !is_first_frame; + + // Even if there is exact pixel match between 'disposed previous canvas' and + // 'current canvas', we can't skip current frame, as there may not be exact + // pixel match between 'previous canvas' and 'current canvas'. So, we don't + // allow empty rectangle in this case. + const int empty_rect_allowed_bg = 0; + + // If current frame is a key-frame, dispose method of previous frame doesn't + // matter, so we don't try dispose to background. + // Also, if key-frame insertion is on, and previous frame could be picked as + // either a sub-frame or a key-frame, then we can't be sure about what frame + // rectangle would be disposed. In that case too, we don't try dispose to + // background. + const int dispose_bg_possible = + !is_key_frame && !enc->prev_candidate_undecided_; + + SubFrameParams dispose_none_params; + SubFrameParams dispose_bg_params; + + WebPConfig config_ll = *config; + WebPConfig config_lossy = *config; + config_ll.lossless = 1; + config_lossy.lossless = 0; + enc->last_config_ = *config; + enc->last_config_reversed_ = config->lossless ? config_lossy : config_ll; + *frame_skipped = 0; + + if (!SubFrameParamsInit(&dispose_none_params, 1, empty_rect_allowed_none) || + !SubFrameParamsInit(&dispose_bg_params, 0, empty_rect_allowed_bg)) { + return VP8_ENC_ERROR_INVALID_CONFIGURATION; + } + + memset(candidates, 0, sizeof(candidates)); + + // Change-rectangle assuming previous frame was DISPOSE_NONE. + if (!GetSubRects(prev_canvas, curr_canvas, is_key_frame, is_first_frame, + config_lossy.quality, &dispose_none_params)) { + error_code = VP8_ENC_ERROR_INVALID_CONFIGURATION; + goto Err; + } + + if ((consider_lossless && IsEmptyRect(&dispose_none_params.rect_ll_)) || + (consider_lossy && IsEmptyRect(&dispose_none_params.rect_lossy_))) { + // Don't encode the frame at all. Instead, the duration of the previous + // frame will be increased later. + assert(empty_rect_allowed_none); + *frame_skipped = 1; + goto End; + } + + if (dispose_bg_possible) { + // Change-rectangle assuming previous frame was DISPOSE_BACKGROUND. 
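+    // To evaluate that method, the previous canvas is copied, the previous
+    // frame's rectangle is cleared to transparent, and the change rectangle
+    // is recomputed against the disposed copy.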
+ WebPPicture* const prev_canvas_disposed = &enc->prev_canvas_disposed_; + WebPCopyPixels(prev_canvas, prev_canvas_disposed); + DisposeFrameRectangle(WEBP_MUX_DISPOSE_BACKGROUND, &enc->prev_rect_, + prev_canvas_disposed); + + if (!GetSubRects(prev_canvas_disposed, curr_canvas, is_key_frame, + is_first_frame, config_lossy.quality, + &dispose_bg_params)) { + error_code = VP8_ENC_ERROR_INVALID_CONFIGURATION; + goto Err; + } + assert(!IsEmptyRect(&dispose_bg_params.rect_ll_)); + assert(!IsEmptyRect(&dispose_bg_params.rect_lossy_)); + + if (enc->options_.minimize_size) { // Try both dispose methods. + dispose_bg_params.should_try_ = 1; + dispose_none_params.should_try_ = 1; + } else if ((is_lossless && + RectArea(&dispose_bg_params.rect_ll_) < + RectArea(&dispose_none_params.rect_ll_)) || + (!is_lossless && + RectArea(&dispose_bg_params.rect_lossy_) < + RectArea(&dispose_none_params.rect_lossy_))) { + dispose_bg_params.should_try_ = 1; // Pick DISPOSE_BACKGROUND. + dispose_none_params.should_try_ = 0; + } + } + + if (dispose_none_params.should_try_) { + error_code = GenerateCandidates( + enc, candidates, WEBP_MUX_DISPOSE_NONE, is_lossless, is_key_frame, + &dispose_none_params, &config_ll, &config_lossy); + if (error_code != VP8_ENC_OK) goto Err; + } + + if (dispose_bg_params.should_try_) { + assert(!enc->is_first_frame_); + assert(dispose_bg_possible); + error_code = GenerateCandidates( + enc, candidates, WEBP_MUX_DISPOSE_BACKGROUND, is_lossless, is_key_frame, + &dispose_bg_params, &config_ll, &config_lossy); + if (error_code != VP8_ENC_OK) goto Err; + } + + PickBestCandidate(enc, candidates, is_key_frame, encoded_frame); + + goto End; + + Err: + for (i = 0; i < CANDIDATE_COUNT; ++i) { + if (candidates[i].evaluate_) { + WebPMemoryWriterClear(&candidates[i].mem_); + } + } + + End: + SubFrameParamsFree(&dispose_none_params); + SubFrameParamsFree(&dispose_bg_params); + return error_code; +} + +// Calculate the penalty incurred if we encode given frame as a key frame +// instead of a sub-frame. +static int64_t KeyFramePenalty(const EncodedFrame* const encoded_frame) { + return ((int64_t)encoded_frame->key_frame_.bitstream.size - + encoded_frame->sub_frame_.bitstream.size); +} + +static int CacheFrame(WebPAnimEncoder* const enc, + const WebPConfig* const config) { + int ok = 0; + int frame_skipped = 0; + WebPEncodingError error_code = VP8_ENC_OK; + const size_t position = enc->count_; + EncodedFrame* const encoded_frame = GetFrame(enc, position); + + ++enc->count_; + + if (enc->is_first_frame_) { // Add this as a key-frame. + error_code = SetFrame(enc, config, 1, encoded_frame, &frame_skipped); + if (error_code != VP8_ENC_OK) goto End; + assert(frame_skipped == 0); // First frame can't be skipped, even if empty. + assert(position == 0 && enc->count_ == 1); + encoded_frame->is_key_frame_ = 1; + enc->flush_count_ = 0; + enc->count_since_key_frame_ = 0; + enc->prev_candidate_undecided_ = 0; + } else { + ++enc->count_since_key_frame_; + if (enc->count_since_key_frame_ <= enc->options_.kmin) { + // Add this as a frame rectangle. + error_code = SetFrame(enc, config, 0, encoded_frame, &frame_skipped); + if (error_code != VP8_ENC_OK) goto End; + if (frame_skipped) goto Skip; + encoded_frame->is_key_frame_ = 0; + enc->flush_count_ = enc->count_ - 1; + enc->prev_candidate_undecided_ = 0; + } else { + int64_t curr_delta; + FrameRectangle prev_rect_key, prev_rect_sub; + + // Add this as a frame rectangle to enc. 
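+      // From here on, the frame is encoded twice (as a sub-frame here and
+      // as a key-frame below); KeyFramePenalty() then compares the two
+      // bitstream sizes to pick the cheapest key-frame in the window.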
+ error_code = SetFrame(enc, config, 0, encoded_frame, &frame_skipped); + if (error_code != VP8_ENC_OK) goto End; + if (frame_skipped) goto Skip; + prev_rect_sub = enc->prev_rect_; + + + // Add this as a key-frame to enc, too. + error_code = SetFrame(enc, config, 1, encoded_frame, &frame_skipped); + if (error_code != VP8_ENC_OK) goto End; + assert(frame_skipped == 0); // Key-frame cannot be an empty rectangle. + prev_rect_key = enc->prev_rect_; + + // Analyze size difference of the two variants. + curr_delta = KeyFramePenalty(encoded_frame); + if (curr_delta <= enc->best_delta_) { // Pick this as the key-frame. + if (enc->keyframe_ != KEYFRAME_NONE) { + EncodedFrame* const old_keyframe = GetFrame(enc, enc->keyframe_); + assert(old_keyframe->is_key_frame_); + old_keyframe->is_key_frame_ = 0; + } + encoded_frame->is_key_frame_ = 1; + enc->prev_candidate_undecided_ = 1; + enc->keyframe_ = (int)position; + enc->best_delta_ = curr_delta; + enc->flush_count_ = enc->count_ - 1; // We can flush previous frames. + } else { + encoded_frame->is_key_frame_ = 0; + enc->prev_candidate_undecided_ = 0; + } + // Note: We need '>=' below because when kmin and kmax are both zero, + // count_since_key_frame will always be > kmax. + if (enc->count_since_key_frame_ >= enc->options_.kmax) { + enc->flush_count_ = enc->count_ - 1; + enc->count_since_key_frame_ = 0; + enc->keyframe_ = KEYFRAME_NONE; + enc->best_delta_ = DELTA_INFINITY; + } + if (!enc->prev_candidate_undecided_) { + enc->prev_rect_ = + encoded_frame->is_key_frame_ ? prev_rect_key : prev_rect_sub; + } + } + } + + // Update previous to previous and previous canvases for next call. + WebPCopyPixels(enc->curr_canvas_, &enc->prev_canvas_); + enc->is_first_frame_ = 0; + + Skip: + ok = 1; + ++enc->in_frame_count_; + + End: + if (!ok || frame_skipped) { + FrameRelease(encoded_frame); + // We reset some counters, as the frame addition failed/was skipped. + --enc->count_; + if (!enc->is_first_frame_) --enc->count_since_key_frame_; + if (!ok) { + MarkError2(enc, "ERROR adding frame. WebPEncodingError", error_code); + } + } + enc->curr_canvas_->error_code = error_code; // report error_code + assert(ok || error_code != VP8_ENC_OK); + return ok; +} + +static int FlushFrames(WebPAnimEncoder* const enc) { + while (enc->flush_count_ > 0) { + WebPMuxError err; + EncodedFrame* const curr = GetFrame(enc, 0); + const WebPMuxFrameInfo* const info = + curr->is_key_frame_ ? &curr->key_frame_ : &curr->sub_frame_; + assert(enc->mux_ != NULL); + err = WebPMuxPushFrame(enc->mux_, info, 1); + if (err != WEBP_MUX_OK) { + MarkError2(enc, "ERROR adding frame. WebPMuxError", err); + return 0; + } + if (enc->options_.verbose) { + fprintf(stderr, "INFO: Added frame. offset:%d,%d dispose:%d blend:%d\n", + info->x_offset, info->y_offset, info->dispose_method, + info->blend_method); + } + ++enc->out_frame_count_; + FrameRelease(curr); + ++enc->start_; + --enc->flush_count_; + --enc->count_; + if (enc->keyframe_ != KEYFRAME_NONE) --enc->keyframe_; + } + + if (enc->count_ == 1 && enc->start_ != 0) { + // Move enc->start to index 0. 
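+    // The swap parks the stale frame that occupied slot 0 at the old start
+    // index, where it is released, so its buffers are not leaked.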
+ const int enc_start_tmp = (int)enc->start_; + EncodedFrame temp = enc->encoded_frames_[0]; + enc->encoded_frames_[0] = enc->encoded_frames_[enc_start_tmp]; + enc->encoded_frames_[enc_start_tmp] = temp; + FrameRelease(&enc->encoded_frames_[enc_start_tmp]); + enc->start_ = 0; + } + return 1; +} + +#undef DELTA_INFINITY +#undef KEYFRAME_NONE + +int WebPAnimEncoderAdd(WebPAnimEncoder* enc, WebPPicture* frame, int timestamp, + const WebPConfig* encoder_config) { + WebPConfig config; + int ok; + + if (enc == NULL) { + return 0; + } + MarkNoError(enc); + + if (!enc->is_first_frame_) { + // Make sure timestamps are non-decreasing (integer wrap-around is OK). + const uint32_t prev_frame_duration = + (uint32_t)timestamp - enc->prev_timestamp_; + if (prev_frame_duration >= MAX_DURATION) { + if (frame != NULL) { + frame->error_code = VP8_ENC_ERROR_INVALID_CONFIGURATION; + } + MarkError(enc, "ERROR adding frame: timestamps must be non-decreasing"); + return 0; + } + if (!IncreasePreviousDuration(enc, (int)prev_frame_duration)) { + return 0; + } + } else { + enc->first_timestamp_ = timestamp; + } + + if (frame == NULL) { // Special: last call. + enc->got_null_frame_ = 1; + enc->prev_timestamp_ = timestamp; + return 1; + } + + if (frame->width != enc->canvas_width_ || + frame->height != enc->canvas_height_) { + frame->error_code = VP8_ENC_ERROR_INVALID_CONFIGURATION; + MarkError(enc, "ERROR adding frame: Invalid frame dimensions"); + return 0; + } + + if (!frame->use_argb) { // Convert frame from YUV(A) to ARGB. + if (enc->options_.verbose) { + fprintf(stderr, "WARNING: Converting frame from YUV(A) to ARGB format; " + "this incurs a small loss.\n"); + } + if (!WebPPictureYUVAToARGB(frame)) { + MarkError(enc, "ERROR converting frame from YUV(A) to ARGB"); + return 0; + } + } + + if (encoder_config != NULL) { + if (!WebPValidateConfig(encoder_config)) { + MarkError(enc, "ERROR adding frame: Invalid WebPConfig"); + return 0; + } + config = *encoder_config; + } else { + WebPConfigInit(&config); + config.lossless = 1; + } + assert(enc->curr_canvas_ == NULL); + enc->curr_canvas_ = frame; // Store reference. + assert(enc->curr_canvas_copy_modified_ == 1); + CopyCurrentCanvas(enc); + + ok = CacheFrame(enc, &config) && FlushFrames(enc); + + enc->curr_canvas_ = NULL; + enc->curr_canvas_copy_modified_ = 1; + if (ok) { + enc->prev_timestamp_ = timestamp; + } + return ok; +} + +// ----------------------------------------------------------------------------- +// Bitstream assembly. 
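+// Note: assembly drains any frames still cached (via FlushFrames()), stamps
+// the canvas size and animation parameters onto the mux, and, if only a
+// single frame was emitted, tries to rewrite the output as a plain still
+// image (see OptimizeSingleFrame() below).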
+ +static int DecodeFrameOntoCanvas(const WebPMuxFrameInfo* const frame, + WebPPicture* const canvas) { + const WebPData* const image = &frame->bitstream; + WebPPicture sub_image; + WebPDecoderConfig config; + WebPInitDecoderConfig(&config); + WebPUtilClearPic(canvas, NULL); + if (WebPGetFeatures(image->bytes, image->size, &config.input) != + VP8_STATUS_OK) { + return 0; + } + if (!WebPPictureView(canvas, frame->x_offset, frame->y_offset, + config.input.width, config.input.height, &sub_image)) { + return 0; + } + config.output.is_external_memory = 1; + config.output.colorspace = MODE_BGRA; + config.output.u.RGBA.rgba = (uint8_t*)sub_image.argb; + config.output.u.RGBA.stride = sub_image.argb_stride * 4; + config.output.u.RGBA.size = config.output.u.RGBA.stride * sub_image.height; + + if (WebPDecode(image->bytes, image->size, &config) != VP8_STATUS_OK) { + return 0; + } + return 1; +} + +static int FrameToFullCanvas(WebPAnimEncoder* const enc, + const WebPMuxFrameInfo* const frame, + WebPData* const full_image) { + WebPPicture* const canvas_buf = &enc->curr_canvas_copy_; + WebPMemoryWriter mem1, mem2; + WebPMemoryWriterInit(&mem1); + WebPMemoryWriterInit(&mem2); + + if (!DecodeFrameOntoCanvas(frame, canvas_buf)) goto Err; + if (!EncodeFrame(&enc->last_config_, canvas_buf, &mem1)) goto Err; + GetEncodedData(&mem1, full_image); + + if (enc->options_.allow_mixed) { + if (!EncodeFrame(&enc->last_config_reversed_, canvas_buf, &mem2)) goto Err; + if (mem2.size < mem1.size) { + GetEncodedData(&mem2, full_image); + WebPMemoryWriterClear(&mem1); + } else { + WebPMemoryWriterClear(&mem2); + } + } + return 1; + + Err: + WebPMemoryWriterClear(&mem1); + WebPMemoryWriterClear(&mem2); + return 0; +} + +// Convert a single-frame animation to a non-animated image if appropriate. +// TODO(urvang): Can we pick one of the two heuristically (based on frame +// rectangle and/or presence of alpha)? +static WebPMuxError OptimizeSingleFrame(WebPAnimEncoder* const enc, + WebPData* const webp_data) { + WebPMuxError err = WEBP_MUX_OK; + int canvas_width, canvas_height; + WebPMuxFrameInfo frame; + WebPData full_image; + WebPData webp_data2; + WebPMux* const mux = WebPMuxCreate(webp_data, 0); + if (mux == NULL) return WEBP_MUX_BAD_DATA; + assert(enc->out_frame_count_ == 1); + WebPDataInit(&frame.bitstream); + WebPDataInit(&full_image); + WebPDataInit(&webp_data2); + + err = WebPMuxGetFrame(mux, 1, &frame); + if (err != WEBP_MUX_OK) goto End; + if (frame.id != WEBP_CHUNK_ANMF) goto End; // Non-animation: nothing to do. + err = WebPMuxGetCanvasSize(mux, &canvas_width, &canvas_height); + if (err != WEBP_MUX_OK) goto End; + if (!FrameToFullCanvas(enc, &frame, &full_image)) { + err = WEBP_MUX_BAD_DATA; + goto End; + } + err = WebPMuxSetImage(mux, &full_image, 1); + if (err != WEBP_MUX_OK) goto End; + err = WebPMuxAssemble(mux, &webp_data2); + if (err != WEBP_MUX_OK) goto End; + + if (webp_data2.size < webp_data->size) { // Pick 'webp_data2' if smaller. 
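+    // Ownership of webp_data2's buffer moves into webp_data; re-initializing
+    // webp_data2 below keeps the WebPDataClear() in the End block from
+    // freeing the buffer that was just handed over.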
+ WebPDataClear(webp_data); + *webp_data = webp_data2; + WebPDataInit(&webp_data2); + } + + End: + WebPDataClear(&frame.bitstream); + WebPDataClear(&full_image); + WebPMuxDelete(mux); + WebPDataClear(&webp_data2); + return err; +} + +int WebPAnimEncoderAssemble(WebPAnimEncoder* enc, WebPData* webp_data) { + WebPMux* mux; + WebPMuxError err; + + if (enc == NULL) { + return 0; + } + MarkNoError(enc); + + if (webp_data == NULL) { + MarkError(enc, "ERROR assembling: NULL input"); + return 0; + } + + if (enc->in_frame_count_ == 0) { + MarkError(enc, "ERROR: No frames to assemble"); + return 0; + } + + if (!enc->got_null_frame_ && enc->in_frame_count_ > 1 && enc->count_ > 0) { + // set duration of the last frame to be avg of durations of previous frames. + const double delta_time = + (uint32_t)enc->prev_timestamp_ - enc->first_timestamp_; + const int average_duration = (int)(delta_time / (enc->in_frame_count_ - 1)); + if (!IncreasePreviousDuration(enc, average_duration)) { + return 0; + } + } + + // Flush any remaining frames. + enc->flush_count_ = enc->count_; + if (!FlushFrames(enc)) { + return 0; + } + + // Set definitive canvas size. + mux = enc->mux_; + err = WebPMuxSetCanvasSize(mux, enc->canvas_width_, enc->canvas_height_); + if (err != WEBP_MUX_OK) goto Err; + + err = WebPMuxSetAnimationParams(mux, &enc->options_.anim_params); + if (err != WEBP_MUX_OK) goto Err; + + // Assemble into a WebP bitstream. + err = WebPMuxAssemble(mux, webp_data); + if (err != WEBP_MUX_OK) goto Err; + + if (enc->out_frame_count_ == 1) { + err = OptimizeSingleFrame(enc, webp_data); + if (err != WEBP_MUX_OK) goto Err; + } + return 1; + + Err: + MarkError2(enc, "ERROR assembling WebP", err); + return 0; +} + +const char* WebPAnimEncoderGetError(WebPAnimEncoder* enc) { + if (enc == NULL) return NULL; + return enc->error_str_; +} + +// ----------------------------------------------------------------------------- diff --git a/vendor/github.com/sizeofint/webpanimation/mux_animi.h b/vendor/github.com/sizeofint/webpanimation/mux_animi.h new file mode 100644 index 00000000..a9a6735f --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/mux_animi.h @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Internal header for animation related functions. +// +// Author: Hui Su (huisu@google.com) + +#ifndef WEBP_MUX_ANIMI_H_ +#define WEBP_MUX_ANIMI_H_ + +#include "webp_mux.h" + +#ifdef __cplusplus +extern "C" { +#endif + +// Picks the optimal rectangle between two pictures, starting with initial +// values of offsets and dimensions that are passed in. The initial +// values will be clipped, if necessary, to make sure the rectangle is +// within the canvas. "use_argb" must be true for both pictures. +// Parameters: +// prev_canvas, curr_canvas - (in) two input pictures to compare. +// is_lossless, quality - (in) encoding settings. +// x_offset, y_offset, width, height - (in/out) rectangle between the two +// input pictures. +// Returns true on success. 
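+// A minimal usage sketch (hypothetical variable names; the rectangle is
+// typically seeded with the full canvas before refinement):
+//
+//   int x = 0, y = 0, w = curr->width, h = curr->height;
+//   if (WebPAnimEncoderRefineRect(prev, curr, /*is_lossless=*/1, 75.f,
+//                                 &x, &y, &w, &h)) {
+//     // x, y, w, h now bound the changed pixels, snapped to even offsets.
+//   }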
+int WebPAnimEncoderRefineRect(
+    const struct WebPPicture* const prev_canvas,
+    const struct WebPPicture* const curr_canvas,
+    int is_lossless, float quality, int* const x_offset, int* const y_offset,
+    int* const width, int* const height);
+
+#ifdef __cplusplus
+}    // extern "C"
+#endif
+
+#endif  // WEBP_MUX_ANIMI_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/mux_muxedit.c b/vendor/github.com/sizeofint/webpanimation/mux_muxedit.c
new file mode 100644
index 00000000..2dea1a65
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/mux_muxedit.c
@@ -0,0 +1,657 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Set and delete APIs for mux.
+//
+// Authors: Urvang (urvang@google.com)
+//          Vikas (vikasa@google.com)
+
+#include <assert.h>
+#include "mux_muxi.h"
+#include "utils_utils.h"
+
+//------------------------------------------------------------------------------
+// Life of a mux object.
+
+static void MuxInit(WebPMux* const mux) {
+  assert(mux != NULL);
+  memset(mux, 0, sizeof(*mux));
+  mux->canvas_width_ = 0;   // just to be explicit
+  mux->canvas_height_ = 0;
+}
+
+WebPMux* WebPNewInternal(int version) {
+  if (WEBP_ABI_IS_INCOMPATIBLE(version, WEBP_MUX_ABI_VERSION)) {
+    return NULL;
+  } else {
+    WebPMux* const mux = (WebPMux*)WebPSafeMalloc(1ULL, sizeof(WebPMux));
+    if (mux != NULL) MuxInit(mux);
+    return mux;
+  }
+}
+
+// Delete all images in 'wpi_list'.
+static void DeleteAllImages(WebPMuxImage** const wpi_list) {
+  while (*wpi_list != NULL) {
+    *wpi_list = MuxImageDelete(*wpi_list);
+  }
+}
+
+static void MuxRelease(WebPMux* const mux) {
+  assert(mux != NULL);
+  DeleteAllImages(&mux->images_);
+  ChunkListDelete(&mux->vp8x_);
+  ChunkListDelete(&mux->iccp_);
+  ChunkListDelete(&mux->anim_);
+  ChunkListDelete(&mux->exif_);
+  ChunkListDelete(&mux->xmp_);
+  ChunkListDelete(&mux->unknown_);
+}
+
+void WebPMuxDelete(WebPMux* mux) {
+  if (mux != NULL) {
+    MuxRelease(mux);
+    WebPSafeFree(mux);
+  }
+}
+
+//------------------------------------------------------------------------------
+// Helper method(s).
+
+// Handy MACRO, makes MuxSet() very symmetric to MuxGet().
+#define SWITCH_ID_LIST(INDEX, LIST)                        \
+  if (idx == (INDEX)) {                                    \
+    err = ChunkAssignData(&chunk, data, copy_data, tag);   \
+    if (err == WEBP_MUX_OK) {                              \
+      err = ChunkSetHead(&chunk, (LIST));                  \
+    }                                                      \
+    return err;                                            \
+  }
+
+static WebPMuxError MuxSet(WebPMux* const mux, uint32_t tag,
+                           const WebPData* const data, int copy_data) {
+  WebPChunk chunk;
+  WebPMuxError err = WEBP_MUX_NOT_FOUND;
+  const CHUNK_INDEX idx = ChunkGetIndexFromTag(tag);
+  assert(mux != NULL);
+  assert(!IsWPI(kChunks[idx].id));
+
+  ChunkInit(&chunk);
+  SWITCH_ID_LIST(IDX_VP8X, &mux->vp8x_);
+  SWITCH_ID_LIST(IDX_ICCP, &mux->iccp_);
+  SWITCH_ID_LIST(IDX_ANIM, &mux->anim_);
+  SWITCH_ID_LIST(IDX_EXIF, &mux->exif_);
+  SWITCH_ID_LIST(IDX_XMP, &mux->xmp_);
+  SWITCH_ID_LIST(IDX_UNKNOWN, &mux->unknown_);
+  return err;
+}
+#undef SWITCH_ID_LIST
+
+// Create data for frame given image data, offsets and duration.
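+// The ANMF payload written below is 16 bytes, little-endian:
+//   bytes  0..2   x_offset / 2      bytes  3..5   y_offset / 2
+//   bytes  6..8   width - 1         bytes  9..11  height - 1
+//   bytes 12..14  duration (ms)     byte   15     flags (bit 0 set:
+//                 dispose-to-background, bit 1 set: no-blend)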
+static WebPMuxError CreateFrameData( + int width, int height, const WebPMuxFrameInfo* const info, + WebPData* const frame) { + uint8_t* frame_bytes; + const size_t frame_size = kChunks[IDX_ANMF].size; + + assert(width > 0 && height > 0 && info->duration >= 0); + assert(info->dispose_method == (info->dispose_method & 1)); + // Note: assertion on upper bounds is done in PutLE24(). + + frame_bytes = (uint8_t*)WebPSafeMalloc(1ULL, frame_size); + if (frame_bytes == NULL) return WEBP_MUX_MEMORY_ERROR; + + PutLE24(frame_bytes + 0, info->x_offset / 2); + PutLE24(frame_bytes + 3, info->y_offset / 2); + + PutLE24(frame_bytes + 6, width - 1); + PutLE24(frame_bytes + 9, height - 1); + PutLE24(frame_bytes + 12, info->duration); + frame_bytes[15] = + (info->blend_method == WEBP_MUX_NO_BLEND ? 2 : 0) | + (info->dispose_method == WEBP_MUX_DISPOSE_BACKGROUND ? 1 : 0); + + frame->bytes = frame_bytes; + frame->size = frame_size; + return WEBP_MUX_OK; +} + +// Outputs image data given a bitstream. The bitstream can either be a +// single-image WebP file or raw VP8/VP8L data. +// Also outputs 'is_lossless' to be true if the given bitstream is lossless. +static WebPMuxError GetImageData(const WebPData* const bitstream, + WebPData* const image, WebPData* const alpha, + int* const is_lossless) { + WebPDataInit(alpha); // Default: no alpha. + if (bitstream->size < TAG_SIZE || + memcmp(bitstream->bytes, "RIFF", TAG_SIZE)) { + // It is NOT webp file data. Return input data as is. + *image = *bitstream; + } else { + // It is webp file data. Extract image data from it. + const WebPMuxImage* wpi; + WebPMux* const mux = WebPMuxCreate(bitstream, 0); + if (mux == NULL) return WEBP_MUX_BAD_DATA; + wpi = mux->images_; + assert(wpi != NULL && wpi->img_ != NULL); + *image = wpi->img_->data_; + if (wpi->alpha_ != NULL) { + *alpha = wpi->alpha_->data_; + } + WebPMuxDelete(mux); + } + *is_lossless = VP8LCheckSignature(image->bytes, image->size); + return WEBP_MUX_OK; +} + +static WebPMuxError DeleteChunks(WebPChunk** chunk_list, uint32_t tag) { + WebPMuxError err = WEBP_MUX_NOT_FOUND; + assert(chunk_list); + while (*chunk_list) { + WebPChunk* const chunk = *chunk_list; + if (chunk->tag_ == tag) { + *chunk_list = ChunkDelete(chunk); + err = WEBP_MUX_OK; + } else { + chunk_list = &chunk->next_; + } + } + return err; +} + +static WebPMuxError MuxDeleteAllNamedData(WebPMux* const mux, uint32_t tag) { + const WebPChunkId id = ChunkGetIdFromTag(tag); + assert(mux != NULL); + if (IsWPI(id)) return WEBP_MUX_INVALID_ARGUMENT; + return DeleteChunks(MuxGetChunkListFromId(mux, id), tag); +} + +//------------------------------------------------------------------------------ +// Set API(s). + +WebPMuxError WebPMuxSetChunk(WebPMux* mux, const char fourcc[4], + const WebPData* chunk_data, int copy_data) { + uint32_t tag; + WebPMuxError err; + if (mux == NULL || fourcc == NULL || chunk_data == NULL || + chunk_data->bytes == NULL || chunk_data->size > MAX_CHUNK_PAYLOAD) { + return WEBP_MUX_INVALID_ARGUMENT; + } + tag = ChunkGetTagFromFourCC(fourcc); + + // Delete existing chunk(s) with the same 'fourcc'. + err = MuxDeleteAllNamedData(mux, tag); + if (err != WEBP_MUX_OK && err != WEBP_MUX_NOT_FOUND) return err; + + // Add the given chunk. + return MuxSet(mux, tag, chunk_data, copy_data); +} + +// Creates a chunk from given 'data' and sets it as 1st chunk in 'chunk_list'. 
+static WebPMuxError AddDataToChunkList( + const WebPData* const data, int copy_data, uint32_t tag, + WebPChunk** chunk_list) { + WebPChunk chunk; + WebPMuxError err; + ChunkInit(&chunk); + err = ChunkAssignData(&chunk, data, copy_data, tag); + if (err != WEBP_MUX_OK) goto Err; + err = ChunkSetHead(&chunk, chunk_list); + if (err != WEBP_MUX_OK) goto Err; + return WEBP_MUX_OK; + Err: + ChunkRelease(&chunk); + return err; +} + +// Extracts image & alpha data from the given bitstream and then sets wpi.alpha_ +// and wpi.img_ appropriately. +static WebPMuxError SetAlphaAndImageChunks( + const WebPData* const bitstream, int copy_data, WebPMuxImage* const wpi) { + int is_lossless = 0; + WebPData image, alpha; + WebPMuxError err = GetImageData(bitstream, &image, &alpha, &is_lossless); + const int image_tag = + is_lossless ? kChunks[IDX_VP8L].tag : kChunks[IDX_VP8].tag; + if (err != WEBP_MUX_OK) return err; + if (alpha.bytes != NULL) { + err = AddDataToChunkList(&alpha, copy_data, kChunks[IDX_ALPHA].tag, + &wpi->alpha_); + if (err != WEBP_MUX_OK) return err; + } + err = AddDataToChunkList(&image, copy_data, image_tag, &wpi->img_); + if (err != WEBP_MUX_OK) return err; + return MuxImageFinalize(wpi) ? WEBP_MUX_OK : WEBP_MUX_INVALID_ARGUMENT; +} + +WebPMuxError WebPMuxSetImage(WebPMux* mux, const WebPData* bitstream, + int copy_data) { + WebPMuxImage wpi; + WebPMuxError err; + + // Sanity checks. + if (mux == NULL || bitstream == NULL || bitstream->bytes == NULL || + bitstream->size > MAX_CHUNK_PAYLOAD) { + return WEBP_MUX_INVALID_ARGUMENT; + } + + if (mux->images_ != NULL) { + // Only one 'simple image' can be added in mux. So, remove present images. + DeleteAllImages(&mux->images_); + } + + MuxImageInit(&wpi); + err = SetAlphaAndImageChunks(bitstream, copy_data, &wpi); + if (err != WEBP_MUX_OK) goto Err; + + // Add this WebPMuxImage to mux. + err = MuxImagePush(&wpi, &mux->images_); + if (err != WEBP_MUX_OK) goto Err; + + // All is well. + return WEBP_MUX_OK; + + Err: // Something bad happened. + MuxImageRelease(&wpi); + return err; +} + +WebPMuxError WebPMuxPushFrame(WebPMux* mux, const WebPMuxFrameInfo* info, + int copy_data) { + WebPMuxImage wpi; + WebPMuxError err; + + // Sanity checks. + if (mux == NULL || info == NULL) return WEBP_MUX_INVALID_ARGUMENT; + + if (info->id != WEBP_CHUNK_ANMF) return WEBP_MUX_INVALID_ARGUMENT; + + if (info->bitstream.bytes == NULL || + info->bitstream.size > MAX_CHUNK_PAYLOAD) { + return WEBP_MUX_INVALID_ARGUMENT; + } + + if (mux->images_ != NULL) { + const WebPMuxImage* const image = mux->images_; + const uint32_t image_id = (image->header_ != NULL) ? + ChunkGetIdFromTag(image->header_->tag_) : WEBP_CHUNK_IMAGE; + if (image_id != info->id) { + return WEBP_MUX_INVALID_ARGUMENT; // Conflicting frame types. + } + } + + MuxImageInit(&wpi); + err = SetAlphaAndImageChunks(&info->bitstream, copy_data, &wpi); + if (err != WEBP_MUX_OK) goto Err; + assert(wpi.img_ != NULL); // As SetAlphaAndImageChunks() was successful. + + { + WebPData frame; + const uint32_t tag = kChunks[IDX_ANMF].tag; + WebPMuxFrameInfo tmp = *info; + tmp.x_offset &= ~1; // Snap offsets to even. 
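+ // (y_offset is snapped the same way below: ANMF stores offsets divided
+ // by 2 -- see CreateFrameData() above -- so odd offsets cannot be
+ // represented.)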
+ tmp.y_offset &= ~1; + if (tmp.x_offset < 0 || tmp.x_offset >= MAX_POSITION_OFFSET || + tmp.y_offset < 0 || tmp.y_offset >= MAX_POSITION_OFFSET || + (tmp.duration < 0 || tmp.duration >= MAX_DURATION) || + tmp.dispose_method != (tmp.dispose_method & 1)) { + err = WEBP_MUX_INVALID_ARGUMENT; + goto Err; + } + err = CreateFrameData(wpi.width_, wpi.height_, &tmp, &frame); + if (err != WEBP_MUX_OK) goto Err; + // Add frame chunk (with copy_data = 1). + err = AddDataToChunkList(&frame, 1, tag, &wpi.header_); + WebPDataClear(&frame); // frame owned by wpi.header_ now. + if (err != WEBP_MUX_OK) goto Err; + } + + // Add this WebPMuxImage to mux. + err = MuxImagePush(&wpi, &mux->images_); + if (err != WEBP_MUX_OK) goto Err; + + // All is well. + return WEBP_MUX_OK; + + Err: // Something bad happened. + MuxImageRelease(&wpi); + return err; +} + +WebPMuxError WebPMuxSetAnimationParams(WebPMux* mux, + const WebPMuxAnimParams* params) { + WebPMuxError err; + uint8_t data[ANIM_CHUNK_SIZE]; + const WebPData anim = { data, ANIM_CHUNK_SIZE }; + + if (mux == NULL || params == NULL) return WEBP_MUX_INVALID_ARGUMENT; + if (params->loop_count < 0 || params->loop_count >= MAX_LOOP_COUNT) { + return WEBP_MUX_INVALID_ARGUMENT; + } + + // Delete any existing ANIM chunk(s). + err = MuxDeleteAllNamedData(mux, kChunks[IDX_ANIM].tag); + if (err != WEBP_MUX_OK && err != WEBP_MUX_NOT_FOUND) return err; + + // Set the animation parameters. + PutLE32(data, params->bgcolor); + PutLE16(data + 4, params->loop_count); + return MuxSet(mux, kChunks[IDX_ANIM].tag, &anim, 1); +} + +WebPMuxError WebPMuxSetCanvasSize(WebPMux* mux, + int width, int height) { + WebPMuxError err; + if (mux == NULL) { + return WEBP_MUX_INVALID_ARGUMENT; + } + if (width < 0 || height < 0 || + width > MAX_CANVAS_SIZE || height > MAX_CANVAS_SIZE) { + return WEBP_MUX_INVALID_ARGUMENT; + } + if (width * (uint64_t)height >= MAX_IMAGE_AREA) { + return WEBP_MUX_INVALID_ARGUMENT; + } + if ((width * height) == 0 && (width | height) != 0) { + // one of width / height is zero, but not both -> invalid! + return WEBP_MUX_INVALID_ARGUMENT; + } + // If we already assembled a VP8X chunk, invalidate it. + err = MuxDeleteAllNamedData(mux, kChunks[IDX_VP8X].tag); + if (err != WEBP_MUX_OK && err != WEBP_MUX_NOT_FOUND) return err; + + mux->canvas_width_ = width; + mux->canvas_height_ = height; + return WEBP_MUX_OK; +} + +//------------------------------------------------------------------------------ +// Delete API(s). + +WebPMuxError WebPMuxDeleteChunk(WebPMux* mux, const char fourcc[4]) { + if (mux == NULL || fourcc == NULL) return WEBP_MUX_INVALID_ARGUMENT; + return MuxDeleteAllNamedData(mux, ChunkGetTagFromFourCC(fourcc)); +} + +WebPMuxError WebPMuxDeleteFrame(WebPMux* mux, uint32_t nth) { + if (mux == NULL) return WEBP_MUX_INVALID_ARGUMENT; + return MuxImageDeleteNth(&mux->images_, nth); +} + +//------------------------------------------------------------------------------ +// Assembly of the WebP RIFF file. 
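+// For reference, WebPMuxAssemble() emits chunks in this fixed order:
+// RIFF header ("RIFF" <size> "WEBP"), then VP8X, ICCP, ANIM, the image/frame
+// list (ANMF [ALPH] VP8|VP8L per frame), EXIF, XMP and, last, any unknown
+// chunks.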
+ +static WebPMuxError GetFrameInfo( + const WebPChunk* const frame_chunk, + int* const x_offset, int* const y_offset, int* const duration) { + const WebPData* const data = &frame_chunk->data_; + const size_t expected_data_size = ANMF_CHUNK_SIZE; + assert(frame_chunk->tag_ == kChunks[IDX_ANMF].tag); + assert(frame_chunk != NULL); + if (data->size != expected_data_size) return WEBP_MUX_INVALID_ARGUMENT; + + *x_offset = 2 * GetLE24(data->bytes + 0); + *y_offset = 2 * GetLE24(data->bytes + 3); + *duration = GetLE24(data->bytes + 12); + return WEBP_MUX_OK; +} + +static WebPMuxError GetImageInfo(const WebPMuxImage* const wpi, + int* const x_offset, int* const y_offset, + int* const duration, + int* const width, int* const height) { + const WebPChunk* const frame_chunk = wpi->header_; + WebPMuxError err; + assert(wpi != NULL); + assert(frame_chunk != NULL); + + // Get offsets and duration from ANMF chunk. + err = GetFrameInfo(frame_chunk, x_offset, y_offset, duration); + if (err != WEBP_MUX_OK) return err; + + // Get width and height from VP8/VP8L chunk. + if (width != NULL) *width = wpi->width_; + if (height != NULL) *height = wpi->height_; + return WEBP_MUX_OK; +} + +// Returns the tightest dimension for the canvas considering the image list. +static WebPMuxError GetAdjustedCanvasSize(const WebPMux* const mux, + int* const width, int* const height) { + WebPMuxImage* wpi = NULL; + assert(mux != NULL); + assert(width != NULL && height != NULL); + + wpi = mux->images_; + assert(wpi != NULL); + assert(wpi->img_ != NULL); + + if (wpi->next_ != NULL) { + int max_x = 0, max_y = 0; + // if we have a chain of wpi's, header_ is necessarily set + assert(wpi->header_ != NULL); + // Aggregate the bounding box for animation frames. + for (; wpi != NULL; wpi = wpi->next_) { + int x_offset = 0, y_offset = 0, duration = 0, w = 0, h = 0; + const WebPMuxError err = GetImageInfo(wpi, &x_offset, &y_offset, + &duration, &w, &h); + const int max_x_pos = x_offset + w; + const int max_y_pos = y_offset + h; + if (err != WEBP_MUX_OK) return err; + assert(x_offset < MAX_POSITION_OFFSET); + assert(y_offset < MAX_POSITION_OFFSET); + + if (max_x_pos > max_x) max_x = max_x_pos; + if (max_y_pos > max_y) max_y = max_y_pos; + } + *width = max_x; + *height = max_y; + } else { + // For a single image, canvas dimensions are same as image dimensions. + *width = wpi->width_; + *height = wpi->height_; + } + return WEBP_MUX_OK; +} + +// VP8X format: +// Total Size : 10, +// Flags : 4 bytes, +// Width : 3 bytes, +// Height : 3 bytes. +static WebPMuxError CreateVP8XChunk(WebPMux* const mux) { + WebPMuxError err = WEBP_MUX_OK; + uint32_t flags = 0; + int width = 0; + int height = 0; + uint8_t data[VP8X_CHUNK_SIZE]; + const WebPData vp8x = { data, VP8X_CHUNK_SIZE }; + const WebPMuxImage* images = NULL; + + assert(mux != NULL); + images = mux->images_; // First image. + if (images == NULL || images->img_ == NULL || + images->img_->data_.bytes == NULL) { + return WEBP_MUX_INVALID_ARGUMENT; + } + + // If VP8X chunk(s) is(are) already present, remove them (and later add new + // VP8X chunk with updated flags). + err = MuxDeleteAllNamedData(mux, kChunks[IDX_VP8X].tag); + if (err != WEBP_MUX_OK && err != WEBP_MUX_NOT_FOUND) return err; + + // Set flags. 
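+ // VP8X feature bits, as computed below: ICCP/EXIF/XMP when the matching
+ // chunk carries data, ANIMATION when the first image has an ANMF header,
+ // ALPHA when any image carries an alpha channel.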
+ if (mux->iccp_ != NULL && mux->iccp_->data_.bytes != NULL) { + flags |= ICCP_FLAG; + } + if (mux->exif_ != NULL && mux->exif_->data_.bytes != NULL) { + flags |= EXIF_FLAG; + } + if (mux->xmp_ != NULL && mux->xmp_->data_.bytes != NULL) { + flags |= XMP_FLAG; + } + if (images->header_ != NULL) { + if (images->header_->tag_ == kChunks[IDX_ANMF].tag) { + // This is an image with animation. + flags |= ANIMATION_FLAG; + } + } + if (MuxImageCount(images, WEBP_CHUNK_ALPHA) > 0) { + flags |= ALPHA_FLAG; // Some images have an alpha channel. + } + + err = GetAdjustedCanvasSize(mux, &width, &height); + if (err != WEBP_MUX_OK) return err; + + if (width <= 0 || height <= 0) { + return WEBP_MUX_INVALID_ARGUMENT; + } + if (width > MAX_CANVAS_SIZE || height > MAX_CANVAS_SIZE) { + return WEBP_MUX_INVALID_ARGUMENT; + } + + if (mux->canvas_width_ != 0 || mux->canvas_height_ != 0) { + if (width > mux->canvas_width_ || height > mux->canvas_height_) { + return WEBP_MUX_INVALID_ARGUMENT; + } + width = mux->canvas_width_; + height = mux->canvas_height_; + } + + if (flags == 0 && mux->unknown_ == NULL) { + // For simple file format, VP8X chunk should not be added. + return WEBP_MUX_OK; + } + + if (MuxHasAlpha(images)) { + // This means some frames explicitly/implicitly contain alpha. + // Note: This 'flags' update must NOT be done for a lossless image + // without a VP8X chunk! + flags |= ALPHA_FLAG; + } + + PutLE32(data + 0, flags); // VP8X chunk flags. + PutLE24(data + 4, width - 1); // canvas width. + PutLE24(data + 7, height - 1); // canvas height. + + return MuxSet(mux, kChunks[IDX_VP8X].tag, &vp8x, 1); +} + +// Cleans up 'mux' by removing any unnecessary chunks. +static WebPMuxError MuxCleanup(WebPMux* const mux) { + int num_frames; + int num_anim_chunks; + + // If we have an image with a single frame, and its rectangle + // covers the whole canvas, convert it to a non-animated image + // (to avoid writing ANMF chunk unnecessarily). + WebPMuxError err = WebPMuxNumChunks(mux, kChunks[IDX_ANMF].id, &num_frames); + if (err != WEBP_MUX_OK) return err; + if (num_frames == 1) { + WebPMuxImage* frame = NULL; + err = MuxImageGetNth((const WebPMuxImage**)&mux->images_, 1, &frame); + assert(err == WEBP_MUX_OK); // We know that one frame does exist. + assert(frame != NULL); + if (frame->header_ != NULL && + ((mux->canvas_width_ == 0 && mux->canvas_height_ == 0) || + (frame->width_ == mux->canvas_width_ && + frame->height_ == mux->canvas_height_))) { + assert(frame->header_->tag_ == kChunks[IDX_ANMF].tag); + ChunkDelete(frame->header_); // Removes ANMF chunk. + frame->header_ = NULL; + num_frames = 0; + } + } + // Remove ANIM chunk if this is a non-animated image. + err = WebPMuxNumChunks(mux, kChunks[IDX_ANIM].id, &num_anim_chunks); + if (err != WEBP_MUX_OK) return err; + if (num_anim_chunks >= 1 && num_frames == 0) { + err = MuxDeleteAllNamedData(mux, kChunks[IDX_ANIM].tag); + if (err != WEBP_MUX_OK) return err; + } + return WEBP_MUX_OK; +} + +// Total size of a list of images. +static size_t ImageListDiskSize(const WebPMuxImage* wpi_list) { + size_t size = 0; + while (wpi_list != NULL) { + size += MuxImageDiskSize(wpi_list); + wpi_list = wpi_list->next_; + } + return size; +} + +// Write out the given list of images into 'dst'. 
+static uint8_t* ImageListEmit(const WebPMuxImage* wpi_list, uint8_t* dst) {
+ while (wpi_list != NULL) {
+ dst = MuxImageEmit(wpi_list, dst);
+ wpi_list = wpi_list->next_;
+ }
+ return dst;
+}
+
+WebPMuxError WebPMuxAssemble(WebPMux* mux, WebPData* assembled_data) {
+ size_t size = 0;
+ uint8_t* data = NULL;
+ uint8_t* dst = NULL;
+ WebPMuxError err;
+
+ if (assembled_data == NULL) {
+ return WEBP_MUX_INVALID_ARGUMENT;
+ }
+ // Clean up returned data, in case something goes wrong.
+ memset(assembled_data, 0, sizeof(*assembled_data));
+
+ if (mux == NULL) {
+ return WEBP_MUX_INVALID_ARGUMENT;
+ }
+
+ // Finalize mux.
+ err = MuxCleanup(mux);
+ if (err != WEBP_MUX_OK) return err;
+ err = CreateVP8XChunk(mux);
+ if (err != WEBP_MUX_OK) return err;
+
+ // Allocate data.
+ size = ChunkListDiskSize(mux->vp8x_) + ChunkListDiskSize(mux->iccp_)
+ + ChunkListDiskSize(mux->anim_) + ImageListDiskSize(mux->images_)
+ + ChunkListDiskSize(mux->exif_) + ChunkListDiskSize(mux->xmp_)
+ + ChunkListDiskSize(mux->unknown_) + RIFF_HEADER_SIZE;
+
+ data = (uint8_t*)WebPSafeMalloc(1ULL, size);
+ if (data == NULL) return WEBP_MUX_MEMORY_ERROR;
+
+ // Emit header & chunks.
+ dst = MuxEmitRiffHeader(data, size);
+ dst = ChunkListEmit(mux->vp8x_, dst);
+ dst = ChunkListEmit(mux->iccp_, dst);
+ dst = ChunkListEmit(mux->anim_, dst);
+ dst = ImageListEmit(mux->images_, dst);
+ dst = ChunkListEmit(mux->exif_, dst);
+ dst = ChunkListEmit(mux->xmp_, dst);
+ dst = ChunkListEmit(mux->unknown_, dst);
+ assert(dst == data + size);
+
+ // Validate mux.
+ err = MuxValidate(mux);
+ if (err != WEBP_MUX_OK) {
+ WebPSafeFree(data);
+ data = NULL;
+ size = 0;
+ }
+
+ // Finalize data.
+ assembled_data->bytes = data;
+ assembled_data->size = size;
+
+ return err;
+}
+
+//------------------------------------------------------------------------------
diff --git a/vendor/github.com/sizeofint/webpanimation/mux_muxi.h b/vendor/github.com/sizeofint/webpanimation/mux_muxi.h
new file mode 100644
index 00000000..e6532304
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/mux_muxi.h
@@ -0,0 +1,234 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Internal header for mux library.
+//
+// Author: Urvang (urvang@google.com)
+
+#ifndef WEBP_MUX_MUXI_H_
+#define WEBP_MUX_MUXI_H_
+
+#include <assert.h>
+#include <stddef.h>
+#include "dec_vp8i_dec.h"
+#include "dec_vp8li_dec.h"
+#include "webp_mux.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+//------------------------------------------------------------------------------
+// Defines and constants.
+
+#define MUX_MAJ_VERSION 1
+#define MUX_MIN_VERSION 2
+#define MUX_REV_VERSION 0
+
+// Chunk object.
+typedef struct WebPChunk WebPChunk;
+struct WebPChunk {
+ uint32_t tag_;
+ int owner_; // True if *data_ memory is owned internally.
+ // VP8X, ANIM, and other internally created chunks
+ // like ANMF are always owned.
+ WebPData data_;
+ WebPChunk* next_;
+};
+
+// MuxImage object. Store a full WebP image (including ANMF chunk, ALPH
+// chunk and VP8/VP8L chunk),
+typedef struct WebPMuxImage WebPMuxImage;
+struct WebPMuxImage {
+ WebPChunk* header_; // Corresponds to WEBP_CHUNK_ANMF.
+ WebPChunk* alpha_; // Corresponds to WEBP_CHUNK_ALPHA. + WebPChunk* img_; // Corresponds to WEBP_CHUNK_IMAGE. + WebPChunk* unknown_; // Corresponds to WEBP_CHUNK_UNKNOWN. + int width_; + int height_; + int has_alpha_; // Through ALPH chunk or as part of VP8L. + int is_partial_; // True if only some of the chunks are filled. + WebPMuxImage* next_; +}; + +// Main mux object. Stores data chunks. +struct WebPMux { + WebPMuxImage* images_; + WebPChunk* iccp_; + WebPChunk* exif_; + WebPChunk* xmp_; + WebPChunk* anim_; + WebPChunk* vp8x_; + + WebPChunk* unknown_; + int canvas_width_; + int canvas_height_; +}; + +// CHUNK_INDEX enum: used for indexing within 'kChunks' (defined below) only. +// Note: the reason for having two enums ('WebPChunkId' and 'CHUNK_INDEX') is to +// allow two different chunks to have the same id (e.g. WebPChunkId +// 'WEBP_CHUNK_IMAGE' can correspond to CHUNK_INDEX 'IDX_VP8' or 'IDX_VP8L'). +typedef enum { + IDX_VP8X = 0, + IDX_ICCP, + IDX_ANIM, + IDX_ANMF, + IDX_ALPHA, + IDX_VP8, + IDX_VP8L, + IDX_EXIF, + IDX_XMP, + IDX_UNKNOWN, + + IDX_NIL, + IDX_LAST_CHUNK +} CHUNK_INDEX; + +#define NIL_TAG 0x00000000u // To signal void chunk. + +typedef struct { + uint32_t tag; + WebPChunkId id; + uint32_t size; +} ChunkInfo; + +extern const ChunkInfo kChunks[IDX_LAST_CHUNK]; + +//------------------------------------------------------------------------------ +// Chunk object management. + +// Initialize. +void ChunkInit(WebPChunk* const chunk); + +// Get chunk index from chunk tag. Returns IDX_UNKNOWN if not found. +CHUNK_INDEX ChunkGetIndexFromTag(uint32_t tag); + +// Get chunk id from chunk tag. Returns WEBP_CHUNK_UNKNOWN if not found. +WebPChunkId ChunkGetIdFromTag(uint32_t tag); + +// Convert a fourcc string to a tag. +uint32_t ChunkGetTagFromFourCC(const char fourcc[4]); + +// Get chunk index from fourcc. Returns IDX_UNKNOWN if given fourcc is unknown. +CHUNK_INDEX ChunkGetIndexFromFourCC(const char fourcc[4]); + +// Search for nth chunk with given 'tag' in the chunk list. +// nth = 0 means "last of the list". +WebPChunk* ChunkSearchList(WebPChunk* first, uint32_t nth, uint32_t tag); + +// Fill the chunk with the given data. +WebPMuxError ChunkAssignData(WebPChunk* chunk, const WebPData* const data, + int copy_data, uint32_t tag); + +// Sets 'chunk' as the only element in 'chunk_list' if it is empty. +// On success ownership is transferred from 'chunk' to the 'chunk_list'. +WebPMuxError ChunkSetHead(WebPChunk* const chunk, WebPChunk** const chunk_list); +// Sets 'chunk' at last position in the 'chunk_list'. +// On success ownership is transferred from 'chunk' to the 'chunk_list'. +// *chunk_list also points towards the last valid element of the initial +// *chunk_list. +WebPMuxError ChunkAppend(WebPChunk* const chunk, WebPChunk*** const chunk_list); + +// Releases chunk and returns chunk->next_. +WebPChunk* ChunkRelease(WebPChunk* const chunk); + +// Deletes given chunk & returns chunk->next_. +WebPChunk* ChunkDelete(WebPChunk* const chunk); + +// Deletes all chunks in the given chunk list. +void ChunkListDelete(WebPChunk** const chunk_list); + +// Returns size of the chunk including chunk header and padding byte (if any). +static WEBP_INLINE size_t SizeWithPadding(size_t chunk_size) { + assert(chunk_size <= MAX_CHUNK_PAYLOAD); + return CHUNK_HEADER_SIZE + ((chunk_size + 1) & ~1U); +} + +// Size of a chunk including header and padding. 
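+// E.g. a 5-byte payload occupies CHUNK_HEADER_SIZE (8) + 6 = 14 bytes on
+// disk, since payloads are padded to an even length.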
+static WEBP_INLINE size_t ChunkDiskSize(const WebPChunk* chunk) { + const size_t data_size = chunk->data_.size; + return SizeWithPadding(data_size); +} + +// Total size of a list of chunks. +size_t ChunkListDiskSize(const WebPChunk* chunk_list); + +// Write out the given list of chunks into 'dst'. +uint8_t* ChunkListEmit(const WebPChunk* chunk_list, uint8_t* dst); + +//------------------------------------------------------------------------------ +// MuxImage object management. + +// Initialize. +void MuxImageInit(WebPMuxImage* const wpi); + +// Releases image 'wpi' and returns wpi->next. +WebPMuxImage* MuxImageRelease(WebPMuxImage* const wpi); + +// Delete image 'wpi' and return the next image in the list or NULL. +// 'wpi' can be NULL. +WebPMuxImage* MuxImageDelete(WebPMuxImage* const wpi); + +// Count number of images matching the given tag id in the 'wpi_list'. +// If id == WEBP_CHUNK_NIL, all images will be matched. +int MuxImageCount(const WebPMuxImage* wpi_list, WebPChunkId id); + +// Update width/height/has_alpha info from chunks within wpi. +// Also remove ALPH chunk if not needed. +int MuxImageFinalize(WebPMuxImage* const wpi); + +// Check if given ID corresponds to an image related chunk. +static WEBP_INLINE int IsWPI(WebPChunkId id) { + switch (id) { + case WEBP_CHUNK_ANMF: + case WEBP_CHUNK_ALPHA: + case WEBP_CHUNK_IMAGE: return 1; + default: return 0; + } +} + +// Pushes 'wpi' at the end of 'wpi_list'. +WebPMuxError MuxImagePush(const WebPMuxImage* wpi, WebPMuxImage** wpi_list); + +// Delete nth image in the image list. +WebPMuxError MuxImageDeleteNth(WebPMuxImage** wpi_list, uint32_t nth); + +// Get nth image in the image list. +WebPMuxError MuxImageGetNth(const WebPMuxImage** wpi_list, uint32_t nth, + WebPMuxImage** wpi); + +// Total size of the given image. +size_t MuxImageDiskSize(const WebPMuxImage* const wpi); + +// Write out the given image into 'dst'. +uint8_t* MuxImageEmit(const WebPMuxImage* const wpi, uint8_t* dst); + +//------------------------------------------------------------------------------ +// Helper methods for mux. + +// Checks if the given image list contains at least one image with alpha. +int MuxHasAlpha(const WebPMuxImage* images); + +// Write out RIFF header into 'data', given total data size 'size'. +uint8_t* MuxEmitRiffHeader(uint8_t* const data, size_t size); + +// Returns the list where chunk with given ID is to be inserted in mux. +WebPChunk** MuxGetChunkListFromId(const WebPMux* mux, WebPChunkId id); + +// Validates the given mux object. +WebPMuxError MuxValidate(const WebPMux* const mux); + +//------------------------------------------------------------------------------ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // WEBP_MUX_MUXI_H_ diff --git a/vendor/github.com/sizeofint/webpanimation/mux_muxinternal.c b/vendor/github.com/sizeofint/webpanimation/mux_muxinternal.c new file mode 100644 index 00000000..d6ef877d --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/mux_muxinternal.c @@ -0,0 +1,548 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Internal objects and utils for mux. 
+//
+// Authors: Urvang (urvang@google.com)
+// Vikas (vikasa@google.com)
+
+#include <assert.h>
+#include "mux_muxi.h"
+#include "utils_utils.h"
+
+#define UNDEFINED_CHUNK_SIZE ((uint32_t)(-1))
+
+const ChunkInfo kChunks[] = {
+ { MKFOURCC('V', 'P', '8', 'X'), WEBP_CHUNK_VP8X, VP8X_CHUNK_SIZE },
+ { MKFOURCC('I', 'C', 'C', 'P'), WEBP_CHUNK_ICCP, UNDEFINED_CHUNK_SIZE },
+ { MKFOURCC('A', 'N', 'I', 'M'), WEBP_CHUNK_ANIM, ANIM_CHUNK_SIZE },
+ { MKFOURCC('A', 'N', 'M', 'F'), WEBP_CHUNK_ANMF, ANMF_CHUNK_SIZE },
+ { MKFOURCC('A', 'L', 'P', 'H'), WEBP_CHUNK_ALPHA, UNDEFINED_CHUNK_SIZE },
+ { MKFOURCC('V', 'P', '8', ' '), WEBP_CHUNK_IMAGE, UNDEFINED_CHUNK_SIZE },
+ { MKFOURCC('V', 'P', '8', 'L'), WEBP_CHUNK_IMAGE, UNDEFINED_CHUNK_SIZE },
+ { MKFOURCC('E', 'X', 'I', 'F'), WEBP_CHUNK_EXIF, UNDEFINED_CHUNK_SIZE },
+ { MKFOURCC('X', 'M', 'P', ' '), WEBP_CHUNK_XMP, UNDEFINED_CHUNK_SIZE },
+ { NIL_TAG, WEBP_CHUNK_UNKNOWN, UNDEFINED_CHUNK_SIZE },
+
+ { NIL_TAG, WEBP_CHUNK_NIL, UNDEFINED_CHUNK_SIZE }
+};
+
+//------------------------------------------------------------------------------
+
+int WebPGetMuxVersion(void) {
+ return (MUX_MAJ_VERSION << 16) | (MUX_MIN_VERSION << 8) | MUX_REV_VERSION;
+}
+
+//------------------------------------------------------------------------------
+// Life of a chunk object.
+
+void ChunkInit(WebPChunk* const chunk) {
+ assert(chunk);
+ memset(chunk, 0, sizeof(*chunk));
+ chunk->tag_ = NIL_TAG;
+}
+
+WebPChunk* ChunkRelease(WebPChunk* const chunk) {
+ WebPChunk* next;
+ if (chunk == NULL) return NULL;
+ if (chunk->owner_) {
+ WebPDataClear(&chunk->data_);
+ }
+ next = chunk->next_;
+ ChunkInit(chunk);
+ return next;
+}
+
+//------------------------------------------------------------------------------
+// Chunk misc methods.
+
+CHUNK_INDEX ChunkGetIndexFromTag(uint32_t tag) {
+ int i;
+ for (i = 0; kChunks[i].tag != NIL_TAG; ++i) {
+ if (tag == kChunks[i].tag) return (CHUNK_INDEX)i;
+ }
+ return IDX_UNKNOWN;
+}
+
+WebPChunkId ChunkGetIdFromTag(uint32_t tag) {
+ int i;
+ for (i = 0; kChunks[i].tag != NIL_TAG; ++i) {
+ if (tag == kChunks[i].tag) return kChunks[i].id;
+ }
+ return WEBP_CHUNK_UNKNOWN;
+}
+
+uint32_t ChunkGetTagFromFourCC(const char fourcc[4]) {
+ return MKFOURCC(fourcc[0], fourcc[1], fourcc[2], fourcc[3]);
+}
+
+CHUNK_INDEX ChunkGetIndexFromFourCC(const char fourcc[4]) {
+ const uint32_t tag = ChunkGetTagFromFourCC(fourcc);
+ return ChunkGetIndexFromTag(tag);
+}
+
+//------------------------------------------------------------------------------
+// Chunk search methods.
+
+// Returns next chunk in the chunk list with the given tag.
+static WebPChunk* ChunkSearchNextInList(WebPChunk* chunk, uint32_t tag) {
+ while (chunk != NULL && chunk->tag_ != tag) {
+ chunk = chunk->next_;
+ }
+ return chunk;
+}
+
+WebPChunk* ChunkSearchList(WebPChunk* first, uint32_t nth, uint32_t tag) {
+ uint32_t iter = nth;
+ first = ChunkSearchNextInList(first, tag);
+ if (first == NULL) return NULL;
+
+ while (--iter != 0) {
+ WebPChunk* next_chunk = ChunkSearchNextInList(first->next_, tag);
+ if (next_chunk == NULL) break;
+ first = next_chunk;
+ }
+ return ((nth > 0) && (iter > 0)) ? NULL : first;
+}
+
+//------------------------------------------------------------------------------
+// Chunk writer methods.
+
+WebPMuxError ChunkAssignData(WebPChunk* chunk, const WebPData* const data,
+ int copy_data, uint32_t tag) {
+ // For internally allocated chunks, always copy data & make it owner of data.
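+ // (These payloads are built in stack buffers by their callers -- see
+ // CreateVP8XChunk() and WebPMuxSetAnimationParams() -- so keeping a
+ // pointer to the caller's memory would dangle.)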
+ if (tag == kChunks[IDX_VP8X].tag || tag == kChunks[IDX_ANIM].tag) { + copy_data = 1; + } + + ChunkRelease(chunk); + + if (data != NULL) { + if (copy_data) { // Copy data. + if (!WebPDataCopy(data, &chunk->data_)) return WEBP_MUX_MEMORY_ERROR; + chunk->owner_ = 1; // Chunk is owner of data. + } else { // Don't copy data. + chunk->data_ = *data; + } + } + chunk->tag_ = tag; + return WEBP_MUX_OK; +} + +WebPMuxError ChunkSetHead(WebPChunk* const chunk, + WebPChunk** const chunk_list) { + WebPChunk* new_chunk; + + assert(chunk_list != NULL); + if (*chunk_list != NULL) { + return WEBP_MUX_NOT_FOUND; + } + + new_chunk = (WebPChunk*)WebPSafeMalloc(1ULL, sizeof(*new_chunk)); + if (new_chunk == NULL) return WEBP_MUX_MEMORY_ERROR; + *new_chunk = *chunk; + chunk->owner_ = 0; + new_chunk->next_ = NULL; + *chunk_list = new_chunk; + return WEBP_MUX_OK; +} + +WebPMuxError ChunkAppend(WebPChunk* const chunk, + WebPChunk*** const chunk_list) { + assert(chunk_list != NULL && *chunk_list != NULL); + + if (**chunk_list == NULL) { + ChunkSetHead(chunk, *chunk_list); + } else { + WebPChunk* last_chunk = **chunk_list; + while (last_chunk->next_ != NULL) last_chunk = last_chunk->next_; + ChunkSetHead(chunk, &last_chunk->next_); + *chunk_list = &last_chunk->next_; + } + return WEBP_MUX_OK; +} + +//------------------------------------------------------------------------------ +// Chunk deletion method(s). + +WebPChunk* ChunkDelete(WebPChunk* const chunk) { + WebPChunk* const next = ChunkRelease(chunk); + WebPSafeFree(chunk); + return next; +} + +void ChunkListDelete(WebPChunk** const chunk_list) { + while (*chunk_list != NULL) { + *chunk_list = ChunkDelete(*chunk_list); + } +} + +//------------------------------------------------------------------------------ +// Chunk serialization methods. + +static uint8_t* ChunkEmit(const WebPChunk* const chunk, uint8_t* dst) { + const size_t chunk_size = chunk->data_.size; + assert(chunk); + assert(chunk->tag_ != NIL_TAG); + PutLE32(dst + 0, chunk->tag_); + PutLE32(dst + TAG_SIZE, (uint32_t)chunk_size); + assert(chunk_size == (uint32_t)chunk_size); + memcpy(dst + CHUNK_HEADER_SIZE, chunk->data_.bytes, chunk_size); + if (chunk_size & 1) + dst[CHUNK_HEADER_SIZE + chunk_size] = 0; // Add padding. + return dst + ChunkDiskSize(chunk); +} + +uint8_t* ChunkListEmit(const WebPChunk* chunk_list, uint8_t* dst) { + while (chunk_list != NULL) { + dst = ChunkEmit(chunk_list, dst); + chunk_list = chunk_list->next_; + } + return dst; +} + +size_t ChunkListDiskSize(const WebPChunk* chunk_list) { + size_t size = 0; + while (chunk_list != NULL) { + size += ChunkDiskSize(chunk_list); + chunk_list = chunk_list->next_; + } + return size; +} + +//------------------------------------------------------------------------------ +// Life of a MuxImage object. + +void MuxImageInit(WebPMuxImage* const wpi) { + assert(wpi); + memset(wpi, 0, sizeof(*wpi)); +} + +WebPMuxImage* MuxImageRelease(WebPMuxImage* const wpi) { + WebPMuxImage* next; + if (wpi == NULL) return NULL; + // There should be at most one chunk of header_, alpha_, img_ but we call + // ChunkListDelete to be safe + ChunkListDelete(&wpi->header_); + ChunkListDelete(&wpi->alpha_); + ChunkListDelete(&wpi->img_); + ChunkListDelete(&wpi->unknown_); + + next = wpi->next_; + MuxImageInit(wpi); + return next; +} + +//------------------------------------------------------------------------------ +// MuxImage search methods. + +// Get a reference to appropriate chunk list within an image given chunk tag. 
+static WebPChunk** GetChunkListFromId(const WebPMuxImage* const wpi, + WebPChunkId id) { + assert(wpi != NULL); + switch (id) { + case WEBP_CHUNK_ANMF: return (WebPChunk**)&wpi->header_; + case WEBP_CHUNK_ALPHA: return (WebPChunk**)&wpi->alpha_; + case WEBP_CHUNK_IMAGE: return (WebPChunk**)&wpi->img_; + default: return NULL; + } +} + +int MuxImageCount(const WebPMuxImage* wpi_list, WebPChunkId id) { + int count = 0; + const WebPMuxImage* current; + for (current = wpi_list; current != NULL; current = current->next_) { + if (id == WEBP_CHUNK_NIL) { + ++count; // Special case: count all images. + } else { + const WebPChunk* const wpi_chunk = *GetChunkListFromId(current, id); + if (wpi_chunk != NULL) { + const WebPChunkId wpi_chunk_id = ChunkGetIdFromTag(wpi_chunk->tag_); + if (wpi_chunk_id == id) ++count; // Count images with a matching 'id'. + } + } + } + return count; +} + +// Outputs a pointer to 'prev_wpi->next_', +// where 'prev_wpi' is the pointer to the image at position (nth - 1). +// Returns true if nth image was found. +static int SearchImageToGetOrDelete(WebPMuxImage** wpi_list, uint32_t nth, + WebPMuxImage*** const location) { + uint32_t count = 0; + assert(wpi_list); + *location = wpi_list; + + if (nth == 0) { + nth = MuxImageCount(*wpi_list, WEBP_CHUNK_NIL); + if (nth == 0) return 0; // Not found. + } + + while (*wpi_list != NULL) { + WebPMuxImage* const cur_wpi = *wpi_list; + ++count; + if (count == nth) return 1; // Found. + wpi_list = &cur_wpi->next_; + *location = wpi_list; + } + return 0; // Not found. +} + +//------------------------------------------------------------------------------ +// MuxImage writer methods. + +WebPMuxError MuxImagePush(const WebPMuxImage* wpi, WebPMuxImage** wpi_list) { + WebPMuxImage* new_wpi; + + while (*wpi_list != NULL) { + WebPMuxImage* const cur_wpi = *wpi_list; + if (cur_wpi->next_ == NULL) break; + wpi_list = &cur_wpi->next_; + } + + new_wpi = (WebPMuxImage*)WebPSafeMalloc(1ULL, sizeof(*new_wpi)); + if (new_wpi == NULL) return WEBP_MUX_MEMORY_ERROR; + *new_wpi = *wpi; + new_wpi->next_ = NULL; + + if (*wpi_list != NULL) { + (*wpi_list)->next_ = new_wpi; + } else { + *wpi_list = new_wpi; + } + return WEBP_MUX_OK; +} + +//------------------------------------------------------------------------------ +// MuxImage deletion methods. + +WebPMuxImage* MuxImageDelete(WebPMuxImage* const wpi) { + // Delete the components of wpi. If wpi is NULL this is a noop. + WebPMuxImage* const next = MuxImageRelease(wpi); + WebPSafeFree(wpi); + return next; +} + +WebPMuxError MuxImageDeleteNth(WebPMuxImage** wpi_list, uint32_t nth) { + assert(wpi_list); + if (!SearchImageToGetOrDelete(wpi_list, nth, &wpi_list)) { + return WEBP_MUX_NOT_FOUND; + } + *wpi_list = MuxImageDelete(*wpi_list); + return WEBP_MUX_OK; +} + +//------------------------------------------------------------------------------ +// MuxImage reader methods. + +WebPMuxError MuxImageGetNth(const WebPMuxImage** wpi_list, uint32_t nth, + WebPMuxImage** wpi) { + assert(wpi_list); + assert(wpi); + if (!SearchImageToGetOrDelete((WebPMuxImage**)wpi_list, nth, + (WebPMuxImage***)&wpi_list)) { + return WEBP_MUX_NOT_FOUND; + } + *wpi = (WebPMuxImage*)*wpi_list; + return WEBP_MUX_OK; +} + +//------------------------------------------------------------------------------ +// MuxImage serialization methods. + +// Size of an image. 
+size_t MuxImageDiskSize(const WebPMuxImage* const wpi) { + size_t size = 0; + if (wpi->header_ != NULL) size += ChunkDiskSize(wpi->header_); + if (wpi->alpha_ != NULL) size += ChunkDiskSize(wpi->alpha_); + if (wpi->img_ != NULL) size += ChunkDiskSize(wpi->img_); + if (wpi->unknown_ != NULL) size += ChunkListDiskSize(wpi->unknown_); + return size; +} + +// Special case as ANMF chunk encapsulates other image chunks. +static uint8_t* ChunkEmitSpecial(const WebPChunk* const header, + size_t total_size, uint8_t* dst) { + const size_t header_size = header->data_.size; + const size_t offset_to_next = total_size - CHUNK_HEADER_SIZE; + assert(header->tag_ == kChunks[IDX_ANMF].tag); + PutLE32(dst + 0, header->tag_); + PutLE32(dst + TAG_SIZE, (uint32_t)offset_to_next); + assert(header_size == (uint32_t)header_size); + memcpy(dst + CHUNK_HEADER_SIZE, header->data_.bytes, header_size); + if (header_size & 1) { + dst[CHUNK_HEADER_SIZE + header_size] = 0; // Add padding. + } + return dst + ChunkDiskSize(header); +} + +uint8_t* MuxImageEmit(const WebPMuxImage* const wpi, uint8_t* dst) { + // Ordering of chunks to be emitted is strictly as follows: + // 1. ANMF chunk (if present). + // 2. ALPH chunk (if present). + // 3. VP8/VP8L chunk. + assert(wpi); + if (wpi->header_ != NULL) { + dst = ChunkEmitSpecial(wpi->header_, MuxImageDiskSize(wpi), dst); + } + if (wpi->alpha_ != NULL) dst = ChunkEmit(wpi->alpha_, dst); + if (wpi->img_ != NULL) dst = ChunkEmit(wpi->img_, dst); + if (wpi->unknown_ != NULL) dst = ChunkListEmit(wpi->unknown_, dst); + return dst; +} + +//------------------------------------------------------------------------------ +// Helper methods for mux. + +int MuxHasAlpha(const WebPMuxImage* images) { + while (images != NULL) { + if (images->has_alpha_) return 1; + images = images->next_; + } + return 0; +} + +uint8_t* MuxEmitRiffHeader(uint8_t* const data, size_t size) { + PutLE32(data + 0, MKFOURCC('R', 'I', 'F', 'F')); + PutLE32(data + TAG_SIZE, (uint32_t)size - CHUNK_HEADER_SIZE); + assert(size == (uint32_t)size); + PutLE32(data + TAG_SIZE + CHUNK_SIZE_BYTES, MKFOURCC('W', 'E', 'B', 'P')); + return data + RIFF_HEADER_SIZE; +} + +WebPChunk** MuxGetChunkListFromId(const WebPMux* mux, WebPChunkId id) { + assert(mux != NULL); + switch (id) { + case WEBP_CHUNK_VP8X: return (WebPChunk**)&mux->vp8x_; + case WEBP_CHUNK_ICCP: return (WebPChunk**)&mux->iccp_; + case WEBP_CHUNK_ANIM: return (WebPChunk**)&mux->anim_; + case WEBP_CHUNK_EXIF: return (WebPChunk**)&mux->exif_; + case WEBP_CHUNK_XMP: return (WebPChunk**)&mux->xmp_; + default: return (WebPChunk**)&mux->unknown_; + } +} + +static int IsNotCompatible(int feature, int num_items) { + return (feature != 0) != (num_items > 0); +} + +#define NO_FLAG ((WebPFeatureFlags)0) + +// Test basic constraints: +// retrieval, maximum number of chunks by index (use -1 to skip) +// and feature incompatibility (use NO_FLAG to skip). +// On success returns WEBP_MUX_OK and stores the chunk count in *num. 
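+// E.g. ValidateChunk(mux, IDX_ICCP, ICCP_FLAG, flags, 1, &num_iccp) checks
+// that at most one ICCP chunk is present and that its presence agrees with
+// the ICCP_FLAG bit in 'flags'.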
+static WebPMuxError ValidateChunk(const WebPMux* const mux, CHUNK_INDEX idx, + WebPFeatureFlags feature, + uint32_t vp8x_flags, + int max, int* num) { + const WebPMuxError err = + WebPMuxNumChunks(mux, kChunks[idx].id, num); + if (err != WEBP_MUX_OK) return err; + if (max > -1 && *num > max) return WEBP_MUX_INVALID_ARGUMENT; + if (feature != NO_FLAG && IsNotCompatible(vp8x_flags & feature, *num)) { + return WEBP_MUX_INVALID_ARGUMENT; + } + return WEBP_MUX_OK; +} + +WebPMuxError MuxValidate(const WebPMux* const mux) { + int num_iccp; + int num_exif; + int num_xmp; + int num_anim; + int num_frames; + int num_vp8x; + int num_images; + int num_alpha; + uint32_t flags; + WebPMuxError err; + + // Verify mux is not NULL. + if (mux == NULL) return WEBP_MUX_INVALID_ARGUMENT; + + // Verify mux has at least one image. + if (mux->images_ == NULL) return WEBP_MUX_INVALID_ARGUMENT; + + err = WebPMuxGetFeatures(mux, &flags); + if (err != WEBP_MUX_OK) return err; + + // At most one color profile chunk. + err = ValidateChunk(mux, IDX_ICCP, ICCP_FLAG, flags, 1, &num_iccp); + if (err != WEBP_MUX_OK) return err; + + // At most one EXIF metadata. + err = ValidateChunk(mux, IDX_EXIF, EXIF_FLAG, flags, 1, &num_exif); + if (err != WEBP_MUX_OK) return err; + + // At most one XMP metadata. + err = ValidateChunk(mux, IDX_XMP, XMP_FLAG, flags, 1, &num_xmp); + if (err != WEBP_MUX_OK) return err; + + // Animation: ANIMATION_FLAG, ANIM chunk and ANMF chunk(s) are consistent. + // At most one ANIM chunk. + err = ValidateChunk(mux, IDX_ANIM, NO_FLAG, flags, 1, &num_anim); + if (err != WEBP_MUX_OK) return err; + err = ValidateChunk(mux, IDX_ANMF, NO_FLAG, flags, -1, &num_frames); + if (err != WEBP_MUX_OK) return err; + + { + const int has_animation = !!(flags & ANIMATION_FLAG); + if (has_animation && (num_anim == 0 || num_frames == 0)) { + return WEBP_MUX_INVALID_ARGUMENT; + } + if (!has_animation && (num_anim == 1 || num_frames > 0)) { + return WEBP_MUX_INVALID_ARGUMENT; + } + if (!has_animation) { + const WebPMuxImage* images = mux->images_; + // There can be only one image. + if (images == NULL || images->next_ != NULL) { + return WEBP_MUX_INVALID_ARGUMENT; + } + // Size must match. + if (mux->canvas_width_ > 0) { + if (images->width_ != mux->canvas_width_ || + images->height_ != mux->canvas_height_) { + return WEBP_MUX_INVALID_ARGUMENT; + } + } + } + } + + // Verify either VP8X chunk is present OR there is only one elem in + // mux->images_. + err = ValidateChunk(mux, IDX_VP8X, NO_FLAG, flags, 1, &num_vp8x); + if (err != WEBP_MUX_OK) return err; + err = ValidateChunk(mux, IDX_VP8, NO_FLAG, flags, -1, &num_images); + if (err != WEBP_MUX_OK) return err; + if (num_vp8x == 0 && num_images != 1) return WEBP_MUX_INVALID_ARGUMENT; + + // ALPHA_FLAG & alpha chunk(s) are consistent. + // Note: ALPHA_FLAG can be set when there is actually no Alpha data present. + if (MuxHasAlpha(mux->images_)) { + if (num_vp8x > 0) { + // VP8X chunk is present, so it should contain ALPHA_FLAG. + if (!(flags & ALPHA_FLAG)) return WEBP_MUX_INVALID_ARGUMENT; + } else { + // VP8X chunk is not present, so ALPH chunks should NOT be present either. 
+ err = WebPMuxNumChunks(mux, WEBP_CHUNK_ALPHA, &num_alpha);
+ if (err != WEBP_MUX_OK) return err;
+ if (num_alpha > 0) return WEBP_MUX_INVALID_ARGUMENT;
+ }
+ }
+
+ return WEBP_MUX_OK;
+}
+
+#undef NO_FLAG
+
+//------------------------------------------------------------------------------
+
diff --git a/vendor/github.com/sizeofint/webpanimation/mux_muxread.c b/vendor/github.com/sizeofint/webpanimation/mux_muxread.c
new file mode 100644
index 00000000..a0d016f3
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/mux_muxread.c
@@ -0,0 +1,556 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Read APIs for mux.
+//
+// Authors: Urvang (urvang@google.com)
+// Vikas (vikasa@google.com)
+
+#include <assert.h>
+#include "mux_muxi.h"
+#include "utils_utils.h"
+
+//------------------------------------------------------------------------------
+// Helper method(s).
+
+// Handy MACRO.
+#define SWITCH_ID_LIST(INDEX, LIST) \
+ if (idx == (INDEX)) { \
+ const WebPChunk* const chunk = ChunkSearchList((LIST), nth, \
+ kChunks[(INDEX)].tag); \
+ if (chunk) { \
+ *data = chunk->data_; \
+ return WEBP_MUX_OK; \
+ } else { \
+ return WEBP_MUX_NOT_FOUND; \
+ } \
+ }
+
+static WebPMuxError MuxGet(const WebPMux* const mux, CHUNK_INDEX idx,
+ uint32_t nth, WebPData* const data) {
+ assert(mux != NULL);
+ assert(!IsWPI(kChunks[idx].id));
+ WebPDataInit(data);
+
+ SWITCH_ID_LIST(IDX_VP8X, mux->vp8x_);
+ SWITCH_ID_LIST(IDX_ICCP, mux->iccp_);
+ SWITCH_ID_LIST(IDX_ANIM, mux->anim_);
+ SWITCH_ID_LIST(IDX_EXIF, mux->exif_);
+ SWITCH_ID_LIST(IDX_XMP, mux->xmp_);
+ assert(idx != IDX_UNKNOWN);
+ return WEBP_MUX_NOT_FOUND;
+}
+#undef SWITCH_ID_LIST
+
+// Fill the chunk with the given data (includes chunk header bytes), after some
+// verifications.
+static WebPMuxError ChunkVerifyAndAssign(WebPChunk* chunk,
+ const uint8_t* data, size_t data_size,
+ size_t riff_size, int copy_data) {
+ uint32_t chunk_size;
+ WebPData chunk_data;
+
+ // Sanity checks.
+ if (data_size < CHUNK_HEADER_SIZE) return WEBP_MUX_NOT_ENOUGH_DATA;
+ chunk_size = GetLE32(data + TAG_SIZE);
+ if (chunk_size > MAX_CHUNK_PAYLOAD) return WEBP_MUX_BAD_DATA;
+
+ {
+ const size_t chunk_disk_size = SizeWithPadding(chunk_size);
+ if (chunk_disk_size > riff_size) return WEBP_MUX_BAD_DATA;
+ if (chunk_disk_size > data_size) return WEBP_MUX_NOT_ENOUGH_DATA;
+ }
+
+ // Data assignment.
+ chunk_data.bytes = data + CHUNK_HEADER_SIZE;
+ chunk_data.size = chunk_size;
+ return ChunkAssignData(chunk, &chunk_data, copy_data, GetLE32(data + 0));
+}
+
+int MuxImageFinalize(WebPMuxImage* const wpi) {
+ const WebPChunk* const img = wpi->img_;
+ const WebPData* const image = &img->data_;
+ const int is_lossless = (img->tag_ == kChunks[IDX_VP8L].tag);
+ int w, h;
+ int vp8l_has_alpha = 0;
+ const int ok = is_lossless ?
+ VP8LGetInfo(image->bytes, image->size, &w, &h, &vp8l_has_alpha) :
+ VP8GetInfo(image->bytes, image->size, image->size, &w, &h);
+ assert(img != NULL);
+ if (ok) {
+ // Ignore ALPH chunk accompanying VP8L.
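+ // (A lossless VP8L bitstream carries its alpha internally, so a separate
+ // ALPH chunk would be redundant and is dropped here.)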
+ if (is_lossless && (wpi->alpha_ != NULL)) { + ChunkDelete(wpi->alpha_); + wpi->alpha_ = NULL; + } + wpi->width_ = w; + wpi->height_ = h; + wpi->has_alpha_ = vp8l_has_alpha || (wpi->alpha_ != NULL); + } + return ok; +} + +static int MuxImageParse(const WebPChunk* const chunk, int copy_data, + WebPMuxImage* const wpi) { + const uint8_t* bytes = chunk->data_.bytes; + size_t size = chunk->data_.size; + const uint8_t* const last = (bytes == NULL) ? NULL : bytes + size; + WebPChunk subchunk; + size_t subchunk_size; + WebPChunk** unknown_chunk_list = &wpi->unknown_; + ChunkInit(&subchunk); + + assert(chunk->tag_ == kChunks[IDX_ANMF].tag); + assert(!wpi->is_partial_); + + // ANMF. + { + const size_t hdr_size = ANMF_CHUNK_SIZE; + const WebPData temp = { bytes, hdr_size }; + // Each of ANMF chunk contain a header at the beginning. So, its size should + // be at least 'hdr_size'. + if (size < hdr_size) goto Fail; + ChunkAssignData(&subchunk, &temp, copy_data, chunk->tag_); + } + ChunkSetHead(&subchunk, &wpi->header_); + wpi->is_partial_ = 1; // Waiting for ALPH and/or VP8/VP8L chunks. + + // Rest of the chunks. + subchunk_size = ChunkDiskSize(&subchunk) - CHUNK_HEADER_SIZE; + bytes += subchunk_size; + size -= subchunk_size; + + while (bytes != last) { + ChunkInit(&subchunk); + if (ChunkVerifyAndAssign(&subchunk, bytes, size, size, + copy_data) != WEBP_MUX_OK) { + goto Fail; + } + switch (ChunkGetIdFromTag(subchunk.tag_)) { + case WEBP_CHUNK_ALPHA: + if (wpi->alpha_ != NULL) goto Fail; // Consecutive ALPH chunks. + if (ChunkSetHead(&subchunk, &wpi->alpha_) != WEBP_MUX_OK) goto Fail; + wpi->is_partial_ = 1; // Waiting for a VP8 chunk. + break; + case WEBP_CHUNK_IMAGE: + if (wpi->img_ != NULL) goto Fail; // Only 1 image chunk allowed. + if (ChunkSetHead(&subchunk, &wpi->img_) != WEBP_MUX_OK) goto Fail; + if (!MuxImageFinalize(wpi)) goto Fail; + wpi->is_partial_ = 0; // wpi is completely filled. + break; + case WEBP_CHUNK_UNKNOWN: + if (wpi->is_partial_) { + goto Fail; // Encountered an unknown chunk + // before some image chunks. + } + if (ChunkAppend(&subchunk, &unknown_chunk_list) != WEBP_MUX_OK) { + goto Fail; + } + break; + default: + goto Fail; + } + subchunk_size = ChunkDiskSize(&subchunk); + bytes += subchunk_size; + size -= subchunk_size; + } + if (wpi->is_partial_) goto Fail; + return 1; + + Fail: + ChunkRelease(&subchunk); + return 0; +} + +//------------------------------------------------------------------------------ +// Create a mux object from WebP-RIFF data. + +WebPMux* WebPMuxCreateInternal(const WebPData* bitstream, int copy_data, + int version) { + size_t riff_size; + uint32_t tag; + const uint8_t* end; + WebPMux* mux = NULL; + WebPMuxImage* wpi = NULL; + const uint8_t* data; + size_t size; + WebPChunk chunk; + // Stores the end of the chunk lists so that it is faster to append data to + // their ends. + WebPChunk** chunk_list_ends[WEBP_CHUNK_NIL + 1] = { NULL }; + ChunkInit(&chunk); + + // Sanity checks. 
+ if (WEBP_ABI_IS_INCOMPATIBLE(version, WEBP_MUX_ABI_VERSION)) { + return NULL; // version mismatch + } + if (bitstream == NULL) return NULL; + + data = bitstream->bytes; + size = bitstream->size; + + if (data == NULL) return NULL; + if (size < RIFF_HEADER_SIZE + CHUNK_HEADER_SIZE) return NULL; + if (GetLE32(data + 0) != MKFOURCC('R', 'I', 'F', 'F') || + GetLE32(data + CHUNK_HEADER_SIZE) != MKFOURCC('W', 'E', 'B', 'P')) { + return NULL; + } + + mux = WebPMuxNew(); + if (mux == NULL) return NULL; + + tag = GetLE32(data + RIFF_HEADER_SIZE); + if (tag != kChunks[IDX_VP8].tag && + tag != kChunks[IDX_VP8L].tag && + tag != kChunks[IDX_VP8X].tag) { + goto Err; // First chunk should be VP8, VP8L or VP8X. + } + + riff_size = GetLE32(data + TAG_SIZE); + if (riff_size > MAX_CHUNK_PAYLOAD) goto Err; + + // Note this padding is historical and differs from demux.c which does not + // pad the file size. + riff_size = SizeWithPadding(riff_size); + if (riff_size < CHUNK_HEADER_SIZE) goto Err; + if (riff_size > size) goto Err; + // There's no point in reading past the end of the RIFF chunk. + if (size > riff_size + CHUNK_HEADER_SIZE) { + size = riff_size + CHUNK_HEADER_SIZE; + } + + end = data + size; + data += RIFF_HEADER_SIZE; + size -= RIFF_HEADER_SIZE; + + wpi = (WebPMuxImage*)WebPSafeMalloc(1ULL, sizeof(*wpi)); + if (wpi == NULL) goto Err; + MuxImageInit(wpi); + + // Loop over chunks. + while (data != end) { + size_t data_size; + WebPChunkId id; + if (ChunkVerifyAndAssign(&chunk, data, size, riff_size, + copy_data) != WEBP_MUX_OK) { + goto Err; + } + data_size = ChunkDiskSize(&chunk); + id = ChunkGetIdFromTag(chunk.tag_); + switch (id) { + case WEBP_CHUNK_ALPHA: + if (wpi->alpha_ != NULL) goto Err; // Consecutive ALPH chunks. + if (ChunkSetHead(&chunk, &wpi->alpha_) != WEBP_MUX_OK) goto Err; + wpi->is_partial_ = 1; // Waiting for a VP8 chunk. + break; + case WEBP_CHUNK_IMAGE: + if (ChunkSetHead(&chunk, &wpi->img_) != WEBP_MUX_OK) goto Err; + if (!MuxImageFinalize(wpi)) goto Err; + wpi->is_partial_ = 0; // wpi is completely filled. + PushImage: + // Add this to mux->images_ list. + if (MuxImagePush(wpi, &mux->images_) != WEBP_MUX_OK) goto Err; + MuxImageInit(wpi); // Reset for reading next image. + break; + case WEBP_CHUNK_ANMF: + if (wpi->is_partial_) goto Err; // Previous wpi is still incomplete. + if (!MuxImageParse(&chunk, copy_data, wpi)) goto Err; + ChunkRelease(&chunk); + goto PushImage; + default: // A non-image chunk. + if (wpi->is_partial_) goto Err; // Encountered a non-image chunk before + // getting all chunks of an image. + if (chunk_list_ends[id] == NULL) { + chunk_list_ends[id] = + MuxGetChunkListFromId(mux, id); // List to add this chunk. + } + if (ChunkAppend(&chunk, &chunk_list_ends[id]) != WEBP_MUX_OK) goto Err; + if (id == WEBP_CHUNK_VP8X) { // grab global specs + if (data_size < CHUNK_HEADER_SIZE + VP8X_CHUNK_SIZE) goto Err; + mux->canvas_width_ = GetLE24(data + 12) + 1; + mux->canvas_height_ = GetLE24(data + 15) + 1; + } + break; + } + data += data_size; + size -= data_size; + ChunkInit(&chunk); + } + + // Incomplete image. + if (wpi->is_partial_) goto Err; + + // Validate mux if complete. + if (MuxValidate(mux) != WEBP_MUX_OK) goto Err; + + MuxImageDelete(wpi); + return mux; // All OK; + + Err: // Something bad happened. + ChunkRelease(&chunk); + MuxImageDelete(wpi); + WebPMuxDelete(mux); + return NULL; +} + +//------------------------------------------------------------------------------ +// Get API(s). + +// Validates that the given mux has a single image. 
+static WebPMuxError ValidateForSingleImage(const WebPMux* const mux) { + const int num_images = MuxImageCount(mux->images_, WEBP_CHUNK_IMAGE); + const int num_frames = MuxImageCount(mux->images_, WEBP_CHUNK_ANMF); + + if (num_images == 0) { + // No images in mux. + return WEBP_MUX_NOT_FOUND; + } else if (num_images == 1 && num_frames == 0) { + // Valid case (single image). + return WEBP_MUX_OK; + } else { + // Frame case OR an invalid mux. + return WEBP_MUX_INVALID_ARGUMENT; + } +} + +// Get the canvas width, height and flags after validating that VP8X/VP8/VP8L +// chunk and canvas size are valid. +static WebPMuxError MuxGetCanvasInfo(const WebPMux* const mux, + int* width, int* height, uint32_t* flags) { + int w, h; + uint32_t f = 0; + WebPData data; + assert(mux != NULL); + + // Check if VP8X chunk is present. + if (MuxGet(mux, IDX_VP8X, 1, &data) == WEBP_MUX_OK) { + if (data.size < VP8X_CHUNK_SIZE) return WEBP_MUX_BAD_DATA; + f = GetLE32(data.bytes + 0); + w = GetLE24(data.bytes + 4) + 1; + h = GetLE24(data.bytes + 7) + 1; + } else { + const WebPMuxImage* const wpi = mux->images_; + // Grab user-forced canvas size as default. + w = mux->canvas_width_; + h = mux->canvas_height_; + if (w == 0 && h == 0 && ValidateForSingleImage(mux) == WEBP_MUX_OK) { + // single image and not forced canvas size => use dimension of first frame + assert(wpi != NULL); + w = wpi->width_; + h = wpi->height_; + } + if (wpi != NULL) { + if (wpi->has_alpha_) f |= ALPHA_FLAG; + } + } + if (w * (uint64_t)h >= MAX_IMAGE_AREA) return WEBP_MUX_BAD_DATA; + + if (width != NULL) *width = w; + if (height != NULL) *height = h; + if (flags != NULL) *flags = f; + return WEBP_MUX_OK; +} + +WebPMuxError WebPMuxGetCanvasSize(const WebPMux* mux, int* width, int* height) { + if (mux == NULL || width == NULL || height == NULL) { + return WEBP_MUX_INVALID_ARGUMENT; + } + return MuxGetCanvasInfo(mux, width, height, NULL); +} + +WebPMuxError WebPMuxGetFeatures(const WebPMux* mux, uint32_t* flags) { + if (mux == NULL || flags == NULL) return WEBP_MUX_INVALID_ARGUMENT; + return MuxGetCanvasInfo(mux, NULL, NULL, flags); +} + +static uint8_t* EmitVP8XChunk(uint8_t* const dst, int width, + int height, uint32_t flags) { + const size_t vp8x_size = CHUNK_HEADER_SIZE + VP8X_CHUNK_SIZE; + assert(width >= 1 && height >= 1); + assert(width <= MAX_CANVAS_SIZE && height <= MAX_CANVAS_SIZE); + assert(width * (uint64_t)height < MAX_IMAGE_AREA); + PutLE32(dst, MKFOURCC('V', 'P', '8', 'X')); + PutLE32(dst + TAG_SIZE, VP8X_CHUNK_SIZE); + PutLE32(dst + CHUNK_HEADER_SIZE, flags); + PutLE24(dst + CHUNK_HEADER_SIZE + 4, width - 1); + PutLE24(dst + CHUNK_HEADER_SIZE + 7, height - 1); + return dst + vp8x_size; +} + +// Assemble a single image WebP bitstream from 'wpi'. +static WebPMuxError SynthesizeBitstream(const WebPMuxImage* const wpi, + WebPData* const bitstream) { + uint8_t* dst; + + // Allocate data. + const int need_vp8x = (wpi->alpha_ != NULL); + const size_t vp8x_size = need_vp8x ? CHUNK_HEADER_SIZE + VP8X_CHUNK_SIZE : 0; + const size_t alpha_size = need_vp8x ? ChunkDiskSize(wpi->alpha_) : 0; + // Note: No need to output ANMF chunk for a single image. + const size_t size = RIFF_HEADER_SIZE + vp8x_size + alpha_size + + ChunkDiskSize(wpi->img_); + uint8_t* const data = (uint8_t*)WebPSafeMalloc(1ULL, size); + if (data == NULL) return WEBP_MUX_MEMORY_ERROR; + + // There should be at most one alpha_ chunk and exactly one img_ chunk. 
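+ // Sizing sketch for a lossy image with alpha: RIFF header (12 bytes) +
+ // VP8X (8-byte header + 10-byte payload) + ALPH chunk + VP8 chunk.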
+ assert(wpi->alpha_ == NULL || wpi->alpha_->next_ == NULL); + assert(wpi->img_ != NULL && wpi->img_->next_ == NULL); + + // Main RIFF header. + dst = MuxEmitRiffHeader(data, size); + + if (need_vp8x) { + dst = EmitVP8XChunk(dst, wpi->width_, wpi->height_, ALPHA_FLAG); // VP8X. + dst = ChunkListEmit(wpi->alpha_, dst); // ALPH. + } + + // Bitstream. + dst = ChunkListEmit(wpi->img_, dst); + assert(dst == data + size); + + // Output. + bitstream->bytes = data; + bitstream->size = size; + return WEBP_MUX_OK; +} + +WebPMuxError WebPMuxGetChunk(const WebPMux* mux, const char fourcc[4], + WebPData* chunk_data) { + CHUNK_INDEX idx; + if (mux == NULL || fourcc == NULL || chunk_data == NULL) { + return WEBP_MUX_INVALID_ARGUMENT; + } + idx = ChunkGetIndexFromFourCC(fourcc); + if (IsWPI(kChunks[idx].id)) { // An image chunk. + return WEBP_MUX_INVALID_ARGUMENT; + } else if (idx != IDX_UNKNOWN) { // A known chunk type. + return MuxGet(mux, idx, 1, chunk_data); + } else { // An unknown chunk type. + const WebPChunk* const chunk = + ChunkSearchList(mux->unknown_, 1, ChunkGetTagFromFourCC(fourcc)); + if (chunk == NULL) return WEBP_MUX_NOT_FOUND; + *chunk_data = chunk->data_; + return WEBP_MUX_OK; + } +} + +static WebPMuxError MuxGetImageInternal(const WebPMuxImage* const wpi, + WebPMuxFrameInfo* const info) { + // Set some defaults for unrelated fields. + info->x_offset = 0; + info->y_offset = 0; + info->duration = 1; + info->dispose_method = WEBP_MUX_DISPOSE_NONE; + info->blend_method = WEBP_MUX_BLEND; + // Extract data for related fields. + info->id = ChunkGetIdFromTag(wpi->img_->tag_); + return SynthesizeBitstream(wpi, &info->bitstream); +} + +static WebPMuxError MuxGetFrameInternal(const WebPMuxImage* const wpi, + WebPMuxFrameInfo* const frame) { + const int is_frame = (wpi->header_->tag_ == kChunks[IDX_ANMF].tag); + const WebPData* frame_data; + if (!is_frame) return WEBP_MUX_INVALID_ARGUMENT; + assert(wpi->header_ != NULL); // Already checked by WebPMuxGetFrame(). + // Get frame chunk. + frame_data = &wpi->header_->data_; + if (frame_data->size < kChunks[IDX_ANMF].size) return WEBP_MUX_BAD_DATA; + // Extract info. + frame->x_offset = 2 * GetLE24(frame_data->bytes + 0); + frame->y_offset = 2 * GetLE24(frame_data->bytes + 3); + { + const uint8_t bits = frame_data->bytes[15]; + frame->duration = GetLE24(frame_data->bytes + 12); + frame->dispose_method = + (bits & 1) ? WEBP_MUX_DISPOSE_BACKGROUND : WEBP_MUX_DISPOSE_NONE; + frame->blend_method = (bits & 2) ? WEBP_MUX_NO_BLEND : WEBP_MUX_BLEND; + } + frame->id = ChunkGetIdFromTag(wpi->header_->tag_); + return SynthesizeBitstream(wpi, &frame->bitstream); +} + +WebPMuxError WebPMuxGetFrame( + const WebPMux* mux, uint32_t nth, WebPMuxFrameInfo* frame) { + WebPMuxError err; + WebPMuxImage* wpi; + + // Sanity checks. + if (mux == NULL || frame == NULL) { + return WEBP_MUX_INVALID_ARGUMENT; + } + + // Get the nth WebPMuxImage. + err = MuxImageGetNth((const WebPMuxImage**)&mux->images_, nth, &wpi); + if (err != WEBP_MUX_OK) return err; + + // Get frame info. 
+ if (wpi->header_ == NULL) {
+ return MuxGetImageInternal(wpi, frame);
+ } else {
+ return MuxGetFrameInternal(wpi, frame);
+ }
+}
+
+WebPMuxError WebPMuxGetAnimationParams(const WebPMux* mux,
+ WebPMuxAnimParams* params) {
+ WebPData anim;
+ WebPMuxError err;
+
+ if (mux == NULL || params == NULL) return WEBP_MUX_INVALID_ARGUMENT;
+
+ err = MuxGet(mux, IDX_ANIM, 1, &anim);
+ if (err != WEBP_MUX_OK) return err;
+ if (anim.size < kChunks[WEBP_CHUNK_ANIM].size) return WEBP_MUX_BAD_DATA;
+ params->bgcolor = GetLE32(anim.bytes);
+ params->loop_count = GetLE16(anim.bytes + 4);
+
+ return WEBP_MUX_OK;
+}
+
+// Get chunk index from chunk id. Returns IDX_NIL if not found.
+static CHUNK_INDEX ChunkGetIndexFromId(WebPChunkId id) {
+ int i;
+ for (i = 0; kChunks[i].id != WEBP_CHUNK_NIL; ++i) {
+ if (id == kChunks[i].id) return (CHUNK_INDEX)i;
+ }
+ return IDX_NIL;
+}
+
+// Count number of chunks matching 'tag' in the 'chunk_list'.
+// If tag == NIL_TAG, any tag will be matched.
+static int CountChunks(const WebPChunk* const chunk_list, uint32_t tag) {
+ int count = 0;
+ const WebPChunk* current;
+ for (current = chunk_list; current != NULL; current = current->next_) {
+ if (tag == NIL_TAG || current->tag_ == tag) {
+ count++; // Count chunks whose tags match.
+ }
+ }
+ return count;
+}
+
+WebPMuxError WebPMuxNumChunks(const WebPMux* mux,
+ WebPChunkId id, int* num_elements) {
+ if (mux == NULL || num_elements == NULL) {
+ return WEBP_MUX_INVALID_ARGUMENT;
+ }
+
+ if (IsWPI(id)) {
+ *num_elements = MuxImageCount(mux->images_, id);
+ } else {
+ WebPChunk* const* chunk_list = MuxGetChunkListFromId(mux, id);
+ const CHUNK_INDEX idx = ChunkGetIndexFromId(id);
+ *num_elements = CountChunks(*chunk_list, kChunks[idx].tag);
+ }
+
+ return WEBP_MUX_OK;
+}
+
+//------------------------------------------------------------------------------
diff --git a/vendor/github.com/sizeofint/webpanimation/readme.md b/vendor/github.com/sizeofint/webpanimation/readme.md
new file mode 100644
index 00000000..f03491fa
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/readme.md
@@ -0,0 +1,11 @@
+# webpanimation
+Package is a binding to libwebp, providing methods to create WebP animations from the golang `image.Image` interface
+## Installing
+`go get github.com/sizeofint/webpanimation`
+
+
+## Examples
+Check out the [examples](examples) folder
+
+## Dependencies
+The only dependency is libwebp; its source code is embedded in the package, so no additional installation is needed on the machine. It can be updated to the latest libwebp via ```./generate_from_libwebp.py /path/to/clean/libwebp/src/```
diff --git a/vendor/github.com/sizeofint/webpanimation/utils_bit_reader_inl_utils.h b/vendor/github.com/sizeofint/webpanimation/utils_bit_reader_inl_utils.h
new file mode 100644
index 00000000..2851545b
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/utils_bit_reader_inl_utils.h
@@ -0,0 +1,195 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Specific inlined methods for boolean decoder [VP8GetBit() ...]
+// This file should be included by the .c sources that actually need to call
+// these methods.
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#ifndef WEBP_UTILS_BIT_READER_INL_UTILS_H_
+#define WEBP_UTILS_BIT_READER_INL_UTILS_H_
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <string.h>  // for memcpy
+
+#include "dsp_dsp.h"
+#include "utils_bit_reader_utils.h"
+#include "utils_endian_inl_utils.h"
+#include "utils_utils.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+//------------------------------------------------------------------------------
+// Derived type lbit_t = natural type for memory I/O
+
+#if (BITS > 32)
+typedef uint64_t lbit_t;
+#elif (BITS > 16)
+typedef uint32_t lbit_t;
+#elif (BITS > 8)
+typedef uint16_t lbit_t;
+#else
+typedef uint8_t lbit_t;
+#endif
+
+extern const uint8_t kVP8Log2Range[128];
+extern const uint8_t kVP8NewRange[128];
+
+// special case for the tail byte-reading
+void VP8LoadFinalBytes(VP8BitReader* const br);
+
+//------------------------------------------------------------------------------
+// Inlined critical functions
+
+// makes sure br->value_ has at least BITS bits worth of data
+static WEBP_UBSAN_IGNORE_UNDEF WEBP_INLINE
+void VP8LoadNewBytes(VP8BitReader* const br) {
+  assert(br != NULL && br->buf_ != NULL);
+  // Read 'BITS' bits at a time if possible.
+  if (br->buf_ < br->buf_max_) {
+    // convert memory type to register type (with some zero'ing!)
+    bit_t bits;
+#if defined(WEBP_USE_MIPS32)
+    // This is needed because of un-aligned read.
+    lbit_t in_bits;
+    lbit_t* p_buf_ = (lbit_t*)br->buf_;
+    __asm__ volatile(
+      ".set   push                     \n\t"
+      ".set   at                       \n\t"
+      ".set   macro                    \n\t"
+      "ulw    %[in_bits], 0(%[p_buf_]) \n\t"
+      ".set   pop                      \n\t"
+      : [in_bits]"=r"(in_bits)
+      : [p_buf_]"r"(p_buf_)
+      : "memory", "at"
+    );
+#else
+    lbit_t in_bits;
+    memcpy(&in_bits, br->buf_, sizeof(in_bits));
+#endif
+    br->buf_ += BITS >> 3;
+#if !defined(WORDS_BIGENDIAN)
+#if (BITS > 32)
+    bits = BSwap64(in_bits);
+    bits >>= 64 - BITS;
+#elif (BITS >= 24)
+    bits = BSwap32(in_bits);
+    bits >>= (32 - BITS);
+#elif (BITS == 16)
+    bits = BSwap16(in_bits);
+#else   // BITS == 8
+    bits = (bit_t)in_bits;
+#endif  // BITS > 32
+#else   // WORDS_BIGENDIAN
+    bits = (bit_t)in_bits;
+    if (BITS != 8 * sizeof(bit_t)) bits >>= (8 * sizeof(bit_t) - BITS);
+#endif
+    br->value_ = bits | (br->value_ << BITS);
+    br->bits_ += BITS;
+  } else {
+    VP8LoadFinalBytes(br);    // no need to be inlined
+  }
+}
+
+// Read a bit with proba 'prob'. Speed-critical function!
+static WEBP_INLINE int VP8GetBit(VP8BitReader* const br,
+                                 int prob, const char label[]) {
+  // Don't move this declaration! It makes a big speed difference to store
+  // 'range' *before* calling VP8LoadNewBytes(), even if this function doesn't
+  // alter br->range_ value.
+ range_t range = br->range_; + if (br->bits_ < 0) { + VP8LoadNewBytes(br); + } + { + const int pos = br->bits_; + const range_t split = (range * prob) >> 8; + const range_t value = (range_t)(br->value_ >> pos); + const int bit = (value > split); + if (bit) { + range -= split; + br->value_ -= (bit_t)(split + 1) << pos; + } else { + range = split + 1; + } + { + const int shift = 7 ^ BitsLog2Floor(range); + range <<= shift; + br->bits_ -= shift; + } + br->range_ = range - 1; + BT_TRACK(br); + return bit; + } +} + +// simplified version of VP8GetBit() for prob=0x80 (note shift is always 1 here) +static WEBP_UBSAN_IGNORE_UNSIGNED_OVERFLOW WEBP_INLINE +int VP8GetSigned(VP8BitReader* const br, int v, const char label[]) { + if (br->bits_ < 0) { + VP8LoadNewBytes(br); + } + { + const int pos = br->bits_; + const range_t split = br->range_ >> 1; + const range_t value = (range_t)(br->value_ >> pos); + const int32_t mask = (int32_t)(split - value) >> 31; // -1 or 0 + br->bits_ -= 1; + br->range_ += mask; + br->range_ |= 1; + br->value_ -= (bit_t)((split + 1) & mask) << pos; + BT_TRACK(br); + return (v ^ mask) - mask; + } +} + +static WEBP_INLINE int VP8GetBitAlt(VP8BitReader* const br, + int prob, const char label[]) { + // Don't move this declaration! It makes a big speed difference to store + // 'range' *before* calling VP8LoadNewBytes(), even if this function doesn't + // alter br->range_ value. + range_t range = br->range_; + if (br->bits_ < 0) { + VP8LoadNewBytes(br); + } + { + const int pos = br->bits_; + const range_t split = (range * prob) >> 8; + const range_t value = (range_t)(br->value_ >> pos); + int bit; // Don't use 'const int bit = (value > split);", it's slower. + if (value > split) { + range -= split + 1; + br->value_ -= (bit_t)(split + 1) << pos; + bit = 1; + } else { + range = split; + bit = 0; + } + if (range <= (range_t)0x7e) { + const int shift = kVP8Log2Range[range]; + range = kVP8NewRange[range]; + br->bits_ -= shift; + } + br->range_ = range; + BT_TRACK(br); + return bit; + } +} + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // WEBP_UTILS_BIT_READER_INL_UTILS_H_ diff --git a/vendor/github.com/sizeofint/webpanimation/utils_bit_reader_utils.c b/vendor/github.com/sizeofint/webpanimation/utils_bit_reader_utils.c new file mode 100644 index 00000000..0870da7e --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/utils_bit_reader_utils.c @@ -0,0 +1,298 @@ +// Copyright 2010 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Boolean decoder non-inlined methods +// +// Author: Skal (pascal.massimino@gmail.com) + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include "utils_bit_reader_inl_utils.h" +#include "utils_utils.h" + +//------------------------------------------------------------------------------ +// VP8BitReader + +void VP8BitReaderSetBuffer(VP8BitReader* const br, + const uint8_t* const start, + size_t size) { + br->buf_ = start; + br->buf_end_ = start + size; + br->buf_max_ = + (size >= sizeof(lbit_t)) ? 
start + size - sizeof(lbit_t) + 1 + : start; +} + +void VP8InitBitReader(VP8BitReader* const br, + const uint8_t* const start, size_t size) { + assert(br != NULL); + assert(start != NULL); + assert(size < (1u << 31)); // limit ensured by format and upstream checks + br->range_ = 255 - 1; + br->value_ = 0; + br->bits_ = -8; // to load the very first 8bits + br->eof_ = 0; + VP8BitReaderSetBuffer(br, start, size); + VP8LoadNewBytes(br); +} + +void VP8RemapBitReader(VP8BitReader* const br, ptrdiff_t offset) { + if (br->buf_ != NULL) { + br->buf_ += offset; + br->buf_end_ += offset; + br->buf_max_ += offset; + } +} + +const uint8_t kVP8Log2Range[128] = { + 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0 +}; + +// range = ((range - 1) << kVP8Log2Range[range]) + 1 +const uint8_t kVP8NewRange[128] = { + 127, 127, 191, 127, 159, 191, 223, 127, + 143, 159, 175, 191, 207, 223, 239, 127, + 135, 143, 151, 159, 167, 175, 183, 191, + 199, 207, 215, 223, 231, 239, 247, 127, + 131, 135, 139, 143, 147, 151, 155, 159, + 163, 167, 171, 175, 179, 183, 187, 191, + 195, 199, 203, 207, 211, 215, 219, 223, + 227, 231, 235, 239, 243, 247, 251, 127, + 129, 131, 133, 135, 137, 139, 141, 143, + 145, 147, 149, 151, 153, 155, 157, 159, + 161, 163, 165, 167, 169, 171, 173, 175, + 177, 179, 181, 183, 185, 187, 189, 191, + 193, 195, 197, 199, 201, 203, 205, 207, + 209, 211, 213, 215, 217, 219, 221, 223, + 225, 227, 229, 231, 233, 235, 237, 239, + 241, 243, 245, 247, 249, 251, 253, 127 +}; + +void VP8LoadFinalBytes(VP8BitReader* const br) { + assert(br != NULL && br->buf_ != NULL); + // Only read 8bits at a time + if (br->buf_ < br->buf_end_) { + br->bits_ += 8; + br->value_ = (bit_t)(*br->buf_++) | (br->value_ << 8); + } else if (!br->eof_) { + br->value_ <<= 8; + br->bits_ += 8; + br->eof_ = 1; + } else { + br->bits_ = 0; // This is to avoid undefined behaviour with shifts. + } +} + +//------------------------------------------------------------------------------ +// Higher-level calls + +uint32_t VP8GetValue(VP8BitReader* const br, int bits, const char label[]) { + uint32_t v = 0; + while (bits-- > 0) { + v |= VP8GetBit(br, 0x80, label) << bits; + } + return v; +} + +int32_t VP8GetSignedValue(VP8BitReader* const br, int bits, + const char label[]) { + const int value = VP8GetValue(br, bits, label); + return VP8Get(br, label) ? -value : value; +} + +//------------------------------------------------------------------------------ +// VP8LBitReader + +#define VP8L_LOG8_WBITS 4 // Number of bytes needed to store VP8L_WBITS bits. 
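+
+// Illustrative sketch (editorial comment, not upstream libwebp code): unlike
+// the MSB-first boolean decoder above, the VP8L reader below is LSB-first
+// over little-endian data. Typical use, with the functions defined below:
+//   VP8LBitReader br;
+//   VP8LInitBitReader(&br, data, data_size);
+//   const uint32_t w_minus_one = VP8LReadBits(&br, 14);  // 14-bit size field
+// VP8LReadBits() returns 0 and flags eos_ when a read would run past the end
+// of the buffer or n_bits exceeds VP8L_MAX_NUM_BIT_READ.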
+ +#if defined(__arm__) || defined(_M_ARM) || defined(__aarch64__) || \ + defined(__i386__) || defined(_M_IX86) || \ + defined(__x86_64__) || defined(_M_X64) +#define VP8L_USE_FAST_LOAD +#endif + +static const uint32_t kBitMask[VP8L_MAX_NUM_BIT_READ + 1] = { + 0, + 0x000001, 0x000003, 0x000007, 0x00000f, + 0x00001f, 0x00003f, 0x00007f, 0x0000ff, + 0x0001ff, 0x0003ff, 0x0007ff, 0x000fff, + 0x001fff, 0x003fff, 0x007fff, 0x00ffff, + 0x01ffff, 0x03ffff, 0x07ffff, 0x0fffff, + 0x1fffff, 0x3fffff, 0x7fffff, 0xffffff +}; + +void VP8LInitBitReader(VP8LBitReader* const br, const uint8_t* const start, + size_t length) { + size_t i; + vp8l_val_t value = 0; + assert(br != NULL); + assert(start != NULL); + assert(length < 0xfffffff8u); // can't happen with a RIFF chunk. + + br->len_ = length; + br->val_ = 0; + br->bit_pos_ = 0; + br->eos_ = 0; + + if (length > sizeof(br->val_)) { + length = sizeof(br->val_); + } + for (i = 0; i < length; ++i) { + value |= (vp8l_val_t)start[i] << (8 * i); + } + br->val_ = value; + br->pos_ = length; + br->buf_ = start; +} + +void VP8LBitReaderSetBuffer(VP8LBitReader* const br, + const uint8_t* const buf, size_t len) { + assert(br != NULL); + assert(buf != NULL); + assert(len < 0xfffffff8u); // can't happen with a RIFF chunk. + br->buf_ = buf; + br->len_ = len; + // pos_ > len_ should be considered a param error. + br->eos_ = (br->pos_ > br->len_) || VP8LIsEndOfStream(br); +} + +static void VP8LSetEndOfStream(VP8LBitReader* const br) { + br->eos_ = 1; + br->bit_pos_ = 0; // To avoid undefined behaviour with shifts. +} + +// If not at EOS, reload up to VP8L_LBITS byte-by-byte +static void ShiftBytes(VP8LBitReader* const br) { + while (br->bit_pos_ >= 8 && br->pos_ < br->len_) { + br->val_ >>= 8; + br->val_ |= ((vp8l_val_t)br->buf_[br->pos_]) << (VP8L_LBITS - 8); + ++br->pos_; + br->bit_pos_ -= 8; + } + if (VP8LIsEndOfStream(br)) { + VP8LSetEndOfStream(br); + } +} + +void VP8LDoFillBitWindow(VP8LBitReader* const br) { + assert(br->bit_pos_ >= VP8L_WBITS); +#if defined(VP8L_USE_FAST_LOAD) + if (br->pos_ + sizeof(br->val_) < br->len_) { + br->val_ >>= VP8L_WBITS; + br->bit_pos_ -= VP8L_WBITS; + br->val_ |= (vp8l_val_t)HToLE32(WebPMemToUint32(br->buf_ + br->pos_)) << + (VP8L_LBITS - VP8L_WBITS); + br->pos_ += VP8L_LOG8_WBITS; + return; + } +#endif + ShiftBytes(br); // Slow path. +} + +uint32_t VP8LReadBits(VP8LBitReader* const br, int n_bits) { + assert(n_bits >= 0); + // Flag an error if end_of_stream or n_bits is more than allowed limit. 
+  if (!br->eos_ && n_bits <= VP8L_MAX_NUM_BIT_READ) {
+    const uint32_t val = VP8LPrefetchBits(br) & kBitMask[n_bits];
+    const int new_bits = br->bit_pos_ + n_bits;
+    br->bit_pos_ = new_bits;
+    ShiftBytes(br);
+    return val;
+  } else {
+    VP8LSetEndOfStream(br);
+    return 0;
+  }
+}
+
+//------------------------------------------------------------------------------
+// Bit-tracing tool
+
+#if (BITTRACE > 0)
+
+#include <stdlib.h>  // for atexit()
+#include <stdio.h>
+#include <string.h>
+
+#define MAX_NUM_LABELS 32
+static struct {
+  const char* label;
+  int size;
+  int count;
+} kLabels[MAX_NUM_LABELS];
+
+static int last_label = 0;
+static int last_pos = 0;
+static const uint8_t* buf_start = NULL;
+static int init_done = 0;
+
+static void PrintBitTraces(void) {
+  int i;
+  int scale = 1;
+  int total = 0;
+  const char* units = "bits";
+#if (BITTRACE == 2)
+  scale = 8;
+  units = "bytes";
+#endif
+  for (i = 0; i < last_label; ++i) total += kLabels[i].size;
+  if (total < 1) total = 1;   // avoid rounding errors
+  printf("=== Bit traces ===\n");
+  for (i = 0; i < last_label; ++i) {
+    const int skip = 16 - (int)strlen(kLabels[i].label);
+    const int value = (kLabels[i].size + scale - 1) / scale;
+    assert(skip > 0);
+    printf("%s \%*s: %6d %s \t[%5.2f%%] [count: %7d]\n",
+           kLabels[i].label, skip, "", value, units,
+           100.f * kLabels[i].size / total,
+           kLabels[i].count);
+  }
+  total = (total + scale - 1) / scale;
+  printf("Total: %d %s\n", total, units);
+}
+
+void BitTrace(const struct VP8BitReader* const br, const char label[]) {
+  int i, pos;
+  if (!init_done) {
+    memset(kLabels, 0, sizeof(kLabels));
+    atexit(PrintBitTraces);
+    buf_start = br->buf_;
+    init_done = 1;
+  }
+  pos = (int)(br->buf_ - buf_start) * 8 - br->bits_;
+  // if there's a too large jump, we've changed partition -> reset counter
+  if (abs(pos - last_pos) > 32) {
+    buf_start = br->buf_;
+    pos = 0;
+    last_pos = 0;
+  }
+  if (br->range_ >= 0x7f) pos += kVP8Log2Range[br->range_ - 0x7f];
+  for (i = 0; i < last_label; ++i) {
+    if (!strcmp(label, kLabels[i].label)) break;
+  }
+  if (i == MAX_NUM_LABELS) abort();   // overflow!
+  kLabels[i].label = label;
+  kLabels[i].size += pos - last_pos;
+  kLabels[i].count += 1;
+  if (i == last_label) ++last_label;
+  last_pos = pos;
+}
+
+#endif  // BITTRACE > 0
+
+//------------------------------------------------------------------------------
diff --git a/vendor/github.com/sizeofint/webpanimation/utils_bit_reader_utils.h b/vendor/github.com/sizeofint/webpanimation/utils_bit_reader_utils.h
new file mode 100644
index 00000000..aa915491
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/utils_bit_reader_utils.h
@@ -0,0 +1,194 @@
+// Copyright 2010 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Boolean decoder
+//
+// Author: Skal (pascal.massimino@gmail.com)
+//         Vikas Arora (vikaas.arora@gmail.com)
+
+#ifndef WEBP_UTILS_BIT_READER_UTILS_H_
+#define WEBP_UTILS_BIT_READER_UTILS_H_
+
+#include <assert.h>
+#ifdef _MSC_VER
+#include <stdlib.h>  // _byteswap_ulong
+#endif
+#include "webp_types.h"
+
+// Warning! This macro triggers quite some MACRO wizardry around func signature!
+#if !defined(BITTRACE) +#define BITTRACE 0 // 0 = off, 1 = print bits, 2 = print bytes +#endif + +#if (BITTRACE > 0) +struct VP8BitReader; +extern void BitTrace(const struct VP8BitReader* const br, const char label[]); +#define BT_TRACK(br) BitTrace(br, label) +#define VP8Get(BR, L) VP8GetValue(BR, 1, L) +#else +#define BT_TRACK(br) +// We'll REMOVE the 'const char label[]' from all signatures and calls (!!): +#define VP8GetValue(BR, N, L) VP8GetValue(BR, N) +#define VP8Get(BR, L) VP8GetValue(BR, 1, L) +#define VP8GetSignedValue(BR, N, L) VP8GetSignedValue(BR, N) +#define VP8GetBit(BR, P, L) VP8GetBit(BR, P) +#define VP8GetBitAlt(BR, P, L) VP8GetBitAlt(BR, P) +#define VP8GetSigned(BR, V, L) VP8GetSigned(BR, V) +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +// The Boolean decoder needs to maintain infinite precision on the value_ field. +// However, since range_ is only 8bit, we only need an active window of 8 bits +// for value_. Left bits (MSB) gets zeroed and shifted away when value_ falls +// below 128, range_ is updated, and fresh bits read from the bitstream are +// brought in as LSB. To avoid reading the fresh bits one by one (slow), we +// cache BITS of them ahead. The total of (BITS + 8) bits must fit into a +// natural register (with type bit_t). To fetch BITS bits from bitstream we +// use a type lbit_t. +// +// BITS can be any multiple of 8 from 8 to 56 (inclusive). +// Pick values that fit natural register size. + +#if defined(__i386__) || defined(_M_IX86) // x86 32bit +#define BITS 24 +#elif defined(__x86_64__) || defined(_M_X64) // x86 64bit +#define BITS 56 +#elif defined(__arm__) || defined(_M_ARM) // ARM +#define BITS 24 +#elif defined(__aarch64__) // ARM 64bit +#define BITS 56 +#elif defined(__mips__) // MIPS +#define BITS 24 +#else // reasonable default +#define BITS 24 +#endif + +//------------------------------------------------------------------------------ +// Derived types and constants: +// bit_t = natural register type for storing 'value_' (which is BITS+8 bits) +// range_t = register for 'range_' (which is 8bits only) + +#if (BITS > 24) +typedef uint64_t bit_t; +#else +typedef uint32_t bit_t; +#endif + +typedef uint32_t range_t; + +//------------------------------------------------------------------------------ +// Bitreader + +typedef struct VP8BitReader VP8BitReader; +struct VP8BitReader { + // boolean decoder (keep the field ordering as is!) + bit_t value_; // current value + range_t range_; // current range minus 1. In [127, 254] interval. + int bits_; // number of valid bits left + // read buffer + const uint8_t* buf_; // next byte to be read + const uint8_t* buf_end_; // end of read buffer + const uint8_t* buf_max_; // max packed-read position on buffer + int eof_; // true if input is exhausted +}; + +// Initialize the bit reader and the boolean decoder. +void VP8InitBitReader(VP8BitReader* const br, + const uint8_t* const start, size_t size); +// Sets the working read buffer. +void VP8BitReaderSetBuffer(VP8BitReader* const br, + const uint8_t* const start, size_t size); + +// Update internal pointers to displace the byte buffer by the +// relative offset 'offset'. +void VP8RemapBitReader(VP8BitReader* const br, ptrdiff_t offset); + +// return the next value made of 'num_bits' bits +uint32_t VP8GetValue(VP8BitReader* const br, int num_bits, const char label[]); + +// return the next value with sign-extension. 
+int32_t VP8GetSignedValue(VP8BitReader* const br, int num_bits, + const char label[]); + +// bit_reader_inl.h will implement the following methods: +// static WEBP_INLINE int VP8GetBit(VP8BitReader* const br, int prob, ...) +// static WEBP_INLINE int VP8GetSigned(VP8BitReader* const br, int v, ...) +// and should be included by the .c files that actually need them. +// This is to avoid recompiling the whole library whenever this file is touched, +// and also allowing platform-specific ad-hoc hacks. + +// ----------------------------------------------------------------------------- +// Bitreader for lossless format + +// maximum number of bits (inclusive) the bit-reader can handle: +#define VP8L_MAX_NUM_BIT_READ 24 + +#define VP8L_LBITS 64 // Number of bits prefetched (= bit-size of vp8l_val_t). +#define VP8L_WBITS 32 // Minimum number of bytes ready after VP8LFillBitWindow. + +typedef uint64_t vp8l_val_t; // right now, this bit-reader can only use 64bit. + +typedef struct { + vp8l_val_t val_; // pre-fetched bits + const uint8_t* buf_; // input byte buffer + size_t len_; // buffer length + size_t pos_; // byte position in buf_ + int bit_pos_; // current bit-reading position in val_ + int eos_; // true if a bit was read past the end of buffer +} VP8LBitReader; + +void VP8LInitBitReader(VP8LBitReader* const br, + const uint8_t* const start, + size_t length); + +// Sets a new data buffer. +void VP8LBitReaderSetBuffer(VP8LBitReader* const br, + const uint8_t* const buffer, size_t length); + +// Reads the specified number of bits from read buffer. +// Flags an error in case end_of_stream or n_bits is more than the allowed limit +// of VP8L_MAX_NUM_BIT_READ (inclusive). +// Flags eos_ if this read attempt is going to cross the read buffer. +uint32_t VP8LReadBits(VP8LBitReader* const br, int n_bits); + +// Return the prefetched bits, so they can be looked up. +static WEBP_INLINE uint32_t VP8LPrefetchBits(VP8LBitReader* const br) { + return (uint32_t)(br->val_ >> (br->bit_pos_ & (VP8L_LBITS - 1))); +} + +// Returns true if there was an attempt at reading bit past the end of +// the buffer. Doesn't set br->eos_ flag. +static WEBP_INLINE int VP8LIsEndOfStream(const VP8LBitReader* const br) { + assert(br->pos_ <= br->len_); + return br->eos_ || ((br->pos_ == br->len_) && (br->bit_pos_ > VP8L_LBITS)); +} + +// For jumping over a number of bits in the bit stream when accessed with +// VP8LPrefetchBits and VP8LFillBitWindow. +// This function does *not* set br->eos_, since it's speed-critical. +// Use with extreme care! +static WEBP_INLINE void VP8LSetBitPos(VP8LBitReader* const br, int val) { + br->bit_pos_ = val; +} + +// Advances the read buffer by 4 bytes to make room for reading next 32 bits. +// Speed critical, but infrequent part of the code can be non-inlined. +extern void VP8LDoFillBitWindow(VP8LBitReader* const br); +static WEBP_INLINE void VP8LFillBitWindow(VP8LBitReader* const br) { + if (br->bit_pos_ >= VP8L_WBITS) VP8LDoFillBitWindow(br); +} + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // WEBP_UTILS_BIT_READER_UTILS_H_ diff --git a/vendor/github.com/sizeofint/webpanimation/utils_bit_writer_utils.c b/vendor/github.com/sizeofint/webpanimation/utils_bit_writer_utils.c new file mode 100644 index 00000000..d1a87e58 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/utils_bit_writer_utils.c @@ -0,0 +1,347 @@ +// Copyright 2011 Google Inc. All Rights Reserved. 
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Bit writing and boolean coder
+//
+// Author: Skal (pascal.massimino@gmail.com)
+//         Vikas Arora (vikaas.arora@gmail.com)
+
+#include <assert.h>
+#include <string.h>   // for memcpy()
+#include <stdlib.h>
+
+#include "utils_bit_writer_utils.h"
+#include "utils_endian_inl_utils.h"
+#include "utils_utils.h"
+
+//------------------------------------------------------------------------------
+// VP8BitWriter
+
+static int BitWriterResize(VP8BitWriter* const bw, size_t extra_size) {
+  uint8_t* new_buf;
+  size_t new_size;
+  const uint64_t needed_size_64b = (uint64_t)bw->pos_ + extra_size;
+  const size_t needed_size = (size_t)needed_size_64b;
+  if (needed_size_64b != needed_size) {
+    bw->error_ = 1;
+    return 0;
+  }
+  if (needed_size <= bw->max_pos_) return 1;
+  // If the following line wraps over 32bit, the test just after will catch it.
+  new_size = 2 * bw->max_pos_;
+  if (new_size < needed_size) new_size = needed_size;
+  if (new_size < 1024) new_size = 1024;
+  new_buf = (uint8_t*)WebPSafeMalloc(1ULL, new_size);
+  if (new_buf == NULL) {
+    bw->error_ = 1;
+    return 0;
+  }
+  if (bw->pos_ > 0) {
+    assert(bw->buf_ != NULL);
+    memcpy(new_buf, bw->buf_, bw->pos_);
+  }
+  WebPSafeFree(bw->buf_);
+  bw->buf_ = new_buf;
+  bw->max_pos_ = new_size;
+  return 1;
+}
+
+static void Flush(VP8BitWriter* const bw) {
+  const int s = 8 + bw->nb_bits_;
+  const int32_t bits = bw->value_ >> s;
+  assert(bw->nb_bits_ >= 0);
+  bw->value_ -= bits << s;
+  bw->nb_bits_ -= 8;
+  if ((bits & 0xff) != 0xff) {
+    size_t pos = bw->pos_;
+    if (!BitWriterResize(bw, bw->run_ + 1)) {
+      return;
+    }
+    if (bits & 0x100) {  // overflow -> propagate carry over pending 0xff's
+      if (pos > 0) bw->buf_[pos - 1]++;
+    }
+    if (bw->run_ > 0) {
+      const int value = (bits & 0x100) ? 0x00 : 0xff;
+      for (; bw->run_ > 0; --bw->run_) bw->buf_[pos++] = value;
+    }
+    bw->buf_[pos++] = bits & 0xff;
+    bw->pos_ = pos;
+  } else {
+    bw->run_++;   // delay writing of bytes 0xff, pending eventual carry.
+ } +} + +//------------------------------------------------------------------------------ +// renormalization + +static const uint8_t kNorm[128] = { // renorm_sizes[i] = 8 - log2(i) + 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0 +}; + +// range = ((range + 1) << kVP8Log2Range[range]) - 1 +static const uint8_t kNewRange[128] = { + 127, 127, 191, 127, 159, 191, 223, 127, 143, 159, 175, 191, 207, 223, 239, + 127, 135, 143, 151, 159, 167, 175, 183, 191, 199, 207, 215, 223, 231, 239, + 247, 127, 131, 135, 139, 143, 147, 151, 155, 159, 163, 167, 171, 175, 179, + 183, 187, 191, 195, 199, 203, 207, 211, 215, 219, 223, 227, 231, 235, 239, + 243, 247, 251, 127, 129, 131, 133, 135, 137, 139, 141, 143, 145, 147, 149, + 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 175, 177, 179, + 181, 183, 185, 187, 189, 191, 193, 195, 197, 199, 201, 203, 205, 207, 209, + 211, 213, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239, + 241, 243, 245, 247, 249, 251, 253, 127 +}; + +int VP8PutBit(VP8BitWriter* const bw, int bit, int prob) { + const int split = (bw->range_ * prob) >> 8; + if (bit) { + bw->value_ += split + 1; + bw->range_ -= split + 1; + } else { + bw->range_ = split; + } + if (bw->range_ < 127) { // emit 'shift' bits out and renormalize + const int shift = kNorm[bw->range_]; + bw->range_ = kNewRange[bw->range_]; + bw->value_ <<= shift; + bw->nb_bits_ += shift; + if (bw->nb_bits_ > 0) Flush(bw); + } + return bit; +} + +int VP8PutBitUniform(VP8BitWriter* const bw, int bit) { + const int split = bw->range_ >> 1; + if (bit) { + bw->value_ += split + 1; + bw->range_ -= split + 1; + } else { + bw->range_ = split; + } + if (bw->range_ < 127) { + bw->range_ = kNewRange[bw->range_]; + bw->value_ <<= 1; + bw->nb_bits_ += 1; + if (bw->nb_bits_ > 0) Flush(bw); + } + return bit; +} + +void VP8PutBits(VP8BitWriter* const bw, uint32_t value, int nb_bits) { + uint32_t mask; + assert(nb_bits > 0 && nb_bits < 32); + for (mask = 1u << (nb_bits - 1); mask; mask >>= 1) { + VP8PutBitUniform(bw, value & mask); + } +} + +void VP8PutSignedBits(VP8BitWriter* const bw, int value, int nb_bits) { + if (!VP8PutBitUniform(bw, value != 0)) return; + if (value < 0) { + VP8PutBits(bw, ((-value) << 1) | 1, nb_bits + 1); + } else { + VP8PutBits(bw, value << 1, nb_bits + 1); + } +} + +//------------------------------------------------------------------------------ + +int VP8BitWriterInit(VP8BitWriter* const bw, size_t expected_size) { + bw->range_ = 255 - 1; + bw->value_ = 0; + bw->run_ = 0; + bw->nb_bits_ = -8; + bw->pos_ = 0; + bw->max_pos_ = 0; + bw->error_ = 0; + bw->buf_ = NULL; + return (expected_size > 0) ? 
BitWriterResize(bw, expected_size) : 1; +} + +uint8_t* VP8BitWriterFinish(VP8BitWriter* const bw) { + VP8PutBits(bw, 0, 9 - bw->nb_bits_); + bw->nb_bits_ = 0; // pad with zeroes + Flush(bw); + return bw->buf_; +} + +int VP8BitWriterAppend(VP8BitWriter* const bw, + const uint8_t* data, size_t size) { + assert(data != NULL); + if (bw->nb_bits_ != -8) return 0; // Flush() must have been called + if (!BitWriterResize(bw, size)) return 0; + memcpy(bw->buf_ + bw->pos_, data, size); + bw->pos_ += size; + return 1; +} + +void VP8BitWriterWipeOut(VP8BitWriter* const bw) { + if (bw != NULL) { + WebPSafeFree(bw->buf_); + memset(bw, 0, sizeof(*bw)); + } +} + +//------------------------------------------------------------------------------ +// VP8LBitWriter + +// This is the minimum amount of size the memory buffer is guaranteed to grow +// when extra space is needed. +#define MIN_EXTRA_SIZE (32768ULL) + +// Returns 1 on success. +static int VP8LBitWriterResize(VP8LBitWriter* const bw, size_t extra_size) { + uint8_t* allocated_buf; + size_t allocated_size; + const size_t max_bytes = bw->end_ - bw->buf_; + const size_t current_size = bw->cur_ - bw->buf_; + const uint64_t size_required_64b = (uint64_t)current_size + extra_size; + const size_t size_required = (size_t)size_required_64b; + if (size_required != size_required_64b) { + bw->error_ = 1; + return 0; + } + if (max_bytes > 0 && size_required <= max_bytes) return 1; + allocated_size = (3 * max_bytes) >> 1; + if (allocated_size < size_required) allocated_size = size_required; + // make allocated size multiple of 1k + allocated_size = (((allocated_size >> 10) + 1) << 10); + allocated_buf = (uint8_t*)WebPSafeMalloc(1ULL, allocated_size); + if (allocated_buf == NULL) { + bw->error_ = 1; + return 0; + } + if (current_size > 0) { + memcpy(allocated_buf, bw->buf_, current_size); + } + WebPSafeFree(bw->buf_); + bw->buf_ = allocated_buf; + bw->cur_ = bw->buf_ + current_size; + bw->end_ = bw->buf_ + allocated_size; + return 1; +} + +int VP8LBitWriterInit(VP8LBitWriter* const bw, size_t expected_size) { + memset(bw, 0, sizeof(*bw)); + return VP8LBitWriterResize(bw, expected_size); +} + +int VP8LBitWriterClone(const VP8LBitWriter* const src, + VP8LBitWriter* const dst) { + const size_t current_size = src->cur_ - src->buf_; + assert(src->cur_ >= src->buf_ && src->cur_ <= src->end_); + if (!VP8LBitWriterResize(dst, current_size)) return 0; + memcpy(dst->buf_, src->buf_, current_size); + dst->bits_ = src->bits_; + dst->used_ = src->used_; + dst->error_ = src->error_; + dst->cur_ = dst->buf_ + current_size; + return 1; +} + +void VP8LBitWriterWipeOut(VP8LBitWriter* const bw) { + if (bw != NULL) { + WebPSafeFree(bw->buf_); + memset(bw, 0, sizeof(*bw)); + } +} + +void VP8LBitWriterReset(const VP8LBitWriter* const bw_init, + VP8LBitWriter* const bw) { + bw->bits_ = bw_init->bits_; + bw->used_ = bw_init->used_; + bw->cur_ = bw->buf_ + (bw_init->cur_ - bw_init->buf_); + assert(bw->cur_ <= bw->end_); + bw->error_ = bw_init->error_; +} + +void VP8LBitWriterSwap(VP8LBitWriter* const src, VP8LBitWriter* const dst) { + const VP8LBitWriter tmp = *src; + *src = *dst; + *dst = tmp; +} + +void VP8LPutBitsFlushBits(VP8LBitWriter* const bw) { + // If needed, make some room by flushing some bits out. 
+ if (bw->cur_ + VP8L_WRITER_BYTES > bw->end_) { + const uint64_t extra_size = (bw->end_ - bw->buf_) + MIN_EXTRA_SIZE; + if (extra_size != (size_t)extra_size || + !VP8LBitWriterResize(bw, (size_t)extra_size)) { + bw->cur_ = bw->buf_; + bw->error_ = 1; + return; + } + } + *(vp8l_wtype_t*)bw->cur_ = (vp8l_wtype_t)WSWAP((vp8l_wtype_t)bw->bits_); + bw->cur_ += VP8L_WRITER_BYTES; + bw->bits_ >>= VP8L_WRITER_BITS; + bw->used_ -= VP8L_WRITER_BITS; +} + +void VP8LPutBitsInternal(VP8LBitWriter* const bw, uint32_t bits, int n_bits) { + assert(n_bits <= 32); + // That's the max we can handle: + assert(sizeof(vp8l_wtype_t) == 2); + if (n_bits > 0) { + vp8l_atype_t lbits = bw->bits_; + int used = bw->used_; + // Special case of overflow handling for 32bit accumulator (2-steps flush). +#if VP8L_WRITER_BITS == 16 + if (used + n_bits >= VP8L_WRITER_MAX_BITS) { + // Fill up all the VP8L_WRITER_MAX_BITS so it can be flushed out below. + const int shift = VP8L_WRITER_MAX_BITS - used; + lbits |= (vp8l_atype_t)bits << used; + used = VP8L_WRITER_MAX_BITS; + n_bits -= shift; + bits >>= shift; + assert(n_bits <= VP8L_WRITER_MAX_BITS); + } +#endif + // If needed, make some room by flushing some bits out. + while (used >= VP8L_WRITER_BITS) { + if (bw->cur_ + VP8L_WRITER_BYTES > bw->end_) { + const uint64_t extra_size = (bw->end_ - bw->buf_) + MIN_EXTRA_SIZE; + if (extra_size != (size_t)extra_size || + !VP8LBitWriterResize(bw, (size_t)extra_size)) { + bw->cur_ = bw->buf_; + bw->error_ = 1; + return; + } + } + *(vp8l_wtype_t*)bw->cur_ = (vp8l_wtype_t)WSWAP((vp8l_wtype_t)lbits); + bw->cur_ += VP8L_WRITER_BYTES; + lbits >>= VP8L_WRITER_BITS; + used -= VP8L_WRITER_BITS; + } + bw->bits_ = lbits | ((vp8l_atype_t)bits << used); + bw->used_ = used + n_bits; + } +} + +uint8_t* VP8LBitWriterFinish(VP8LBitWriter* const bw) { + // flush leftover bits + if (VP8LBitWriterResize(bw, (bw->used_ + 7) >> 3)) { + while (bw->used_ > 0) { + *bw->cur_++ = (uint8_t)bw->bits_; + bw->bits_ >>= 8; + bw->used_ -= 8; + } + bw->used_ = 0; + } + return bw->buf_; +} + +//------------------------------------------------------------------------------ diff --git a/vendor/github.com/sizeofint/webpanimation/utils_bit_writer_utils.h b/vendor/github.com/sizeofint/webpanimation/utils_bit_writer_utils.h new file mode 100644 index 00000000..4592d084 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/utils_bit_writer_utils.h @@ -0,0 +1,154 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Bit writing and boolean coder +// +// Author: Skal (pascal.massimino@gmail.com) + +#ifndef WEBP_UTILS_BIT_WRITER_UTILS_H_ +#define WEBP_UTILS_BIT_WRITER_UTILS_H_ + +#include "webp_types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +//------------------------------------------------------------------------------ +// Bit-writing + +typedef struct VP8BitWriter VP8BitWriter; +struct VP8BitWriter { + int32_t range_; // range-1 + int32_t value_; + int run_; // number of outstanding bits + int nb_bits_; // number of pending bits + uint8_t* buf_; // internal buffer. Re-allocated regularly. Not owned. 
+ size_t pos_; + size_t max_pos_; + int error_; // true in case of error +}; + +// Initialize the object. Allocates some initial memory based on expected_size. +int VP8BitWriterInit(VP8BitWriter* const bw, size_t expected_size); +// Finalize the bitstream coding. Returns a pointer to the internal buffer. +uint8_t* VP8BitWriterFinish(VP8BitWriter* const bw); +// Release any pending memory and zeroes the object. Not a mandatory call. +// Only useful in case of error, when the internal buffer hasn't been grabbed! +void VP8BitWriterWipeOut(VP8BitWriter* const bw); + +int VP8PutBit(VP8BitWriter* const bw, int bit, int prob); +int VP8PutBitUniform(VP8BitWriter* const bw, int bit); +void VP8PutBits(VP8BitWriter* const bw, uint32_t value, int nb_bits); +void VP8PutSignedBits(VP8BitWriter* const bw, int value, int nb_bits); + +// Appends some bytes to the internal buffer. Data is copied. +int VP8BitWriterAppend(VP8BitWriter* const bw, + const uint8_t* data, size_t size); + +// return approximate write position (in bits) +static WEBP_INLINE uint64_t VP8BitWriterPos(const VP8BitWriter* const bw) { + const uint64_t nb_bits = 8 + bw->nb_bits_; // bw->nb_bits_ is <= 0, note + return (bw->pos_ + bw->run_) * 8 + nb_bits; +} + +// Returns a pointer to the internal buffer. +static WEBP_INLINE uint8_t* VP8BitWriterBuf(const VP8BitWriter* const bw) { + return bw->buf_; +} +// Returns the size of the internal buffer. +static WEBP_INLINE size_t VP8BitWriterSize(const VP8BitWriter* const bw) { + return bw->pos_; +} + +//------------------------------------------------------------------------------ +// VP8LBitWriter + +#if defined(__x86_64__) || defined(_M_X64) // 64bit +typedef uint64_t vp8l_atype_t; // accumulator type +typedef uint32_t vp8l_wtype_t; // writing type +#define WSWAP HToLE32 +#define VP8L_WRITER_BYTES 4 // sizeof(vp8l_wtype_t) +#define VP8L_WRITER_BITS 32 // 8 * sizeof(vp8l_wtype_t) +#define VP8L_WRITER_MAX_BITS 64 // 8 * sizeof(vp8l_atype_t) +#else +typedef uint32_t vp8l_atype_t; +typedef uint16_t vp8l_wtype_t; +#define WSWAP HToLE16 +#define VP8L_WRITER_BYTES 2 +#define VP8L_WRITER_BITS 16 +#define VP8L_WRITER_MAX_BITS 32 +#endif + +typedef struct { + vp8l_atype_t bits_; // bit accumulator + int used_; // number of bits used in accumulator + uint8_t* buf_; // start of buffer + uint8_t* cur_; // current write position + uint8_t* end_; // end of buffer + + // After all bits are written (VP8LBitWriterFinish()), the caller must observe + // the state of error_. A value of 1 indicates that a memory allocation + // failure has happened during bit writing. A value of 0 indicates successful + // writing of bits. + int error_; +} VP8LBitWriter; + +static WEBP_INLINE size_t VP8LBitWriterNumBytes(const VP8LBitWriter* const bw) { + return (bw->cur_ - bw->buf_) + ((bw->used_ + 7) >> 3); +} + +// Returns false in case of memory allocation error. +int VP8LBitWriterInit(VP8LBitWriter* const bw, size_t expected_size); +// Returns false in case of memory allocation error. +int VP8LBitWriterClone(const VP8LBitWriter* const src, + VP8LBitWriter* const dst); +// Finalize the bitstream coding. Returns a pointer to the internal buffer. +uint8_t* VP8LBitWriterFinish(VP8LBitWriter* const bw); +// Release any pending memory and zeroes the object. +void VP8LBitWriterWipeOut(VP8LBitWriter* const bw); +// Resets the cursor of the BitWriter bw to when it was like in bw_init. +void VP8LBitWriterReset(const VP8LBitWriter* const bw_init, + VP8LBitWriter* const bw); +// Swaps the memory held by two BitWriters. 
+void VP8LBitWriterSwap(VP8LBitWriter* const src, VP8LBitWriter* const dst);
+
+// Internal function for VP8LPutBits flushing 32 bits from the written state.
+void VP8LPutBitsFlushBits(VP8LBitWriter* const bw);
+
+// PutBits internal function used in the 16 bit vp8l_wtype_t case.
+void VP8LPutBitsInternal(VP8LBitWriter* const bw, uint32_t bits, int n_bits);
+
+// This function writes bits into bytes in increasing addresses (little endian),
+// and within a byte least-significant-bit first.
+// This function can write up to 32 bits in one go, but VP8LBitReader can only
+// read 24 bits max (VP8L_MAX_NUM_BIT_READ).
+// VP8LBitWriter's error_ flag is set in case of memory allocation error.
+static WEBP_INLINE void VP8LPutBits(VP8LBitWriter* const bw,
+                                    uint32_t bits, int n_bits) {
+  if (sizeof(vp8l_wtype_t) == 4) {
+    if (n_bits > 0) {
+      if (bw->used_ >= 32) {
+        VP8LPutBitsFlushBits(bw);
+      }
+      bw->bits_ |= (vp8l_atype_t)bits << bw->used_;
+      bw->used_ += n_bits;
+    }
+  } else {
+    VP8LPutBitsInternal(bw, bits, n_bits);
+  }
+}
+
+//------------------------------------------------------------------------------
+
+#ifdef __cplusplus
+}    // extern "C"
+#endif
+
+#endif  // WEBP_UTILS_BIT_WRITER_UTILS_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/utils_color_cache_utils.c b/vendor/github.com/sizeofint/webpanimation/utils_color_cache_utils.c
new file mode 100644
index 00000000..f07e7b52
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/utils_color_cache_utils.c
@@ -0,0 +1,49 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Color Cache for WebP Lossless
+//
+// Author: Jyrki Alakuijala (jyrki@google.com)
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include "utils_color_cache_utils.h"
+#include "utils_utils.h"
+
+//------------------------------------------------------------------------------
+// VP8LColorCache.
+
+int VP8LColorCacheInit(VP8LColorCache* const cc, int hash_bits) {
+  const int hash_size = 1 << hash_bits;
+  assert(cc != NULL);
+  assert(hash_bits > 0);
+  cc->colors_ = (uint32_t*)WebPSafeCalloc((uint64_t)hash_size,
+                                          sizeof(*cc->colors_));
+  if (cc->colors_ == NULL) return 0;
+  cc->hash_shift_ = 32 - hash_bits;
+  cc->hash_bits_ = hash_bits;
+  return 1;
+}
+
+void VP8LColorCacheClear(VP8LColorCache* const cc) {
+  if (cc != NULL) {
+    WebPSafeFree(cc->colors_);
+    cc->colors_ = NULL;
+  }
+}
+
+void VP8LColorCacheCopy(const VP8LColorCache* const src,
+                        VP8LColorCache* const dst) {
+  assert(src != NULL);
+  assert(dst != NULL);
+  assert(src->hash_bits_ == dst->hash_bits_);
+  memcpy(dst->colors_, src->colors_,
+         ((size_t)1u << dst->hash_bits_) * sizeof(*dst->colors_));
+}
diff --git a/vendor/github.com/sizeofint/webpanimation/utils_color_cache_utils.h b/vendor/github.com/sizeofint/webpanimation/utils_color_cache_utils.h
new file mode 100644
index 00000000..78a84514
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/utils_color_cache_utils.h
@@ -0,0 +1,89 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Color Cache for WebP Lossless
+//
+// Authors: Jyrki Alakuijala (jyrki@google.com)
+//          Urvang Joshi (urvang@google.com)
+
+#ifndef WEBP_UTILS_COLOR_CACHE_UTILS_H_
+#define WEBP_UTILS_COLOR_CACHE_UTILS_H_
+
+#include <assert.h>
+
+#include "dsp_dsp.h"
+#include "webp_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Main color cache struct.
+typedef struct {
+  uint32_t* colors_;  // color entries
+  int hash_shift_;    // Hash shift: 32 - hash_bits_.
+  int hash_bits_;
+} VP8LColorCache;
+
+static const uint32_t kHashMul = 0x1e35a7bdu;
+
+static WEBP_UBSAN_IGNORE_UNSIGNED_OVERFLOW WEBP_INLINE
+int VP8LHashPix(uint32_t argb, int shift) {
+  return (int)((argb * kHashMul) >> shift);
+}
+
+static WEBP_INLINE uint32_t VP8LColorCacheLookup(
+    const VP8LColorCache* const cc, uint32_t key) {
+  assert((key >> cc->hash_bits_) == 0u);
+  return cc->colors_[key];
+}
+
+static WEBP_INLINE void VP8LColorCacheSet(const VP8LColorCache* const cc,
+                                          uint32_t key, uint32_t argb) {
+  assert((key >> cc->hash_bits_) == 0u);
+  cc->colors_[key] = argb;
+}
+
+static WEBP_INLINE void VP8LColorCacheInsert(const VP8LColorCache* const cc,
+                                             uint32_t argb) {
+  const int key = VP8LHashPix(argb, cc->hash_shift_);
+  cc->colors_[key] = argb;
+}
+
+static WEBP_INLINE int VP8LColorCacheGetIndex(const VP8LColorCache* const cc,
+                                              uint32_t argb) {
+  return VP8LHashPix(argb, cc->hash_shift_);
+}
+
+// Return the key if cc contains argb, and -1 otherwise.
+static WEBP_INLINE int VP8LColorCacheContains(const VP8LColorCache* const cc,
+                                              uint32_t argb) {
+  const int key = VP8LHashPix(argb, cc->hash_shift_);
+  return (cc->colors_[key] == argb) ? key : -1;
+}
+
+//------------------------------------------------------------------------------
+
+// Initializes the color cache with 'hash_bits' bits for the keys.
+// Returns false in case of memory error.
+int VP8LColorCacheInit(VP8LColorCache* const color_cache, int hash_bits);
+
+void VP8LColorCacheCopy(const VP8LColorCache* const src,
+                        VP8LColorCache* const dst);
+
+// Delete the memory associated to color cache.
+void VP8LColorCacheClear(VP8LColorCache* const color_cache);
+
+//------------------------------------------------------------------------------
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // WEBP_UTILS_COLOR_CACHE_UTILS_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/utils_endian_inl_utils.h b/vendor/github.com/sizeofint/webpanimation/utils_endian_inl_utils.h
new file mode 100644
index 00000000..19ffed2f
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/utils_endian_inl_utils.h
@@ -0,0 +1,93 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Endian related functions.
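+//
+// Illustrative note (editorial, not upstream libwebp code): on little-endian
+// hosts the HToLE16/HToLE32 macros below compile to no-ops, while on
+// big-endian hosts they byte-swap, e.g. BSwap32(0x11223344u) == 0x44332211u,
+// so the bytes that reach the bitstream are always little-endian.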
+
+#ifndef WEBP_UTILS_ENDIAN_INL_UTILS_H_
+#define WEBP_UTILS_ENDIAN_INL_UTILS_H_
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "dsp_dsp.h"
+#include "webp_types.h"
+
+#if defined(WORDS_BIGENDIAN)
+#define HToLE32 BSwap32
+#define HToLE16 BSwap16
+#else
+#define HToLE32(x) (x)
+#define HToLE16(x) (x)
+#endif
+
+#if !defined(HAVE_CONFIG_H)
+#if LOCAL_GCC_PREREQ(4,8) || __has_builtin(__builtin_bswap16)
+#define HAVE_BUILTIN_BSWAP16
+#endif
+#if LOCAL_GCC_PREREQ(4,3) || __has_builtin(__builtin_bswap32)
+#define HAVE_BUILTIN_BSWAP32
+#endif
+#if LOCAL_GCC_PREREQ(4,3) || __has_builtin(__builtin_bswap64)
+#define HAVE_BUILTIN_BSWAP64
+#endif
+#endif  // !HAVE_CONFIG_H
+
+static WEBP_INLINE uint16_t BSwap16(uint16_t x) {
+#if defined(HAVE_BUILTIN_BSWAP16)
+  return __builtin_bswap16(x);
+#elif defined(_MSC_VER)
+  return _byteswap_ushort(x);
+#else
+  // gcc will recognize a 'rorw $8, ...' here:
+  return (x >> 8) | ((x & 0xff) << 8);
+#endif  // HAVE_BUILTIN_BSWAP16
+}
+
+static WEBP_INLINE uint32_t BSwap32(uint32_t x) {
+#if defined(WEBP_USE_MIPS32_R2)
+  uint32_t ret;
+  __asm__ volatile (
+    "wsbh   %[ret], %[x]          \n\t"
+    "rotr   %[ret], %[ret], 16    \n\t"
+    : [ret]"=r"(ret)
+    : [x]"r"(x)
+  );
+  return ret;
+#elif defined(HAVE_BUILTIN_BSWAP32)
+  return __builtin_bswap32(x);
+#elif defined(__i386__) || defined(__x86_64__)
+  uint32_t swapped_bytes;
+  __asm__ volatile("bswap %0" : "=r"(swapped_bytes) : "0"(x));
+  return swapped_bytes;
+#elif defined(_MSC_VER)
+  return (uint32_t)_byteswap_ulong(x);
+#else
+  return (x >> 24) | ((x >> 8) & 0xff00) | ((x << 8) & 0xff0000) | (x << 24);
+#endif  // HAVE_BUILTIN_BSWAP32
+}
+
+static WEBP_INLINE uint64_t BSwap64(uint64_t x) {
+#if defined(HAVE_BUILTIN_BSWAP64)
+  return __builtin_bswap64(x);
+#elif defined(__x86_64__)
+  uint64_t swapped_bytes;
+  __asm__ volatile("bswapq %0" : "=r"(swapped_bytes) : "0"(x));
+  return swapped_bytes;
+#elif defined(_MSC_VER)
+  return (uint64_t)_byteswap_uint64(x);
+#else  // generic code for swapping 64-bit values (suggested by bdb@)
+  x = ((x & 0xffffffff00000000ull) >> 32) | ((x & 0x00000000ffffffffull) << 32);
+  x = ((x & 0xffff0000ffff0000ull) >> 16) | ((x & 0x0000ffff0000ffffull) << 16);
+  x = ((x & 0xff00ff00ff00ff00ull) >>  8) | ((x & 0x00ff00ff00ff00ffull) <<  8);
+  return x;
+#endif  // HAVE_BUILTIN_BSWAP64
+}
+
+#endif  // WEBP_UTILS_ENDIAN_INL_UTILS_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/utils_filters_utils.c b/vendor/github.com/sizeofint/webpanimation/utils_filters_utils.c
new file mode 100644
index 00000000..86de1155
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/utils_filters_utils.c
@@ -0,0 +1,76 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// filter estimation
+//
+// Author: Urvang (urvang@google.com)
+
+#include "utils_filters_utils.h"
+#include <stdlib.h>
+#include <string.h>
+
+// -----------------------------------------------------------------------------
+// Quick estimate of a potentially interesting filter mode to try.
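+//
+// Worked example (editorial comment, not upstream code): GradientPredictor()
+// below computes a + b - c clipped to [0, 255]; e.g. a=200, b=180, c=100
+// predicts 280, which clips to 255. SDIFF() buckets absolute differences into
+// SMAX=16 coarse bins, and the filter whose residuals land in the
+// smallest-valued bins (lowest score) is the one chosen.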
+ +#define SMAX 16 +#define SDIFF(a, b) (abs((a) - (b)) >> 4) // Scoring diff, in [0..SMAX) + +static WEBP_INLINE int GradientPredictor(uint8_t a, uint8_t b, uint8_t c) { + const int g = a + b - c; + return ((g & ~0xff) == 0) ? g : (g < 0) ? 0 : 255; // clip to 8bit +} + +WEBP_FILTER_TYPE WebPEstimateBestFilter(const uint8_t* data, + int width, int height, int stride) { + int i, j; + int bins[WEBP_FILTER_LAST][SMAX]; + memset(bins, 0, sizeof(bins)); + + // We only sample every other pixels. That's enough. + for (j = 2; j < height - 1; j += 2) { + const uint8_t* const p = data + j * stride; + int mean = p[0]; + for (i = 2; i < width - 1; i += 2) { + const int diff0 = SDIFF(p[i], mean); + const int diff1 = SDIFF(p[i], p[i - 1]); + const int diff2 = SDIFF(p[i], p[i - width]); + const int grad_pred = + GradientPredictor(p[i - 1], p[i - width], p[i - width - 1]); + const int diff3 = SDIFF(p[i], grad_pred); + bins[WEBP_FILTER_NONE][diff0] = 1; + bins[WEBP_FILTER_HORIZONTAL][diff1] = 1; + bins[WEBP_FILTER_VERTICAL][diff2] = 1; + bins[WEBP_FILTER_GRADIENT][diff3] = 1; + mean = (3 * mean + p[i] + 2) >> 2; + } + } + { + int filter; + WEBP_FILTER_TYPE best_filter = WEBP_FILTER_NONE; + int best_score = 0x7fffffff; + for (filter = WEBP_FILTER_NONE; filter < WEBP_FILTER_LAST; ++filter) { + int score = 0; + for (i = 0; i < SMAX; ++i) { + if (bins[filter][i] > 0) { + score += i; + } + } + if (score < best_score) { + best_score = score; + best_filter = (WEBP_FILTER_TYPE)filter; + } + } + return best_filter; + } +} + +#undef SMAX +#undef SDIFF + +//------------------------------------------------------------------------------ diff --git a/vendor/github.com/sizeofint/webpanimation/utils_filters_utils.h b/vendor/github.com/sizeofint/webpanimation/utils_filters_utils.h new file mode 100644 index 00000000..215a275d --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/utils_filters_utils.h @@ -0,0 +1,32 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Spatial prediction using various filters +// +// Author: Urvang (urvang@google.com) + +#ifndef WEBP_UTILS_FILTERS_UTILS_H_ +#define WEBP_UTILS_FILTERS_UTILS_H_ + +#include "webp_types.h" +#include "dsp_dsp.h" + +#ifdef __cplusplus +extern "C" { +#endif + +// Fast estimate of a potentially good filter. +WEBP_FILTER_TYPE WebPEstimateBestFilter(const uint8_t* data, + int width, int height, int stride); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // WEBP_UTILS_FILTERS_UTILS_H_ diff --git a/vendor/github.com/sizeofint/webpanimation/utils_huffman_encode_utils.c b/vendor/github.com/sizeofint/webpanimation/utils_huffman_encode_utils.c new file mode 100644 index 00000000..0d761f51 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/utils_huffman_encode_utils.c @@ -0,0 +1,417 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. 
All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Author: Jyrki Alakuijala (jyrki@google.com)
+//
+// Entropy encoding (Huffman) for webp lossless.
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include "utils_huffman_encode_utils.h"
+#include "utils_utils.h"
+#include "webp_format_constants.h"
+
+// -----------------------------------------------------------------------------
+// Util function to optimize the symbol map for RLE coding
+
+// Heuristics for selecting the stride ranges to collapse.
+static int ValuesShouldBeCollapsedToStrideAverage(int a, int b) {
+  return abs(a - b) < 4;
+}
+
+// Change the population counts in a way that the consequent
+// Huffman tree compression, especially its RLE-part, give smaller output.
+static void OptimizeHuffmanForRle(int length, uint8_t* const good_for_rle,
+                                  uint32_t* const counts) {
+  // 1) Let's make the Huffman code more compatible with rle encoding.
+  int i;
+  for (; length >= 0; --length) {
+    if (length == 0) {
+      return;  // All zeros.
+    }
+    if (counts[length - 1] != 0) {
+      // Now counts[0..length - 1] does not have trailing zeros.
+      break;
+    }
+  }
+  // 2) Let's mark all population counts that already can be encoded
+  // with an rle code.
+  {
+    // Let's not spoil any of the existing good rle codes.
+    // Mark any seq of 0's that is longer as 5 as a good_for_rle.
+    // Mark any seq of non-0's that is longer as 7 as a good_for_rle.
+    uint32_t symbol = counts[0];
+    int stride = 0;
+    for (i = 0; i < length + 1; ++i) {
+      if (i == length || counts[i] != symbol) {
+        if ((symbol == 0 && stride >= 5) ||
+            (symbol != 0 && stride >= 7)) {
+          int k;
+          for (k = 0; k < stride; ++k) {
+            good_for_rle[i - k - 1] = 1;
+          }
+        }
+        stride = 1;
+        if (i != length) {
+          symbol = counts[i];
+        }
+      } else {
+        ++stride;
+      }
+    }
+  }
+  // 3) Let's replace those population counts that lead to more rle codes.
+  {
+    uint32_t stride = 0;
+    uint32_t limit = counts[0];
+    uint32_t sum = 0;
+    for (i = 0; i < length + 1; ++i) {
+      if (i == length || good_for_rle[i] ||
+          (i != 0 && good_for_rle[i - 1]) ||
+          !ValuesShouldBeCollapsedToStrideAverage(counts[i], limit)) {
+        if (stride >= 4 || (stride >= 3 && sum == 0)) {
+          uint32_t k;
+          // The stride must end, collapse what we have, if we have enough (4).
+          uint32_t count = (sum + stride / 2) / stride;
+          if (count < 1) {
+            count = 1;
+          }
+          if (sum == 0) {
+            // Don't make an all zeros stride to be upgraded to ones.
+            count = 0;
+          }
+          for (k = 0; k < stride; ++k) {
+            // We don't want to change value at counts[i],
+            // that is already belonging to the next stride. Thus - 1.
+            counts[i - k - 1] = count;
+          }
+        }
+        stride = 0;
+        sum = 0;
+        if (i < length - 3) {
+          // All interesting strides have a count of at least 4,
+          // at least when non-zeros.
+          limit = (counts[i] + counts[i + 1] +
+                   counts[i + 2] + counts[i + 3] + 2) / 4;
+        } else if (i < length) {
+          limit = counts[i];
+        } else {
+          limit = 0;
+        }
+      }
+      ++stride;
+      if (i != length) {
+        sum += counts[i];
+        if (stride >= 4) {
+          limit = (sum + stride / 2) / stride;
+        }
+      }
+    }
+  }
+}
+
+// A comparer function for two Huffman trees: sorts first by 'total count'
+// (more comes first), and then by 'value' (more comes first).
+static int CompareHuffmanTrees(const void* ptr1, const void* ptr2) { + const HuffmanTree* const t1 = (const HuffmanTree*)ptr1; + const HuffmanTree* const t2 = (const HuffmanTree*)ptr2; + if (t1->total_count_ > t2->total_count_) { + return -1; + } else if (t1->total_count_ < t2->total_count_) { + return 1; + } else { + assert(t1->value_ != t2->value_); + return (t1->value_ < t2->value_) ? -1 : 1; + } +} + +static void SetBitDepths(const HuffmanTree* const tree, + const HuffmanTree* const pool, + uint8_t* const bit_depths, int level) { + if (tree->pool_index_left_ >= 0) { + SetBitDepths(&pool[tree->pool_index_left_], pool, bit_depths, level + 1); + SetBitDepths(&pool[tree->pool_index_right_], pool, bit_depths, level + 1); + } else { + bit_depths[tree->value_] = level; + } +} + +// Create an optimal Huffman tree. +// +// (data,length): population counts. +// tree_limit: maximum bit depth (inclusive) of the codes. +// bit_depths[]: how many bits are used for the symbol. +// +// Returns 0 when an error has occurred. +// +// The catch here is that the tree cannot be arbitrarily deep +// +// count_limit is the value that is to be faked as the minimum value +// and this minimum value is raised until the tree matches the +// maximum length requirement. +// +// This algorithm is not of excellent performance for very long data blocks, +// especially when population counts are longer than 2**tree_limit, but +// we are not planning to use this with extremely long blocks. +// +// See http://en.wikipedia.org/wiki/Huffman_coding +static void GenerateOptimalTree(const uint32_t* const histogram, + int histogram_size, + HuffmanTree* tree, int tree_depth_limit, + uint8_t* const bit_depths) { + uint32_t count_min; + HuffmanTree* tree_pool; + int tree_size_orig = 0; + int i; + + for (i = 0; i < histogram_size; ++i) { + if (histogram[i] != 0) { + ++tree_size_orig; + } + } + + if (tree_size_orig == 0) { // pretty optimal already! + return; + } + + tree_pool = tree + tree_size_orig; + + // For block sizes with less than 64k symbols we never need to do a + // second iteration of this loop. + // If we actually start running inside this loop a lot, we would perhaps + // be better off with the Katajainen algorithm. + assert(tree_size_orig <= (1 << (tree_depth_limit - 1))); + for (count_min = 1; ; count_min *= 2) { + int tree_size = tree_size_orig; + // We need to pack the Huffman tree in tree_depth_limit bits. + // So, we try by faking histogram entries to be at least 'count_min'. + int idx = 0; + int j; + for (j = 0; j < histogram_size; ++j) { + if (histogram[j] != 0) { + const uint32_t count = + (histogram[j] < count_min) ? count_min : histogram[j]; + tree[idx].total_count_ = count; + tree[idx].value_ = j; + tree[idx].pool_index_left_ = -1; + tree[idx].pool_index_right_ = -1; + ++idx; + } + } + + // Build the Huffman tree. + qsort(tree, tree_size, sizeof(*tree), CompareHuffmanTrees); + + if (tree_size > 1) { // Normal case. + int tree_pool_size = 0; + while (tree_size > 1) { // Finish when we have only one root. + uint32_t count; + tree_pool[tree_pool_size++] = tree[tree_size - 1]; + tree_pool[tree_pool_size++] = tree[tree_size - 2]; + count = tree_pool[tree_pool_size - 1].total_count_ + + tree_pool[tree_pool_size - 2].total_count_; + tree_size -= 2; + { + // Search for the insertion point. 
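+          // (editorial note: 'tree' is kept sorted by descending
+          // total_count_, so a linear scan finds the insertion slot)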
+          int k;
+          for (k = 0; k < tree_size; ++k) {
+            if (tree[k].total_count_ <= count) {
+              break;
+            }
+          }
+          memmove(tree + (k + 1), tree + k, (tree_size - k) * sizeof(*tree));
+          tree[k].total_count_ = count;
+          tree[k].value_ = -1;
+
+          tree[k].pool_index_left_ = tree_pool_size - 1;
+          tree[k].pool_index_right_ = tree_pool_size - 2;
+          tree_size = tree_size + 1;
+        }
+      }
+      SetBitDepths(&tree[0], tree_pool, bit_depths, 0);
+    } else if (tree_size == 1) {   // Trivial case: only one element.
+      bit_depths[tree[0].value_] = 1;
+    }
+
+    {
+      // Test if this Huffman tree satisfies our 'tree_depth_limit' criteria.
+      int max_depth = bit_depths[0];
+      for (j = 1; j < histogram_size; ++j) {
+        if (max_depth < bit_depths[j]) {
+          max_depth = bit_depths[j];
+        }
+      }
+      if (max_depth <= tree_depth_limit) {
+        break;
+      }
+    }
+  }
+}
+
+// -----------------------------------------------------------------------------
+// Coding of the Huffman tree values
+
+static HuffmanTreeToken* CodeRepeatedValues(int repetitions,
+                                            HuffmanTreeToken* tokens,
+                                            int value, int prev_value) {
+  assert(value <= MAX_ALLOWED_CODE_LENGTH);
+  if (value != prev_value) {
+    tokens->code = value;
+    tokens->extra_bits = 0;
+    ++tokens;
+    --repetitions;
+  }
+  while (repetitions >= 1) {
+    if (repetitions < 3) {
+      int i;
+      for (i = 0; i < repetitions; ++i) {
+        tokens->code = value;
+        tokens->extra_bits = 0;
+        ++tokens;
+      }
+      break;
+    } else if (repetitions < 7) {
+      tokens->code = 16;
+      tokens->extra_bits = repetitions - 3;
+      ++tokens;
+      break;
+    } else {
+      tokens->code = 16;
+      tokens->extra_bits = 3;
+      ++tokens;
+      repetitions -= 6;
+    }
+  }
+  return tokens;
+}
+
+static HuffmanTreeToken* CodeRepeatedZeros(int repetitions,
+                                           HuffmanTreeToken* tokens) {
+  while (repetitions >= 1) {
+    if (repetitions < 3) {
+      int i;
+      for (i = 0; i < repetitions; ++i) {
+        tokens->code = 0;   // 0-value
+        tokens->extra_bits = 0;
+        ++tokens;
+      }
+      break;
+    } else if (repetitions < 11) {
+      tokens->code = 17;
+      tokens->extra_bits = repetitions - 3;
+      ++tokens;
+      break;
+    } else if (repetitions < 139) {
+      tokens->code = 18;
+      tokens->extra_bits = repetitions - 11;
+      ++tokens;
+      break;
+    } else {
+      tokens->code = 18;
+      tokens->extra_bits = 0x7f;   // 138 repeated 0s
+      ++tokens;
+      repetitions -= 138;
+    }
+  }
+  return tokens;
+}
+
+int VP8LCreateCompressedHuffmanTree(const HuffmanTreeCode* const tree,
+                                    HuffmanTreeToken* tokens, int max_tokens) {
+  HuffmanTreeToken* const starting_token = tokens;
+  HuffmanTreeToken* const ending_token = tokens + max_tokens;
+  const int depth_size = tree->num_symbols;
+  int prev_value = 8;   // 8 is the initial value for rle.
+  int i = 0;
+  assert(tokens != NULL);
+  while (i < depth_size) {
+    const int value = tree->code_lengths[i];
+    int k = i + 1;
+    int runs;
+    while (k < depth_size && tree->code_lengths[k] == value) ++k;
+    runs = k - i;
+    if (value == 0) {
+      tokens = CodeRepeatedZeros(runs, tokens);
+    } else {
+      tokens = CodeRepeatedValues(runs, tokens, value, prev_value);
+      prev_value = value;
+    }
+    i += runs;
+    assert(tokens <= ending_token);
+  }
+  (void)ending_token;   // suppress 'unused variable' warning
+  return (int)(tokens - starting_token);
+}
+
+// -----------------------------------------------------------------------------
+
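
To make the escape codes used by the tokenizer above concrete, here is a
standalone sketch (editorial, not vendored code) restating the ranges from
CodeRepeatedZeros: runs shorter than 3 are emitted as literal zeros, code 17
covers 3..10 zeros (3 extra bits), code 18 covers 11..138 zeros (7 extra bits),
and longer runs are split into 138-zero chunks.

    #include <stdio.h>

    // Prints the (code, extra_bits) tokens for a run of n zero code lengths.
    static void TokenizeZeros(int n) {
      while (n >= 1) {
        if (n < 3) {            // too short for an escape: literal 0s
          printf("%d x (code 0)\n", n);
          break;
        } else if (n < 11) {    // code 17: 3..10 zeros
          printf("(17, %d)\n", n - 3);
          break;
        } else if (n < 139) {   // code 18: 11..138 zeros
          printf("(18, %d)\n", n - 11);
          break;
        } else {                // full 138-zero chunk, then continue
          printf("(18, 127)\n");
          n -= 138;
        }
      }
    }

    int main(void) {
      TokenizeZeros(10);    // -> (17, 7)
      TokenizeZeros(200);   // -> (18, 127) then (18, 51)
      return 0;
    }
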
+// Pre-reversed 4-bit values.
+static const uint8_t kReversedBits[16] = {
+  0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe,
+  0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf
+};
+
+static uint32_t ReverseBits(int num_bits, uint32_t bits) {
+  uint32_t retval = 0;
+  int i = 0;
+  while (i < num_bits) {
+    i += 4;
+    retval |= kReversedBits[bits & 0xf] << (MAX_ALLOWED_CODE_LENGTH + 1 - i);
+    bits >>= 4;
+  }
+  retval >>= (MAX_ALLOWED_CODE_LENGTH + 1 - num_bits);
+  return retval;
+}
+
+// Get the actual bit values for a tree of bit depths.
+static void ConvertBitDepthsToSymbols(HuffmanTreeCode* const tree) {
+  // 0 bit-depth means that the symbol does not exist.
+  int i;
+  int len;
+  uint32_t next_code[MAX_ALLOWED_CODE_LENGTH + 1];
+  int depth_count[MAX_ALLOWED_CODE_LENGTH + 1] = { 0 };
+
+  assert(tree != NULL);
+  len = tree->num_symbols;
+  for (i = 0; i < len; ++i) {
+    const int code_length = tree->code_lengths[i];
+    assert(code_length <= MAX_ALLOWED_CODE_LENGTH);
+    ++depth_count[code_length];
+  }
+  depth_count[0] = 0;   // ignore unused symbol
+  next_code[0] = 0;
+  {
+    uint32_t code = 0;
+    for (i = 1; i <= MAX_ALLOWED_CODE_LENGTH; ++i) {
+      code = (code + depth_count[i - 1]) << 1;
+      next_code[i] = code;
+    }
+  }
+  for (i = 0; i < len; ++i) {
+    const int code_length = tree->code_lengths[i];
+    tree->codes[i] = ReverseBits(code_length, next_code[code_length]++);
+  }
+}
+
+// -----------------------------------------------------------------------------
+// Main entry point
+
+void VP8LCreateHuffmanTree(uint32_t* const histogram, int tree_depth_limit,
+                           uint8_t* const buf_rle,
+                           HuffmanTree* const huff_tree,
+                           HuffmanTreeCode* const huff_code) {
+  const int num_symbols = huff_code->num_symbols;
+  memset(buf_rle, 0, num_symbols * sizeof(*buf_rle));
+  OptimizeHuffmanForRle(num_symbols, buf_rle, histogram);
+  GenerateOptimalTree(histogram, num_symbols, huff_tree, tree_depth_limit,
+                      huff_code->code_lengths);
+  // Create the actual bit codes for the bit lengths.
+  ConvertBitDepthsToSymbols(huff_code);
+}
diff --git a/vendor/github.com/sizeofint/webpanimation/utils_huffman_encode_utils.h b/vendor/github.com/sizeofint/webpanimation/utils_huffman_encode_utils.h
new file mode 100644
index 00000000..23cfdcc7
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/utils_huffman_encode_utils.h
@@ -0,0 +1,60 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Author: Jyrki Alakuijala (jyrki@google.com)
+//
+// Entropy encoding (Huffman) for webp lossless
+
+#ifndef WEBP_UTILS_HUFFMAN_ENCODE_UTILS_H_
+#define WEBP_UTILS_HUFFMAN_ENCODE_UTILS_H_
+
+#include "webp_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Struct for holding the tree header in coded form.
+typedef struct {
+  uint8_t code;         // value (0..15) or escape code (16,17,18)
+  uint8_t extra_bits;   // extra bits for escape codes
+} HuffmanTreeToken;
+
+// Struct to represent the tree codes (depth and bits array).
+typedef struct {
+  int num_symbols;          // Number of symbols.
+  uint8_t* code_lengths;    // Code lengths of the symbols.
+  uint16_t* codes;          // Symbol Codes.
+} HuffmanTreeCode;
+
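
The canonical-code assignment performed by ConvertBitDepthsToSymbols in the .c
file above can be worked through by hand. A standalone sketch (editorial; the
four code lengths are invented) reproducing the next_code[] recurrence:

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_DEPTH 3

    int main(void) {
      // Invented lengths for 4 symbols: s0 = 1 bit, s1 = 2 bits, s2 = s3 = 3.
      const int lengths[4] = { 1, 2, 3, 3 };
      int depth_count[MAX_DEPTH + 1] = { 0 };
      uint32_t next_code[MAX_DEPTH + 1];
      uint32_t code = 0;
      int i;
      for (i = 0; i < 4; ++i) ++depth_count[lengths[i]];
      next_code[0] = 0;
      for (i = 1; i <= MAX_DEPTH; ++i) {
        code = (code + depth_count[i - 1]) << 1;
        next_code[i] = code;
      }
      // Prints codes 0, 2, 6, 7, i.e. 0b0, 0b10, 0b110, 0b111. The library
      // then bit-reverses each code (ReverseBits) for LSB-first output.
      for (i = 0; i < 4; ++i) {
        printf("symbol %d: %d bits, code %u\n", i, lengths[i],
               next_code[lengths[i]]++);
      }
      return 0;
    }
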
+// Struct to represent the Huffman tree.
+typedef struct {
+  uint32_t total_count_;   // Symbol frequency.
+  int value_;              // Symbol value.
+  int pool_index_left_;    // Index for the left sub-tree.
+  int pool_index_right_;   // Index for the right sub-tree.
+} HuffmanTree;
+
+// Turn the Huffman tree into a token sequence.
+// Returns the number of tokens used.
+int VP8LCreateCompressedHuffmanTree(const HuffmanTreeCode* const tree,
+                                    HuffmanTreeToken* tokens, int max_tokens);
+
+// Create an optimized tree, and tokenize it.
+// 'buf_rle' and 'huff_tree' are pre-allocated and the 'tree' is the constructed
+// huffman code tree.
+void VP8LCreateHuffmanTree(uint32_t* const histogram, int tree_depth_limit,
+                           uint8_t* const buf_rle, HuffmanTree* const huff_tree,
+                           HuffmanTreeCode* const tree);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // WEBP_UTILS_HUFFMAN_ENCODE_UTILS_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/utils_huffman_utils.c b/vendor/github.com/sizeofint/webpanimation/utils_huffman_utils.c
new file mode 100644
index 00000000..2081a6ca
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/utils_huffman_utils.c
@@ -0,0 +1,235 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Utilities for building and looking up Huffman trees.
+//
+// Author: Urvang Joshi (urvang@google.com)
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include "utils_huffman_utils.h"
+#include "utils_utils.h"
+#include "webp_format_constants.h"
+
+// Huffman data read via DecodeImageStream is represented in two (red and green)
+// bytes.
+#define MAX_HTREE_GROUPS    0x10000
+
+HTreeGroup* VP8LHtreeGroupsNew(int num_htree_groups) {
+  HTreeGroup* const htree_groups =
+      (HTreeGroup*)WebPSafeMalloc(num_htree_groups, sizeof(*htree_groups));
+  if (htree_groups == NULL) {
+    return NULL;
+  }
+  assert(num_htree_groups <= MAX_HTREE_GROUPS);
+  return htree_groups;
+}
+
+void VP8LHtreeGroupsFree(HTreeGroup* const htree_groups) {
+  if (htree_groups != NULL) {
+    WebPSafeFree(htree_groups);
+  }
+}
+
+// Returns reverse(reverse(key, len) + 1, len), where reverse(key, len) is the
+// bit-wise reversal of the len least significant bits of key.
+static WEBP_INLINE uint32_t GetNextKey(uint32_t key, int len) {
+  uint32_t step = 1 << (len - 1);
+  while (key & step) {
+    step >>= 1;
+  }
+  return step ? (key & (step - 1)) + step : key;
+}
+
+// Stores code in table[0], table[step], table[2*step], ..., table[end].
+// Assumes that end is an integer multiple of step.
+static WEBP_INLINE void ReplicateValue(HuffmanCode* table,
+                                       int step, int end,
+                                       HuffmanCode code) {
+  assert(end % step == 0);
+  do {
+    end -= step;
+    table[end] = code;
+  } while (end > 0);
+}
+
+// Returns the table width of the next 2nd level table. count is the histogram
+// of bit lengths for the remaining symbols, len is the code length of the next
+// processed symbol.
+static WEBP_INLINE int NextTableBitSize(const int* const count,
+                                        int len, int root_bits) {
+  int left = 1 << (len - root_bits);
+  while (len < MAX_ALLOWED_CODE_LENGTH) {
+    left -= count[len];
+    if (left <= 0) break;
+    ++len;
+    left <<= 1;
+  }
+  return len - root_bits;
+}
+
+// sorted[code_lengths_size] is a pre-allocated array for sorting symbols
+// by code length.
+static int BuildHuffmanTable(HuffmanCode* const root_table, int root_bits,
+                             const int code_lengths[], int code_lengths_size,
+                             uint16_t sorted[]) {
+  HuffmanCode* table = root_table;   // next available space in table
+  int total_size = 1 << root_bits;   // total size root table + 2nd level table
+  int len;                           // current code length
+  int symbol;                        // symbol index in original or sorted table
+  // number of codes of each length:
+  int count[MAX_ALLOWED_CODE_LENGTH + 1] = { 0 };
+  // offsets in sorted table for each length:
+  int offset[MAX_ALLOWED_CODE_LENGTH + 1];
+
+  assert(code_lengths_size != 0);
+  assert(code_lengths != NULL);
+  assert((root_table != NULL && sorted != NULL) ||
+         (root_table == NULL && sorted == NULL));
+  assert(root_bits > 0);
+
+  // Build histogram of code lengths.
+  for (symbol = 0; symbol < code_lengths_size; ++symbol) {
+    if (code_lengths[symbol] > MAX_ALLOWED_CODE_LENGTH) {
+      return 0;
+    }
+    ++count[code_lengths[symbol]];
+  }
+
+  // Error, all code lengths are zeros.
+  if (count[0] == code_lengths_size) {
+    return 0;
+  }
+
+  // Generate offsets into sorted symbol table by code length.
+  offset[1] = 0;
+  for (len = 1; len < MAX_ALLOWED_CODE_LENGTH; ++len) {
+    if (count[len] > (1 << len)) {
+      return 0;
+    }
+    offset[len + 1] = offset[len] + count[len];
+  }
+
+  // Sort symbols by length, by symbol order within each length.
+  for (symbol = 0; symbol < code_lengths_size; ++symbol) {
+    const int symbol_code_length = code_lengths[symbol];
+    if (code_lengths[symbol] > 0) {
+      if (sorted != NULL) {
+        sorted[offset[symbol_code_length]++] = symbol;
+      } else {
+        offset[symbol_code_length]++;
+      }
+    }
+  }
+
+  // Special case code with only one value.
+  if (offset[MAX_ALLOWED_CODE_LENGTH] == 1) {
+    if (sorted != NULL) {
+      HuffmanCode code;
+      code.bits = 0;
+      code.value = (uint16_t)sorted[0];
+      ReplicateValue(table, 1, total_size, code);
+    }
+    return total_size;
+  }
+
+  {
+    int step;             // step size to replicate values in current table
+    uint32_t low = -1;    // low bits for current root entry
+    uint32_t mask = total_size - 1;   // mask for low bits
+    uint32_t key = 0;     // reversed prefix code
+    int num_nodes = 1;    // number of Huffman tree nodes
+    int num_open = 1;     // number of open branches in current tree level
+    int table_bits = root_bits;         // key length of current table
+    int table_size = 1 << table_bits;   // size of current table
+    symbol = 0;
+    // Fill in root table.
+    for (len = 1, step = 2; len <= root_bits; ++len, step <<= 1) {
+      num_open <<= 1;
+      num_nodes += num_open;
+      num_open -= count[len];
+      if (num_open < 0) {
+        return 0;
+      }
+      if (root_table == NULL) continue;
+      for (; count[len] > 0; --count[len]) {
+        HuffmanCode code;
+        code.bits = (uint8_t)len;
+        code.value = (uint16_t)sorted[symbol++];
+        ReplicateValue(&table[key], step, table_size, code);
+        key = GetNextKey(key, len);
+      }
+    }
+
+    // Fill in 2nd level tables and add pointers to root table.
+    for (len = root_bits + 1, step = 2; len <= MAX_ALLOWED_CODE_LENGTH;
+         ++len, step <<= 1) {
+      num_open <<= 1;
+      num_nodes += num_open;
+      num_open -= count[len];
+      if (num_open < 0) {
+        return 0;
+      }
+      if (root_table == NULL) continue;
+      for (; count[len] > 0; --count[len]) {
+        HuffmanCode code;
+        if ((key & mask) != low) {
+          table += table_size;
+          table_bits = NextTableBitSize(count, len, root_bits);
+          table_size = 1 << table_bits;
+          total_size += table_size;
+          low = key & mask;
+          root_table[low].bits = (uint8_t)(table_bits + root_bits);
+          root_table[low].value = (uint16_t)((table - root_table) - low);
+        }
+        code.bits = (uint8_t)(len - root_bits);
+        code.value = (uint16_t)sorted[symbol++];
+        ReplicateValue(&table[key >> root_bits], step, table_size, code);
+        key = GetNextKey(key, len);
+      }
+    }
+
+    // Check if tree is full.
+    if (num_nodes != 2 * offset[MAX_ALLOWED_CODE_LENGTH] - 1) {
+      return 0;
+    }
+  }
+
+  return total_size;
+}
+
+// Maximum code_lengths_size is 2328 (reached for 11-bit color_cache_bits).
+// More commonly, the value is around ~280.
+#define MAX_CODE_LENGTHS_SIZE \
+    ((1 << MAX_CACHE_BITS) + NUM_LITERAL_CODES + NUM_LENGTH_CODES)
+// Cut-off value for switching between heap and stack allocation.
+#define SORTED_SIZE_CUTOFF 512
+int VP8LBuildHuffmanTable(HuffmanCode* const root_table, int root_bits,
+                          const int code_lengths[], int code_lengths_size) {
+  int total_size;
+  assert(code_lengths_size <= MAX_CODE_LENGTHS_SIZE);
+  if (root_table == NULL) {
+    total_size = BuildHuffmanTable(NULL, root_bits,
+                                   code_lengths, code_lengths_size, NULL);
+  } else if (code_lengths_size <= SORTED_SIZE_CUTOFF) {
+    // use local stack-allocated array.
+    uint16_t sorted[SORTED_SIZE_CUTOFF];
+    total_size = BuildHuffmanTable(root_table, root_bits,
+                                   code_lengths, code_lengths_size, sorted);
+  } else {   // rare case. Use heap allocation.
+    uint16_t* const sorted =
+        (uint16_t*)WebPSafeMalloc(code_lengths_size, sizeof(*sorted));
+    if (sorted == NULL) return 0;
+    total_size = BuildHuffmanTable(root_table, root_bits,
+                                   code_lengths, code_lengths_size, sorted);
+    WebPSafeFree(sorted);
+  }
+  return total_size;
+}
diff --git a/vendor/github.com/sizeofint/webpanimation/utils_huffman_utils.h b/vendor/github.com/sizeofint/webpanimation/utils_huffman_utils.h
new file mode 100644
index 00000000..6ba65a32
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/utils_huffman_utils.h
@@ -0,0 +1,90 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Utilities for building and looking up Huffman trees.
+//
+// Author: Urvang Joshi (urvang@google.com)
+
+#ifndef WEBP_UTILS_HUFFMAN_UTILS_H_
+#define WEBP_UTILS_HUFFMAN_UTILS_H_
+
+#include <assert.h>
+#include "webp_format_constants.h"
+#include "webp_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define HUFFMAN_TABLE_BITS      8
+#define HUFFMAN_TABLE_MASK      ((1 << HUFFMAN_TABLE_BITS) - 1)
+
+#define LENGTHS_TABLE_BITS      7
+#define LENGTHS_TABLE_MASK      ((1 << LENGTHS_TABLE_BITS) - 1)
+
+
+// Huffman lookup table entry
+typedef struct {
+  uint8_t bits;     // number of bits used for this symbol
+  uint16_t value;   // symbol value or table offset
+} HuffmanCode;
+
+// long version for holding 32b values
+typedef struct {
+  int bits;         // number of bits used for this symbol,
+                    // or an impossible value if not a literal code.
+  uint32_t value;   // 32b packed ARGB value if literal,
+                    // or non-literal symbol otherwise
+} HuffmanCode32;
+
+#define HUFFMAN_PACKED_BITS 6
+#define HUFFMAN_PACKED_TABLE_SIZE (1u << HUFFMAN_PACKED_BITS)
+
+// Huffman table group.
+// Includes special handling for the following cases:
+//  - is_trivial_literal: one common literal base for RED/BLUE/ALPHA (not GREEN)
+//  - is_trivial_code: only 1 code (no bit is read from bitstream)
+//  - use_packed_table: few enough literal symbols, so all the bit codes
+//    can fit into a small look-up table packed_table[]
+// The common literal base, if applicable, is stored in 'literal_arb'.
+typedef struct HTreeGroup HTreeGroup;
+struct HTreeGroup {
+  HuffmanCode* htrees[HUFFMAN_CODES_PER_META_CODE];
+  int is_trivial_literal;   // True, if huffman trees for Red, Blue & Alpha
+                            // symbols are trivial (have a single code).
+  uint32_t literal_arb;     // If is_trivial_literal is true, this is the
+                            // ARGB value of the pixel, with Green channel
+                            // being set to zero.
+  int is_trivial_code;      // true if is_trivial_literal with only one code
+  int use_packed_table;     // use packed table below for short literal code
+  // table mapping input bits to packed values, or escape case to literal code
+  HuffmanCode32 packed_table[HUFFMAN_PACKED_TABLE_SIZE];
+};
+
+// Creates the instance of HTreeGroup with specified number of tree-groups.
+HTreeGroup* VP8LHtreeGroupsNew(int num_htree_groups);
+
+// Releases the memory allocated for HTreeGroup.
+void VP8LHtreeGroupsFree(HTreeGroup* const htree_groups);
+
+// Builds a Huffman lookup table assuming code lengths are in symbol order.
+// 'code_lengths' is a pre-allocated temporary memory buffer used for creating
+// the huffman table.
+// Returns the built table size, or 0 in case of error (invalid tree or
+// memory error).
+// If root_table is NULL, it returns 0 if a lookup cannot be built, something
+// > 0 otherwise (but not the table size).
+int VP8LBuildHuffmanTable(HuffmanCode* const root_table, int root_bits,
+                          const int code_lengths[], int code_lengths_size);
+
+#ifdef __cplusplus
+}    // extern "C"
+#endif
+
+#endif  // WEBP_UTILS_HUFFMAN_UTILS_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/utils_quant_levels_dec_utils.c b/vendor/github.com/sizeofint/webpanimation/utils_quant_levels_dec_utils.c
new file mode 100644
index 00000000..a3b519f0
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/utils_quant_levels_dec_utils.c
@@ -0,0 +1,291 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. 
All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Implement gradient smoothing: we replace a current alpha value by its
+// surrounding average if it's close enough (that is: the change will be less
+// than the minimum distance between two quantized levels).
+// We use a sliding window for computing the 2d moving average.
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include "utils_quant_levels_dec_utils.h"
+
+#include <string.h>   // for memset
+
+#include "utils_utils.h"
+
+// #define USE_DITHERING   // uncomment to enable ordered dithering (not vital)
+
+#define FIX 16      // fix-point precision for averaging
+#define LFIX 2      // extra precision for look-up table
+#define LUT_SIZE ((1 << (8 + LFIX)) - 1)   // look-up table size
+
+#if defined(USE_DITHERING)
+
+#define DFIX 4      // extra precision for ordered dithering
+#define DSIZE 4     // dithering size (must be a power of two)
+// cf. http://en.wikipedia.org/wiki/Ordered_dithering
+static const uint8_t kOrderedDither[DSIZE][DSIZE] = {
+  {  0,  8,  2, 10 },   // coefficients are in DFIX fixed-point precision
+  { 12,  4, 14,  6 },
+  {  3, 11,  1,  9 },
+  { 15,  7, 13,  5 }
+};
+
+#else
+#define DFIX 0
+#endif
+
+typedef struct {
+  int width_, height_;    // dimension
+  int stride_;            // stride in bytes
+  int row_;               // current input row being processed
+  uint8_t* src_;          // input pointer
+  uint8_t* dst_;          // output pointer
+
+  int radius_;            // filter radius (=delay)
+  int scale_;             // normalization factor, in FIX bits precision
+
+  void* mem_;             // all memory
+
+  // various scratch buffers
+  uint16_t* start_;
+  uint16_t* cur_;
+  uint16_t* end_;
+  uint16_t* top_;
+  uint16_t* average_;
+
+  // input levels distribution
+  int num_levels_;        // number of quantized levels
+  int min_, max_;         // min and max level values
+  int min_level_dist_;    // smallest distance between two consecutive levels
+
+  int16_t* correction_;   // size = 1 + 2*LUT_SIZE  -> ~4k memory
+} SmoothParams;
+
+//------------------------------------------------------------------------------
+
+#define CLIP_8b_MASK (int)(~0U << (8 + DFIX))
+static WEBP_INLINE uint8_t clip_8b(int v) {
+  return (!(v & CLIP_8b_MASK)) ? (uint8_t)(v >> DFIX) : (v < 0) ? 0u : 255u;
+}
+#undef CLIP_8b_MASK
+
+// vertical accumulation
+static void VFilter(SmoothParams* const p) {
+  const uint8_t* src = p->src_;
+  const int w = p->width_;
+  uint16_t* const cur = p->cur_;
+  const uint16_t* const top = p->top_;
+  uint16_t* const out = p->end_;
+  uint16_t sum = 0;               // all arithmetic is modulo 16bit
+  int x;
+
+  for (x = 0; x < w; ++x) {
+    uint16_t new_value;
+    sum += src[x];
+    new_value = top[x] + sum;
+    out[x] = new_value - cur[x];  // vertical sum of 'r' pixels.
+    cur[x] = new_value;
+  }
+  // move input pointers one row down
+  p->top_ = p->cur_;
+  p->cur_ += w;
+  if (p->cur_ == p->end_) p->cur_ = p->start_;   // roll-over
+  // We replicate edges, as it's somewhat easier as a boundary condition.
+  // That's why we don't update the 'src' pointer on top/bottom area:
+  if (p->row_ >= 0 && p->row_ < p->height_ - 1) {
+    p->src_ += p->stride_;
+  }
+}
+
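
A quick numeric check (editorial sketch; FIX, LFIX and the 9x9 kernel mirror
the definitions above, the pixel value 200 is invented) of the normalization
used by the two accumulation passes: scale_ = (1 << (FIX + LFIX)) / (R * R), so
multiplying an R x R box sum by it and shifting right by FIX recovers the pixel
value with LFIX extra bits of precision.

    #include <stdio.h>
    #include <stdint.h>

    int main(void) {
      const int FIX_ = 16, LFIX_ = 2, R = 9;   // radius 4 => a 9x9 box
      const uint32_t scale = (1u << (FIX_ + LFIX_)) / (R * R);
      const uint32_t sum = 200u * R * R;       // flat region of value 200
      // Prints 799, i.e. ~(200 << LFIX): the value in LFIX fixed-point.
      printf("%u\n", (uint32_t)(((uint64_t)sum * scale) >> FIX_));
      return 0;
    }
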
+// horizontal accumulation. We use mirror replication of missing pixels, as it's
+// a little easier to implement (surprisingly).
+static void HFilter(SmoothParams* const p) {
+  const uint16_t* const in = p->end_;
+  uint16_t* const out = p->average_;
+  const uint32_t scale = p->scale_;
+  const int w = p->width_;
+  const int r = p->radius_;
+
+  int x;
+  for (x = 0; x <= r; ++x) {   // left mirroring
+    const uint16_t delta = in[x + r - 1] + in[r - x];
+    out[x] = (delta * scale) >> FIX;
+  }
+  for (; x < w - r; ++x) {     // bulk middle run
+    const uint16_t delta = in[x + r] - in[x - r - 1];
+    out[x] = (delta * scale) >> FIX;
+  }
+  for (; x < w; ++x) {         // right mirroring
+    const uint16_t delta =
+        2 * in[w - 1] - in[2 * w - 2 - r - x] - in[x - r - 1];
+    out[x] = (delta * scale) >> FIX;
+  }
+}
+
+// emit one filtered output row
+static void ApplyFilter(SmoothParams* const p) {
+  const uint16_t* const average = p->average_;
+  const int w = p->width_;
+  const int16_t* const correction = p->correction_;
+#if defined(USE_DITHERING)
+  const uint8_t* const dither = kOrderedDither[p->row_ % DSIZE];
+#endif
+  uint8_t* const dst = p->dst_;
+  int x;
+  for (x = 0; x < w; ++x) {
+    const int v = dst[x];
+    if (v < p->max_ && v > p->min_) {
+      const int c = (v << DFIX) + correction[average[x] - (v << LFIX)];
+#if defined(USE_DITHERING)
+      dst[x] = clip_8b(c + dither[x % DSIZE]);
+#else
+      dst[x] = clip_8b(c);
+#endif
+    }
+  }
+  p->dst_ += p->stride_;   // advance output pointer
+}
+
+//------------------------------------------------------------------------------
+// Initialize correction table
+
+static void InitCorrectionLUT(int16_t* const lut, int min_dist) {
+  // The correction curve is:
+  //   f(x) = x for x <= threshold2
+  //   f(x) = 0 for x >= threshold1
+  // and a linear interpolation for range x=[threshold2, threshold1]
+  // (along with f(-x) = -f(x) symmetry).
+  // Note that: threshold2 = 3/4 * threshold1
+  const int threshold1 = min_dist << LFIX;
+  const int threshold2 = (3 * threshold1) >> 2;
+  const int max_threshold = threshold2 << DFIX;
+  const int delta = threshold1 - threshold2;
+  int i;
+  for (i = 1; i <= LUT_SIZE; ++i) {
+    int c = (i <= threshold2) ? (i << DFIX)
+          : (i < threshold1) ? max_threshold * (threshold1 - i) / delta
+          : 0;
+    c >>= LFIX;
+    lut[+i] = +c;
+    lut[-i] = -c;
+  }
+  lut[0] = 0;
+}
+
+static void CountLevels(SmoothParams* const p) {
+  int i, j, last_level;
+  uint8_t used_levels[256] = { 0 };
+  const uint8_t* data = p->src_;
+  p->min_ = 255;
+  p->max_ = 0;
+  for (j = 0; j < p->height_; ++j) {
+    for (i = 0; i < p->width_; ++i) {
+      const int v = data[i];
+      if (v < p->min_) p->min_ = v;
+      if (v > p->max_) p->max_ = v;
+      used_levels[v] = 1;
+    }
+    data += p->stride_;
+  }
+  // Compute the minimum distance between two non-zero levels.
+  p->min_level_dist_ = p->max_ - p->min_;
+  last_level = -1;
+  for (i = 0; i < 256; ++i) {
+    if (used_levels[i]) {
+      ++p->num_levels_;
+      if (last_level >= 0) {
+        const int level_dist = i - last_level;
+        if (level_dist < p->min_level_dist_) {
+          p->min_level_dist_ = level_dist;
+        }
+      }
+      last_level = i;
+    }
+  }
+}
+
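
The correction curve above is easy to tabulate by hand. An editorial sketch
(min_dist = 8 is an invented input; DFIX = 0 as in the non-dithering build)
printing f(x) at a few points, covering the identity segment, the linear ramp
between threshold2 and threshold1, and the zero tail:

    #include <stdio.h>

    int main(void) {
      const int LFIX_ = 2, DFIX_ = 0;
      const int min_dist = 8;              // invented example input
      const int t1 = min_dist << LFIX_;    // threshold1 = 32
      const int t2 = (3 * t1) >> 2;        // threshold2 = 24
      const int max_c = t2 << DFIX_;
      int i;
      for (i = 8; i <= 40; i += 8) {
        const int c = (i <= t2) ? (i << DFIX_)
                    : (i < t1) ? max_c * (t1 - i) / (t1 - t2)
                    : 0;
        printf("f(%d) = %d\n", i, c >> LFIX_);   // 2, 4, 6, 0, 0
      }
      return 0;
    }
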
+// Initialize all params.
+static int InitParams(uint8_t* const data, int width, int height, int stride,
+                      int radius, SmoothParams* const p) {
+  const int R = 2 * radius + 1;   // total size of the kernel
+
+  const size_t size_scratch_m = (R + 1) * width * sizeof(*p->start_);
+  const size_t size_m = width * sizeof(*p->average_);
+  const size_t size_lut = (1 + 2 * LUT_SIZE) * sizeof(*p->correction_);
+  const size_t total_size = size_scratch_m + size_m + size_lut;
+  uint8_t* mem = (uint8_t*)WebPSafeMalloc(1U, total_size);
+
+  if (mem == NULL) return 0;
+  p->mem_ = (void*)mem;
+
+  p->start_ = (uint16_t*)mem;
+  p->cur_ = p->start_;
+  p->end_ = p->start_ + R * width;
+  p->top_ = p->end_ - width;
+  memset(p->top_, 0, width * sizeof(*p->top_));
+  mem += size_scratch_m;
+
+  p->average_ = (uint16_t*)mem;
+  mem += size_m;
+
+  p->width_ = width;
+  p->height_ = height;
+  p->stride_ = stride;
+  p->src_ = data;
+  p->dst_ = data;
+  p->radius_ = radius;
+  p->scale_ = (1 << (FIX + LFIX)) / (R * R);   // normalization constant
+  p->row_ = -radius;
+
+  // analyze the input distribution so we can best-fit the threshold
+  CountLevels(p);
+
+  // correction table
+  p->correction_ = ((int16_t*)mem) + LUT_SIZE;
+  InitCorrectionLUT(p->correction_, p->min_level_dist_);
+
+  return 1;
+}
+
+static void CleanupParams(SmoothParams* const p) {
+  WebPSafeFree(p->mem_);
+}
+
+int WebPDequantizeLevels(uint8_t* const data, int width, int height, int stride,
+                         int strength) {
+  int radius = 4 * strength / 100;
+
+  if (strength < 0 || strength > 100) return 0;
+  if (data == NULL || width <= 0 || height <= 0) return 0;   // bad params
+
+  // limit the filter size to not exceed the image dimensions
+  if (2 * radius + 1 > width) radius = (width - 1) >> 1;
+  if (2 * radius + 1 > height) radius = (height - 1) >> 1;
+
+  if (radius > 0) {
+    SmoothParams p;
+    memset(&p, 0, sizeof(p));
+    if (!InitParams(data, width, height, stride, radius, &p)) return 0;
+    if (p.num_levels_ > 2) {
+      for (; p.row_ < p.height_; ++p.row_) {
+        VFilter(&p);   // accumulate average of input
+        // Need to wait a few rows in order to prime the filter,
+        // before emitting some output.
+        if (p.row_ >= p.radius_) {
+          HFilter(&p);
+          ApplyFilter(&p);
+        }
+      }
+    }
+    CleanupParams(&p);
+  }
+  return 1;
+}
diff --git a/vendor/github.com/sizeofint/webpanimation/utils_quant_levels_dec_utils.h b/vendor/github.com/sizeofint/webpanimation/utils_quant_levels_dec_utils.h
new file mode 100644
index 00000000..6924694e
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/utils_quant_levels_dec_utils.h
@@ -0,0 +1,35 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Alpha plane de-quantization utility
+//
+// Author: Vikas Arora (vikasa@google.com)
+
+#ifndef WEBP_UTILS_QUANT_LEVELS_DEC_UTILS_H_
+#define WEBP_UTILS_QUANT_LEVELS_DEC_UTILS_H_
+
+#include "webp_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Apply post-processing to input 'data' of size 'width'x'height' assuming that
+// the source was quantized to a reduced number of levels. 'stride' is in bytes.
+// Strength is in [0..100] and controls the amount of dithering applied.
+// Returns false in case of error (data is NULL, invalid parameters,
+// malloc failure, ...).
+int WebPDequantizeLevels(uint8_t* const data, int width, int height, int stride,
+                         int strength);
+
+#ifdef __cplusplus
+}    // extern "C"
+#endif
+
+#endif  // WEBP_UTILS_QUANT_LEVELS_DEC_UTILS_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/utils_quant_levels_utils.c b/vendor/github.com/sizeofint/webpanimation/utils_quant_levels_utils.c
new file mode 100644
index 00000000..a0e395c1
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/utils_quant_levels_utils.c
@@ -0,0 +1,140 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Quantize levels for specified number of quantization-levels ([2, 256]).
+// Min and max values are preserved (usual 0 and 255 for alpha plane).
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <assert.h>
+
+#include "utils_quant_levels_utils.h"
+
+#define NUM_SYMBOLS     256
+
+#define MAX_ITER  6              // Maximum number of convergence steps.
+#define ERROR_THRESHOLD 1e-4     // MSE stopping criterion.
+
+// -----------------------------------------------------------------------------
+// Quantize levels.
+
+int QuantizeLevels(uint8_t* const data, int width, int height,
+                   int num_levels, uint64_t* const sse) {
+  int freq[NUM_SYMBOLS] = { 0 };
+  int q_level[NUM_SYMBOLS] = { 0 };
+  double inv_q_level[NUM_SYMBOLS] = { 0 };
+  int min_s = 255, max_s = 0;
+  const size_t data_size = height * width;
+  int i, num_levels_in, iter;
+  double last_err = 1.e38, err = 0.;
+  const double err_threshold = ERROR_THRESHOLD * data_size;
+
+  if (data == NULL) {
+    return 0;
+  }
+
+  if (width <= 0 || height <= 0) {
+    return 0;
+  }
+
+  if (num_levels < 2 || num_levels > 256) {
+    return 0;
+  }
+
+  {
+    size_t n;
+    num_levels_in = 0;
+    for (n = 0; n < data_size; ++n) {
+      num_levels_in += (freq[data[n]] == 0);
+      if (min_s > data[n]) min_s = data[n];
+      if (max_s < data[n]) max_s = data[n];
+      ++freq[data[n]];
+    }
+  }
+
+  if (num_levels_in <= num_levels) goto End;   // nothing to do!
+
+  // Start with uniformly spread centroids.
+  for (i = 0; i < num_levels; ++i) {
+    inv_q_level[i] = min_s + (double)(max_s - min_s) * i / (num_levels - 1);
+  }
+
+  // Fixed values. Won't be changed.
+  q_level[min_s] = 0;
+  q_level[max_s] = num_levels - 1;
+  assert(inv_q_level[0] == min_s);
+  assert(inv_q_level[num_levels - 1] == max_s);
+
+  // k-Means iterations.
+  for (iter = 0; iter < MAX_ITER; ++iter) {
+    double q_sum[NUM_SYMBOLS] = { 0 };
+    double q_count[NUM_SYMBOLS] = { 0 };
+    int s, slot = 0;
+
+    // Assign classes to representatives.
+    for (s = min_s; s <= max_s; ++s) {
+      // Keep track of the nearest neighbour 'slot'
+      while (slot < num_levels - 1 &&
+             2 * s > inv_q_level[slot] + inv_q_level[slot + 1]) {
+        ++slot;
+      }
+      if (freq[s] > 0) {
+        q_sum[slot] += s * freq[s];
+        q_count[slot] += freq[s];
+      }
+      q_level[s] = slot;
+    }
+
+    // Assign new representatives to classes.
+    if (num_levels > 2) {
+      for (slot = 1; slot < num_levels - 1; ++slot) {
+        const double count = q_count[slot];
+        if (count > 0.) {
+          inv_q_level[slot] = q_sum[slot] / count;
+        }
+      }
+    }
+
+    // Compute convergence error.
+    err = 0.;
+    for (s = min_s; s <= max_s; ++s) {
+      const double error = s - inv_q_level[q_level[s]];
+      err += freq[s] * error * error;
+    }
+
+    // Check for convergence: we stop as soon as the error is no
+    // longer improving.
+    if (last_err - err < err_threshold) break;
+    last_err = err;
+  }
+
+  // Remap the alpha plane to quantized values.
+  {
+    // double->int rounding operation can be costly, so we do it
+    // once for all before remapping. We also perform the data[] -> slot
+    // mapping, while at it (avoid one indirection in the final loop).
+    uint8_t map[NUM_SYMBOLS];
+    int s;
+    size_t n;
+    for (s = min_s; s <= max_s; ++s) {
+      const int slot = q_level[s];
+      map[s] = (uint8_t)(inv_q_level[slot] + .5);
+    }
+    // Final pass.
+    for (n = 0; n < data_size; ++n) {
+      data[n] = map[data[n]];
+    }
+  }
+ End:
+  // Store sum of squared error if needed.
+  if (sse != NULL) *sse = (uint64_t)err;
+
+  return 1;
+}
+
diff --git a/vendor/github.com/sizeofint/webpanimation/utils_quant_levels_utils.h b/vendor/github.com/sizeofint/webpanimation/utils_quant_levels_utils.h
new file mode 100644
index 00000000..99d7555b
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/utils_quant_levels_utils.h
@@ -0,0 +1,36 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Alpha plane quantization utility
+//
+// Author: Vikas Arora (vikasa@google.com)
+
+#ifndef WEBP_UTILS_QUANT_LEVELS_UTILS_H_
+#define WEBP_UTILS_QUANT_LEVELS_UTILS_H_
+
+#include <stdlib.h>
+
+#include "webp_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Replace the input 'data' of size 'width'x'height' with 'num-levels'
+// quantized values. If not NULL, 'sse' will contain the sum of squared error.
+// Valid range for 'num_levels' is [2, 256].
+// Returns false in case of error (data is NULL, or parameters are invalid).
+int QuantizeLevels(uint8_t* const data, int width, int height, int num_levels,
+                   uint64_t* const sse);
+
+#ifdef __cplusplus
+}    // extern "C"
+#endif
+
+#endif  // WEBP_UTILS_QUANT_LEVELS_UTILS_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/utils_random_utils.c b/vendor/github.com/sizeofint/webpanimation/utils_random_utils.c
new file mode 100644
index 00000000..4e15d55e
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/utils_random_utils.c
@@ -0,0 +1,43 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Pseudo-random utilities
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <string.h>
+#include "utils_random_utils.h"
+
+//------------------------------------------------------------------------------
+
+// 31b-range values
+static const uint32_t kRandomTable[VP8_RANDOM_TABLE_SIZE] = {
+  0x0de15230, 0x03b31886, 0x775faccb, 0x1c88626a, 0x68385c55, 0x14b3b828,
+  0x4a85fef8, 0x49ddb84b, 0x64fcf397, 0x5c550289, 0x4a290000, 0x0d7ec1da,
+  0x5940b7ab, 0x5492577d, 0x4e19ca72, 0x38d38c69, 0x0c01ee65, 0x32a1755f,
+  0x5437f652, 0x5abb2c32, 0x0faa57b1, 0x73f533e7, 0x685feeda, 0x7563cce2,
+  0x6e990e83, 0x4730a7ed, 0x4fc0d9c6, 0x496b153c, 0x4f1403fa, 0x541afb0c,
+  0x73990b32, 0x26d7cb1c, 0x6fcc3706, 0x2cbb77d8, 0x75762f2a, 0x6425ccdd,
+  0x24b35461, 0x0a7d8715, 0x220414a8, 0x141ebf67, 0x56b41583, 0x73e502e3,
+  0x44cab16f, 0x28264d42, 0x73baaefb, 0x0a50ebed, 0x1d6ab6fb, 0x0d3ad40b,
+  0x35db3b68, 0x2b081e83, 0x77ce6b95, 0x5181e5f0, 0x78853bbc, 0x009f9494,
+  0x27e5ed3c
+};
+
+void VP8InitRandom(VP8Random* const rg, float dithering) {
+  memcpy(rg->tab_, kRandomTable, sizeof(rg->tab_));
+  rg->index1_ = 0;
+  rg->index2_ = 31;
+  rg->amp_ = (dithering < 0.0) ? 0
+           : (dithering > 1.0) ? (1 << VP8_RANDOM_DITHER_FIX)
+           : (uint32_t)((1 << VP8_RANDOM_DITHER_FIX) * dithering);
+}
+
+//------------------------------------------------------------------------------
+
diff --git a/vendor/github.com/sizeofint/webpanimation/utils_random_utils.h b/vendor/github.com/sizeofint/webpanimation/utils_random_utils.h
new file mode 100644
index 00000000..b70a66ac
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/utils_random_utils.h
@@ -0,0 +1,63 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Pseudo-random utilities
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#ifndef WEBP_UTILS_RANDOM_UTILS_H_
+#define WEBP_UTILS_RANDOM_UTILS_H_
+
+#include <assert.h>
+#include "webp_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define VP8_RANDOM_DITHER_FIX 8   // fixed-point precision for dithering
+#define VP8_RANDOM_TABLE_SIZE 55
+
+typedef struct {
+  int index1_, index2_;
+  uint32_t tab_[VP8_RANDOM_TABLE_SIZE];
+  int amp_;
+} VP8Random;
+
+// Initializes random generator with an amplitude 'dithering' in range [0..1].
+void VP8InitRandom(VP8Random* const rg, float dithering);
+
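
A minimal usage sketch (editorial, not vendored code) of this header's
generator: with full-amplitude dithering, each draw below is an integer in
[0, 16), centered on 8 (= 1 << (num_bits - 1)).

    #include <stdio.h>
    #include "utils_random_utils.h"

    int main(void) {
      VP8Random rg;
      int i;
      VP8InitRandom(&rg, 1.0f);                // full-amplitude dithering
      for (i = 0; i < 4; ++i) {
        printf("%d ", VP8RandomBits(&rg, 4));
      }
      printf("\n");
      return 0;
    }
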
+// Returns a centered pseudo-random number with 'num_bits' amplitude.
+// (uses D.Knuth's Difference-based random generator).
+// 'amp' is in VP8_RANDOM_DITHER_FIX fixed-point precision.
+static WEBP_INLINE int VP8RandomBits2(VP8Random* const rg, int num_bits,
+                                      int amp) {
+  int diff;
+  assert(num_bits + VP8_RANDOM_DITHER_FIX <= 31);
+  diff = rg->tab_[rg->index1_] - rg->tab_[rg->index2_];
+  if (diff < 0) diff += (1u << 31);
+  rg->tab_[rg->index1_] = diff;
+  if (++rg->index1_ == VP8_RANDOM_TABLE_SIZE) rg->index1_ = 0;
+  if (++rg->index2_ == VP8_RANDOM_TABLE_SIZE) rg->index2_ = 0;
+  // sign-extend, 0-center
+  diff = (int)((uint32_t)diff << 1) >> (32 - num_bits);
+  diff = (diff * amp) >> VP8_RANDOM_DITHER_FIX;   // restrict range
+  diff += 1 << (num_bits - 1);                    // shift back to 0.5-center
+  return diff;
+}
+
+static WEBP_INLINE int VP8RandomBits(VP8Random* const rg, int num_bits) {
+  return VP8RandomBits2(rg, num_bits, rg->amp_);
+}
+
+#ifdef __cplusplus
+}    // extern "C"
+#endif
+
+#endif  // WEBP_UTILS_RANDOM_UTILS_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/utils_rescaler_utils.c b/vendor/github.com/sizeofint/webpanimation/utils_rescaler_utils.c
new file mode 100644
index 00000000..d28f7d64
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/utils_rescaler_utils.c
@@ -0,0 +1,148 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Rescaling functions
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include "dsp_dsp.h"
+#include "utils_rescaler_utils.h"
+
+//------------------------------------------------------------------------------
+
+void WebPRescalerInit(WebPRescaler* const wrk, int src_width, int src_height,
+                      uint8_t* const dst,
+                      int dst_width, int dst_height, int dst_stride,
+                      int num_channels, rescaler_t* const work) {
+  const int x_add = src_width, x_sub = dst_width;
+  const int y_add = src_height, y_sub = dst_height;
+  wrk->x_expand = (src_width < dst_width);
+  wrk->y_expand = (src_height < dst_height);
+  wrk->src_width = src_width;
+  wrk->src_height = src_height;
+  wrk->dst_width = dst_width;
+  wrk->dst_height = dst_height;
+  wrk->src_y = 0;
+  wrk->dst_y = 0;
+  wrk->dst = dst;
+  wrk->dst_stride = dst_stride;
+  wrk->num_channels = num_channels;
+
+  // for 'x_expand', we use bilinear interpolation
+  wrk->x_add = wrk->x_expand ? (x_sub - 1) : x_add;
+  wrk->x_sub = wrk->x_expand ? (x_add - 1) : x_sub;
+  if (!wrk->x_expand) {   // fx_scale is not used otherwise
+    wrk->fx_scale = WEBP_RESCALER_FRAC(1, wrk->x_sub);
+  }
+  // vertical scaling parameters
+  wrk->y_add = wrk->y_expand ? y_add - 1 : y_add;
+  wrk->y_sub = wrk->y_expand ? y_sub - 1 : y_sub;
+  wrk->y_accum = wrk->y_expand ? wrk->y_sub : wrk->y_add;
+  if (!wrk->y_expand) {
+    // This is WEBP_RESCALER_FRAC(dst_height, x_add * y_add) without the cast.
+    // Its value is <= WEBP_RESCALER_ONE, because dst_height <= wrk->y_add, and
+    // wrk->x_add >= 1;
+    const uint64_t ratio =
+        (uint64_t)dst_height * WEBP_RESCALER_ONE / (wrk->x_add * wrk->y_add);
+    if (ratio != (uint32_t)ratio) {
+      // When ratio == WEBP_RESCALER_ONE, we can't represent the ratio with the
+      // current fixed-point precision. This happens when src_height ==
+      // wrk->y_add (which == src_height), and wrk->x_add == 1.
+      // => We special-case fxy_scale = 0, in WebPRescalerExportRow().
+      wrk->fxy_scale = 0;
+    } else {
+      wrk->fxy_scale = (uint32_t)ratio;
+    }
+    wrk->fy_scale = WEBP_RESCALER_FRAC(1, wrk->y_sub);
+  } else {
+    wrk->fy_scale = WEBP_RESCALER_FRAC(1, wrk->x_add);
+    // wrk->fxy_scale is unused here.
+  }
+  wrk->irow = work;
+  wrk->frow = work + num_channels * dst_width;
+  memset(work, 0, 2 * dst_width * num_channels * sizeof(*work));
+
+  WebPRescalerDspInit();
+}
+
+int WebPRescalerGetScaledDimensions(int src_width, int src_height,
+                                    int* const scaled_width,
+                                    int* const scaled_height) {
+  assert(scaled_width != NULL);
+  assert(scaled_height != NULL);
+  {
+    int width = *scaled_width;
+    int height = *scaled_height;
+
+    // if width is unspecified, scale original proportionally to height ratio.
+    if (width == 0 && src_height > 0) {
+      width =
+          (int)(((uint64_t)src_width * height + src_height - 1) / src_height);
+    }
+    // if height is unspecified, scale original proportionally to width ratio.
+    if (height == 0 && src_width > 0) {
+      height =
+          (int)(((uint64_t)src_height * width + src_width - 1) / src_width);
+    }
+    // Check if the overall dimensions still make sense.
+    if (width <= 0 || height <= 0) {
+      return 0;
+    }
+
+    *scaled_width = width;
+    *scaled_height = height;
+    return 1;
+  }
+}
+
+//------------------------------------------------------------------------------
+// all-in-one calls
+
+int WebPRescaleNeededLines(const WebPRescaler* const wrk, int max_num_lines) {
+  const int num_lines = (wrk->y_accum + wrk->y_sub - 1) / wrk->y_sub;
+  return (num_lines > max_num_lines) ? max_num_lines : num_lines;
+}
+
+int WebPRescalerImport(WebPRescaler* const wrk, int num_lines,
+                       const uint8_t* src, int src_stride) {
+  int total_imported = 0;
+  while (total_imported < num_lines && !WebPRescalerHasPendingOutput(wrk)) {
+    if (wrk->y_expand) {
+      rescaler_t* const tmp = wrk->irow;
+      wrk->irow = wrk->frow;
+      wrk->frow = tmp;
+    }
+    WebPRescalerImportRow(wrk, src);
+    if (!wrk->y_expand) {   // Accumulate the contribution of the new row.
+      int x;
+      for (x = 0; x < wrk->num_channels * wrk->dst_width; ++x) {
+        wrk->irow[x] += wrk->frow[x];
+      }
+    }
+    ++wrk->src_y;
+    src += src_stride;
+    ++total_imported;
+    wrk->y_accum -= wrk->y_sub;
+  }
+  return total_imported;
+}
+
+int WebPRescalerExport(WebPRescaler* const rescaler) {
+  int total_exported = 0;
+  while (WebPRescalerHasPendingOutput(rescaler)) {
+    WebPRescalerExportRow(rescaler);
+    ++total_exported;
+  }
+  return total_exported;
+}
+
+//------------------------------------------------------------------------------
diff --git a/vendor/github.com/sizeofint/webpanimation/utils_rescaler_utils.h b/vendor/github.com/sizeofint/webpanimation/utils_rescaler_utils.h
new file mode 100644
index 00000000..8d1dd09a
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/utils_rescaler_utils.h
@@ -0,0 +1,101 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Rescaling functions
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#ifndef WEBP_UTILS_RESCALER_UTILS_H_
+#define WEBP_UTILS_RESCALER_UTILS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "webp_types.h"
+
+#define WEBP_RESCALER_RFIX 32   // fixed-point precision for multiplies
+#define WEBP_RESCALER_ONE (1ull << WEBP_RESCALER_RFIX)
+#define WEBP_RESCALER_FRAC(x, y) \
+    ((uint32_t)(((uint64_t)(x) << WEBP_RESCALER_RFIX) / (y)))
+
+// Structure used for on-the-fly rescaling
+typedef uint32_t rescaler_t;   // type for side-buffer
+typedef struct WebPRescaler WebPRescaler;
+struct WebPRescaler {
+  int x_expand;                // true if we're expanding in the x direction
+  int y_expand;                // true if we're expanding in the y direction
+  int num_channels;            // bytes to jump between pixels
+  uint32_t fx_scale;           // fixed-point scaling factors
+  uint32_t fy_scale;           // ''
+  uint32_t fxy_scale;          // ''
+  int y_accum;                 // vertical accumulator
+  int y_add, y_sub;            // vertical increments
+  int x_add, x_sub;            // horizontal increments
+  int src_width, src_height;   // source dimensions
+  int dst_width, dst_height;   // destination dimensions
+  int src_y, dst_y;            // row counters for input and output
+  uint8_t* dst;
+  int dst_stride;
+  rescaler_t* irow, *frow;     // work buffer
+};
+
+// Initialize a rescaler given scratch area 'work' and dimensions of src & dst.
+void WebPRescalerInit(WebPRescaler* const rescaler,
+                      int src_width, int src_height,
+                      uint8_t* const dst,
+                      int dst_width, int dst_height, int dst_stride,
+                      int num_channels,
+                      rescaler_t* const work);
+
+// If either 'scaled_width' or 'scaled_height' (but not both) is 0 the value
+// will be calculated preserving the aspect ratio, otherwise the values are
+// left unmodified. Returns true on success, false if either value is 0 after
+// performing the scaling calculation.
+int WebPRescalerGetScaledDimensions(int src_width, int src_height,
+                                    int* const scaled_width,
+                                    int* const scaled_height);
+
+// Returns the number of input lines needed next to produce one output line,
+// considering that the maximum available input lines are 'max_num_lines'.
+int WebPRescaleNeededLines(const WebPRescaler* const rescaler,
+                           int max_num_lines);
+
+// Import multiple rows over all channels, until at least one row is ready to
+// be exported. Returns the actual number of lines that were imported.
+int WebPRescalerImport(WebPRescaler* const rescaler, int num_rows,
+                       const uint8_t* src, int src_stride);
+
+// Export as many rows as possible. Return the number of rows written.
+int WebPRescalerExport(WebPRescaler* const rescaler);
+
+// Return true if input is finished
+static WEBP_INLINE
+int WebPRescalerInputDone(const WebPRescaler* const rescaler) {
+  return (rescaler->src_y >= rescaler->src_height);
+}
+// Return true if output is finished
+static WEBP_INLINE
+int WebPRescalerOutputDone(const WebPRescaler* const rescaler) {
+  return (rescaler->dst_y >= rescaler->dst_height);
+}
+
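
Putting these calls together, a typical driver interleaves WebPRescalerImport()
and WebPRescalerExport() until the input is consumed. A sketch (editorial; the
helper name RescalePlane and its single-channel 8-bit buffers are invented;
WebPSafeMalloc/WebPSafeFree come from utils_utils.h):

    #include "utils_rescaler_utils.h"
    #include "utils_utils.h"

    static int RescalePlane(const uint8_t* src, int src_w, int src_h,
                            uint8_t* dst, int dst_w, int dst_h) {
      WebPRescaler r;
      // The work buffer holds 2 * dst_width * num_channels entries (see the
      // memset() in WebPRescalerInit).
      rescaler_t* const work =
          (rescaler_t*)WebPSafeMalloc(2ULL * dst_w, sizeof(*work));
      if (work == NULL) return 0;
      WebPRescalerInit(&r, src_w, src_h, dst, dst_w, dst_h, dst_w, 1, work);
      while (!WebPRescalerInputDone(&r)) {
        const int needed = WebPRescaleNeededLines(&r, src_h - r.src_y);
        WebPRescalerImport(&r, needed, src + r.src_y * src_w, src_w);
        WebPRescalerExport(&r);   // drain whatever output rows became ready
      }
      WebPSafeFree(work);
      return 1;
    }
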
+// Return true if there are pending output rows ready.
+static WEBP_INLINE
+int WebPRescalerHasPendingOutput(const WebPRescaler* const rescaler) {
+  return !WebPRescalerOutputDone(rescaler) && (rescaler->y_accum <= 0);
+}
+
+//------------------------------------------------------------------------------
+
+#ifdef __cplusplus
+}    // extern "C"
+#endif
+
+#endif  // WEBP_UTILS_RESCALER_UTILS_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/utils_thread_utils.c b/vendor/github.com/sizeofint/webpanimation/utils_thread_utils.c
new file mode 100644
index 00000000..3a6a984f
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/utils_thread_utils.c
@@ -0,0 +1,369 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Multi-threaded worker
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#include <assert.h>
+#include <string.h>   // for memset()
+#include "utils_thread_utils.h"
+#include "utils_utils.h"
+
+#ifdef WEBP_USE_THREAD
+
+#if defined(_WIN32)
+
+#include <windows.h>
+typedef HANDLE pthread_t;
+typedef CRITICAL_SECTION pthread_mutex_t;
+
+#if _WIN32_WINNT >= 0x0600  // Windows Vista / Server 2008 or greater
+#define USE_WINDOWS_CONDITION_VARIABLE
+typedef CONDITION_VARIABLE pthread_cond_t;
+#else
+typedef struct {
+  HANDLE waiting_sem_;
+  HANDLE received_sem_;
+  HANDLE signal_event_;
+} pthread_cond_t;
+#endif  // _WIN32_WINNT >= 0x600
+
+#ifndef WINAPI_FAMILY_PARTITION
+#define WINAPI_PARTITION_DESKTOP 1
+#define WINAPI_FAMILY_PARTITION(x) x
+#endif
+
+#if !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
+#define USE_CREATE_THREAD
+#endif
+
+#else  // !_WIN32
+
+#include <pthread.h>
+
+#endif  // _WIN32
+
+typedef struct {
+  pthread_mutex_t mutex_;
+  pthread_cond_t condition_;
+  pthread_t thread_;
+} WebPWorkerImpl;
+
+#if defined(_WIN32)
+
+//------------------------------------------------------------------------------
+// simplistic pthread emulation layer
+
+#include <process.h>
+
+// _beginthreadex requires __stdcall
+#define THREADFN unsigned int __stdcall
+#define THREAD_RETURN(val) (unsigned int)((DWORD_PTR)val)
+
+#if _WIN32_WINNT >= 0x0501  // Windows XP or greater
+#define WaitForSingleObject(obj, timeout) \
+  WaitForSingleObjectEx(obj, timeout, FALSE /*bAlertable*/)
+#endif
+
+static int pthread_create(pthread_t* const thread, const void* attr,
+                          unsigned int (__stdcall* start)(void*), void* arg) {
+  (void)attr;
+#ifdef USE_CREATE_THREAD
+  *thread = CreateThread(NULL,   /* lpThreadAttributes */
+                         0,      /* dwStackSize */
+                         start,
+                         arg,
+                         0,      /* dwCreationFlags */
+                         NULL);  /* lpThreadId */
+#else
+  *thread = (pthread_t)_beginthreadex(NULL,  /* void *security */
+                                      0,     /* unsigned stack_size */
+                                      start,
+                                      arg,
+                                      0,     /* unsigned initflag */
+                                      NULL); /* unsigned *thrdaddr */
+#endif
+  if (*thread == NULL) return 1;
+  SetThreadPriority(*thread, THREAD_PRIORITY_ABOVE_NORMAL);
+  return 0;
+}
+
+static int pthread_join(pthread_t thread, void** value_ptr) {
+  (void)value_ptr;
+  return (WaitForSingleObject(thread, INFINITE) != WAIT_OBJECT_0 ||
+          CloseHandle(thread) == 0);
+}
+
+// Mutex
+static int pthread_mutex_init(pthread_mutex_t* const mutex, void* mutexattr) {
+  (void)mutexattr;
+#if _WIN32_WINNT >= 0x0600  // Windows Vista / Server 2008 or greater
+  InitializeCriticalSectionEx(mutex, 0 /*dwSpinCount*/, 0 /*Flags*/);
+#else
+  InitializeCriticalSection(mutex);
+#endif
+  return 0;
+}
+
+static int pthread_mutex_lock(pthread_mutex_t* const mutex) {
+  EnterCriticalSection(mutex);
+  return 0;
+}
+
+static int pthread_mutex_unlock(pthread_mutex_t* const mutex) {
+  LeaveCriticalSection(mutex);
+  return 0;
+}
+
+static int pthread_mutex_destroy(pthread_mutex_t* const mutex) {
+  DeleteCriticalSection(mutex);
+  return 0;
+}
+
+// Condition
+static int pthread_cond_destroy(pthread_cond_t* const condition) {
+  int ok = 1;
+#ifdef USE_WINDOWS_CONDITION_VARIABLE
+  (void)condition;
+#else
+  ok &= (CloseHandle(condition->waiting_sem_) != 0);
+  ok &= (CloseHandle(condition->received_sem_) != 0);
+  ok &= (CloseHandle(condition->signal_event_) != 0);
+#endif
+  return !ok;
+}
+
+static int pthread_cond_init(pthread_cond_t* const condition, void* cond_attr) {
+  (void)cond_attr;
+#ifdef USE_WINDOWS_CONDITION_VARIABLE
+  InitializeConditionVariable(condition);
+#else
+  condition->waiting_sem_ = CreateSemaphore(NULL, 0, 1, NULL);
+  condition->received_sem_ = CreateSemaphore(NULL, 0, 1, NULL);
+  condition->signal_event_ = CreateEvent(NULL, FALSE, FALSE, NULL);
+  if (condition->waiting_sem_ == NULL ||
+      condition->received_sem_ == NULL ||
+      condition->signal_event_ == NULL) {
+    pthread_cond_destroy(condition);
+    return 1;
+  }
+#endif
+  return 0;
+}
+
+static int pthread_cond_signal(pthread_cond_t* const condition) {
+  int ok = 1;
+#ifdef USE_WINDOWS_CONDITION_VARIABLE
+  WakeConditionVariable(condition);
+#else
+  if (WaitForSingleObject(condition->waiting_sem_, 0) == WAIT_OBJECT_0) {
+    // a thread is waiting in pthread_cond_wait: allow it to be notified
+    ok = SetEvent(condition->signal_event_);
+    // wait until the event is consumed so the signaler cannot consume
+    // the event via its own pthread_cond_wait.
+    ok &= (WaitForSingleObject(condition->received_sem_, INFINITE) !=
+           WAIT_OBJECT_0);
+  }
+#endif
+  return !ok;
+}
+
+static int pthread_cond_wait(pthread_cond_t* const condition,
+                             pthread_mutex_t* const mutex) {
+  int ok;
+#ifdef USE_WINDOWS_CONDITION_VARIABLE
+  ok = SleepConditionVariableCS(condition, mutex, INFINITE);
+#else
+  // note that there is a consumer available so the signal isn't dropped in
+  // pthread_cond_signal
+  if (!ReleaseSemaphore(condition->waiting_sem_, 1, NULL)) return 1;
+  // now unlock the mutex so pthread_cond_signal may be issued
+  pthread_mutex_unlock(mutex);
+  ok = (WaitForSingleObject(condition->signal_event_, INFINITE) ==
+        WAIT_OBJECT_0);
+  ok &= ReleaseSemaphore(condition->received_sem_, 1, NULL);
+  pthread_mutex_lock(mutex);
+#endif
+  return !ok;
+}
+
+#else  // !_WIN32
+# define THREADFN void*
+# define THREAD_RETURN(val) val
+#endif  // _WIN32
+
+//------------------------------------------------------------------------------
+
+static THREADFN ThreadLoop(void* ptr) {
+  WebPWorker* const worker = (WebPWorker*)ptr;
+  WebPWorkerImpl* const impl = (WebPWorkerImpl*)worker->impl_;
+  int done = 0;
+  while (!done) {
+    pthread_mutex_lock(&impl->mutex_);
+    while (worker->status_ == OK) {   // wait in idling mode
+      pthread_cond_wait(&impl->condition_, &impl->mutex_);
+    }
+    if (worker->status_ == WORK) {
+      WebPGetWorkerInterface()->Execute(worker);
+      worker->status_ = OK;
+    } else if (worker->status_ == NOT_OK) {   // finish the worker
+      done = 1;
+    }
+    // signal to the main thread that we're done (for Sync())
+    // Note the associated mutex does not need to be held when signaling the
+    // condition. Unlocking the mutex first may improve performance in some
+    // implementations, avoiding the case where the waiting thread can't
+    // reacquire the mutex when woken.
+    pthread_mutex_unlock(&impl->mutex_);
+    pthread_cond_signal(&impl->condition_);
+  }
+  return THREAD_RETURN(NULL);   // Thread is finished
+}
+
+// main thread state control
+static void ChangeState(WebPWorker* const worker, WebPWorkerStatus new_status) {
+  // No-op when attempting to change state on a thread that didn't come up.
+  // Checking status_ without acquiring the lock first would result in a data
+  // race.
+  WebPWorkerImpl* const impl = (WebPWorkerImpl*)worker->impl_;
+  if (impl == NULL) return;
+
+  pthread_mutex_lock(&impl->mutex_);
+  if (worker->status_ >= OK) {
+    // wait for the worker to finish
+    while (worker->status_ != OK) {
+      pthread_cond_wait(&impl->condition_, &impl->mutex_);
+    }
+    // assign new status and release the working thread if needed
+    if (new_status != OK) {
+      worker->status_ = new_status;
+      // Note the associated mutex does not need to be held when signaling the
+      // condition. Unlocking the mutex first may improve performance in some
+      // implementations, avoiding the case where the waiting thread can't
+      // reacquire the mutex when woken.
+      pthread_mutex_unlock(&impl->mutex_);
+      pthread_cond_signal(&impl->condition_);
+      return;
+    }
+  }
+  pthread_mutex_unlock(&impl->mutex_);
+}
+
+#endif  // WEBP_USE_THREAD
+
+//------------------------------------------------------------------------------
+
+static void Init(WebPWorker* const worker) {
+  memset(worker, 0, sizeof(*worker));
+  worker->status_ = NOT_OK;
+}
+
+static int Sync(WebPWorker* const worker) {
+#ifdef WEBP_USE_THREAD
+  ChangeState(worker, OK);
+#endif
+  assert(worker->status_ <= OK);
+  return !worker->had_error;
+}
+
+static int Reset(WebPWorker* const worker) {
+  int ok = 1;
+  worker->had_error = 0;
+  if (worker->status_ < OK) {
+#ifdef WEBP_USE_THREAD
+    WebPWorkerImpl* const impl =
+        (WebPWorkerImpl*)WebPSafeCalloc(1, sizeof(WebPWorkerImpl));
+    worker->impl_ = (void*)impl;
+    if (worker->impl_ == NULL) {
+      return 0;
+    }
+    if (pthread_mutex_init(&impl->mutex_, NULL)) {
+      goto Error;
+    }
+    if (pthread_cond_init(&impl->condition_, NULL)) {
+      pthread_mutex_destroy(&impl->mutex_);
+      goto Error;
+    }
+    pthread_mutex_lock(&impl->mutex_);
+    ok = !pthread_create(&impl->thread_, NULL, ThreadLoop, worker);
+    if (ok) worker->status_ = OK;
+    pthread_mutex_unlock(&impl->mutex_);
+    if (!ok) {
+      pthread_mutex_destroy(&impl->mutex_);
+      pthread_cond_destroy(&impl->condition_);
+ Error:
+      WebPSafeFree(impl);
+      worker->impl_ = NULL;
+      return 0;
+    }
+#else
+    worker->status_ = OK;
+#endif
+  } else if (worker->status_ > OK) {
+    ok = Sync(worker);
+  }
+  assert(!ok || (worker->status_ == OK));
+  return ok;
+}
+
+static void Execute(WebPWorker* const worker) {
+  if (worker->hook != NULL) {
+    worker->had_error |= !worker->hook(worker->data1, worker->data2);
+  }
+}
+
+static void Launch(WebPWorker* const worker) {
+#ifdef WEBP_USE_THREAD
+  ChangeState(worker, WORK);
+#else
+  Execute(worker);
+#endif
+}
+
+static void End(WebPWorker* const worker) {
+#ifdef WEBP_USE_THREAD
+  if (worker->impl_ != NULL) {
+    WebPWorkerImpl* const impl = (WebPWorkerImpl*)worker->impl_;
+    ChangeState(worker, NOT_OK);
+    pthread_join(impl->thread_, NULL);
+    pthread_mutex_destroy(&impl->mutex_);
+    pthread_cond_destroy(&impl->condition_);
+    WebPSafeFree(impl);
+    worker->impl_ = NULL;
+  }
+#else
+  worker->status_ = NOT_OK;
+  assert(worker->impl_ == NULL);
+#endif
+  assert(worker->status_ == NOT_OK);
NOT_OK); +} + +//------------------------------------------------------------------------------ + +static WebPWorkerInterface g_worker_interface = { + Init, Reset, Sync, Launch, Execute, End +}; + +int WebPSetWorkerInterface(const WebPWorkerInterface* const winterface) { + if (winterface == NULL || + winterface->Init == NULL || winterface->Reset == NULL || + winterface->Sync == NULL || winterface->Launch == NULL || + winterface->Execute == NULL || winterface->End == NULL) { + return 0; + } + g_worker_interface = *winterface; + return 1; +} + +const WebPWorkerInterface* WebPGetWorkerInterface(void) { + return &g_worker_interface; +} + +//------------------------------------------------------------------------------ diff --git a/vendor/github.com/sizeofint/webpanimation/utils_thread_utils.h b/vendor/github.com/sizeofint/webpanimation/utils_thread_utils.h new file mode 100644 index 00000000..9639da47 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/utils_thread_utils.h @@ -0,0 +1,90 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Multi-threaded worker +// +// Author: Skal (pascal.massimino@gmail.com) + +#ifndef WEBP_UTILS_THREAD_UTILS_H_ +#define WEBP_UTILS_THREAD_UTILS_H_ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include "webp_types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +// State of the worker thread object +typedef enum { + NOT_OK = 0, // object is unusable + OK, // ready to work + WORK // busy finishing the current task +} WebPWorkerStatus; + +// Function to be called by the worker thread. Takes two opaque pointers as +// arguments (data1 and data2), and should return false in case of error. +typedef int (*WebPWorkerHook)(void*, void*); + +// Synchronization object used to launch job in the worker thread +typedef struct { + void* impl_; // platform-dependent implementation worker details + WebPWorkerStatus status_; + WebPWorkerHook hook; // hook to call + void* data1; // first argument passed to 'hook' + void* data2; // second argument passed to 'hook' + int had_error; // return value of the last call to 'hook' +} WebPWorker; + +// The interface for all thread-worker related functions. All these functions +// must be implemented. +typedef struct { + // Must be called first, before any other method. + void (*Init)(WebPWorker* const worker); + // Must be called to initialize the object and spawn the thread. Re-entrant. + // Will potentially launch the thread. Returns false in case of error. + int (*Reset)(WebPWorker* const worker); + // Makes sure the previous work is finished. Returns true if worker->had_error + // was not set and no error condition was triggered by the working thread. + int (*Sync)(WebPWorker* const worker); + // Triggers the thread to call hook() with data1 and data2 arguments. These + // hook/data1/data2 values can be changed at any time before calling this + // function, but not be changed afterward until the next call to Sync(). + void (*Launch)(WebPWorker* const worker); + // This function is similar to Launch() except that it calls the + // hook directly instead of using a thread. 
Convenient to bypass the thread + // mechanism while still using the WebPWorker structs. Sync() must + // still be called afterward (for error reporting). + void (*Execute)(WebPWorker* const worker); + // Kill the thread and terminate the object. To use the object again, one + // must call Reset() again. + void (*End)(WebPWorker* const worker); +} WebPWorkerInterface; + +// Install a new set of threading functions, overriding the defaults. This +// should be done before any workers are started, i.e., before any encoding or +// decoding takes place. The contents of the interface struct are copied, it +// is safe to free the corresponding memory after this call. This function is +// not thread-safe. Return false in case of invalid pointer or methods. +WEBP_EXTERN int WebPSetWorkerInterface( + const WebPWorkerInterface* const winterface); + +// Retrieve the currently set thread worker interface. +WEBP_EXTERN const WebPWorkerInterface* WebPGetWorkerInterface(void); + +//------------------------------------------------------------------------------ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // WEBP_UTILS_THREAD_UTILS_H_ diff --git a/vendor/github.com/sizeofint/webpanimation/utils_utils.c b/vendor/github.com/sizeofint/webpanimation/utils_utils.c new file mode 100644 index 00000000..270ffab6 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/utils_utils.c @@ -0,0 +1,335 @@ +// Copyright 2012 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Misc. common utility functions +// +// Author: Skal (pascal.massimino@gmail.com) + +#include +#include // for memcpy() +#include "webp_decode.h" +#include "webp_encode.h" +#include "webp_format_constants.h" +#include "utils_color_cache_utils.h" +#include "utils_utils.h" + +// If PRINT_MEM_INFO is defined, extra info (like total memory used, number of +// alloc/free etc) is printed. For debugging/tuning purpose only (it's slow, +// and not multi-thread safe!). +// An interesting alternative is valgrind's 'massif' tool: +// http://valgrind.org/docs/manual/ms-manual.html +// Here is an example command line: +/* valgrind --tool=massif --massif-out-file=massif.out \ + --stacks=yes --alloc-fn=WebPSafeMalloc --alloc-fn=WebPSafeCalloc + ms_print massif.out +*/ +// In addition: +// * if PRINT_MEM_TRAFFIC is defined, all the details of the malloc/free cycles +// are printed. +// * if MALLOC_FAIL_AT is defined, the global environment variable +// $MALLOC_FAIL_AT is used to simulate a memory error when calloc or malloc +// is called for the nth time. Example usage: +// export MALLOC_FAIL_AT=50 && ./examples/cwebp input.png +// * if MALLOC_LIMIT is defined, the global environment variable $MALLOC_LIMIT +// sets the maximum amount of memory (in bytes) made available to libwebp. +// This can be used to emulate environment with very limited memory. 
+// Example: export MALLOC_LIMIT=64000000 && ./examples/dwebp picture.webp + +// #define PRINT_MEM_INFO +// #define PRINT_MEM_TRAFFIC +// #define MALLOC_FAIL_AT +// #define MALLOC_LIMIT + +//------------------------------------------------------------------------------ +// Checked memory allocation + +#if defined(PRINT_MEM_INFO) + +#include + +static int num_malloc_calls = 0; +static int num_calloc_calls = 0; +static int num_free_calls = 0; +static int countdown_to_fail = 0; // 0 = off + +typedef struct MemBlock MemBlock; +struct MemBlock { + void* ptr_; + size_t size_; + MemBlock* next_; +}; + +static MemBlock* all_blocks = NULL; +static size_t total_mem = 0; +static size_t total_mem_allocated = 0; +static size_t high_water_mark = 0; +static size_t mem_limit = 0; + +static int exit_registered = 0; + +static void PrintMemInfo(void) { + fprintf(stderr, "\nMEMORY INFO:\n"); + fprintf(stderr, "num calls to: malloc = %4d\n", num_malloc_calls); + fprintf(stderr, " calloc = %4d\n", num_calloc_calls); + fprintf(stderr, " free = %4d\n", num_free_calls); + fprintf(stderr, "total_mem: %u\n", (uint32_t)total_mem); + fprintf(stderr, "total_mem allocated: %u\n", (uint32_t)total_mem_allocated); + fprintf(stderr, "high-water mark: %u\n", (uint32_t)high_water_mark); + while (all_blocks != NULL) { + MemBlock* b = all_blocks; + all_blocks = b->next_; + free(b); + } +} + +static void Increment(int* const v) { + if (!exit_registered) { +#if defined(MALLOC_FAIL_AT) + { + const char* const malloc_fail_at_str = getenv("MALLOC_FAIL_AT"); + if (malloc_fail_at_str != NULL) { + countdown_to_fail = atoi(malloc_fail_at_str); + } + } +#endif +#if defined(MALLOC_LIMIT) + { + const char* const malloc_limit_str = getenv("MALLOC_LIMIT"); + if (malloc_limit_str != NULL) { + mem_limit = atoi(malloc_limit_str); + } + } +#endif + (void)countdown_to_fail; + (void)mem_limit; + atexit(PrintMemInfo); + exit_registered = 1; + } + ++*v; +} + +static void AddMem(void* ptr, size_t size) { + if (ptr != NULL) { + MemBlock* const b = (MemBlock*)malloc(sizeof(*b)); + if (b == NULL) abort(); + b->next_ = all_blocks; + all_blocks = b; + b->ptr_ = ptr; + b->size_ = size; + total_mem += size; + total_mem_allocated += size; +#if defined(PRINT_MEM_TRAFFIC) +#if defined(MALLOC_FAIL_AT) + fprintf(stderr, "fail-count: %5d [mem=%u]\n", + num_malloc_calls + num_calloc_calls, (uint32_t)total_mem); +#else + fprintf(stderr, "Mem: %u (+%u)\n", (uint32_t)total_mem, (uint32_t)size); +#endif +#endif + if (total_mem > high_water_mark) high_water_mark = total_mem; + } +} + +static void SubMem(void* ptr) { + if (ptr != NULL) { + MemBlock** b = &all_blocks; + // Inefficient search, but that's just for debugging. + while (*b != NULL && (*b)->ptr_ != ptr) b = &(*b)->next_; + if (*b == NULL) { + fprintf(stderr, "Invalid pointer free! (%p)\n", ptr); + abort(); + } + { + MemBlock* const block = *b; + *b = block->next_; + total_mem -= block->size_; +#if defined(PRINT_MEM_TRAFFIC) + fprintf(stderr, "Mem: %u (-%u)\n", + (uint32_t)total_mem, (uint32_t)block->size_); +#endif + free(block); + } + } +} + +#else +#define Increment(v) do {} while (0) +#define AddMem(p, s) do {} while (0) +#define SubMem(p) do {} while (0) +#endif + +// Returns 0 in case of overflow of nmemb * size. 
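+// For instance (an illustrative figure, not taken from this file): on a
+// 32-bit target the request below asks for 2^33 bytes, which no longer fits
+// in a size_t, so the check must refuse it rather than let the
+// multiplication wrap:
+//   uint32_t* const p = (uint32_t*)WebPSafeMalloc(1ULL << 31, sizeof(*p));
+//   assert(p == NULL);  // refused: nmemb * size overflows on 32-bit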
+static int CheckSizeArgumentsOverflow(uint64_t nmemb, size_t size) { + const uint64_t total_size = nmemb * size; + if (nmemb == 0) return 1; + if ((uint64_t)size > WEBP_MAX_ALLOCABLE_MEMORY / nmemb) return 0; + if (total_size != (size_t)total_size) return 0; +#if defined(PRINT_MEM_INFO) && defined(MALLOC_FAIL_AT) + if (countdown_to_fail > 0 && --countdown_to_fail == 0) { + return 0; // fake fail! + } +#endif +#if defined(MALLOC_LIMIT) + if (mem_limit > 0) { + const uint64_t new_total_mem = (uint64_t)total_mem + total_size; + if (new_total_mem != (size_t)new_total_mem || + new_total_mem > mem_limit) { + return 0; // fake fail! + } + } +#endif + + return 1; +} + +void* WebPSafeMalloc(uint64_t nmemb, size_t size) { + void* ptr; + Increment(&num_malloc_calls); + if (!CheckSizeArgumentsOverflow(nmemb, size)) return NULL; + assert(nmemb * size > 0); + ptr = malloc((size_t)(nmemb * size)); + AddMem(ptr, (size_t)(nmemb * size)); + return ptr; +} + +void* WebPSafeCalloc(uint64_t nmemb, size_t size) { + void* ptr; + Increment(&num_calloc_calls); + if (!CheckSizeArgumentsOverflow(nmemb, size)) return NULL; + assert(nmemb * size > 0); + ptr = calloc((size_t)nmemb, size); + AddMem(ptr, (size_t)(nmemb * size)); + return ptr; +} + +void WebPSafeFree(void* const ptr) { + if (ptr != NULL) { + Increment(&num_free_calls); + SubMem(ptr); + } + free(ptr); +} + +// Public API functions. + +void* WebPMalloc(size_t size) { + return WebPSafeMalloc(1, size); +} + +void WebPFree(void* ptr) { + WebPSafeFree(ptr); +} + +//------------------------------------------------------------------------------ + +void WebPCopyPlane(const uint8_t* src, int src_stride, + uint8_t* dst, int dst_stride, int width, int height) { + assert(src != NULL && dst != NULL); + assert(abs(src_stride) >= width && abs(dst_stride) >= width); + while (height-- > 0) { + memcpy(dst, src, width); + src += src_stride; + dst += dst_stride; + } +} + +void WebPCopyPixels(const WebPPicture* const src, WebPPicture* const dst) { + assert(src != NULL && dst != NULL); + assert(src->width == dst->width && src->height == dst->height); + assert(src->use_argb && dst->use_argb); + WebPCopyPlane((uint8_t*)src->argb, 4 * src->argb_stride, (uint8_t*)dst->argb, + 4 * dst->argb_stride, 4 * src->width, src->height); +} + +//------------------------------------------------------------------------------ + +#define COLOR_HASH_SIZE (MAX_PALETTE_SIZE * 4) +#define COLOR_HASH_RIGHT_SHIFT 22 // 32 - log2(COLOR_HASH_SIZE). + +int WebPGetColorPalette(const WebPPicture* const pic, uint32_t* const palette) { + int i; + int x, y; + int num_colors = 0; + uint8_t in_use[COLOR_HASH_SIZE] = { 0 }; + uint32_t colors[COLOR_HASH_SIZE]; + const uint32_t* argb = pic->argb; + const int width = pic->width; + const int height = pic->height; + uint32_t last_pix = ~argb[0]; // so we're sure that last_pix != argb[0] + assert(pic != NULL); + assert(pic->use_argb); + + for (y = 0; y < height; ++y) { + for (x = 0; x < width; ++x) { + int key; + if (argb[x] == last_pix) { + continue; + } + last_pix = argb[x]; + key = VP8LHashPix(last_pix, COLOR_HASH_RIGHT_SHIFT); + while (1) { + if (!in_use[key]) { + colors[key] = last_pix; + in_use[key] = 1; + ++num_colors; + if (num_colors > MAX_PALETTE_SIZE) { + return MAX_PALETTE_SIZE + 1; // Exact count not needed. + } + break; + } else if (colors[key] == last_pix) { + break; // The color is already there. + } else { + // Some other color sits here, so do linear conflict resolution. + ++key; + key &= (COLOR_HASH_SIZE - 1); // Key mask. 
+ } + } + } + argb += pic->argb_stride; + } + + if (palette != NULL) { // Fill the colors into palette. + num_colors = 0; + for (i = 0; i < COLOR_HASH_SIZE; ++i) { + if (in_use[i]) { + palette[num_colors] = colors[i]; + ++num_colors; + } + } + } + return num_colors; +} + +#undef COLOR_HASH_SIZE +#undef COLOR_HASH_RIGHT_SHIFT + +//------------------------------------------------------------------------------ + +#if defined(WEBP_NEED_LOG_TABLE_8BIT) +const uint8_t WebPLogTable8bit[256] = { // 31 ^ clz(i) + 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 +}; +#endif + +//------------------------------------------------------------------------------ diff --git a/vendor/github.com/sizeofint/webpanimation/utils_utils.h b/vendor/github.com/sizeofint/webpanimation/utils_utils.h new file mode 100644 index 00000000..fb2876b1 --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/utils_utils.h @@ -0,0 +1,178 @@ +// Copyright 2012 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Misc. common utility functions +// +// Authors: Skal (pascal.massimino@gmail.com) +// Urvang (urvang@google.com) + +#ifndef WEBP_UTILS_UTILS_H_ +#define WEBP_UTILS_UTILS_H_ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include +#include + +#include "dsp_dsp.h" +#include "webp_types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +//------------------------------------------------------------------------------ +// Memory allocation + +// This is the maximum memory amount that libwebp will ever try to allocate. +#ifndef WEBP_MAX_ALLOCABLE_MEMORY +#if SIZE_MAX > (1ULL << 34) +#define WEBP_MAX_ALLOCABLE_MEMORY (1ULL << 34) +#else +// For 32-bit targets keep this below INT_MAX to avoid valgrind warnings. +#define WEBP_MAX_ALLOCABLE_MEMORY ((1ULL << 31) - (1 << 16)) +#endif +#endif // WEBP_MAX_ALLOCABLE_MEMORY + +// size-checking safe malloc/calloc: verify that the requested size is not too +// large, or return NULL. You don't need to call these for constructs like +// malloc(sizeof(foo)), but only if there's picture-dependent size involved +// somewhere (like: malloc(num_pixels * sizeof(*something))). That's why this +// safe malloc() borrows the signature from calloc(), pointing at the dangerous +// underlying multiply involved. 
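+// A typical (illustrative) call site following that pattern, with the
+// picture-dependent multiply routed through the checked signature below
+// ('width' and 'height' standing in for picture dimensions):
+//   uint32_t* const argb =
+//       (uint32_t*)WebPSafeMalloc((uint64_t)width * height, sizeof(*argb));
+//   if (argb == NULL) return 0;  // overflow detected or out of memory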
+WEBP_EXTERN void* WebPSafeMalloc(uint64_t nmemb, size_t size); +// Note that WebPSafeCalloc() expects the second argument type to be 'size_t' +// in order to favor the "calloc(num_foo, sizeof(foo))" pattern. +WEBP_EXTERN void* WebPSafeCalloc(uint64_t nmemb, size_t size); + +// Companion deallocation function to the above allocations. +WEBP_EXTERN void WebPSafeFree(void* const ptr); + +//------------------------------------------------------------------------------ +// Alignment + +#define WEBP_ALIGN_CST 31 +#define WEBP_ALIGN(PTR) (((uintptr_t)(PTR) + WEBP_ALIGN_CST) & ~WEBP_ALIGN_CST) + +#include +// memcpy() is the safe way of moving potentially unaligned 32b memory. +static WEBP_INLINE uint32_t WebPMemToUint32(const uint8_t* const ptr) { + uint32_t A; + memcpy(&A, ptr, sizeof(A)); + return A; +} +static WEBP_INLINE void WebPUint32ToMem(uint8_t* const ptr, uint32_t val) { + memcpy(ptr, &val, sizeof(val)); +} + +//------------------------------------------------------------------------------ +// Reading/writing data. + +// Read 16, 24 or 32 bits stored in little-endian order. +static WEBP_INLINE int GetLE16(const uint8_t* const data) { + return (int)(data[0] << 0) | (data[1] << 8); +} + +static WEBP_INLINE int GetLE24(const uint8_t* const data) { + return GetLE16(data) | (data[2] << 16); +} + +static WEBP_INLINE uint32_t GetLE32(const uint8_t* const data) { + return GetLE16(data) | ((uint32_t)GetLE16(data + 2) << 16); +} + +// Store 16, 24 or 32 bits in little-endian order. +static WEBP_INLINE void PutLE16(uint8_t* const data, int val) { + assert(val < (1 << 16)); + data[0] = (val >> 0) & 0xff; + data[1] = (val >> 8) & 0xff; +} + +static WEBP_INLINE void PutLE24(uint8_t* const data, int val) { + assert(val < (1 << 24)); + PutLE16(data, val & 0xffff); + data[2] = (val >> 16) & 0xff; +} + +static WEBP_INLINE void PutLE32(uint8_t* const data, uint32_t val) { + PutLE16(data, (int)(val & 0xffff)); + PutLE16(data + 2, (int)(val >> 16)); +} + +// Returns (int)floor(log2(n)). n must be > 0. +// use GNU builtins where available. +#if defined(__GNUC__) && \ + ((__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || __GNUC__ >= 4) +static WEBP_INLINE int BitsLog2Floor(uint32_t n) { + return 31 ^ __builtin_clz(n); +} +#elif defined(_MSC_VER) && _MSC_VER > 1310 && \ + (defined(_M_X64) || defined(_M_IX86)) +#include +#pragma intrinsic(_BitScanReverse) + +static WEBP_INLINE int BitsLog2Floor(uint32_t n) { + unsigned long first_set_bit; + _BitScanReverse(&first_set_bit, n); + return first_set_bit; +} +#else // default: use the C-version. +// Returns 31 ^ clz(n) = log2(n). This is the default C-implementation, either +// based on table or not. Can be used as fallback if clz() is not available. +#define WEBP_NEED_LOG_TABLE_8BIT +extern const uint8_t WebPLogTable8bit[256]; +static WEBP_INLINE int WebPLog2FloorC(uint32_t n) { + int log_value = 0; + while (n >= 256) { + log_value += 8; + n >>= 8; + } + return log_value + WebPLogTable8bit[n]; +} + +static WEBP_INLINE int BitsLog2Floor(uint32_t n) { return WebPLog2FloorC(n); } +#endif + +//------------------------------------------------------------------------------ +// Pixel copying. + +struct WebPPicture; + +// Copy width x height pixels from 'src' to 'dst' honoring the strides. +WEBP_EXTERN void WebPCopyPlane(const uint8_t* src, int src_stride, + uint8_t* dst, int dst_stride, + int width, int height); + +// Copy ARGB pixels from 'src' to 'dst' honoring strides. 'src' and 'dst' are +// assumed to be already allocated and using ARGB data. 
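+// Since WebPCopyPlane() above only requires abs(stride) >= width, a negative
+// destination stride may be used; e.g. (illustrative) flipping a plane
+// vertically while copying:
+//   WebPCopyPlane(src, stride, dst + (height - 1) * stride, -stride,
+//                 width, height);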
+WEBP_EXTERN void WebPCopyPixels(const struct WebPPicture* const src, + struct WebPPicture* const dst); + +//------------------------------------------------------------------------------ +// Unique colors. + +// Returns count of unique colors in 'pic', assuming pic->use_argb is true. +// If the unique color count is more than MAX_PALETTE_SIZE, returns +// MAX_PALETTE_SIZE+1. +// If 'palette' is not NULL and number of unique colors is less than or equal to +// MAX_PALETTE_SIZE, also outputs the actual unique colors into 'palette'. +// Note: 'palette' is assumed to be an array already allocated with at least +// MAX_PALETTE_SIZE elements. +WEBP_EXTERN int WebPGetColorPalette(const struct WebPPicture* const pic, + uint32_t* const palette); + +//------------------------------------------------------------------------------ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // WEBP_UTILS_UTILS_H_ diff --git a/vendor/github.com/sizeofint/webpanimation/webp_decode.h b/vendor/github.com/sizeofint/webpanimation/webp_decode.h new file mode 100644 index 00000000..0fe8449f --- /dev/null +++ b/vendor/github.com/sizeofint/webpanimation/webp_decode.h @@ -0,0 +1,503 @@ +// Copyright 2010 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Main decoding functions for WebP images. +// +// Author: Skal (pascal.massimino@gmail.com) + +#ifndef WEBP_WEBP_DECODE_H_ +#define WEBP_WEBP_DECODE_H_ + +#include "webp_types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define WEBP_DECODER_ABI_VERSION 0x0209 // MAJOR(8b) + MINOR(8b) + +// Note: forward declaring enumerations is not allowed in (strict) C and C++, +// the types are left here for reference. +// typedef enum VP8StatusCode VP8StatusCode; +// typedef enum WEBP_CSP_MODE WEBP_CSP_MODE; +typedef struct WebPRGBABuffer WebPRGBABuffer; +typedef struct WebPYUVABuffer WebPYUVABuffer; +typedef struct WebPDecBuffer WebPDecBuffer; +typedef struct WebPIDecoder WebPIDecoder; +typedef struct WebPBitstreamFeatures WebPBitstreamFeatures; +typedef struct WebPDecoderOptions WebPDecoderOptions; +typedef struct WebPDecoderConfig WebPDecoderConfig; + +// Return the decoder's version number, packed in hexadecimal using 8bits for +// each of major/minor/revision. E.g: v2.5.7 is 0x020507. +WEBP_EXTERN int WebPGetDecoderVersion(void); + +// Retrieve basic header information: width, height. +// This function will also validate the header, returning true on success, +// false otherwise. '*width' and '*height' are only valid on successful return. +// Pointers 'width' and 'height' can be passed NULL if deemed irrelevant. +// Note: The following chunk sequences (before the raw VP8/VP8L data) are +// considered valid by this function: +// RIFF + VP8(L) +// RIFF + VP8X + (optional chunks) + VP8(L) +// ALPH + VP8 <-- Not a valid WebP format: only allowed for internal purpose. +// VP8(L) <-- Not a valid WebP format: only allowed for internal purpose. +WEBP_EXTERN int WebPGetInfo(const uint8_t* data, size_t data_size, + int* width, int* height); + +// Decodes WebP images pointed to by 'data' and returns RGBA samples, along +// with the dimensions in *width and *height. 
The ordering of samples in +// memory is R, G, B, A, R, G, B, A... in scan order (endian-independent). +// The returned pointer should be deleted calling WebPFree(). +// Returns NULL in case of error. +WEBP_EXTERN uint8_t* WebPDecodeRGBA(const uint8_t* data, size_t data_size, + int* width, int* height); + +// Same as WebPDecodeRGBA, but returning A, R, G, B, A, R, G, B... ordered data. +WEBP_EXTERN uint8_t* WebPDecodeARGB(const uint8_t* data, size_t data_size, + int* width, int* height); + +// Same as WebPDecodeRGBA, but returning B, G, R, A, B, G, R, A... ordered data. +WEBP_EXTERN uint8_t* WebPDecodeBGRA(const uint8_t* data, size_t data_size, + int* width, int* height); + +// Same as WebPDecodeRGBA, but returning R, G, B, R, G, B... ordered data. +// If the bitstream contains transparency, it is ignored. +WEBP_EXTERN uint8_t* WebPDecodeRGB(const uint8_t* data, size_t data_size, + int* width, int* height); + +// Same as WebPDecodeRGB, but returning B, G, R, B, G, R... ordered data. +WEBP_EXTERN uint8_t* WebPDecodeBGR(const uint8_t* data, size_t data_size, + int* width, int* height); + + +// Decode WebP images pointed to by 'data' to Y'UV format(*). The pointer +// returned is the Y samples buffer. Upon return, *u and *v will point to +// the U and V chroma data. These U and V buffers need NOT be passed to +// WebPFree(), unlike the returned Y luma one. The dimension of the U and V +// planes are both (*width + 1) / 2 and (*height + 1)/ 2. +// Upon return, the Y buffer has a stride returned as '*stride', while U and V +// have a common stride returned as '*uv_stride'. +// Return NULL in case of error. +// (*) Also named Y'CbCr. See: http://en.wikipedia.org/wiki/YCbCr +WEBP_EXTERN uint8_t* WebPDecodeYUV(const uint8_t* data, size_t data_size, + int* width, int* height, + uint8_t** u, uint8_t** v, + int* stride, int* uv_stride); + +// These five functions are variants of the above ones, that decode the image +// directly into a pre-allocated buffer 'output_buffer'. The maximum storage +// available in this buffer is indicated by 'output_buffer_size'. If this +// storage is not sufficient (or an error occurred), NULL is returned. +// Otherwise, output_buffer is returned, for convenience. +// The parameter 'output_stride' specifies the distance (in bytes) +// between scanlines. Hence, output_buffer_size is expected to be at least +// output_stride x picture-height. +WEBP_EXTERN uint8_t* WebPDecodeRGBAInto( + const uint8_t* data, size_t data_size, + uint8_t* output_buffer, size_t output_buffer_size, int output_stride); +WEBP_EXTERN uint8_t* WebPDecodeARGBInto( + const uint8_t* data, size_t data_size, + uint8_t* output_buffer, size_t output_buffer_size, int output_stride); +WEBP_EXTERN uint8_t* WebPDecodeBGRAInto( + const uint8_t* data, size_t data_size, + uint8_t* output_buffer, size_t output_buffer_size, int output_stride); + +// RGB and BGR variants. Here too the transparency information, if present, +// will be dropped and ignored. +WEBP_EXTERN uint8_t* WebPDecodeRGBInto( + const uint8_t* data, size_t data_size, + uint8_t* output_buffer, size_t output_buffer_size, int output_stride); +WEBP_EXTERN uint8_t* WebPDecodeBGRInto( + const uint8_t* data, size_t data_size, + uint8_t* output_buffer, size_t output_buffer_size, int output_stride); + +// WebPDecodeYUVInto() is a variant of WebPDecodeYUV() that operates directly +// into pre-allocated luma/chroma plane buffers. This function requires the +// strides to be passed: one for the luma plane and one for each of the +// chroma ones. 
The size of each plane buffer is passed as 'luma_size', +// 'u_size' and 'v_size' respectively. +// Pointer to the luma plane ('*luma') is returned or NULL if an error occurred +// during decoding (or because some buffers were found to be too small). +WEBP_EXTERN uint8_t* WebPDecodeYUVInto( + const uint8_t* data, size_t data_size, + uint8_t* luma, size_t luma_size, int luma_stride, + uint8_t* u, size_t u_size, int u_stride, + uint8_t* v, size_t v_size, int v_stride); + +//------------------------------------------------------------------------------ +// Output colorspaces and buffer + +// Colorspaces +// Note: the naming describes the byte-ordering of packed samples in memory. +// For instance, MODE_BGRA relates to samples ordered as B,G,R,A,B,G,R,A,... +// Non-capital names (e.g.:MODE_Argb) relates to pre-multiplied RGB channels. +// RGBA-4444 and RGB-565 colorspaces are represented by following byte-order: +// RGBA-4444: [r3 r2 r1 r0 g3 g2 g1 g0], [b3 b2 b1 b0 a3 a2 a1 a0], ... +// RGB-565: [r4 r3 r2 r1 r0 g5 g4 g3], [g2 g1 g0 b4 b3 b2 b1 b0], ... +// In the case WEBP_SWAP_16BITS_CSP is defined, the bytes are swapped for +// these two modes: +// RGBA-4444: [b3 b2 b1 b0 a3 a2 a1 a0], [r3 r2 r1 r0 g3 g2 g1 g0], ... +// RGB-565: [g2 g1 g0 b4 b3 b2 b1 b0], [r4 r3 r2 r1 r0 g5 g4 g3], ... + +typedef enum WEBP_CSP_MODE { + MODE_RGB = 0, MODE_RGBA = 1, + MODE_BGR = 2, MODE_BGRA = 3, + MODE_ARGB = 4, MODE_RGBA_4444 = 5, + MODE_RGB_565 = 6, + // RGB-premultiplied transparent modes (alpha value is preserved) + MODE_rgbA = 7, + MODE_bgrA = 8, + MODE_Argb = 9, + MODE_rgbA_4444 = 10, + // YUV modes must come after RGB ones. + MODE_YUV = 11, MODE_YUVA = 12, // yuv 4:2:0 + MODE_LAST = 13 +} WEBP_CSP_MODE; + +// Some useful macros: +static WEBP_INLINE int WebPIsPremultipliedMode(WEBP_CSP_MODE mode) { + return (mode == MODE_rgbA || mode == MODE_bgrA || mode == MODE_Argb || + mode == MODE_rgbA_4444); +} + +static WEBP_INLINE int WebPIsAlphaMode(WEBP_CSP_MODE mode) { + return (mode == MODE_RGBA || mode == MODE_BGRA || mode == MODE_ARGB || + mode == MODE_RGBA_4444 || mode == MODE_YUVA || + WebPIsPremultipliedMode(mode)); +} + +static WEBP_INLINE int WebPIsRGBMode(WEBP_CSP_MODE mode) { + return (mode < MODE_YUV); +} + +//------------------------------------------------------------------------------ +// WebPDecBuffer: Generic structure for describing the output sample buffer. + +struct WebPRGBABuffer { // view as RGBA + uint8_t* rgba; // pointer to RGBA samples + int stride; // stride in bytes from one scanline to the next. + size_t size; // total size of the *rgba buffer. +}; + +struct WebPYUVABuffer { // view as YUVA + uint8_t* y, *u, *v, *a; // pointer to luma, chroma U/V, alpha samples + int y_stride; // luma stride + int u_stride, v_stride; // chroma strides + int a_stride; // alpha stride + size_t y_size; // luma plane size + size_t u_size, v_size; // chroma planes size + size_t a_size; // alpha-plane size +}; + +// Output buffer +struct WebPDecBuffer { + WEBP_CSP_MODE colorspace; // Colorspace. + int width, height; // Dimensions. + int is_external_memory; // If non-zero, 'internal_memory' pointer is not + // used. If value is '2' or more, the external + // memory is considered 'slow' and multiple + // read/write will be avoided. + union { + WebPRGBABuffer RGBA; + WebPYUVABuffer YUVA; + } u; // Nameless union of buffer parameters. + uint32_t pad[4]; // padding for later use + + uint8_t* private_memory; // Internally allocated memory (only when + // is_external_memory is 0). 
Should not be used + // externally, but accessed via the buffer union. +}; + +// Internal, version-checked, entry point +WEBP_EXTERN int WebPInitDecBufferInternal(WebPDecBuffer*, int); + +// Initialize the structure as empty. Must be called before any other use. +// Returns false in case of version mismatch +static WEBP_INLINE int WebPInitDecBuffer(WebPDecBuffer* buffer) { + return WebPInitDecBufferInternal(buffer, WEBP_DECODER_ABI_VERSION); +} + +// Free any memory associated with the buffer. Must always be called last. +// Note: doesn't free the 'buffer' structure itself. +WEBP_EXTERN void WebPFreeDecBuffer(WebPDecBuffer* buffer); + +//------------------------------------------------------------------------------ +// Enumeration of the status codes + +typedef enum VP8StatusCode { + VP8_STATUS_OK = 0, + VP8_STATUS_OUT_OF_MEMORY, + VP8_STATUS_INVALID_PARAM, + VP8_STATUS_BITSTREAM_ERROR, + VP8_STATUS_UNSUPPORTED_FEATURE, + VP8_STATUS_SUSPENDED, + VP8_STATUS_USER_ABORT, + VP8_STATUS_NOT_ENOUGH_DATA +} VP8StatusCode; + +//------------------------------------------------------------------------------ +// Incremental decoding +// +// This API allows streamlined decoding of partial data. +// Picture can be incrementally decoded as data become available thanks to the +// WebPIDecoder object. This object can be left in a SUSPENDED state if the +// picture is only partially decoded, pending additional input. +// Code example: +// +// WebPInitDecBuffer(&output_buffer); +// output_buffer.colorspace = mode; +// ... +// WebPIDecoder* idec = WebPINewDecoder(&output_buffer); +// while (additional_data_is_available) { +// // ... (get additional data in some new_data[] buffer) +// status = WebPIAppend(idec, new_data, new_data_size); +// if (status != VP8_STATUS_OK && status != VP8_STATUS_SUSPENDED) { +// break; // an error occurred. +// } +// +// // The above call decodes the current available buffer. +// // Part of the image can now be refreshed by calling +// // WebPIDecGetRGB()/WebPIDecGetYUVA() etc. +// } +// WebPIDelete(idec); + +// Creates a new incremental decoder with the supplied buffer parameter. +// This output_buffer can be passed NULL, in which case a default output buffer +// is used (with MODE_RGB). Otherwise, an internal reference to 'output_buffer' +// is kept, which means that the lifespan of 'output_buffer' must be larger than +// that of the returned WebPIDecoder object. +// The supplied 'output_buffer' content MUST NOT be changed between calls to +// WebPIAppend() or WebPIUpdate() unless 'output_buffer.is_external_memory' is +// not set to 0. In such a case, it is allowed to modify the pointers, size and +// stride of output_buffer.u.RGBA or output_buffer.u.YUVA, provided they remain +// within valid bounds. +// All other fields of WebPDecBuffer MUST remain constant between calls. +// Returns NULL if the allocation failed. +WEBP_EXTERN WebPIDecoder* WebPINewDecoder(WebPDecBuffer* output_buffer); + +// This function allocates and initializes an incremental-decoder object, which +// will output the RGB/A samples specified by 'csp' into a preallocated +// buffer 'output_buffer'. The size of this buffer is at least +// 'output_buffer_size' and the stride (distance in bytes between two scanlines) +// is specified by 'output_stride'. +// Additionally, output_buffer can be passed NULL in which case the output +// buffer will be allocated automatically when the decoding starts. The +// colorspace 'csp' is taken into account for allocating this buffer. All other +// parameters are ignored. 
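+// An illustrative setup (not part of the original header; 'width'/'height'
+// are assumed to be known beforehand, e.g. from WebPGetInfo()) decoding into
+// one caller-owned RGBA buffer:
+//   const int stride = 4 * width;
+//   const size_t size = (size_t)stride * height;
+//   uint8_t* const buf = (uint8_t*)malloc(size);
+//   WebPIDecoder* const idec = WebPINewRGB(MODE_RGBA, buf, size, stride);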
+// Returns NULL if the allocation failed, or if some parameters are invalid.
+WEBP_EXTERN WebPIDecoder* WebPINewRGB(
+    WEBP_CSP_MODE csp,
+    uint8_t* output_buffer, size_t output_buffer_size, int output_stride);
+
+// This function allocates and initializes an incremental-decoder object, which
+// will output the raw luma/chroma samples into preallocated planes, if
+// supplied. The luma plane is specified by its pointer 'luma', its size
+// 'luma_size' and its stride 'luma_stride'. Similarly, the chroma-u plane
+// is specified by the 'u', 'u_size' and 'u_stride' parameters, and the chroma-v
+// plane by 'v' and 'v_size'. And same for the alpha-plane. The 'a' pointer
+// can be passed NULL in case one is not interested in the transparency plane.
+// Conversely, 'luma' can be passed NULL if no preallocated planes are supplied.
+// In this case, the output buffer will be automatically allocated (using
+// MODE_YUVA) when decoding starts. All parameters are then ignored.
+// Returns NULL if the allocation failed or if a parameter is invalid.
+WEBP_EXTERN WebPIDecoder* WebPINewYUVA(
+    uint8_t* luma, size_t luma_size, int luma_stride,
+    uint8_t* u, size_t u_size, int u_stride,
+    uint8_t* v, size_t v_size, int v_stride,
+    uint8_t* a, size_t a_size, int a_stride);
+
+// Deprecated version of the above, without the alpha plane.
+// Kept for backward compatibility.
+WEBP_EXTERN WebPIDecoder* WebPINewYUV(
+    uint8_t* luma, size_t luma_size, int luma_stride,
+    uint8_t* u, size_t u_size, int u_stride,
+    uint8_t* v, size_t v_size, int v_stride);
+
+// Deletes the WebPIDecoder object and associated memory. Must always be called
+// if WebPINewDecoder, WebPINewRGB or WebPINewYUV succeeded.
+WEBP_EXTERN void WebPIDelete(WebPIDecoder* idec);
+
+// Copies and decodes the next available data. Returns VP8_STATUS_OK when
+// the image is successfully decoded. Returns VP8_STATUS_SUSPENDED when more
+// data is expected. Returns an error in other cases.
+WEBP_EXTERN VP8StatusCode WebPIAppend(
+    WebPIDecoder* idec, const uint8_t* data, size_t data_size);
+
+// A variant of the above function to be used when the data buffer contains
+// partial data from the beginning. In this case the data buffer is not copied
+// to the internal memory.
+// Note that the value of the 'data' pointer can change between calls to
+// WebPIUpdate, for instance when the data buffer is resized to fit larger data.
+WEBP_EXTERN VP8StatusCode WebPIUpdate(
+    WebPIDecoder* idec, const uint8_t* data, size_t data_size);
+
+// Returns the RGB/A image decoded so far. Returns NULL if output params
+// are not initialized yet. The RGB/A output type corresponds to the colorspace
+// specified during the call to WebPINewDecoder() or WebPINewRGB().
+// *last_y is the index of the last decoded row in raster scan order. Some
+// pointers (*last_y, *width etc.) can be NULL if the corresponding information
+// is not needed. The values in these pointers are only valid on successful
+// (non-NULL) return.
+WEBP_EXTERN uint8_t* WebPIDecGetRGB(
+    const WebPIDecoder* idec, int* last_y,
+    int* width, int* height, int* stride);
+
+// Same as the above function, but for a YUVA image. Returns a pointer to the
+// luma plane or NULL in case of error. If there is no alpha information,
+// the alpha pointer '*a' will be returned as NULL.
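+// An illustrative polling step (all names assumed) after a successful
+// WebPIAppend() call:
+//   int last_y = 0, w = 0, h = 0, stride = 0, uv_stride = 0;
+//   uint8_t *u, *v, *a;
+//   uint8_t* const y = WebPIDecGetYUVA(idec, &last_y, &u, &v, &a,
+//                                      &w, &h, &stride, &uv_stride, NULL);
+//   if (y != NULL) { /* luma rows 0..last_y are decoded so far */ }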
+WEBP_EXTERN uint8_t* WebPIDecGetYUVA(
+    const WebPIDecoder* idec, int* last_y,
+    uint8_t** u, uint8_t** v, uint8_t** a,
+    int* width, int* height, int* stride, int* uv_stride, int* a_stride);
+
+// Deprecated alpha-less version of WebPIDecGetYUVA(): it will ignore the
+// alpha information (if present). Kept for backward compatibility.
+static WEBP_INLINE uint8_t* WebPIDecGetYUV(
+    const WebPIDecoder* idec, int* last_y, uint8_t** u, uint8_t** v,
+    int* width, int* height, int* stride, int* uv_stride) {
+  return WebPIDecGetYUVA(idec, last_y, u, v, NULL, width, height,
+                         stride, uv_stride, NULL);
+}
+
+// Generic call to retrieve information about the displayable area.
+// If non-NULL, the left/top/width/height pointers are filled with the visible
+// rectangular area so far.
+// Returns NULL in case the incremental decoder object is in an invalid state.
+// Otherwise returns the pointer to the internal representation. This structure
+// is read-only, tied to WebPIDecoder's lifespan and should not be modified.
+WEBP_EXTERN const WebPDecBuffer* WebPIDecodedArea(
+    const WebPIDecoder* idec, int* left, int* top, int* width, int* height);
+
+//------------------------------------------------------------------------------
+// Advanced decoding parametrization
+//
+// Code sample for using the advanced decoding API
+/*
+   // A) Init a configuration object
+   WebPDecoderConfig config;
+   CHECK(WebPInitDecoderConfig(&config));
+
+   // B) optional: retrieve the bitstream's features.
+   CHECK(WebPGetFeatures(data, data_size, &config.input) == VP8_STATUS_OK);
+
+   // C) Adjust 'config', if needed
+   config.no_fancy_upsampling = 1;
+   config.output.colorspace = MODE_BGRA;
+   // etc.
+
+   // Note that you can also make config.output point to an externally
+   // supplied memory buffer, provided it's big enough to store the decoded
+   // picture. Otherwise, config.output will just be used to allocate memory
+   // and store the decoded picture.
+
+   // D) Decode!
+   CHECK(WebPDecode(data, data_size, &config) == VP8_STATUS_OK);
+
+   // E) Decoded image is now in config.output (and config.output.u.RGBA)
+
+   // F) Reclaim memory allocated in config's object. It's safe to call
+   //    this function even if the memory is external and wasn't allocated
+   //    by WebPDecode().
+   WebPFreeDecBuffer(&config.output);
+*/
+
+// Features gathered from the bitstream
+struct WebPBitstreamFeatures {
+  int width;          // Width in pixels, as read from the bitstream.
+  int height;         // Height in pixels, as read from the bitstream.
+  int has_alpha;      // True if the bitstream contains an alpha channel.
+  int has_animation;  // True if the bitstream is an animation.
+  int format;         // 0 = undefined (/mixed), 1 = lossy, 2 = lossless
+
+  uint32_t pad[5];    // padding for later use
+};
+
+// Internal, version-checked, entry point
+WEBP_EXTERN VP8StatusCode WebPGetFeaturesInternal(
+    const uint8_t*, size_t, WebPBitstreamFeatures*, int);
+
+// Retrieve features from the bitstream. The *features structure is filled
+// with information gathered from the bitstream.
+// Returns VP8_STATUS_OK when the features are successfully retrieved. Returns
+// VP8_STATUS_NOT_ENOUGH_DATA when more data is needed to retrieve the
+// features from headers. Returns error in other cases.
+// Note: The following chunk sequences (before the raw VP8/VP8L data) are
+// considered valid by this function:
+// RIFF + VP8(L)
+// RIFF + VP8X + (optional chunks) + VP8(L)
+// ALPH + VP8 <-- Not a valid WebP format: only allowed for internal purpose.
+// VP8(L) <-- Not a valid WebP format: only allowed for internal purpose.
+static WEBP_INLINE VP8StatusCode WebPGetFeatures(
+    const uint8_t* data, size_t data_size,
+    WebPBitstreamFeatures* features) {
+  return WebPGetFeaturesInternal(data, data_size, features,
+                                 WEBP_DECODER_ABI_VERSION);
+}
+
+// Decoding options
+struct WebPDecoderOptions {
+  int bypass_filtering;             // if true, skip the in-loop filtering
+  int no_fancy_upsampling;          // if true, use faster pointwise upsampler
+  int use_cropping;                 // if true, cropping is applied _first_
+  int crop_left, crop_top;          // top-left position for cropping.
+                                    // Will be snapped to even values.
+  int crop_width, crop_height;      // dimension of the cropping area
+  int use_scaling;                  // if true, scaling is applied _afterward_
+  int scaled_width, scaled_height;  // final resolution
+  int use_threads;                  // if true, use multi-threaded decoding
+  int dithering_strength;           // dithering strength (0=Off, 100=full)
+  int flip;                         // if true, flip output vertically
+  int alpha_dithering_strength;     // alpha dithering strength in [0..100]
+
+  uint32_t pad[5];                  // padding for later use
+};
+
+// Main object storing the configuration for advanced decoding.
+struct WebPDecoderConfig {
+  WebPBitstreamFeatures input;  // Immutable bitstream features (optional)
+  WebPDecBuffer output;         // Output buffer (can point to external mem)
+  WebPDecoderOptions options;   // Decoding options
+};
+
+// Internal, version-checked, entry point
+WEBP_EXTERN int WebPInitDecoderConfigInternal(WebPDecoderConfig*, int);
+
+// Initialize the configuration as empty. This function must always be
+// called first, unless WebPGetFeatures() is to be called.
+// Returns false in case of mismatched version.
+static WEBP_INLINE int WebPInitDecoderConfig(WebPDecoderConfig* config) {
+  return WebPInitDecoderConfigInternal(config, WEBP_DECODER_ABI_VERSION);
+}
+
+// Instantiate a new incremental decoder object with the requested
+// configuration. The bitstream can be passed using the 'data' and 'data_size'
+// parameters, in which case the features will be parsed and stored into
+// config->input. Otherwise, 'data' can be NULL and no parsing will occur.
+// Note that 'config' can be NULL too, in which case a default configuration
+// is used. If 'config' is not NULL, it must outlive the WebPIDecoder object
+// as some references to its fields will be used. No internal copy of 'config'
+// is made.
+// The returned WebPIDecoder object must always be deleted by calling
+// WebPIDelete().
+// Returns NULL in case of error (and config->status will then reflect
+// the error condition, if available).
+WEBP_EXTERN WebPIDecoder* WebPIDecode(const uint8_t* data, size_t data_size,
+                                      WebPDecoderConfig* config);
+
+// Non-incremental version. This version decodes the full data at once, taking
+// 'config' into account. Returns the decoding status (which should be
+// VP8_STATUS_OK if the decoding was successful). Note that 'config' cannot be
+// NULL.
+WEBP_EXTERN VP8StatusCode WebPDecode(const uint8_t* data, size_t data_size,
+                                     WebPDecoderConfig* config);
+
+#ifdef __cplusplus
+}    // extern "C"
+#endif
+
+#endif  // WEBP_WEBP_DECODE_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/webp_demux.h b/vendor/github.com/sizeofint/webpanimation/webp_demux.h
new file mode 100644
index 00000000..1a882fcb
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/webp_demux.h
@@ -0,0 +1,363 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Demux API. +// Enables extraction of image and extended format data from WebP files. + +// Code Example: Demuxing WebP data to extract all the frames, ICC profile +// and EXIF/XMP metadata. +/* + WebPDemuxer* demux = WebPDemux(&webp_data); + + uint32_t width = WebPDemuxGetI(demux, WEBP_FF_CANVAS_WIDTH); + uint32_t height = WebPDemuxGetI(demux, WEBP_FF_CANVAS_HEIGHT); + // ... (Get information about the features present in the WebP file). + uint32_t flags = WebPDemuxGetI(demux, WEBP_FF_FORMAT_FLAGS); + + // ... (Iterate over all frames). + WebPIterator iter; + if (WebPDemuxGetFrame(demux, 1, &iter)) { + do { + // ... (Consume 'iter'; e.g. Decode 'iter.fragment' with WebPDecode(), + // ... and get other frame properties like width, height, offsets etc. + // ... see 'struct WebPIterator' below for more info). + } while (WebPDemuxNextFrame(&iter)); + WebPDemuxReleaseIterator(&iter); + } + + // ... (Extract metadata). + WebPChunkIterator chunk_iter; + if (flags & ICCP_FLAG) WebPDemuxGetChunk(demux, "ICCP", 1, &chunk_iter); + // ... (Consume the ICC profile in 'chunk_iter.chunk'). + WebPDemuxReleaseChunkIterator(&chunk_iter); + if (flags & EXIF_FLAG) WebPDemuxGetChunk(demux, "EXIF", 1, &chunk_iter); + // ... (Consume the EXIF metadata in 'chunk_iter.chunk'). + WebPDemuxReleaseChunkIterator(&chunk_iter); + if (flags & XMP_FLAG) WebPDemuxGetChunk(demux, "XMP ", 1, &chunk_iter); + // ... (Consume the XMP metadata in 'chunk_iter.chunk'). + WebPDemuxReleaseChunkIterator(&chunk_iter); + WebPDemuxDelete(demux); +*/ + +#ifndef WEBP_WEBP_DEMUX_H_ +#define WEBP_WEBP_DEMUX_H_ + +#include "webp_decode.h" +#include "webp_mux_types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define WEBP_DEMUX_ABI_VERSION 0x0107 // MAJOR(8b) + MINOR(8b) + +// Note: forward declaring enumerations is not allowed in (strict) C and C++, +// the types are left here for reference. +// typedef enum WebPDemuxState WebPDemuxState; +// typedef enum WebPFormatFeature WebPFormatFeature; +typedef struct WebPDemuxer WebPDemuxer; +typedef struct WebPIterator WebPIterator; +typedef struct WebPChunkIterator WebPChunkIterator; +typedef struct WebPAnimInfo WebPAnimInfo; +typedef struct WebPAnimDecoderOptions WebPAnimDecoderOptions; + +//------------------------------------------------------------------------------ + +// Returns the version number of the demux library, packed in hexadecimal using +// 8bits for each of major/minor/revision. E.g: v2.5.7 is 0x020507. +WEBP_EXTERN int WebPGetDemuxVersion(void); + +//------------------------------------------------------------------------------ +// Life of a Demux object + +typedef enum WebPDemuxState { + WEBP_DEMUX_PARSE_ERROR = -1, // An error occurred while parsing. + WEBP_DEMUX_PARSING_HEADER = 0, // Not enough data to parse full header. + WEBP_DEMUX_PARSED_HEADER = 1, // Header parsing complete, + // data may be available. + WEBP_DEMUX_DONE = 2 // Entire file has been parsed. 
+} WebPDemuxState; + +// Internal, version-checked, entry point +WEBP_EXTERN WebPDemuxer* WebPDemuxInternal( + const WebPData*, int, WebPDemuxState*, int); + +// Parses the full WebP file given by 'data'. For single images the WebP file +// header alone or the file header and the chunk header may be absent. +// Returns a WebPDemuxer object on successful parse, NULL otherwise. +static WEBP_INLINE WebPDemuxer* WebPDemux(const WebPData* data) { + return WebPDemuxInternal(data, 0, NULL, WEBP_DEMUX_ABI_VERSION); +} + +// Parses the possibly incomplete WebP file given by 'data'. +// If 'state' is non-NULL it will be set to indicate the status of the demuxer. +// Returns NULL in case of error or if there isn't enough data to start parsing; +// and a WebPDemuxer object on successful parse. +// Note that WebPDemuxer keeps internal pointers to 'data' memory segment. +// If this data is volatile, the demuxer object should be deleted (by calling +// WebPDemuxDelete()) and WebPDemuxPartial() called again on the new data. +// This is usually an inexpensive operation. +static WEBP_INLINE WebPDemuxer* WebPDemuxPartial( + const WebPData* data, WebPDemuxState* state) { + return WebPDemuxInternal(data, 1, state, WEBP_DEMUX_ABI_VERSION); +} + +// Frees memory associated with 'dmux'. +WEBP_EXTERN void WebPDemuxDelete(WebPDemuxer* dmux); + +//------------------------------------------------------------------------------ +// Data/information extraction. + +typedef enum WebPFormatFeature { + WEBP_FF_FORMAT_FLAGS, // bit-wise combination of WebPFeatureFlags + // corresponding to the 'VP8X' chunk (if present). + WEBP_FF_CANVAS_WIDTH, + WEBP_FF_CANVAS_HEIGHT, + WEBP_FF_LOOP_COUNT, // only relevant for animated file + WEBP_FF_BACKGROUND_COLOR, // idem. + WEBP_FF_FRAME_COUNT // Number of frames present in the demux object. + // In case of a partial demux, this is the number + // of frames seen so far, with the last frame + // possibly being partial. +} WebPFormatFeature; + +// Get the 'feature' value from the 'dmux'. +// NOTE: values are only valid if WebPDemux() was used or WebPDemuxPartial() +// returned a state > WEBP_DEMUX_PARSING_HEADER. +// If 'feature' is WEBP_FF_FORMAT_FLAGS, the returned value is a bit-wise +// combination of WebPFeatureFlags values. +// If 'feature' is WEBP_FF_LOOP_COUNT, WEBP_FF_BACKGROUND_COLOR, the returned +// value is only meaningful if the bitstream is animated. +WEBP_EXTERN uint32_t WebPDemuxGetI( + const WebPDemuxer* dmux, WebPFormatFeature feature); + +//------------------------------------------------------------------------------ +// Frame iteration. + +struct WebPIterator { + int frame_num; + int num_frames; // equivalent to WEBP_FF_FRAME_COUNT. + int x_offset, y_offset; // offset relative to the canvas. + int width, height; // dimensions of this frame. + int duration; // display duration in milliseconds. + WebPMuxAnimDispose dispose_method; // dispose method for the frame. + int complete; // true if 'fragment' contains a full frame. partial images + // may still be decoded with the WebP incremental decoder. + WebPData fragment; // The frame given by 'frame_num'. Note for historical + // reasons this is called a fragment. + int has_alpha; // True if the frame contains transparency. + WebPMuxAnimBlend blend_method; // Blend operation for the frame. + + uint32_t pad[2]; // padding for later use. + void* private_; // for internal use only. +}; + +// Retrieves frame 'frame_number' from 'dmux'. +// 'iter->fragment' points to the frame on return from this function. 
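+// An illustrative fetch of the first frame (names assumed), released once
+// consumed:
+//   WebPIterator iter;
+//   if (WebPDemuxGetFrame(demux, 1, &iter)) {
+//     // feed iter.fragment.bytes / iter.fragment.size to a WebP decoder
+//     WebPDemuxReleaseIterator(&iter);
+//   }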
+// Setting 'frame_number' equal to 0 will return the last frame of the image. +// Returns false if 'dmux' is NULL or frame 'frame_number' is not present. +// Call WebPDemuxReleaseIterator() when use of the iterator is complete. +// NOTE: 'dmux' must persist for the lifetime of 'iter'. +WEBP_EXTERN int WebPDemuxGetFrame( + const WebPDemuxer* dmux, int frame_number, WebPIterator* iter); + +// Sets 'iter->fragment' to point to the next ('iter->frame_num' + 1) or +// previous ('iter->frame_num' - 1) frame. These functions do not loop. +// Returns true on success, false otherwise. +WEBP_EXTERN int WebPDemuxNextFrame(WebPIterator* iter); +WEBP_EXTERN int WebPDemuxPrevFrame(WebPIterator* iter); + +// Releases any memory associated with 'iter'. +// Must be called before any subsequent calls to WebPDemuxGetChunk() on the same +// iter. Also, must be called before destroying the associated WebPDemuxer with +// WebPDemuxDelete(). +WEBP_EXTERN void WebPDemuxReleaseIterator(WebPIterator* iter); + +//------------------------------------------------------------------------------ +// Chunk iteration. + +struct WebPChunkIterator { + // The current and total number of chunks with the fourcc given to + // WebPDemuxGetChunk(). + int chunk_num; + int num_chunks; + WebPData chunk; // The payload of the chunk. + + uint32_t pad[6]; // padding for later use + void* private_; +}; + +// Retrieves the 'chunk_number' instance of the chunk with id 'fourcc' from +// 'dmux'. +// 'fourcc' is a character array containing the fourcc of the chunk to return, +// e.g., "ICCP", "XMP ", "EXIF", etc. +// Setting 'chunk_number' equal to 0 will return the last chunk in a set. +// Returns true if the chunk is found, false otherwise. Image related chunk +// payloads are accessed through WebPDemuxGetFrame() and related functions. +// Call WebPDemuxReleaseChunkIterator() when use of the iterator is complete. +// NOTE: 'dmux' must persist for the lifetime of the iterator. +WEBP_EXTERN int WebPDemuxGetChunk(const WebPDemuxer* dmux, + const char fourcc[4], int chunk_number, + WebPChunkIterator* iter); + +// Sets 'iter->chunk' to point to the next ('iter->chunk_num' + 1) or previous +// ('iter->chunk_num' - 1) chunk. These functions do not loop. +// Returns true on success, false otherwise. +WEBP_EXTERN int WebPDemuxNextChunk(WebPChunkIterator* iter); +WEBP_EXTERN int WebPDemuxPrevChunk(WebPChunkIterator* iter); + +// Releases any memory associated with 'iter'. +// Must be called before destroying the associated WebPDemuxer with +// WebPDemuxDelete(). +WEBP_EXTERN void WebPDemuxReleaseChunkIterator(WebPChunkIterator* iter); + +//------------------------------------------------------------------------------ +// WebPAnimDecoder API +// +// This API allows decoding (possibly) animated WebP images. +// +// Code Example: +/* + WebPAnimDecoderOptions dec_options; + WebPAnimDecoderOptionsInit(&dec_options); + // Tune 'dec_options' as needed. + WebPAnimDecoder* dec = WebPAnimDecoderNew(webp_data, &dec_options); + WebPAnimInfo anim_info; + WebPAnimDecoderGetInfo(dec, &anim_info); + for (uint32_t i = 0; i < anim_info.loop_count; ++i) { + while (WebPAnimDecoderHasMoreFrames(dec)) { + uint8_t* buf; + int timestamp; + WebPAnimDecoderGetNext(dec, &buf, ×tamp); + // ... (Render 'buf' based on 'timestamp'). + // ... (Do NOT free 'buf', as it is owned by 'dec'). + } + WebPAnimDecoderReset(dec); + } + const WebPDemuxer* demuxer = WebPAnimDecoderGetDemuxer(dec); + // ... (Do something using 'demuxer'; e.g. get EXIF/XMP/ICC data). 
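+  // ... (When done, delete 'dec'; this also invalidates 'demuxer', which
+  //      is owned by the decoder.)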
+  WebPAnimDecoderDelete(dec);
+*/
+
+typedef struct WebPAnimDecoder WebPAnimDecoder;  // Main opaque object.
+
+// Global options.
+struct WebPAnimDecoderOptions {
+  // Output colorspace. Only the following modes are supported:
+  // MODE_RGBA, MODE_BGRA, MODE_rgbA and MODE_bgrA.
+  WEBP_CSP_MODE color_mode;
+  int use_threads;      // If true, use multi-threaded decoding.
+  uint32_t padding[7];  // Padding for later use.
+};
+
+// Internal, version-checked, entry point.
+WEBP_EXTERN int WebPAnimDecoderOptionsInitInternal(
+    WebPAnimDecoderOptions*, int);
+
+// Should always be called to initialize a fresh WebPAnimDecoderOptions
+// structure before modification. Returns false in case of version mismatch.
+// WebPAnimDecoderOptionsInit() must have succeeded before using the
+// 'dec_options' object.
+static WEBP_INLINE int WebPAnimDecoderOptionsInit(
+    WebPAnimDecoderOptions* dec_options) {
+  return WebPAnimDecoderOptionsInitInternal(dec_options,
+                                            WEBP_DEMUX_ABI_VERSION);
+}
+
+// Internal, version-checked, entry point.
+WEBP_EXTERN WebPAnimDecoder* WebPAnimDecoderNewInternal(
+    const WebPData*, const WebPAnimDecoderOptions*, int);
+
+// Creates and initializes a WebPAnimDecoder object.
+// Parameters:
+//   webp_data - (in) WebP bitstream. This should remain unchanged during the
+//                    lifetime of the output WebPAnimDecoder object.
+//   dec_options - (in) decoding options. Can be passed NULL to choose
+//                      reasonable defaults (in particular, color mode MODE_RGBA
+//                      will be picked).
+// Returns:
+//   A pointer to the newly created WebPAnimDecoder object, or NULL in case of
+//   parsing error, invalid option or memory error.
+static WEBP_INLINE WebPAnimDecoder* WebPAnimDecoderNew(
+    const WebPData* webp_data, const WebPAnimDecoderOptions* dec_options) {
+  return WebPAnimDecoderNewInternal(webp_data, dec_options,
+                                    WEBP_DEMUX_ABI_VERSION);
+}
+
+// Global information about the animation.
+struct WebPAnimInfo {
+  uint32_t canvas_width;
+  uint32_t canvas_height;
+  uint32_t loop_count;
+  uint32_t bgcolor;
+  uint32_t frame_count;
+  uint32_t pad[4];   // padding for later use
+};
+
+// Get global information about the animation.
+// Parameters:
+//   dec - (in) decoder instance to get information from.
+//   info - (out) global information fetched from the animation.
+// Returns:
+//   True on success.
+WEBP_EXTERN int WebPAnimDecoderGetInfo(const WebPAnimDecoder* dec,
+                                       WebPAnimInfo* info);
+
+// Fetch the next frame from 'dec' based on options supplied to
+// WebPAnimDecoderNew(). This will be a fully reconstructed canvas of size
+// 'canvas_width * 4 * canvas_height', and not just the frame sub-rectangle. The
+// returned buffer 'buf' is valid only until the next call to
+// WebPAnimDecoderGetNext(), WebPAnimDecoderReset() or WebPAnimDecoderDelete().
+// Parameters:
+//   dec - (in/out) decoder instance from which the next frame is to be fetched.
+//   buf - (out) decoded frame.
+//   timestamp - (out) timestamp of the frame in milliseconds.
+// Returns:
+//   False if any of the arguments are NULL, or if there is a parsing or
+//   decoding error, or if there are no more frames. Otherwise, returns true.
+WEBP_EXTERN int WebPAnimDecoderGetNext(WebPAnimDecoder* dec,
+                                       uint8_t** buf, int* timestamp);
+
+// Check if there are more frames left to decode.
+// Parameters:
+//   dec - (in) decoder instance to be checked.
+// Returns:
+//   True if 'dec' is not NULL and some frames are yet to be decoded.
+//   Otherwise, returns false.
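+// An illustrative single playback pass built on this predicate:
+//   while (WebPAnimDecoderHasMoreFrames(dec)) {
+//     uint8_t* frame; int timestamp_ms;
+//     if (!WebPAnimDecoderGetNext(dec, &frame, &timestamp_ms)) break;
+//     // present 'frame' (a full canvas_width x canvas_height RGBA canvas,
+//     // with default options) at 'timestamp_ms'
+//   }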
+WEBP_EXTERN int WebPAnimDecoderHasMoreFrames(const WebPAnimDecoder* dec);
+
+// Resets the WebPAnimDecoder object, so that next call to
+// WebPAnimDecoderGetNext() will restart decoding from 1st frame. This would be
+// helpful when all frames need to be decoded multiple times (e.g.
+// info.loop_count times) without destroying and recreating the 'dec' object.
+// Parameters:
+//   dec - (in/out) decoder instance to be reset
+WEBP_EXTERN void WebPAnimDecoderReset(WebPAnimDecoder* dec);
+
+// Grab the internal demuxer object.
+// Getting the demuxer object can be useful if one wants to use operations only
+// available through demuxer; e.g. to get XMP/EXIF/ICC metadata. The returned
+// demuxer object is owned by 'dec' and is valid only until the next call to
+// WebPAnimDecoderDelete().
+//
+// Parameters:
+//   dec - (in) decoder instance from which the demuxer object is to be fetched.
+WEBP_EXTERN const WebPDemuxer* WebPAnimDecoderGetDemuxer(
+    const WebPAnimDecoder* dec);
+
+// Deletes the WebPAnimDecoder object.
+// Parameters:
+//   dec - (in/out) decoder instance to be deleted
+WEBP_EXTERN void WebPAnimDecoderDelete(WebPAnimDecoder* dec);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // WEBP_WEBP_DEMUX_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/webp_encode.h b/vendor/github.com/sizeofint/webpanimation/webp_encode.h
new file mode 100644
index 00000000..95f86c26
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/webp_encode.h
@@ -0,0 +1,552 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// WebP encoder: main interface
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#ifndef WEBP_WEBP_ENCODE_H_
+#define WEBP_WEBP_ENCODE_H_
+
+#include "webp_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define WEBP_ENCODER_ABI_VERSION 0x020f  // MAJOR(8b) + MINOR(8b)
+
+// Note: forward declaring enumerations is not allowed in (strict) C and C++,
+// the types are left here for reference.
+// typedef enum WebPImageHint WebPImageHint;
+// typedef enum WebPEncCSP WebPEncCSP;
+// typedef enum WebPPreset WebPPreset;
+// typedef enum WebPEncodingError WebPEncodingError;
+typedef struct WebPConfig WebPConfig;
+typedef struct WebPPicture WebPPicture;  // main structure for I/O
+typedef struct WebPAuxStats WebPAuxStats;
+typedef struct WebPMemoryWriter WebPMemoryWriter;
+
+// Return the encoder's version number, packed in hexadecimal using 8 bits for
+// each of major/minor/revision. E.g.: v2.5.7 is 0x020507.
+WEBP_EXTERN int WebPGetEncoderVersion(void);
+
+//------------------------------------------------------------------------------
+// One-stop-shop call! No questions asked:
+
+// Returns the size of the compressed data (pointed to by *output), or 0 if
+// an error occurred. The compressed data must be released by the caller
+// using the call 'WebPFree(*output)'.
+// These functions compress using the lossy format, and the quality_factor
+// can go from 0 (smaller output, lower quality) to 100 (best quality,
+// larger output).
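+// A minimal usage sketch (illustrative only: 'rgb_pixels', 'w' and 'h' are
+// hypothetical caller-supplied values, and the stride below assumes tightly
+// packed 24-bit RGB rows):
+/*
+  uint8_t* output = NULL;
+  size_t size = WebPEncodeRGB(rgb_pixels, w, h, 3 * w, 75.f, &output);
+  if (size == 0) {
+    // ... (Handle encoding error).
+  }
+  // ... (Write 'size' bytes from 'output' to a file).
+  WebPFree(output);
+*/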
+WEBP_EXTERN size_t WebPEncodeRGB(const uint8_t* rgb,
+                                 int width, int height, int stride,
+                                 float quality_factor, uint8_t** output);
+WEBP_EXTERN size_t WebPEncodeBGR(const uint8_t* bgr,
+                                 int width, int height, int stride,
+                                 float quality_factor, uint8_t** output);
+WEBP_EXTERN size_t WebPEncodeRGBA(const uint8_t* rgba,
+                                  int width, int height, int stride,
+                                  float quality_factor, uint8_t** output);
+WEBP_EXTERN size_t WebPEncodeBGRA(const uint8_t* bgra,
+                                  int width, int height, int stride,
+                                  float quality_factor, uint8_t** output);
+
+// These functions are the equivalent of the above, but compressing in a
+// lossless manner. Files are usually larger than lossy format, but will
+// not suffer any compression loss.
+// Note these functions, like the lossy versions, use the library's default
+// settings. For lossless this means 'exact' is disabled. RGB values in
+// transparent areas will be modified to improve compression. To avoid this,
+// use WebPEncode() and set WebPConfig::exact to 1.
+WEBP_EXTERN size_t WebPEncodeLosslessRGB(const uint8_t* rgb,
+                                         int width, int height, int stride,
+                                         uint8_t** output);
+WEBP_EXTERN size_t WebPEncodeLosslessBGR(const uint8_t* bgr,
+                                         int width, int height, int stride,
+                                         uint8_t** output);
+WEBP_EXTERN size_t WebPEncodeLosslessRGBA(const uint8_t* rgba,
+                                          int width, int height, int stride,
+                                          uint8_t** output);
+WEBP_EXTERN size_t WebPEncodeLosslessBGRA(const uint8_t* bgra,
+                                          int width, int height, int stride,
+                                          uint8_t** output);
+
+//------------------------------------------------------------------------------
+// Coding parameters
+
+// Image characteristics hint for the underlying encoder.
+typedef enum WebPImageHint {
+  WEBP_HINT_DEFAULT = 0,  // default preset.
+  WEBP_HINT_PICTURE,      // digital picture, like portrait, inner shot
+  WEBP_HINT_PHOTO,        // outdoor photograph, with natural lighting
+  WEBP_HINT_GRAPH,        // Discrete tone image (graph, map-tile etc).
+  WEBP_HINT_LAST
+} WebPImageHint;
+
+// Compression parameters.
+struct WebPConfig {
+  int lossless;           // Lossless encoding (0=lossy(default), 1=lossless).
+  float quality;          // between 0 and 100. For lossy, 0 gives the smallest
+                          // size and 100 the largest. For lossless, this
+                          // parameter is the amount of effort put into the
+                          // compression: 0 is the fastest but gives larger
+                          // files compared to the slowest, but best, 100.
+  int method;             // quality/speed trade-off (0=fast, 6=slower-better)
+
+  WebPImageHint image_hint;  // Hint for image type (lossless only for now).
+
+  int target_size;        // if non-zero, set the desired target size in bytes.
+                          // Takes precedence over the 'compression' parameter.
+  float target_PSNR;      // if non-zero, specifies the minimal distortion to
+                          // try to achieve. Takes precedence over target_size.
+  int segments;           // maximum number of segments to use, in [1..4]
+  int sns_strength;       // Spatial Noise Shaping. 0=off, 100=maximum.
+  int filter_strength;    // range: [0 = off .. 100 = strongest]
+  int filter_sharpness;   // range: [0 = off .. 7 = least sharp]
+  int filter_type;        // filtering type: 0 = simple, 1 = strong (only used
+                          // if filter_strength > 0 or autofilter > 0)
+  int autofilter;         // Auto adjust filter's strength [0 = off, 1 = on]
+  int alpha_compression;  // Algorithm for encoding the alpha plane (0 = none,
+                          // 1 = compressed with WebP lossless). Default is 1.
+  int alpha_filtering;    // Predictive filtering method for alpha plane.
+                          // 0: none, 1: fast, 2: best. Default is 1.
+  int alpha_quality;      // Between 0 (smallest size) and 100 (lossless).
+                          // Default is 100.
+  int pass;               // number of entropy-analysis passes (in [1..10]).
+
+  int show_compressed;    // if true, export the compressed picture back.
+                          // In-loop filtering is not applied.
+  int preprocessing;      // preprocessing filter:
+                          // 0=none, 1=segment-smooth, 2=pseudo-random dithering
+  int partitions;         // log2(number of token partitions) in [0..3]. Default
+                          // is set to 0 for easier progressive decoding.
+  int partition_limit;    // quality degradation allowed to fit the 512k limit
+                          // on prediction modes coding (0: no degradation,
+                          // 100: maximum possible degradation).
+  int emulate_jpeg_size;  // If true, compression parameters will be remapped
+                          // to better match the expected output size from
+                          // JPEG compression. Generally, the output size will
+                          // be similar but the degradation will be lower.
+  int thread_level;       // If non-zero, try and use multi-threaded encoding.
+  int low_memory;         // If set, reduce memory usage (but increase CPU use).
+
+  int near_lossless;      // Near lossless encoding [0 = max loss .. 100 = off
+                          // (default)].
+  int exact;              // if non-zero, preserve the exact RGB values under
+                          // transparent areas. Otherwise, discard this
+                          // invisible RGB information for better compression.
+                          // The default value is 0.
+
+  int use_delta_palette;  // reserved for future lossless feature
+  int use_sharp_yuv;      // if needed, use sharp (and slow) RGB->YUV conversion
+
+  int qmin;               // minimum permissible quality factor
+  int qmax;               // maximum permissible quality factor
+};
+
+// Enumerate some predefined settings for WebPConfig, depending on the type
+// of source picture. These presets are used when calling WebPConfigPreset().
+typedef enum WebPPreset {
+  WEBP_PRESET_DEFAULT = 0,  // default preset.
+  WEBP_PRESET_PICTURE,      // digital picture, like portrait, inner shot
+  WEBP_PRESET_PHOTO,        // outdoor photograph, with natural lighting
+  WEBP_PRESET_DRAWING,      // hand or line drawing, with high-contrast details
+  WEBP_PRESET_ICON,         // small-sized colorful images
+  WEBP_PRESET_TEXT          // text-like
+} WebPPreset;
+
+// Internal, version-checked, entry point
+WEBP_EXTERN int WebPConfigInitInternal(WebPConfig*, WebPPreset, float, int);
+
+// Should always be called, to initialize a fresh WebPConfig structure before
+// modification. Returns false in case of version mismatch. WebPConfigInit()
+// must have succeeded before using the 'config' object.
+// Note that the default values are lossless=0 and quality=75.
+static WEBP_INLINE int WebPConfigInit(WebPConfig* config) {
+  return WebPConfigInitInternal(config, WEBP_PRESET_DEFAULT, 75.f,
+                                WEBP_ENCODER_ABI_VERSION);
+}
+
+// This function will initialize the configuration according to a predefined
+// set of parameters (referred to by 'preset') and a given quality factor.
+// This function can be called as a replacement for WebPConfigInit(). Will
+// return false in case of error.
+static WEBP_INLINE int WebPConfigPreset(WebPConfig* config,
+                                        WebPPreset preset, float quality) {
+  return WebPConfigInitInternal(config, preset, quality,
+                                WEBP_ENCODER_ABI_VERSION);
+}
+
+// Activate the lossless compression mode with the desired efficiency level
+// between 0 (fastest, lowest compression) and 9 (slower, best compression).
+// A good default level is '6', providing a fair tradeoff between compression
+// speed and final compressed size.
+// This function will overwrite several fields from config: 'method', 'quality'
+// and 'lossless'. Returns false in case of parameter error.
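+// A minimal sketch (the level '6' here is just the suggested default above;
+// error handling abbreviated):
+/*
+  WebPConfig config;
+  if (!WebPConfigInit(&config)) return 0;              // version check
+  if (!WebPConfigLosslessPreset(&config, 6)) return 0;
+  // ... (Optionally tune other fields, then check WebPValidateConfig(&config)).
+*/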
+WEBP_EXTERN int WebPConfigLosslessPreset(WebPConfig* config, int level);
+
+// Returns true if 'config' is non-NULL and all configuration parameters are
+// within their valid ranges.
+WEBP_EXTERN int WebPValidateConfig(const WebPConfig* config);
+
+//------------------------------------------------------------------------------
+// Input / Output
+// Structure for storing auxiliary statistics.
+
+struct WebPAuxStats {
+  int coded_size;         // final size
+
+  float PSNR[5];          // peak-signal-to-noise ratio for Y/U/V/All/Alpha
+  int block_count[3];     // number of intra4/intra16/skipped macroblocks
+  int header_bytes[2];    // approximate number of bytes spent for header
+                          // and mode-partition #0
+  int residual_bytes[3][4];  // approximate number of bytes spent for
+                             // DC/AC/uv coefficients for each (0..3) segments.
+  int segment_size[4];    // number of macroblocks in each segment
+  int segment_quant[4];   // quantizer values for each segment
+  int segment_level[4];   // filtering strength for each segment [0..63]
+
+  int alpha_data_size;    // size of the transparency data
+  int layer_data_size;    // size of the enhancement layer data
+
+  // lossless encoder statistics
+  uint32_t lossless_features;  // bit0:predictor bit1:cross-color transform
+                               // bit2:subtract-green bit3:color indexing
+  int histogram_bits;          // number of precision bits of histogram
+  int transform_bits;          // precision bits for transform
+  int cache_bits;              // number of bits for color cache lookup
+  int palette_size;            // number of colors in palette, if used
+  int lossless_size;           // final lossless size
+  int lossless_hdr_size;       // lossless header (transform, huffman etc) size
+  int lossless_data_size;      // lossless image data size
+
+  uint32_t pad[2];        // padding for later use
+};
+
+// Signature for output function. Should return true if writing was successful.
+// data/data_size is the segment of data to write, and 'picture' is for
+// reference (and so one can make use of picture->custom_ptr).
+typedef int (*WebPWriterFunction)(const uint8_t* data, size_t data_size,
+                                  const WebPPicture* picture);
+
+// WebPMemoryWrite: a special WebPWriterFunction that writes to memory using
+// the following WebPMemoryWriter object (to be set as a custom_ptr).
+struct WebPMemoryWriter {
+  uint8_t* mem;       // final buffer (of size 'max_size', larger than 'size').
+  size_t size;        // final size
+  size_t max_size;    // total capacity
+  uint32_t pad[1];    // padding for later use
+};
+
+// The following must be called first before any use.
+WEBP_EXTERN void WebPMemoryWriterInit(WebPMemoryWriter* writer);
+
+// The following must be called to deallocate writer->mem memory. The 'writer'
+// object itself is not deallocated.
+WEBP_EXTERN void WebPMemoryWriterClear(WebPMemoryWriter* writer);
+// The custom writer to be used with WebPMemoryWriter as custom_ptr. Upon
+// completion, writer.mem and writer.size will hold the coded data.
+// writer.mem must be freed by calling WebPMemoryWriterClear.
+WEBP_EXTERN int WebPMemoryWrite(const uint8_t* data, size_t data_size,
+                                const WebPPicture* picture);
+
+// Progress hook, called from time to time to report progress. It can return
+// false to request an abort of the encoding process, or true otherwise if
+// everything is OK.
+typedef int (*WebPProgressHook)(int percent, const WebPPicture* picture);
+
+// Color spaces.
+typedef enum WebPEncCSP {
+  // chroma sampling
+  WEBP_YUV420  = 0,        // 4:2:0
+  WEBP_YUV420A = 4,        // alpha channel variant
+  WEBP_CSP_UV_MASK = 3,    // bit-mask to get the UV sampling factors
+  WEBP_CSP_ALPHA_BIT = 4   // bit that is set if alpha is present
+} WebPEncCSP;
+
+// Encoding error conditions.
+typedef enum WebPEncodingError {
+  VP8_ENC_OK = 0,
+  VP8_ENC_ERROR_OUT_OF_MEMORY,            // memory error allocating objects
+  VP8_ENC_ERROR_BITSTREAM_OUT_OF_MEMORY,  // memory error while flushing bits
+  VP8_ENC_ERROR_NULL_PARAMETER,           // a pointer parameter is NULL
+  VP8_ENC_ERROR_INVALID_CONFIGURATION,    // configuration is invalid
+  VP8_ENC_ERROR_BAD_DIMENSION,            // picture has invalid width/height
+  VP8_ENC_ERROR_PARTITION0_OVERFLOW,      // partition is bigger than 512k
+  VP8_ENC_ERROR_PARTITION_OVERFLOW,       // partition is bigger than 16M
+  VP8_ENC_ERROR_BAD_WRITE,                // error while flushing bytes
+  VP8_ENC_ERROR_FILE_TOO_BIG,             // file is bigger than 4G
+  VP8_ENC_ERROR_USER_ABORT,               // abort request by user
+  VP8_ENC_ERROR_LAST                      // list terminator. always last.
+} WebPEncodingError;
+
+// maximum width/height allowed (inclusive), in pixels
+#define WEBP_MAX_DIMENSION 16383
+
+// Main exchange structure (input samples, output bytes, statistics)
+//
+// Once WebPPictureInit() has been called, it's ok to make all the INPUT fields
+// (use_argb, y/u/v, argb, ...) point to user-owned data, even if
+// WebPPictureAlloc() has been called. Depending on the value use_argb,
+// it's guaranteed that either *argb or *y/*u/*v content will be kept untouched.
+struct WebPPicture {
+  //   INPUT
+  //////////////
+  // Main flag for encoder selecting between ARGB or YUV input.
+  // It is recommended to use ARGB input (*argb, argb_stride) for lossless
+  // compression, and YUV input (*y, *u, *v, etc.) for lossy compression
+  // since these are the respective native colorspaces for these formats.
+  int use_argb;
+
+  // YUV input (mostly used for input to lossy compression)
+  WebPEncCSP colorspace;     // colorspace: should be YUV420 for now (=Y'CbCr).
+  int width, height;         // dimensions (less than or equal to
+                             // WEBP_MAX_DIMENSION)
+  uint8_t* y, *u, *v;        // pointers to luma/chroma planes.
+  int y_stride, uv_stride;   // luma/chroma strides.
+  uint8_t* a;                // pointer to the alpha plane
+  int a_stride;              // stride of the alpha plane
+  uint32_t pad1[2];          // padding for later use
+
+  // ARGB input (mostly used for input to lossless compression)
+  uint32_t* argb;            // Pointer to argb (32 bit) plane.
+  int argb_stride;           // This is stride in pixels units, not bytes.
+  uint32_t pad2[3];          // padding for later use
+
+  //   OUTPUT
+  ///////////////
+  // Byte-emission hook, to store compressed bytes as they are ready.
+  WebPWriterFunction writer;  // can be NULL
+  void* custom_ptr;           // can be used by the writer.
+
+  // map for extra information (only for lossy compression mode)
+  int extra_info_type;    // 1: intra type, 2: segment, 3: quant
+                          // 4: intra-16 prediction mode,
+                          // 5: chroma prediction mode,
+                          // 6: bit cost, 7: distortion
+  uint8_t* extra_info;    // if not NULL, points to an array of size
+                          // ((width + 15) / 16) * ((height + 15) / 16) that
+                          // will be filled with a macroblock map, depending
+                          // on extra_info_type.
+
+  //   STATS AND REPORTS
+  ///////////////////////////
+  // Pointer to side statistics (updated only if not NULL)
+  WebPAuxStats* stats;
+
+  // Error code for the latest error encountered during encoding
+  WebPEncodingError error_code;
+
+  // If not NULL, report progress during encoding.
+  WebPProgressHook progress_hook;
+
+  void* user_data;        // this field is free to be set to any value and
+                          // used during callbacks (like progress-report e.g.).
+
+  uint32_t pad3[3];       // padding for later use
+
+  // Unused for now
+  uint8_t* pad4, *pad5;
+  uint32_t pad6[8];       // padding for later use
+
+  // PRIVATE FIELDS
+  ////////////////////
+  void* memory_;          // row chunk of memory for yuva planes
+  void* memory_argb_;     // and for argb too.
+  void* pad7[2];          // padding for later use
+};
+
+// Internal, version-checked, entry point
+WEBP_EXTERN int WebPPictureInitInternal(WebPPicture*, int);
+
+// Should always be called, to initialize the structure. Returns false in case
+// of version mismatch. WebPPictureInit() must have succeeded before using the
+// 'picture' object.
+// Note that, by default, use_argb is false and colorspace is WEBP_YUV420.
+static WEBP_INLINE int WebPPictureInit(WebPPicture* picture) {
+  return WebPPictureInitInternal(picture, WEBP_ENCODER_ABI_VERSION);
+}
+
+//------------------------------------------------------------------------------
+// WebPPicture utils
+
+// Convenience allocation / deallocation based on picture->width/height:
+// Allocate y/u/v buffers as per colorspace/width/height specification.
+// Note! This function will free the previous buffer if needed.
+// Returns false in case of memory error.
+WEBP_EXTERN int WebPPictureAlloc(WebPPicture* picture);
+
+// Release the memory allocated by WebPPictureAlloc() or WebPPictureImport*().
+// Note that this function does _not_ free the memory used by the 'picture'
+// object itself.
+// Besides memory (which is reclaimed) all other fields of 'picture' are
+// preserved.
+WEBP_EXTERN void WebPPictureFree(WebPPicture* picture);
+
+// Copy the pixels of *src into *dst, using WebPPictureAlloc. Upon return, *dst
+// will fully own the copied pixels (this is not a view). The 'dst' picture need
+// not be initialized as its content is overwritten.
+// Returns false in case of memory allocation error.
+WEBP_EXTERN int WebPPictureCopy(const WebPPicture* src, WebPPicture* dst);
+
+// Compute the single distortion for packed planes of samples.
+// 'src' will be compared to 'ref', and the raw distortion stored into
+// '*distortion'. The refined metric (log(MSE), log(1 - ssim), ...) will be
+// stored in '*result'.
+// 'x_step' is the horizontal stride (in bytes) between samples.
+// 'src/ref_stride' is the byte distance between rows.
+// Returns false in case of error (bad parameter, memory allocation error, ...).
+WEBP_EXTERN int WebPPlaneDistortion(const uint8_t* src, size_t src_stride,
+                                    const uint8_t* ref, size_t ref_stride,
+                                    int width, int height,
+                                    size_t x_step,
+                                    int type,   // 0 = PSNR, 1 = SSIM, 2 = LSIM
+                                    float* distortion, float* result);
+
+// Compute PSNR, SSIM or LSIM distortion metric between two pictures. Results
+// are in dB, stored in result[] in the B/G/R/A/All order. The distortion is
+// always performed using ARGB samples. Hence if the input is YUV(A), the
+// picture will be internally converted to ARGB (just for the measurement).
+// Warning: this function is rather CPU-intensive.
+WEBP_EXTERN int WebPPictureDistortion(
+    const WebPPicture* src, const WebPPicture* ref,
+    int metric_type,           // 0 = PSNR, 1 = SSIM, 2 = LSIM
+    float result[5]);
+
+// Self-crops a picture to the rectangle defined by top/left/width/height.
+// Returns false in case of memory allocation error, or if the rectangle is
+// outside of the source picture.
+// The rectangle for the crop is defined by the top-left corner pixel
+// coordinates (left, top) as well as its width and height. This rectangle
+// must be fully contained inside the source picture. If the source
+// picture uses the YUV420 colorspace, the top and left coordinates will be
+// snapped to even values.
+WEBP_EXTERN int WebPPictureCrop(WebPPicture* picture,
+                                int left, int top, int width, int height);
+
+// Extracts a view from 'src' picture into 'dst'. The rectangle for the view
+// is defined by the top-left corner pixel coordinates (left, top) as well
+// as its width and height. This rectangle must be fully contained inside
+// the 'src' source picture. If the source picture uses the YUV420 colorspace,
+// the top and left coordinates will be snapped to even values.
+// Picture 'src' must out-live 'dst' picture. Self-extraction of view is allowed
+// ('src' equal to 'dst') as a means of fast-cropping (but note that doing so,
+// the original dimension will be lost). Picture 'dst' need not be initialized
+// with WebPPictureInit() if it is different from 'src', since its content will
+// be overwritten.
+// Returns false in case of memory allocation error or invalid parameters.
+WEBP_EXTERN int WebPPictureView(const WebPPicture* src,
+                                int left, int top, int width, int height,
+                                WebPPicture* dst);
+
+// Returns true if the 'picture' is actually a view and therefore does
+// not own the memory for pixels.
+WEBP_EXTERN int WebPPictureIsView(const WebPPicture* picture);
+
+// Rescale a picture to new dimension width x height.
+// If either 'width' or 'height' (but not both) is 0 the corresponding
+// dimension will be calculated preserving the aspect ratio.
+// No gamma correction is applied.
+// Returns false in case of error (invalid parameter or insufficient memory).
+WEBP_EXTERN int WebPPictureRescale(WebPPicture* pic, int width, int height);
+
+// Colorspace conversion function to import RGB samples.
+// Previous buffer will be freed, if any.
+// *rgb buffer should have a size of at least height * rgb_stride.
+// Returns false in case of memory error.
+WEBP_EXTERN int WebPPictureImportRGB(
+    WebPPicture* picture, const uint8_t* rgb, int rgb_stride);
+// Same, but for RGBA buffer.
+WEBP_EXTERN int WebPPictureImportRGBA(
+    WebPPicture* picture, const uint8_t* rgba, int rgba_stride);
+// Same, but for RGBA buffer. Imports the RGB directly from the 32-bit format
+// input buffer ignoring the alpha channel. Avoids needing to copy the data
+// to a temporary 24-bit RGB buffer to import the RGB only.
+WEBP_EXTERN int WebPPictureImportRGBX(
+    WebPPicture* picture, const uint8_t* rgbx, int rgbx_stride);
+
+// Variants of the above, but taking BGR(A|X) input.
+WEBP_EXTERN int WebPPictureImportBGR(
+    WebPPicture* picture, const uint8_t* bgr, int bgr_stride);
+WEBP_EXTERN int WebPPictureImportBGRA(
+    WebPPicture* picture, const uint8_t* bgra, int bgra_stride);
+WEBP_EXTERN int WebPPictureImportBGRX(
+    WebPPicture* picture, const uint8_t* bgrx, int bgrx_stride);
+
+// Converts picture->argb data to the YUV420A format. The 'colorspace'
+// parameter is deprecated and should be equal to WEBP_YUV420.
+// Upon return, picture->use_argb is set to false. The presence of real
+// non-opaque transparent values is detected, and 'colorspace' will be
+// adjusted accordingly. Note that this method is lossy.
+// Returns false in case of error.
+WEBP_EXTERN int WebPPictureARGBToYUVA(WebPPicture* picture,
+                                      WebPEncCSP /*colorspace = WEBP_YUV420*/);
+
+// Same as WebPPictureARGBToYUVA(), but the conversion is done using
+// pseudo-random dithering with a strength 'dithering' between
+// 0.0 (no dithering) and 1.0 (maximum dithering). This is useful
+// for photographic pictures.
+WEBP_EXTERN int WebPPictureARGBToYUVADithered(
+    WebPPicture* picture, WebPEncCSP colorspace, float dithering);
+
+// Performs 'sharp' RGBA->YUVA420 downsampling and colorspace conversion.
+// Downsampling is handled with extra care in case of color clipping. This
+// method is roughly 2x slower than WebPPictureARGBToYUVA() but produces better
+// and sharper YUV representation.
+// Returns false in case of error.
+WEBP_EXTERN int WebPPictureSharpARGBToYUVA(WebPPicture* picture);
+// kept for backward compatibility:
+WEBP_EXTERN int WebPPictureSmartARGBToYUVA(WebPPicture* picture);
+
+// Converts picture->yuv to picture->argb and sets picture->use_argb to true.
+// The input format must be YUV_420 or YUV_420A. The conversion from YUV420 to
+// ARGB incurs a small loss too.
+// Note that the use of this colorspace is discouraged if one has access to the
+// raw ARGB samples, since using YUV420 is comparatively lossy.
+// Returns false in case of error.
+WEBP_EXTERN int WebPPictureYUVAToARGB(WebPPicture* picture);
+
+// Helper function: given a width x height plane of RGBA or YUV(A) samples,
+// clean up or smooth the YUV or RGB samples under fully transparent areas,
+// to help compressibility (no guarantee, though).
+WEBP_EXTERN void WebPCleanupTransparentArea(WebPPicture* picture);
+
+// Scan the picture 'picture' for the presence of non-fully-opaque alpha values.
+// Returns true in such case. Otherwise returns false (indicating, e.g., that
+// the alpha plane can be ignored altogether).
+WEBP_EXTERN int WebPPictureHasTransparency(const WebPPicture* picture);
+
+// Remove the transparency information (if present) by blending the color with
+// the background color 'background_rgb' (specified as 24bit RGB triplet).
+// After this call, all alpha values are reset to 0xff.
+WEBP_EXTERN void WebPBlendAlpha(WebPPicture* pic, uint32_t background_rgb);
+
+//------------------------------------------------------------------------------
+// Main call
+
+// Main encoding call, after config and picture have been initialized.
+// 'picture' must be less than 16384x16384 in dimension (cf WEBP_MAX_DIMENSION),
+// and the 'config' object must be a valid one.
+// Returns false in case of error, true otherwise.
+// In case of error, picture->error_code is updated accordingly.
+// 'picture' can hold the source samples in both YUV(A) or ARGB input, depending
+// on the value of 'picture->use_argb'. It is highly recommended to use
+// the former for lossy encoding, and the latter for lossless encoding
+// (when config.lossless is true). Automatic conversion from one format to
+// another is provided but they both incur some loss.
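+// An end-to-end sketch combining the picture, config and memory-writer APIs
+// above (assuming a caller-supplied RGBA buffer 'rgba' of dimensions 'w' x 'h'
+// with stride 'w * 4'; error handling abbreviated):
+/*
+  WebPConfig config;
+  WebPPicture pic;
+  WebPMemoryWriter writer;
+  WebPConfigInit(&config);
+  WebPPictureInit(&pic);
+  pic.use_argb = 1;                  // recommended for lossless input
+  pic.width = w;
+  pic.height = h;
+  WebPPictureImportRGBA(&pic, rgba, w * 4);
+  WebPMemoryWriterInit(&writer);
+  pic.writer = WebPMemoryWrite;
+  pic.custom_ptr = &writer;
+  if (!WebPEncode(&config, &pic)) {
+    // ... (Inspect pic.error_code).
+  }
+  // ... (Use writer.mem / writer.size).
+  WebPMemoryWriterClear(&writer);
+  WebPPictureFree(&pic);
+*/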
+WEBP_EXTERN int WebPEncode(const WebPConfig* config, WebPPicture* picture);
+
+//------------------------------------------------------------------------------
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // WEBP_WEBP_ENCODE_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/webp_format_constants.h b/vendor/github.com/sizeofint/webpanimation/webp_format_constants.h
new file mode 100644
index 00000000..eca6981a
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/webp_format_constants.h
@@ -0,0 +1,87 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Internal header for constants related to WebP file format.
+//
+// Author: Urvang (urvang@google.com)
+
+#ifndef WEBP_WEBP_FORMAT_CONSTANTS_H_
+#define WEBP_WEBP_FORMAT_CONSTANTS_H_
+
+// Create fourcc of the chunk from the chunk tag characters.
+#define MKFOURCC(a, b, c, d) ((a) | (b) << 8 | (c) << 16 | (uint32_t)(d) << 24)
+
+// VP8 related constants.
+#define VP8_SIGNATURE 0x9d012a              // Signature in VP8 data.
+#define VP8_MAX_PARTITION0_SIZE (1 << 19)   // max size of mode partition
+#define VP8_MAX_PARTITION_SIZE  (1 << 24)   // max size for token partition
+#define VP8_FRAME_HEADER_SIZE 10  // Size of the frame header within VP8 data.
+
+// VP8L related constants.
+#define VP8L_SIGNATURE_SIZE          1      // VP8L signature size.
+#define VP8L_MAGIC_BYTE              0x2f   // VP8L signature byte.
+#define VP8L_IMAGE_SIZE_BITS         14     // Number of bits used to store
+                                            // width and height.
+#define VP8L_VERSION_BITS            3      // 3 bits reserved for version.
+#define VP8L_VERSION                 0      // version 0
+#define VP8L_FRAME_HEADER_SIZE       5      // Size of the VP8L frame header.
+
+#define MAX_PALETTE_SIZE             256
+#define MAX_CACHE_BITS               11
+#define HUFFMAN_CODES_PER_META_CODE  5
+#define ARGB_BLACK                   0xff000000
+
+#define DEFAULT_CODE_LENGTH          8
+#define MAX_ALLOWED_CODE_LENGTH      15
+
+#define NUM_LITERAL_CODES            256
+#define NUM_LENGTH_CODES             24
+#define NUM_DISTANCE_CODES           40
+#define CODE_LENGTH_CODES            19
+
+#define MIN_HUFFMAN_BITS             2  // min number of Huffman bits
+#define MAX_HUFFMAN_BITS             9  // max number of Huffman bits
+
+#define TRANSFORM_PRESENT            1  // The bit to be written when next data
+                                        // to be read is a transform.
+#define NUM_TRANSFORMS               4  // Maximum number of allowed transforms
+                                        // in a bitstream.
+typedef enum {
+  PREDICTOR_TRANSFORM      = 0,
+  CROSS_COLOR_TRANSFORM    = 1,
+  SUBTRACT_GREEN           = 2,
+  COLOR_INDEXING_TRANSFORM = 3
+} VP8LImageTransformType;
+
+// Alpha related constants.
+#define ALPHA_HEADER_LEN            1
+#define ALPHA_NO_COMPRESSION        0
+#define ALPHA_LOSSLESS_COMPRESSION  1
+#define ALPHA_PREPROCESSED_LEVELS   1
+
+// Mux related constants.
+#define TAG_SIZE           4     // Size of a chunk tag (e.g. "VP8L").
+#define CHUNK_SIZE_BYTES   4     // Size needed to store chunk's size.
+#define CHUNK_HEADER_SIZE  8     // Size of a chunk header.
+#define RIFF_HEADER_SIZE   12    // Size of the RIFF header ("RIFFnnnnWEBP").
+#define ANMF_CHUNK_SIZE    16    // Size of an ANMF chunk.
+#define ANIM_CHUNK_SIZE    6     // Size of an ANIM chunk.
+#define VP8X_CHUNK_SIZE    10    // Size of a VP8X chunk.
+
+#define MAX_CANVAS_SIZE     (1 << 24)     // 24-bit max for VP8X width/height.
+#define MAX_IMAGE_AREA      (1ULL << 32)  // 32-bit max for width x height.
+#define MAX_LOOP_COUNT      (1 << 16)     // maximum value for loop-count
+#define MAX_DURATION        (1 << 24)     // maximum duration
+#define MAX_POSITION_OFFSET (1 << 24)     // maximum frame x/y offset
+
+// Maximum chunk payload is such that adding the header and padding won't
+// overflow a uint32_t.
+#define MAX_CHUNK_PAYLOAD (~0U - CHUNK_HEADER_SIZE - 1)
+
+#endif  // WEBP_WEBP_FORMAT_CONSTANTS_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/webp_mux.h b/vendor/github.com/sizeofint/webpanimation/webp_mux.h
new file mode 100644
index 00000000..c8a3b45c
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/webp_mux.h
@@ -0,0 +1,530 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// RIFF container manipulation and encoding for WebP images.
+//
+// Authors: Urvang (urvang@google.com)
+//          Vikas (vikasa@google.com)
+
+#ifndef WEBP_WEBP_MUX_H_
+#define WEBP_WEBP_MUX_H_
+
+#include "webp_mux_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define WEBP_MUX_ABI_VERSION 0x0108  // MAJOR(8b) + MINOR(8b)
+
+//------------------------------------------------------------------------------
+// Mux API
+//
+// This API allows manipulation of WebP container images containing features
+// like color profile, metadata, animation.
+//
+// Code Example#1: Create a WebPMux object with image data, color profile and
+// XMP metadata.
+/*
+  int copy_data = 0;
+  WebPMux* mux = WebPMuxNew();
+  // ... (Prepare image data).
+  WebPMuxSetImage(mux, &image, copy_data);
+  // ... (Prepare ICCP color profile data).
+  WebPMuxSetChunk(mux, "ICCP", &icc_profile, copy_data);
+  // ... (Prepare XMP metadata).
+  WebPMuxSetChunk(mux, "XMP ", &xmp, copy_data);
+  // Get data from mux in WebP RIFF format.
+  WebPMuxAssemble(mux, &output_data);
+  WebPMuxDelete(mux);
+  // ... (Consume output_data; e.g. write output_data.bytes to file).
+  WebPDataClear(&output_data);
+*/
+
+// Code Example#2: Get image and color profile data from a WebP file.
+/*
+  int copy_data = 0;
+  // ... (Read data from file).
+  WebPMux* mux = WebPMuxCreate(&data, copy_data);
+  WebPMuxGetFrame(mux, 1, &image);
+  // ... (Consume image; e.g. call WebPDecode() to decode the data).
+  WebPMuxGetChunk(mux, "ICCP", &icc_profile);
+  // ... (Consume icc_profile).
+  WebPMuxDelete(mux);
+  WebPFree(data);
+*/
+
+// Note: forward declaring enumerations is not allowed in (strict) C and C++,
+// the types are left here for reference.
+// typedef enum WebPMuxError WebPMuxError;
+// typedef enum WebPChunkId WebPChunkId;
+typedef struct WebPMux WebPMux;   // main opaque object.
+typedef struct WebPMuxFrameInfo WebPMuxFrameInfo;
+typedef struct WebPMuxAnimParams WebPMuxAnimParams;
+typedef struct WebPAnimEncoderOptions WebPAnimEncoderOptions;
+
+// Error codes
+typedef enum WebPMuxError {
+  WEBP_MUX_OK                 =  1,
+  WEBP_MUX_NOT_FOUND          =  0,
+  WEBP_MUX_INVALID_ARGUMENT   = -1,
+  WEBP_MUX_BAD_DATA           = -2,
+  WEBP_MUX_MEMORY_ERROR       = -3,
+  WEBP_MUX_NOT_ENOUGH_DATA    = -4
+} WebPMuxError;
+
+// IDs for different types of chunks.
+typedef enum WebPChunkId {
+  WEBP_CHUNK_VP8X,        // VP8X
+  WEBP_CHUNK_ICCP,        // ICCP
+  WEBP_CHUNK_ANIM,        // ANIM
+  WEBP_CHUNK_ANMF,        // ANMF
+  WEBP_CHUNK_DEPRECATED,  // (deprecated from FRGM)
+  WEBP_CHUNK_ALPHA,       // ALPH
+  WEBP_CHUNK_IMAGE,       // VP8/VP8L
+  WEBP_CHUNK_EXIF,        // EXIF
+  WEBP_CHUNK_XMP,         // XMP
+  WEBP_CHUNK_UNKNOWN,     // Other chunks.
+  WEBP_CHUNK_NIL
+} WebPChunkId;
+
+//------------------------------------------------------------------------------
+
+// Returns the version number of the mux library, packed in hexadecimal using
+// 8 bits for each of major/minor/revision. E.g.: v2.5.7 is 0x020507.
+WEBP_EXTERN int WebPGetMuxVersion(void);
+
+//------------------------------------------------------------------------------
+// Life of a Mux object
+
+// Internal, version-checked, entry point
+WEBP_EXTERN WebPMux* WebPNewInternal(int);
+
+// Creates an empty mux object.
+// Returns:
+//   A pointer to the newly created empty mux object.
+//   Or NULL in case of memory error.
+static WEBP_INLINE WebPMux* WebPMuxNew(void) {
+  return WebPNewInternal(WEBP_MUX_ABI_VERSION);
+}
+
+// Deletes the mux object.
+// Parameters:
+//   mux - (in/out) object to be deleted
+WEBP_EXTERN void WebPMuxDelete(WebPMux* mux);
+
+//------------------------------------------------------------------------------
+// Mux creation.
+
+// Internal, version-checked, entry point
+WEBP_EXTERN WebPMux* WebPMuxCreateInternal(const WebPData*, int, int);
+
+// Creates a mux object from raw data given in WebP RIFF format.
+// Parameters:
+//   bitstream - (in) the bitstream data in WebP RIFF format
+//   copy_data - (in) value 1 indicates given data WILL be copied to the mux
+//               object and value 0 indicates data will NOT be copied.
+// Returns:
+//   A pointer to the mux object created from given data - on success.
+//   NULL - In case of invalid data or memory error.
+static WEBP_INLINE WebPMux* WebPMuxCreate(const WebPData* bitstream,
+                                          int copy_data) {
+  return WebPMuxCreateInternal(bitstream, copy_data, WEBP_MUX_ABI_VERSION);
+}
+
+//------------------------------------------------------------------------------
+// Non-image chunks.
+
+// Note: Only non-image related chunks should be managed through chunk APIs.
+// (Image related chunks are: "ANMF", "VP8 ", "VP8L" and "ALPH").
+// To add, get and delete images, use WebPMuxSetImage(), WebPMuxPushFrame(),
+// WebPMuxGetFrame() and WebPMuxDeleteFrame().
+
+// Adds a chunk with id 'fourcc' and data 'chunk_data' in the mux object.
+// Any existing chunk(s) with the same id will be removed.
+// Parameters:
+//   mux - (in/out) object to which the chunk is to be added
+//   fourcc - (in) a character array containing the fourcc of the given chunk;
+//                 e.g., "ICCP", "XMP ", "EXIF" etc.
+//   chunk_data - (in) the chunk data to be added
+//   copy_data - (in) value 1 indicates given data WILL be copied to the mux
+//               object and value 0 indicates data will NOT be copied.
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if mux, fourcc or chunk_data is NULL
+//                               or if fourcc corresponds to an image chunk.
+//   WEBP_MUX_MEMORY_ERROR - on memory allocation error.
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPMuxSetChunk(
+    WebPMux* mux, const char fourcc[4], const WebPData* chunk_data,
+    int copy_data);
+
+// Gets a reference to the data of the chunk with id 'fourcc' in the mux object.
+// The caller should NOT free the returned data.
+// Parameters:
+//   mux - (in) object from which the chunk data is to be fetched
+//   fourcc - (in) a character array containing the fourcc of the chunk;
+//                 e.g., "ICCP", "XMP ", "EXIF" etc.
+//   chunk_data - (out) returned chunk data
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if mux, fourcc or chunk_data is NULL
+//                               or if fourcc corresponds to an image chunk.
+//   WEBP_MUX_NOT_FOUND - If mux does not contain a chunk with the given id.
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPMuxGetChunk(
+    const WebPMux* mux, const char fourcc[4], WebPData* chunk_data);
+
+// Deletes the chunk with the given 'fourcc' from the mux object.
+// Parameters:
+//   mux - (in/out) object from which the chunk is to be deleted
+//   fourcc - (in) a character array containing the fourcc of the chunk;
+//                 e.g., "ICCP", "XMP ", "EXIF" etc.
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if mux or fourcc is NULL
+//                               or if fourcc corresponds to an image chunk.
+//   WEBP_MUX_NOT_FOUND - If mux does not contain a chunk with the given fourcc.
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPMuxDeleteChunk(
+    WebPMux* mux, const char fourcc[4]);
+
+//------------------------------------------------------------------------------
+// Images.
+
+// Encapsulates data about a single frame.
+struct WebPMuxFrameInfo {
+  WebPData bitstream;     // image data: can be a raw VP8/VP8L bitstream
+                          // or a single-image WebP file.
+  int x_offset;           // x-offset of the frame.
+  int y_offset;           // y-offset of the frame.
+  int duration;           // duration of the frame (in milliseconds).
+
+  WebPChunkId id;         // frame type: should be one of WEBP_CHUNK_ANMF
+                          // or WEBP_CHUNK_IMAGE
+  WebPMuxAnimDispose dispose_method;  // Disposal method for the frame.
+  WebPMuxAnimBlend blend_method;      // Blend operation for the frame.
+  uint32_t pad[1];        // padding for later use
+};
+
+// Sets the (non-animated) image in the mux object.
+// Note: Any existing images (including frames) will be removed.
+// Parameters:
+//   mux - (in/out) object in which the image is to be set
+//   bitstream - (in) can be a raw VP8/VP8L bitstream or a single-image
+//               WebP file (non-animated)
+//   copy_data - (in) value 1 indicates given data WILL be copied to the mux
+//               object and value 0 indicates data will NOT be copied.
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if mux is NULL or bitstream is NULL.
+//   WEBP_MUX_MEMORY_ERROR - on memory allocation error.
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPMuxSetImage(
+    WebPMux* mux, const WebPData* bitstream, int copy_data);
+
+// Adds a frame at the end of the mux object.
+// Notes: (1) frame.id should be WEBP_CHUNK_ANMF
+//        (2) For setting a non-animated image, use WebPMuxSetImage() instead.
+//        (3) The type of frame being pushed must be the same as that of the
+//            frames already in mux.
+//        (4) As WebP only supports even offsets, any odd offset will be snapped
+//            to an even location using: offset &= ~1
+// Parameters:
+//   mux - (in/out) object to which the frame is to be added
+//   frame - (in) frame data.
+//   copy_data - (in) value 1 indicates given data WILL be copied to the mux
+//               object and value 0 indicates data will NOT be copied.
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if mux or frame is NULL
+//                               or if content of 'frame' is invalid.
+//   WEBP_MUX_MEMORY_ERROR - on memory allocation error.
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPMuxPushFrame(
+    WebPMux* mux, const WebPMuxFrameInfo* frame, int copy_data);
+
+// Gets the nth frame from the mux object.
+// The content of 'frame->bitstream' is allocated using WebPMalloc(), and NOT
+// owned by the 'mux' object. It MUST be deallocated by the caller by calling
+// WebPDataClear().
+// nth=0 has a special meaning - last position.
+// Parameters:
+//   mux - (in) object from which the info is to be fetched
+//   nth - (in) index of the frame in the mux object
+//   frame - (out) data of the returned frame
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if mux or frame is NULL.
+//   WEBP_MUX_NOT_FOUND - if there are fewer than nth frames in the mux object.
+//   WEBP_MUX_BAD_DATA - if nth frame chunk in mux is invalid.
+//   WEBP_MUX_MEMORY_ERROR - on memory allocation error.
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPMuxGetFrame(
+    const WebPMux* mux, uint32_t nth, WebPMuxFrameInfo* frame);
+
+// Deletes a frame from the mux object.
+// nth=0 has a special meaning - last position.
+// Parameters:
+//   mux - (in/out) object from which a frame is to be deleted
+//   nth - (in) The position from which the frame is to be deleted
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if mux is NULL.
+//   WEBP_MUX_NOT_FOUND - If there are fewer than nth frames in the mux object
+//                        before deletion.
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPMuxDeleteFrame(WebPMux* mux, uint32_t nth);
+
+//------------------------------------------------------------------------------
+// Animation.
+
+// Animation parameters.
+struct WebPMuxAnimParams {
+  uint32_t bgcolor;  // Background color of the canvas stored (in MSB order) as:
+                     // Bits 00 to 07: Alpha.
+                     // Bits 08 to 15: Red.
+                     // Bits 16 to 23: Green.
+                     // Bits 24 to 31: Blue.
+  int loop_count;    // Number of times to repeat the animation [0 = infinite].
+};
+
+// Sets the animation parameters in the mux object. Any existing ANIM chunks
+// will be removed.
+// Parameters:
+//   mux - (in/out) object in which ANIM chunk is to be set/added
+//   params - (in) animation parameters.
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if mux or params is NULL.
+//   WEBP_MUX_MEMORY_ERROR - on memory allocation error.
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPMuxSetAnimationParams(
+    WebPMux* mux, const WebPMuxAnimParams* params);
+
+// Gets the animation parameters from the mux object.
+// Parameters:
+//   mux - (in) object from which the animation parameters are to be fetched
+//   params - (out) animation parameters extracted from the ANIM chunk
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if mux or params is NULL.
+//   WEBP_MUX_NOT_FOUND - if ANIM chunk is not present in mux object.
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPMuxGetAnimationParams(
+    const WebPMux* mux, WebPMuxAnimParams* params);
+
+//------------------------------------------------------------------------------
+// Misc Utilities.
+
+// Sets the canvas size for the mux object. The width and height can be
+// specified explicitly or left as zero (0, 0).
+// * When width and height are specified explicitly, then this frame bound is
+//   enforced during subsequent calls to WebPMuxAssemble() and an error is
+//   reported if any animated frame does not completely fit within the canvas.
+// * When unspecified (0, 0), the constructed canvas will get the frame bounds
+//   from the bounding-box over all frames after calling WebPMuxAssemble().
+// Parameters:
+//   mux - (in) object to which the canvas size is to be set
+//   width - (in) canvas width
+//   height - (in) canvas height
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if mux is NULL; or
+//                               width or height are invalid or out of bounds
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPMuxSetCanvasSize(WebPMux* mux,
+                                              int width, int height);
+
+// Gets the canvas size from the mux object.
+// Note: This method assumes that the VP8X chunk, if present, is up-to-date.
+// That is, the mux object hasn't been modified since the last call to
+// WebPMuxAssemble() or WebPMuxCreate().
+// Parameters:
+//   mux - (in) object from which the canvas size is to be fetched
+//   width - (out) canvas width
+//   height - (out) canvas height
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if mux, width or height is NULL.
+//   WEBP_MUX_BAD_DATA - if VP8X/VP8/VP8L chunk or canvas size is invalid.
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPMuxGetCanvasSize(const WebPMux* mux,
+                                              int* width, int* height);
+
+// Gets the feature flags from the mux object.
+// Note: This method assumes that the VP8X chunk, if present, is up-to-date.
+// That is, the mux object hasn't been modified since the last call to
+// WebPMuxAssemble() or WebPMuxCreate().
+// Parameters:
+//   mux - (in) object from which the features are to be fetched
+//   flags - (out) the flags specifying which features are present in the
+//           mux object. This will be an OR of various flag values.
+//           Enum 'WebPFeatureFlags' can be used to test individual flag values.
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if mux or flags is NULL.
+//   WEBP_MUX_BAD_DATA - if VP8X/VP8/VP8L chunk or canvas size is invalid.
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPMuxGetFeatures(const WebPMux* mux,
+                                            uint32_t* flags);
+
+// Gets the number of chunks with the given 'id' in the mux object.
+// Parameters:
+//   mux - (in) object from which the info is to be fetched
+//   id - (in) chunk id specifying the type of chunk
+//   num_elements - (out) number of chunks with the given chunk id
+// Returns:
+//   WEBP_MUX_INVALID_ARGUMENT - if mux or num_elements is NULL.
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPMuxNumChunks(const WebPMux* mux,
+                                          WebPChunkId id, int* num_elements);
+
+// Assembles all chunks in WebP RIFF format and returns in 'assembled_data'.
+// This function also validates the mux object.
+// Note: The content of 'assembled_data' will be ignored and overwritten.
+// Also, the content of 'assembled_data' is allocated using WebPMalloc(), and
+// NOT owned by the 'mux' object. It MUST be deallocated by the caller by
+// calling WebPDataClear(). It's always safe to call WebPDataClear() upon
+// return, even in case of error.
+// Parameters:
+//   mux - (in/out) object whose chunks are to be assembled
+//   assembled_data - (out) assembled WebP data
+// Returns:
+//   WEBP_MUX_BAD_DATA - if mux object is invalid.
+//   WEBP_MUX_INVALID_ARGUMENT - if mux or assembled_data is NULL.
+//   WEBP_MUX_MEMORY_ERROR - on memory allocation error.
+//   WEBP_MUX_OK - on success.
+WEBP_EXTERN WebPMuxError WebPMuxAssemble(WebPMux* mux,
+                                         WebPData* assembled_data);
+
+//------------------------------------------------------------------------------
+// WebPAnimEncoder API
+//
+// This API allows encoding (possibly) animated WebP images.
+//
+// Code Example:
+/*
+  WebPAnimEncoderOptions enc_options;
+  WebPAnimEncoderOptionsInit(&enc_options);
+  // Tune 'enc_options' as needed.
+  WebPAnimEncoder* enc = WebPAnimEncoderNew(width, height, &enc_options);
+  while (<there are more frames>) {
+    WebPConfig config;
+    WebPConfigInit(&config);
+    // Tune 'config' as needed.
+    WebPAnimEncoderAdd(enc, frame, timestamp_ms, &config);
+  }
+  WebPAnimEncoderAdd(enc, NULL, timestamp_ms, NULL);
+  WebPAnimEncoderAssemble(enc, webp_data);
+  WebPAnimEncoderDelete(enc);
+  // Write the 'webp_data' to a file, or re-mux it further.
+*/
+
+typedef struct WebPAnimEncoder WebPAnimEncoder;  // Main opaque object.
+
+// Forward declarations. Defined in encode.h.
+struct WebPPicture;
+struct WebPConfig;
+
+// Global options.
+struct WebPAnimEncoderOptions {
+  WebPMuxAnimParams anim_params;  // Animation parameters.
+  int minimize_size;    // If true, minimize the output size (slow). Implicitly
+                        // disables key-frame insertion.
+  int kmin;
+  int kmax;             // Minimum and maximum distance between consecutive key
+                        // frames in the output. The library may insert some key
+                        // frames as needed to satisfy these criteria.
+                        // Note that these conditions should hold: kmax > kmin
+                        // and kmin >= kmax / 2 + 1. Also, if kmax <= 0, then
+                        // key-frame insertion is disabled; and if kmax == 1,
+                        // then all frames will be key-frames (kmin value does
+                        // not matter for these special cases).
+  int allow_mixed;      // If true, use mixed compression mode; may choose
+                        // either lossy or lossless for each frame.
+  int verbose;          // If true, print info and warning messages to stderr.
+
+  uint32_t padding[4];  // Padding for later use.
+};
+
+// Internal, version-checked, entry point.
+WEBP_EXTERN int WebPAnimEncoderOptionsInitInternal(
+    WebPAnimEncoderOptions*, int);
+
+// Should always be called, to initialize a fresh WebPAnimEncoderOptions
+// structure before modification. Returns false in case of version mismatch.
+// WebPAnimEncoderOptionsInit() must have succeeded before using the
+// 'enc_options' object.
+static WEBP_INLINE int WebPAnimEncoderOptionsInit(
+    WebPAnimEncoderOptions* enc_options) {
+  return WebPAnimEncoderOptionsInitInternal(enc_options, WEBP_MUX_ABI_VERSION);
+}
+
+// Internal, version-checked, entry point.
+WEBP_EXTERN WebPAnimEncoder* WebPAnimEncoderNewInternal(
+    int, int, const WebPAnimEncoderOptions*, int);
+
+// Creates and initializes a WebPAnimEncoder object.
+// Parameters:
+//   width/height - (in) canvas width and height of the animation.
+//   enc_options - (in) encoding options; can be passed NULL to pick
+//                 reasonable defaults.
+// Returns:
+//   A pointer to the newly created WebPAnimEncoder object.
+//   Or NULL in case of memory error.
+static WEBP_INLINE WebPAnimEncoder* WebPAnimEncoderNew(
+    int width, int height, const WebPAnimEncoderOptions* enc_options) {
+  return WebPAnimEncoderNewInternal(width, height, enc_options,
+                                    WEBP_MUX_ABI_VERSION);
+}
+
+// Optimize the given frame for WebP, encode it and add it to the
+// WebPAnimEncoder object.
+// The last call to 'WebPAnimEncoderAdd' should be with frame = NULL, which
+// indicates that no more frames are to be added. This call is also used to
+// determine the duration of the last frame.
+// Parameters:
+//   enc - (in/out) object to which the frame is to be added.
+//   frame - (in/out) frame data in ARGB or YUV(A) format. If it is in YUV(A)
+//           format, it will be converted to ARGB, which incurs a small loss.
+//   timestamp_ms - (in) timestamp of this frame in milliseconds.
+//                  Duration of a frame would be calculated as
+//                  "timestamp of next frame - timestamp of this frame".
+//                  Hence, timestamps should be in non-decreasing order.
+//   config - (in) encoding options; can be passed NULL to pick
+//            reasonable defaults.
+// Returns:
+//   On error, returns false and frame->error_code is set appropriately.
+//   Otherwise, returns true.
+WEBP_EXTERN int WebPAnimEncoderAdd(
+    WebPAnimEncoder* enc, struct WebPPicture* frame, int timestamp_ms,
+    const struct WebPConfig* config);
+
+// Assemble all frames added so far into a WebP bitstream.
+// This call should be preceded by a call to 'WebPAnimEncoderAdd' with
+// frame = NULL; if not, the duration of the last frame will be internally
+// estimated.
+// Parameters:
+//   enc - (in/out) object from which the frames are to be assembled.
+//   webp_data - (out) generated WebP bitstream.
+// Returns:
+//   True on success.
+WEBP_EXTERN int WebPAnimEncoderAssemble(WebPAnimEncoder* enc,
+                                        WebPData* webp_data);
+
+// Get error string corresponding to the most recent call using 'enc'. The
+// returned string is owned by 'enc' and is valid only until the next call to
+// WebPAnimEncoderAdd() or WebPAnimEncoderAssemble() or WebPAnimEncoderDelete().
+// Parameters:
+//   enc - (in/out) object from which the error string is to be fetched.
+// Returns:
+//   NULL if 'enc' is NULL. Otherwise, returns the error string if the last call
+//   to 'enc' had an error, or an empty string if the last call was a success.
+WEBP_EXTERN const char* WebPAnimEncoderGetError(WebPAnimEncoder* enc);
+
+// Deletes the WebPAnimEncoder object.
+// Parameters:
+//   enc - (in/out) object to be deleted
+WEBP_EXTERN void WebPAnimEncoderDelete(WebPAnimEncoder* enc);
+
+//------------------------------------------------------------------------------
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // WEBP_WEBP_MUX_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/webp_mux_types.h b/vendor/github.com/sizeofint/webpanimation/webp_mux_types.h
new file mode 100644
index 00000000..6271c0a4
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/webp_mux_types.h
@@ -0,0 +1,98 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Data-types common to the mux and demux libraries.
+//
+// Author: Urvang (urvang@google.com)
+
+#ifndef WEBP_WEBP_MUX_TYPES_H_
+#define WEBP_WEBP_MUX_TYPES_H_
+
+#include <string.h>  // memset()
+#include "webp_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Note: forward declaring enumerations is not allowed in (strict) C and C++,
+// the types are left here for reference.
+// typedef enum WebPFeatureFlags WebPFeatureFlags;
+// typedef enum WebPMuxAnimDispose WebPMuxAnimDispose;
+// typedef enum WebPMuxAnimBlend WebPMuxAnimBlend;
+typedef struct WebPData WebPData;
+
+// VP8X Feature Flags.
+typedef enum WebPFeatureFlags {
+  ANIMATION_FLAG  = 0x00000002,
+  XMP_FLAG        = 0x00000004,
+  EXIF_FLAG       = 0x00000008,
+  ALPHA_FLAG      = 0x00000010,
+  ICCP_FLAG       = 0x00000020,
+
+  ALL_VALID_FLAGS = 0x0000003e
+} WebPFeatureFlags;
+
+// Dispose method (animation only). Indicates how the area used by the current
+// frame is to be treated before rendering the next frame on the canvas.
+typedef enum WebPMuxAnimDispose {
+  WEBP_MUX_DISPOSE_NONE,       // Do not dispose.
+  WEBP_MUX_DISPOSE_BACKGROUND  // Dispose to background color.
+} WebPMuxAnimDispose;
+
+// Blend operation (animation only). Indicates how transparent pixels of the
+// current frame are blended with those of the previous canvas.
+typedef enum WebPMuxAnimBlend {
+  WEBP_MUX_BLEND,              // Blend.
+  WEBP_MUX_NO_BLEND            // Do not blend.
+} WebPMuxAnimBlend;
+
+// Data type used to describe 'raw' data, e.g., chunk data
+// (ICC profile, metadata) and WebP compressed image data.
+// 'bytes' memory must be allocated using WebPMalloc() and such.
+struct WebPData {
+  const uint8_t* bytes;
+  size_t size;
+};
+
+// Initializes the contents of the 'webp_data' object with default values.
+static WEBP_INLINE void WebPDataInit(WebPData* webp_data) {
+  if (webp_data != NULL) {
+    memset(webp_data, 0, sizeof(*webp_data));
+  }
+}
+
+// Clears the contents of the 'webp_data' object by calling WebPFree().
+// Does not deallocate the object itself.
+static WEBP_INLINE void WebPDataClear(WebPData* webp_data) {
+  if (webp_data != NULL) {
+    WebPFree((void*)webp_data->bytes);
+    WebPDataInit(webp_data);
+  }
+}
+
+// Allocates necessary storage for 'dst' and copies the contents of 'src'.
+// Returns true on success.
+static WEBP_INLINE int WebPDataCopy(const WebPData* src, WebPData* dst) {
+  if (src == NULL || dst == NULL) return 0;
+  WebPDataInit(dst);
+  if (src->bytes != NULL && src->size != 0) {
+    dst->bytes = (uint8_t*)WebPMalloc(src->size);
+    if (dst->bytes == NULL) return 0;
+    memcpy((void*)dst->bytes, src->bytes, src->size);
+    dst->size = src->size;
+  }
+  return 1;
+}
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // WEBP_WEBP_MUX_TYPES_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/webp_types.h b/vendor/github.com/sizeofint/webpanimation/webp_types.h
new file mode 100644
index 00000000..47f7f2b0
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/webp_types.h
@@ -0,0 +1,68 @@
+// Copyright 2010 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Common types + memory wrappers
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#ifndef WEBP_WEBP_TYPES_H_
+#define WEBP_WEBP_TYPES_H_
+
+#include <stddef.h>  // for size_t
+
+#ifndef _MSC_VER
+#include <inttypes.h>
+#if defined(__cplusplus) || !defined(__STRICT_ANSI__) || \
+    (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L)
+#define WEBP_INLINE inline
+#else
+#define WEBP_INLINE
+#endif
+#else
+typedef signed char int8_t;
+typedef unsigned char uint8_t;
+typedef signed short int16_t;
+typedef unsigned short uint16_t;
+typedef signed int int32_t;
+typedef unsigned int uint32_t;
+typedef unsigned long long int uint64_t;
+typedef long long int int64_t;
+#define WEBP_INLINE __forceinline
+#endif  /* _MSC_VER */
+
+#ifndef WEBP_EXTERN
+// This explicitly marks library functions and allows for changing the
+// signature for e.g., Windows DLL builds.
diff --git a/vendor/github.com/sizeofint/webpanimation/webp_types.h b/vendor/github.com/sizeofint/webpanimation/webp_types.h
new file mode 100644
index 00000000..47f7f2b0
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/webp_types.h
@@ -0,0 +1,68 @@
+// Copyright 2010 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// Common types + memory wrappers
+//
+// Author: Skal (pascal.massimino@gmail.com)
+
+#ifndef WEBP_WEBP_TYPES_H_
+#define WEBP_WEBP_TYPES_H_
+
+#include <stddef.h>  // for size_t
+
+#ifndef _MSC_VER
+#include <inttypes.h>
+#if defined(__cplusplus) || !defined(__STRICT_ANSI__) || \
+    (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L)
+#define WEBP_INLINE inline
+#else
+#define WEBP_INLINE
+#endif
+#else
+typedef signed char int8_t;
+typedef unsigned char uint8_t;
+typedef signed short int16_t;
+typedef unsigned short uint16_t;
+typedef signed int int32_t;
+typedef unsigned int uint32_t;
+typedef unsigned long long int uint64_t;
+typedef long long int int64_t;
+#define WEBP_INLINE __forceinline
+#endif  /* _MSC_VER */
+
+#ifndef WEBP_EXTERN
+// This explicitly marks library functions and allows for changing the
+// signature for e.g., Windows DLL builds.
+# if defined(__GNUC__) && __GNUC__ >= 4
+#  define WEBP_EXTERN extern __attribute__ ((visibility ("default")))
+# else
+#  define WEBP_EXTERN extern
+# endif  /* __GNUC__ >= 4 */
+#endif  /* WEBP_EXTERN */
+
+// Macro to check ABI compatibility (same major revision number)
+#define WEBP_ABI_IS_INCOMPATIBLE(a, b) (((a) >> 8) != ((b) >> 8))
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Allocates 'size' bytes of memory. Returns NULL upon error. Memory
+// must be deallocated by calling WebPFree(). This function is made available
+// by the core 'libwebp' library.
+WEBP_EXTERN void* WebPMalloc(size_t size);
+
+// Releases memory returned by the WebPDecode*() functions (from decode.h).
+WEBP_EXTERN void WebPFree(void* ptr);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // WEBP_WEBP_TYPES_H_
diff --git a/vendor/github.com/sizeofint/webpanimation/webpanimation.go b/vendor/github.com/sizeofint/webpanimation/webpanimation.go
new file mode 100644
index 00000000..b8a2adeb
--- /dev/null
+++ b/vendor/github.com/sizeofint/webpanimation/webpanimation.go
@@ -0,0 +1,109 @@
+package webpanimation
+
+import (
+	"errors"
+	"fmt"
+	"image"
+	"image/draw"
+	"io"
+)
+
+type WebpAnimation struct {
+	WebPAnimEncoderOptions *WebPAnimEncoderOptions
+	Width                  int
+	Height                 int
+	loopCount              int
+	AnimationEncoder       *WebPAnimEncoder
+	WebPData               *WebPData
+	WebPMux                *WebPMux
+	WebPPictures           []*WebPPicture
+}
+
+// NewWebpAnimation initializes a new animation with the given canvas size and loop count.
+func NewWebpAnimation(width, height, loopCount int) *WebpAnimation {
+	webpAnimation := &WebpAnimation{loopCount: loopCount, Width: width, Height: height}
+	webpAnimation.WebPAnimEncoderOptions = &WebPAnimEncoderOptions{}
+
+	WebPAnimEncoderOptionsInitInternal(webpAnimation.WebPAnimEncoderOptions)
+
+	webpAnimation.AnimationEncoder = WebPAnimEncoderNewInternal(width, height, webpAnimation.WebPAnimEncoderOptions)
+	return webpAnimation
+}
+
+// ReleaseMemory releases the native memory held by the encoder, mux, data and pictures.
+func (wpa *WebpAnimation) ReleaseMemory() {
+	WebPDataClear(wpa.WebPData)
+	WebPMuxDelete(wpa.WebPMux)
+	for _, webpPicture := range wpa.WebPPictures {
+		WebPPictureFree(webpPicture)
+	}
+	WebPAnimEncoderDelete(wpa.AnimationEncoder)
+}
+
+// AddFrame adds a frame to the animation at the given timestamp in milliseconds.
+func (wpa *WebpAnimation) AddFrame(img image.Image, timestamp int, webpcfg WebPConfig) error {
+	var webPPicture *WebPPicture
+	var m *image.RGBA
+	if img != nil {
+		if v, ok := img.(*image.RGBA); ok {
+			m = v
+		} else {
+			b := img.Bounds()
+			m = image.NewRGBA(image.Rect(0, 0, b.Dx(), b.Dy()))
+			draw.Draw(m, m.Bounds(), img, b.Min, draw.Src)
+		}
+
+		webPPicture = &WebPPicture{}
+		wpa.WebPPictures = append(wpa.WebPPictures, webPPicture)
+		webPPicture.SetUseArgb(1)
+		webPPicture.SetHeight(wpa.Height)
+		webPPicture.SetWidth(wpa.Width)
+		err := WebPPictureImportRGBA(m.Pix, m.Stride, webPPicture)
+		if err != nil {
+			return err
+		}
+	}
+
+	res := WebPAnimEncoderAdd(wpa.AnimationEncoder, webPPicture, timestamp, webpcfg)
+	if res == 0 {
+		return errors.New("Failed to add frame in animation encoder")
+	}
+	return nil
+}
+
+// Encode assembles the animation and writes the WebP bitstream to w.
+func (wpa *WebpAnimation) Encode(w io.Writer) error {
+	wpa.WebPData = &WebPData{}
+
+	WebPDataInit(wpa.WebPData)
+
+	WebPAnimEncoderAssemble(wpa.AnimationEncoder, wpa.WebPData)
+
+	if wpa.loopCount > 0 {
+		wpa.WebPMux = WebPMuxCreateInternal(wpa.WebPData, 1)
+		if wpa.WebPMux == nil {
+			return errors.New("ERROR: Could not re-mux to add loop count/metadata.")
+		}
+		WebPDataClear(wpa.WebPData)
+
+		webPMuxAnimNewParams := WebPMuxAnimParams{}
+		muxErr := WebPMuxGetAnimationParams(wpa.WebPMux, &webPMuxAnimNewParams)
+		if muxErr != WebpMuxOk {
+			return errors.New("Could not fetch loop count")
+		}
+		webPMuxAnimNewParams.SetLoopCount(wpa.loopCount)
+
+		muxErr = WebPMuxSetAnimationParams(wpa.WebPMux, &webPMuxAnimNewParams)
+		if muxErr != WebpMuxOk {
+			return errors.New(fmt.Sprint("Could not update loop count, code:", muxErr))
+		}
+
+		muxErr = WebPMuxAssemble(wpa.WebPMux, wpa.WebPData)
+		if muxErr != WebpMuxOk {
+			return errors.New("Could not assemble when re-muxing to add loop count")
+		}
+
+	}
+	_, err := w.Write(wpa.WebPData.GetBytes())
+	return err
+}
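webpanimation.go above is the whole high-level surface: NewWebpAnimation sets up the encoder, AddFrame converts any image.Image to RGBA and queues it, and Encode assembles the bitstream (re-muxing only when a positive loop count must be written). A minimal end-to-end sketch of a caller; the frame rendering is elided, and NewWebpConfig is an assumed config constructor that does not appear in this hunk:

```go
package main

import (
	"image"
	"os"

	"github.com/sizeofint/webpanimation"
)

func main() {
	const w, h, frames, frameMs = 64, 64, 10, 100

	anim := webpanimation.NewWebpAnimation(w, h, 0) // loopCount 0: loop forever, no re-mux needed
	defer anim.ReleaseMemory()

	cfg := webpanimation.NewWebpConfig() // assumed constructor for WebPConfig

	for i := 0; i < frames; i++ {
		img := image.NewRGBA(image.Rect(0, 0, w, h))
		// ... render frame i into img ...
		if err := anim.AddFrame(img, i*frameMs, cfg); err != nil {
			panic(err)
		}
	}
	// Trailing nil frame fixes the last frame's duration (the C API's frame = NULL flush).
	if err := anim.AddFrame(nil, frames*frameMs, cfg); err != nil {
		panic(err)
	}

	f, err := os.Create("out.webp")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := anim.Encode(f); err != nil {
		panic(err)
	}
}
```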
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 45936a01..954bfb20 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -4,6 +4,11 @@ github.com/42wim/go-gitter
 # github.com/Baozisoftware/qrcode-terminal-go v0.0.0-20170407111555-c0650d8dff0f
 ## explicit
 github.com/Baozisoftware/qrcode-terminal-go
+# github.com/Benau/go_rlottie v0.0.0-20210807002906-98c1b2421989
+github.com/Benau/go_rlottie
+# github.com/Benau/tgsconverter v0.0.0-20210809170556-99f4a4f6337f
+## explicit
+github.com/Benau/tgsconverter/libtgsconverter
 # github.com/Jeffail/gabs v1.4.0
 ## explicit
 github.com/Jeffail/gabs
@@ -36,6 +41,8 @@ github.com/SevereCloud/vksdk/v2/events
 github.com/SevereCloud/vksdk/v2/internal
 github.com/SevereCloud/vksdk/v2/longpoll-bot
 github.com/SevereCloud/vksdk/v2/object
+# github.com/av-elier/go-decimal-to-rational v0.0.0-20191127152832-89e6aad02ecf
+github.com/av-elier/go-decimal-to-rational
 # github.com/blang/semver v3.5.1+incompatible
 github.com/blang/semver
 # github.com/d5/tengo/v2 v2.8.0
@@ -111,6 +118,8 @@ github.com/hashicorp/hcl/json/token
 # github.com/jpillora/backoff v1.0.0
 ## explicit
 github.com/jpillora/backoff
+# github.com/kettek/apng v0.0.0-20191108220231-414630eed80f
+github.com/kettek/apng
 # github.com/keybase/go-keybase-chat-bot v0.0.0-20200505163032-5cacf52379da
 ## explicit
 github.com/keybase/go-keybase-chat-bot/kbchat
@@ -238,6 +247,8 @@ github.com/shazow/ssh-chat/sshd/terminal
 # github.com/sirupsen/logrus v1.8.1
 ## explicit
 github.com/sirupsen/logrus
+# github.com/sizeofint/webpanimation v0.0.0-20210809145948-1d2b32119882
+github.com/sizeofint/webpanimation
 # github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
 ## explicit
 github.com/skip2/go-qrcode